# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/configprovider.py

# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""This module contains the inteface for controlling how configuration
is loaded.
"""
import logging
import os
from botocore import utils
logger = logging.getLogger(__name__)
#: A default dictionary that maps the logical names for session variables
#: to the specific environment variables and configuration file names
#: that contain the values for these variables.
#: When creating a new Session object, you can pass in your own dictionary
#: to remap the logical names or to add new logical names. You can then
#: get the current value for these variables by using the
#: ``get_config_variable`` method of the :class:`botocore.session.Session`
#: class.
#: The logical names form the keys of the dictionary. The values in the
#: dictionary are tuples of (<config_name>, <environment variable>,
#: <default value>, <conversion func>).
#: The conversion func is a function that takes the configuration value
#: as an argument and returns the converted value. If this value is
#: None, then the configuration value is returned unmodified. This
#: conversion function can be used to type convert config values to
#: values other than the default values of strings.
#: The ``profile`` and ``config_file`` variables should always have a
#: None value for the first entry in the tuple because it doesn't make
#: sense to look inside the config file for the location of the config
#: file or for the default profile to use.
#: The ``config_name`` is the name to look for in the configuration file,
#: the ``env var`` is the OS environment variable (``os.environ``) to
#: use, and ``default_value`` is the value to use if no value is otherwise
#: found.
BOTOCORE_DEFAUT_SESSION_VARIABLES = {
# logical: config_file, env_var, default_value, conversion_func
'profile': (None, ['AWS_DEFAULT_PROFILE', 'AWS_PROFILE'], None, None),
'region': ('region', 'AWS_DEFAULT_REGION', None, None),
'data_path': ('data_path', 'AWS_DATA_PATH', None, None),
'config_file': (None, 'AWS_CONFIG_FILE', '~/.aws/config', None),
'ca_bundle': ('ca_bundle', 'AWS_CA_BUNDLE', None, None),
'api_versions': ('api_versions', None, {}, None),
    # This is the credentials file shared among SDKs.
'credentials_file': (None, 'AWS_SHARED_CREDENTIALS_FILE',
'~/.aws/credentials', None),
# These variables only exist in the config file.
# This is the number of seconds until we time out a request to
# the instance metadata service.
'metadata_service_timeout': (
'metadata_service_timeout',
'AWS_METADATA_SERVICE_TIMEOUT', 1, int),
# This is the number of request attempts we make until we give
# up trying to retrieve data from the instance metadata service.
'metadata_service_num_attempts': (
'metadata_service_num_attempts',
'AWS_METADATA_SERVICE_NUM_ATTEMPTS', 1, int),
'ec2_metadata_service_endpoint': (
'ec2_metadata_service_endpoint',
'AWS_EC2_METADATA_SERVICE_ENDPOINT',
None, None),
'imds_use_ipv6': (
'imds_use_ipv6',
'AWS_IMDS_USE_IPV6',
False, None),
'parameter_validation': ('parameter_validation', None, True, None),
# Client side monitoring configurations.
# Note: These configurations are considered internal to botocore.
# Do not use them until publicly documented.
'csm_enabled': (
'csm_enabled', 'AWS_CSM_ENABLED', False, utils.ensure_boolean),
'csm_host': ('csm_host', 'AWS_CSM_HOST', '127.0.0.1', None),
'csm_port': ('csm_port', 'AWS_CSM_PORT', 31000, int),
'csm_client_id': ('csm_client_id', 'AWS_CSM_CLIENT_ID', '', None),
# Endpoint discovery configuration
'endpoint_discovery_enabled': (
'endpoint_discovery_enabled', 'AWS_ENDPOINT_DISCOVERY_ENABLED',
'auto', None),
'sts_regional_endpoints': (
'sts_regional_endpoints', 'AWS_STS_REGIONAL_ENDPOINTS', 'legacy',
None
),
'retry_mode': ('retry_mode', 'AWS_RETRY_MODE', 'legacy', None),
# We can't have a default here for v1 because we need to defer to
# whatever the defaults are in _retry.json.
'max_attempts': ('max_attempts', 'AWS_MAX_ATTEMPTS', None, int),
}
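# A minimal resolution sketch (hypothetical values; assumes a default session
# and that no instance variable or config file entry overrides 'region').
# Per the tuple schema above, 'region' is looked up in the environment via
# AWS_DEFAULT_REGION before falling back to the config file:
#
#     >>> import os
#     >>> import botocore.session
#     >>> os.environ['AWS_DEFAULT_REGION'] = 'us-west-2'
#     >>> session = botocore.session.get_session()
#     >>> session.get_config_variable('region')
#     'us-west-2'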
# A mapping for the s3 specific configuration vars. These are the configuration
# vars that typically go in the s3 section of the config file. This mapping
# follows the same schema as the previous session variable mapping.
DEFAULT_S3_CONFIG_VARS = {
'addressing_style': (
('s3', 'addressing_style'), None, None, None),
'use_accelerate_endpoint': (
('s3', 'use_accelerate_endpoint'), None, None, utils.ensure_boolean
),
'use_dualstack_endpoint': (
('s3', 'use_dualstack_endpoint'), None, None, utils.ensure_boolean
),
'payload_signing_enabled': (
('s3', 'payload_signing_enabled'), None, None, utils.ensure_boolean
),
'use_arn_region': (
['s3_use_arn_region',
('s3', 'use_arn_region')],
'AWS_S3_USE_ARN_REGION', None, utils.ensure_boolean
),
'us_east_1_regional_endpoint': (
['s3_us_east_1_regional_endpoint',
('s3', 'us_east_1_regional_endpoint')],
'AWS_S3_US_EAST_1_REGIONAL_ENDPOINT', None, None
)
}
# A mapping for the proxy specific configuration vars. These are
# used to configure how botocore interacts with proxy setups while
# sending requests.
DEFAULT_PROXIES_CONFIG_VARS = {
'proxy_ca_bundle': ('proxy_ca_bundle', None, None, None),
'proxy_client_cert': ('proxy_client_cert', None, None, None),
'proxy_use_forwarding_for_https': (
'proxy_use_forwarding_for_https', None, None, utils.normalize_boolean),
}
def create_botocore_default_config_mapping(session):
chain_builder = ConfigChainFactory(session=session)
config_mapping = _create_config_chain_mapping(
chain_builder, BOTOCORE_DEFAUT_SESSION_VARIABLES)
config_mapping['s3'] = SectionConfigProvider(
's3', session, _create_config_chain_mapping(
chain_builder, DEFAULT_S3_CONFIG_VARS)
)
config_mapping['proxies_config'] = SectionConfigProvider(
'proxies_config', session, _create_config_chain_mapping(
chain_builder, DEFAULT_PROXIES_CONFIG_VARS)
)
return config_mapping
def _create_config_chain_mapping(chain_builder, config_variables):
mapping = {}
for logical_name, config in config_variables.items():
mapping[logical_name] = chain_builder.create_config_chain(
instance_name=logical_name,
env_var_names=config[1],
config_property_names=config[0],
default=config[2],
conversion_func=config[3]
)
return mapping
class ConfigChainFactory(object):
"""Factory class to create our most common configuration chain case.
This is a convenience class to construct configuration chains that follow
our most common pattern. This is to prevent ordering them incorrectly,
and to make the config chain construction more readable.
"""
def __init__(self, session, environ=None):
"""Initialize a ConfigChainFactory.
:type session: :class:`botocore.session.Session`
:param session: This is the session that should be used to look up
values from the config file.
:type environ: dict
:param environ: A mapping to use for environment variables. If this
is not provided it will default to use os.environ.
"""
self._session = session
if environ is None:
environ = os.environ
self._environ = environ
def create_config_chain(self, instance_name=None, env_var_names=None,
config_property_names=None, default=None,
conversion_func=None):
"""Build a config chain following the standard botocore pattern.
        In botocore most of our config chains follow the precedence:
        session_instance_variables, environment, config_file, default_value.
        This is a convenience function for creating a chain that follows
        that precedence.
:type instance_name: str
:param instance_name: This indicates what session instance variable
corresponds to this config value. If it is None it will not be
added to the chain.
:type env_var_names: str or list of str or None
:param env_var_names: One or more environment variable names to
search for this value. They are searched in order. If it is None
it will not be added to the chain.
:type config_property_names: str/tuple or list of str/tuple or None
        :param config_property_names: One or more strings or tuples
representing the name of the key in the config file for this
config option. They are searched in order. If it is None it will
not be added to the chain.
:type default: Any
:param default: Any constant value to be returned.
:type conversion_func: None or callable
:param conversion_func: If this value is None then it has no effect on
the return type. Otherwise, it is treated as a function that will
            convert the provided value.
        :rtype: ChainProvider
        :returns: A ChainProvider that resolves in the order env_var_names ->
            config_property_names -> default. Any values that were None are
            omitted from the chain.
"""
providers = []
if instance_name is not None:
providers.append(
InstanceVarProvider(
instance_var=instance_name,
session=self._session
)
)
if env_var_names is not None:
providers.extend(self._get_env_providers(env_var_names))
if config_property_names is not None:
providers.extend(
self._get_scoped_config_providers(config_property_names)
)
if default is not None:
providers.append(ConstantProvider(value=default))
return ChainProvider(
providers=providers,
conversion_func=conversion_func,
)
def _get_env_providers(self, env_var_names):
env_var_providers = []
if not isinstance(env_var_names, list):
env_var_names = [env_var_names]
for env_var_name in env_var_names:
env_var_providers.append(
EnvironmentProvider(name=env_var_name, env=self._environ)
)
return env_var_providers
def _get_scoped_config_providers(self, config_property_names):
scoped_config_providers = []
if not isinstance(config_property_names, list):
config_property_names = [config_property_names]
for config_property_name in config_property_names:
scoped_config_providers.append(
ScopedConfigProvider(
config_var_name=config_property_name,
session=self._session,
)
)
return scoped_config_providers
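# A usage sketch for ConfigChainFactory (hypothetical variable names; assumes
# the env var and config key below are unset, so the chain falls through to
# the default, which is then run through conversion_func):
#
#     >>> import botocore.session
#     >>> factory = ConfigChainFactory(session=botocore.session.get_session())
#     >>> chain = factory.create_config_chain(
#     ...     env_var_names='MY_APP_TIMEOUT',
#     ...     config_property_names='my_app_timeout',
#     ...     default='10',
#     ...     conversion_func=int,
#     ... )
#     >>> chain.provide()
#     10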
class ConfigValueStore(object):
"""The ConfigValueStore object stores configuration values."""
def __init__(self, mapping=None):
"""Initialize a ConfigValueStore.
:type mapping: dict
:param mapping: The mapping parameter is a map of string to a subclass
of BaseProvider. When a config variable is asked for via the
get_config_variable method, the corresponding provider will be
invoked to load the value.
"""
self._overrides = {}
self._mapping = {}
if mapping is not None:
for logical_name, provider in mapping.items():
self.set_config_provider(logical_name, provider)
def get_config_variable(self, logical_name):
"""
        Retrieve the value associated with the specified logical_name
        from the corresponding provider. If no value is found, None will
        be returned.
:type logical_name: str
:param logical_name: The logical name of the session variable
you want to retrieve. This name will be mapped to the
appropriate environment variable name for this session as
well as the appropriate config file entry.
:returns: value of variable or None if not defined.
"""
if logical_name in self._overrides:
return self._overrides[logical_name]
if logical_name not in self._mapping:
return None
provider = self._mapping[logical_name]
return provider.provide()
def set_config_variable(self, logical_name, value):
"""Set a configuration variable to a specific value.
By using this method, you can override the normal lookup
process used in ``get_config_variable`` by explicitly setting
a value. Subsequent calls to ``get_config_variable`` will
use the ``value``. This gives you per-session specific
configuration values.
::
>>> # Assume logical name 'foo' maps to env var 'FOO'
>>> os.environ['FOO'] = 'myvalue'
>>> s.get_config_variable('foo')
'myvalue'
>>> s.set_config_variable('foo', 'othervalue')
>>> s.get_config_variable('foo')
'othervalue'
:type logical_name: str
:param logical_name: The logical name of the session variable
you want to set. These are the keys in ``SESSION_VARIABLES``.
:param value: The value to associate with the config variable.
"""
self._overrides[logical_name] = value
def clear_config_variable(self, logical_name):
"""Remove an override config variable from the session.
:type logical_name: str
:param logical_name: The name of the parameter to clear the override
value from.
"""
self._overrides.pop(logical_name, None)
def set_config_provider(self, logical_name, provider):
"""Set the provider for a config value.
This provides control over how a particular configuration value is
loaded. This replaces the provider for ``logical_name`` with the new
``provider``.
:type logical_name: str
:param logical_name: The name of the config value to change the config
provider for.
:type provider: :class:`botocore.configprovider.BaseProvider`
:param provider: The new provider that should be responsible for
providing a value for the config named ``logical_name``.
"""
self._mapping[logical_name] = provider
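# A sketch of plugging a custom provider into the store and then overriding
# it per-session ('api_timeout' is a hypothetical logical name;
# ConstantProvider is defined later in this module):
#
#     >>> store = ConfigValueStore()
#     >>> store.set_config_provider('api_timeout', ConstantProvider(30))
#     >>> store.get_config_variable('api_timeout')
#     30
#     >>> store.set_config_variable('api_timeout', 45)
#     >>> store.get_config_variable('api_timeout')
#     45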
class BaseProvider(object):
"""Base class for configuration value providers.
A configuration provider has some method of providing a configuration
value.
"""
def provide(self):
"""Provide a config value."""
raise NotImplementedError('provide')
class ChainProvider(BaseProvider):
"""This provider wraps one or more other providers.
    Each provider in the chain is called; the first one to return a
    non-None value is used.
"""
def __init__(self, providers=None, conversion_func=None):
"""Initalize a ChainProvider.
:type providers: list
:param providers: The initial list of providers to check for values
when invoked.
:type conversion_func: None or callable
        :param conversion_func: If this value is None then it has no effect on
            the return type. Otherwise, it is treated as a function that will
            transform the provided value.
"""
if providers is None:
providers = []
self._providers = providers
self._conversion_func = conversion_func
def provide(self):
"""Provide the value from the first provider to return non-None.
        Each provider in the chain has its provide method called. The first
        one in the chain to return a non-None value is returned from the
        ChainProvider. When no non-None value is found, None is returned.
"""
for provider in self._providers:
value = provider.provide()
if value is not None:
return self._convert_type(value)
return None
def _convert_type(self, value):
if self._conversion_func is not None:
return self._conversion_func(value)
return value
def __repr__(self):
return '[%s]' % ', '.join([str(p) for p in self._providers])
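# A resolution sketch: the empty env dict makes the first provider yield
# None, so the chain falls through to the constant and then applies the
# conversion function ('MY_VAR' is a hypothetical variable name):
#
#     >>> chain = ChainProvider(
#     ...     providers=[
#     ...         EnvironmentProvider(name='MY_VAR', env={}),
#     ...         ConstantProvider(value='42'),
#     ...     ],
#     ...     conversion_func=int,
#     ... )
#     >>> chain.provide()
#     42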
class InstanceVarProvider(BaseProvider):
"""This class loads config values from the session instance vars."""
def __init__(self, instance_var, session):
"""Initialize InstanceVarProvider.
:type instance_var: str
:param instance_var: The instance variable to load from the session.
:type session: :class:`botocore.session.Session`
:param session: The botocore session to get the loaded configuration
file variables from.
"""
self._instance_var = instance_var
self._session = session
def provide(self):
"""Provide a config value from the session instance vars."""
instance_vars = self._session.instance_variables()
value = instance_vars.get(self._instance_var)
return value
def __repr__(self):
return 'InstanceVarProvider(instance_var=%s, session=%s)' % (
self._instance_var,
self._session,
)
class ScopedConfigProvider(BaseProvider):
def __init__(self, config_var_name, session):
"""Initialize ScopedConfigProvider.
:type config_var_name: str or tuple
:param config_var_name: The name of the config variable to load from
the configuration file. If the value is a tuple, it must only
consist of two items, where the first item represents the section
and the second item represents the config var name in the section.
:type session: :class:`botocore.session.Session`
:param session: The botocore session to get the loaded configuration
file variables from.
"""
self._config_var_name = config_var_name
self._session = session
def provide(self):
"""Provide a value from a config file property."""
scoped_config = self._session.get_scoped_config()
if isinstance(self._config_var_name, tuple):
section_config = scoped_config.get(self._config_var_name[0])
if not isinstance(section_config, dict):
return None
return section_config.get(self._config_var_name[1])
return scoped_config.get(self._config_var_name)
def __repr__(self):
return 'ScopedConfigProvider(config_var_name=%s, session=%s)' % (
self._config_var_name,
self._session,
)
class EnvironmentProvider(BaseProvider):
"""This class loads config values from environment variables."""
def __init__(self, name, env):
"""Initialize with the keys in the dictionary to check.
:type name: str
:param name: The key with that name will be loaded and returned.
:type env: dict
:param env: Environment variables dictionary to get variables from.
"""
self._name = name
self._env = env
def provide(self):
"""Provide a config value from a source dictionary."""
if self._name in self._env:
return self._env[self._name]
return None
def __repr__(self):
return 'EnvironmentProvider(name=%s, env=%s)' % (self._name, self._env)
class SectionConfigProvider(BaseProvider):
"""Provides a dictionary from a section in the scoped config
This is useful for retrieving scoped config variables (i.e. s3) that have
their own set of config variables and resolving logic.
"""
def __init__(self, section_name, session, override_providers=None):
self._section_name = section_name
self._session = session
self._scoped_config_provider = ScopedConfigProvider(
self._section_name, self._session)
self._override_providers = override_providers
if self._override_providers is None:
self._override_providers = {}
def provide(self):
section_config = self._scoped_config_provider.provide()
if section_config and not isinstance(section_config, dict):
logger.debug("The %s config key is not a dictionary type, "
"ignoring its value of: %s", self._section_name,
section_config)
return None
for section_config_var, provider in self._override_providers.items():
provider_val = provider.provide()
if provider_val is not None:
if section_config is None:
section_config = {}
section_config[section_config_var] = provider_val
return section_config
def __repr__(self):
return (
'SectionConfigProvider(section_name=%s, '
'session=%s, override_providers=%s)' % (
self._section_name, self._session,
self._override_providers,
)
)
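# A sketch of how override providers merge into the section dict (reuses the
# session and hypothetical config file from the ScopedConfigProvider example
# above; 'use_arn_region' comes from the override provider, not the file):
#
#     >>> provider = SectionConfigProvider(
#     ...     's3', session,
#     ...     override_providers={'use_arn_region': ConstantProvider('true')})
#     >>> provider.provide()
#     {'addressing_style': 'path', 'use_arn_region': 'true'}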
class ConstantProvider(BaseProvider):
"""This provider provides a constant value."""
def __init__(self, value):
self._value = value
def provide(self):
"""Provide the constant value given during initialization."""
return self._value
def __repr__(self):
return 'ConstantProvider(value=%s)' % self._value
# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/endpoint.py

# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import logging
import time
import threading
from botocore.vendored import six
from botocore.awsrequest import create_request_object
from botocore.exceptions import HTTPClientError
from botocore.httpsession import URLLib3Session
from botocore.utils import is_valid_endpoint_url, get_environ_proxies
from botocore.hooks import first_non_none_response
from botocore.history import get_global_history_recorder
from botocore.response import StreamingBody
from botocore import parsers
logger = logging.getLogger(__name__)
history_recorder = get_global_history_recorder()
DEFAULT_TIMEOUT = 60
MAX_POOL_CONNECTIONS = 10
def convert_to_response_dict(http_response, operation_model):
"""Convert an HTTP response object to a request dict.
This converts the requests library's HTTP response object to
a dictionary.
:type http_response: botocore.vendored.requests.model.Response
:param http_response: The HTTP response from an AWS service request.
:rtype: dict
:return: A response dictionary which will contain the following keys:
* headers (dict)
* status_code (int)
* body (string or file-like object)
"""
response_dict = {
'headers': http_response.headers,
'status_code': http_response.status_code,
'context': {
'operation_name': operation_model.name,
}
}
if response_dict['status_code'] >= 300:
response_dict['body'] = http_response.content
elif operation_model.has_event_stream_output:
response_dict['body'] = http_response.raw
elif operation_model.has_streaming_output:
length = response_dict['headers'].get('content-length')
response_dict['body'] = StreamingBody(http_response.raw, length)
else:
response_dict['body'] = http_response.content
return response_dict
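# For a non-streaming 200 response, the resulting dict looks roughly like
# this (illustrative values; 'GetObject' stands in for whatever operation
# produced the response):
#
#     {'headers': {...}, 'status_code': 200,
#      'context': {'operation_name': 'GetObject'},
#      'body': b'...'}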
class Endpoint(object):
"""
Represents an endpoint for a particular service in a specific
region. Only an endpoint can make requests.
    :ivar service: The Service object that describes this endpoint's
        service.
:ivar host: The fully qualified endpoint hostname.
:ivar session: The session object.
"""
def __init__(self, host, endpoint_prefix, event_emitter,
response_parser_factory=None, http_session=None):
self._endpoint_prefix = endpoint_prefix
self._event_emitter = event_emitter
self.host = host
self._lock = threading.Lock()
if response_parser_factory is None:
response_parser_factory = parsers.ResponseParserFactory()
self._response_parser_factory = response_parser_factory
self.http_session = http_session
if self.http_session is None:
self.http_session = URLLib3Session()
def __repr__(self):
return '%s(%s)' % (self._endpoint_prefix, self.host)
def make_request(self, operation_model, request_dict):
logger.debug("Making request for %s with params: %s",
operation_model, request_dict)
return self._send_request(request_dict, operation_model)
def create_request(self, params, operation_model=None):
request = create_request_object(params)
if operation_model:
request.stream_output = any([
operation_model.has_streaming_output,
operation_model.has_event_stream_output
])
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'request-created.{service_id}.{op_name}'.format(
service_id=service_id,
op_name=operation_model.name)
self._event_emitter.emit(event_name, request=request,
operation_name=operation_model.name)
prepared_request = self.prepare_request(request)
return prepared_request
def _encode_headers(self, headers):
# In place encoding of headers to utf-8 if they are unicode.
for key, value in headers.items():
if isinstance(value, six.text_type):
headers[key] = value.encode('utf-8')
def prepare_request(self, request):
self._encode_headers(request.headers)
return request.prepare()
def _send_request(self, request_dict, operation_model):
attempts = 1
request = self.create_request(request_dict, operation_model)
context = request_dict['context']
success_response, exception = self._get_response(
request, operation_model, context)
while self._needs_retry(attempts, operation_model, request_dict,
success_response, exception):
attempts += 1
# If there is a stream associated with the request, we need
# to reset it before attempting to send the request again.
# This will ensure that we resend the entire contents of the
# body.
request.reset_stream()
# Create a new request when retried (including a new signature).
request = self.create_request(
request_dict, operation_model)
success_response, exception = self._get_response(
request, operation_model, context)
if success_response is not None and \
'ResponseMetadata' in success_response[1]:
# We want to share num retries, not num attempts.
total_retries = attempts - 1
success_response[1]['ResponseMetadata']['RetryAttempts'] = \
total_retries
if exception is not None:
raise exception
else:
return success_response
def _get_response(self, request, operation_model, context):
# This will return a tuple of (success_response, exception)
# and success_response is itself a tuple of
# (http_response, parsed_dict).
# If an exception occurs then the success_response is None.
# If no exception occurs then exception is None.
success_response, exception = self._do_get_response(
request, operation_model)
kwargs_to_emit = {
'response_dict': None,
'parsed_response': None,
'context': context,
'exception': exception,
}
if success_response is not None:
http_response, parsed_response = success_response
kwargs_to_emit['parsed_response'] = parsed_response
kwargs_to_emit['response_dict'] = convert_to_response_dict(
http_response, operation_model)
service_id = operation_model.service_model.service_id.hyphenize()
self._event_emitter.emit(
'response-received.%s.%s' % (
service_id, operation_model.name), **kwargs_to_emit)
return success_response, exception
def _do_get_response(self, request, operation_model):
try:
logger.debug("Sending http request: %s", request)
history_recorder.record('HTTP_REQUEST', {
'method': request.method,
'headers': request.headers,
'streaming': operation_model.has_streaming_input,
'url': request.url,
'body': request.body
})
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'before-send.%s.%s' % (service_id, operation_model.name)
responses = self._event_emitter.emit(event_name, request=request)
http_response = first_non_none_response(responses)
if http_response is None:
http_response = self._send(request)
except HTTPClientError as e:
return (None, e)
except Exception as e:
logger.debug("Exception received when sending HTTP request.",
exc_info=True)
return (None, e)
# This returns the http_response and the parsed_data.
response_dict = convert_to_response_dict(http_response, operation_model)
http_response_record_dict = response_dict.copy()
http_response_record_dict['streaming'] = \
operation_model.has_streaming_output
history_recorder.record('HTTP_RESPONSE', http_response_record_dict)
protocol = operation_model.metadata['protocol']
parser = self._response_parser_factory.create_parser(protocol)
parsed_response = parser.parse(
response_dict, operation_model.output_shape)
# Do a second parsing pass to pick up on any modeled error fields
# NOTE: Ideally, we would push this down into the parser classes but
# they currently have no reference to the operation or service model
# The parsers should probably take the operation model instead of
# output shape but we can't change that now
if http_response.status_code >= 300:
self._add_modeled_error_fields(
response_dict, parsed_response,
operation_model, parser,
)
history_recorder.record('PARSED_RESPONSE', parsed_response)
return (http_response, parsed_response), None
def _add_modeled_error_fields(
self, response_dict, parsed_response,
operation_model, parser,
):
error_code = parsed_response.get("Error", {}).get("Code")
if error_code is None:
return
service_model = operation_model.service_model
error_shape = service_model.shape_for_error_code(error_code)
if error_shape is None:
return
modeled_parse = parser.parse(response_dict, error_shape)
# TODO: avoid naming conflicts with ResponseMetadata and Error
parsed_response.update(modeled_parse)
def _needs_retry(self, attempts, operation_model, request_dict,
response=None, caught_exception=None):
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'needs-retry.%s.%s' % (
service_id,
operation_model.name)
responses = self._event_emitter.emit(
event_name, response=response, endpoint=self,
operation=operation_model, attempts=attempts,
caught_exception=caught_exception, request_dict=request_dict)
handler_response = first_non_none_response(responses)
if handler_response is None:
return False
else:
            # Request needs to be retried, and we need to sleep
            # for the specified amount of time.
logger.debug("Response received to retry, sleeping for "
"%s seconds", handler_response)
time.sleep(handler_response)
return True
def _send(self, request):
return self.http_session.send(request)
class EndpointCreator(object):
def __init__(self, event_emitter):
self._event_emitter = event_emitter
def create_endpoint(self, service_model, region_name, endpoint_url,
verify=None, response_parser_factory=None,
timeout=DEFAULT_TIMEOUT,
max_pool_connections=MAX_POOL_CONNECTIONS,
http_session_cls=URLLib3Session,
proxies=None,
socket_options=None,
client_cert=None,
proxies_config=None):
if not is_valid_endpoint_url(endpoint_url):
raise ValueError("Invalid endpoint: %s" % endpoint_url)
if proxies is None:
proxies = self._get_proxies(endpoint_url)
endpoint_prefix = service_model.endpoint_prefix
logger.debug('Setting %s timeout as %s', endpoint_prefix, timeout)
http_session = http_session_cls(
timeout=timeout,
proxies=proxies,
verify=self._get_verify_value(verify),
max_pool_connections=max_pool_connections,
socket_options=socket_options,
client_cert=client_cert,
proxies_config=proxies_config
)
return Endpoint(
endpoint_url,
endpoint_prefix=endpoint_prefix,
event_emitter=self._event_emitter,
response_parser_factory=response_parser_factory,
http_session=http_session
)
def _get_proxies(self, url):
# We could also support getting proxies from a config file,
# but for now proxy support is taken from the environment.
return get_environ_proxies(url)
def _get_verify_value(self, verify):
# This is to account for:
# https://github.com/kennethreitz/requests/issues/1436
# where we need to honor REQUESTS_CA_BUNDLE because we're creating our
# own request objects.
# First, if verify is not None, then the user explicitly specified
# a value so this automatically wins.
if verify is not None:
return verify
# Otherwise use the value from REQUESTS_CA_BUNDLE, or default to
# True if the env var does not exist.
return os.environ.get('REQUESTS_CA_BUNDLE', True)
# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/validate.py

"""User input parameter validation.
This module handles user input parameter validation
against a provided input model.
Note that the objects in this module do *not* mutate any
arguments. No type conversion happens here. It is up to another
layer to properly convert arguments to any required types.
Validation Errors
-----------------
"""
from botocore.compat import six
import decimal
import json
from datetime import datetime
from botocore.utils import parse_to_aware_datetime
from botocore.utils import is_json_value_header
from botocore.exceptions import ParamValidationError
def validate_parameters(params, shape):
"""Validates input parameters against a schema.
This is a convenience function that validates parameters against a schema.
You can also instantiate and use the ParamValidator class directly if you
want more control.
If there are any validation errors then a ParamValidationError
    will be raised. If there are no validation errors then no exception
is raised and a value of None is returned.
:param params: The user provided input parameters.
:type shape: botocore.model.Shape
:param shape: The schema which the input parameters should
adhere to.
:raise: ParamValidationError
"""
validator = ParamValidator()
report = validator.validate(params, shape)
if report.has_errors():
raise ParamValidationError(report=report.generate_report())
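# A validation sketch using botocore's shape builder (assumes
# DenormalizedStructureBuilder from botocore.model, which builds a structure
# shape from a plain dict description; the exact error text is abbreviated):
#
#     >>> from botocore.model import DenormalizedStructureBuilder
#     >>> shape = DenormalizedStructureBuilder().with_members(
#     ...     {'Name': {'type': 'string'}}).build_model()
#     >>> validate_parameters({'Name': 'ok'}, shape)   # passes, returns None
#     >>> validate_parameters({'Name': 123}, shape)    # raises
#     Traceback (most recent call last):
#       ...
#     botocore.exceptions.ParamValidationError: Parameter validation failed:
#     Invalid type for parameter Name, value: 123, type: <class 'int'>, ...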
def type_check(valid_types):
def _create_type_check_guard(func):
def _on_passes_type_check(self, param, shape, errors, name):
if _type_check(param, errors, name):
return func(self, param, shape, errors, name)
def _type_check(param, errors, name):
if not isinstance(param, valid_types):
valid_type_names = [six.text_type(t) for t in valid_types]
errors.report(name, 'invalid type', param=param,
valid_types=valid_type_names)
return False
return True
return _on_passes_type_check
return _create_type_check_guard
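# The decorator wraps a validator so it only runs when the parameter has one
# of the expected Python types; otherwise an 'invalid type' error is reported
# and the wrapped validator is skipped. A sketch with a stand-in validator
# (ValidationErrors is defined below in this module):
#
#     >>> errors = ValidationErrors()
#     >>> @type_check(valid_types=(dict,))
#     ... def check(self, param, shape, errors, name):
#     ...     return 'ran'
#     >>> check(None, 'not-a-dict', None, errors, 'input')
#     >>> errors.has_errors()
#     True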
def range_check(name, value, shape, error_type, errors):
failed = False
min_allowed = float('-inf')
if 'min' in shape.metadata:
min_allowed = shape.metadata['min']
if value < min_allowed:
failed = True
elif hasattr(shape, 'serialization'):
# Members that can be bound to the host have an implicit min of 1
if shape.serialization.get('hostLabel'):
min_allowed = 1
if value < min_allowed:
failed = True
if failed:
errors.report(name, error_type, param=value, min_allowed=min_allowed)
class ValidationErrors(object):
def __init__(self):
self._errors = []
def has_errors(self):
if self._errors:
return True
return False
def generate_report(self):
error_messages = []
for error in self._errors:
error_messages.append(self._format_error(error))
return '\n'.join(error_messages)
def _format_error(self, error):
error_type, name, additional = error
name = self._get_name(name)
if error_type == 'missing required field':
return 'Missing required parameter in %s: "%s"' % (
name, additional['required_name'])
elif error_type == 'unknown field':
return 'Unknown parameter in %s: "%s", must be one of: %s' % (
name, additional['unknown_param'],
', '.join(additional['valid_names']))
elif error_type == 'invalid type':
return 'Invalid type for parameter %s, value: %s, type: %s, ' \
'valid types: %s' % (name, additional['param'],
str(type(additional['param'])),
', '.join(additional['valid_types']))
elif error_type == 'invalid range':
min_allowed = additional['min_allowed']
return ('Invalid value for parameter %s, value: %s, '
'valid min value: %s' % (name, additional['param'],
min_allowed))
elif error_type == 'invalid length':
min_allowed = additional['min_allowed']
return ('Invalid length for parameter %s, value: %s, '
'valid min length: %s' % (name, additional['param'],
min_allowed))
elif error_type == 'unable to encode to json':
return 'Invalid parameter %s must be json serializable: %s' \
% (name, additional['type_error'])
def _get_name(self, name):
if not name:
return 'input'
elif name.startswith('.'):
return name[1:]
else:
return name
def report(self, name, reason, **kwargs):
self._errors.append((reason, name, kwargs))
class ParamValidator(object):
"""Validates parameters against a shape model."""
def validate(self, params, shape):
"""Validate parameters against a shape model.
This method will validate the parameters against a provided shape model.
All errors will be collected before returning to the caller. This means
        that this method will not stop at the first error; it will return all
possible errors.
:param params: User provided dict of parameters
:param shape: A shape model describing the expected input.
:return: A list of errors.
"""
errors = ValidationErrors()
self._validate(params, shape, errors, name='')
return errors
def _check_special_validation_cases(self, shape):
if is_json_value_header(shape):
return self._validate_jsonvalue_string
def _validate(self, params, shape, errors, name):
special_validator = self._check_special_validation_cases(shape)
if special_validator:
special_validator(params, shape, errors, name)
else:
getattr(self, '_validate_%s' % shape.type_name)(
params, shape, errors, name)
def _validate_jsonvalue_string(self, params, shape, errors, name):
# Check to see if a value marked as a jsonvalue can be dumped to
# a json string.
try:
json.dumps(params)
except (ValueError, TypeError) as e:
errors.report(name, 'unable to encode to json', type_error=e)
@type_check(valid_types=(dict,))
def _validate_structure(self, params, shape, errors, name):
# Validate required fields.
for required_member in shape.metadata.get('required', []):
if required_member not in params:
errors.report(name, 'missing required field',
required_name=required_member, user_params=params)
members = shape.members
known_params = []
# Validate known params.
for param in params:
if param not in members:
errors.report(name, 'unknown field', unknown_param=param,
valid_names=list(members))
else:
known_params.append(param)
# Validate structure members.
for param in known_params:
self._validate(params[param], shape.members[param],
errors, '%s.%s' % (name, param))
@type_check(valid_types=six.string_types)
def _validate_string(self, param, shape, errors, name):
        # Validate range. For a string, the min/max constraints
# are of the string length.
# Looks like:
# "WorkflowId":{
# "type":"string",
# "min":1,
# "max":256
# }
range_check(name, len(param), shape, 'invalid length', errors)
@type_check(valid_types=(list, tuple))
def _validate_list(self, param, shape, errors, name):
member_shape = shape.member
range_check(name, len(param), shape, 'invalid length', errors)
for i, item in enumerate(param):
self._validate(item, member_shape, errors, '%s[%s]' % (name, i))
@type_check(valid_types=(dict,))
def _validate_map(self, param, shape, errors, name):
key_shape = shape.key
value_shape = shape.value
for key, value in param.items():
self._validate(key, key_shape, errors, "%s (key: %s)"
% (name, key))
self._validate(value, value_shape, errors, '%s.%s' % (name, key))
@type_check(valid_types=six.integer_types)
def _validate_integer(self, param, shape, errors, name):
range_check(name, param, shape, 'invalid range', errors)
def _validate_blob(self, param, shape, errors, name):
if isinstance(param, (bytes, bytearray, six.text_type)):
return
elif hasattr(param, 'read'):
# File like objects are also allowed for blob types.
return
else:
errors.report(name, 'invalid type', param=param,
valid_types=[str(bytes), str(bytearray),
'file-like object'])
@type_check(valid_types=(bool,))
def _validate_boolean(self, param, shape, errors, name):
pass
@type_check(valid_types=(float, decimal.Decimal) + six.integer_types)
def _validate_double(self, param, shape, errors, name):
range_check(name, param, shape, 'invalid range', errors)
_validate_float = _validate_double
@type_check(valid_types=six.integer_types)
def _validate_long(self, param, shape, errors, name):
range_check(name, param, shape, 'invalid range', errors)
def _validate_timestamp(self, param, shape, errors, name):
# We don't use @type_check because datetimes are a bit
# more flexible. You can either provide a datetime
# object, or a string that parses to a datetime.
is_valid_type = self._type_check_datetime(param)
if not is_valid_type:
valid_type_names = [six.text_type(datetime), 'timestamp-string']
errors.report(name, 'invalid type', param=param,
valid_types=valid_type_names)
def _type_check_datetime(self, value):
try:
parse_to_aware_datetime(value)
return True
except (TypeError, ValueError, AttributeError):
# Yes, dateutil can sometimes raise an AttributeError
# when parsing timestamps.
return False
class ParamValidationDecorator(object):
def __init__(self, param_validator, serializer):
self._param_validator = param_validator
self._serializer = serializer
def serialize_to_request(self, parameters, operation_model):
input_shape = operation_model.input_shape
if input_shape is not None:
report = self._param_validator.validate(parameters,
operation_model.input_shape)
if report.has_errors():
raise ParamValidationError(report=report.generate_report())
return self._serializer.serialize_to_request(parameters,
operation_model)
# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/utils.py

# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import base64
import re
import time
import logging
import datetime
import hashlib
import binascii
import functools
import weakref
import random
import os
import socket
import cgi
import warnings
import dateutil.parser
from dateutil.tz import tzutc
import botocore
import botocore.awsrequest
import botocore.httpsession
from botocore.compat import (
json, quote, zip_longest, urlsplit, urlunsplit, OrderedDict,
six, urlparse, get_tzinfo_options, get_md5, MD5_AVAILABLE
)
from botocore.vendored.six.moves.urllib.request import getproxies, proxy_bypass
from botocore.exceptions import (
InvalidExpressionError, ConfigNotFound, InvalidDNSNameError, ClientError,
MetadataRetrievalError, EndpointConnectionError, ReadTimeoutError,
ConnectionClosedError, ConnectTimeoutError, UnsupportedS3ArnError,
UnsupportedS3AccesspointConfigurationError, SSOTokenLoadError,
InvalidRegionError, InvalidIMDSEndpointError, UnsupportedOutpostResourceError,
UnsupportedS3ControlConfigurationError, UnsupportedS3ControlArnError,
InvalidHostLabelError, HTTPClientError, UnsupportedS3ConfigurationError,
)
from urllib3.exceptions import LocationParseError
logger = logging.getLogger(__name__)
DEFAULT_METADATA_SERVICE_TIMEOUT = 1
METADATA_BASE_URL = 'http://169.254.169.254/'
METADATA_BASE_URL_IPv6 = 'http://[fe80:ec2::254%eth0]/'
# These are chars that do not need to be urlencoded.
# Based on rfc2986, section 2.3
SAFE_CHARS = '-._~'
LABEL_RE = re.compile(r'[a-z0-9][a-z0-9\-]*[a-z0-9]')
RETRYABLE_HTTP_ERRORS = (
ReadTimeoutError, EndpointConnectionError, ConnectionClosedError,
ConnectTimeoutError,
)
S3_ACCELERATE_WHITELIST = ['dualstack']
# In switching events from using service name / endpoint prefix to service
# id, we have to preserve compatibility. This maps the instances where either
# is different than the transformed service id.
EVENT_ALIASES = {
"a4b": "alexa-for-business",
"alexaforbusiness": "alexa-for-business",
"api.mediatailor": "mediatailor",
"api.pricing": "pricing",
"api.sagemaker": "sagemaker",
"apigateway": "api-gateway",
"application-autoscaling": "application-auto-scaling",
"appstream2": "appstream",
"autoscaling": "auto-scaling",
"autoscaling-plans": "auto-scaling-plans",
"ce": "cost-explorer",
"cloudhsmv2": "cloudhsm-v2",
"cloudsearchdomain": "cloudsearch-domain",
"cognito-idp": "cognito-identity-provider",
"config": "config-service",
"cur": "cost-and-usage-report-service",
"data.iot": "iot-data-plane",
"data.jobs.iot": "iot-jobs-data-plane",
"data.mediastore": "mediastore-data",
"datapipeline": "data-pipeline",
"devicefarm": "device-farm",
"devices.iot1click": "iot-1click-devices-service",
"directconnect": "direct-connect",
"discovery": "application-discovery-service",
"dms": "database-migration-service",
"ds": "directory-service",
"dynamodbstreams": "dynamodb-streams",
"elasticbeanstalk": "elastic-beanstalk",
"elasticfilesystem": "efs",
"elasticloadbalancing": "elastic-load-balancing",
"elasticmapreduce": "emr",
"elastictranscoder": "elastic-transcoder",
"elb": "elastic-load-balancing",
"elbv2": "elastic-load-balancing-v2",
"email": "ses",
"entitlement.marketplace": "marketplace-entitlement-service",
"es": "elasticsearch-service",
"events": "eventbridge",
"cloudwatch-events": "eventbridge",
"iot-data": "iot-data-plane",
"iot-jobs-data": "iot-jobs-data-plane",
"iot1click-devices": "iot-1click-devices-service",
"iot1click-projects": "iot-1click-projects",
"kinesisanalytics": "kinesis-analytics",
"kinesisvideo": "kinesis-video",
"lex-models": "lex-model-building-service",
"lex-runtime": "lex-runtime-service",
"logs": "cloudwatch-logs",
"machinelearning": "machine-learning",
"marketplace-entitlement": "marketplace-entitlement-service",
"marketplacecommerceanalytics": "marketplace-commerce-analytics",
"metering.marketplace": "marketplace-metering",
"meteringmarketplace": "marketplace-metering",
"mgh": "migration-hub",
"models.lex": "lex-model-building-service",
"monitoring": "cloudwatch",
"mturk-requester": "mturk",
"opsworks-cm": "opsworkscm",
"projects.iot1click": "iot-1click-projects",
"resourcegroupstaggingapi": "resource-groups-tagging-api",
"route53": "route-53",
"route53domains": "route-53-domains",
"runtime.lex": "lex-runtime-service",
"runtime.sagemaker": "sagemaker-runtime",
"sdb": "simpledb",
"secretsmanager": "secrets-manager",
"serverlessrepo": "serverlessapplicationrepository",
"servicecatalog": "service-catalog",
"states": "sfn",
"stepfunctions": "sfn",
"storagegateway": "storage-gateway",
"streams.dynamodb": "dynamodb-streams",
"tagging": "resource-groups-tagging-api"
}
# Vendoring IPv6 validation regex patterns from urllib3
# https://github.com/urllib3/urllib3/blob/7e856c0/src/urllib3/util/url.py
IPV4_PAT = r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}"
HEX_PAT = "[0-9A-Fa-f]{1,4}"
LS32_PAT = "(?:{hex}:{hex}|{ipv4})".format(hex=HEX_PAT, ipv4=IPV4_PAT)
_subs = {"hex": HEX_PAT, "ls32": LS32_PAT}
_variations = [
# 6( h16 ":" ) ls32
"(?:%(hex)s:){6}%(ls32)s",
# "::" 5( h16 ":" ) ls32
"::(?:%(hex)s:){5}%(ls32)s",
# [ h16 ] "::" 4( h16 ":" ) ls32
"(?:%(hex)s)?::(?:%(hex)s:){4}%(ls32)s",
# [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
"(?:(?:%(hex)s:)?%(hex)s)?::(?:%(hex)s:){3}%(ls32)s",
# [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
"(?:(?:%(hex)s:){0,2}%(hex)s)?::(?:%(hex)s:){2}%(ls32)s",
# [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32
"(?:(?:%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s",
# [ *4( h16 ":" ) h16 ] "::" ls32
"(?:(?:%(hex)s:){0,4}%(hex)s)?::%(ls32)s",
# [ *5( h16 ":" ) h16 ] "::" h16
"(?:(?:%(hex)s:){0,5}%(hex)s)?::%(hex)s",
# [ *6( h16 ":" ) h16 ] "::"
"(?:(?:%(hex)s:){0,6}%(hex)s)?::",
]
UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~"
IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")"
ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+"
IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]"
IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT + "$")
def ensure_boolean(val):
"""Ensures a boolean value if a string or boolean is provided
For strings, the value for True/False is case insensitive
"""
if isinstance(val, bool):
return val
else:
return val.lower() == 'true'
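# Sketch: the string comparison is case-insensitive, and anything other
# than 'true' maps to False:
#
#     >>> ensure_boolean('TRUE'), ensure_boolean('0'), ensure_boolean(True)
#     (True, False, True)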
def is_json_value_header(shape):
"""Determines if the provided shape is the special header type jsonvalue.
:type shape: botocore.shape
:param shape: Shape to be inspected for the jsonvalue trait.
:return: True if this type is a jsonvalue, False otherwise
:rtype: Bool
"""
return (hasattr(shape, 'serialization') and
shape.serialization.get('jsonvalue', False) and
shape.serialization.get('location') == 'header' and
shape.type_name == 'string')
def get_service_module_name(service_model):
"""Returns the module name for a service
This is the value used in both the documentation and client class name
"""
name = service_model.metadata.get(
'serviceAbbreviation',
service_model.metadata.get(
'serviceFullName', service_model.service_name))
name = name.replace('Amazon', '')
name = name.replace('AWS', '')
name = re.sub(r'\W+', '', name)
return name
def normalize_url_path(path):
if not path:
return '/'
return remove_dot_segments(path)
def normalize_boolean(val):
"""Returns None if val is None, otherwise ensure value
converted to boolean"""
if val is None:
return val
else:
return ensure_boolean(val)
def remove_dot_segments(url):
# RFC 3986, section 5.2.4 "Remove Dot Segments"
# Also, AWS services require consecutive slashes to be removed,
# so that's done here as well
if not url:
return ''
input_url = url.split('/')
output_list = []
for x in input_url:
if x and x != '.':
if x == '..':
if output_list:
output_list.pop()
else:
output_list.append(x)
if url[0] == '/':
first = '/'
else:
first = ''
if url[-1] == '/' and output_list:
last = '/'
else:
last = ''
return first + '/'.join(output_list) + last
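# A few illustrative inputs (dot segments resolved, consecutive slashes
# collapsed, leading and trailing slashes preserved):
#
#     >>> remove_dot_segments('/a/b/../c')
#     '/a/c'
#     >>> remove_dot_segments('/a//b/./c/')
#     '/a/b/c/'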
def validate_jmespath_for_set(expression):
# Validates a limited jmespath expression to determine if we can set a
# value based on it. Only works with dotted paths.
if not expression or expression == '.':
raise InvalidExpressionError(expression=expression)
for invalid in ['[', ']', '*']:
if invalid in expression:
raise InvalidExpressionError(expression=expression)
def set_value_from_jmespath(source, expression, value, is_first=True):
# This takes a (limited) jmespath-like expression & can set a value based
# on it.
# Limitations:
# * Only handles dotted lookups
# * No offsets/wildcards/slices/etc.
if is_first:
validate_jmespath_for_set(expression)
bits = expression.split('.', 1)
current_key, remainder = bits[0], bits[1] if len(bits) > 1 else ''
if not current_key:
raise InvalidExpressionError(expression=expression)
if remainder:
if current_key not in source:
# We've got something in the expression that's not present in the
# source (new key). If there's any more bits, we'll set the key
# with an empty dictionary.
source[current_key] = {}
return set_value_from_jmespath(
source[current_key],
remainder,
value,
is_first=False
)
# If we're down to a single key, set it.
source[current_key] = value
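# A sketch of the dotted-path behavior (intermediate keys are created as
# empty dicts on the way down):
#
#     >>> data = {}
#     >>> set_value_from_jmespath(data, 'foo.bar.baz', 42)
#     >>> data
#     {'foo': {'bar': {'baz': 42}}}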
class _RetriesExceededError(Exception):
"""Internal exception used when the number of retries are exceeded."""
pass
class BadIMDSRequestError(Exception):
def __init__(self, request):
self.request = request
class IMDSFetcher(object):
_RETRIES_EXCEEDED_ERROR_CLS = _RetriesExceededError
_TOKEN_PATH = 'latest/api/token'
_TOKEN_TTL = '21600'
def __init__(self, timeout=DEFAULT_METADATA_SERVICE_TIMEOUT,
num_attempts=1, base_url=METADATA_BASE_URL,
env=None, user_agent=None, config=None):
self._timeout = timeout
self._num_attempts = num_attempts
self._base_url = self._select_base_url(base_url, config)
if env is None:
env = os.environ.copy()
self._disabled = env.get('AWS_EC2_METADATA_DISABLED', 'false').lower()
self._disabled = self._disabled == 'true'
self._user_agent = user_agent
self._session = botocore.httpsession.URLLib3Session(
timeout=self._timeout,
proxies=get_environ_proxies(self._base_url),
)
def get_base_url(self):
return self._base_url
def _select_base_url(self, base_url, config):
if config is None:
config = {}
requires_ipv6 = ensure_boolean(config.get('imds_use_ipv6', False))
custom_metadata_endpoint = config.get('ec2_metadata_service_endpoint')
if requires_ipv6 and custom_metadata_endpoint:
            logger.warning("Custom endpoint and IMDS_USE_IPV6 are both set. Using custom endpoint.")
chosen_base_url = None
if base_url != METADATA_BASE_URL:
chosen_base_url = base_url
elif custom_metadata_endpoint:
chosen_base_url = custom_metadata_endpoint
elif requires_ipv6:
chosen_base_url = METADATA_BASE_URL_IPv6
else:
chosen_base_url = METADATA_BASE_URL
logger.debug("IMDS ENDPOINT: %s" % chosen_base_url)
if not is_valid_uri(chosen_base_url):
raise InvalidIMDSEndpointError(endpoint=chosen_base_url)
return chosen_base_url
def _fetch_metadata_token(self):
self._assert_enabled()
url = self._base_url + self._TOKEN_PATH
headers = {
'x-aws-ec2-metadata-token-ttl-seconds': self._TOKEN_TTL,
}
self._add_user_agent(headers)
request = botocore.awsrequest.AWSRequest(
method='PUT', url=url, headers=headers)
for i in range(self._num_attempts):
try:
response = self._session.send(request.prepare())
if response.status_code == 200:
return response.text
elif response.status_code in (404, 403, 405):
return None
elif response.status_code in (400,):
raise BadIMDSRequestError(request)
except ReadTimeoutError:
return None
except RETRYABLE_HTTP_ERRORS as e:
logger.debug(
"Caught retryable HTTP exception while making metadata "
"service request to %s: %s", url, e, exc_info=True)
except HTTPClientError as e:
if isinstance(e.kwargs.get('error'), LocationParseError):
raise InvalidIMDSEndpointError(endpoint=url, error=e)
else:
raise
return None
def _get_request(self, url_path, retry_func, token=None):
"""Make a get request to the Instance Metadata Service.
:type url_path: str
:param url_path: The path component of the URL to make a get request.
This arg is appended to the base_url that was provided in the
initializer.
:type retry_func: callable
        :param retry_func: A function that takes the response as an argument
            and determines if it needs to retry. By default, empty and
            non-200 responses are retried.
:type token: str
:param token: Metadata token to send along with GET requests to IMDS.
"""
self._assert_enabled()
if retry_func is None:
retry_func = self._default_retry
url = self._base_url + url_path
headers = {}
if token is not None:
headers['x-aws-ec2-metadata-token'] = token
self._add_user_agent(headers)
for i in range(self._num_attempts):
try:
request = botocore.awsrequest.AWSRequest(
method='GET', url=url, headers=headers)
response = self._session.send(request.prepare())
if not retry_func(response):
return response
except RETRYABLE_HTTP_ERRORS as e:
logger.debug(
"Caught retryable HTTP exception while making metadata "
"service request to %s: %s", url, e, exc_info=True)
raise self._RETRIES_EXCEEDED_ERROR_CLS()
def _add_user_agent(self, headers):
if self._user_agent is not None:
headers['User-Agent'] = self._user_agent
def _assert_enabled(self):
if self._disabled:
logger.debug("Access to EC2 metadata has been disabled.")
raise self._RETRIES_EXCEEDED_ERROR_CLS()
def _default_retry(self, response):
return (
self._is_non_ok_response(response) or
self._is_empty(response)
)
def _is_non_ok_response(self, response):
if response.status_code != 200:
self._log_imds_response(response, 'non-200', log_body=True)
return True
return False
def _is_empty(self, response):
if not response.content:
self._log_imds_response(response, 'no body', log_body=True)
return True
return False
def _log_imds_response(self, response, reason_to_log, log_body=False):
statement = (
"Metadata service returned %s response "
"with status code of %s for url: %s"
)
logger_args = [
reason_to_log, response.status_code, response.url
]
if log_body:
statement += ", content body: %s"
logger_args.append(response.content)
logger.debug(statement, *logger_args)
class InstanceMetadataFetcher(IMDSFetcher):
_URL_PATH = 'latest/meta-data/iam/security-credentials/'
_REQUIRED_CREDENTIAL_FIELDS = [
'AccessKeyId', 'SecretAccessKey', 'Token', 'Expiration'
]
def retrieve_iam_role_credentials(self):
try:
token = self._fetch_metadata_token()
role_name = self._get_iam_role(token)
credentials = self._get_credentials(role_name, token)
if self._contains_all_credential_fields(credentials):
return {
'role_name': role_name,
'access_key': credentials['AccessKeyId'],
'secret_key': credentials['SecretAccessKey'],
'token': credentials['Token'],
'expiry_time': credentials['Expiration'],
}
else:
# IMDS can return a 200 response that has a JSON formatted
# error message (i.e. if ec2 is not trusted entity for the
# attached role). We do not necessarily want to retry for
# these and we also do not necessarily want to raise a key
# error. So at least log the problematic response and return
# an empty dictionary to signal that it was not able to
                # retrieve credentials. These errors will contain both a
# Code and Message key.
if 'Code' in credentials and 'Message' in credentials:
                    logger.debug('Error response received when retrieving '
                                 'credentials: %s.', credentials)
return {}
except self._RETRIES_EXCEEDED_ERROR_CLS:
logger.debug("Max number of attempts exceeded (%s) when "
"attempting to retrieve data from metadata service.",
self._num_attempts)
except BadIMDSRequestError as e:
logger.debug("Bad IMDS request: %s", e.request)
return {}
def _get_iam_role(self, token=None):
return self._get_request(
url_path=self._URL_PATH,
retry_func=self._needs_retry_for_role_name,
token=token,
).text
def _get_credentials(self, role_name, token=None):
r = self._get_request(
url_path=self._URL_PATH + role_name,
retry_func=self._needs_retry_for_credentials,
token=token,
)
return json.loads(r.text)
def _is_invalid_json(self, response):
try:
json.loads(response.text)
return False
except ValueError:
self._log_imds_response(response, 'invalid json')
return True
def _needs_retry_for_role_name(self, response):
return (
self._is_non_ok_response(response) or
self._is_empty(response)
)
def _needs_retry_for_credentials(self, response):
return (
self._is_non_ok_response(response) or
self._is_empty(response) or
self._is_invalid_json(response)
)
def _contains_all_credential_fields(self, credentials):
for field in self._REQUIRED_CREDENTIAL_FIELDS:
if field not in credentials:
logger.debug(
                    'Retrieved credentials are missing required field: %s',
field)
return False
return True
def merge_dicts(dict1, dict2, append_lists=False):
"""Given two dict, merge the second dict into the first.
The dicts can have arbitrary nesting.
:param append_lists: If true, instead of clobbering a list with the new
value, append all of the new values onto the original list.
"""
for key in dict2:
if isinstance(dict2[key], dict):
if key in dict1 and key in dict2:
merge_dicts(dict1[key], dict2[key])
else:
dict1[key] = dict2[key]
# If the value is a list and the ``append_lists`` flag is set,
# append the new values onto the original list
elif isinstance(dict2[key], list) and append_lists:
# The value in dict1 must be a list in order to append new
# values onto it.
if key in dict1 and isinstance(dict1[key], list):
dict1[key].extend(dict2[key])
else:
dict1[key] = dict2[key]
else:
            # For any other value (including lists when append_lists is
            # False), the value from dict2 overwrites the value in dict1.
dict1[key] = dict2[key]
def lowercase_dict(original):
"""Copies the given dictionary ensuring all keys are lowercase strings. """
copy = {}
for key in original:
copy[key.lower()] = original[key]
return copy
def parse_key_val_file(filename, _open=open):
try:
with _open(filename) as f:
contents = f.read()
return parse_key_val_file_contents(contents)
except OSError:
raise ConfigNotFound(path=filename)
def parse_key_val_file_contents(contents):
# This was originally extracted from the EC2 credential provider, which was
# fairly lenient in its parsing. We only try to parse key/val pairs if
# there's a '=' in the line.
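    # Illustrative example (hypothetical input, not a real credential file):
    #   parse_key_val_file_contents('aws_access_key_id = foo\nnot a pair\n')
    #   returns {'aws_access_key_id': 'foo'}; lines without '=' are skipped.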
final = {}
for line in contents.splitlines():
if '=' not in line:
continue
key, val = line.split('=', 1)
key = key.strip()
val = val.strip()
final[key] = val
return final
def percent_encode_sequence(mapping, safe=SAFE_CHARS):
"""Urlencode a dict or list into a string.
This is similar to urllib.urlencode except that:
* It uses quote, and not quote_plus
* It has a default list of safe chars that don't need
to be encoded, which matches what AWS services expect.
    If any value in the input ``mapping`` is a list type,
    then each list element will be serialized. This is equivalent
    to ``urlencode``'s ``doseq=True`` argument.
This function should be preferred over the stdlib
``urlencode()`` function.
:param mapping: Either a dict to urlencode or a list of
``(key, value)`` pairs.
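    Example usage (an illustrative sketch; the exact output assumes the
    default ``safe`` characters)::
        percent_encode_sequence({'k1': 'with spaces++/', 'k2': ['a', 'b']})
        # -> 'k1=with%20spaces%2B%2B%2F&k2=a&k2=b'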
"""
encoded_pairs = []
if hasattr(mapping, 'items'):
pairs = mapping.items()
else:
pairs = mapping
for key, value in pairs:
if isinstance(value, list):
for element in value:
encoded_pairs.append('%s=%s' % (percent_encode(key),
percent_encode(element)))
else:
encoded_pairs.append('%s=%s' % (percent_encode(key),
percent_encode(value)))
return '&'.join(encoded_pairs)
def percent_encode(input_str, safe=SAFE_CHARS):
"""Urlencodes a string.
Whereas percent_encode_sequence handles taking a dict/sequence and
producing a percent encoded string, this function deals only with
taking a string (not a dict/sequence) and percent encoding it.
If given the binary type, will simply URL encode it. If given the
text type, will produce the binary type by UTF-8 encoding the
text. If given something else, will convert it to the text type
first.
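    Example usage (illustrative)::
        percent_encode(u'key with spaces')  # -> 'key%20with%20spaces'
        percent_encode(b'a&b')              # -> 'a%26b'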
"""
    # If it's not a binary or text string, make it a text string.
if not isinstance(input_str, (six.binary_type, six.text_type)):
input_str = six.text_type(input_str)
# If it's not bytes, make it bytes by UTF-8 encoding it.
if not isinstance(input_str, six.binary_type):
input_str = input_str.encode('utf-8')
return quote(input_str, safe=safe)
def _parse_timestamp_with_tzinfo(value, tzinfo):
"""Parse timestamp with pluggable tzinfo options."""
if isinstance(value, (int, float)):
# Possibly an epoch time.
return datetime.datetime.fromtimestamp(value, tzinfo())
else:
try:
return datetime.datetime.fromtimestamp(float(value), tzinfo())
except (TypeError, ValueError):
pass
try:
# In certain cases, a timestamp marked with GMT can be parsed into a
# different time zone, so here we provide a context which will
# enforce that GMT == UTC.
return dateutil.parser.parse(value, tzinfos={'GMT': tzutc()})
except (TypeError, ValueError) as e:
raise ValueError('Invalid timestamp "%s": %s' % (value, e))
def parse_timestamp(value):
"""Parse a timestamp into a datetime object.
Supported formats:
* iso8601
* rfc822
* epoch (value is an integer)
This will return a ``datetime.datetime`` object.
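    Example usage (illustrative)::
        parse_timestamp(0)                       # epoch time, aware datetime
        parse_timestamp('2014-01-01T00:00:00Z')  # aware datetime in UTC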
"""
for tzinfo in get_tzinfo_options():
try:
return _parse_timestamp_with_tzinfo(value, tzinfo)
except OSError as e:
logger.debug('Unable to parse timestamp with "%s" timezone info.',
tzinfo.__name__, exc_info=e)
raise RuntimeError('Unable to calculate correct timezone offset for '
'"%s"' % value)
def parse_to_aware_datetime(value):
"""Converted the passed in value to a datetime object with tzinfo.
This function can be used to normalize all timestamp inputs. This
function accepts a number of different types of inputs, but
will always return a datetime.datetime object with time zone
information.
The input param ``value`` can be one of several types:
* A datetime object (both naive and aware)
* An integer representing the epoch time (can also be a string
      of the integer, i.e. '0', instead of 0). The epoch time is
considered to be UTC.
* An iso8601 formatted timestamp. This does not need to be
a complete timestamp, it can contain just the date portion
without the time component.
The returned value will be a datetime object that will have tzinfo.
If no timezone info was provided in the input value, then UTC is
assumed, not local time.
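    Example usage (illustrative)::
        parse_to_aware_datetime('1970-01-01')
        # -> datetime.datetime(1970, 1, 1, 0, 0, tzinfo=tzutc())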
"""
# This is a general purpose method that handles several cases of
# converting the provided value to a string timestamp suitable to be
# serialized to an http request. It can handle:
# 1) A datetime.datetime object.
if isinstance(value, datetime.datetime):
datetime_obj = value
else:
# 2) A string object that's formatted as a timestamp.
# We document this as being an iso8601 timestamp, although
# parse_timestamp is a bit more flexible.
datetime_obj = parse_timestamp(value)
if datetime_obj.tzinfo is None:
        # A case could be made that if no time zone is provided we
        # should use the local time.  However, for backwards
        # compatibility the previous behavior was to assume UTC,
        # which is what we do here.
datetime_obj = datetime_obj.replace(tzinfo=tzutc())
else:
datetime_obj = datetime_obj.astimezone(tzutc())
return datetime_obj
def datetime2timestamp(dt, default_timezone=None):
"""Calculate the timestamp based on the given datetime instance.
:type dt: datetime
:param dt: A datetime object to be converted into timestamp
:type default_timezone: tzinfo
    :param default_timezone: If None, ``tzutc()`` is used. This is only
        applied when ``dt`` is a naive datetime.
:returns: The timestamp
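    Example usage (illustrative; a naive datetime is treated as UTC)::
        datetime2timestamp(datetime.datetime(1970, 1, 2))  # -> 86400.0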
"""
epoch = datetime.datetime(1970, 1, 1)
if dt.tzinfo is None:
if default_timezone is None:
default_timezone = tzutc()
dt = dt.replace(tzinfo=default_timezone)
d = dt.replace(tzinfo=None) - dt.utcoffset() - epoch
if hasattr(d, "total_seconds"):
return d.total_seconds() # Works in Python 2.7+
return (d.microseconds + (d.seconds + d.days * 24 * 3600) * 10**6) / 10**6
def calculate_sha256(body, as_hex=False):
"""Calculate a sha256 checksum.
This method will calculate the sha256 checksum of a file like
object. Note that this method will iterate through the entire
file contents. The caller is responsible for ensuring the proper
starting position of the file and ``seek()``'ing the file back
to its starting location if other consumers need to read from
the file like object.
:param body: Any file like object. The file must be opened
in binary mode such that a ``.read()`` call returns bytes.
:param as_hex: If True, then the hex digest is returned.
If False, then the digest (as binary bytes) is returned.
:returns: The sha256 checksum
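    Example usage (illustrative; the digest shown is for an empty body)::
        import io
        calculate_sha256(io.BytesIO(b''), as_hex=True)
        # -> 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'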
"""
checksum = hashlib.sha256()
for chunk in iter(lambda: body.read(1024 * 1024), b''):
checksum.update(chunk)
if as_hex:
return checksum.hexdigest()
else:
return checksum.digest()
def calculate_tree_hash(body):
"""Calculate a tree hash checksum.
For more information see:
http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html
:param body: Any file like object. This has the same constraints as
the ``body`` param in calculate_sha256
:rtype: str
:returns: The hex version of the calculated tree hash
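    Example usage (illustrative)::
        import io
        calculate_tree_hash(io.BytesIO(b''))
        # -> the hex sha256 of b'', since there are no 1MB chunks to combine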
"""
chunks = []
required_chunk_size = 1024 * 1024
sha256 = hashlib.sha256
for chunk in iter(lambda: body.read(required_chunk_size), b''):
chunks.append(sha256(chunk).digest())
if not chunks:
return sha256(b'').hexdigest()
while len(chunks) > 1:
new_chunks = []
for first, second in _in_pairs(chunks):
if second is not None:
new_chunks.append(sha256(first + second).digest())
else:
# We're at the end of the list and there's no pair left.
new_chunks.append(first)
chunks = new_chunks
return binascii.hexlify(chunks[0]).decode('ascii')
def _in_pairs(iterable):
# Creates iterator that iterates over the list in pairs:
# for a, b in _in_pairs([0, 1, 2, 3, 4]):
# print(a, b)
#
# will print:
# 0, 1
# 2, 3
# 4, None
shared_iter = iter(iterable)
# Note that zip_longest is a compat import that uses
# the itertools izip_longest. This creates an iterator,
# this call below does _not_ immediately create the list
# of pairs.
return zip_longest(shared_iter, shared_iter)
class CachedProperty(object):
"""A read only property that caches the initially computed value.
This descriptor will only call the provided ``fget`` function once.
Subsequent access to this property will return the cached value.
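    Example usage (an illustrative sketch; ``Dataset`` is a made-up class)::
        class Dataset(object):
            @CachedProperty
            def values(self):
                print('computing...')
                return [1, 2, 3]
        d = Dataset()
        d.values  # prints 'computing...' and returns [1, 2, 3]
        d.values  # returns the cached list; the fget is not called again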
"""
def __init__(self, fget):
self._fget = fget
def __get__(self, obj, cls):
if obj is None:
return self
else:
computed_value = self._fget(obj)
obj.__dict__[self._fget.__name__] = computed_value
return computed_value
class ArgumentGenerator(object):
"""Generate sample input based on a shape model.
This class contains a ``generate_skeleton`` method that will take
an input/output shape (created from ``botocore.model``) and generate
a sample dictionary corresponding to the input/output shape.
    The specific values used are placeholder values. For strings either an
empty string or the member name can be used, for numbers 0 or 0.0 is used.
The intended usage of this class is to generate the *shape* of the input
structure.
This can be useful for operations that have complex input shapes.
This allows a user to just fill in the necessary data instead of
worrying about the specific structure of the input arguments.
Example usage::
s = botocore.session.get_session()
ddb = s.get_service_model('dynamodb')
arg_gen = ArgumentGenerator()
sample_input = arg_gen.generate_skeleton(
ddb.operation_model('CreateTable').input_shape)
print("Sample input for dynamodb.CreateTable: %s" % sample_input)
"""
def __init__(self, use_member_names=False):
self._use_member_names = use_member_names
def generate_skeleton(self, shape):
"""Generate a sample input.
:type shape: ``botocore.model.Shape``
:param shape: The input shape.
:return: The generated skeleton input corresponding to the
provided input shape.
"""
stack = []
return self._generate_skeleton(shape, stack)
def _generate_skeleton(self, shape, stack, name=''):
stack.append(shape.name)
try:
if shape.type_name == 'structure':
return self._generate_type_structure(shape, stack)
elif shape.type_name == 'list':
return self._generate_type_list(shape, stack)
elif shape.type_name == 'map':
return self._generate_type_map(shape, stack)
elif shape.type_name == 'string':
if self._use_member_names:
return name
if shape.enum:
return random.choice(shape.enum)
return ''
elif shape.type_name in ['integer', 'long']:
return 0
elif shape.type_name in ['float', 'double']:
return 0.0
elif shape.type_name == 'boolean':
return True
elif shape.type_name == 'timestamp':
return datetime.datetime(1970, 1, 1, 0, 0, 0)
finally:
stack.pop()
def _generate_type_structure(self, shape, stack):
if stack.count(shape.name) > 1:
return {}
skeleton = OrderedDict()
for member_name, member_shape in shape.members.items():
skeleton[member_name] = self._generate_skeleton(
member_shape, stack, name=member_name)
return skeleton
def _generate_type_list(self, shape, stack):
# For list elements we've arbitrarily decided to
# return two elements for the skeleton list.
name = ''
if self._use_member_names:
name = shape.member.name
return [
self._generate_skeleton(shape.member, stack, name),
]
def _generate_type_map(self, shape, stack):
key_shape = shape.key
value_shape = shape.value
assert key_shape.type_name == 'string'
return OrderedDict([
('KeyName', self._generate_skeleton(value_shape, stack)),
])
def is_valid_ipv6_endpoint_url(endpoint_url):
netloc = urlparse(endpoint_url).netloc
return IPV6_ADDRZ_RE.match(netloc) is not None
def is_valid_endpoint_url(endpoint_url):
"""Verify the endpoint_url is valid.
:type endpoint_url: string
:param endpoint_url: An endpoint_url. Must have at least a scheme
and a hostname.
:return: True if the endpoint url is valid. False otherwise.
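    Example usage (illustrative; a regex match object counts as truthy)::
        is_valid_endpoint_url('https://example.com')  # truthy
        is_valid_endpoint_url('example.com')          # False (no scheme)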
"""
parts = urlsplit(endpoint_url)
hostname = parts.hostname
if hostname is None:
return False
if len(hostname) > 255:
return False
if hostname[-1] == ".":
hostname = hostname[:-1]
allowed = re.compile(
r"^((?!-)[A-Z\d-]{1,63}(?<!-)\.)*((?!-)[A-Z\d-]{1,63}(?<!-))$",
re.IGNORECASE)
return allowed.match(hostname)
def is_valid_uri(endpoint_url):
return is_valid_endpoint_url(endpoint_url) or is_valid_ipv6_endpoint_url(endpoint_url)
def validate_region_name(region_name):
"""Provided region_name must be a valid host label."""
if region_name is None:
return
valid_host_label = re.compile(r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{,63}(?<!-)$')
valid = valid_host_label.match(region_name)
if not valid:
raise InvalidRegionError(region_name=region_name)
def check_dns_name(bucket_name):
"""
Check to see if the ``bucket_name`` complies with the
restricted DNS naming conventions necessary to allow
access via virtual-hosting style.
Even though "." characters are perfectly valid in this DNS
naming scheme, we are going to punt on any name containing a
"." character because these will cause SSL cert validation
problems if we try to use virtual-hosting style addressing.
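    Example usage (illustrative)::
        check_dns_name('my-bucket')  # True
        check_dns_name('my.bucket')  # False; '.' breaks SSL cert validation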
"""
if '.' in bucket_name:
return False
n = len(bucket_name)
if n < 3 or n > 63:
# Wrong length
return False
match = LABEL_RE.match(bucket_name)
if match is None or match.end() != len(bucket_name):
return False
return True
def fix_s3_host(request, signature_version, region_name,
default_endpoint_url=None, **kwargs):
"""
This handler looks at S3 requests just before they are signed.
If there is a bucket name on the path (true for everything except
ListAllBuckets) it checks to see if that bucket name conforms to
the DNS naming conventions. If it does, it alters the request to
use ``virtual hosting`` style addressing rather than ``path-style``
addressing.
"""
if request.context.get('use_global_endpoint', False):
default_endpoint_url = 's3.amazonaws.com'
try:
switch_to_virtual_host_style(
request, signature_version, default_endpoint_url)
except InvalidDNSNameError as e:
bucket_name = e.kwargs['bucket_name']
logger.debug('Not changing URI, bucket is not DNS compatible: %s',
bucket_name)
def switch_to_virtual_host_style(request, signature_version,
default_endpoint_url=None, **kwargs):
"""
    This is a handler to force virtual host style s3 addressing no matter
    the signature version (which is taken into consideration for the default
    case). If the bucket is not DNS compatible an InvalidDNSNameError is
    raised.
    :param request: An AWSRequest object that is about to be sent.
:param signature_version: The signature version to sign with
:param default_endpoint_url: The endpoint to use when switching to a
virtual style. If None is supplied, the virtual host will be
constructed from the url of the request.
"""
if request.auth_path is not None:
# The auth_path has already been applied (this may be a
# retried request). We don't need to perform this
# customization again.
return
elif _is_get_bucket_location_request(request):
# For the GetBucketLocation response, we should not be using
# the virtual host style addressing so we can avoid any sigv4
# issues.
logger.debug("Request is GetBucketLocation operation, not checking "
"for DNS compatibility.")
return
parts = urlsplit(request.url)
request.auth_path = parts.path
path_parts = parts.path.split('/')
    # Determine the endpoint we will be prepending the bucket name to.
if default_endpoint_url is None:
default_endpoint_url = parts.netloc
if len(path_parts) > 1:
bucket_name = path_parts[1]
if not bucket_name:
# If the bucket name is empty we should not be checking for
# dns compatibility.
return
logger.debug('Checking for DNS compatible bucket for: %s',
request.url)
if check_dns_name(bucket_name):
# If the operation is on a bucket, the auth_path must be
# terminated with a '/' character.
if len(path_parts) == 2:
if request.auth_path[-1] != '/':
request.auth_path += '/'
path_parts.remove(bucket_name)
# At the very least the path must be a '/', such as with the
# CreateBucket operation when DNS style is being used. If this
# is not used you will get an empty path which is incorrect.
path = '/'.join(path_parts) or '/'
global_endpoint = default_endpoint_url
host = bucket_name + '.' + global_endpoint
new_tuple = (parts.scheme, host, path,
parts.query, '')
new_uri = urlunsplit(new_tuple)
request.url = new_uri
logger.debug('URI updated to: %s', new_uri)
else:
raise InvalidDNSNameError(bucket_name=bucket_name)
def _is_get_bucket_location_request(request):
return request.url.endswith('?location')
def instance_cache(func):
"""Method decorator for caching method calls to a single instance.
**This is not a general purpose caching decorator.**
In order to use this, you *must* provide an ``_instance_cache``
attribute on the instance.
This decorator is used to cache method calls. The cache is only
scoped to a single instance though such that multiple instances
will maintain their own cache. In order to keep things simple,
this decorator requires that you provide an ``_instance_cache``
attribute on your instance.
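    Example usage (an illustrative sketch; ``expensive_lookup`` is a
    hypothetical helper)::
        class Resolver(object):
            def __init__(self):
                self._instance_cache = {}
            @instance_cache
            def resolve(self, name):
                return expensive_lookup(name)  # hypothetical helper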
"""
func_name = func.__name__
@functools.wraps(func)
def _cache_guard(self, *args, **kwargs):
cache_key = (func_name, args)
if kwargs:
kwarg_items = tuple(sorted(kwargs.items()))
cache_key = (func_name, args, kwarg_items)
result = self._instance_cache.get(cache_key)
if result is not None:
return result
result = func(self, *args, **kwargs)
self._instance_cache[cache_key] = result
return result
return _cache_guard
def switch_host_s3_accelerate(request, operation_name, **kwargs):
"""Switches the current s3 endpoint with an S3 Accelerate endpoint"""
# Note that when registered the switching of the s3 host happens
# before it gets changed to virtual. So we are not concerned with ensuring
# that the bucket name is translated to the virtual style here and we
# can hard code the Accelerate endpoint.
parts = urlsplit(request.url).netloc.split('.')
parts = [p for p in parts if p in S3_ACCELERATE_WHITELIST]
endpoint = 'https://s3-accelerate.'
if len(parts) > 0:
endpoint += '.'.join(parts) + '.'
endpoint += 'amazonaws.com'
if operation_name in ['ListBuckets', 'CreateBucket', 'DeleteBucket']:
return
_switch_hosts(request, endpoint, use_new_scheme=False)
def switch_host_with_param(request, param_name):
"""Switches the host using a parameter value from a JSON request body"""
request_json = json.loads(request.data.decode('utf-8'))
if request_json.get(param_name):
new_endpoint = request_json[param_name]
_switch_hosts(request, new_endpoint)
def _switch_hosts(request, new_endpoint, use_new_scheme=True):
final_endpoint = _get_new_endpoint(
request.url, new_endpoint, use_new_scheme)
request.url = final_endpoint
def _get_new_endpoint(original_endpoint, new_endpoint, use_new_scheme=True):
new_endpoint_components = urlsplit(new_endpoint)
original_endpoint_components = urlsplit(original_endpoint)
scheme = original_endpoint_components.scheme
if use_new_scheme:
scheme = new_endpoint_components.scheme
final_endpoint_components = (
scheme,
new_endpoint_components.netloc,
original_endpoint_components.path,
original_endpoint_components.query,
''
)
final_endpoint = urlunsplit(final_endpoint_components)
logger.debug('Updating URI from %s to %s' % (
original_endpoint, final_endpoint))
return final_endpoint
def deep_merge(base, extra):
"""Deeply two dictionaries, overriding existing keys in the base.
:param base: The base dictionary which will be merged into.
:param extra: The dictionary to merge into the base. Keys from this
dictionary will take precedence.
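    Example usage (illustrative)::
        base = {'a': {'b': 1}, 'c': 1}
        deep_merge(base, {'a': {'d': 2}, 'c': 2})
        # base is now {'a': {'b': 1, 'd': 2}, 'c': 2}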
"""
for key in extra:
# If the key represents a dict on both given dicts, merge the sub-dicts
if key in base and isinstance(base[key], dict)\
and isinstance(extra[key], dict):
deep_merge(base[key], extra[key])
continue
# Otherwise, set the key on the base to be the value of the extra.
base[key] = extra[key]
def hyphenize_service_id(service_id):
"""Translate the form used for event emitters.
:param service_id: The service_id to convert.
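    Example (illustrative): 'S3 Control' becomes 's3-control'.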
"""
return service_id.replace(' ', '-').lower()
class S3RegionRedirector(object):
def __init__(self, endpoint_bridge, client, cache=None):
self._endpoint_resolver = endpoint_bridge
self._cache = cache
if self._cache is None:
self._cache = {}
# This needs to be a weak ref in order to prevent memory leaks on
# python 2.6
self._client = weakref.proxy(client)
def register(self, event_emitter=None):
emitter = event_emitter or self._client.meta.events
emitter.register('needs-retry.s3', self.redirect_from_error)
emitter.register('before-call.s3', self.set_request_url)
emitter.register('before-parameter-build.s3',
self.redirect_from_cache)
def redirect_from_error(self, request_dict, response, operation, **kwargs):
"""
An S3 request sent to the wrong region will return an error that
contains the endpoint the request should be sent to. This handler
will add the redirect information to the signing context and then
redirect the request.
"""
if response is None:
# This could be none if there was a ConnectionError or other
# transport error.
return
if self._is_s3_accesspoint(request_dict.get('context', {})):
logger.debug(
'S3 request was previously to an accesspoint, not redirecting.'
)
return
if request_dict.get('context', {}).get('s3_redirected'):
logger.debug(
'S3 request was previously redirected, not redirecting.')
return
error = response[1].get('Error', {})
error_code = error.get('Code')
response_metadata = response[1].get('ResponseMetadata', {})
# We have to account for 400 responses because
# if we sign a Head* request with the wrong region,
# we'll get a 400 Bad Request but we won't get a
# body saying it's an "AuthorizationHeaderMalformed".
is_special_head_object = (
error_code in ['301', '400'] and
operation.name == 'HeadObject'
)
is_special_head_bucket = (
error_code in ['301', '400'] and
operation.name == 'HeadBucket' and
'x-amz-bucket-region' in response_metadata.get('HTTPHeaders', {})
)
is_wrong_signing_region = (
error_code == 'AuthorizationHeaderMalformed' and
'Region' in error
)
is_redirect_status = response[0] is not None and \
response[0].status_code in [301, 302, 307]
is_permanent_redirect = error_code == 'PermanentRedirect'
if not any([is_special_head_object, is_wrong_signing_region,
is_permanent_redirect, is_special_head_bucket,
is_redirect_status]):
return
bucket = request_dict['context']['signing']['bucket']
client_region = request_dict['context'].get('client_region')
new_region = self.get_bucket_region(bucket, response)
if new_region is None:
logger.debug(
"S3 client configured for region %s but the bucket %s is not "
"in that region and the proper region could not be "
"automatically determined." % (client_region, bucket))
return
logger.debug(
"S3 client configured for region %s but the bucket %s is in region"
" %s; Please configure the proper region to avoid multiple "
"unnecessary redirects and signing attempts." % (
client_region, bucket, new_region))
endpoint = self._endpoint_resolver.resolve('s3', new_region)
endpoint = endpoint['endpoint_url']
signing_context = {
'region': new_region,
'bucket': bucket,
'endpoint': endpoint
}
request_dict['context']['signing'] = signing_context
self._cache[bucket] = signing_context
self.set_request_url(request_dict, request_dict['context'])
request_dict['context']['s3_redirected'] = True
# Return 0 so it doesn't wait to retry
return 0
def get_bucket_region(self, bucket, response):
"""
There are multiple potential sources for the new region to redirect to,
but they aren't all universally available for use. This will try to
        find the region from response elements, but will fall back to calling
HEAD on the bucket if all else fails.
:param bucket: The bucket to find the region for. This is necessary if
the region is not available in the error response.
:param response: A response representing a service request that failed
due to incorrect region configuration.
"""
# First try to source the region from the headers.
service_response = response[1]
response_headers = service_response['ResponseMetadata']['HTTPHeaders']
if 'x-amz-bucket-region' in response_headers:
return response_headers['x-amz-bucket-region']
# Next, check the error body
region = service_response.get('Error', {}).get('Region', None)
if region is not None:
return region
# Finally, HEAD the bucket. No other choice sadly.
try:
response = self._client.head_bucket(Bucket=bucket)
headers = response['ResponseMetadata']['HTTPHeaders']
except ClientError as e:
headers = e.response['ResponseMetadata']['HTTPHeaders']
region = headers.get('x-amz-bucket-region', None)
return region
def set_request_url(self, params, context, **kwargs):
endpoint = context.get('signing', {}).get('endpoint', None)
if endpoint is not None:
params['url'] = _get_new_endpoint(params['url'], endpoint, False)
def redirect_from_cache(self, params, context, **kwargs):
"""
This handler retrieves a given bucket's signing context from the cache
and adds it into the request context.
"""
if self._is_s3_accesspoint(context):
return
bucket = params.get('Bucket')
signing_context = self._cache.get(bucket)
if signing_context is not None:
context['signing'] = signing_context
else:
context['signing'] = {'bucket': bucket}
def _is_s3_accesspoint(self, context):
return 's3_accesspoint' in context
class InvalidArnException(ValueError):
pass
class ArnParser(object):
def parse_arn(self, arn):
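        # Illustrative example (hypothetical ARN):
        #   parse_arn('arn:aws:s3:us-west-2:123456789012:accesspoint/myap')
        #   -> {'partition': 'aws', 'service': 's3', 'region': 'us-west-2',
        #       'account': '123456789012', 'resource': 'accesspoint/myap'}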
arn_parts = arn.split(':', 5)
if len(arn_parts) < 6:
raise InvalidArnException(
'Provided ARN: %s must be of the format: '
'arn:partition:service:region:account:resource' % arn
)
return {
'partition': arn_parts[1],
'service': arn_parts[2],
'region': arn_parts[3],
'account': arn_parts[4],
'resource': arn_parts[5],
}
class S3ArnParamHandler(object):
_RESOURCE_REGEX = re.compile(
r'^(?P<resource_type>accesspoint|outpost)[/:](?P<resource_name>.+)$'
)
_OUTPOST_RESOURCE_REGEX = re.compile(
r'^(?P<outpost_name>[a-zA-Z0-9\-]{1,63})[/:]accesspoint[/:]'
r'(?P<accesspoint_name>[a-zA-Z0-9\-]{1,63}$)'
)
_BLACKLISTED_OPERATIONS = [
'CreateBucket'
]
def __init__(self, arn_parser=None):
self._arn_parser = arn_parser
if arn_parser is None:
self._arn_parser = ArnParser()
def register(self, event_emitter):
event_emitter.register('before-parameter-build.s3', self.handle_arn)
def handle_arn(self, params, model, context, **kwargs):
if model.name in self._BLACKLISTED_OPERATIONS:
return
arn_details = self._get_arn_details_from_bucket_param(params)
if arn_details is None:
return
if arn_details['resource_type'] == 'accesspoint':
self._store_accesspoint(params, context, arn_details)
elif arn_details['resource_type'] == 'outpost':
self._store_outpost(params, context, arn_details)
def _get_arn_details_from_bucket_param(self, params):
if 'Bucket' in params:
try:
arn = params['Bucket']
arn_details = self._arn_parser.parse_arn(arn)
self._add_resource_type_and_name(arn, arn_details)
return arn_details
except InvalidArnException:
pass
return None
def _add_resource_type_and_name(self, arn, arn_details):
match = self._RESOURCE_REGEX.match(arn_details['resource'])
if match:
arn_details['resource_type'] = match.group('resource_type')
arn_details['resource_name'] = match.group('resource_name')
else:
raise UnsupportedS3ArnError(arn=arn)
def _store_accesspoint(self, params, context, arn_details):
# Ideally the access-point would be stored as a parameter in the
# request where the serializer would then know how to serialize it,
# but access-points are not modeled in S3 operations so it would fail
        # validation. Instead, we set the access-point name as the bucket
        # parameter so that some value is set when serializing the request,
        # and we store additional information from the arn on the context
        # for use in forming the access-point endpoint.
params['Bucket'] = arn_details['resource_name']
context['s3_accesspoint'] = {
'name': arn_details['resource_name'],
'account': arn_details['account'],
'partition': arn_details['partition'],
'region': arn_details['region'],
'service': arn_details['service'],
}
def _store_outpost(self, params, context, arn_details):
resource_name = arn_details['resource_name']
match = self._OUTPOST_RESOURCE_REGEX.match(resource_name)
if not match:
raise UnsupportedOutpostResourceError(resource_name=resource_name)
# Because we need to set the bucket name to something to pass
# validation we're going to use the access point name to be consistent
# with normal access point arns.
accesspoint_name = match.group('accesspoint_name')
params['Bucket'] = accesspoint_name
context['s3_accesspoint'] = {
'outpost_name': match.group('outpost_name'),
'name': accesspoint_name,
'account': arn_details['account'],
'partition': arn_details['partition'],
'region': arn_details['region'],
'service': arn_details['service'],
}
class S3EndpointSetter(object):
_DEFAULT_PARTITION = 'aws'
_DEFAULT_DNS_SUFFIX = 'amazonaws.com'
def __init__(self, endpoint_resolver, region=None,
s3_config=None, endpoint_url=None, partition=None):
self._endpoint_resolver = endpoint_resolver
self._region = region
self._s3_config = s3_config
if s3_config is None:
self._s3_config = {}
self._endpoint_url = endpoint_url
self._partition = partition
if partition is None:
self._partition = self._DEFAULT_PARTITION
def register(self, event_emitter):
event_emitter.register('before-sign.s3', self.set_endpoint)
event_emitter.register(
'before-call.s3.WriteGetObjectResponse',
self.update_endpoint_to_s3_object_lambda
)
def update_endpoint_to_s3_object_lambda(self, params, context, **kwargs):
if self._use_accelerate_endpoint:
raise UnsupportedS3ConfigurationError(
msg='S3 client does not support accelerate endpoints for S3 Object Lambda operations',
)
self._override_signing_name(context, 's3-object-lambda')
if self._endpoint_url:
# Only update the url if an explicit url was not provided
return
resolver = self._endpoint_resolver
resolved = resolver.construct_endpoint('s3-object-lambda', self._region)
# Ideally we would be able to replace the endpoint before
# serialization but there's no event to do that currently
new_endpoint = 'https://{host_prefix}{hostname}'.format(
host_prefix=params['host_prefix'],
hostname=resolved['hostname'],
)
params['url'] = _get_new_endpoint(params['url'], new_endpoint, False)
def set_endpoint(self, request, **kwargs):
if self._use_accesspoint_endpoint(request):
self._validate_accesspoint_supported(request)
region_name = self._resolve_region_for_accesspoint_endpoint(
request)
self._resolve_signing_name_for_accesspoint_endpoint(
request)
self._switch_to_accesspoint_endpoint(request, region_name)
return
if self._use_accelerate_endpoint:
switch_host_s3_accelerate(request=request, **kwargs)
if self._s3_addressing_handler:
self._s3_addressing_handler(request=request, **kwargs)
def _use_accesspoint_endpoint(self, request):
return 's3_accesspoint' in request.context
def _validate_accesspoint_supported(self, request):
if self._use_accelerate_endpoint:
raise UnsupportedS3AccesspointConfigurationError(
msg=(
'Client does not support s3 accelerate configuration '
'when an access-point ARN is specified.'
)
)
request_partition = request.context['s3_accesspoint']['partition']
if request_partition != self._partition:
raise UnsupportedS3AccesspointConfigurationError(
msg=(
'Client is configured for "%s" partition, but access-point'
' ARN provided is for "%s" partition. The client and '
' access-point partition must be the same.' % (
self._partition, request_partition)
)
)
s3_service = request.context['s3_accesspoint'].get('service')
if s3_service == 's3-object-lambda' and self._s3_config.get('use_dualstack_endpoint'):
raise UnsupportedS3AccesspointConfigurationError(
msg=(
'Client does not support s3 dualstack configuration '
'when an S3 Object Lambda access point ARN is specified.'
)
)
outpost_name = request.context['s3_accesspoint'].get('outpost_name')
if outpost_name and self._s3_config.get('use_dualstack_endpoint'):
raise UnsupportedS3AccesspointConfigurationError(
msg=(
'Client does not support s3 dualstack configuration '
'when an outpost ARN is specified.'
)
)
def _resolve_region_for_accesspoint_endpoint(self, request):
if self._s3_config.get('use_arn_region', True):
accesspoint_region = request.context['s3_accesspoint']['region']
# If we are using the region from the access point,
# we will also want to make sure that we set it as the
# signing region as well
self._override_signing_region(request, accesspoint_region)
return accesspoint_region
return self._region
def _resolve_signing_name_for_accesspoint_endpoint(self, request):
accesspoint_service = request.context['s3_accesspoint']['service']
self._override_signing_name(request.context, accesspoint_service)
def _switch_to_accesspoint_endpoint(self, request, region_name):
original_components = urlsplit(request.url)
accesspoint_endpoint = urlunsplit((
original_components.scheme,
self._get_accesspoint_netloc(request.context, region_name),
self._get_accesspoint_path(
original_components.path, request.context),
original_components.query,
''
))
logger.debug(
'Updating URI from %s to %s' % (request.url, accesspoint_endpoint))
request.url = accesspoint_endpoint
def _get_accesspoint_netloc(self, request_context, region_name):
s3_accesspoint = request_context['s3_accesspoint']
accesspoint_netloc_components = [
'%s-%s' % (s3_accesspoint['name'], s3_accesspoint['account']),
]
outpost_name = s3_accesspoint.get('outpost_name')
if self._endpoint_url:
if outpost_name:
accesspoint_netloc_components.append(outpost_name)
endpoint_url_netloc = urlsplit(self._endpoint_url).netloc
accesspoint_netloc_components.append(endpoint_url_netloc)
else:
if outpost_name:
outpost_host = [outpost_name, 's3-outposts']
accesspoint_netloc_components.extend(outpost_host)
elif s3_accesspoint['service'] == 's3-object-lambda':
accesspoint_netloc_components.append('s3-object-lambda')
else:
accesspoint_netloc_components.append('s3-accesspoint')
if self._s3_config.get('use_dualstack_endpoint'):
accesspoint_netloc_components.append('dualstack')
accesspoint_netloc_components.extend(
[
region_name,
self._get_dns_suffix(region_name)
]
)
return '.'.join(accesspoint_netloc_components)
def _get_accesspoint_path(self, original_path, request_context):
# The Bucket parameter was substituted with the access-point name as
# some value was required in serializing the bucket name. Now that
# we are making the request directly to the access point, we will
# want to remove that access-point name from the path.
name = request_context['s3_accesspoint']['name']
# All S3 operations require at least a / in their path.
return original_path.replace('/' + name, '', 1) or '/'
def _get_dns_suffix(self, region_name):
resolved = self._endpoint_resolver.construct_endpoint(
's3', region_name)
dns_suffix = self._DEFAULT_DNS_SUFFIX
if resolved and 'dnsSuffix' in resolved:
dns_suffix = resolved['dnsSuffix']
return dns_suffix
def _override_signing_region(self, request, region_name):
signing_context = request.context.get('signing', {})
# S3SigV4Auth will use the context['signing']['region'] value to
# sign with if present. This is used by the Bucket redirector
# as well but we should be fine because the redirector is never
# used in combination with the accesspoint setting logic.
signing_context['region'] = region_name
request.context['signing'] = signing_context
def _override_signing_name(self, context, signing_name):
signing_context = context.get('signing', {})
# S3SigV4Auth will use the context['signing']['signing_name'] value to
# sign with if present. This is used by the Bucket redirector
# as well but we should be fine because the redirector is never
# used in combination with the accesspoint setting logic.
signing_context['signing_name'] = signing_name
context['signing'] = signing_context
@CachedProperty
def _use_accelerate_endpoint(self):
        # Enable accelerate if the configuration is set to true or the
# endpoint being used matches one of the accelerate endpoints.
# Accelerate has been explicitly configured.
if self._s3_config.get('use_accelerate_endpoint'):
return True
# Accelerate mode is turned on automatically if an endpoint url is
# provided that matches the accelerate scheme.
if self._endpoint_url is None:
return False
# Accelerate is only valid for Amazon endpoints.
netloc = urlsplit(self._endpoint_url).netloc
if not netloc.endswith('amazonaws.com'):
return False
# The first part of the url should always be s3-accelerate.
parts = netloc.split('.')
if parts[0] != 's3-accelerate':
return False
# Url parts between 's3-accelerate' and 'amazonaws.com' which
# represent different url features.
feature_parts = parts[1:-2]
# There should be no duplicate url parts.
if len(feature_parts) != len(set(feature_parts)):
return False
# Remaining parts must all be in the whitelist.
return all(p in S3_ACCELERATE_WHITELIST for p in feature_parts)
@CachedProperty
def _addressing_style(self):
# Use virtual host style addressing if accelerate is enabled or if
# the given endpoint url is an accelerate endpoint.
if self._use_accelerate_endpoint:
return 'virtual'
# If a particular addressing style is configured, use it.
configured_addressing_style = self._s3_config.get('addressing_style')
if configured_addressing_style:
return configured_addressing_style
@CachedProperty
def _s3_addressing_handler(self):
# If virtual host style was configured, use it regardless of whether
# or not the bucket looks dns compatible.
if self._addressing_style == 'virtual':
logger.debug("Using S3 virtual host style addressing.")
return switch_to_virtual_host_style
# If path style is configured, no additional steps are needed. If
# endpoint_url was specified, don't default to virtual. We could
# potentially default provided endpoint urls to virtual hosted
# style, but for now it is avoided.
if self._addressing_style == 'path' or self._endpoint_url is not None:
logger.debug("Using S3 path style addressing.")
return None
logger.debug("Defaulting to S3 virtual host style addressing with "
"path style addressing fallback.")
# By default, try to use virtual style with path fallback.
return fix_s3_host
class S3ControlEndpointSetter(object):
_DEFAULT_PARTITION = 'aws'
_DEFAULT_DNS_SUFFIX = 'amazonaws.com'
_HOST_LABEL_REGEX = re.compile(r'^[a-zA-Z0-9\-]{1,63}$')
def __init__(self, endpoint_resolver, region=None,
s3_config=None, endpoint_url=None, partition=None):
self._endpoint_resolver = endpoint_resolver
self._region = region
self._s3_config = s3_config
if s3_config is None:
self._s3_config = {}
self._endpoint_url = endpoint_url
self._partition = partition
if partition is None:
self._partition = self._DEFAULT_PARTITION
def register(self, event_emitter):
event_emitter.register('before-sign.s3-control', self.set_endpoint)
def set_endpoint(self, request, **kwargs):
if self._use_endpoint_from_arn_details(request):
self._validate_endpoint_from_arn_details_supported(request)
region_name = self._resolve_region_from_arn_details(request)
self._resolve_signing_name_from_arn_details(request)
self._resolve_endpoint_from_arn_details(request, region_name)
self._add_headers_from_arn_details(request)
elif self._use_endpoint_from_outpost_id(request):
self._validate_outpost_redirection_valid(request)
outpost_id = request.context['outpost_id']
self._override_signing_name(request, 's3-outposts')
new_netloc = self._construct_outpost_endpoint(self._region)
self._update_request_netloc(request, new_netloc)
def _use_endpoint_from_arn_details(self, request):
return 'arn_details' in request.context
def _use_endpoint_from_outpost_id(self, request):
return 'outpost_id' in request.context
def _validate_endpoint_from_arn_details_supported(self, request):
if not self._s3_config.get('use_arn_region', False):
arn_region = request.context['arn_details']['region']
if arn_region != self._region:
error_msg = (
'The use_arn_region configuration is disabled but '
'received arn for "%s" when the client is configured '
'to use "%s"'
) % (arn_region, self._region)
raise UnsupportedS3ControlConfigurationError(msg=error_msg)
        request_partition = request.context['arn_details']['partition']
        if request_partition != self._partition:
            raise UnsupportedS3ControlConfigurationError(
                msg=(
                    'Client is configured for "%s" partition, but arn '
                    'provided is for "%s" partition. The client and '
                    'arn partition must be the same.' % (
                        self._partition, request_partition)
)
)
if self._s3_config.get('use_accelerate_endpoint'):
raise UnsupportedS3ControlConfigurationError(
msg='S3 control client does not support accelerate endpoints',
)
if 'outpost_name' in request.context['arn_details']:
self._validate_outpost_redirection_valid(request)
def _validate_outpost_redirection_valid(self, request):
if self._s3_config.get('use_dualstack_endpoint'):
raise UnsupportedS3ControlConfigurationError(
msg=(
'Client does not support s3 dualstack configuration '
'when an outpost is specified.'
)
)
def _resolve_region_from_arn_details(self, request):
if self._s3_config.get('use_arn_region', False):
arn_region = request.context['arn_details']['region']
# If we are using the region from the expanded arn, we will also
# want to make sure that we set it as the signing region as well
self._override_signing_region(request, arn_region)
return arn_region
return self._region
def _resolve_signing_name_from_arn_details(self, request):
arn_service = request.context['arn_details']['service']
self._override_signing_name(request, arn_service)
return arn_service
def _resolve_endpoint_from_arn_details(self, request, region_name):
new_netloc = self._resolve_netloc_from_arn_details(request, region_name)
self._update_request_netloc(request, new_netloc)
def _update_request_netloc(self, request, new_netloc):
original_components = urlsplit(request.url)
arn_details_endpoint = urlunsplit((
original_components.scheme,
new_netloc,
original_components.path,
original_components.query,
''
))
logger.debug(
'Updating URI from %s to %s' % (request.url, arn_details_endpoint)
)
request.url = arn_details_endpoint
def _resolve_netloc_from_arn_details(self, request, region_name):
arn_details = request.context['arn_details']
if 'outpost_name' in arn_details:
return self._construct_outpost_endpoint(region_name)
account = arn_details['account']
return self._construct_s3_control_endpoint(region_name, account)
def _is_valid_host_label(self, label):
return self._HOST_LABEL_REGEX.match(label)
def _validate_host_labels(self, *labels):
for label in labels:
if not self._is_valid_host_label(label):
raise InvalidHostLabelError(label=label)
def _construct_s3_control_endpoint(self, region_name, account):
self._validate_host_labels(region_name, account)
if self._endpoint_url:
endpoint_url_netloc = urlsplit(self._endpoint_url).netloc
netloc = [account, endpoint_url_netloc]
else:
netloc = [
account,
's3-control',
]
self._add_dualstack(netloc)
dns_suffix = self._get_dns_suffix(region_name)
netloc.extend([region_name, dns_suffix])
return self._construct_netloc(netloc)
def _construct_outpost_endpoint(self, region_name):
self._validate_host_labels(region_name)
if self._endpoint_url:
return urlsplit(self._endpoint_url).netloc
else:
netloc = [
's3-outposts',
region_name,
self._get_dns_suffix(region_name),
]
return self._construct_netloc(netloc)
def _construct_netloc(self, netloc):
return '.'.join(netloc)
def _add_dualstack(self, netloc):
if self._s3_config.get('use_dualstack_endpoint'):
netloc.append('dualstack')
def _get_dns_suffix(self, region_name):
resolved = self._endpoint_resolver.construct_endpoint(
's3', region_name)
dns_suffix = self._DEFAULT_DNS_SUFFIX
if resolved and 'dnsSuffix' in resolved:
dns_suffix = resolved['dnsSuffix']
return dns_suffix
def _override_signing_region(self, request, region_name):
signing_context = request.context.get('signing', {})
# S3SigV4Auth will use the context['signing']['region'] value to
# sign with if present. This is used by the Bucket redirector
# as well but we should be fine because the redirector is never
# used in combination with the accesspoint setting logic.
signing_context['region'] = region_name
request.context['signing'] = signing_context
def _override_signing_name(self, request, signing_name):
signing_context = request.context.get('signing', {})
# S3SigV4Auth will use the context['signing']['signing_name'] value to
# sign with if present. This is used by the Bucket redirector
# as well but we should be fine because the redirector is never
# used in combination with the accesspoint setting logic.
signing_context['signing_name'] = signing_name
request.context['signing'] = signing_context
def _add_headers_from_arn_details(self, request):
arn_details = request.context['arn_details']
outpost_name = arn_details.get('outpost_name')
if outpost_name:
self._add_outpost_id_header(request, outpost_name)
def _add_outpost_id_header(self, request, outpost_name):
request.headers['x-amz-outpost-id'] = outpost_name
class S3ControlArnParamHandler(object):
_RESOURCE_SPLIT_REGEX = re.compile(r'[/:]')
def __init__(self, arn_parser=None):
self._arn_parser = arn_parser
if arn_parser is None:
self._arn_parser = ArnParser()
def register(self, event_emitter):
event_emitter.register(
'before-parameter-build.s3-control',
self.handle_arn,
)
def handle_arn(self, params, model, context, **kwargs):
if model.name in ('CreateBucket', 'ListRegionalBuckets'):
# CreateBucket and ListRegionalBuckets are special cases that do
# not obey ARN based redirection but will redirect based off of the
# presence of the OutpostId parameter
self._handle_outpost_id_param(params, model, context)
else:
self._handle_name_param(params, model, context)
self._handle_bucket_param(params, model, context)
def _get_arn_details_from_param(self, params, param_name):
if param_name not in params:
return None
try:
arn = params[param_name]
arn_details = self._arn_parser.parse_arn(arn)
arn_details['original'] = arn
arn_details['resources'] = self._split_resource(arn_details)
return arn_details
except InvalidArnException:
return None
def _split_resource(self, arn_details):
return self._RESOURCE_SPLIT_REGEX.split(arn_details['resource'])
def _override_account_id_param(self, params, arn_details):
account_id = arn_details['account']
if 'AccountId' in params and params['AccountId'] != account_id:
error_msg = (
'Account ID in arn does not match the AccountId parameter '
'provided: "%s"'
) % params['AccountId']
raise UnsupportedS3ControlArnError(
arn=arn_details['original'],
msg=error_msg,
)
params['AccountId'] = account_id
def _handle_outpost_id_param(self, params, model, context):
if 'OutpostId' not in params:
return
context['outpost_id'] = params['OutpostId']
def _handle_name_param(self, params, model, context):
# CreateAccessPoint is a special case that does not expand Name
if model.name == 'CreateAccessPoint':
return
arn_details = self._get_arn_details_from_param(params, 'Name')
if arn_details is None:
return
if self._is_outpost_accesspoint(arn_details):
self._store_outpost_accesspoint(params, context, arn_details)
else:
error_msg = 'The Name parameter does not support the provided ARN'
raise UnsupportedS3ControlArnError(
arn=arn_details['original'],
msg=error_msg,
)
def _is_outpost_accesspoint(self, arn_details):
if arn_details['service'] != 's3-outposts':
return False
resources = arn_details['resources']
if len(resources) != 4:
return False
# Resource must be of the form outpost/op-123/accesspoint/name
return resources[0] == 'outpost' and resources[2] == 'accesspoint'
def _store_outpost_accesspoint(self, params, context, arn_details):
self._override_account_id_param(params, arn_details)
accesspoint_name = arn_details['resources'][3]
params['Name'] = accesspoint_name
arn_details['accesspoint_name'] = accesspoint_name
arn_details['outpost_name'] = arn_details['resources'][1]
context['arn_details'] = arn_details
def _handle_bucket_param(self, params, model, context):
arn_details = self._get_arn_details_from_param(params, 'Bucket')
if arn_details is None:
return
if self._is_outpost_bucket(arn_details):
self._store_outpost_bucket(params, context, arn_details)
else:
error_msg = (
'The Bucket parameter does not support the provided ARN'
)
raise UnsupportedS3ControlArnError(
arn=arn_details['original'],
msg=error_msg,
)
def _is_outpost_bucket(self, arn_details):
if arn_details['service'] != 's3-outposts':
return False
resources = arn_details['resources']
if len(resources) != 4:
return False
# Resource must be of the form outpost/op-123/bucket/name
return resources[0] == 'outpost' and resources[2] == 'bucket'
def _store_outpost_bucket(self, params, context, arn_details):
self._override_account_id_param(params, arn_details)
bucket_name = arn_details['resources'][3]
params['Bucket'] = bucket_name
arn_details['bucket_name'] = bucket_name
arn_details['outpost_name'] = arn_details['resources'][1]
context['arn_details'] = arn_details
class ContainerMetadataFetcher(object):
TIMEOUT_SECONDS = 2
RETRY_ATTEMPTS = 3
SLEEP_TIME = 1
IP_ADDRESS = '169.254.170.2'
_ALLOWED_HOSTS = [IP_ADDRESS, 'localhost', '127.0.0.1']
def __init__(self, session=None, sleep=time.sleep):
if session is None:
session = botocore.httpsession.URLLib3Session(
timeout=self.TIMEOUT_SECONDS
)
self._session = session
self._sleep = sleep
def retrieve_full_uri(self, full_url, headers=None):
"""Retrieve JSON metadata from container metadata.
:type full_url: str
:param full_url: The full URL of the metadata service.
This should include the scheme as well, e.g
"http://localhost:123/foo"
"""
self._validate_allowed_url(full_url)
return self._retrieve_credentials(full_url, headers)
def _validate_allowed_url(self, full_url):
parsed = botocore.compat.urlparse(full_url)
is_whitelisted_host = self._check_if_whitelisted_host(
parsed.hostname)
if not is_whitelisted_host:
raise ValueError(
"Unsupported host '%s'. Can only "
"retrieve metadata from these hosts: %s" %
(parsed.hostname, ', '.join(self._ALLOWED_HOSTS)))
def _check_if_whitelisted_host(self, host):
if host in self._ALLOWED_HOSTS:
return True
return False
def retrieve_uri(self, relative_uri):
"""Retrieve JSON metadata from ECS metadata.
:type relative_uri: str
:param relative_uri: A relative URI, e.g "/foo/bar?id=123"
:return: The parsed JSON response.
"""
full_url = self.full_url(relative_uri)
return self._retrieve_credentials(full_url)
def _retrieve_credentials(self, full_url, extra_headers=None):
headers = {'Accept': 'application/json'}
if extra_headers is not None:
headers.update(extra_headers)
attempts = 0
while True:
try:
return self._get_response(
full_url, headers, self.TIMEOUT_SECONDS)
except MetadataRetrievalError as e:
logger.debug("Received error when attempting to retrieve "
"container metadata: %s", e, exc_info=True)
self._sleep(self.SLEEP_TIME)
attempts += 1
if attempts >= self.RETRY_ATTEMPTS:
raise
def _get_response(self, full_url, headers, timeout):
try:
AWSRequest = botocore.awsrequest.AWSRequest
request = AWSRequest(method='GET', url=full_url, headers=headers)
response = self._session.send(request.prepare())
response_text = response.content.decode('utf-8')
if response.status_code != 200:
raise MetadataRetrievalError(
error_msg=(
"Received non 200 response (%s) from ECS metadata: %s"
) % (response.status_code, response_text))
try:
return json.loads(response_text)
except ValueError:
error_msg = (
"Unable to parse JSON returned from ECS metadata services"
)
logger.debug('%s:%s', error_msg, response_text)
raise MetadataRetrievalError(error_msg=error_msg)
except RETRYABLE_HTTP_ERRORS as e:
error_msg = ("Received error when attempting to retrieve "
"ECS metadata: %s" % e)
raise MetadataRetrievalError(error_msg=error_msg)
def full_url(self, relative_uri):
return 'http://%s%s' % (self.IP_ADDRESS, relative_uri)
def get_environ_proxies(url):
if should_bypass_proxies(url):
return {}
else:
return getproxies()
def should_bypass_proxies(url):
"""
Returns whether we should bypass proxies or not.
"""
    # NOTE: requests allows ip/cidr entries in the no_proxy env variable,
    # which we don't currently support, as urllib only checks the DNS suffix.
# If the system proxy settings indicate that this URL should be bypassed,
# don't proxy.
# The proxy_bypass function is incredibly buggy on OS X in early versions
# of Python 2.6, so allow this call to fail. Only catch the specific
# exceptions we've seen, though: this call failing in other ways can reveal
# legitimate problems.
try:
if proxy_bypass(urlparse(url).netloc):
return True
except (TypeError, socket.gaierror):
pass
return False
def get_encoding_from_headers(headers, default='ISO-8859-1'):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
:param default: default encoding if the content-type is text
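    Example usage (illustrative)::
        get_encoding_from_headers({'content-type': 'text/html; charset=utf-8'})
        # -> 'utf-8'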
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = cgi.parse_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return default
def calculate_md5(body, **kwargs):
if isinstance(body, (bytes, bytearray)):
binary_md5 = _calculate_md5_from_bytes(body)
else:
binary_md5 = _calculate_md5_from_file(body)
return base64.b64encode(binary_md5).decode('ascii')
def _calculate_md5_from_bytes(body_bytes):
md5 = get_md5(body_bytes)
return md5.digest()
def _calculate_md5_from_file(fileobj):
start_position = fileobj.tell()
md5 = get_md5()
for chunk in iter(lambda: fileobj.read(1024 * 1024), b''):
md5.update(chunk)
fileobj.seek(start_position)
return md5.digest()
def conditionally_calculate_md5(params, **kwargs):
"""Only add a Content-MD5 if the system supports it."""
headers = params['headers']
body = params['body']
if MD5_AVAILABLE and body is not None and 'Content-MD5' not in headers:
md5_digest = calculate_md5(body, **kwargs)
params['headers']['Content-MD5'] = md5_digest
class FileWebIdentityTokenLoader(object):
def __init__(self, web_identity_token_path, _open=open):
self._web_identity_token_path = web_identity_token_path
self._open = _open
def __call__(self):
with self._open(self._web_identity_token_path) as token_file:
return token_file.read()
class SSOTokenLoader(object):
def __init__(self, cache=None):
if cache is None:
cache = {}
self._cache = cache
def _generate_cache_key(self, start_url):
return hashlib.sha1(start_url.encode('utf-8')).hexdigest()
def __call__(self, start_url):
cache_key = self._generate_cache_key(start_url)
try:
token = self._cache[cache_key]
return token['accessToken']
except KeyError:
logger.debug('Failed to load SSO token:', exc_info=True)
error_msg = (
'The SSO access token has either expired or is otherwise '
'invalid.'
)
raise SSOTokenLoadError(error_msg=error_msg)
| 88,236 | Python | 37.414018 | 102 | 0.612437 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/serialize.py | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Protocol input serializes.
This module contains classes that implement input serialization
for the various AWS protocol types.
These classes essentially take user input, a model object that
represents what the expected input should look like, and return
a dictionary that contains the various parts of a request. A few
high level design decisions:
* Each protocol type maps to a separate class, all inherit from
``Serializer``.
* The return value for ``serialize_to_request`` (the main entry
point) returns a dictionary that represents a request. This
will have keys like ``url_path``, ``query_string``, etc. This
is done so that it's a) easy to test and b) not tied to a
particular HTTP library. See the ``serialize_to_request`` docstring
for more details.
Unicode
-------
The input to the serializers should be text (str/unicode), not bytes,
with the exception of blob types. Those are assumed to be binary,
and if a str/unicode type is passed in, it will be encoded as utf-8.
"""
import re
import base64
import calendar
import datetime
from xml.etree import ElementTree
from botocore.compat import six
from botocore.compat import json, formatdate
from botocore.utils import parse_to_aware_datetime
from botocore.utils import percent_encode
from botocore.utils import is_json_value_header
from botocore.utils import conditionally_calculate_md5
from botocore import validate
# From the spec, the default timestamp format if not specified is iso8601.
DEFAULT_TIMESTAMP_FORMAT = 'iso8601'
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
# Same as ISO8601, but with microsecond precision.
ISO8601_MICRO = '%Y-%m-%dT%H:%M:%S.%fZ'
def create_serializer(protocol_name, include_validation=True):
# TODO: Unknown protocols.
serializer = SERIALIZERS[protocol_name]()
if include_validation:
validator = validate.ParamValidator()
serializer = validate.ParamValidationDecorator(validator, serializer)
return serializer
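# Illustrative usage (a sketch; ``operation_model`` would come from a loaded
# botocore service model, which this example does not construct):
#
#     serializer = create_serializer('query')
#     request = serializer.serialize_to_request({'Foo': 'val1'},
#                                               operation_model)
#     # ``request`` is a dict with 'url_path', 'headers', 'body', etc.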
class Serializer(object):
DEFAULT_METHOD = 'POST'
# Clients can change this to a different MutableMapping
# (i.e OrderedDict) if they want. This is used in the
# compliance test to match the hash ordering used in the
# tests.
MAP_TYPE = dict
DEFAULT_ENCODING = 'utf-8'
def serialize_to_request(self, parameters, operation_model):
"""Serialize parameters into an HTTP request.
This method takes user provided parameters and a shape
model and serializes the parameters to an HTTP request.
        More specifically, this method returns information about
        parts of the HTTP request; it does not enforce a particular
        interface or standard for an HTTP request. It instead returns
        a dictionary of:
* 'url_path'
* 'host_prefix'
* 'query_string'
* 'headers'
* 'body'
* 'method'
It is then up to consumers to decide how to map this to a Request
object of their HTTP library of choice. Below is an example
return value::
{'body': {'Action': 'OperationName',
'Bar': 'val2',
'Foo': 'val1',
'Version': '2014-01-01'},
'headers': {},
'method': 'POST',
'query_string': '',
'host_prefix': 'value.',
'url_path': '/'}
:param parameters: The dictionary input parameters for the
operation (i.e the user input).
:param operation_model: The OperationModel object that describes
the operation.
"""
raise NotImplementedError("serialize_to_request")
def _create_default_request(self):
# Creates a boilerplate default request dict that subclasses
# can use as a starting point.
serialized = {
'url_path': '/',
'query_string': '',
'method': self.DEFAULT_METHOD,
'headers': {},
# An empty body is represented as an empty byte string.
'body': b''
}
return serialized
# Some extra utility methods subclasses can use.
def _timestamp_iso8601(self, value):
if value.microsecond > 0:
timestamp_format = ISO8601_MICRO
else:
timestamp_format = ISO8601
return value.strftime(timestamp_format)
def _timestamp_unixtimestamp(self, value):
return int(calendar.timegm(value.timetuple()))
def _timestamp_rfc822(self, value):
if isinstance(value, datetime.datetime):
value = self._timestamp_unixtimestamp(value)
return formatdate(value, usegmt=True)
def _convert_timestamp_to_str(self, value, timestamp_format=None):
if timestamp_format is None:
timestamp_format = self.TIMESTAMP_FORMAT
timestamp_format = timestamp_format.lower()
datetime_obj = parse_to_aware_datetime(value)
converter = getattr(
self, '_timestamp_%s' % timestamp_format)
final_value = converter(datetime_obj)
return final_value
def _get_serialized_name(self, shape, default_name):
# Returns the serialized name for the shape if it exists.
# Otherwise it will return the passed in default_name.
return shape.serialization.get('name', default_name)
def _get_base64(self, value):
# Returns the base64-encoded version of value, handling
# both strings and bytes. The returned value is a string
# via the default encoding.
if isinstance(value, six.text_type):
value = value.encode(self.DEFAULT_ENCODING)
return base64.b64encode(value).strip().decode(
self.DEFAULT_ENCODING)
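    # For example (illustrative): _get_base64('abc') and _get_base64(b'abc')
    # both return the text 'YWJj'.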
def _expand_host_prefix(self, parameters, operation_model):
operation_endpoint = operation_model.endpoint
if operation_endpoint is None:
return None
host_prefix_expression = operation_endpoint['hostPrefix']
input_members = operation_model.input_shape.members
host_labels = [
member for member, shape in input_members.items()
if shape.serialization.get('hostLabel')
]
format_kwargs = dict((name, parameters[name]) for name in host_labels)
return host_prefix_expression.format(**format_kwargs)
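    # Illustrative example of the expansion above (a sketch; the member and
    # prefix names are made up): with an operation endpoint of
    # {'hostPrefix': '{AccountId}.'} and an input member 'AccountId' marked
    # as a hostLabel, parameters {'AccountId': '1234'} yield the host
    # prefix '1234.'.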
def _prepare_additional_traits(self, request, operation_model):
"""Determine if additional traits are required for given model"""
if operation_model.http_checksum_required:
conditionally_calculate_md5(request)
return request
class QuerySerializer(Serializer):
TIMESTAMP_FORMAT = 'iso8601'
def serialize_to_request(self, parameters, operation_model):
shape = operation_model.input_shape
serialized = self._create_default_request()
serialized['method'] = operation_model.http.get('method',
self.DEFAULT_METHOD)
serialized['headers'] = {
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
}
        # The query serializer only deals with body params so
        # that's what we hand off to the _serialize_* methods.
body_params = self.MAP_TYPE()
body_params['Action'] = operation_model.name
body_params['Version'] = operation_model.metadata['apiVersion']
if shape is not None:
self._serialize(body_params, parameters, shape)
serialized['body'] = body_params
host_prefix = self._expand_host_prefix(parameters, operation_model)
if host_prefix is not None:
serialized['host_prefix'] = host_prefix
serialized = self._prepare_additional_traits(serialized,
operation_model)
return serialized
def _serialize(self, serialized, value, shape, prefix=''):
# serialized: The dict that is incrementally added to with the
# final serialized parameters.
# value: The current user input value.
# shape: The shape object that describes the structure of the
# input.
# prefix: The incrementally built up prefix for the serialized
# key (i.e Foo.bar.members.1).
method = getattr(self, '_serialize_type_%s' % shape.type_name,
self._default_serialize)
method(serialized, value, shape, prefix=prefix)
def _serialize_type_structure(self, serialized, value, shape, prefix=''):
members = shape.members
for key, value in value.items():
member_shape = members[key]
member_prefix = self._get_serialized_name(member_shape, key)
if prefix:
member_prefix = '%s.%s' % (prefix, member_prefix)
self._serialize(serialized, value, member_shape, member_prefix)
def _serialize_type_list(self, serialized, value, shape, prefix=''):
if not value:
# The query protocol serializes empty lists.
serialized[prefix] = ''
return
if self._is_shape_flattened(shape):
list_prefix = prefix
if shape.member.serialization.get('name'):
name = self._get_serialized_name(shape.member, default_name='')
# Replace '.Original' with '.{name}'.
list_prefix = '.'.join(prefix.split('.')[:-1] + [name])
else:
list_name = shape.member.serialization.get('name', 'member')
list_prefix = '%s.%s' % (prefix, list_name)
for i, element in enumerate(value, 1):
element_prefix = '%s.%s' % (list_prefix, i)
element_shape = shape.member
self._serialize(serialized, element, element_shape, element_prefix)
def _serialize_type_map(self, serialized, value, shape, prefix=''):
if self._is_shape_flattened(shape):
full_prefix = prefix
else:
full_prefix = '%s.entry' % prefix
template = full_prefix + '.{i}.{suffix}'
key_shape = shape.key
value_shape = shape.value
key_suffix = self._get_serialized_name(key_shape, default_name='key')
value_suffix = self._get_serialized_name(value_shape, 'value')
for i, key in enumerate(value, 1):
key_prefix = template.format(i=i, suffix=key_suffix)
value_prefix = template.format(i=i, suffix=value_suffix)
self._serialize(serialized, key, key_shape, key_prefix)
self._serialize(serialized, value[key], value_shape, value_prefix)
def _serialize_type_blob(self, serialized, value, shape, prefix=''):
# Blob args must be base64 encoded.
serialized[prefix] = self._get_base64(value)
def _serialize_type_timestamp(self, serialized, value, shape, prefix=''):
serialized[prefix] = self._convert_timestamp_to_str(
value, shape.serialization.get('timestampFormat'))
def _serialize_type_boolean(self, serialized, value, shape, prefix=''):
if value:
serialized[prefix] = 'true'
else:
serialized[prefix] = 'false'
def _default_serialize(self, serialized, value, shape, prefix=''):
serialized[prefix] = value
def _is_shape_flattened(self, shape):
return shape.serialization.get('flattened')
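# Illustrative example of the query list serialization above (a sketch): a
# non-flattened list member named 'Values' with input {'Values': ['a', 'b']}
# produces body params keyed as:
#
#     Values.member.1 = a
#     Values.member.2 = b
#
# A flattened list drops the '.member' segment, yielding 'Values.1' and
# 'Values.2'.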
class EC2Serializer(QuerySerializer):
"""EC2 specific customizations to the query protocol serializers.
    The EC2 model is almost, but not exactly, the same as the query
    protocol serializer. This class encapsulates those differences. The model
    will be marked with a ``protocol`` of ``ec2``, so you don't need
to worry about wiring this class up correctly.
"""
def _get_serialized_name(self, shape, default_name):
# Returns the serialized name for the shape if it exists.
# Otherwise it will return the passed in default_name.
if 'queryName' in shape.serialization:
return shape.serialization['queryName']
elif 'name' in shape.serialization:
# A locationName is always capitalized
# on input for the ec2 protocol.
name = shape.serialization['name']
return name[0].upper() + name[1:]
else:
return default_name
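    # For example (illustrative): a member whose locationName is 'instanceId'
    # serializes as 'InstanceId' under the ec2 protocol, while an explicit
    # 'queryName' is used verbatim when present.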
def _serialize_type_list(self, serialized, value, shape, prefix=''):
for i, element in enumerate(value, 1):
element_prefix = '%s.%s' % (prefix, i)
element_shape = shape.member
self._serialize(serialized, element, element_shape, element_prefix)
class JSONSerializer(Serializer):
TIMESTAMP_FORMAT = 'unixtimestamp'
def serialize_to_request(self, parameters, operation_model):
target = '%s.%s' % (operation_model.metadata['targetPrefix'],
operation_model.name)
json_version = operation_model.metadata['jsonVersion']
serialized = self._create_default_request()
serialized['method'] = operation_model.http.get('method',
self.DEFAULT_METHOD)
serialized['headers'] = {
'X-Amz-Target': target,
'Content-Type': 'application/x-amz-json-%s' % json_version,
}
body = self.MAP_TYPE()
input_shape = operation_model.input_shape
if input_shape is not None:
self._serialize(body, parameters, input_shape)
serialized['body'] = json.dumps(body).encode(self.DEFAULT_ENCODING)
host_prefix = self._expand_host_prefix(parameters, operation_model)
if host_prefix is not None:
serialized['host_prefix'] = host_prefix
serialized = self._prepare_additional_traits(serialized,
operation_model)
return serialized
def _serialize(self, serialized, value, shape, key=None):
method = getattr(self, '_serialize_type_%s' % shape.type_name,
self._default_serialize)
method(serialized, value, shape, key)
def _serialize_type_structure(self, serialized, value, shape, key):
if key is not None:
# If a key is provided, this is a result of a recursive
# call so we need to add a new child dict as the value
# of the passed in serialized dict. We'll then add
# all the structure members as key/vals in the new serialized
# dictionary we just created.
new_serialized = self.MAP_TYPE()
serialized[key] = new_serialized
serialized = new_serialized
members = shape.members
for member_key, member_value in value.items():
member_shape = members[member_key]
if 'name' in member_shape.serialization:
member_key = member_shape.serialization['name']
self._serialize(serialized, member_value, member_shape, member_key)
def _serialize_type_map(self, serialized, value, shape, key):
map_obj = self.MAP_TYPE()
serialized[key] = map_obj
for sub_key, sub_value in value.items():
self._serialize(map_obj, sub_value, shape.value, sub_key)
def _serialize_type_list(self, serialized, value, shape, key):
list_obj = []
serialized[key] = list_obj
for list_item in value:
wrapper = {}
# The JSON list serialization is the only case where we aren't
# setting a key on a dict. We handle this by using
# a __current__ key on a wrapper dict to serialize each
# list item before appending it to the serialized list.
self._serialize(wrapper, list_item, shape.member, "__current__")
list_obj.append(wrapper["__current__"])
def _default_serialize(self, serialized, value, shape, key):
serialized[key] = value
def _serialize_type_timestamp(self, serialized, value, shape, key):
serialized[key] = self._convert_timestamp_to_str(
value, shape.serialization.get('timestampFormat'))
def _serialize_type_blob(self, serialized, value, shape, key):
serialized[key] = self._get_base64(value)
class BaseRestSerializer(Serializer):
"""Base class for rest protocols.
The only variance between the various rest protocols is the
way that the body is serialized. All other aspects (headers, uri, etc.)
    are the same and the logic for serializing those aspects lives here.
Subclasses must implement the ``_serialize_body_params`` method.
"""
QUERY_STRING_TIMESTAMP_FORMAT = 'iso8601'
HEADER_TIMESTAMP_FORMAT = 'rfc822'
# This is a list of known values for the "location" key in the
# serialization dict. The location key tells us where on the request
# to put the serialized value.
KNOWN_LOCATIONS = ['uri', 'querystring', 'header', 'headers']
def serialize_to_request(self, parameters, operation_model):
serialized = self._create_default_request()
serialized['method'] = operation_model.http.get('method',
self.DEFAULT_METHOD)
shape = operation_model.input_shape
if shape is None:
serialized['url_path'] = operation_model.http['requestUri']
return serialized
shape_members = shape.members
# While the ``serialized`` key holds the final serialized request
# data, we need interim dicts for the various locations of the
# request. We need this for the uri_path_kwargs and the
# query_string_kwargs because they are templated, so we need
# to gather all the needed data for the string template,
# then we render the template. The body_kwargs is needed
# because once we've collected them all, we run them through
# _serialize_body_params, which for rest-json, creates JSON,
# and for rest-xml, will create XML. This is what the
# ``partitioned`` dict below is for.
partitioned = {
'uri_path_kwargs': self.MAP_TYPE(),
'query_string_kwargs': self.MAP_TYPE(),
'body_kwargs': self.MAP_TYPE(),
'headers': self.MAP_TYPE(),
}
for param_name, param_value in parameters.items():
if param_value is None:
# Don't serialize any parameter with a None value.
continue
self._partition_parameters(partitioned, param_name, param_value,
shape_members)
serialized['url_path'] = self._render_uri_template(
operation_model.http['requestUri'],
partitioned['uri_path_kwargs'])
# Note that we lean on the http implementation to handle the case
# where the requestUri path already has query parameters.
# The bundled http client, requests, already supports this.
serialized['query_string'] = partitioned['query_string_kwargs']
if partitioned['headers']:
serialized['headers'] = partitioned['headers']
self._serialize_payload(partitioned, parameters,
serialized, shape, shape_members)
host_prefix = self._expand_host_prefix(parameters, operation_model)
if host_prefix is not None:
serialized['host_prefix'] = host_prefix
serialized = self._prepare_additional_traits(serialized,
operation_model)
return serialized
def _render_uri_template(self, uri_template, params):
# We need to handle two cases::
#
# /{Bucket}/foo
# /{Key+}/bar
# A label ending with '+' is greedy. There can only
# be one greedy key.
encoded_params = {}
for template_param in re.findall(r'{(.*?)}', uri_template):
if template_param.endswith('+'):
encoded_params[template_param] = percent_encode(
params[template_param[:-1]], safe='/~')
else:
encoded_params[template_param] = percent_encode(
params[template_param])
return uri_template.format(**encoded_params)
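    # Illustrative example of the rendering above (a sketch): the template
    # '/{Bucket}/{Key+}' with {'Bucket': 'my bucket', 'Key': 'a/b'} renders
    # to '/my%20bucket/a/b' -- the greedy 'Key+' label keeps its slashes.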
def _serialize_payload(self, partitioned, parameters,
serialized, shape, shape_members):
# partitioned - The user input params partitioned by location.
# parameters - The user input params.
# serialized - The final serialized request dict.
# shape - Describes the expected input shape
# shape_members - The members of the input struct shape
payload_member = shape.serialization.get('payload')
if payload_member is not None and \
shape_members[payload_member].type_name in ['blob', 'string']:
# If it's streaming, then the body is just the
# value of the payload.
body_payload = parameters.get(payload_member, b'')
body_payload = self._encode_payload(body_payload)
serialized['body'] = body_payload
elif payload_member is not None:
            # If there's a payload member, we serialize that
            # member to the body.
body_params = parameters.get(payload_member)
if body_params is not None:
serialized['body'] = self._serialize_body_params(
body_params,
shape_members[payload_member])
elif partitioned['body_kwargs']:
serialized['body'] = self._serialize_body_params(
partitioned['body_kwargs'], shape)
def _encode_payload(self, body):
if isinstance(body, six.text_type):
return body.encode(self.DEFAULT_ENCODING)
return body
def _partition_parameters(self, partitioned, param_name,
param_value, shape_members):
        # This takes a user provided input parameter (the
        # param_name/param_value pair) and figures out where it goes in
        # the request dict.
# Some params are HTTP headers, some are used in the URI, some
# are in the request body. This method deals with this.
member = shape_members[param_name]
location = member.serialization.get('location')
key_name = member.serialization.get('name', param_name)
if location == 'uri':
partitioned['uri_path_kwargs'][key_name] = param_value
elif location == 'querystring':
if isinstance(param_value, dict):
partitioned['query_string_kwargs'].update(param_value)
elif isinstance(param_value, bool):
partitioned['query_string_kwargs'][
key_name] = str(param_value).lower()
elif member.type_name == 'timestamp':
timestamp_format = member.serialization.get(
'timestampFormat', self.QUERY_STRING_TIMESTAMP_FORMAT)
partitioned['query_string_kwargs'][
key_name] = self._convert_timestamp_to_str(
param_value, timestamp_format
)
else:
partitioned['query_string_kwargs'][key_name] = param_value
elif location == 'header':
shape = shape_members[param_name]
value = self._convert_header_value(shape, param_value)
partitioned['headers'][key_name] = str(value)
elif location == 'headers':
            # 'headers' is a bit of an oddball. The ``key_name``
            # is actually a prefix for the header names:
header_prefix = key_name
# The value provided by the user is a dict so we'll be
# creating multiple header key/val pairs. The key
# name to use for each header is the header_prefix (``key_name``)
# plus the key provided by the user.
self._do_serialize_header_map(header_prefix,
partitioned['headers'],
param_value)
else:
partitioned['body_kwargs'][param_name] = param_value
def _do_serialize_header_map(self, header_prefix, headers, user_input):
for key, val in user_input.items():
full_key = header_prefix + key
headers[full_key] = val
def _serialize_body_params(self, params, shape):
raise NotImplementedError('_serialize_body_params')
def _convert_header_value(self, shape, value):
if shape.type_name == 'timestamp':
datetime_obj = parse_to_aware_datetime(value)
timestamp = calendar.timegm(datetime_obj.utctimetuple())
timestamp_format = shape.serialization.get(
'timestampFormat', self.HEADER_TIMESTAMP_FORMAT)
return self._convert_timestamp_to_str(timestamp, timestamp_format)
elif is_json_value_header(shape):
# Serialize with no spaces after separators to save space in
# the header.
return self._get_base64(json.dumps(value, separators=(',', ':')))
else:
return value
class RestJSONSerializer(BaseRestSerializer, JSONSerializer):
def _serialize_body_params(self, params, shape):
serialized_body = self.MAP_TYPE()
self._serialize(serialized_body, params, shape)
return json.dumps(serialized_body).encode(self.DEFAULT_ENCODING)
class RestXMLSerializer(BaseRestSerializer):
TIMESTAMP_FORMAT = 'iso8601'
def _serialize_body_params(self, params, shape):
root_name = shape.serialization['name']
pseudo_root = ElementTree.Element('')
self._serialize(shape, params, pseudo_root, root_name)
real_root = list(pseudo_root)[0]
return ElementTree.tostring(real_root, encoding=self.DEFAULT_ENCODING)
def _serialize(self, shape, params, xmlnode, name):
method = getattr(self, '_serialize_type_%s' % shape.type_name,
self._default_serialize)
method(xmlnode, params, shape, name)
def _serialize_type_structure(self, xmlnode, params, shape, name):
structure_node = ElementTree.SubElement(xmlnode, name)
if 'xmlNamespace' in shape.serialization:
namespace_metadata = shape.serialization['xmlNamespace']
attribute_name = 'xmlns'
if namespace_metadata.get('prefix'):
attribute_name += ':%s' % namespace_metadata['prefix']
structure_node.attrib[attribute_name] = namespace_metadata['uri']
for key, value in params.items():
member_shape = shape.members[key]
member_name = member_shape.serialization.get('name', key)
# We need to special case member shapes that are marked as an
# xmlAttribute. Rather than serializing into an XML child node,
# we instead serialize the shape to an XML attribute of the
# *current* node.
if value is None:
# Don't serialize any param whose value is None.
return
if member_shape.serialization.get('xmlAttribute'):
# xmlAttributes must have a serialization name.
xml_attribute_name = member_shape.serialization['name']
structure_node.attrib[xml_attribute_name] = value
continue
self._serialize(member_shape, value, structure_node, member_name)
def _serialize_type_list(self, xmlnode, params, shape, name):
member_shape = shape.member
if shape.serialization.get('flattened'):
element_name = name
list_node = xmlnode
else:
element_name = member_shape.serialization.get('name', 'member')
list_node = ElementTree.SubElement(xmlnode, name)
for item in params:
self._serialize(member_shape, item, list_node, element_name)
def _serialize_type_map(self, xmlnode, params, shape, name):
# Given the ``name`` of MyMap, and input of {"key1": "val1"}
# we serialize this as:
# <MyMap>
# <entry>
# <key>key1</key>
# <value>val1</value>
# </entry>
# </MyMap>
node = ElementTree.SubElement(xmlnode, name)
# TODO: handle flattened maps.
for key, value in params.items():
entry_node = ElementTree.SubElement(node, 'entry')
key_name = self._get_serialized_name(shape.key, default_name='key')
val_name = self._get_serialized_name(shape.value,
default_name='value')
self._serialize(shape.key, key, entry_node, key_name)
self._serialize(shape.value, value, entry_node, val_name)
def _serialize_type_boolean(self, xmlnode, params, shape, name):
        # For scalar types, the 'params' attr is actually just a scalar
        # value representing the data we need to serialize as a boolean.
        # The serialized text will be either 'true' or 'false'.
node = ElementTree.SubElement(xmlnode, name)
if params:
str_value = 'true'
else:
str_value = 'false'
node.text = str_value
def _serialize_type_blob(self, xmlnode, params, shape, name):
node = ElementTree.SubElement(xmlnode, name)
node.text = self._get_base64(params)
def _serialize_type_timestamp(self, xmlnode, params, shape, name):
node = ElementTree.SubElement(xmlnode, name)
node.text = self._convert_timestamp_to_str(
params, shape.serialization.get('timestampFormat'))
def _default_serialize(self, xmlnode, params, shape, name):
node = ElementTree.SubElement(xmlnode, name)
node.text = six.text_type(params)
SERIALIZERS = {
'ec2': EC2Serializer,
'query': QuerySerializer,
'json': JSONSerializer,
'rest-json': RestJSONSerializer,
'rest-xml': RestXMLSerializer,
}
| 30,430 | Python | 42.164539 | 79 | 0.616891 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/model.py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Abstractions to interact with service models."""
from collections import defaultdict
from botocore.utils import CachedProperty, instance_cache, hyphenize_service_id
from botocore.compat import OrderedDict
from botocore.exceptions import MissingServiceIdError
from botocore.exceptions import UndefinedModelAttributeError
NOT_SET = object()
class NoShapeFoundError(Exception):
pass
class InvalidShapeError(Exception):
pass
class OperationNotFoundError(Exception):
pass
class InvalidShapeReferenceError(Exception):
pass
class ServiceId(str):
def hyphenize(self):
return hyphenize_service_id(self)
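# Illustrative usage (a sketch): hyphenize_service_id lowercases the id and
# replaces spaces with hyphens, so ServiceId('API Gateway').hyphenize()
# returns 'api-gateway'.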
class Shape(object):
"""Object representing a shape from the service model."""
# To simplify serialization logic, all shape params that are
# related to serialization are moved from the top level hash into
# a 'serialization' hash. This list below contains the names of all
# the attributes that should be moved.
    SERIALIZED_ATTRS = ['locationName', 'queryName', 'flattened', 'location',
                        'payload', 'streaming', 'timestampFormat',
                        'xmlNamespace', 'resultWrapper', 'xmlAttribute',
                        'eventstream', 'event', 'eventheader', 'eventpayload',
                        'jsonvalue', 'hostLabel']
METADATA_ATTRS = ['required', 'min', 'max', 'sensitive', 'enum',
'idempotencyToken', 'error', 'exception',
'endpointdiscoveryid', 'retryable']
MAP_TYPE = OrderedDict
def __init__(self, shape_name, shape_model, shape_resolver=None):
"""
:type shape_name: string
:param shape_name: The name of the shape.
:type shape_model: dict
:param shape_model: The shape model. This would be the value
associated with the key in the "shapes" dict of the
service model (i.e ``model['shapes'][shape_name]``)
:type shape_resolver: botocore.model.ShapeResolver
:param shape_resolver: A shape resolver object. This is used to
resolve references to other shapes. For scalar shape types
(string, integer, boolean, etc.), this argument is not
required. If a shape_resolver is not provided for a complex
type, then a ``ValueError`` will be raised when an attempt
to resolve a shape is made.
"""
self.name = shape_name
self.type_name = shape_model['type']
self.documentation = shape_model.get('documentation', '')
self._shape_model = shape_model
if shape_resolver is None:
# If a shape_resolver is not provided, we create an object
# that will throw errors if you attempt to resolve
# a shape. This is actually ok for scalar shapes
# because they don't need to resolve shapes and shouldn't
# be required to provide an object they won't use.
shape_resolver = UnresolvableShapeMap()
self._shape_resolver = shape_resolver
self._cache = {}
@CachedProperty
def serialization(self):
"""Serialization information about the shape.
This contains information that may be needed for input serialization
or response parsing. This can include:
* name
* queryName
* flattened
* location
* payload
* streaming
* xmlNamespace
* resultWrapper
* xmlAttribute
* jsonvalue
* timestampFormat
:rtype: dict
:return: Serialization information about the shape.
"""
model = self._shape_model
serialization = {}
for attr in self.SERIALIZED_ATTRS:
if attr in self._shape_model:
serialization[attr] = model[attr]
# For consistency, locationName is renamed to just 'name'.
if 'locationName' in serialization:
serialization['name'] = serialization.pop('locationName')
return serialization
@CachedProperty
def metadata(self):
"""Metadata about the shape.
        This contains optional information about the shape, including:
* min
* max
* enum
* sensitive
* required
* idempotencyToken
:rtype: dict
:return: Metadata about the shape.
"""
model = self._shape_model
metadata = {}
for attr in self.METADATA_ATTRS:
if attr in self._shape_model:
metadata[attr] = model[attr]
return metadata
@CachedProperty
def required_members(self):
"""A list of members that are required.
A structure shape can define members that are required.
This value will return a list of required members. If there
        are no required members, an empty list is returned.
"""
return self.metadata.get('required', [])
def _resolve_shape_ref(self, shape_ref):
return self._shape_resolver.resolve_shape_ref(shape_ref)
def __repr__(self):
return "<%s(%s)>" % (self.__class__.__name__,
self.name)
@property
def event_stream_name(self):
return None
class StructureShape(Shape):
@CachedProperty
def members(self):
members = self._shape_model['members']
# The members dict looks like:
# 'members': {
# 'MemberName': {'shape': 'shapeName'},
# 'MemberName2': {'shape': 'shapeName'},
# }
# We return a dict of member name to Shape object.
shape_members = self.MAP_TYPE()
for name, shape_ref in members.items():
shape_members[name] = self._resolve_shape_ref(shape_ref)
return shape_members
@CachedProperty
def event_stream_name(self):
for member_name, member in self.members.items():
if member.serialization.get('eventstream'):
return member_name
return None
@CachedProperty
def error_code(self):
if not self.metadata.get('exception', False):
return None
error_metadata = self.metadata.get("error", {})
code = error_metadata.get("code")
if code:
return code
# Use the exception name if there is no explicit code modeled
return self.name
class ListShape(Shape):
@CachedProperty
def member(self):
return self._resolve_shape_ref(self._shape_model['member'])
class MapShape(Shape):
@CachedProperty
def key(self):
return self._resolve_shape_ref(self._shape_model['key'])
@CachedProperty
def value(self):
return self._resolve_shape_ref(self._shape_model['value'])
class StringShape(Shape):
@CachedProperty
def enum(self):
return self.metadata.get('enum', [])
class ServiceModel(object):
"""
:ivar service_description: The parsed service description dictionary.
"""
def __init__(self, service_description, service_name=None):
"""
:type service_description: dict
:param service_description: The service description model. This value
is obtained from a botocore.loader.Loader, or from directly loading
the file yourself::
service_description = json.load(
open('/path/to/service-description-model.json'))
model = ServiceModel(service_description)
:type service_name: str
:param service_name: The name of the service. Normally this is
the endpoint prefix defined in the service_description. However,
you can override this value to provide a more convenient name.
This is done in a few places in botocore (ses instead of email,
emr instead of elasticmapreduce). If this value is not provided,
it will default to the endpointPrefix defined in the model.
"""
self._service_description = service_description
# We want clients to be able to access metadata directly.
self.metadata = service_description.get('metadata', {})
self._shape_resolver = ShapeResolver(
service_description.get('shapes', {}))
self._signature_version = NOT_SET
self._service_name = service_name
self._instance_cache = {}
def shape_for(self, shape_name, member_traits=None):
return self._shape_resolver.get_shape_by_name(
shape_name, member_traits)
def shape_for_error_code(self, error_code):
return self._error_code_cache.get(error_code, None)
@CachedProperty
def _error_code_cache(self):
error_code_cache = {}
for error_shape in self.error_shapes:
code = error_shape.error_code
error_code_cache[code] = error_shape
return error_code_cache
def resolve_shape_ref(self, shape_ref):
return self._shape_resolver.resolve_shape_ref(shape_ref)
@CachedProperty
def shape_names(self):
return list(self._service_description.get('shapes', {}))
@CachedProperty
def error_shapes(self):
error_shapes = []
for shape_name in self.shape_names:
error_shape = self.shape_for(shape_name)
if error_shape.metadata.get('exception', False):
error_shapes.append(error_shape)
return error_shapes
@instance_cache
def operation_model(self, operation_name):
try:
model = self._service_description['operations'][operation_name]
except KeyError:
raise OperationNotFoundError(operation_name)
return OperationModel(model, self, operation_name)
@CachedProperty
def documentation(self):
return self._service_description.get('documentation', '')
@CachedProperty
def operation_names(self):
return list(self._service_description.get('operations', []))
@CachedProperty
def service_name(self):
"""The name of the service.
This defaults to the endpointPrefix defined in the service model.
        However, this value can be overridden when a ``ServiceModel`` is
created. If a service_name was not provided when the ``ServiceModel``
was created and if there is no endpointPrefix defined in the
service model, then an ``UndefinedModelAttributeError`` exception
will be raised.
"""
if self._service_name is not None:
return self._service_name
else:
return self.endpoint_prefix
@CachedProperty
def service_id(self):
try:
return ServiceId(self._get_metadata_property('serviceId'))
except UndefinedModelAttributeError:
raise MissingServiceIdError(
service_name=self._service_name
)
@CachedProperty
def signing_name(self):
"""The name to use when computing signatures.
If the model does not define a signing name, this
value will be the endpoint prefix defined in the model.
"""
signing_name = self.metadata.get('signingName')
if signing_name is None:
signing_name = self.endpoint_prefix
return signing_name
@CachedProperty
def api_version(self):
return self._get_metadata_property('apiVersion')
@CachedProperty
def protocol(self):
return self._get_metadata_property('protocol')
@CachedProperty
def endpoint_prefix(self):
return self._get_metadata_property('endpointPrefix')
@CachedProperty
def endpoint_discovery_operation(self):
for operation in self.operation_names:
model = self.operation_model(operation)
if model.is_endpoint_discovery_operation:
return model
@CachedProperty
def endpoint_discovery_required(self):
for operation in self.operation_names:
model = self.operation_model(operation)
if (model.endpoint_discovery is not None and
model.endpoint_discovery.get('required')):
return True
return False
def _get_metadata_property(self, name):
try:
return self.metadata[name]
except KeyError:
raise UndefinedModelAttributeError(
'"%s" not defined in the metadata of the model: %s' %
(name, self))
    # Signature version is one of the rare properties
    # that can be modified, so a CachedProperty is not used here.
@property
def signature_version(self):
if self._signature_version is NOT_SET:
signature_version = self.metadata.get('signatureVersion')
self._signature_version = signature_version
return self._signature_version
@signature_version.setter
def signature_version(self, value):
self._signature_version = value
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.service_name)
class OperationModel(object):
def __init__(self, operation_model, service_model, name=None):
"""
:type operation_model: dict
:param operation_model: The operation model. This comes from the
service model, and is the value associated with the operation
name in the service model (i.e ``model['operations'][op_name]``).
:type service_model: botocore.model.ServiceModel
:param service_model: The service model associated with the operation.
:type name: string
:param name: The operation name. This is the operation name exposed to
the users of this model. This can potentially be different from
the "wire_name", which is the operation name that *must* by
provided over the wire. For example, given::
"CreateCloudFrontOriginAccessIdentity":{
"name":"CreateCloudFrontOriginAccessIdentity2014_11_06",
...
}
The ``name`` would be ``CreateCloudFrontOriginAccessIdentity``,
but the ``self.wire_name`` would be
``CreateCloudFrontOriginAccessIdentity2014_11_06``, which is the
value we must send in the corresponding HTTP request.
"""
self._operation_model = operation_model
self._service_model = service_model
self._api_name = name
# Clients can access '.name' to get the operation name
        # and '.metadata' to get the top level metadata of the service.
self._wire_name = operation_model.get('name')
self.metadata = service_model.metadata
self.http = operation_model.get('http', {})
@CachedProperty
def name(self):
if self._api_name is not None:
return self._api_name
else:
return self.wire_name
@property
def wire_name(self):
"""The wire name of the operation.
        In many situations this is the same as the
        ``name`` value, but in some services, the operation name
        exposed to the user is different from the operation name
        we send across the wire (e.g cloudfront).
Any serialization code should use ``wire_name``.
"""
return self._operation_model.get('name')
@property
def service_model(self):
return self._service_model
@CachedProperty
def documentation(self):
return self._operation_model.get('documentation', '')
@CachedProperty
def deprecated(self):
return self._operation_model.get('deprecated', False)
@CachedProperty
def endpoint_discovery(self):
# Explicit None default. An empty dictionary for this trait means it is
# enabled but not required to be used.
return self._operation_model.get('endpointdiscovery', None)
@CachedProperty
def is_endpoint_discovery_operation(self):
return self._operation_model.get('endpointoperation', False)
@CachedProperty
def input_shape(self):
if 'input' not in self._operation_model:
# Some operations do not accept any input and do not define an
# input shape.
return None
return self._service_model.resolve_shape_ref(
self._operation_model['input'])
@CachedProperty
def output_shape(self):
if 'output' not in self._operation_model:
# Some operations do not define an output shape,
# in which case we return None to indicate the
# operation has no expected output.
return None
return self._service_model.resolve_shape_ref(
self._operation_model['output'])
@CachedProperty
def idempotent_members(self):
input_shape = self.input_shape
if not input_shape:
return []
return [name for (name, shape) in input_shape.members.items()
if 'idempotencyToken' in shape.metadata and
shape.metadata['idempotencyToken']]
@CachedProperty
def auth_type(self):
return self._operation_model.get('authtype')
@CachedProperty
def error_shapes(self):
shapes = self._operation_model.get("errors", [])
return list(self._service_model.resolve_shape_ref(s) for s in shapes)
@CachedProperty
def endpoint(self):
return self._operation_model.get('endpoint')
@CachedProperty
def http_checksum_required(self):
return self._operation_model.get('httpChecksumRequired', False)
@CachedProperty
def has_event_stream_input(self):
return self.get_event_stream_input() is not None
@CachedProperty
def has_event_stream_output(self):
return self.get_event_stream_output() is not None
def get_event_stream_input(self):
return self._get_event_stream(self.input_shape)
def get_event_stream_output(self):
return self._get_event_stream(self.output_shape)
def _get_event_stream(self, shape):
"""Returns the event stream member's shape if any or None otherwise."""
if shape is None:
return None
event_name = shape.event_stream_name
if event_name:
return shape.members[event_name]
return None
@CachedProperty
def has_streaming_input(self):
return self.get_streaming_input() is not None
@CachedProperty
def has_streaming_output(self):
return self.get_streaming_output() is not None
def get_streaming_input(self):
return self._get_streaming_body(self.input_shape)
def get_streaming_output(self):
return self._get_streaming_body(self.output_shape)
def _get_streaming_body(self, shape):
"""Returns the streaming member's shape if any; or None otherwise."""
if shape is None:
return None
payload = shape.serialization.get('payload')
if payload is not None:
payload_shape = shape.members[payload]
if payload_shape.type_name == 'blob':
return payload_shape
return None
def __repr__(self):
return '%s(name=%s)' % (self.__class__.__name__, self.name)
class ShapeResolver(object):
"""Resolves shape references."""
# Any type not in this mapping will default to the Shape class.
SHAPE_CLASSES = {
'structure': StructureShape,
'list': ListShape,
'map': MapShape,
'string': StringShape
}
def __init__(self, shape_map):
self._shape_map = shape_map
self._shape_cache = {}
def get_shape_by_name(self, shape_name, member_traits=None):
try:
shape_model = self._shape_map[shape_name]
except KeyError:
raise NoShapeFoundError(shape_name)
try:
shape_cls = self.SHAPE_CLASSES.get(shape_model['type'], Shape)
except KeyError:
raise InvalidShapeError("Shape is missing required key 'type': %s"
% shape_model)
if member_traits:
shape_model = shape_model.copy()
shape_model.update(member_traits)
result = shape_cls(shape_name, shape_model, self)
return result
def resolve_shape_ref(self, shape_ref):
# A shape_ref is a dict that has a 'shape' key that
# refers to a shape name as well as any additional
# member traits that are then merged over the shape
# definition. For example:
# {"shape": "StringType", "locationName": "Foobar"}
if len(shape_ref) == 1 and 'shape' in shape_ref:
# It's just a shape ref with no member traits, we can avoid
# a .copy(). This is the common case so it's specifically
# called out here.
return self.get_shape_by_name(shape_ref['shape'])
else:
member_traits = shape_ref.copy()
try:
shape_name = member_traits.pop('shape')
except KeyError:
raise InvalidShapeReferenceError(
"Invalid model, missing shape reference: %s" % shape_ref)
return self.get_shape_by_name(shape_name, member_traits)
class UnresolvableShapeMap(object):
"""A ShapeResolver that will throw ValueErrors when shapes are resolved.
"""
def get_shape_by_name(self, shape_name, member_traits=None):
raise ValueError("Attempted to lookup shape '%s', but no shape "
"map was provided.")
def resolve_shape_ref(self, shape_ref):
raise ValueError("Attempted to resolve shape '%s', but no shape "
"map was provided.")
class DenormalizedStructureBuilder(object):
"""Build a StructureShape from a denormalized model.
This is a convenience builder class that makes it easy to construct
``StructureShape``s based on a denormalized model.
It will handle the details of creating unique shape names and creating
the appropriate shape map needed by the ``StructureShape`` class.
Example usage::
builder = DenormalizedStructureBuilder()
shape = builder.with_members({
'A': {
'type': 'structure',
'members': {
'B': {
'type': 'structure',
'members': {
'C': {
'type': 'string',
}
}
}
}
}
}).build_model()
# ``shape`` is now an instance of botocore.model.StructureShape
    :type name: string
    :param name: The name to use for the built structure shape. If not
                 provided, a unique shape name is generated automatically.
    """
def __init__(self, name=None):
self.members = OrderedDict()
self._name_generator = ShapeNameGenerator()
        if name is None:
            name = self._name_generator.new_shape_name('structure')
        self.name = name
def with_members(self, members):
"""
:type members: dict
:param members: The denormalized members.
:return: self
"""
self._members = members
return self
def build_model(self):
"""Build the model based on the provided members.
:rtype: botocore.model.StructureShape
:return: The built StructureShape object.
"""
shapes = OrderedDict()
denormalized = {
'type': 'structure',
'members': self._members,
}
self._build_model(denormalized, shapes, self.name)
resolver = ShapeResolver(shape_map=shapes)
return StructureShape(shape_name=self.name,
shape_model=shapes[self.name],
shape_resolver=resolver)
def _build_model(self, model, shapes, shape_name):
if model['type'] == 'structure':
shapes[shape_name] = self._build_structure(model, shapes)
elif model['type'] == 'list':
shapes[shape_name] = self._build_list(model, shapes)
elif model['type'] == 'map':
shapes[shape_name] = self._build_map(model, shapes)
elif model['type'] in ['string', 'integer', 'boolean', 'blob', 'float',
'timestamp', 'long', 'double', 'char']:
shapes[shape_name] = self._build_scalar(model)
else:
raise InvalidShapeError("Unknown shape type: %s" % model['type'])
def _build_structure(self, model, shapes):
members = OrderedDict()
shape = self._build_initial_shape(model)
shape['members'] = members
for name, member_model in model['members'].items():
member_shape_name = self._get_shape_name(member_model)
members[name] = {'shape': member_shape_name}
self._build_model(member_model, shapes, member_shape_name)
return shape
def _build_list(self, model, shapes):
member_shape_name = self._get_shape_name(model)
shape = self._build_initial_shape(model)
shape['member'] = {'shape': member_shape_name}
self._build_model(model['member'], shapes, member_shape_name)
return shape
def _build_map(self, model, shapes):
key_shape_name = self._get_shape_name(model['key'])
value_shape_name = self._get_shape_name(model['value'])
shape = self._build_initial_shape(model)
shape['key'] = {'shape': key_shape_name}
shape['value'] = {'shape': value_shape_name}
self._build_model(model['key'], shapes, key_shape_name)
self._build_model(model['value'], shapes, value_shape_name)
return shape
def _build_initial_shape(self, model):
shape = {
'type': model['type'],
}
if 'documentation' in model:
shape['documentation'] = model['documentation']
for attr in Shape.METADATA_ATTRS:
if attr in model:
shape[attr] = model[attr]
return shape
def _build_scalar(self, model):
return self._build_initial_shape(model)
def _get_shape_name(self, model):
if 'shape_name' in model:
return model['shape_name']
else:
return self._name_generator.new_shape_name(model['type'])
class ShapeNameGenerator(object):
"""Generate unique shape names for a type.
This class can be used in conjunction with the DenormalizedStructureBuilder
to generate unique shape names for a given type.
"""
def __init__(self):
self._name_cache = defaultdict(int)
def new_shape_name(self, type_name):
"""Generate a unique shape name.
This method will guarantee a unique shape name each time it is
called with the same type.
::
>>> s = ShapeNameGenerator()
>>> s.new_shape_name('structure')
'StructureType1'
>>> s.new_shape_name('structure')
'StructureType2'
>>> s.new_shape_name('list')
'ListType1'
>>> s.new_shape_name('list')
'ListType2'
:type type_name: string
:param type_name: The type name (structure, list, map, string, etc.)
:rtype: string
:return: A unique shape name for the given type
"""
self._name_cache[type_name] += 1
current_index = self._name_cache[type_name]
return '%sType%s' % (type_name.capitalize(),
current_index)
| 28,352 | Python | 33.367273 | 79 | 0.60553 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/retryhandler.py | # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import random
import functools
import logging
from binascii import crc32
from botocore.exceptions import (
ChecksumError, EndpointConnectionError, ReadTimeoutError,
ConnectionError, ConnectionClosedError,
)
logger = logging.getLogger(__name__)
# The only supported error for now is GENERAL_CONNECTION_ERROR
# which maps to requests generic ConnectionError. If we're able
# to get more specific exceptions from requests we can update
# this mapping with more specific exceptions.
EXCEPTION_MAP = {
'GENERAL_CONNECTION_ERROR': [
ConnectionError, ConnectionClosedError, ReadTimeoutError,
EndpointConnectionError
],
}
def delay_exponential(base, growth_factor, attempts):
"""Calculate time to sleep based on exponential function.
The format is::
base * growth_factor ^ (attempts - 1)
If ``base`` is set to 'rand' then a random number between
0 and 1 will be used as the base.
Base must be greater than 0, otherwise a ValueError will be
raised.
"""
if base == 'rand':
base = random.random()
elif base <= 0:
raise ValueError("The 'base' param must be greater than 0, "
"got: %s" % base)
time_to_sleep = base * (growth_factor ** (attempts - 1))
return time_to_sleep
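# Worked example (illustrative): delay_exponential(base=1, growth_factor=2,
# attempts=3) returns 1 * 2 ** (3 - 1) = 4 seconds of sleep time.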
def create_exponential_delay_function(base, growth_factor):
"""Create an exponential delay function based on the attempts.
This is used so that you only have to pass it the attempts
parameter to calculate the delay.
"""
return functools.partial(
delay_exponential, base=base, growth_factor=growth_factor)
def create_retry_handler(config, operation_name=None):
checker = create_checker_from_retry_config(
config, operation_name=operation_name)
action = create_retry_action_from_config(
config, operation_name=operation_name)
return RetryHandler(checker=checker, action=action)
def create_retry_action_from_config(config, operation_name=None):
# The spec has the possibility of supporting per policy
# actions, but right now, we assume this comes from the
# default section, which means that delay functions apply
# for every policy in the retry config (per service).
delay_config = config['__default__']['delay']
if delay_config['type'] == 'exponential':
return create_exponential_delay_function(
base=delay_config['base'],
growth_factor=delay_config['growth_factor'])
def create_checker_from_retry_config(config, operation_name=None):
checkers = []
max_attempts = None
retryable_exceptions = []
if '__default__' in config:
policies = config['__default__'].get('policies', [])
max_attempts = config['__default__']['max_attempts']
for key in policies:
current_config = policies[key]
checkers.append(_create_single_checker(current_config))
retry_exception = _extract_retryable_exception(current_config)
if retry_exception is not None:
retryable_exceptions.extend(retry_exception)
if operation_name is not None and config.get(operation_name) is not None:
operation_policies = config[operation_name]['policies']
for key in operation_policies:
checkers.append(_create_single_checker(operation_policies[key]))
retry_exception = _extract_retryable_exception(
operation_policies[key])
if retry_exception is not None:
retryable_exceptions.extend(retry_exception)
if len(checkers) == 1:
# Don't need to use a MultiChecker
return MaxAttemptsDecorator(checkers[0], max_attempts=max_attempts)
else:
multi_checker = MultiChecker(checkers)
return MaxAttemptsDecorator(
multi_checker, max_attempts=max_attempts,
retryable_exceptions=tuple(retryable_exceptions))
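# Illustrative shape of a retry config consumed above (a sketch; the key
# names mirror the lookups in this function, the values are made up):
#
#     {
#         '__default__': {
#             'max_attempts': 5,
#             'delay': {'type': 'exponential', 'base': 'rand',
#                       'growth_factor': 2},
#             'policies': {
#                 'general_socket_errors': {
#                     'applies_when': {
#                         'socket_errors': ['GENERAL_CONNECTION_ERROR'],
#                     },
#                 },
#                 'throttling': {
#                     'applies_when': {
#                         'response': {'http_status_code': 400,
#                                      'service_error_code': 'Throttling'},
#                     },
#                 },
#             },
#         },
#     }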
def _create_single_checker(config):
if 'response' in config['applies_when']:
return _create_single_response_checker(
config['applies_when']['response'])
elif 'socket_errors' in config['applies_when']:
return ExceptionRaiser()
def _create_single_response_checker(response):
if 'service_error_code' in response:
checker = ServiceErrorCodeChecker(
status_code=response['http_status_code'],
error_code=response['service_error_code'])
elif 'http_status_code' in response:
checker = HTTPStatusCodeChecker(
status_code=response['http_status_code'])
elif 'crc32body' in response:
checker = CRC32Checker(header=response['crc32body'])
else:
# TODO: send a signal.
raise ValueError("Unknown retry policy: %s" % config)
return checker
def _extract_retryable_exception(config):
applies_when = config['applies_when']
if 'crc32body' in applies_when.get('response', {}):
return [ChecksumError]
elif 'socket_errors' in applies_when:
exceptions = []
for name in applies_when['socket_errors']:
exceptions.extend(EXCEPTION_MAP[name])
return exceptions
class RetryHandler(object):
"""Retry handler.
The retry handler takes two params, ``checker`` object
and an ``action`` object.
    The ``checker`` object must be a callable object that, based on a response
    and an attempt number, determines whether or not sufficient criteria for
    a retry have been met. If this is the case then the ``action`` object
(which also is a callable) determines what needs to happen in the event
of a retry.
"""
def __init__(self, checker, action):
self._checker = checker
self._action = action
def __call__(self, attempts, response, caught_exception, **kwargs):
"""Handler for a retry.
Intended to be hooked up to an event handler (hence the **kwargs),
this will process retries appropriately.
"""
if self._checker(attempts, response, caught_exception):
result = self._action(attempts=attempts)
logger.debug("Retry needed, action of: %s", result)
return result
logger.debug("No retry needed.")
class BaseChecker(object):
"""Base class for retry checkers.
    Each class is responsible for checking a single criterion that determines
    whether or not a retry should happen.
"""
def __call__(self, attempt_number, response, caught_exception):
"""Determine if retry criteria matches.
Note that either ``response`` is not None and ``caught_exception`` is
None or ``response`` is None and ``caught_exception`` is not None.
:type attempt_number: int
:param attempt_number: The total number of times we've attempted
to send the request.
:param response: The HTTP response (if one was received).
:type caught_exception: Exception
:param caught_exception: Any exception that was caught while trying to
send the HTTP response.
        :return: True if the retry criteria matches (and therefore a retry
            should occur); False if the criteria does not match.
"""
# The default implementation allows subclasses to not have to check
# whether or not response is None or not.
if response is not None:
return self._check_response(attempt_number, response)
elif caught_exception is not None:
return self._check_caught_exception(
attempt_number, caught_exception)
else:
raise ValueError("Both response and caught_exception are None.")
def _check_response(self, attempt_number, response):
pass
def _check_caught_exception(self, attempt_number, caught_exception):
pass
class MaxAttemptsDecorator(BaseChecker):
"""Allow retries up to a maximum number of attempts.
This will pass through calls to the decorated retry checker, provided
that the number of attempts does not exceed max_attempts. It will
    also catch any retryable_exceptions passed in. Once max_attempts has
    been exceeded, then False will be returned, or the retryable exception
    that was previously being caught will be raised.
"""
def __init__(self, checker, max_attempts, retryable_exceptions=None):
self._checker = checker
self._max_attempts = max_attempts
self._retryable_exceptions = retryable_exceptions
def __call__(self, attempt_number, response, caught_exception):
should_retry = self._should_retry(attempt_number, response,
caught_exception)
if should_retry:
if attempt_number >= self._max_attempts:
# explicitly set MaxAttemptsReached
if response is not None and 'ResponseMetadata' in response[1]:
response[1]['ResponseMetadata']['MaxAttemptsReached'] = True
logger.debug("Reached the maximum number of retry "
"attempts: %s", attempt_number)
return False
else:
return should_retry
else:
return False
def _should_retry(self, attempt_number, response, caught_exception):
if self._retryable_exceptions and \
attempt_number < self._max_attempts:
try:
return self._checker(attempt_number, response, caught_exception)
except self._retryable_exceptions as e:
logger.debug("retry needed, retryable exception caught: %s",
e, exc_info=True)
return True
else:
            # If we've exceeded the max attempts we just let the exception
            # propagate if one has occurred.
return self._checker(attempt_number, response, caught_exception)
class HTTPStatusCodeChecker(BaseChecker):
def __init__(self, status_code):
self._status_code = status_code
def _check_response(self, attempt_number, response):
if response[0].status_code == self._status_code:
logger.debug(
"retry needed: retryable HTTP status code received: %s",
self._status_code)
return True
else:
return False
class ServiceErrorCodeChecker(BaseChecker):
def __init__(self, status_code, error_code):
self._status_code = status_code
self._error_code = error_code
def _check_response(self, attempt_number, response):
if response[0].status_code == self._status_code:
actual_error_code = response[1].get('Error', {}).get('Code')
if actual_error_code == self._error_code:
logger.debug(
"retry needed: matching HTTP status and error code seen: "
"%s, %s", self._status_code, self._error_code)
return True
return False
class MultiChecker(BaseChecker):
def __init__(self, checkers):
self._checkers = checkers
def __call__(self, attempt_number, response, caught_exception):
for checker in self._checkers:
checker_response = checker(attempt_number, response,
caught_exception)
if checker_response:
return checker_response
return False
class CRC32Checker(BaseChecker):
def __init__(self, header):
# The header where the expected crc32 is located.
self._header_name = header
def _check_response(self, attempt_number, response):
http_response = response[0]
expected_crc = http_response.headers.get(self._header_name)
if expected_crc is None:
logger.debug("crc32 check skipped, the %s header is not "
"in the http response.", self._header_name)
else:
actual_crc32 = crc32(response[0].content) & 0xffffffff
            if actual_crc32 != int(expected_crc):
logger.debug(
"retry needed: crc32 check failed, expected != actual: "
"%s != %s", int(expected_crc), actual_crc32)
raise ChecksumError(checksum_type='crc32',
expected_checksum=int(expected_crc),
actual_checksum=actual_crc32)
class ExceptionRaiser(BaseChecker):
"""Raise any caught exceptions.
This class will raise any non None ``caught_exception``.
"""
def _check_caught_exception(self, attempt_number, caught_exception):
        # This is implementation specific, but this class is useful for
        # coordinating with the MaxAttemptsDecorator.
# The MaxAttemptsDecorator has a list of exceptions it should catch
# and retry, but something needs to come along and actually raise the
# caught_exception. That's what this class is being used for. If
# the MaxAttemptsDecorator is not interested in retrying the exception
        # then this exception just propagates out past the retry code.
raise caught_exception
| 13,781 | Python | 37.283333 | 80 | 0.642261 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/credentials.py | # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import time
import datetime
import logging
import os
import getpass
import threading
import json
import subprocess
from collections import namedtuple
from copy import deepcopy
from hashlib import sha1
from dateutil.parser import parse
from dateutil.tz import tzlocal, tzutc
import botocore.configloader
import botocore.compat
from botocore import UNSIGNED
from botocore.compat import total_seconds
from botocore.compat import compat_shell_split
from botocore.config import Config
from botocore.exceptions import UnknownCredentialError
from botocore.exceptions import PartialCredentialsError
from botocore.exceptions import ConfigNotFound
from botocore.exceptions import InvalidConfigError
from botocore.exceptions import InfiniteLoopConfigError
from botocore.exceptions import RefreshWithMFAUnsupportedError
from botocore.exceptions import MetadataRetrievalError
from botocore.exceptions import CredentialRetrievalError
from botocore.exceptions import UnauthorizedSSOTokenError
from botocore.utils import InstanceMetadataFetcher, parse_key_val_file
from botocore.utils import ContainerMetadataFetcher
from botocore.utils import FileWebIdentityTokenLoader
from botocore.utils import SSOTokenLoader
logger = logging.getLogger(__name__)
ReadOnlyCredentials = namedtuple('ReadOnlyCredentials',
['access_key', 'secret_key', 'token'])
def create_credential_resolver(session, cache=None, region_name=None):
"""Create a default credential resolver.
This creates a pre-configured credential resolver
that includes the default lookup chain for
credentials.
"""
profile_name = session.get_config_variable('profile') or 'default'
metadata_timeout = session.get_config_variable('metadata_service_timeout')
num_attempts = session.get_config_variable('metadata_service_num_attempts')
disable_env_vars = session.instance_variables().get('profile') is not None
imds_config = {
'ec2_metadata_service_endpoint': session.get_config_variable(
'ec2_metadata_service_endpoint'),
'imds_use_ipv6': session.get_config_variable('imds_use_ipv6')
}
if cache is None:
cache = {}
env_provider = EnvProvider()
container_provider = ContainerProvider()
instance_metadata_provider = InstanceMetadataProvider(
iam_role_fetcher=InstanceMetadataFetcher(
timeout=metadata_timeout,
num_attempts=num_attempts,
user_agent=session.user_agent(),
config=imds_config)
)
profile_provider_builder = ProfileProviderBuilder(
session, cache=cache, region_name=region_name)
assume_role_provider = AssumeRoleProvider(
load_config=lambda: session.full_config,
client_creator=_get_client_creator(session, region_name),
cache=cache,
profile_name=profile_name,
credential_sourcer=CanonicalNameCredentialSourcer([
env_provider, container_provider, instance_metadata_provider
]),
profile_provider_builder=profile_provider_builder,
)
pre_profile = [
env_provider,
assume_role_provider,
]
profile_providers = profile_provider_builder.providers(
profile_name=profile_name,
disable_env_vars=disable_env_vars,
)
post_profile = [
OriginalEC2Provider(),
BotoProvider(),
container_provider,
instance_metadata_provider,
]
providers = pre_profile + profile_providers + post_profile
if disable_env_vars:
# An explicitly provided profile will negate an EnvProvider.
# We will defer to providers that understand the "profile"
# concept to retrieve credentials.
        # The one edge case is if all three values are provided via
# env vars:
# export AWS_ACCESS_KEY_ID=foo
# export AWS_SECRET_ACCESS_KEY=bar
# export AWS_PROFILE=baz
# Then, just like our client() calls, the explicit credentials
# will take precedence.
#
# This precedence is enforced by leaving the EnvProvider in the chain.
# This means that the only way a "profile" would win is if the
# EnvProvider does not return credentials, which is what we want
# in this scenario.
providers.remove(env_provider)
logger.debug('Skipping environment variable credential check'
' because profile name was explicitly set.')
resolver = CredentialResolver(providers=providers)
return resolver
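# Illustrative sketch (not part of botocore): exercising the default chain
# built by ``create_credential_resolver``. The session is the normal public
# entry point; the returned fields are just for demonstration.
def _example_default_chain_usage():
    import botocore.session
    session = botocore.session.get_session()
    resolver = create_credential_resolver(session)
    creds = resolver.load_credentials()
    if creds is not None:
        # Freeze so access_key/secret_key/token come from one snapshot.
        frozen = creds.get_frozen_credentials()
        return creds.method, frozen.access_key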
class ProfileProviderBuilder(object):
"""This class handles the creation of profile based providers.
NOTE: This class is only intended for internal use.
This class handles the creation and ordering of the various credential
    providers that primarily source their configuration from the shared
    config.
This is needed to enable sharing between the default credential chain and
the source profile chain created by the assume role provider.
"""
def __init__(self, session, cache=None, region_name=None,
sso_token_cache=None):
self._session = session
self._cache = cache
self._region_name = region_name
self._sso_token_cache = sso_token_cache
def providers(self, profile_name, disable_env_vars=False):
return [
self._create_web_identity_provider(
profile_name, disable_env_vars,
),
self._create_sso_provider(profile_name),
self._create_shared_credential_provider(profile_name),
self._create_process_provider(profile_name),
self._create_config_provider(profile_name),
]
def _create_process_provider(self, profile_name):
return ProcessProvider(
profile_name=profile_name,
load_config=lambda: self._session.full_config,
)
def _create_shared_credential_provider(self, profile_name):
credential_file = self._session.get_config_variable('credentials_file')
return SharedCredentialProvider(
profile_name=profile_name,
creds_filename=credential_file,
)
def _create_config_provider(self, profile_name):
config_file = self._session.get_config_variable('config_file')
return ConfigProvider(
profile_name=profile_name,
config_filename=config_file,
)
def _create_web_identity_provider(self, profile_name, disable_env_vars):
return AssumeRoleWithWebIdentityProvider(
load_config=lambda: self._session.full_config,
client_creator=_get_client_creator(
self._session, self._region_name),
cache=self._cache,
profile_name=profile_name,
disable_env_vars=disable_env_vars,
)
def _create_sso_provider(self, profile_name):
return SSOProvider(
load_config=lambda: self._session.full_config,
client_creator=self._session.create_client,
profile_name=profile_name,
cache=self._cache,
token_cache=self._sso_token_cache,
)
def get_credentials(session):
resolver = create_credential_resolver(session)
return resolver.load_credentials()
def _local_now():
return datetime.datetime.now(tzlocal())
def _parse_if_needed(value):
if isinstance(value, datetime.datetime):
return value
return parse(value)
def _serialize_if_needed(value, iso=False):
if isinstance(value, datetime.datetime):
if iso:
return value.isoformat()
return value.strftime('%Y-%m-%dT%H:%M:%S%Z')
return value
def _get_client_creator(session, region_name):
def client_creator(service_name, **kwargs):
create_client_kwargs = {
'region_name': region_name
}
create_client_kwargs.update(**kwargs)
return session.create_client(service_name, **create_client_kwargs)
return client_creator
def create_assume_role_refresher(client, params):
def refresh():
response = client.assume_role(**params)
credentials = response['Credentials']
# We need to normalize the credential names to
# the values expected by the refresh creds.
return {
'access_key': credentials['AccessKeyId'],
'secret_key': credentials['SecretAccessKey'],
'token': credentials['SessionToken'],
'expiry_time': _serialize_if_needed(credentials['Expiration']),
}
return refresh
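# Illustrative sketch (not part of botocore): wiring the refresher above into
# ``RefreshableCredentials``. The role ARN and session name are assumed
# example values; ``sts_client`` is a normal STS client.
def _example_assume_role_refresher(sts_client):
    refresh = create_assume_role_refresher(sts_client, {
        'RoleArn': 'arn:aws:iam::123456789012:role/Example',
        'RoleSessionName': 'example-session',
    })
    # The first call performs AssumeRole; later refreshes reuse the closure.
    return RefreshableCredentials.create_from_metadata(
        refresh(), refresh_using=refresh, method='assume-role')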
def create_mfa_serial_refresher(actual_refresh):
class _Refresher(object):
def __init__(self, refresh):
self._refresh = refresh
self._has_been_called = False
def __call__(self):
if self._has_been_called:
# We can explore an option in the future to support
# reprompting for MFA, but for now we just error out
# when the temp creds expire.
raise RefreshWithMFAUnsupportedError()
self._has_been_called = True
return self._refresh()
return _Refresher(actual_refresh)
class JSONFileCache(object):
"""JSON file cache.
This provides a dict like interface that stores JSON serializable
objects.
The objects are serialized to JSON and stored in a file. These
values can be retrieved at a later time.
"""
CACHE_DIR = os.path.expanduser(os.path.join('~', '.aws', 'boto', 'cache'))
def __init__(self, working_dir=CACHE_DIR, dumps_func=None):
self._working_dir = working_dir
if dumps_func is None:
dumps_func = self._default_dumps
self._dumps = dumps_func
def _default_dumps(self, obj):
return json.dumps(obj, default=_serialize_if_needed)
def __contains__(self, cache_key):
actual_key = self._convert_cache_key(cache_key)
return os.path.isfile(actual_key)
def __getitem__(self, cache_key):
"""Retrieve value from a cache key."""
actual_key = self._convert_cache_key(cache_key)
try:
with open(actual_key) as f:
return json.load(f)
except (OSError, ValueError, IOError):
raise KeyError(cache_key)
def __setitem__(self, cache_key, value):
full_key = self._convert_cache_key(cache_key)
try:
file_content = self._dumps(value)
except (TypeError, ValueError):
raise ValueError("Value cannot be cached, must be "
"JSON serializable: %s" % value)
if not os.path.isdir(self._working_dir):
os.makedirs(self._working_dir)
with os.fdopen(os.open(full_key,
os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f:
f.truncate()
f.write(file_content)
def _convert_cache_key(self, cache_key):
full_path = os.path.join(self._working_dir, cache_key + '.json')
return full_path
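# Illustrative sketch (not part of botocore): JSONFileCache behaves like a
# dict whose keys map to JSON files on disk. The temporary directory and
# cache key below are assumed example values.
def _example_json_file_cache():
    import tempfile
    cache = JSONFileCache(working_dir=tempfile.mkdtemp())
    cache['example-key'] = {'Credentials': {'AccessKeyId': 'AKIDEXAMPLE'}}
    assert 'example-key' in cache
    return cache['example-key']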
class Credentials(object):
"""
Holds the credentials needed to authenticate requests.
:ivar access_key: The access key part of the credentials.
:ivar secret_key: The secret key part of the credentials.
:ivar token: The security token, valid only for session credentials.
:ivar method: A string which identifies where the credentials
were found.
"""
def __init__(self, access_key, secret_key, token=None,
method=None):
self.access_key = access_key
self.secret_key = secret_key
self.token = token
if method is None:
method = 'explicit'
self.method = method
self._normalize()
def _normalize(self):
# Keys would sometimes (accidentally) contain non-ascii characters.
# It would cause a confusing UnicodeDecodeError in Python 2.
# We explicitly convert them into unicode to avoid such error.
#
# Eventually the service will decide whether to accept the credential.
# This also complies with the behavior in Python 3.
self.access_key = botocore.compat.ensure_unicode(self.access_key)
self.secret_key = botocore.compat.ensure_unicode(self.secret_key)
def get_frozen_credentials(self):
return ReadOnlyCredentials(self.access_key,
self.secret_key,
self.token)
class RefreshableCredentials(Credentials):
"""
Holds the credentials needed to authenticate requests. In addition, it
knows how to refresh itself.
:ivar access_key: The access key part of the credentials.
:ivar secret_key: The secret key part of the credentials.
:ivar token: The security token, valid only for session credentials.
:ivar method: A string which identifies where the credentials
were found.
"""
    # The number of seconds before expiration at which we'll attempt to
    # refresh, but not block if someone else is refreshing.
    _advisory_refresh_timeout = 15 * 60
    # The number of seconds before expiration at which all threads will
    # block waiting for refreshed credentials.
    _mandatory_refresh_timeout = 10 * 60
def __init__(self, access_key, secret_key, token,
expiry_time, refresh_using, method,
time_fetcher=_local_now):
self._refresh_using = refresh_using
self._access_key = access_key
self._secret_key = secret_key
self._token = token
self._expiry_time = expiry_time
self._time_fetcher = time_fetcher
self._refresh_lock = threading.Lock()
self.method = method
self._frozen_credentials = ReadOnlyCredentials(
access_key, secret_key, token)
self._normalize()
def _normalize(self):
self._access_key = botocore.compat.ensure_unicode(self._access_key)
self._secret_key = botocore.compat.ensure_unicode(self._secret_key)
@classmethod
def create_from_metadata(cls, metadata, refresh_using, method):
instance = cls(
access_key=metadata['access_key'],
secret_key=metadata['secret_key'],
token=metadata['token'],
expiry_time=cls._expiry_datetime(metadata['expiry_time']),
method=method,
refresh_using=refresh_using
)
return instance
@property
def access_key(self):
"""Warning: Using this property can lead to race conditions if you
access another property subsequently along the refresh boundary.
Please use get_frozen_credentials instead.
"""
self._refresh()
return self._access_key
@access_key.setter
def access_key(self, value):
self._access_key = value
@property
def secret_key(self):
"""Warning: Using this property can lead to race conditions if you
access another property subsequently along the refresh boundary.
Please use get_frozen_credentials instead.
"""
self._refresh()
return self._secret_key
@secret_key.setter
def secret_key(self, value):
self._secret_key = value
@property
def token(self):
"""Warning: Using this property can lead to race conditions if you
access another property subsequently along the refresh boundary.
Please use get_frozen_credentials instead.
"""
self._refresh()
return self._token
@token.setter
def token(self, value):
self._token = value
def _seconds_remaining(self):
delta = self._expiry_time - self._time_fetcher()
return total_seconds(delta)
def refresh_needed(self, refresh_in=None):
"""Check if a refresh is needed.
A refresh is needed if the expiry time associated
with the temporary credentials is less than the
        provided ``refresh_in``.  If ``refresh_in`` is not
        provided, ``self._advisory_refresh_timeout`` will be used.
For example, if your temporary credentials expire
in 10 minutes and the provided ``refresh_in`` is
``15 * 60``, then this function will return ``True``.
:type refresh_in: int
:param refresh_in: The number of seconds before the
credentials expire in which refresh attempts should
be made.
:return: True if refresh needed, False otherwise.
"""
if self._expiry_time is None:
# No expiration, so assume we don't need to refresh.
return False
if refresh_in is None:
refresh_in = self._advisory_refresh_timeout
        # The credentials should be refreshed if they're going to expire
        # in less than the provided ``refresh_in`` window.
if self._seconds_remaining() >= refresh_in:
# There's enough time left. Don't refresh.
return False
logger.debug("Credentials need to be refreshed.")
return True
def _is_expired(self):
# Checks if the current credentials are expired.
return self.refresh_needed(refresh_in=0)
def _refresh(self):
# In the common case where we don't need a refresh, we
# can immediately exit and not require acquiring the
# refresh lock.
if not self.refresh_needed(self._advisory_refresh_timeout):
return
# acquire() doesn't accept kwargs, but False is indicating
# that we should not block if we can't acquire the lock.
# If we aren't able to acquire the lock, we'll trigger
# the else clause.
if self._refresh_lock.acquire(False):
try:
if not self.refresh_needed(self._advisory_refresh_timeout):
return
is_mandatory_refresh = self.refresh_needed(
self._mandatory_refresh_timeout)
self._protected_refresh(is_mandatory=is_mandatory_refresh)
return
finally:
self._refresh_lock.release()
elif self.refresh_needed(self._mandatory_refresh_timeout):
# If we're within the mandatory refresh window,
# we must block until we get refreshed credentials.
with self._refresh_lock:
if not self.refresh_needed(self._mandatory_refresh_timeout):
return
self._protected_refresh(is_mandatory=True)
def _protected_refresh(self, is_mandatory):
# precondition: this method should only be called if you've acquired
# the self._refresh_lock.
try:
metadata = self._refresh_using()
except Exception as e:
period_name = 'mandatory' if is_mandatory else 'advisory'
logger.warning("Refreshing temporary credentials failed "
"during %s refresh period.",
period_name, exc_info=True)
if is_mandatory:
# If this is a mandatory refresh, then
# all errors that occur when we attempt to refresh
# credentials are propagated back to the user.
raise
# Otherwise we'll just return.
# The end result will be that we'll use the current
# set of temporary credentials we have.
return
self._set_from_data(metadata)
self._frozen_credentials = ReadOnlyCredentials(
self._access_key, self._secret_key, self._token)
if self._is_expired():
# We successfully refreshed credentials but for whatever
# reason, our refreshing function returned credentials
# that are still expired. In this scenario, the only
# thing we can do is let the user know and raise
# an exception.
msg = ("Credentials were refreshed, but the "
"refreshed credentials are still expired.")
logger.warning(msg)
raise RuntimeError(msg)
@staticmethod
def _expiry_datetime(time_str):
return parse(time_str)
def _set_from_data(self, data):
expected_keys = ['access_key', 'secret_key', 'token', 'expiry_time']
if not data:
missing_keys = expected_keys
else:
missing_keys = [k for k in expected_keys if k not in data]
if missing_keys:
message = "Credential refresh failed, response did not contain: %s"
raise CredentialRetrievalError(
provider=self.method,
error_msg=message % ', '.join(missing_keys),
)
self.access_key = data['access_key']
self.secret_key = data['secret_key']
self.token = data['token']
self._expiry_time = parse(data['expiry_time'])
logger.debug("Retrieved credentials will expire at: %s",
self._expiry_time)
self._normalize()
def get_frozen_credentials(self):
"""Return immutable credentials.
The ``access_key``, ``secret_key``, and ``token`` properties
on this class will always check and refresh credentials if
needed before returning the particular credentials.
This has an edge case where you can get inconsistent
credentials. Imagine this:
# Current creds are "t1"
tmp.access_key ---> expired? no, so return t1.access_key
# ---- time is now expired, creds need refreshing to "t2" ----
tmp.secret_key ---> expired? yes, refresh and return t2.secret_key
This means we're using the access key from t1 with the secret key
from t2. To fix this issue, you can request a frozen credential object
which is guaranteed not to change.
The frozen credentials returned from this method should be used
immediately and then discarded. The typical usage pattern would
be::
creds = RefreshableCredentials(...)
some_code = SomeSignerObject()
# I'm about to sign the request.
# The frozen credentials are only used for the
# duration of generate_presigned_url and will be
# immediately thrown away.
request = some_code.sign_some_request(
with_credentials=creds.get_frozen_credentials())
print("Signed request:", request)
"""
self._refresh()
return self._frozen_credentials
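# Illustrative sketch (not part of botocore): building refreshable
# credentials by hand. The refresher below returns assumed example values
# that expire in one hour, so the advisory/mandatory windows never trigger.
def _example_refreshable_credentials():
    import datetime
    def refresh():
        return {
            'access_key': 'AKIDEXAMPLE',
            'secret_key': 'example-secret',
            'token': 'example-token',
            'expiry_time': (
                _local_now() + datetime.timedelta(hours=1)).isoformat(),
        }
    creds = RefreshableCredentials.create_from_metadata(
        refresh(), refresh_using=refresh, method='example')
    # Freeze before signing so the three values are mutually consistent.
    return creds.get_frozen_credentials()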
class DeferredRefreshableCredentials(RefreshableCredentials):
"""Refreshable credentials that don't require initial credentials.
refresh_using will be called upon first access.
"""
def __init__(self, refresh_using, method, time_fetcher=_local_now):
self._refresh_using = refresh_using
self._access_key = None
self._secret_key = None
self._token = None
self._expiry_time = None
self._time_fetcher = time_fetcher
self._refresh_lock = threading.Lock()
self.method = method
self._frozen_credentials = None
def refresh_needed(self, refresh_in=None):
if self._frozen_credentials is None:
return True
return super(DeferredRefreshableCredentials, self).refresh_needed(
refresh_in
)
class CachedCredentialFetcher(object):
DEFAULT_EXPIRY_WINDOW_SECONDS = 60 * 15
def __init__(self, cache=None, expiry_window_seconds=None):
if cache is None:
cache = {}
self._cache = cache
self._cache_key = self._create_cache_key()
if expiry_window_seconds is None:
expiry_window_seconds = self.DEFAULT_EXPIRY_WINDOW_SECONDS
self._expiry_window_seconds = expiry_window_seconds
def _create_cache_key(self):
raise NotImplementedError('_create_cache_key()')
def _make_file_safe(self, filename):
        # Replace ':', the path separator, and '/' to make the
        # string safe to use as a filename.
filename = filename.replace(':', '_').replace(os.path.sep, '_')
return filename.replace('/', '_')
def _get_credentials(self):
raise NotImplementedError('_get_credentials()')
def fetch_credentials(self):
return self._get_cached_credentials()
def _get_cached_credentials(self):
"""Get up-to-date credentials.
This will check the cache for up-to-date credentials, calling assume
role if none are available.
"""
response = self._load_from_cache()
if response is None:
response = self._get_credentials()
self._write_to_cache(response)
else:
logger.debug("Credentials for role retrieved from cache.")
creds = response['Credentials']
expiration = _serialize_if_needed(creds['Expiration'], iso=True)
return {
'access_key': creds['AccessKeyId'],
'secret_key': creds['SecretAccessKey'],
'token': creds['SessionToken'],
'expiry_time': expiration,
}
def _load_from_cache(self):
if self._cache_key in self._cache:
creds = deepcopy(self._cache[self._cache_key])
if not self._is_expired(creds):
return creds
else:
logger.debug(
"Credentials were found in cache, but they are expired."
)
return None
def _write_to_cache(self, response):
self._cache[self._cache_key] = deepcopy(response)
def _is_expired(self, credentials):
"""Check if credentials are expired."""
end_time = _parse_if_needed(credentials['Credentials']['Expiration'])
seconds = total_seconds(end_time - _local_now())
return seconds < self._expiry_window_seconds
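# Illustrative sketch (not part of botocore): the expiry-window arithmetic
# used by ``_is_expired`` above. With ten minutes remaining and a fifteen
# minute default window, cached credentials already count as expired.
def _example_expiry_window_check():
    import datetime
    expiration = _local_now() + datetime.timedelta(minutes=10)
    seconds_left = total_seconds(expiration - _local_now())
    return seconds_left < CachedCredentialFetcher.DEFAULT_EXPIRY_WINDOW_SECONDS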
class BaseAssumeRoleCredentialFetcher(CachedCredentialFetcher):
def __init__(self, client_creator, role_arn, extra_args=None,
cache=None, expiry_window_seconds=None):
self._client_creator = client_creator
self._role_arn = role_arn
if extra_args is None:
self._assume_kwargs = {}
else:
self._assume_kwargs = deepcopy(extra_args)
self._assume_kwargs['RoleArn'] = self._role_arn
self._role_session_name = self._assume_kwargs.get('RoleSessionName')
self._using_default_session_name = False
if not self._role_session_name:
self._generate_assume_role_name()
super(BaseAssumeRoleCredentialFetcher, self).__init__(
cache, expiry_window_seconds
)
def _generate_assume_role_name(self):
self._role_session_name = 'botocore-session-%s' % (int(time.time()))
self._assume_kwargs['RoleSessionName'] = self._role_session_name
self._using_default_session_name = True
def _create_cache_key(self):
"""Create a predictable cache key for the current configuration.
The cache key is intended to be compatible with file names.
"""
args = deepcopy(self._assume_kwargs)
# The role session name gets randomly generated, so we don't want it
# in the hash.
if self._using_default_session_name:
del args['RoleSessionName']
if 'Policy' in args:
# To have a predictable hash, the keys of the policy must be
# sorted, so we have to load it here to make sure it gets sorted
# later on.
args['Policy'] = json.loads(args['Policy'])
args = json.dumps(args, sort_keys=True)
argument_hash = sha1(args.encode('utf-8')).hexdigest()
return self._make_file_safe(argument_hash)
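# Illustrative sketch (not part of botocore): recomputing a cache key the
# same way ``_create_cache_key`` does, for an assumed set of AssumeRole
# arguments (sorted JSON keys keep the hash predictable).
def _example_cache_key():
    args = json.dumps({
        'RoleArn': 'arn:aws:iam::123456789012:role/Example',
        'ExternalId': 'example-id',
    }, sort_keys=True)
    return sha1(args.encode('utf-8')).hexdigest()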
class AssumeRoleCredentialFetcher(BaseAssumeRoleCredentialFetcher):
def __init__(self, client_creator, source_credentials, role_arn,
extra_args=None, mfa_prompter=None, cache=None,
expiry_window_seconds=None):
"""
:type client_creator: callable
:param client_creator: A callable that creates a client taking
arguments like ``Session.create_client``.
:type source_credentials: Credentials
:param source_credentials: The credentials to use to create the
client for the call to AssumeRole.
:type role_arn: str
:param role_arn: The ARN of the role to be assumed.
:type extra_args: dict
:param extra_args: Any additional arguments to add to the assume
role request using the format of the botocore operation.
Possible keys include, but may not be limited to,
DurationSeconds, Policy, SerialNumber, ExternalId and
RoleSessionName.
:type mfa_prompter: callable
:param mfa_prompter: A callable that returns input provided by the
            user (e.g. ``raw_input``, ``getpass.getpass``, etc.).
:type cache: dict
:param cache: An object that supports ``__getitem__``,
``__setitem__``, and ``__contains__``. An example of this is
the ``JSONFileCache`` class in aws-cli.
:type expiry_window_seconds: int
        :param expiry_window_seconds: The amount of time, in seconds,
            before the credentials' stated expiration at which they are
            treated as expired and refreshed.
        """
self._source_credentials = source_credentials
self._mfa_prompter = mfa_prompter
if self._mfa_prompter is None:
self._mfa_prompter = getpass.getpass
super(AssumeRoleCredentialFetcher, self).__init__(
client_creator, role_arn, extra_args=extra_args,
cache=cache, expiry_window_seconds=expiry_window_seconds
)
def _get_credentials(self):
"""Get credentials by calling assume role."""
kwargs = self._assume_role_kwargs()
client = self._create_client()
return client.assume_role(**kwargs)
def _assume_role_kwargs(self):
"""Get the arguments for assume role based on current configuration."""
assume_role_kwargs = deepcopy(self._assume_kwargs)
mfa_serial = assume_role_kwargs.get('SerialNumber')
if mfa_serial is not None:
prompt = 'Enter MFA code for %s: ' % mfa_serial
token_code = self._mfa_prompter(prompt)
assume_role_kwargs['TokenCode'] = token_code
duration_seconds = assume_role_kwargs.get('DurationSeconds')
if duration_seconds is not None:
assume_role_kwargs['DurationSeconds'] = duration_seconds
return assume_role_kwargs
def _create_client(self):
"""Create an STS client using the source credentials."""
frozen_credentials = self._source_credentials.get_frozen_credentials()
return self._client_creator(
'sts',
aws_access_key_id=frozen_credentials.access_key,
aws_secret_access_key=frozen_credentials.secret_key,
aws_session_token=frozen_credentials.token,
)
class AssumeRoleWithWebIdentityCredentialFetcher(
BaseAssumeRoleCredentialFetcher
):
def __init__(self, client_creator, web_identity_token_loader, role_arn,
extra_args=None, cache=None, expiry_window_seconds=None):
"""
:type client_creator: callable
:param client_creator: A callable that creates a client taking
arguments like ``Session.create_client``.
:type web_identity_token_loader: callable
:param web_identity_token_loader: A callable that takes no arguments
and returns a web identity token str.
:type role_arn: str
:param role_arn: The ARN of the role to be assumed.
:type extra_args: dict
:param extra_args: Any additional arguments to add to the assume
role request using the format of the botocore operation.
Possible keys include, but may not be limited to,
DurationSeconds, Policy, SerialNumber, ExternalId and
RoleSessionName.
:type cache: dict
:param cache: An object that supports ``__getitem__``,
``__setitem__``, and ``__contains__``. An example of this is
the ``JSONFileCache`` class in aws-cli.
:type expiry_window_seconds: int
        :param expiry_window_seconds: The amount of time, in seconds,
            before the credentials' stated expiration at which they are
            treated as expired and refreshed.
        """
self._web_identity_token_loader = web_identity_token_loader
super(AssumeRoleWithWebIdentityCredentialFetcher, self).__init__(
client_creator, role_arn, extra_args=extra_args,
cache=cache, expiry_window_seconds=expiry_window_seconds
)
def _get_credentials(self):
"""Get credentials by calling assume role."""
kwargs = self._assume_role_kwargs()
        # Assume role with web identity does not require credentials other
        # than the token, so we explicitly configure the client to not sign
        # requests.
config = Config(signature_version=UNSIGNED)
client = self._client_creator('sts', config=config)
return client.assume_role_with_web_identity(**kwargs)
def _assume_role_kwargs(self):
"""Get the arguments for assume role based on current configuration."""
assume_role_kwargs = deepcopy(self._assume_kwargs)
identity_token = self._web_identity_token_loader()
assume_role_kwargs['WebIdentityToken'] = identity_token
return assume_role_kwargs
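# Illustrative sketch (not part of botocore): wiring a token loader into the
# web identity fetcher. The token path and role ARN are assumed examples;
# ``client_creator`` has the same shape as ``Session.create_client``.
def _example_web_identity_fetcher(client_creator):
    token_loader = FileWebIdentityTokenLoader('/path/to/oidc/token')
    return AssumeRoleWithWebIdentityCredentialFetcher(
        client_creator=client_creator,
        web_identity_token_loader=token_loader,
        role_arn='arn:aws:iam::123456789012:role/Example',
    )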
class CredentialProvider(object):
# A short name to identify the provider within botocore.
METHOD = None
# A name to identify the provider for use in cross-sdk features like
# assume role's `credential_source` configuration option. These names
# are to be treated in a case-insensitive way. NOTE: any providers not
# implemented in botocore MUST prefix their canonical names with
# 'custom' or we DO NOT guarantee that it will work with any features
# that this provides.
CANONICAL_NAME = None
def __init__(self, session=None):
self.session = session
def load(self):
"""
Loads the credentials from their source & sets them on the object.
Subclasses should implement this method (by reading from disk, the
environment, the network or wherever), returning ``True`` if they were
found & loaded.
        If not found, this method should return ``False``, indicating that the
``CredentialResolver`` should fall back to the next available method.
The default implementation does nothing, assuming the user has set the
``access_key/secret_key/token`` themselves.
:returns: Whether credentials were found & set
        :rtype: bool or Credentials
"""
return True
def _extract_creds_from_mapping(self, mapping, *key_names):
found = []
for key_name in key_names:
try:
found.append(mapping[key_name])
except KeyError:
raise PartialCredentialsError(provider=self.METHOD,
cred_var=key_name)
return found
class ProcessProvider(CredentialProvider):
METHOD = 'custom-process'
def __init__(self, profile_name, load_config, popen=subprocess.Popen):
self._profile_name = profile_name
self._load_config = load_config
self._loaded_config = None
self._popen = popen
def load(self):
credential_process = self._credential_process
if credential_process is None:
return
creds_dict = self._retrieve_credentials_using(credential_process)
if creds_dict.get('expiry_time') is not None:
return RefreshableCredentials.create_from_metadata(
creds_dict,
lambda: self._retrieve_credentials_using(credential_process),
self.METHOD
)
return Credentials(
access_key=creds_dict['access_key'],
secret_key=creds_dict['secret_key'],
token=creds_dict.get('token'),
method=self.METHOD
)
def _retrieve_credentials_using(self, credential_process):
# We're not using shell=True, so we need to pass the
# command and all arguments as a list.
process_list = compat_shell_split(credential_process)
p = self._popen(process_list,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise CredentialRetrievalError(
provider=self.METHOD, error_msg=stderr.decode('utf-8'))
parsed = botocore.compat.json.loads(stdout.decode('utf-8'))
version = parsed.get('Version', '<Version key not provided>')
if version != 1:
raise CredentialRetrievalError(
provider=self.METHOD,
error_msg=("Unsupported version '%s' for credential process "
"provider, supported versions: 1" % version))
try:
return {
'access_key': parsed['AccessKeyId'],
'secret_key': parsed['SecretAccessKey'],
'token': parsed.get('SessionToken'),
'expiry_time': parsed.get('Expiration'),
}
except KeyError as e:
raise CredentialRetrievalError(
provider=self.METHOD,
error_msg="Missing required key in response: %s" % e
)
@property
def _credential_process(self):
if self._loaded_config is None:
self._loaded_config = self._load_config()
profile_config = self._loaded_config.get(
'profiles', {}).get(self._profile_name, {})
return profile_config.get('credential_process')
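# Illustrative sketch (not part of botocore): the JSON document a
# ``credential_process`` command is expected to print on stdout (``Version``
# must be 1). All values below are assumed examples.
_EXAMPLE_CREDENTIAL_PROCESS_OUTPUT = '''{
    "Version": 1,
    "AccessKeyId": "AKIDEXAMPLE",
    "SecretAccessKey": "example-secret",
    "SessionToken": "example-token",
    "Expiration": "2030-01-01T00:00:00Z"
}'''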
class InstanceMetadataProvider(CredentialProvider):
METHOD = 'iam-role'
CANONICAL_NAME = 'Ec2InstanceMetadata'
def __init__(self, iam_role_fetcher):
self._role_fetcher = iam_role_fetcher
def load(self):
fetcher = self._role_fetcher
# We do the first request, to see if we get useful data back.
# If not, we'll pass & move on to whatever's next in the credential
# chain.
metadata = fetcher.retrieve_iam_role_credentials()
if not metadata:
return None
logger.debug('Found credentials from IAM Role: %s',
metadata['role_name'])
# We manually set the data here, since we already made the request &
# have it. When the expiry is hit, the credentials will auto-refresh
# themselves.
creds = RefreshableCredentials.create_from_metadata(
metadata,
method=self.METHOD,
refresh_using=fetcher.retrieve_iam_role_credentials,
)
return creds
class EnvProvider(CredentialProvider):
METHOD = 'env'
CANONICAL_NAME = 'Environment'
ACCESS_KEY = 'AWS_ACCESS_KEY_ID'
SECRET_KEY = 'AWS_SECRET_ACCESS_KEY'
    # The token can come from either of these env vars.
# AWS_SESSION_TOKEN is what other AWS SDKs have standardized on.
TOKENS = ['AWS_SECURITY_TOKEN', 'AWS_SESSION_TOKEN']
EXPIRY_TIME = 'AWS_CREDENTIAL_EXPIRATION'
def __init__(self, environ=None, mapping=None):
"""
:param environ: The environment variables (defaults to
``os.environ`` if no value is provided).
:param mapping: An optional mapping of variable names to
environment variable names. Use this if you want to
change the mapping of access_key->AWS_ACCESS_KEY_ID, etc.
The dict can have up to 3 keys: ``access_key``, ``secret_key``,
``session_token``.
"""
if environ is None:
environ = os.environ
self.environ = environ
self._mapping = self._build_mapping(mapping)
def _build_mapping(self, mapping):
# Mapping of variable name to env var name.
var_mapping = {}
if mapping is None:
# Use the class var default.
var_mapping['access_key'] = self.ACCESS_KEY
var_mapping['secret_key'] = self.SECRET_KEY
var_mapping['token'] = self.TOKENS
var_mapping['expiry_time'] = self.EXPIRY_TIME
else:
var_mapping['access_key'] = mapping.get(
'access_key', self.ACCESS_KEY)
var_mapping['secret_key'] = mapping.get(
'secret_key', self.SECRET_KEY)
var_mapping['token'] = mapping.get(
'token', self.TOKENS)
if not isinstance(var_mapping['token'], list):
var_mapping['token'] = [var_mapping['token']]
var_mapping['expiry_time'] = mapping.get(
'expiry_time', self.EXPIRY_TIME)
return var_mapping
def load(self):
"""
Search for credentials in explicit environment variables.
"""
access_key = self.environ.get(self._mapping['access_key'], '')
if access_key:
logger.info('Found credentials in environment variables.')
fetcher = self._create_credentials_fetcher()
credentials = fetcher(require_expiry=False)
expiry_time = credentials['expiry_time']
if expiry_time is not None:
expiry_time = parse(expiry_time)
return RefreshableCredentials(
credentials['access_key'], credentials['secret_key'],
credentials['token'], expiry_time,
refresh_using=fetcher, method=self.METHOD
)
return Credentials(
credentials['access_key'], credentials['secret_key'],
credentials['token'], method=self.METHOD
)
else:
return None
def _create_credentials_fetcher(self):
mapping = self._mapping
method = self.METHOD
environ = self.environ
def fetch_credentials(require_expiry=True):
credentials = {}
access_key = environ.get(mapping['access_key'], '')
if not access_key:
raise PartialCredentialsError(
provider=method, cred_var=mapping['access_key'])
credentials['access_key'] = access_key
secret_key = environ.get(mapping['secret_key'], '')
if not secret_key:
raise PartialCredentialsError(
provider=method, cred_var=mapping['secret_key'])
credentials['secret_key'] = secret_key
credentials['token'] = None
for token_env_var in mapping['token']:
token = environ.get(token_env_var, '')
if token:
credentials['token'] = token
break
credentials['expiry_time'] = None
expiry_time = environ.get(mapping['expiry_time'], '')
if expiry_time:
credentials['expiry_time'] = expiry_time
if require_expiry and not expiry_time:
raise PartialCredentialsError(
provider=method, cred_var=mapping['expiry_time'])
return credentials
return fetch_credentials
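# Illustrative sketch (not part of botocore): EnvProvider with an injected
# mapping instead of ``os.environ``. The values are assumed examples; with
# no AWS_CREDENTIAL_EXPIRATION set, plain (non-refreshable) credentials are
# returned.
def _example_env_provider():
    fake_environ = {
        'AWS_ACCESS_KEY_ID': 'AKIDEXAMPLE',
        'AWS_SECRET_ACCESS_KEY': 'example-secret',
        'AWS_SESSION_TOKEN': 'example-token',
    }
    return EnvProvider(environ=fake_environ).load()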
class OriginalEC2Provider(CredentialProvider):
METHOD = 'ec2-credentials-file'
CANONICAL_NAME = 'Ec2Config'
CRED_FILE_ENV = 'AWS_CREDENTIAL_FILE'
ACCESS_KEY = 'AWSAccessKeyId'
SECRET_KEY = 'AWSSecretKey'
def __init__(self, environ=None, parser=None):
if environ is None:
environ = os.environ
if parser is None:
parser = parse_key_val_file
self._environ = environ
self._parser = parser
def load(self):
"""
Search for a credential file used by original EC2 CLI tools.
"""
if 'AWS_CREDENTIAL_FILE' in self._environ:
full_path = os.path.expanduser(
self._environ['AWS_CREDENTIAL_FILE'])
creds = self._parser(full_path)
if self.ACCESS_KEY in creds:
logger.info('Found credentials in AWS_CREDENTIAL_FILE.')
access_key = creds[self.ACCESS_KEY]
secret_key = creds[self.SECRET_KEY]
# EC2 creds file doesn't support session tokens.
return Credentials(access_key, secret_key, method=self.METHOD)
else:
return None
class SharedCredentialProvider(CredentialProvider):
METHOD = 'shared-credentials-file'
CANONICAL_NAME = 'SharedCredentials'
ACCESS_KEY = 'aws_access_key_id'
SECRET_KEY = 'aws_secret_access_key'
# Same deal as the EnvProvider above. Botocore originally supported
# aws_security_token, but the SDKs are standardizing on aws_session_token
# so we support both.
TOKENS = ['aws_security_token', 'aws_session_token']
def __init__(self, creds_filename, profile_name=None, ini_parser=None):
self._creds_filename = creds_filename
if profile_name is None:
profile_name = 'default'
self._profile_name = profile_name
if ini_parser is None:
ini_parser = botocore.configloader.raw_config_parse
self._ini_parser = ini_parser
def load(self):
try:
available_creds = self._ini_parser(self._creds_filename)
except ConfigNotFound:
return None
if self._profile_name in available_creds:
config = available_creds[self._profile_name]
if self.ACCESS_KEY in config:
logger.info("Found credentials in shared credentials file: %s",
self._creds_filename)
access_key, secret_key = self._extract_creds_from_mapping(
config, self.ACCESS_KEY, self.SECRET_KEY)
token = self._get_session_token(config)
return Credentials(access_key, secret_key, token,
method=self.METHOD)
def _get_session_token(self, config):
for token_envvar in self.TOKENS:
if token_envvar in config:
return config[token_envvar]
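# Illustrative sketch (not part of botocore): the ini layout the shared
# credentials provider parses. The profile name and values are assumed
# examples; ``aws_security_token`` would also be honored for the token.
_EXAMPLE_SHARED_CREDENTIALS_FILE = '''[default]
aws_access_key_id = AKIDEXAMPLE
aws_secret_access_key = example-secret
aws_session_token = example-token
'''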
class ConfigProvider(CredentialProvider):
"""INI based config provider with profile sections."""
METHOD = 'config-file'
CANONICAL_NAME = 'SharedConfig'
ACCESS_KEY = 'aws_access_key_id'
SECRET_KEY = 'aws_secret_access_key'
# Same deal as the EnvProvider above. Botocore originally supported
# aws_security_token, but the SDKs are standardizing on aws_session_token
# so we support both.
TOKENS = ['aws_security_token', 'aws_session_token']
def __init__(self, config_filename, profile_name, config_parser=None):
"""
:param config_filename: The session configuration scoped to the current
profile. This is available via ``session.config``.
:param profile_name: The name of the current profile.
:param config_parser: A config parser callable.
"""
self._config_filename = config_filename
self._profile_name = profile_name
if config_parser is None:
config_parser = botocore.configloader.load_config
self._config_parser = config_parser
def load(self):
"""
        If there are credentials in the configuration associated with
the session, use those.
"""
try:
full_config = self._config_parser(self._config_filename)
except ConfigNotFound:
return None
if self._profile_name in full_config['profiles']:
profile_config = full_config['profiles'][self._profile_name]
if self.ACCESS_KEY in profile_config:
logger.info("Credentials found in config file: %s",
self._config_filename)
access_key, secret_key = self._extract_creds_from_mapping(
profile_config, self.ACCESS_KEY, self.SECRET_KEY)
token = self._get_session_token(profile_config)
return Credentials(access_key, secret_key, token,
method=self.METHOD)
else:
return None
def _get_session_token(self, profile_config):
for token_name in self.TOKENS:
if token_name in profile_config:
return profile_config[token_name]
class BotoProvider(CredentialProvider):
METHOD = 'boto-config'
CANONICAL_NAME = 'Boto2Config'
BOTO_CONFIG_ENV = 'BOTO_CONFIG'
DEFAULT_CONFIG_FILENAMES = ['/etc/boto.cfg', '~/.boto']
ACCESS_KEY = 'aws_access_key_id'
SECRET_KEY = 'aws_secret_access_key'
def __init__(self, environ=None, ini_parser=None):
if environ is None:
environ = os.environ
if ini_parser is None:
ini_parser = botocore.configloader.raw_config_parse
self._environ = environ
self._ini_parser = ini_parser
def load(self):
"""
Look for credentials in boto config file.
"""
if self.BOTO_CONFIG_ENV in self._environ:
potential_locations = [self._environ[self.BOTO_CONFIG_ENV]]
else:
potential_locations = self.DEFAULT_CONFIG_FILENAMES
for filename in potential_locations:
try:
config = self._ini_parser(filename)
except ConfigNotFound:
# Move on to the next potential config file name.
continue
if 'Credentials' in config:
credentials = config['Credentials']
if self.ACCESS_KEY in credentials:
logger.info("Found credentials in boto config file: %s",
filename)
access_key, secret_key = self._extract_creds_from_mapping(
credentials, self.ACCESS_KEY, self.SECRET_KEY)
return Credentials(access_key, secret_key,
method=self.METHOD)
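# Illustrative sketch (not part of botocore): the boto2-style config section
# this provider looks for in BOTO_CONFIG, /etc/boto.cfg, or ~/.boto. The
# values are assumed examples.
_EXAMPLE_BOTO_CONFIG = '''[Credentials]
aws_access_key_id = AKIDEXAMPLE
aws_secret_access_key = example-secret
'''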
class AssumeRoleProvider(CredentialProvider):
METHOD = 'assume-role'
# The AssumeRole provider is logically part of the SharedConfig and
# SharedCredentials providers. Since the purpose of the canonical name
# is to provide cross-sdk compatibility, calling code will need to be
# aware that either of those providers should be tied to the AssumeRole
# provider as much as possible.
CANONICAL_NAME = None
ROLE_CONFIG_VAR = 'role_arn'
WEB_IDENTITY_TOKE_FILE_VAR = 'web_identity_token_file'
# Credentials are considered expired (and will be refreshed) once the total
# remaining time left until the credentials expires is less than the
# EXPIRY_WINDOW.
EXPIRY_WINDOW_SECONDS = 60 * 15
def __init__(self, load_config, client_creator, cache, profile_name,
prompter=getpass.getpass, credential_sourcer=None,
profile_provider_builder=None):
"""
:type load_config: callable
:param load_config: A function that accepts no arguments, and
when called, will return the full configuration dictionary
for the session (``session.full_config``).
:type client_creator: callable
:param client_creator: A factory function that will create
a client when called. Has the same interface as
``botocore.session.Session.create_client``.
:type cache: dict
:param cache: An object that supports ``__getitem__``,
``__setitem__``, and ``__contains__``. An example
of this is the ``JSONFileCache`` class in the CLI.
:type profile_name: str
:param profile_name: The name of the profile.
:type prompter: callable
:param prompter: A callable that returns input provided
            by the user (e.g. ``raw_input``, ``getpass.getpass``, etc.).
:type credential_sourcer: CanonicalNameCredentialSourcer
:param credential_sourcer: A credential provider that takes a
configuration, which is used to provide the source credentials
for the STS call.
"""
#: The cache used to first check for assumed credentials.
#: This is checked before making the AssumeRole API
#: calls and can be useful if you have short lived
#: scripts and you'd like to avoid calling AssumeRole
#: until the credentials are expired.
self.cache = cache
self._load_config = load_config
        # client_creator is a callable that creates a client.
# It's basically session.create_client
self._client_creator = client_creator
self._profile_name = profile_name
self._prompter = prompter
# The _loaded_config attribute will be populated from the
# load_config() function once the configuration is actually
# loaded. The reason we go through all this instead of just
        # requiring that the loaded_config be passed to us is so that
        # we can defer configuration loading until we actually try
# to load credentials (as opposed to when the object is
# instantiated).
self._loaded_config = {}
self._credential_sourcer = credential_sourcer
self._profile_provider_builder = profile_provider_builder
self._visited_profiles = [self._profile_name]
def load(self):
self._loaded_config = self._load_config()
profiles = self._loaded_config.get('profiles', {})
profile = profiles.get(self._profile_name, {})
if self._has_assume_role_config_vars(profile):
return self._load_creds_via_assume_role(self._profile_name)
def _has_assume_role_config_vars(self, profile):
return (
self.ROLE_CONFIG_VAR in profile and
# We need to ensure this provider doesn't look at a profile when
# the profile has configuration for web identity. Simply relying on
# the order in the credential chain is insufficient as it doesn't
# prevent the case when we're doing an assume role chain.
self.WEB_IDENTITY_TOKE_FILE_VAR not in profile
)
def _load_creds_via_assume_role(self, profile_name):
role_config = self._get_role_config(profile_name)
source_credentials = self._resolve_source_credentials(
role_config, profile_name
)
extra_args = {}
role_session_name = role_config.get('role_session_name')
if role_session_name is not None:
extra_args['RoleSessionName'] = role_session_name
external_id = role_config.get('external_id')
if external_id is not None:
extra_args['ExternalId'] = external_id
mfa_serial = role_config.get('mfa_serial')
if mfa_serial is not None:
extra_args['SerialNumber'] = mfa_serial
duration_seconds = role_config.get('duration_seconds')
if duration_seconds is not None:
extra_args['DurationSeconds'] = duration_seconds
fetcher = AssumeRoleCredentialFetcher(
client_creator=self._client_creator,
source_credentials=source_credentials,
role_arn=role_config['role_arn'],
extra_args=extra_args,
mfa_prompter=self._prompter,
cache=self.cache,
)
refresher = fetcher.fetch_credentials
if mfa_serial is not None:
refresher = create_mfa_serial_refresher(refresher)
# The initial credentials are empty and the expiration time is set
# to now so that we can delay the call to assume role until it is
# strictly needed.
return DeferredRefreshableCredentials(
method=self.METHOD,
refresh_using=refresher,
time_fetcher=_local_now
)
def _get_role_config(self, profile_name):
"""Retrieves and validates the role configuration for the profile."""
profiles = self._loaded_config.get('profiles', {})
profile = profiles[profile_name]
source_profile = profile.get('source_profile')
role_arn = profile['role_arn']
credential_source = profile.get('credential_source')
mfa_serial = profile.get('mfa_serial')
external_id = profile.get('external_id')
role_session_name = profile.get('role_session_name')
duration_seconds = profile.get('duration_seconds')
role_config = {
'role_arn': role_arn,
'external_id': external_id,
'mfa_serial': mfa_serial,
'role_session_name': role_session_name,
'source_profile': source_profile,
'credential_source': credential_source
}
if duration_seconds is not None:
try:
role_config['duration_seconds'] = int(duration_seconds)
except ValueError:
pass
# Either the credential source or the source profile must be
# specified, but not both.
if credential_source is not None and source_profile is not None:
raise InvalidConfigError(
error_msg=(
'The profile "%s" contains both source_profile and '
'credential_source.' % profile_name
)
)
elif credential_source is None and source_profile is None:
raise PartialCredentialsError(
provider=self.METHOD,
cred_var='source_profile or credential_source'
)
elif credential_source is not None:
self._validate_credential_source(
profile_name, credential_source)
else:
self._validate_source_profile(profile_name, source_profile)
return role_config
def _validate_credential_source(self, parent_profile, credential_source):
if self._credential_sourcer is None:
raise InvalidConfigError(error_msg=(
'The credential_source "%s" is specified in profile "%s", '
'but no source provider was configured.' % (
credential_source, parent_profile)
))
if not self._credential_sourcer.is_supported(credential_source):
raise InvalidConfigError(error_msg=(
'The credential source "%s" referenced in profile "%s" is not '
'valid.' % (credential_source, parent_profile)
))
def _source_profile_has_credentials(self, profile):
return any([
self._has_static_credentials(profile),
self._has_assume_role_config_vars(profile),
])
def _validate_source_profile(self, parent_profile_name,
source_profile_name):
profiles = self._loaded_config.get('profiles', {})
if source_profile_name not in profiles:
raise InvalidConfigError(
error_msg=(
'The source_profile "%s" referenced in '
'the profile "%s" does not exist.' % (
source_profile_name, parent_profile_name)
)
)
source_profile = profiles[source_profile_name]
# Make sure we aren't going into an infinite loop. If we haven't
# visited the profile yet, we're good.
if source_profile_name not in self._visited_profiles:
return
# If we have visited the profile and the profile isn't simply
# referencing itself, that's an infinite loop.
if source_profile_name != parent_profile_name:
raise InfiniteLoopConfigError(
source_profile=source_profile_name,
visited_profiles=self._visited_profiles
)
# A profile is allowed to reference itself so that it can source
# static credentials and have configuration all in the same
# profile. This will only ever work for the top level assume
# role because the static credentials will otherwise take
# precedence.
if not self._has_static_credentials(source_profile):
raise InfiniteLoopConfigError(
source_profile=source_profile_name,
visited_profiles=self._visited_profiles
)
def _has_static_credentials(self, profile):
static_keys = ['aws_secret_access_key', 'aws_access_key_id']
return any(static_key in profile for static_key in static_keys)
def _resolve_source_credentials(self, role_config, profile_name):
credential_source = role_config.get('credential_source')
if credential_source is not None:
return self._resolve_credentials_from_source(
credential_source, profile_name
)
source_profile = role_config['source_profile']
self._visited_profiles.append(source_profile)
return self._resolve_credentials_from_profile(source_profile)
def _resolve_credentials_from_profile(self, profile_name):
profiles = self._loaded_config.get('profiles', {})
profile = profiles[profile_name]
if self._has_static_credentials(profile) and \
not self._profile_provider_builder:
# This is only here for backwards compatibility. If this provider
            # isn't given a profile provider builder we still want to be
            # able to handle the basic static credential case as we would
            # before the profile provider builder parameter was added.
return self._resolve_static_credentials_from_profile(profile)
elif self._has_static_credentials(profile) or \
not self._has_assume_role_config_vars(profile):
profile_providers = self._profile_provider_builder.providers(
profile_name=profile_name,
disable_env_vars=True,
)
profile_chain = CredentialResolver(profile_providers)
credentials = profile_chain.load_credentials()
if credentials is None:
error_message = (
'The source profile "%s" must have credentials.'
)
raise InvalidConfigError(
error_msg=error_message % profile_name,
)
return credentials
return self._load_creds_via_assume_role(profile_name)
def _resolve_static_credentials_from_profile(self, profile):
try:
return Credentials(
access_key=profile['aws_access_key_id'],
secret_key=profile['aws_secret_access_key'],
token=profile.get('aws_session_token')
)
except KeyError as e:
raise PartialCredentialsError(
provider=self.METHOD, cred_var=str(e))
def _resolve_credentials_from_source(self, credential_source,
profile_name):
credentials = self._credential_sourcer.source_credentials(
credential_source)
if credentials is None:
raise CredentialRetrievalError(
provider=credential_source,
error_msg=(
'No credentials found in credential_source referenced '
'in profile %s' % profile_name
)
)
return credentials
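# Illustrative sketch (not part of botocore): a pair of config-file profiles
# the assume role provider can resolve. Profile names, the role ARN, and the
# MFA serial are assumed examples; ``credential_source`` could be used in
# place of ``source_profile`` (but not both).
_EXAMPLE_ASSUME_ROLE_CONFIG = '''[profile base]
aws_access_key_id = AKIDEXAMPLE
aws_secret_access_key = example-secret

[profile admin]
role_arn = arn:aws:iam::123456789012:role/Admin
source_profile = base
mfa_serial = arn:aws:iam::123456789012:mfa/example
'''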
class AssumeRoleWithWebIdentityProvider(CredentialProvider):
METHOD = 'assume-role-with-web-identity'
CANONICAL_NAME = None
_CONFIG_TO_ENV_VAR = {
'web_identity_token_file': 'AWS_WEB_IDENTITY_TOKEN_FILE',
'role_session_name': 'AWS_ROLE_SESSION_NAME',
'role_arn': 'AWS_ROLE_ARN',
}
def __init__(
self,
load_config,
client_creator,
profile_name,
cache=None,
disable_env_vars=False,
token_loader_cls=None,
):
self.cache = cache
self._load_config = load_config
self._client_creator = client_creator
self._profile_name = profile_name
self._profile_config = None
self._disable_env_vars = disable_env_vars
if token_loader_cls is None:
token_loader_cls = FileWebIdentityTokenLoader
self._token_loader_cls = token_loader_cls
def load(self):
return self._assume_role_with_web_identity()
def _get_profile_config(self, key):
if self._profile_config is None:
loaded_config = self._load_config()
profiles = loaded_config.get('profiles', {})
self._profile_config = profiles.get(self._profile_name, {})
return self._profile_config.get(key)
def _get_env_config(self, key):
if self._disable_env_vars:
return None
env_key = self._CONFIG_TO_ENV_VAR.get(key)
if env_key and env_key in os.environ:
return os.environ[env_key]
return None
def _get_config(self, key):
env_value = self._get_env_config(key)
if env_value is not None:
return env_value
return self._get_profile_config(key)
def _assume_role_with_web_identity(self):
token_path = self._get_config('web_identity_token_file')
if not token_path:
return None
token_loader = self._token_loader_cls(token_path)
role_arn = self._get_config('role_arn')
if not role_arn:
error_msg = (
'The provided profile or the current environment is '
'configured to assume role with web identity but has no '
                'role ARN configured. Ensure that the profile has the '
                'role_arn configuration set or the AWS_ROLE_ARN env var '
                'is set.'
)
raise InvalidConfigError(error_msg=error_msg)
extra_args = {}
role_session_name = self._get_config('role_session_name')
if role_session_name is not None:
extra_args['RoleSessionName'] = role_session_name
fetcher = AssumeRoleWithWebIdentityCredentialFetcher(
client_creator=self._client_creator,
web_identity_token_loader=token_loader,
role_arn=role_arn,
extra_args=extra_args,
cache=self.cache,
)
# The initial credentials are empty and the expiration time is set
# to now so that we can delay the call to assume role until it is
# strictly needed.
return DeferredRefreshableCredentials(
method=self.METHOD,
refresh_using=fetcher.fetch_credentials,
)
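# Illustrative sketch (not part of botocore): the environment variables this
# provider falls back to when the profile has no web identity configuration.
# The paths and ARN are assumed example values.
_EXAMPLE_WEB_IDENTITY_ENV = {
    'AWS_WEB_IDENTITY_TOKEN_FILE': '/var/run/secrets/oidc/token',
    'AWS_ROLE_ARN': 'arn:aws:iam::123456789012:role/Example',
    'AWS_ROLE_SESSION_NAME': 'example-session',
}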
class CanonicalNameCredentialSourcer(object):
def __init__(self, providers):
self._providers = providers
def is_supported(self, source_name):
"""Validates a given source name.
:type source_name: str
:param source_name: The value of credential_source in the config
file. This is the canonical name of the credential provider.
:rtype: bool
:returns: True if the credential provider is supported,
False otherwise.
"""
return source_name in [p.CANONICAL_NAME for p in self._providers]
def source_credentials(self, source_name):
"""Loads source credentials based on the provided configuration.
:type source_name: str
:param source_name: The value of credential_source in the config
file. This is the canonical name of the credential provider.
:rtype: Credentials
"""
source = self._get_provider(source_name)
if isinstance(source, CredentialResolver):
return source.load_credentials()
return source.load()
def _get_provider(self, canonical_name):
"""Return a credential provider by its canonical name.
:type canonical_name: str
:param canonical_name: The canonical name of the provider.
:raises UnknownCredentialError: Raised if no
credential provider by the provided name
is found.
"""
provider = self._get_provider_by_canonical_name(canonical_name)
# The AssumeRole provider should really be part of the SharedConfig
# provider rather than being its own thing, but it is not. It is
# effectively part of both the SharedConfig provider and the
# SharedCredentials provider now due to the way it behaves.
# Therefore if we want either of those providers we should return
# the AssumeRole provider with it.
if canonical_name.lower() in ['sharedconfig', 'sharedcredentials']:
assume_role_provider = self._get_provider_by_method('assume-role')
if assume_role_provider is not None:
# The SharedConfig or SharedCredentials provider may not be
# present if it was removed for some reason, but the
# AssumeRole provider could still be present. In that case,
# return the assume role provider by itself.
if provider is None:
return assume_role_provider
# If both are present, return them both as a
# CredentialResolver so that calling code can treat them as
# a single entity.
return CredentialResolver([assume_role_provider, provider])
if provider is None:
raise UnknownCredentialError(name=canonical_name)
return provider
def _get_provider_by_canonical_name(self, canonical_name):
"""Return a credential provider by its canonical name.
This function is strict, it does not attempt to address
compatibility issues.
"""
for provider in self._providers:
name = provider.CANONICAL_NAME
# Canonical names are case-insensitive
if name and name.lower() == canonical_name.lower():
return provider
def _get_provider_by_method(self, method):
"""Return a credential provider by its METHOD name."""
for provider in self._providers:
if provider.METHOD == method:
return provider
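# Illustrative sketch (not part of botocore): resolving a provider by its
# cross-SDK canonical name. The single-provider list is an assumed example.
def _example_canonical_sourcer():
    sourcer = CanonicalNameCredentialSourcer([EnvProvider()])
    if sourcer.is_supported('Environment'):
        return sourcer.source_credentials('Environment')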
class ContainerProvider(CredentialProvider):
METHOD = 'container-role'
CANONICAL_NAME = 'EcsContainer'
ENV_VAR = 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI'
ENV_VAR_FULL = 'AWS_CONTAINER_CREDENTIALS_FULL_URI'
ENV_VAR_AUTH_TOKEN = 'AWS_CONTAINER_AUTHORIZATION_TOKEN'
def __init__(self, environ=None, fetcher=None):
if environ is None:
environ = os.environ
if fetcher is None:
fetcher = ContainerMetadataFetcher()
self._environ = environ
self._fetcher = fetcher
def load(self):
        # This cred provider is only triggered if self.ENV_VAR is set,
# which only happens if you opt into this feature.
if self.ENV_VAR in self._environ or self.ENV_VAR_FULL in self._environ:
return self._retrieve_or_fail()
def _retrieve_or_fail(self):
if self._provided_relative_uri():
full_uri = self._fetcher.full_url(self._environ[self.ENV_VAR])
else:
full_uri = self._environ[self.ENV_VAR_FULL]
headers = self._build_headers()
fetcher = self._create_fetcher(full_uri, headers)
creds = fetcher()
return RefreshableCredentials(
access_key=creds['access_key'],
secret_key=creds['secret_key'],
token=creds['token'],
method=self.METHOD,
expiry_time=_parse_if_needed(creds['expiry_time']),
refresh_using=fetcher,
)
    def _build_headers(self):
        auth_token = self._environ.get(self.ENV_VAR_AUTH_TOKEN)
        if auth_token is not None:
            return {
                'Authorization': auth_token
            }
def _create_fetcher(self, full_uri, headers):
def fetch_creds():
try:
response = self._fetcher.retrieve_full_uri(
full_uri, headers=headers)
except MetadataRetrievalError as e:
logger.debug("Error retrieving container metadata: %s", e,
exc_info=True)
raise CredentialRetrievalError(provider=self.METHOD,
error_msg=str(e))
return {
'access_key': response['AccessKeyId'],
'secret_key': response['SecretAccessKey'],
'token': response['Token'],
'expiry_time': response['Expiration'],
}
return fetch_creds
def _provided_relative_uri(self):
return self.ENV_VAR in self._environ
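# Illustrative sketch (not part of botocore): the provider above is an
# opt-in feature, triggered purely by environment variables. The relative
# URI below is hypothetical.
def _example_container_provider():  # pragma: no cover
    environ = {
        ContainerProvider.ENV_VAR: '/v2/credentials/hypothetical-task-id',
    }
    # load() would resolve the URI via ContainerMetadataFetcher and return
    # RefreshableCredentials; with neither ENV_VAR nor ENV_VAR_FULL set it
    # returns None and the resolver chain moves on.
    return ContainerProvider(environ=environ)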
class CredentialResolver(object):
def __init__(self, providers):
"""
:param providers: A list of ``CredentialProvider`` instances.
"""
self.providers = providers
def insert_before(self, name, credential_provider):
"""
Inserts a new instance of ``CredentialProvider`` into the chain that
will be tried before an existing one.
:param name: The short name of the credentials you'd like to insert the
new credentials before. (ex. ``env`` or ``config``). Existing names
& ordering can be discovered via ``self.available_methods``.
:type name: string
        :param credential_provider: An instance of the new
            ``CredentialProvider`` you'd like to add to the chain.
        :type credential_provider: A subclass of ``CredentialProvider``
"""
try:
offset = [p.METHOD for p in self.providers].index(name)
except ValueError:
raise UnknownCredentialError(name=name)
self.providers.insert(offset, credential_provider)
def insert_after(self, name, credential_provider):
"""
        Inserts a new ``CredentialProvider`` instance into the chain that
        will be tried after an existing one.
:param name: The short name of the credentials you'd like to insert the
new credentials after. (ex. ``env`` or ``config``). Existing names
& ordering can be discovered via ``self.available_methods``.
:type name: string
        :param credential_provider: An instance of the new
            ``CredentialProvider`` you'd like to add to the chain.
        :type credential_provider: A subclass of ``CredentialProvider``
"""
offset = self._get_provider_offset(name)
self.providers.insert(offset + 1, credential_provider)
def remove(self, name):
"""
Removes a given ``Credentials`` instance from the chain.
:param name: The short name of the credentials instance to remove.
:type name: string
"""
available_methods = [p.METHOD for p in self.providers]
if name not in available_methods:
# It's not present. Fail silently.
return
offset = available_methods.index(name)
self.providers.pop(offset)
def get_provider(self, name):
"""Return a credential provider by name.
:type name: str
:param name: The name of the provider.
:raises UnknownCredentialError: Raised if no
credential provider by the provided name
is found.
"""
return self.providers[self._get_provider_offset(name)]
def _get_provider_offset(self, name):
try:
return [p.METHOD for p in self.providers].index(name)
except ValueError:
raise UnknownCredentialError(name=name)
def load_credentials(self):
"""
Goes through the credentials chain, returning the first ``Credentials``
that could be loaded.
"""
# First provider to return a non-None response wins.
for provider in self.providers:
logger.debug("Looking for credentials via: %s", provider.METHOD)
creds = provider.load()
if creds is not None:
return creds
# If we got here, no credentials could be found.
# This feels like it should be an exception, but historically, ``None``
# is returned.
#
# +1
# -js
return None
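# Illustrative sketch (not part of botocore): composing and reordering a
# resolver chain. The two stub providers exist only for this example.
def _example_resolver_chain():  # pragma: no cover
    class _Nothing(CredentialProvider):
        METHOD = 'nothing'
        def load(self):
            return None  # nothing found; the chain moves on
    class _Static(CredentialProvider):
        METHOD = 'static'
        def load(self):
            return Credentials('akid', 'skid', method=self.METHOD)
    resolver = CredentialResolver([_Nothing(), _Static()])
    resolver.remove('nothing')  # remove() fails silently for unknown names too
    return resolver.load_credentials()  # -> the _Static credentials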
class SSOCredentialFetcher(CachedCredentialFetcher):
_UTC_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
def __init__(self, start_url, sso_region, role_name, account_id,
client_creator, token_loader=None, cache=None,
expiry_window_seconds=None):
self._client_creator = client_creator
self._sso_region = sso_region
self._role_name = role_name
self._account_id = account_id
self._start_url = start_url
self._token_loader = token_loader
super(SSOCredentialFetcher, self).__init__(
cache, expiry_window_seconds
)
def _create_cache_key(self):
"""Create a predictable cache key for the current configuration.
The cache key is intended to be compatible with file names.
"""
args = {
'startUrl': self._start_url,
'roleName': self._role_name,
'accountId': self._account_id,
}
# NOTE: It would be good to hoist this cache key construction logic
# into the CachedCredentialFetcher class as we should be consistent.
        # Unfortunately, the current assume role fetchers that subclass don't
# pass separators resulting in non-minified JSON. In the long term,
# all fetchers should use the below caching scheme.
args = json.dumps(args, sort_keys=True, separators=(',', ':'))
argument_hash = sha1(args.encode('utf-8')).hexdigest()
return self._make_file_safe(argument_hash)
def _parse_timestamp(self, timestamp_ms):
# fromtimestamp expects seconds so: milliseconds / 1000 = seconds
timestamp_seconds = timestamp_ms / 1000.0
timestamp = datetime.datetime.fromtimestamp(timestamp_seconds, tzutc())
return timestamp.strftime(self._UTC_DATE_FORMAT)
def _get_credentials(self):
"""Get credentials by calling SSO get role credentials."""
config = Config(
signature_version=UNSIGNED,
region_name=self._sso_region,
)
client = self._client_creator('sso', config=config)
kwargs = {
'roleName': self._role_name,
'accountId': self._account_id,
'accessToken': self._token_loader(self._start_url),
}
try:
response = client.get_role_credentials(**kwargs)
except client.exceptions.UnauthorizedException:
raise UnauthorizedSSOTokenError()
credentials = response['roleCredentials']
credentials = {
'ProviderType': 'sso',
'Credentials': {
'AccessKeyId': credentials['accessKeyId'],
'SecretAccessKey': credentials['secretAccessKey'],
'SessionToken': credentials['sessionToken'],
'Expiration': self._parse_timestamp(credentials['expiration']),
}
}
return credentials
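# Illustrative sketch (not part of botocore): the cache key above is the
# sha1 hex digest of the minified, key-sorted JSON of the SSO arguments.
# All values here are hypothetical.
def _example_sso_cache_key():  # pragma: no cover
    args = {
        'startUrl': 'https://example.awsapps.com/start',
        'roleName': 'ReadOnly',
        'accountId': '123456789012',
    }
    minified = json.dumps(args, sort_keys=True, separators=(',', ':'))
    return sha1(minified.encode('utf-8')).hexdigest()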
class SSOProvider(CredentialProvider):
METHOD = 'sso'
_SSO_TOKEN_CACHE_DIR = os.path.expanduser(
os.path.join('~', '.aws', 'sso', 'cache')
)
_SSO_CONFIG_VARS = [
'sso_start_url',
'sso_region',
'sso_role_name',
'sso_account_id',
]
def __init__(self, load_config, client_creator, profile_name,
cache=None, token_cache=None):
if token_cache is None:
token_cache = JSONFileCache(self._SSO_TOKEN_CACHE_DIR)
self._token_cache = token_cache
if cache is None:
cache = {}
self.cache = cache
self._load_config = load_config
self._client_creator = client_creator
self._profile_name = profile_name
def _load_sso_config(self):
loaded_config = self._load_config()
profiles = loaded_config.get('profiles', {})
profile_name = self._profile_name
profile_config = profiles.get(self._profile_name, {})
if all(c not in profile_config for c in self._SSO_CONFIG_VARS):
return None
config = {}
missing_config_vars = []
for config_var in self._SSO_CONFIG_VARS:
if config_var in profile_config:
config[config_var] = profile_config[config_var]
else:
missing_config_vars.append(config_var)
if missing_config_vars:
missing = ', '.join(missing_config_vars)
raise InvalidConfigError(
error_msg=(
'The profile "%s" is configured to use SSO but is missing '
'required configuration: %s' % (profile_name, missing)
)
)
return config
def load(self):
sso_config = self._load_sso_config()
if not sso_config:
return None
sso_fetcher = SSOCredentialFetcher(
sso_config['sso_start_url'],
sso_config['sso_region'],
sso_config['sso_role_name'],
sso_config['sso_account_id'],
self._client_creator,
token_loader=SSOTokenLoader(cache=self._token_cache),
cache=self.cache,
)
return DeferredRefreshableCredentials(
method=self.METHOD,
refresh_using=sso_fetcher.fetch_credentials,
)
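# Illustrative example (not part of botocore): the profile entries the
# SSOProvider above requires. If any of the four _SSO_CONFIG_VARS is
# present, all of them must be, or InvalidConfigError is raised. Values
# here are hypothetical:
#
#   [profile dev]
#   sso_start_url = https://example.awsapps.com/start
#   sso_region = us-east-1
#   sso_role_name = ReadOnly
#   sso_account_id = 123456789012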
| 81,776 | Python | 37.356942 | 79 | 0.610424 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/loaders.py | # Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Module for loading various model files.
This module provides the classes that are used to load models used
by botocore. This can include:
* Service models (e.g. the model for EC2, S3, DynamoDB, etc.)
* Service model extras which customize the service models
* Other models associated with a service (pagination, waiters)
* Non service-specific config (Endpoint data, retry config)
Loading a model is broken down into several steps:
* Determining the path to load
* Searching the data_path for files to load
* The mechanics of loading the file
* Searching for extras and applying them to the loaded file
The last item is listed so that loading mechanisms faster than the
default JSON loader can be used.
The Search Path
===============
Similar to how the PATH environment variable is to finding executables
and the PYTHONPATH environment variable is to finding python modules
to import, the botocore loaders have the concept of a data path exposed
through AWS_DATA_PATH.
This enables end users to provide additional search paths where we
will attempt to load models outside of the models we ship with
botocore. When you create a ``Loader``, there are two paths
automatically added to the model search path:
* <botocore root>/data/
* ~/.aws/models
The first value is the path where all the model files shipped with
botocore are located.
The second path is so that users can just drop new model files in
``~/.aws/models`` without having to mess around with the AWS_DATA_PATH.
AWS_DATA_PATH uses the platform-specific path separator to
separate entries (typically ``:`` on Linux and ``;`` on Windows).
Directory Layout
================
The Loader expects a particular directory layout. In order for any
directory specified in AWS_DATA_PATH to be considered, it must have
this structure for service models::
<root>
|
|-- servicename1
| |-- 2012-10-25
| |-- service-2.json
|-- ec2
| |-- 2014-01-01
| | |-- paginators-1.json
| | |-- service-2.json
| | |-- waiters-2.json
| |-- 2015-03-01
| |-- paginators-1.json
| |-- service-2.json
| |-- waiters-2.json
| |-- service-2.sdk-extras.json
That is:
* The root directory contains sub directories that are the name
of the services.
* Within each service directory, there's a sub directory for each
available API version.
* Within each API version, there are model specific files, including
(but not limited to): service-2.json, waiters-2.json, paginators-1.json
The ``-1`` and ``-2`` suffixes at the end of the model filenames denote which version
schema is used within the model. Even though this information is available in
the ``version`` key within the model, this version is also part of the filename
so that code does not need to load the JSON model in order to determine which
version to use.
The ``sdk-extras`` and similar files represent extra data that needs to be
applied to the model after it is loaded. Data in these files might represent
information that doesn't quite fit in the original models, but is still needed
for the sdk. For instance, additional operation parameters might be added here
which don't represent the actual service api.
"""
import os
import logging
from botocore import BOTOCORE_ROOT
from botocore.compat import json
from botocore.compat import OrderedDict
from botocore.exceptions import DataNotFoundError, UnknownServiceError
from botocore.utils import deep_merge
logger = logging.getLogger(__name__)
def instance_cache(func):
"""Cache the result of a method on a per instance basis.
This is not a general purpose caching decorator. In order
for this to be used, it must be used on methods on an
instance, and that instance *must* provide a
``self._cache`` dictionary.
"""
def _wrapper(self, *args, **kwargs):
key = (func.__name__,) + args
for pair in sorted(kwargs.items()):
key += pair
if key in self._cache:
return self._cache[key]
data = func(self, *args, **kwargs)
self._cache[key] = data
return data
return _wrapper
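# Illustrative sketch (not part of botocore): any class that supplies a
# ``self._cache`` dict can use the decorator above. This class is
# hypothetical.
class _InstanceCacheExample(object):  # pragma: no cover
    def __init__(self):
        self._cache = {}
    @instance_cache
    def lookup(self, name):
        # Computed once per (method name, args) key, then served from
        # self._cache on subsequent calls.
        return name.upper()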
class JSONFileLoader(object):
"""Loader JSON files.
This class can load the default format of models, which is a JSON file.
"""
def exists(self, file_path):
"""Checks if the file exists.
:type file_path: str
:param file_path: The full path to the file to load without
the '.json' extension.
:return: True if file path exists, False otherwise.
"""
return os.path.isfile(file_path + '.json')
def load_file(self, file_path):
"""Attempt to load the file path.
:type file_path: str
:param file_path: The full path to the file to load without
the '.json' extension.
:return: The loaded data if it exists, otherwise None.
"""
full_path = file_path + '.json'
if not os.path.isfile(full_path):
return
# By default the file will be opened with locale encoding on Python 3.
# We specify "utf8" here to ensure the correct behavior.
with open(full_path, 'rb') as fp:
payload = fp.read().decode('utf-8')
logger.debug("Loading JSON file: %s", full_path)
return json.loads(payload, object_pairs_hook=OrderedDict)
def create_loader(search_path_string=None):
"""Create a Loader class.
This factory function creates a loader given a search string path.
    :type search_path_string: str
    :param search_path_string: The AWS_DATA_PATH value. A string
of data path values separated by the ``os.path.pathsep`` value,
which is typically ``:`` on POSIX platforms and ``;`` on
windows.
:return: A ``Loader`` instance.
"""
if search_path_string is None:
return Loader()
paths = []
extra_paths = search_path_string.split(os.pathsep)
for path in extra_paths:
path = os.path.expanduser(os.path.expandvars(path))
paths.append(path)
return Loader(extra_search_paths=paths)
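# Illustrative sketch (not part of botocore): building a loader from an
# AWS_DATA_PATH-style string and loading the latest service model. The
# extra search path is hypothetical.
def _example_create_loader():  # pragma: no cover
    loader = create_loader('/opt/custom/models')
    # Extra paths are searched before ~/.aws/models and the built-in
    # botocore data path, so they can shadow shipped models.
    return loader.load_service_model('ec2', 'service-2')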
class Loader(object):
"""Find and load data models.
This class will handle searching for and loading data models.
The main method used here is ``load_service_model``, which is a
convenience method over ``load_data`` and ``determine_latest_version``.
"""
FILE_LOADER_CLASS = JSONFileLoader
# The included models in botocore/data/ that we ship with botocore.
BUILTIN_DATA_PATH = os.path.join(BOTOCORE_ROOT, 'data')
# For convenience we automatically add ~/.aws/models to the data path.
CUSTOMER_DATA_PATH = os.path.join(os.path.expanduser('~'),
'.aws', 'models')
BUILTIN_EXTRAS_TYPES = ['sdk']
def __init__(self, extra_search_paths=None, file_loader=None,
cache=None, include_default_search_paths=True,
include_default_extras=True):
self._cache = {}
if file_loader is None:
file_loader = self.FILE_LOADER_CLASS()
self.file_loader = file_loader
if extra_search_paths is not None:
self._search_paths = extra_search_paths
else:
self._search_paths = []
if include_default_search_paths:
self._search_paths.extend([self.CUSTOMER_DATA_PATH,
self.BUILTIN_DATA_PATH])
self._extras_types = []
if include_default_extras:
self._extras_types.extend(self.BUILTIN_EXTRAS_TYPES)
self._extras_processor = ExtrasProcessor()
@property
def search_paths(self):
return self._search_paths
@property
def extras_types(self):
return self._extras_types
@instance_cache
def list_available_services(self, type_name):
"""List all known services.
This will traverse the search path and look for all known
services.
:type type_name: str
:param type_name: The type of the service (service-2,
paginators-1, waiters-2, etc). This is needed because
the list of available services depends on the service
type. For example, the latest API version available for
a resource-1.json file may not be the latest API version
            available for a service-2.json file.
:return: A list of all services. The list of services will
be sorted.
"""
services = set()
for possible_path in self._potential_locations():
# Any directory in the search path is potentially a service.
# We'll collect any initial list of potential services,
# but we'll then need to further process these directories
# by searching for the corresponding type_name in each
# potential directory.
possible_services = [
d for d in os.listdir(possible_path)
if os.path.isdir(os.path.join(possible_path, d))]
for service_name in possible_services:
full_dirname = os.path.join(possible_path, service_name)
api_versions = os.listdir(full_dirname)
for api_version in api_versions:
full_load_path = os.path.join(full_dirname,
api_version,
type_name)
if self.file_loader.exists(full_load_path):
services.add(service_name)
break
return sorted(services)
@instance_cache
def determine_latest_version(self, service_name, type_name):
"""Find the latest API version available for a service.
:type service_name: str
:param service_name: The name of the service.
:type type_name: str
:param type_name: The type of the service (service-2,
paginators-1, waiters-2, etc). This is needed because
the latest API version available can depend on the service
type. For example, the latest API version available for
a resource-1.json file may not be the latest API version
            available for a service-2.json file.
:rtype: str
:return: The latest API version. If the service does not exist
or does not have any available API data, then a
``DataNotFoundError`` exception will be raised.
"""
return max(self.list_api_versions(service_name, type_name))
@instance_cache
def list_api_versions(self, service_name, type_name):
"""List all API versions available for a particular service type
:type service_name: str
:param service_name: The name of the service
:type type_name: str
:param type_name: The type name for the service (i.e service-2,
paginators-1, etc.)
:rtype: list
:return: A list of API version strings in sorted order.
"""
known_api_versions = set()
for possible_path in self._potential_locations(service_name,
must_exist=True,
is_dir=True):
for dirname in os.listdir(possible_path):
full_path = os.path.join(possible_path, dirname, type_name)
# Only add to the known_api_versions if the directory
# contains a service-2, paginators-1, etc. file corresponding
# to the type_name passed in.
if self.file_loader.exists(full_path):
known_api_versions.add(dirname)
if not known_api_versions:
raise DataNotFoundError(data_path=service_name)
return sorted(known_api_versions)
@instance_cache
def load_service_model(self, service_name, type_name, api_version=None):
"""Load a botocore service model
This is the main method for loading botocore models (e.g. a service
model, pagination configs, waiter configs, etc.).
:type service_name: str
        :param service_name: The name of the service (e.g. ``ec2``, ``s3``).
:type type_name: str
:param type_name: The model type. Valid types include, but are not
limited to: ``service-2``, ``paginators-1``, ``waiters-2``.
:type api_version: str
:param api_version: The API version to load. If this is not
provided, then the latest API version will be used.
:raises: UnknownServiceError if there is no known service with
the provided service_name.
:raises: DataNotFoundError if no data could be found for the
service_name/type_name/api_version.
:return: The loaded data, as a python type (e.g. dict, list, etc).
"""
# Wrapper around the load_data. This will calculate the path
# to call load_data with.
known_services = self.list_available_services(type_name)
if service_name not in known_services:
raise UnknownServiceError(
service_name=service_name,
known_service_names=', '.join(sorted(known_services)))
if api_version is None:
api_version = self.determine_latest_version(
service_name, type_name)
full_path = os.path.join(service_name, api_version, type_name)
model = self.load_data(full_path)
# Load in all the extras
extras_data = self._find_extras(service_name, type_name, api_version)
self._extras_processor.process(model, extras_data)
return model
def _find_extras(self, service_name, type_name, api_version):
"""Creates an iterator over all the extras data."""
for extras_type in self.extras_types:
extras_name = '%s.%s-extras' % (type_name, extras_type)
full_path = os.path.join(service_name, api_version, extras_name)
try:
yield self.load_data(full_path)
except DataNotFoundError:
pass
@instance_cache
def load_data(self, name):
"""Load data given a data path.
This is a low level method that will search through the various
search paths until it's able to load a value. This is typically
only needed to load *non* model files (such as _endpoints and
_retry). If you need to load model files, you should prefer
``load_service_model``.
:type name: str
        :param name: The data path, e.g. ``ec2/2015-03-01/service-2``.
:return: The loaded data. If no data could be found then
a DataNotFoundError is raised.
"""
for possible_path in self._potential_locations(name):
found = self.file_loader.load_file(possible_path)
if found is not None:
return found
# We didn't find anything that matched on any path.
raise DataNotFoundError(data_path=name)
def _potential_locations(self, name=None, must_exist=False,
is_dir=False):
# Will give an iterator over the full path of potential locations
# according to the search path.
for path in self.search_paths:
if os.path.isdir(path):
full_path = path
if name is not None:
full_path = os.path.join(path, name)
if not must_exist:
yield full_path
else:
if is_dir and os.path.isdir(full_path):
yield full_path
elif os.path.exists(full_path):
yield full_path
class ExtrasProcessor(object):
"""Processes data from extras files into service models."""
def process(self, original_model, extra_models):
"""Processes data from a list of loaded extras files into a model
:type original_model: dict
:param original_model: The service model to load all the extras into.
:type extra_models: iterable of dict
:param extra_models: A list of loaded extras models.
"""
for extras in extra_models:
self._process(original_model, extras)
def _process(self, model, extra_model):
"""Process a single extras model into a service model."""
if 'merge' in extra_model:
deep_merge(model, extra_model['merge'])
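# Illustrative sketch (not part of botocore): an extras model with a
# ``merge`` key is deep-merged over the loaded service model. The data
# here is hypothetical.
def _example_extras_merge():  # pragma: no cover
    model = {'metadata': {'serviceId': 'Example'}}
    extras = {'merge': {'metadata': {'protocol': 'json'}}}
    ExtrasProcessor().process(model, [extras])
    return model  # metadata now carries both serviceId and protocol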
| 17,355 | Python | 36.567099 | 79 | 0.623855 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/stub.py | # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
from collections import deque
from pprint import pformat
from botocore.validate import validate_parameters
from botocore.exceptions import ParamValidationError, \
StubResponseError, StubAssertionError, UnStubbedResponseError
from botocore.awsrequest import AWSResponse
class _ANY(object):
"""
A helper object that compares equal to everything. Copied from
unittest.mock
"""
def __eq__(self, other):
return True
def __ne__(self, other):
return False
def __repr__(self):
return '<ANY>'
ANY = _ANY()
class Stubber(object):
"""
This class will allow you to stub out requests so you don't have to hit
an endpoint to write tests. Responses are returned first in, first out.
If operations are called out of order, or are called with no remaining
queued responses, an error will be raised.
**Example:**
::
import datetime
import botocore.session
from botocore.stub import Stubber
s3 = botocore.session.get_session().create_client('s3')
stubber = Stubber(s3)
response = {
'IsTruncated': False,
'Name': 'test-bucket',
'MaxKeys': 1000, 'Prefix': '',
'Contents': [{
'Key': 'test.txt',
'ETag': '"abc123"',
'StorageClass': 'STANDARD',
'LastModified': datetime.datetime(2016, 1, 20, 22, 9),
'Owner': {'ID': 'abc123', 'DisplayName': 'myname'},
'Size': 14814
}],
'EncodingType': 'url',
'ResponseMetadata': {
'RequestId': 'abc123',
'HTTPStatusCode': 200,
'HostId': 'abc123'
},
'Marker': ''
}
expected_params = {'Bucket': 'test-bucket'}
stubber.add_response('list_objects', response, expected_params)
stubber.activate()
service_response = s3.list_objects(Bucket='test-bucket')
assert service_response == response
This class can also be called as a context manager, which will handle
activation / deactivation for you.
**Example:**
::
import datetime
import botocore.session
from botocore.stub import Stubber
s3 = botocore.session.get_session().create_client('s3')
response = {
"Owner": {
"ID": "foo",
"DisplayName": "bar"
},
"Buckets": [{
"CreationDate": datetime.datetime(2016, 1, 20, 22, 9),
"Name": "baz"
}]
}
with Stubber(s3) as stubber:
stubber.add_response('list_buckets', response, {})
service_response = s3.list_buckets()
assert service_response == response
If you have an input parameter that is a randomly generated value, or you
otherwise don't care about its value, you can use ``stub.ANY`` to ignore
it in validation.
**Example:**
::
import datetime
import botocore.session
from botocore.stub import Stubber, ANY
s3 = botocore.session.get_session().create_client('s3')
stubber = Stubber(s3)
response = {
'IsTruncated': False,
'Name': 'test-bucket',
'MaxKeys': 1000, 'Prefix': '',
'Contents': [{
'Key': 'test.txt',
'ETag': '"abc123"',
'StorageClass': 'STANDARD',
'LastModified': datetime.datetime(2016, 1, 20, 22, 9),
'Owner': {'ID': 'abc123', 'DisplayName': 'myname'},
'Size': 14814
}],
'EncodingType': 'url',
'ResponseMetadata': {
'RequestId': 'abc123',
'HTTPStatusCode': 200,
'HostId': 'abc123'
},
'Marker': ''
}
expected_params = {'Bucket': ANY}
stubber.add_response('list_objects', response, expected_params)
with stubber:
service_response = s3.list_objects(Bucket='test-bucket')
assert service_response == response
"""
def __init__(self, client):
"""
:param client: The client to add your stubs to.
"""
self.client = client
self._event_id = 'boto_stubber'
self._expected_params_event_id = 'boto_stubber_expected_params'
self._queue = deque()
def __enter__(self):
self.activate()
return self
def __exit__(self, exception_type, exception_value, traceback):
self.deactivate()
def activate(self):
"""
Activates the stubber on the client
"""
self.client.meta.events.register_first(
'before-parameter-build.*.*',
self._assert_expected_params,
unique_id=self._expected_params_event_id)
self.client.meta.events.register(
'before-call.*.*',
self._get_response_handler,
unique_id=self._event_id)
def deactivate(self):
"""
Deactivates the stubber on the client
"""
self.client.meta.events.unregister(
'before-parameter-build.*.*',
self._assert_expected_params,
unique_id=self._expected_params_event_id)
self.client.meta.events.unregister(
'before-call.*.*',
self._get_response_handler,
unique_id=self._event_id)
def add_response(self, method, service_response, expected_params=None):
"""
Adds a service response to the response queue. This will be validated
against the service model to ensure correctness. It should be noted,
however, that while missing attributes are often considered correct,
your code may not function properly if you leave them out. Therefore
you should always fill in every value you see in a typical response for
your particular request.
:param method: The name of the client method to stub.
:type method: str
:param service_response: A dict response stub. Provided parameters will
be validated against the service model.
:type service_response: dict
:param expected_params: A dictionary of the expected parameters to
be called for the provided service response. The parameters match
the names of keyword arguments passed to that client call. If
any of the parameters differ a ``StubResponseError`` is thrown.
You can use stub.ANY to indicate a particular parameter to ignore
in validation. stub.ANY is only valid for top level params.
"""
self._add_response(method, service_response, expected_params)
def _add_response(self, method, service_response, expected_params):
if not hasattr(self.client, method):
raise ValueError(
"Client %s does not have method: %s"
% (self.client.meta.service_model.service_name, method))
# Create a successful http response
http_response = AWSResponse(None, 200, {}, None)
operation_name = self.client.meta.method_to_api_mapping.get(method)
self._validate_response(operation_name, service_response)
# Add the service_response to the queue for returning responses
response = {
'operation_name': operation_name,
'response': (http_response, service_response),
'expected_params': expected_params
}
self._queue.append(response)
def add_client_error(self, method, service_error_code='',
service_message='', http_status_code=400,
service_error_meta=None, expected_params=None,
response_meta=None):
"""
Adds a ``ClientError`` to the response queue.
:param method: The name of the service method to return the error on.
:type method: str
:param service_error_code: The service error code to return,
e.g. ``NoSuchBucket``
:type service_error_code: str
:param service_message: The service message to return, e.g.
'The specified bucket does not exist.'
:type service_message: str
:param http_status_code: The HTTP status code to return, e.g. 404, etc
:type http_status_code: int
:param service_error_meta: Additional keys to be added to the
service Error
:type service_error_meta: dict
:param expected_params: A dictionary of the expected parameters to
be called for the provided service response. The parameters match
the names of keyword arguments passed to that client call. If
any of the parameters differ a ``StubResponseError`` is thrown.
You can use stub.ANY to indicate a particular parameter to ignore
in validation.
:param response_meta: Additional keys to be added to the
response's ResponseMetadata
:type response_meta: dict
"""
http_response = AWSResponse(None, http_status_code, {}, None)
# We don't look to the model to build this because the caller would
# need to know the details of what the HTTP body would need to
# look like.
parsed_response = {
'ResponseMetadata': {'HTTPStatusCode': http_status_code},
'Error': {
'Message': service_message,
'Code': service_error_code
}
}
if service_error_meta is not None:
parsed_response['Error'].update(service_error_meta)
if response_meta is not None:
parsed_response['ResponseMetadata'].update(response_meta)
operation_name = self.client.meta.method_to_api_mapping.get(method)
# Note that we do not allow for expected_params while
# adding errors into the queue yet.
response = {
'operation_name': operation_name,
'response': (http_response, parsed_response),
'expected_params': expected_params,
}
self._queue.append(response)
def assert_no_pending_responses(self):
"""
Asserts that all expected calls were made.
"""
remaining = len(self._queue)
if remaining != 0:
raise AssertionError(
"%d responses remaining in queue." % remaining)
def _assert_expected_call_order(self, model, params):
if not self._queue:
raise UnStubbedResponseError(
operation_name=model.name,
reason=(
'Unexpected API Call: A call was made but no additional calls expected. '
'Either the API Call was not stubbed or it was called multiple times.'
)
)
name = self._queue[0]['operation_name']
if name != model.name:
raise StubResponseError(
operation_name=model.name,
reason='Operation mismatch: found response for %s.' % name)
def _get_response_handler(self, model, params, context, **kwargs):
self._assert_expected_call_order(model, params)
# Pop off the entire response once everything has been validated
return self._queue.popleft()['response']
def _assert_expected_params(self, model, params, context, **kwargs):
if self._should_not_stub(context):
return
self._assert_expected_call_order(model, params)
expected_params = self._queue[0]['expected_params']
if expected_params is None:
return
# Validate the parameters are equal
for param, value in expected_params.items():
if param not in params or expected_params[param] != params[param]:
raise StubAssertionError(
operation_name=model.name,
reason='Expected parameters:\n%s,\nbut received:\n%s' % (
pformat(expected_params), pformat(params)))
# Ensure there are no extra params hanging around
if sorted(expected_params.keys()) != sorted(params.keys()):
raise StubAssertionError(
operation_name=model.name,
reason='Expected parameters:\n%s,\nbut received:\n%s' % (
pformat(expected_params), pformat(params)))
def _should_not_stub(self, context):
# Do not include presign requests when processing stubbed client calls
# as a presign request will never have an HTTP request sent over the
# wire for it and therefore not receive a response back.
if context and context.get('is_presign_request'):
return True
def _validate_response(self, operation_name, service_response):
service_model = self.client.meta.service_model
operation_model = service_model.operation_model(operation_name)
output_shape = operation_model.output_shape
# Remove ResponseMetadata so that the validator doesn't attempt to
# perform validation on it.
response = service_response
if 'ResponseMetadata' in response:
response = copy.copy(service_response)
del response['ResponseMetadata']
if output_shape is not None:
validate_parameters(response, output_shape)
elif response:
# If the output shape is None, that means the response should be
# empty apart from ResponseMetadata
raise ParamValidationError(
report=(
"Service response should only contain ResponseMetadata."))
| 14,361 | Python | 35.359494 | 97 | 0.590906 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/paginate.py | # Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from itertools import tee
from botocore.compat import six
import jmespath
import json
import base64
import logging
from botocore.exceptions import PaginationError
from botocore.compat import zip
from botocore.utils import set_value_from_jmespath, merge_dicts
log = logging.getLogger(__name__)
class TokenEncoder(object):
"""Encodes dictionaries into opaque strings.
    This is, for the most part, json dumps + base64 encoding, but it also
    supports bytes in the dictionary, in addition to the types that json
    can handle by default.
This is intended for use in encoding pagination tokens, which in some
cases can be complex structures and / or contain bytes.
"""
def encode(self, token):
"""Encodes a dictionary to an opaque string.
:type token: dict
:param token: A dictionary containing pagination information,
particularly the service pagination token(s) but also other boto
metadata.
:rtype: str
:returns: An opaque string
"""
try:
# Try just using json dumps first to avoid having to traverse
# and encode the dict. In 99.9999% of cases this will work.
json_string = json.dumps(token)
except (TypeError, UnicodeDecodeError):
# If normal dumping failed, go through and base64 encode all bytes.
encoded_token, encoded_keys = self._encode(token, [])
# Save the list of all the encoded key paths. We can safely
# assume that no service will ever use this key.
encoded_token['boto_encoded_keys'] = encoded_keys
# Now that the bytes are all encoded, dump the json.
json_string = json.dumps(encoded_token)
# base64 encode the json string to produce an opaque token string.
return base64.b64encode(json_string.encode('utf-8')).decode('utf-8')
def _encode(self, data, path):
"""Encode bytes in given data, keeping track of the path traversed."""
if isinstance(data, dict):
return self._encode_dict(data, path)
elif isinstance(data, list):
return self._encode_list(data, path)
elif isinstance(data, six.binary_type):
return self._encode_bytes(data, path)
else:
return data, []
def _encode_list(self, data, path):
"""Encode any bytes in a list, noting the index of what is encoded."""
new_data = []
encoded = []
for i, value in enumerate(data):
new_path = path + [i]
new_value, new_encoded = self._encode(value, new_path)
new_data.append(new_value)
encoded.extend(new_encoded)
return new_data, encoded
def _encode_dict(self, data, path):
"""Encode any bytes in a dict, noting the index of what is encoded."""
new_data = {}
encoded = []
for key, value in data.items():
new_path = path + [key]
new_value, new_encoded = self._encode(value, new_path)
new_data[key] = new_value
encoded.extend(new_encoded)
return new_data, encoded
def _encode_bytes(self, data, path):
"""Base64 encode a byte string."""
return base64.b64encode(data).decode('utf-8'), [path]
class TokenDecoder(object):
"""Decodes token strings back into dictionaries.
This performs the inverse operation to the TokenEncoder, accepting
    opaque strings and decoding them into a usable form.
"""
def decode(self, token):
"""Decodes an opaque string to a dictionary.
:type token: str
:param token: A token string given by the botocore pagination
interface.
:rtype: dict
:returns: A dictionary containing pagination information,
particularly the service pagination token(s) but also other boto
metadata.
"""
json_string = base64.b64decode(token.encode('utf-8')).decode('utf-8')
decoded_token = json.loads(json_string)
# Remove the encoding metadata as it is read since it will no longer
# be needed.
encoded_keys = decoded_token.pop('boto_encoded_keys', None)
if encoded_keys is None:
return decoded_token
else:
return self._decode(decoded_token, encoded_keys)
def _decode(self, token, encoded_keys):
"""Find each encoded value and decode it."""
for key in encoded_keys:
encoded = self._path_get(token, key)
decoded = base64.b64decode(encoded.encode('utf-8'))
self._path_set(token, key, decoded)
return token
def _path_get(self, data, path):
"""Return the nested data at the given path.
For instance:
data = {'foo': ['bar', 'baz']}
path = ['foo', 0]
==> 'bar'
"""
# jmespath isn't used here because it would be difficult to actually
# create the jmespath query when taking all of the unknowns of key
# structure into account. Gross though this is, it is simple and not
# very error prone.
d = data
for step in path:
d = d[step]
return d
def _path_set(self, data, path, value):
"""Set the value of a key in the given data.
Example:
data = {'foo': ['bar', 'baz']}
path = ['foo', 1]
value = 'bin'
==> data = {'foo': ['bar', 'bin']}
"""
container = self._path_get(data, path[:-1])
container[path[-1]] = value
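# Illustrative sketch (not part of botocore): a token containing bytes
# survives an encode/decode round trip through the classes above.
def _example_token_round_trip():  # pragma: no cover
    token = {'Marker': b'\x00\x01', 'boto_truncate_amount': 2}
    opaque = TokenEncoder().encode(token)  # opaque base64 string
    assert TokenDecoder().decode(opaque) == token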
class PaginatorModel(object):
def __init__(self, paginator_config):
self._paginator_config = paginator_config['pagination']
def get_paginator(self, operation_name):
try:
single_paginator_config = self._paginator_config[operation_name]
except KeyError:
raise ValueError("Paginator for operation does not exist: %s"
% operation_name)
return single_paginator_config
class PageIterator(object):
def __init__(self, method, input_token, output_token, more_results,
result_keys, non_aggregate_keys, limit_key, max_items,
starting_token, page_size, op_kwargs):
self._method = method
self._input_token = input_token
self._output_token = output_token
self._more_results = more_results
self._result_keys = result_keys
self._max_items = max_items
self._limit_key = limit_key
self._starting_token = starting_token
self._page_size = page_size
self._op_kwargs = op_kwargs
self._resume_token = None
self._non_aggregate_key_exprs = non_aggregate_keys
self._non_aggregate_part = {}
self._token_encoder = TokenEncoder()
self._token_decoder = TokenDecoder()
@property
def result_keys(self):
return self._result_keys
@property
def resume_token(self):
"""Token to specify to resume pagination."""
return self._resume_token
@resume_token.setter
def resume_token(self, value):
if not isinstance(value, dict):
raise ValueError("Bad starting token: %s" % value)
if 'boto_truncate_amount' in value:
token_keys = sorted(self._input_token + ['boto_truncate_amount'])
else:
token_keys = sorted(self._input_token)
dict_keys = sorted(value.keys())
if token_keys == dict_keys:
self._resume_token = self._token_encoder.encode(value)
else:
raise ValueError("Bad starting token: %s" % value)
@property
def non_aggregate_part(self):
return self._non_aggregate_part
def __iter__(self):
current_kwargs = self._op_kwargs
previous_next_token = None
next_token = dict((key, None) for key in self._input_token)
if self._starting_token is not None:
# If the starting token exists, populate the next_token with the
# values inside it. This ensures that we have the service's
# pagination token on hand if we need to truncate after the
# first response.
next_token = self._parse_starting_token()[0]
# The number of items from result_key we've seen so far.
total_items = 0
first_request = True
primary_result_key = self.result_keys[0]
starting_truncation = 0
self._inject_starting_params(current_kwargs)
while True:
response = self._make_request(current_kwargs)
parsed = self._extract_parsed_response(response)
if first_request:
# The first request is handled differently. We could
# possibly have a resume/starting token that tells us where
# to index into the retrieved page.
if self._starting_token is not None:
starting_truncation = self._handle_first_request(
parsed, primary_result_key, starting_truncation)
first_request = False
self._record_non_aggregate_key_values(parsed)
else:
# If this isn't the first request, we have already sliced into
# the first request and had to make additional requests after.
# We no longer need to add this to truncation.
starting_truncation = 0
current_response = primary_result_key.search(parsed)
if current_response is None:
current_response = []
num_current_response = len(current_response)
truncate_amount = 0
if self._max_items is not None:
truncate_amount = (total_items + num_current_response) \
- self._max_items
if truncate_amount > 0:
self._truncate_response(parsed, primary_result_key,
truncate_amount, starting_truncation,
next_token)
yield response
break
else:
yield response
total_items += num_current_response
next_token = self._get_next_token(parsed)
if all(t is None for t in next_token.values()):
break
if self._max_items is not None and \
total_items == self._max_items:
# We're on a page boundary so we can set the current
# next token to be the resume token.
self.resume_token = next_token
break
if previous_next_token is not None and \
previous_next_token == next_token:
message = ("The same next token was received "
"twice: %s" % next_token)
raise PaginationError(message=message)
self._inject_token_into_kwargs(current_kwargs, next_token)
previous_next_token = next_token
def search(self, expression):
"""Applies a JMESPath expression to a paginator
Each page of results is searched using the provided JMESPath
expression. If the result is not a list, it is yielded
directly. If the result is a list, each element in the result
is yielded individually (essentially implementing a flatmap in
which the JMESPath search is the mapping function).
:type expression: str
:param expression: JMESPath expression to apply to each page.
:return: Returns an iterator that yields the individual
elements of applying a JMESPath expression to each page of
results.
"""
compiled = jmespath.compile(expression)
for page in self:
results = compiled.search(page)
if isinstance(results, list):
for element in results:
yield element
else:
# Yield result directly if it is not a list.
yield results
def _make_request(self, current_kwargs):
return self._method(**current_kwargs)
def _extract_parsed_response(self, response):
return response
def _record_non_aggregate_key_values(self, response):
non_aggregate_keys = {}
for expression in self._non_aggregate_key_exprs:
result = expression.search(response)
set_value_from_jmespath(non_aggregate_keys,
expression.expression,
result)
self._non_aggregate_part = non_aggregate_keys
def _inject_starting_params(self, op_kwargs):
# If the user has specified a starting token we need to
# inject that into the operation's kwargs.
if self._starting_token is not None:
# Don't need to do anything special if there is no starting
# token specified.
next_token = self._parse_starting_token()[0]
self._inject_token_into_kwargs(op_kwargs, next_token)
if self._page_size is not None:
# Pass the page size as the parameter name for limiting
# page size, also known as the limit_key.
op_kwargs[self._limit_key] = self._page_size
def _inject_token_into_kwargs(self, op_kwargs, next_token):
for name, token in next_token.items():
if (token is not None) and (token != 'None'):
op_kwargs[name] = token
elif name in op_kwargs:
del op_kwargs[name]
def _handle_first_request(self, parsed, primary_result_key,
starting_truncation):
# If the payload is an array or string, we need to slice into it
# and only return the truncated amount.
starting_truncation = self._parse_starting_token()[1]
all_data = primary_result_key.search(parsed)
if isinstance(all_data, (list, six.string_types)):
data = all_data[starting_truncation:]
else:
data = None
set_value_from_jmespath(
parsed,
primary_result_key.expression,
data
)
# We also need to truncate any secondary result keys
# because they were not truncated in the previous last
# response.
for token in self.result_keys:
if token == primary_result_key:
continue
sample = token.search(parsed)
if isinstance(sample, list):
empty_value = []
elif isinstance(sample, six.string_types):
empty_value = ''
elif isinstance(sample, (int, float)):
empty_value = 0
else:
empty_value = None
set_value_from_jmespath(parsed, token.expression, empty_value)
return starting_truncation
def _truncate_response(self, parsed, primary_result_key, truncate_amount,
starting_truncation, next_token):
original = primary_result_key.search(parsed)
if original is None:
original = []
amount_to_keep = len(original) - truncate_amount
truncated = original[:amount_to_keep]
set_value_from_jmespath(
parsed,
primary_result_key.expression,
truncated
)
# The issue here is that even though we know how much we've truncated
# we need to account for this globally including any starting
# left truncation. For example:
# Raw response: [0,1,2,3]
# Starting index: 1
# Max items: 1
# Starting left truncation: [1, 2, 3]
# End right truncation for max items: [1]
# However, even though we only kept 1, this is post
# left truncation so the next starting index should be 2, not 1
# (left_truncation + amount_to_keep).
next_token['boto_truncate_amount'] = \
amount_to_keep + starting_truncation
self.resume_token = next_token
def _get_next_token(self, parsed):
if self._more_results is not None:
if not self._more_results.search(parsed):
return {}
next_tokens = {}
for output_token, input_key in \
zip(self._output_token, self._input_token):
next_token = output_token.search(parsed)
# We do not want to include any empty strings as actual tokens.
# Treat them as None.
if next_token:
next_tokens[input_key] = next_token
else:
next_tokens[input_key] = None
return next_tokens
def result_key_iters(self):
teed_results = tee(self, len(self.result_keys))
return [ResultKeyIterator(i, result_key) for i, result_key
in zip(teed_results, self.result_keys)]
def build_full_result(self):
complete_result = {}
for response in self:
page = response
# We want to try to catch operation object pagination
# and format correctly for those. They come in the form
            # of a tuple of two elements: (http_response, parsed_response).
# We want the parsed_response as that is what the page iterator
# uses. We can remove it though once operation objects are removed.
if isinstance(response, tuple) and len(response) == 2:
page = response[1]
# We're incrementally building the full response page
# by page. For each page in the response we need to
# inject the necessary components from the page
# into the complete_result.
for result_expression in self.result_keys:
# In order to incrementally update a result key
# we need to search the existing value from complete_result,
# then we need to search the _current_ page for the
# current result key value. Then we append the current
# value onto the existing value, and re-set that value
# as the new value.
result_value = result_expression.search(page)
if result_value is None:
continue
existing_value = result_expression.search(complete_result)
if existing_value is None:
# Set the initial result
set_value_from_jmespath(
complete_result, result_expression.expression,
result_value)
continue
# Now both result_value and existing_value contain something
if isinstance(result_value, list):
existing_value.extend(result_value)
elif isinstance(result_value, (int, float, six.string_types)):
# Modify the existing result with the sum or concatenation
set_value_from_jmespath(
complete_result, result_expression.expression,
existing_value + result_value)
merge_dicts(complete_result, self.non_aggregate_part)
if self.resume_token is not None:
complete_result['NextToken'] = self.resume_token
return complete_result
def _parse_starting_token(self):
if self._starting_token is None:
return None
# The starting token is a dict passed as a base64 encoded string.
next_token = self._starting_token
try:
next_token = self._token_decoder.decode(next_token)
index = 0
if 'boto_truncate_amount' in next_token:
index = next_token.get('boto_truncate_amount')
del next_token['boto_truncate_amount']
except (ValueError, TypeError):
next_token, index = self._parse_starting_token_deprecated()
return next_token, index
def _parse_starting_token_deprecated(self):
"""
This handles parsing of old style starting tokens, and attempts to
coerce them into the new style.
"""
log.debug("Attempting to fall back to old starting token parser. For "
"token: %s" % self._starting_token)
if self._starting_token is None:
return None
parts = self._starting_token.split('___')
next_token = []
index = 0
if len(parts) == len(self._input_token) + 1:
try:
index = int(parts.pop())
except ValueError:
# This doesn't look like a valid old-style token, so we're
# passing it along as an opaque service token.
parts = [self._starting_token]
for part in parts:
if part == 'None':
next_token.append(None)
else:
next_token.append(part)
return self._convert_deprecated_starting_token(next_token), index
def _convert_deprecated_starting_token(self, deprecated_token):
"""
This attempts to convert a deprecated starting token into the new
style.
"""
len_deprecated_token = len(deprecated_token)
len_input_token = len(self._input_token)
if len_deprecated_token > len_input_token:
raise ValueError("Bad starting token: %s" % self._starting_token)
elif len_deprecated_token < len_input_token:
log.debug("Old format starting token does not contain all input "
"tokens. Setting the rest, in order, as None.")
for i in range(len_input_token - len_deprecated_token):
deprecated_token.append(None)
return dict(zip(self._input_token, deprecated_token))
class Paginator(object):
PAGE_ITERATOR_CLS = PageIterator
def __init__(self, method, pagination_config, model):
self._model = model
self._method = method
self._pagination_cfg = pagination_config
self._output_token = self._get_output_tokens(self._pagination_cfg)
self._input_token = self._get_input_tokens(self._pagination_cfg)
self._more_results = self._get_more_results_token(self._pagination_cfg)
self._non_aggregate_keys = self._get_non_aggregate_keys(
self._pagination_cfg)
self._result_keys = self._get_result_keys(self._pagination_cfg)
self._limit_key = self._get_limit_key(self._pagination_cfg)
@property
def result_keys(self):
return self._result_keys
def _get_non_aggregate_keys(self, config):
keys = []
for key in config.get('non_aggregate_keys', []):
keys.append(jmespath.compile(key))
return keys
def _get_output_tokens(self, config):
output = []
output_token = config['output_token']
if not isinstance(output_token, list):
output_token = [output_token]
for config in output_token:
output.append(jmespath.compile(config))
return output
def _get_input_tokens(self, config):
input_token = self._pagination_cfg['input_token']
if not isinstance(input_token, list):
input_token = [input_token]
return input_token
def _get_more_results_token(self, config):
more_results = config.get('more_results')
if more_results is not None:
return jmespath.compile(more_results)
def _get_result_keys(self, config):
result_key = config.get('result_key')
if result_key is not None:
if not isinstance(result_key, list):
result_key = [result_key]
result_key = [jmespath.compile(rk) for rk in result_key]
return result_key
def _get_limit_key(self, config):
return config.get('limit_key')
def paginate(self, **kwargs):
"""Create paginator object for an operation.
This returns an iterable object. Iterating over
this object will yield a single page of a response
at a time.
"""
page_params = self._extract_paging_params(kwargs)
return self.PAGE_ITERATOR_CLS(
self._method, self._input_token,
self._output_token, self._more_results,
self._result_keys, self._non_aggregate_keys,
self._limit_key,
page_params['MaxItems'],
page_params['StartingToken'],
page_params['PageSize'],
kwargs)
def _extract_paging_params(self, kwargs):
pagination_config = kwargs.pop('PaginationConfig', {})
max_items = pagination_config.get('MaxItems', None)
if max_items is not None:
max_items = int(max_items)
page_size = pagination_config.get('PageSize', None)
if page_size is not None:
if self._limit_key is None:
raise PaginationError(
message="PageSize parameter is not supported for the "
"pagination interface for this operation.")
input_members = self._model.input_shape.members
limit_key_shape = input_members.get(self._limit_key)
if limit_key_shape.type_name == 'string':
if not isinstance(page_size, six.string_types):
page_size = str(page_size)
else:
page_size = int(page_size)
return {
'MaxItems': max_items,
'StartingToken': pagination_config.get('StartingToken', None),
'PageSize': page_size,
}
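# Illustrative sketch (not part of botocore): client.get_paginator() wires
# up a Paginator like the one above; the PaginationConfig keys map onto
# _extract_paging_params. The client and bucket are hypothetical.
def _example_paginate(s3_client):  # pragma: no cover
    paginator = s3_client.get_paginator('list_objects')
    pages = paginator.paginate(
        Bucket='example-bucket',
        PaginationConfig={'MaxItems': 100, 'PageSize': 25},
    )
    # search() flat-maps a JMESPath expression over every page.
    return list(pages.search('Contents[].Key'))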
class ResultKeyIterator(object):
"""Iterates over the results of paginated responses.
Each iterator is associated with a single result key.
Iterating over this object will give you each element in
the result key list.
:param pages_iterator: An iterator that will give you
pages of results (a ``PageIterator`` class).
:param result_key: The JMESPath expression representing
the result key.
"""
def __init__(self, pages_iterator, result_key):
self._pages_iterator = pages_iterator
self.result_key = result_key
def __iter__(self):
for page in self._pages_iterator:
results = self.result_key.search(page)
if results is None:
results = []
for result in results:
yield result
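# Illustrative sketch (not part of botocore): pairing each result key with
# its own flattened iterator, as produced by PageIterator.result_key_iters()
# above. ``page_iterator`` is assumed to come from Paginator.paginate().
def _example_result_key_iters(page_iterator):  # pragma: no cover
    for key_iter in page_iterator.result_key_iters():
        for element in key_iter:
            yield key_iter.result_key.expression, element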
| 27,128 | Python | 39.013274 | 79 | 0.585373 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/eventstream.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Binary Event Stream Decoding """
from binascii import crc32
from struct import unpack
from botocore.exceptions import EventStreamError
# byte length of the prelude (total_length + header_length + prelude_crc)
_PRELUDE_LENGTH = 12
_MAX_HEADERS_LENGTH = 128 * 1024  # 128 KB
_MAX_PAYLOAD_LENGTH = 16 * 1024 ** 2  # 16 MB
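# Illustrative sketch (not part of botocore): the 12-byte prelude is three
# big-endian uint32s (total_length, headers_length, prelude_crc), and the
# CRC covers the first 8 bytes. ``message_bytes`` is assumed to be a full
# event-stream frame.
def _example_unpack_prelude(message_bytes):  # pragma: no cover
    prelude = message_bytes[:_PRELUDE_LENGTH]
    total_length, headers_length, prelude_crc = unpack('!III', prelude)
    # crc32 is masked so the comparison is stable across Python versions.
    assert crc32(prelude[:_PRELUDE_LENGTH - 4]) & 0xFFFFFFFF == prelude_crc
    return total_length, headers_length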
class ParserError(Exception):
"""Base binary flow encoding parsing exception. """
pass
class DuplicateHeader(ParserError):
"""Duplicate header found in the event. """
def __init__(self, header):
message = 'Duplicate header present: "%s"' % header
super(DuplicateHeader, self).__init__(message)
class InvalidHeadersLength(ParserError):
"""Headers length is longer than the maximum. """
def __init__(self, length):
message = 'Header length of %s exceeded the maximum of %s' % (
length, _MAX_HEADERS_LENGTH
)
super(InvalidHeadersLength, self).__init__(message)
class InvalidPayloadLength(ParserError):
"""Payload length is longer than the maximum. """
def __init__(self, length):
message = 'Payload length of %s exceeded the maximum of %s' % (
length, _MAX_PAYLOAD_LENGTH
)
super(InvalidPayloadLength, self).__init__(message)
class ChecksumMismatch(ParserError):
"""Calculated checksum did not match the expected checksum. """
def __init__(self, expected, calculated):
message = 'Checksum mismatch: expected 0x%08x, calculated 0x%08x' % (
expected, calculated
)
super(ChecksumMismatch, self).__init__(message)
class NoInitialResponseError(ParserError):
"""An event of type initial-response was not received.
This exception is raised when the event stream produced no events or
the first event in the stream was not of the initial-response type.
"""
def __init__(self):
message = 'First event was not of the initial-response type'
super(NoInitialResponseError, self).__init__(message)
class DecodeUtils(object):
"""Unpacking utility functions used in the decoder.
All methods on this class take raw bytes and return a tuple containing
the value parsed from the bytes and the number of bytes consumed to parse
that value.
"""
UINT8_BYTE_FORMAT = '!B'
UINT16_BYTE_FORMAT = '!H'
UINT32_BYTE_FORMAT = '!I'
INT8_BYTE_FORMAT = '!b'
INT16_BYTE_FORMAT = '!h'
INT32_BYTE_FORMAT = '!i'
INT64_BYTE_FORMAT = '!q'
PRELUDE_BYTE_FORMAT = '!III'
# uint byte size to unpack format
UINT_BYTE_FORMAT = {
1: UINT8_BYTE_FORMAT,
2: UINT16_BYTE_FORMAT,
4: UINT32_BYTE_FORMAT,
}
@staticmethod
def unpack_true(data):
"""This method consumes none of the provided bytes and returns True.
:type data: bytes
:param data: The bytes to parse from. This is ignored in this method.
        :rtype: (bool, int)
:returns: The tuple (True, 0)
"""
return True, 0
@staticmethod
def unpack_false(data):
"""This method consumes none of the provided bytes and returns False.
:type data: bytes
:param data: The bytes to parse from. This is ignored in this method.
        :rtype: (bool, int)
:returns: The tuple (False, 0)
"""
return False, 0
@staticmethod
def unpack_uint8(data):
"""Parse an unsigned 8-bit integer from the bytes.
:type data: bytes
:param data: The bytes to parse from.
:rtype: (int, int)
:returns: A tuple containing the (parsed integer value, bytes consumed)
"""
value = unpack(DecodeUtils.UINT8_BYTE_FORMAT, data[:1])[0]
return value, 1
@staticmethod
def unpack_uint32(data):
"""Parse an unsigned 32-bit integer from the bytes.
:type data: bytes
:param data: The bytes to parse from.
:rtype: (int, int)
:returns: A tuple containing the (parsed integer value, bytes consumed)
"""
value = unpack(DecodeUtils.UINT32_BYTE_FORMAT, data[:4])[0]
return value, 4
@staticmethod
def unpack_int8(data):
"""Parse a signed 8-bit integer from the bytes.
:type data: bytes
:param data: The bytes to parse from.
:rtype: (int, int)
:returns: A tuple containing the (parsed integer value, bytes consumed)
"""
value = unpack(DecodeUtils.INT8_BYTE_FORMAT, data[:1])[0]
return value, 1
@staticmethod
def unpack_int16(data):
"""Parse a signed 16-bit integer from the bytes.
:type data: bytes
:param data: The bytes to parse from.
        :rtype: (int, int)
:returns: A tuple containing the (parsed integer value, bytes consumed)
"""
value = unpack(DecodeUtils.INT16_BYTE_FORMAT, data[:2])[0]
return value, 2
@staticmethod
def unpack_int32(data):
"""Parse a signed 32-bit integer from the bytes.
:type data: bytes
:param data: The bytes to parse from.
        :rtype: (int, int)
:returns: A tuple containing the (parsed integer value, bytes consumed)
"""
value = unpack(DecodeUtils.INT32_BYTE_FORMAT, data[:4])[0]
return value, 4
@staticmethod
def unpack_int64(data):
"""Parse a signed 64-bit integer from the bytes.
:type data: bytes
:param data: The bytes to parse from.
        :rtype: (int, int)
:returns: A tuple containing the (parsed integer value, bytes consumed)
"""
value = unpack(DecodeUtils.INT64_BYTE_FORMAT, data[:8])[0]
return value, 8
@staticmethod
def unpack_byte_array(data, length_byte_size=2):
"""Parse a variable length byte array from the bytes.
The bytes are expected to be in the following format:
[ length ][0 ... length bytes]
where length is an unsigned integer represented in the smallest number
of bytes to hold the maximum length of the array.
:type data: bytes
:param data: The bytes to parse from.
:type length_byte_size: int
        :param length_byte_size: The byte size of the preceding integer that
represents the length of the array. Supported values are 1, 2, and 4.
:rtype: (bytes, int)
:returns: A tuple containing the (parsed byte array, bytes consumed).
"""
uint_byte_format = DecodeUtils.UINT_BYTE_FORMAT[length_byte_size]
length = unpack(uint_byte_format, data[:length_byte_size])[0]
bytes_end = length + length_byte_size
array_bytes = data[length_byte_size:bytes_end]
return array_bytes, bytes_end
@staticmethod
def unpack_utf8_string(data, length_byte_size=2):
"""Parse a variable length utf-8 string from the bytes.
The bytes are expected to be in the following format:
[ length ][0 ... length bytes]
where length is an unsigned integer represented in the smallest number
of bytes to hold the maximum length of the array and the following
bytes are a valid utf-8 string.
:type data: bytes
        :param data: The bytes to parse from.
:type length_byte_size: int
        :param length_byte_size: The byte size of the preceding integer that
represents the length of the array. Supported values are 1, 2, and 4.
:rtype: (str, int)
:returns: A tuple containing the (utf-8 string, bytes consumed).
"""
array_bytes, consumed = DecodeUtils.unpack_byte_array(
data, length_byte_size)
return array_bytes.decode('utf-8'), consumed
@staticmethod
def unpack_uuid(data):
"""Parse a 16-byte uuid from the bytes.
:type data: bytes
:param data: The bytes to parse from.
:rtype: (bytes, int)
:returns: A tuple containing the (uuid bytes, bytes consumed).
"""
return data[:16], 16
@staticmethod
def unpack_prelude(data):
"""Parse the prelude for an event stream message from the bytes.
The prelude for an event stream message has the following format:
[total_length][header_length][prelude_crc]
where each field is an unsigned 32-bit integer.
:rtype: ((int, int, int), int)
:returns: A tuple of ((total_length, headers_length, prelude_crc),
consumed)
"""
return (unpack(DecodeUtils.PRELUDE_BYTE_FORMAT, data), _PRELUDE_LENGTH)
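# A minimal sketch of the (value, bytes_consumed) contract that every
# DecodeUtils method follows; the byte strings here are hand-built for
# illustration only.
def _demo_decode_utils():  # pragma: no cover
    # A 2-byte big-endian length prefix (5) followed by five utf-8 bytes.
    assert DecodeUtils.unpack_utf8_string(b'\x00\x05hello') == ('hello', 7)
    # A 4-byte unsigned integer consumes exactly four bytes.
    assert DecodeUtils.unpack_uint32(b'\x00\x00\x00\x10') == (16, 4)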
def _validate_checksum(data, checksum, crc=0):
# To generate the same numeric value across all Python versions and
# platforms use crc32(data) & 0xffffffff.
computed_checksum = crc32(data, crc) & 0xFFFFFFFF
if checksum != computed_checksum:
raise ChecksumMismatch(checksum, computed_checksum)
class MessagePrelude(object):
"""Represents the prelude of an event stream message. """
def __init__(self, total_length, headers_length, crc):
self.total_length = total_length
self.headers_length = headers_length
self.crc = crc
@property
def payload_length(self):
"""Calculates the total payload length.
The extra minus 4 bytes is for the message CRC.
:rtype: int
:returns: The total payload length.
"""
return self.total_length - self.headers_length - _PRELUDE_LENGTH - 4
@property
def payload_end(self):
"""Calculates the byte offset for the end of the message payload.
The extra minus 4 bytes is for the message CRC.
:rtype: int
:returns: The byte offset from the beginning of the event stream
message to the end of the payload.
"""
return self.total_length - 4
@property
def headers_end(self):
"""Calculates the byte offset for the end of the message headers.
:rtype: int
:returns: The byte offset from the beginning of the event stream
message to the end of the headers.
"""
return _PRELUDE_LENGTH + self.headers_length
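# A minimal sketch of the prelude offset math for a hypothetical message
# with a 20-byte header block and a 100-byte payload; the crc value is a
# placeholder.
def _demo_message_prelude():  # pragma: no cover
    prelude = MessagePrelude(total_length=136, headers_length=20, crc=0)
    assert prelude.headers_end == 32       # 12-byte prelude + 20
    assert prelude.payload_length == 100   # 136 - 20 - 12 - 4
    assert prelude.payload_end == 132      # 136 - 4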
class EventStreamMessage(object):
"""Represents an event stream message. """
def __init__(self, prelude, headers, payload, crc):
self.prelude = prelude
self.headers = headers
self.payload = payload
self.crc = crc
def to_response_dict(self, status_code=200):
message_type = self.headers.get(':message-type')
if message_type == 'error' or message_type == 'exception':
status_code = 400
return {
'status_code': status_code,
'headers': self.headers,
'body': self.payload
}
class EventStreamHeaderParser(object):
""" Parses the event headers from an event stream message.
Expects all of the header data upfront and creates a dictionary of headers
to return. This object can be reused multiple times to parse the headers
from multiple event stream messages.
"""
# Maps header type to appropriate unpacking function
# These unpacking functions return the value and the amount unpacked
_HEADER_TYPE_MAP = {
# boolean_true
0: DecodeUtils.unpack_true,
# boolean_false
1: DecodeUtils.unpack_false,
# byte
2: DecodeUtils.unpack_int8,
# short
3: DecodeUtils.unpack_int16,
# integer
4: DecodeUtils.unpack_int32,
# long
5: DecodeUtils.unpack_int64,
# byte_array
6: DecodeUtils.unpack_byte_array,
# string
7: DecodeUtils.unpack_utf8_string,
# timestamp
8: DecodeUtils.unpack_int64,
# uuid
9: DecodeUtils.unpack_uuid,
}
def __init__(self):
self._data = None
def parse(self, data):
"""Parses the event stream headers from an event stream message.
:type data: bytes
:param data: The bytes that correspond to the headers section of an
event stream message.
:rtype: dict
        :returns: A dictionary of header key, value pairs.
"""
self._data = data
return self._parse_headers()
def _parse_headers(self):
headers = {}
while self._data:
name, value = self._parse_header()
if name in headers:
raise DuplicateHeader(name)
headers[name] = value
return headers
def _parse_header(self):
name = self._parse_name()
value = self._parse_value()
return name, value
def _parse_name(self):
name, consumed = DecodeUtils.unpack_utf8_string(self._data, 1)
self._advance_data(consumed)
return name
    def _parse_type(self):
        header_type, consumed = DecodeUtils.unpack_uint8(self._data)
        self._advance_data(consumed)
        return header_type
def _parse_value(self):
header_type = self._parse_type()
value_unpacker = self._HEADER_TYPE_MAP[header_type]
value, consumed = value_unpacker(self._data)
self._advance_data(consumed)
return value
def _advance_data(self, consumed):
self._data = self._data[consumed:]
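# A minimal sketch of the header wire format handled above:
# [1-byte name length][name bytes][1-byte type][value bytes]. A single
# boolean_true header (type 0) carries no value bytes; the input is made
# up for illustration.
def _demo_header_parser():  # pragma: no cover
    parser = EventStreamHeaderParser()
    assert parser.parse(b'\x04true\x00') == {'true': True}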
class EventStreamBuffer(object):
"""Streaming based event stream buffer
A buffer class that wraps bytes from an event stream providing parsed
messages as they become available via an iterable interface.
"""
def __init__(self):
self._data = b''
self._prelude = None
self._header_parser = EventStreamHeaderParser()
def add_data(self, data):
"""Add data to the buffer.
:type data: bytes
:param data: The bytes to add to the buffer to be used when parsing
"""
self._data += data
def _validate_prelude(self, prelude):
if prelude.headers_length > _MAX_HEADERS_LENGTH:
raise InvalidHeadersLength(prelude.headers_length)
if prelude.payload_length > _MAX_PAYLOAD_LENGTH:
raise InvalidPayloadLength(prelude.payload_length)
def _parse_prelude(self):
prelude_bytes = self._data[:_PRELUDE_LENGTH]
raw_prelude, _ = DecodeUtils.unpack_prelude(prelude_bytes)
prelude = MessagePrelude(*raw_prelude)
self._validate_prelude(prelude)
# The minus 4 removes the prelude crc from the bytes to be checked
_validate_checksum(prelude_bytes[:_PRELUDE_LENGTH-4], prelude.crc)
return prelude
def _parse_headers(self):
header_bytes = self._data[_PRELUDE_LENGTH:self._prelude.headers_end]
return self._header_parser.parse(header_bytes)
def _parse_payload(self):
prelude = self._prelude
payload_bytes = self._data[prelude.headers_end:prelude.payload_end]
return payload_bytes
def _parse_message_crc(self):
prelude = self._prelude
crc_bytes = self._data[prelude.payload_end:prelude.total_length]
message_crc, _ = DecodeUtils.unpack_uint32(crc_bytes)
return message_crc
def _parse_message_bytes(self):
# The minus 4 includes the prelude crc to the bytes to be checked
message_bytes = self._data[_PRELUDE_LENGTH-4:self._prelude.payload_end]
return message_bytes
def _validate_message_crc(self):
message_crc = self._parse_message_crc()
message_bytes = self._parse_message_bytes()
_validate_checksum(message_bytes, message_crc, crc=self._prelude.crc)
return message_crc
def _parse_message(self):
crc = self._validate_message_crc()
headers = self._parse_headers()
payload = self._parse_payload()
message = EventStreamMessage(self._prelude, headers, payload, crc)
self._prepare_for_next_message()
return message
def _prepare_for_next_message(self):
# Advance the data and reset the current prelude
self._data = self._data[self._prelude.total_length:]
self._prelude = None
def next(self):
"""Provides the next available message parsed from the stream
:rtype: EventStreamMessage
:returns: The next event stream message
"""
if len(self._data) < _PRELUDE_LENGTH:
raise StopIteration()
if self._prelude is None:
self._prelude = self._parse_prelude()
if len(self._data) < self._prelude.total_length:
raise StopIteration()
return self._parse_message()
def __next__(self):
return self.next()
def __iter__(self):
return self
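# A minimal sketch that builds the smallest valid message (no headers,
# empty payload) by hand and feeds it through the buffer; the checksum
# arithmetic mirrors the prelude and message CRC rules implemented above.
def _demo_event_stream_buffer():  # pragma: no cover
    from struct import pack
    lengths = pack('!II', 16, 0)  # total_length=16, headers_length=0
    prelude_crc = crc32(lengths) & 0xFFFFFFFF
    prelude_crc_bytes = pack('!I', prelude_crc)
    message_crc = crc32(prelude_crc_bytes, prelude_crc) & 0xFFFFFFFF
    buffer = EventStreamBuffer()
    buffer.add_data(lengths + prelude_crc_bytes + pack('!I', message_crc))
    message = next(buffer)
    assert message.headers == {} and message.payload == b''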
class EventStream(object):
"""Wrapper class for an event stream body.
This wraps the underlying streaming body, parsing it for individual events
and yielding them as they come available through the iterator interface.
The following example uses the S3 select API to get structured data out of
an object stored in S3 using an event stream.
**Example:**
::
from botocore.session import Session
s3 = Session().create_client('s3')
response = s3.select_object_content(
Bucket='bucketname',
Key='keyname',
ExpressionType='SQL',
RequestProgress={'Enabled': True},
Expression="SELECT * FROM S3Object s",
InputSerialization={'CSV': {}},
OutputSerialization={'CSV': {}},
)
# This is the event stream in the response
event_stream = response['Payload']
end_event_received = False
with open('output', 'wb') as f:
# Iterate over events in the event stream as they come
for event in event_stream:
# If we received a records event, write the data to a file
if 'Records' in event:
data = event['Records']['Payload']
f.write(data)
# If we received a progress event, print the details
elif 'Progress' in event:
print(event['Progress']['Details'])
# End event indicates that the request finished successfully
elif 'End' in event:
print('Result is complete')
end_event_received = True
if not end_event_received:
raise Exception("End event not received, request incomplete.")
"""
def __init__(self, raw_stream, output_shape, parser, operation_name):
self._raw_stream = raw_stream
self._output_shape = output_shape
self._operation_name = operation_name
self._parser = parser
self._event_generator = self._create_raw_event_generator()
def __iter__(self):
for event in self._event_generator:
parsed_event = self._parse_event(event)
if parsed_event:
yield parsed_event
def _create_raw_event_generator(self):
event_stream_buffer = EventStreamBuffer()
for chunk in self._raw_stream.stream():
event_stream_buffer.add_data(chunk)
for event in event_stream_buffer:
yield event
def _parse_event(self, event):
response_dict = event.to_response_dict()
parsed_response = self._parser.parse(response_dict, self._output_shape)
if response_dict['status_code'] == 200:
return parsed_response
else:
raise EventStreamError(parsed_response, self._operation_name)
def get_initial_response(self):
try:
initial_event = next(self._event_generator)
event_type = initial_event.headers.get(':event-type')
if event_type == 'initial-response':
return initial_event
except StopIteration:
pass
raise NoInitialResponseError()
def close(self):
"""Closes the underlying streaming body. """
self._raw_stream.close()
| 20,517 | Python | 32.254457 | 79 | 0.61973 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/httpsession.py | import os.path
import logging
import socket
from base64 import b64encode
from urllib3 import PoolManager, ProxyManager, proxy_from_url, Timeout
from urllib3.util.retry import Retry
from urllib3.util.ssl_ import (
ssl, OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION, DEFAULT_CIPHERS,
)
from urllib3.exceptions import SSLError as URLLib3SSLError
from urllib3.exceptions import ReadTimeoutError as URLLib3ReadTimeoutError
from urllib3.exceptions import ConnectTimeoutError as URLLib3ConnectTimeoutError
from urllib3.exceptions import NewConnectionError, ProtocolError, ProxyError
try:
# Always import the original SSLContext, even if it has been patched
from urllib3.contrib.pyopenssl import orig_util_SSLContext as SSLContext
except ImportError:
from urllib3.util.ssl_ import SSLContext
import botocore.awsrequest
from botocore.vendored import six
from botocore.vendored.six.moves.urllib_parse import unquote
from botocore.compat import filter_ssl_warnings, urlparse
from botocore.exceptions import (
ConnectionClosedError, EndpointConnectionError, HTTPClientError,
ReadTimeoutError, ProxyConnectionError, ConnectTimeoutError, SSLError,
InvalidProxiesConfigError
)
filter_ssl_warnings()
logger = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 60
MAX_POOL_CONNECTIONS = 10
DEFAULT_CA_BUNDLE = os.path.join(os.path.dirname(__file__), 'cacert.pem')
try:
from certifi import where
except ImportError:
def where():
return DEFAULT_CA_BUNDLE
def get_cert_path(verify):
if verify is not True:
return verify
cert_path = where()
logger.debug("Certificate path: {0}".format(cert_path))
return cert_path
def create_urllib3_context(ssl_version=None, cert_reqs=None,
options=None, ciphers=None):
""" This function is a vendored version of the same function in urllib3
We vendor this function to ensure that the SSL contexts we construct
always use the std lib SSLContext instead of pyopenssl.
"""
context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
# Setting the default here, as we may have no ssl module on import
cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
if options is None:
options = 0
# SSLv2 is easily broken and is considered harmful and dangerous
options |= OP_NO_SSLv2
# SSLv3 has several problems and is now dangerous
options |= OP_NO_SSLv3
# Disable compression to prevent CRIME attacks for OpenSSL 1.0+
# (issue urllib3#309)
options |= OP_NO_COMPRESSION
context.options |= options
if getattr(context, 'supports_set_ciphers', True):
# Platform-specific: Python 2.6
context.set_ciphers(ciphers or DEFAULT_CIPHERS)
context.verify_mode = cert_reqs
if getattr(context, 'check_hostname', None) is not None:
# Platform-specific: Python 3.2
# We do our own verification, including fingerprints and alternative
# hostnames. So disable it here
context.check_hostname = False
return context
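# A minimal sketch of the guarantee the context above provides; this only
# inspects attributes that create_urllib3_context itself sets.
def _demo_ssl_context():  # pragma: no cover
    context = create_urllib3_context()
    assert context.verify_mode == ssl.CERT_REQUIRED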
class ProxyConfiguration(object):
"""Represents a proxy configuration dictionary and additional settings.
This class represents a proxy configuration dictionary and provides utility
    functions to retrieve well-structured proxy urls and proxy headers from the
proxy configuration dictionary.
"""
def __init__(self, proxies=None, proxies_settings=None):
if proxies is None:
proxies = {}
if proxies_settings is None:
proxies_settings = {}
self._proxies = proxies
self._proxies_settings = proxies_settings
def proxy_url_for(self, url):
"""Retrieves the corresponding proxy url for a given url. """
parsed_url = urlparse(url)
proxy = self._proxies.get(parsed_url.scheme)
if proxy:
proxy = self._fix_proxy_url(proxy)
return proxy
def proxy_headers_for(self, proxy_url):
"""Retrieves the corresponding proxy headers for a given proxy url. """
headers = {}
username, password = self._get_auth_from_url(proxy_url)
if username and password:
basic_auth = self._construct_basic_auth(username, password)
headers['Proxy-Authorization'] = basic_auth
return headers
@property
def settings(self):
return self._proxies_settings
def _fix_proxy_url(self, proxy_url):
if proxy_url.startswith('http:') or proxy_url.startswith('https:'):
return proxy_url
elif proxy_url.startswith('//'):
return 'http:' + proxy_url
else:
return 'http://' + proxy_url
def _construct_basic_auth(self, username, password):
auth_str = '{0}:{1}'.format(username, password)
encoded_str = b64encode(auth_str.encode('ascii')).strip().decode()
return 'Basic {0}'.format(encoded_str)
def _get_auth_from_url(self, url):
parsed_url = urlparse(url)
try:
return unquote(parsed_url.username), unquote(parsed_url.password)
except (AttributeError, TypeError):
return None, None
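# A minimal sketch of url fixing and header generation; the proxy address
# and credentials below are made up for illustration.
def _demo_proxy_configuration():  # pragma: no cover
    config = ProxyConfiguration(proxies={'https': 'user:pass@10.0.0.1:3128'})
    proxy_url = config.proxy_url_for('https://example.com/')
    assert proxy_url == 'http://user:pass@10.0.0.1:3128'
    headers = config.proxy_headers_for(proxy_url)
    assert headers['Proxy-Authorization'].startswith('Basic ')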
class URLLib3Session(object):
"""A basic HTTP client that supports connection pooling and proxies.
This class is inspired by requests.adapters.HTTPAdapter, but has been
boiled down to meet the use cases needed by botocore. For the most part
    this class matches the functionality of HTTPAdapter in requests v2.7.0
(the same as our vendored version). The only major difference of note is
that we currently do not support sending chunked requests. While requests
    v2.7.0 implemented this itself, later versions of urllib3 support this
directly via a flag to urlopen so enabling it if needed should be trivial.
"""
def __init__(self,
verify=True,
proxies=None,
timeout=None,
max_pool_connections=MAX_POOL_CONNECTIONS,
socket_options=None,
client_cert=None,
proxies_config=None,
):
self._verify = verify
self._proxy_config = ProxyConfiguration(proxies=proxies,
proxies_settings=proxies_config)
self._pool_classes_by_scheme = {
'http': botocore.awsrequest.AWSHTTPConnectionPool,
'https': botocore.awsrequest.AWSHTTPSConnectionPool,
}
if timeout is None:
timeout = DEFAULT_TIMEOUT
if not isinstance(timeout, (int, float)):
timeout = Timeout(connect=timeout[0], read=timeout[1])
self._cert_file = None
self._key_file = None
if isinstance(client_cert, str):
self._cert_file = client_cert
elif isinstance(client_cert, tuple):
self._cert_file, self._key_file = client_cert
self._timeout = timeout
self._max_pool_connections = max_pool_connections
self._socket_options = socket_options
if socket_options is None:
self._socket_options = []
self._proxy_managers = {}
self._manager = PoolManager(**self._get_pool_manager_kwargs())
self._manager.pool_classes_by_scheme = self._pool_classes_by_scheme
@property
def _proxies_kwargs(self):
proxies_settings = self._proxy_config.settings
proxy_ssl_context = self._setup_proxy_ssl_context(proxies_settings)
proxies_kwargs = {
'proxy_ssl_context': proxy_ssl_context,
'use_forwarding_for_https': proxies_settings.get(
'proxy_use_forwarding_for_https'),
}
return {k: v for k, v in proxies_kwargs.items() if v is not None}
def _get_pool_manager_kwargs(self, **extra_kwargs):
pool_manager_kwargs = {
'strict': True,
'timeout': self._timeout,
'maxsize': self._max_pool_connections,
'ssl_context': self._get_ssl_context(),
'socket_options': self._socket_options,
'cert_file': self._cert_file,
'key_file': self._key_file,
}
pool_manager_kwargs.update(**extra_kwargs)
return pool_manager_kwargs
def _get_ssl_context(self):
return create_urllib3_context()
def _get_proxy_manager(self, proxy_url):
if proxy_url not in self._proxy_managers:
proxy_headers = self._proxy_config.proxy_headers_for(proxy_url)
proxy_manager_kwargs = self._get_pool_manager_kwargs(
proxy_headers=proxy_headers)
proxy_manager_kwargs.update(**self._proxies_kwargs)
proxy_manager = proxy_from_url(proxy_url, **proxy_manager_kwargs)
proxy_manager.pool_classes_by_scheme = self._pool_classes_by_scheme
self._proxy_managers[proxy_url] = proxy_manager
return self._proxy_managers[proxy_url]
def _path_url(self, url):
parsed_url = urlparse(url)
path = parsed_url.path
if not path:
path = '/'
if parsed_url.query:
path = path + '?' + parsed_url.query
return path
def _setup_ssl_cert(self, conn, url, verify):
if url.lower().startswith('https') and verify:
conn.cert_reqs = 'CERT_REQUIRED'
conn.ca_certs = get_cert_path(verify)
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
def _setup_proxy_ssl_context(self, proxies_settings):
proxy_ca_bundle = proxies_settings.get('proxy_ca_bundle')
proxy_cert = proxies_settings.get('proxy_client_cert')
if proxy_ca_bundle is None and proxy_cert is None:
return None
context = self._get_ssl_context()
try:
# urllib3 disables this by default but we need
# it for proper proxy tls negotiation.
context.check_hostname = True
if proxy_ca_bundle is not None:
context.load_verify_locations(cafile=proxy_ca_bundle)
if isinstance(proxy_cert, tuple):
context.load_cert_chain(proxy_cert[0], keyfile=proxy_cert[1])
elif isinstance(proxy_cert, str):
context.load_cert_chain(proxy_cert)
return context
except (IOError, URLLib3SSLError) as e:
raise InvalidProxiesConfigError(error=e)
def _get_connection_manager(self, url, proxy_url=None):
if proxy_url:
manager = self._get_proxy_manager(proxy_url)
else:
manager = self._manager
return manager
def _get_request_target(self, url, proxy_url):
has_proxy = proxy_url is not None
if not has_proxy:
return self._path_url(url)
# HTTP proxies expect the request_target to be the absolute url to know
# which host to establish a connection to. urllib3 also supports
# forwarding for HTTPS through the 'use_forwarding_for_https' parameter.
proxy_scheme = urlparse(proxy_url).scheme
using_https_forwarding_proxy = (
proxy_scheme == 'https' and
self._proxies_kwargs.get('use_forwarding_for_https', False)
)
if using_https_forwarding_proxy or url.startswith('http:'):
return url
else:
return self._path_url(url)
def _chunked(self, headers):
return headers.get('Transfer-Encoding', '') == 'chunked'
def send(self, request):
try:
proxy_url = self._proxy_config.proxy_url_for(request.url)
manager = self._get_connection_manager(request.url, proxy_url)
conn = manager.connection_from_url(request.url)
self._setup_ssl_cert(conn, request.url, self._verify)
request_target = self._get_request_target(request.url, proxy_url)
urllib_response = conn.urlopen(
method=request.method,
url=request_target,
body=request.body,
headers=request.headers,
retries=Retry(False),
assert_same_host=False,
preload_content=False,
decode_content=False,
chunked=self._chunked(request.headers),
)
http_response = botocore.awsrequest.AWSResponse(
request.url,
urllib_response.status,
urllib_response.headers,
urllib_response,
)
if not request.stream_output:
# Cause the raw stream to be exhausted immediately. We do it
# this way instead of using preload_content because
# preload_content will never buffer chunked responses
http_response.content
return http_response
except URLLib3SSLError as e:
raise SSLError(endpoint_url=request.url, error=e)
except (NewConnectionError, socket.gaierror) as e:
raise EndpointConnectionError(endpoint_url=request.url, error=e)
except ProxyError as e:
raise ProxyConnectionError(proxy_url=proxy_url, error=e)
except URLLib3ConnectTimeoutError as e:
raise ConnectTimeoutError(endpoint_url=request.url, error=e)
except URLLib3ReadTimeoutError as e:
raise ReadTimeoutError(endpoint_url=request.url, error=e)
except ProtocolError as e:
raise ConnectionClosedError(
error=e,
request=request,
endpoint_url=request.url
)
except Exception as e:
message = 'Exception received when sending urllib3 HTTP request'
logger.debug(message, exc_info=True)
raise HTTPClientError(error=e)
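# A minimal sketch of driving URLLib3Session directly. It performs a real
# network request; the URL below is a placeholder.
def _demo_urllib3_session():  # pragma: no cover
    from botocore.awsrequest import AWSRequest
    session = URLLib3Session(timeout=10)
    request = AWSRequest(method='GET', url='https://example.com/').prepare()
    response = session.send(request)
    return response.status_code, response.content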
| 13,786 | Python | 37.297222 | 80 | 0.629624 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/translate.py | # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
from botocore.utils import merge_dicts
def build_retry_config(endpoint_prefix, retry_model, definitions,
client_retry_config=None):
service_config = retry_model.get(endpoint_prefix, {})
resolve_references(service_config, definitions)
# We want to merge the global defaults with the service specific
# defaults, with the service specific defaults taking precedence.
# So we use the global defaults as the base.
#
# A deepcopy is done on the retry defaults because it ensures the
# retry model has no chance of getting mutated when the service specific
# configuration or client retry config is merged in.
final_retry_config = {
'__default__': copy.deepcopy(retry_model.get('__default__', {}))
}
resolve_references(final_retry_config, definitions)
    # Then merge the service-specific config on top.
merge_dicts(final_retry_config, service_config)
if client_retry_config is not None:
_merge_client_retry_config(final_retry_config, client_retry_config)
return final_retry_config
def _merge_client_retry_config(retry_config, client_retry_config):
max_retry_attempts_override = client_retry_config.get('max_attempts')
if max_retry_attempts_override is not None:
# In the retry config, the max_attempts refers to the maximum number
# of requests in general will be made. However, for the client's
# retry config it refers to how many retry attempts will be made at
# most. So to translate this number from the client config, one is
# added to convert it to the maximum number request that will be made
# by including the initial request.
#
# It is also important to note that if we ever support per operation
# configuration in the retry model via the client, we will need to
# revisit this logic to make sure max_attempts gets applied
# per operation.
retry_config['__default__'][
'max_attempts'] = max_retry_attempts_override + 1
def resolve_references(config, definitions):
"""Recursively replace $ref keys.
To cut down on duplication, common definitions can be declared
(and passed in via the ``definitions`` attribute) and then
    referenced as {"$ref": "name"}; when this happens the reference
    dict is replaced with the value from the ``definitions`` dict.
This is recursively done.
"""
for key, value in config.items():
if isinstance(value, dict):
if len(value) == 1 and list(value.keys())[0] == '$ref':
# Then we need to resolve this reference.
config[key] = definitions[list(value.values())[0]]
else:
resolve_references(value, definitions)
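# A minimal sketch of reference resolution; the definition name and the
# values below are made up for illustration.
def _demo_resolve_references():  # pragma: no cover
    definitions = {'base_policy': {'max_attempts': 5}}
    config = {'__default__': {'policies': {'general': {'$ref': 'base_policy'}}}}
    resolve_references(config, definitions)
    assert config['__default__']['policies']['general'] == {'max_attempts': 5}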
| 3,412 | Python | 43.324675 | 78 | 0.688159 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/auth.py | # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import base64
import datetime
from hashlib import sha256
from hashlib import sha1
import hmac
import logging
from email.utils import formatdate
from operator import itemgetter
import functools
import time
import calendar
import json
from botocore.exceptions import NoCredentialsError
from botocore.utils import normalize_url_path, percent_encode_sequence
from botocore.compat import HTTPHeaders
from botocore.compat import quote, unquote, urlsplit, parse_qs
from botocore.compat import urlunsplit
from botocore.compat import encodebytes
from botocore.compat import six
from botocore.compat import json
from botocore.compat import MD5_AVAILABLE
from botocore.compat import ensure_unicode
logger = logging.getLogger(__name__)
EMPTY_SHA256_HASH = (
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')
# This is the buffer size used when calculating sha256 checksums.
# Experimenting with various buffer sizes showed that this value generally
# gave the best result (in terms of performance).
PAYLOAD_BUFFER = 1024 * 1024
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
SIGV4_TIMESTAMP = '%Y%m%dT%H%M%SZ'
SIGNED_HEADERS_BLACKLIST = [
'expect',
'user-agent',
'x-amzn-trace-id',
]
UNSIGNED_PAYLOAD = 'UNSIGNED-PAYLOAD'
class BaseSigner(object):
REQUIRES_REGION = False
def add_auth(self, request):
raise NotImplementedError("add_auth")
class SigV2Auth(BaseSigner):
"""
Sign a request with Signature V2.
"""
def __init__(self, credentials):
self.credentials = credentials
def calc_signature(self, request, params):
logger.debug("Calculating signature using v2 auth.")
split = urlsplit(request.url)
path = split.path
if len(path) == 0:
path = '/'
string_to_sign = '%s\n%s\n%s\n' % (request.method,
split.netloc,
path)
lhmac = hmac.new(self.credentials.secret_key.encode('utf-8'),
digestmod=sha256)
pairs = []
for key in sorted(params):
# Any previous signature should not be a part of this
# one, so we skip that particular key. This prevents
# issues during retries.
if key == 'Signature':
continue
value = six.text_type(params[key])
pairs.append(quote(key.encode('utf-8'), safe='') + '=' +
quote(value.encode('utf-8'), safe='-_~'))
qs = '&'.join(pairs)
string_to_sign += qs
logger.debug('String to sign: %s', string_to_sign)
lhmac.update(string_to_sign.encode('utf-8'))
b64 = base64.b64encode(lhmac.digest()).strip().decode('utf-8')
return (qs, b64)
def add_auth(self, request):
# The auth handler is the last thing called in the
# preparation phase of a prepared request.
# Because of this we have to parse the query params
# from the request body so we can update them with
# the sigv2 auth params.
if self.credentials is None:
raise NoCredentialsError
if request.data:
# POST
params = request.data
else:
# GET
params = request.params
params['AWSAccessKeyId'] = self.credentials.access_key
params['SignatureVersion'] = '2'
params['SignatureMethod'] = 'HmacSHA256'
params['Timestamp'] = time.strftime(ISO8601, time.gmtime())
if self.credentials.token:
params['SecurityToken'] = self.credentials.token
qs, signature = self.calc_signature(request, params)
params['Signature'] = signature
return request
class SigV3Auth(BaseSigner):
def __init__(self, credentials):
self.credentials = credentials
def add_auth(self, request):
if self.credentials is None:
raise NoCredentialsError
if 'Date' in request.headers:
del request.headers['Date']
request.headers['Date'] = formatdate(usegmt=True)
if self.credentials.token:
if 'X-Amz-Security-Token' in request.headers:
del request.headers['X-Amz-Security-Token']
request.headers['X-Amz-Security-Token'] = self.credentials.token
new_hmac = hmac.new(self.credentials.secret_key.encode('utf-8'),
digestmod=sha256)
new_hmac.update(request.headers['Date'].encode('utf-8'))
encoded_signature = encodebytes(new_hmac.digest()).strip()
signature = ('AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=%s,Signature=%s' %
(self.credentials.access_key, 'HmacSHA256',
encoded_signature.decode('utf-8')))
if 'X-Amzn-Authorization' in request.headers:
del request.headers['X-Amzn-Authorization']
request.headers['X-Amzn-Authorization'] = signature
class SigV4Auth(BaseSigner):
"""
Sign a request with Signature V4.
"""
REQUIRES_REGION = True
def __init__(self, credentials, service_name, region_name):
self.credentials = credentials
# We initialize these value here so the unit tests can have
        # valid values. But these will get overridden in ``add_auth``
# later for real requests.
self._region_name = region_name
self._service_name = service_name
def _sign(self, key, msg, hex=False):
if hex:
sig = hmac.new(key, msg.encode('utf-8'), sha256).hexdigest()
else:
sig = hmac.new(key, msg.encode('utf-8'), sha256).digest()
return sig
def headers_to_sign(self, request):
"""
Select the headers from the request that need to be included
in the StringToSign.
"""
header_map = HTTPHeaders()
for name, value in request.headers.items():
lname = name.lower()
if lname not in SIGNED_HEADERS_BLACKLIST:
header_map[lname] = value
if 'host' not in header_map:
# Ensure we sign the lowercased version of the host, as that
# is what will ultimately be sent on the wire.
# TODO: We should set the host ourselves, instead of relying on our
# HTTP client to set it for us.
header_map['host'] = self._canonical_host(request.url).lower()
return header_map
def _canonical_host(self, url):
url_parts = urlsplit(url)
default_ports = {
'http': 80,
'https': 443
}
if any(url_parts.scheme == scheme and url_parts.port == port
for scheme, port in default_ports.items()):
# No need to include the port if it's the default port.
return url_parts.hostname
# Strip out auth if it's present in the netloc.
return url_parts.netloc.rsplit('@', 1)[-1]
def canonical_query_string(self, request):
# The query string can come from two parts. One is the
# params attribute of the request. The other is from the request
# url (in which case we have to re-split the url into its components
# and parse out the query string component).
if request.params:
return self._canonical_query_string_params(request.params)
else:
return self._canonical_query_string_url(urlsplit(request.url))
def _canonical_query_string_params(self, params):
        pairs = []
        for param in sorted(params):
            value = str(params[param])
            pairs.append('%s=%s' % (quote(param, safe='-_.~'),
                                    quote(value, safe='-_.~')))
        return '&'.join(pairs)
def _canonical_query_string_url(self, parts):
canonical_query_string = ''
if parts.query:
# [(key, value), (key2, value2)]
key_val_pairs = []
for pair in parts.query.split('&'):
key, _, value = pair.partition('=')
key_val_pairs.append((key, value))
sorted_key_vals = []
# Sort by the key names, and in the case of
# repeated keys, then sort by the value.
for key, value in sorted(key_val_pairs):
sorted_key_vals.append('%s=%s' % (key, value))
canonical_query_string = '&'.join(sorted_key_vals)
return canonical_query_string
def canonical_headers(self, headers_to_sign):
"""
Return the headers that need to be included in the StringToSign
in their canonical form by converting all header keys to lower
case, sorting them in alphabetical order and then joining
them into a string, separated by newlines.
"""
headers = []
sorted_header_names = sorted(set(headers_to_sign))
for key in sorted_header_names:
value = ','.join(self._header_value(v) for v in
sorted(headers_to_sign.get_all(key)))
headers.append('%s:%s' % (key, ensure_unicode(value)))
return '\n'.join(headers)
def _header_value(self, value):
# From the sigv4 docs:
# Lowercase(HeaderName) + ':' + Trimall(HeaderValue)
#
# The Trimall function removes excess white space before and after
# values, and converts sequential spaces to a single space.
return ' '.join(value.split())
def signed_headers(self, headers_to_sign):
        names = sorted(n.lower().strip() for n in set(headers_to_sign))
        return ';'.join(names)
def payload(self, request):
if not self._should_sha256_sign_payload(request):
# When payload signing is disabled, we use this static string in
# place of the payload checksum.
return UNSIGNED_PAYLOAD
request_body = request.body
if request_body and hasattr(request_body, 'seek'):
position = request_body.tell()
read_chunksize = functools.partial(request_body.read,
PAYLOAD_BUFFER)
checksum = sha256()
for chunk in iter(read_chunksize, b''):
checksum.update(chunk)
hex_checksum = checksum.hexdigest()
request_body.seek(position)
return hex_checksum
elif request_body:
# The request serialization has ensured that
# request.body is a bytes() type.
return sha256(request_body).hexdigest()
else:
return EMPTY_SHA256_HASH
def _should_sha256_sign_payload(self, request):
# Payloads will always be signed over insecure connections.
if not request.url.startswith('https'):
return True
# Certain operations may have payload signing disabled by default.
# Since we don't have access to the operation model, we pass in this
# bit of metadata through the request context.
return request.context.get('payload_signing_enabled', True)
def canonical_request(self, request):
cr = [request.method.upper()]
path = self._normalize_url_path(urlsplit(request.url).path)
cr.append(path)
cr.append(self.canonical_query_string(request))
headers_to_sign = self.headers_to_sign(request)
cr.append(self.canonical_headers(headers_to_sign) + '\n')
cr.append(self.signed_headers(headers_to_sign))
if 'X-Amz-Content-SHA256' in request.headers:
body_checksum = request.headers['X-Amz-Content-SHA256']
else:
body_checksum = self.payload(request)
cr.append(body_checksum)
return '\n'.join(cr)
def _normalize_url_path(self, path):
normalized_path = quote(normalize_url_path(path), safe='/~')
return normalized_path
def scope(self, request):
scope = [self.credentials.access_key]
scope.append(request.context['timestamp'][0:8])
scope.append(self._region_name)
scope.append(self._service_name)
scope.append('aws4_request')
return '/'.join(scope)
def credential_scope(self, request):
scope = []
scope.append(request.context['timestamp'][0:8])
scope.append(self._region_name)
scope.append(self._service_name)
scope.append('aws4_request')
return '/'.join(scope)
def string_to_sign(self, request, canonical_request):
"""
Return the canonical StringToSign as well as a dict
containing the original version of all headers that
were included in the StringToSign.
"""
sts = ['AWS4-HMAC-SHA256']
sts.append(request.context['timestamp'])
sts.append(self.credential_scope(request))
sts.append(sha256(canonical_request.encode('utf-8')).hexdigest())
return '\n'.join(sts)
def signature(self, string_to_sign, request):
key = self.credentials.secret_key
k_date = self._sign(('AWS4' + key).encode('utf-8'),
request.context['timestamp'][0:8])
k_region = self._sign(k_date, self._region_name)
k_service = self._sign(k_region, self._service_name)
k_signing = self._sign(k_service, 'aws4_request')
return self._sign(k_signing, string_to_sign, hex=True)
def add_auth(self, request):
if self.credentials is None:
raise NoCredentialsError
datetime_now = datetime.datetime.utcnow()
request.context['timestamp'] = datetime_now.strftime(SIGV4_TIMESTAMP)
# This could be a retry. Make sure the previous
# authorization header is removed first.
self._modify_request_before_signing(request)
canonical_request = self.canonical_request(request)
logger.debug("Calculating signature using v4 auth.")
logger.debug('CanonicalRequest:\n%s', canonical_request)
string_to_sign = self.string_to_sign(request, canonical_request)
logger.debug('StringToSign:\n%s', string_to_sign)
signature = self.signature(string_to_sign, request)
logger.debug('Signature:\n%s', signature)
self._inject_signature_to_request(request, signature)
def _inject_signature_to_request(self, request, signature):
        components = ['AWS4-HMAC-SHA256 Credential=%s' % self.scope(request)]
        headers_to_sign = self.headers_to_sign(request)
        components.append(
            'SignedHeaders=%s' % self.signed_headers(headers_to_sign))
        components.append('Signature=%s' % signature)
        request.headers['Authorization'] = ', '.join(components)
return request
def _modify_request_before_signing(self, request):
if 'Authorization' in request.headers:
del request.headers['Authorization']
self._set_necessary_date_headers(request)
if self.credentials.token:
if 'X-Amz-Security-Token' in request.headers:
del request.headers['X-Amz-Security-Token']
request.headers['X-Amz-Security-Token'] = self.credentials.token
if not request.context.get('payload_signing_enabled', True):
if 'X-Amz-Content-SHA256' in request.headers:
del request.headers['X-Amz-Content-SHA256']
request.headers['X-Amz-Content-SHA256'] = UNSIGNED_PAYLOAD
def _set_necessary_date_headers(self, request):
# The spec allows for either the Date _or_ the X-Amz-Date value to be
# used so we check both. If there's a Date header, we use the date
# header. Otherwise we use the X-Amz-Date header.
if 'Date' in request.headers:
del request.headers['Date']
datetime_timestamp = datetime.datetime.strptime(
request.context['timestamp'], SIGV4_TIMESTAMP)
request.headers['Date'] = formatdate(
int(calendar.timegm(datetime_timestamp.timetuple())))
if 'X-Amz-Date' in request.headers:
del request.headers['X-Amz-Date']
else:
if 'X-Amz-Date' in request.headers:
del request.headers['X-Amz-Date']
request.headers['X-Amz-Date'] = request.context['timestamp']
class S3SigV4Auth(SigV4Auth):
def _modify_request_before_signing(self, request):
super(S3SigV4Auth, self)._modify_request_before_signing(request)
if 'X-Amz-Content-SHA256' in request.headers:
del request.headers['X-Amz-Content-SHA256']
request.headers['X-Amz-Content-SHA256'] = self.payload(request)
def _should_sha256_sign_payload(self, request):
# S3 allows optional body signing, so to minimize the performance
# impact, we opt to not SHA256 sign the body on streaming uploads,
# provided that we're on https.
client_config = request.context.get('client_config')
s3_config = getattr(client_config, 's3', None)
# The config could be None if it isn't set, or if the customer sets it
# to None.
if s3_config is None:
s3_config = {}
# The explicit configuration takes precedence over any implicit
# configuration.
sign_payload = s3_config.get('payload_signing_enabled', None)
if sign_payload is not None:
return sign_payload
# We require that both content-md5 be present and https be enabled
# to implicitly disable body signing. The combination of TLS and
# content-md5 is sufficiently secure and durable for us to be
# confident in the request without body signing.
if not request.url.startswith('https') or \
'Content-MD5' not in request.headers:
return True
# If the input is streaming we disable body signing by default.
if request.context.get('has_streaming_input', False):
return False
# If the S3-specific checks had no results, delegate to the generic
# checks.
return super(S3SigV4Auth, self)._should_sha256_sign_payload(request)
def _normalize_url_path(self, path):
# For S3, we do not normalize the path.
return path
class SigV4QueryAuth(SigV4Auth):
DEFAULT_EXPIRES = 3600
def __init__(self, credentials, service_name, region_name,
expires=DEFAULT_EXPIRES):
super(SigV4QueryAuth, self).__init__(credentials, service_name,
region_name)
self._expires = expires
def _modify_request_before_signing(self, request):
# We automatically set this header, so if it's the auto-set value we
# want to get rid of it since it doesn't make sense for presigned urls.
content_type = request.headers.get('content-type')
blacklisted_content_type = (
'application/x-www-form-urlencoded; charset=utf-8'
)
if content_type == blacklisted_content_type:
del request.headers['content-type']
# Note that we're not including X-Amz-Signature.
# From the docs: "The Canonical Query String must include all the query
        # parameters from the preceding table except for X-Amz-Signature."
signed_headers = self.signed_headers(self.headers_to_sign(request))
auth_params = {
'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
'X-Amz-Credential': self.scope(request),
'X-Amz-Date': request.context['timestamp'],
'X-Amz-Expires': self._expires,
'X-Amz-SignedHeaders': signed_headers,
}
if self.credentials.token is not None:
auth_params['X-Amz-Security-Token'] = self.credentials.token
# Now parse the original query string to a dict, inject our new query
# params, and serialize back to a query string.
url_parts = urlsplit(request.url)
# parse_qs makes each value a list, but in our case we know we won't
# have repeated keys so we know we have single element lists which we
# can convert back to scalar values.
query_dict = dict(
[(k, v[0]) for k, v in
parse_qs(url_parts.query, keep_blank_values=True).items()])
# The spec is particular about this. It *has* to be:
# https://<endpoint>?<operation params>&<auth params>
# You can't mix the two types of params together, i.e just keep doing
# new_query_params.update(op_params)
# new_query_params.update(auth_params)
# percent_encode_sequence(new_query_params)
operation_params = ''
if request.data:
# We also need to move the body params into the query string. To
# do this, we first have to convert it to a dict.
query_dict.update(self._get_body_as_dict(request))
request.data = ''
if query_dict:
operation_params = percent_encode_sequence(query_dict) + '&'
new_query_string = (operation_params +
percent_encode_sequence(auth_params))
# url_parts is a tuple (and therefore immutable) so we need to create
# a new url_parts with the new query string.
# <part> - <index>
# scheme - 0
# netloc - 1
# path - 2
# query - 3 <-- we're replacing this.
# fragment - 4
p = url_parts
new_url_parts = (p[0], p[1], p[2], new_query_string, p[4])
request.url = urlunsplit(new_url_parts)
def _get_body_as_dict(self, request):
# For query services, request.data is form-encoded and is already a
# dict, but for other services such as rest-json it could be a json
# string or bytes. In those cases we attempt to load the data as a
# dict.
data = request.data
if isinstance(data, six.binary_type):
data = json.loads(data.decode('utf-8'))
elif isinstance(data, six.string_types):
data = json.loads(data)
return data
def _inject_signature_to_request(self, request, signature):
# Rather than calculating an "Authorization" header, for the query
        # param auth, we just append an 'X-Amz-Signature' param to the end
# of the query string.
request.url += '&X-Amz-Signature=%s' % signature
class S3SigV4QueryAuth(SigV4QueryAuth):
"""S3 SigV4 auth using query parameters.
This signer will sign a request using query parameters and signature
version 4, i.e a "presigned url" signer.
Based off of:
http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
"""
def _normalize_url_path(self, path):
# For S3, we do not normalize the path.
return path
def payload(self, request):
# From the doc link above:
# "You don't include a payload hash in the Canonical Request, because
# when you create a presigned URL, you don't know anything about the
# payload. Instead, you use a constant string "UNSIGNED-PAYLOAD".
return UNSIGNED_PAYLOAD
class S3SigV4PostAuth(SigV4Auth):
"""
Presigns a s3 post
Implementation doc here:
http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-UsingHTTPPOST.html
"""
def add_auth(self, request):
datetime_now = datetime.datetime.utcnow()
request.context['timestamp'] = datetime_now.strftime(SIGV4_TIMESTAMP)
fields = {}
if request.context.get('s3-presign-post-fields', None) is not None:
fields = request.context['s3-presign-post-fields']
policy = {}
conditions = []
if request.context.get('s3-presign-post-policy', None) is not None:
policy = request.context['s3-presign-post-policy']
if policy.get('conditions', None) is not None:
conditions = policy['conditions']
policy['conditions'] = conditions
fields['x-amz-algorithm'] = 'AWS4-HMAC-SHA256'
fields['x-amz-credential'] = self.scope(request)
fields['x-amz-date'] = request.context['timestamp']
conditions.append({'x-amz-algorithm': 'AWS4-HMAC-SHA256'})
conditions.append({'x-amz-credential': self.scope(request)})
conditions.append({'x-amz-date': request.context['timestamp']})
if self.credentials.token is not None:
fields['x-amz-security-token'] = self.credentials.token
conditions.append({'x-amz-security-token': self.credentials.token})
# Dump the base64 encoded policy into the fields dictionary.
fields['policy'] = base64.b64encode(
json.dumps(policy).encode('utf-8')).decode('utf-8')
fields['x-amz-signature'] = self.signature(fields['policy'], request)
request.context['s3-presign-post-fields'] = fields
request.context['s3-presign-post-policy'] = policy
class HmacV1Auth(BaseSigner):
# List of Query String Arguments of Interest
QSAOfInterest = ['accelerate', 'acl', 'cors', 'defaultObjectAcl',
'location', 'logging', 'partNumber', 'policy',
'requestPayment', 'torrent',
'versioning', 'versionId', 'versions', 'website',
'uploads', 'uploadId', 'response-content-type',
'response-content-language', 'response-expires',
'response-cache-control', 'response-content-disposition',
'response-content-encoding', 'delete', 'lifecycle',
'tagging', 'restore', 'storageClass', 'notification',
                     'replication', 'analytics', 'metrics',
'inventory', 'select', 'select-type']
def __init__(self, credentials, service_name=None, region_name=None):
self.credentials = credentials
def sign_string(self, string_to_sign):
new_hmac = hmac.new(self.credentials.secret_key.encode('utf-8'),
digestmod=sha1)
new_hmac.update(string_to_sign.encode('utf-8'))
return encodebytes(new_hmac.digest()).strip().decode('utf-8')
def canonical_standard_headers(self, headers):
interesting_headers = ['content-md5', 'content-type', 'date']
hoi = []
if 'Date' in headers:
del headers['Date']
headers['Date'] = self._get_date()
for ih in interesting_headers:
found = False
for key in headers:
lk = key.lower()
if headers[key] is not None and lk == ih:
hoi.append(headers[key].strip())
found = True
if not found:
hoi.append('')
return '\n'.join(hoi)
def canonical_custom_headers(self, headers):
hoi = []
custom_headers = {}
for key in headers:
lk = key.lower()
if headers[key] is not None:
if lk.startswith('x-amz-'):
custom_headers[lk] = ','.join(v.strip() for v in
headers.get_all(key))
sorted_header_keys = sorted(custom_headers.keys())
for key in sorted_header_keys:
hoi.append("%s:%s" % (key, custom_headers[key]))
return '\n'.join(hoi)
def unquote_v(self, nv):
"""
TODO: Do we need this?
"""
if len(nv) == 1:
return nv
else:
return (nv[0], unquote(nv[1]))
def canonical_resource(self, split, auth_path=None):
# don't include anything after the first ? in the resource...
# unless it is one of the QSA of interest, defined above
# NOTE:
# The path in the canonical resource should always be the
# full path including the bucket name, even for virtual-hosting
# style addressing. The ``auth_path`` keeps track of the full
# path for the canonical resource and would be passed in if
# the client was using virtual-hosting style.
if auth_path is not None:
buf = auth_path
else:
buf = split.path
if split.query:
qsa = split.query.split('&')
qsa = [a.split('=', 1) for a in qsa]
qsa = [self.unquote_v(a) for a in qsa
if a[0] in self.QSAOfInterest]
if len(qsa) > 0:
qsa.sort(key=itemgetter(0))
qsa = ['='.join(a) for a in qsa]
buf += '?'
buf += '&'.join(qsa)
return buf
def canonical_string(self, method, split, headers, expires=None,
auth_path=None):
cs = method.upper() + '\n'
cs += self.canonical_standard_headers(headers) + '\n'
custom_headers = self.canonical_custom_headers(headers)
if custom_headers:
cs += custom_headers + '\n'
cs += self.canonical_resource(split, auth_path=auth_path)
return cs
def get_signature(self, method, split, headers, expires=None,
auth_path=None):
if self.credentials.token:
del headers['x-amz-security-token']
headers['x-amz-security-token'] = self.credentials.token
string_to_sign = self.canonical_string(method,
split,
headers,
auth_path=auth_path)
logger.debug('StringToSign:\n%s', string_to_sign)
return self.sign_string(string_to_sign)
def add_auth(self, request):
if self.credentials is None:
raise NoCredentialsError
logger.debug("Calculating signature using hmacv1 auth.")
split = urlsplit(request.url)
logger.debug('HTTP request method: %s', request.method)
signature = self.get_signature(request.method, split,
request.headers,
auth_path=request.auth_path)
self._inject_signature(request, signature)
def _get_date(self):
return formatdate(usegmt=True)
def _inject_signature(self, request, signature):
if 'Authorization' in request.headers:
            # We have to do this because request.headers is not a
            # normal dictionary. It has the (unintuitive) behavior
            # of aggregating repeated assignments to the same
            # key. For example:
# headers['foo'] = 'a'; headers['foo'] = 'b'
# list(headers) will print ['foo', 'foo'].
del request.headers['Authorization']
request.headers['Authorization'] = (
"AWS %s:%s" % (self.credentials.access_key, signature))
class HmacV1QueryAuth(HmacV1Auth):
"""
Generates a presigned request for s3.
Spec from this document:
    http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
"""
DEFAULT_EXPIRES = 3600
def __init__(self, credentials, expires=DEFAULT_EXPIRES):
self.credentials = credentials
self._expires = expires
def _get_date(self):
return str(int(time.time() + int(self._expires)))
def _inject_signature(self, request, signature):
query_dict = {}
query_dict['AWSAccessKeyId'] = self.credentials.access_key
query_dict['Signature'] = signature
for header_key in request.headers:
lk = header_key.lower()
# For query string requests, Expires is used instead of the
# Date header.
if header_key == 'Date':
query_dict['Expires'] = request.headers['Date']
# We only want to include relevant headers in the query string.
# These can be anything that starts with x-amz, is Content-MD5,
# or is Content-Type.
elif lk.startswith('x-amz-') or lk in ['content-md5',
'content-type']:
query_dict[lk] = request.headers[lk]
# Combine all of the identified headers into an encoded
# query string
new_query_string = percent_encode_sequence(query_dict)
# Create a new url with the presigned url.
p = urlsplit(request.url)
if p[3]:
# If there was a pre-existing query string, we should
# add that back before injecting the new query string.
new_query_string = '%s&%s' % (p[3], new_query_string)
new_url_parts = (p[0], p[1], p[2], new_query_string, p[4])
request.url = urlunsplit(new_url_parts)
class HmacV1PostAuth(HmacV1Auth):
"""
Generates a presigned post for s3.
Spec from this document:
http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingHTTPPOST.html
"""
def add_auth(self, request):
fields = {}
if request.context.get('s3-presign-post-fields', None) is not None:
fields = request.context['s3-presign-post-fields']
policy = {}
conditions = []
if request.context.get('s3-presign-post-policy', None) is not None:
policy = request.context['s3-presign-post-policy']
if policy.get('conditions', None) is not None:
conditions = policy['conditions']
policy['conditions'] = conditions
fields['AWSAccessKeyId'] = self.credentials.access_key
if self.credentials.token is not None:
fields['x-amz-security-token'] = self.credentials.token
conditions.append({'x-amz-security-token': self.credentials.token})
# Dump the base64 encoded policy into the fields dictionary.
fields['policy'] = base64.b64encode(
json.dumps(policy).encode('utf-8')).decode('utf-8')
fields['signature'] = self.sign_string(fields['policy'])
request.context['s3-presign-post-fields'] = fields
request.context['s3-presign-post-policy'] = policy
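# Usage sketch: HmacV1PostAuth backs the 's3-presign-post' signer and is
# normally reached through generate_presigned_post on an s3 client whose
# signature version resolves to sigv2. Bucket and key are placeholders.
import botocore.session
from botocore.config import Config

session = botocore.session.get_session()
client = session.create_client(
    's3', region_name='us-east-1', config=Config(signature_version='s3'))
post = client.generate_presigned_post(Bucket='my-bucket', Key='my-key')
# post['url'] is the POST target; post['fields'] carries the policy,
# signature, and access key produced by add_auth above.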
# Defined at the bottom instead of the top of the module because the Auth
# classes weren't defined yet.
AUTH_TYPE_MAPS = {
'v2': SigV2Auth,
'v4': SigV4Auth,
'v4-query': SigV4QueryAuth,
'v3': SigV3Auth,
'v3https': SigV3Auth,
's3': HmacV1Auth,
's3-query': HmacV1QueryAuth,
's3-presign-post': HmacV1PostAuth,
's3v4': S3SigV4Auth,
's3v4-query': S3SigV4QueryAuth,
's3v4-presign-post': S3SigV4PostAuth,
}
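# Lookup sketch: resolving a signature-version string (as found in endpoint
# metadata or client config) to its auth class via the map above. The
# credentials below are dummy placeholder values.
from botocore.auth import AUTH_TYPE_MAPS
from botocore.credentials import Credentials

auth_cls = AUTH_TYPE_MAPS['s3']                    # -> HmacV1Auth
signer = auth_cls(Credentials('access-key', 'secret-key'))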
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/monitoring.py
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
import logging
import re
import time
from botocore.compat import ensure_unicode, ensure_bytes, urlparse
from botocore.retryhandler import EXCEPTION_MAP as RETRYABLE_EXCEPTIONS
logger = logging.getLogger(__name__)
class Monitor(object):
_EVENTS_TO_REGISTER = [
'before-parameter-build',
'request-created',
'response-received',
'after-call',
'after-call-error',
]
def __init__(self, adapter, publisher):
"""Abstraction for monitoring clients API calls
:param adapter: An adapter that takes event emitter events
and produces monitor events
:param publisher: A publisher for generated monitor events
"""
self._adapter = adapter
self._publisher = publisher
def register(self, event_emitter):
"""Register an event emitter to the monitor"""
for event_to_register in self._EVENTS_TO_REGISTER:
event_emitter.register_last(event_to_register, self.capture)
def capture(self, event_name, **payload):
"""Captures an incoming event from the event emitter
        It will feed an event emitter event to the monitor's adapter to create
        a monitor event and then publish that event to the monitor's publisher.
"""
try:
monitor_event = self._adapter.feed(event_name, payload)
if monitor_event:
self._publisher.publish(monitor_event)
except Exception as e:
logger.debug(
'Exception %s raised by client monitor in handling event %s',
e, event_name, exc_info=True)
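# Wiring sketch, mirroring how Session._create_csm_monitor assembles these
# pieces; the host, port, and client id below are placeholder values.
import socket

from botocore.monitoring import (
    CSMSerializer, Monitor, MonitorEventAdapter, SocketPublisher,
)

monitor = Monitor(
    adapter=MonitorEventAdapter(),
    publisher=SocketPublisher(
        socket=socket.socket(socket.AF_INET, socket.SOCK_DGRAM),
        host='127.0.0.1',
        port=31000,
        serializer=CSMSerializer(csm_client_id='my-app'),
    ),
)
# monitor.register(client.meta.events)  # attach to a client's event emitter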
class MonitorEventAdapter(object):
def __init__(self, time=time.time):
"""Adapts event emitter events to produce monitor events
:type time: callable
:param time: A callable that produces the current time
"""
self._time = time
def feed(self, emitter_event_name, emitter_payload):
"""Feed an event emitter event to generate a monitor event
:type emitter_event_name: str
:param emitter_event_name: The name of the event emitted
:type emitter_payload: dict
        :param emitter_payload: The payload associated with the emitted
            event
:rtype: BaseMonitorEvent
:returns: A monitor event based on the event emitter events
fired
"""
return self._get_handler(emitter_event_name)(**emitter_payload)
def _get_handler(self, event_name):
return getattr(
self, '_handle_' + event_name.split('.')[0].replace('-', '_')
)
def _handle_before_parameter_build(self, model, context, **kwargs):
context['current_api_call_event'] = APICallEvent(
service=model.service_model.service_id,
operation=model.wire_name,
timestamp=self._get_current_time(),
)
def _handle_request_created(self, request, **kwargs):
context = request.context
new_attempt_event = context[
'current_api_call_event'].new_api_call_attempt(
timestamp=self._get_current_time())
new_attempt_event.request_headers = request.headers
new_attempt_event.url = request.url
context['current_api_call_attempt_event'] = new_attempt_event
def _handle_response_received(self, parsed_response, context, exception,
**kwargs):
attempt_event = context.pop('current_api_call_attempt_event')
attempt_event.latency = self._get_latency(attempt_event)
if parsed_response is not None:
attempt_event.http_status_code = parsed_response[
'ResponseMetadata']['HTTPStatusCode']
attempt_event.response_headers = parsed_response[
'ResponseMetadata']['HTTPHeaders']
attempt_event.parsed_error = parsed_response.get('Error')
else:
attempt_event.wire_exception = exception
return attempt_event
def _handle_after_call(self, context, parsed, **kwargs):
context['current_api_call_event'].retries_exceeded = parsed[
'ResponseMetadata'].get('MaxAttemptsReached', False)
return self._complete_api_call(context)
def _handle_after_call_error(self, context, exception, **kwargs):
# If the after-call-error was emitted and the error being raised
        # was a retryable connection error, then the retries must have been
        # exceeded for that exception, as this event gets emitted **after**
        # retries happen.
context['current_api_call_event'].retries_exceeded = \
self._is_retryable_exception(exception)
return self._complete_api_call(context)
def _is_retryable_exception(self, exception):
return isinstance(
exception, tuple(RETRYABLE_EXCEPTIONS['GENERAL_CONNECTION_ERROR']))
def _complete_api_call(self, context):
call_event = context.pop('current_api_call_event')
call_event.latency = self._get_latency(call_event)
return call_event
def _get_latency(self, event):
return self._get_current_time() - event.timestamp
def _get_current_time(self):
return int(self._time() * 1000)
class BaseMonitorEvent(object):
def __init__(self, service, operation, timestamp):
"""Base monitor event
:type service: str
        :param service: A string identifying the service associated with
            the event
        :type operation: str
        :param operation: A string identifying the operation of the service
            associated with the event
:type timestamp: int
:param timestamp: Epoch time in milliseconds from when the event began
"""
self.service = service
self.operation = operation
self.timestamp = timestamp
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.__dict__)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return False
class APICallEvent(BaseMonitorEvent):
def __init__(self, service, operation, timestamp, latency=None,
attempts=None, retries_exceeded=False):
"""Monitor event for a single API call
        This event corresponds to a single client method call, which includes
        every HTTP request attempt made in order to complete the client call
        :type service: str
        :param service: A string identifying the service associated with
            the event
        :type operation: str
        :param operation: A string identifying the operation of the service
            associated with the event
        :type timestamp: int
        :param timestamp: Epoch time in milliseconds from when the event began
        :type latency: int
        :param latency: The time in milliseconds to complete the client call
        :type attempts: list
        :param attempts: The list of APICallAttempts associated with the
            APICall
        :type retries_exceeded: bool
        :param retries_exceeded: True if the API call exceeded retries. False
            otherwise
"""
super(APICallEvent, self).__init__(
service=service, operation=operation, timestamp=timestamp)
self.latency = latency
self.attempts = attempts
if attempts is None:
self.attempts = []
self.retries_exceeded = retries_exceeded
def new_api_call_attempt(self, timestamp):
"""Instantiates APICallAttemptEvent associated to the APICallEvent
:type timestamp: int
:param timestamp: Epoch time in milliseconds to associate to the
APICallAttemptEvent
"""
attempt_event = APICallAttemptEvent(
service=self.service,
operation=self.operation,
timestamp=timestamp
)
self.attempts.append(attempt_event)
return attempt_event
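# A small sketch of the call/attempt relationship documented above; the
# timestamps are arbitrary epoch-millisecond values.
from botocore.monitoring import APICallEvent

call = APICallEvent(service='S3', operation='ListBuckets',
                    timestamp=1557000000000)
attempt = call.new_api_call_attempt(timestamp=1557000000005)
assert call.attempts == [attempt]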
class APICallAttemptEvent(BaseMonitorEvent):
def __init__(self, service, operation, timestamp,
latency=None, url=None, http_status_code=None,
request_headers=None, response_headers=None,
parsed_error=None, wire_exception=None):
"""Monitor event for a single API call attempt
This event corresponds to a single HTTP request attempt in completing
the entire client method call.
:type service: str
        :param service: A string identifying the service associated with
            the event
        :type operation: str
        :param operation: A string identifying the operation of the service
            associated with the event
:type timestamp: int
:param timestamp: Epoch time in milliseconds from when the HTTP request
started
:type latency: int
:param latency: The time in milliseconds to complete the HTTP request
whether it succeeded or failed
:type url: str
:param url: The URL the attempt was sent to
:type http_status_code: int
:param http_status_code: The HTTP status code of the HTTP response
if there was a response
:type request_headers: dict
:param request_headers: The HTTP headers sent in making the HTTP
request
:type response_headers: dict
:param response_headers: The HTTP headers returned in the HTTP response
if there was a response
:type parsed_error: dict
        :param parsed_error: The parsed error, if the service returned an
            error response
        :type wire_exception: Exception
        :param wire_exception: The exception raised in sending the HTTP
            request (e.g. ConnectionError)
"""
super(APICallAttemptEvent, self).__init__(
service=service, operation=operation, timestamp=timestamp
)
self.latency = latency
self.url = url
self.http_status_code = http_status_code
self.request_headers = request_headers
self.response_headers = response_headers
self.parsed_error = parsed_error
self.wire_exception = wire_exception
class CSMSerializer(object):
_MAX_CLIENT_ID_LENGTH = 255
_MAX_EXCEPTION_CLASS_LENGTH = 128
_MAX_ERROR_CODE_LENGTH = 128
_MAX_USER_AGENT_LENGTH = 256
_MAX_MESSAGE_LENGTH = 512
_RESPONSE_HEADERS_TO_EVENT_ENTRIES = {
'x-amzn-requestid': 'XAmznRequestId',
'x-amz-request-id': 'XAmzRequestId',
'x-amz-id-2': 'XAmzId2',
}
_AUTH_REGEXS = {
'v4': re.compile(
r'AWS4-HMAC-SHA256 '
r'Credential=(?P<access_key>\w+)/\d+/'
r'(?P<signing_region>[a-z0-9-]+)/'
),
's3': re.compile(
r'AWS (?P<access_key>\w+):'
)
}
_SERIALIZEABLE_EVENT_PROPERTIES = [
'service',
'operation',
'timestamp',
'attempts',
'latency',
'retries_exceeded',
'url',
'request_headers',
'http_status_code',
'response_headers',
'parsed_error',
'wire_exception',
]
def __init__(self, csm_client_id):
"""Serializes monitor events to CSM (Client Side Monitoring) format
:type csm_client_id: str
:param csm_client_id: The application identifier to associate
to the serialized events
"""
self._validate_client_id(csm_client_id)
self.csm_client_id = csm_client_id
def _validate_client_id(self, csm_client_id):
if len(csm_client_id) > self._MAX_CLIENT_ID_LENGTH:
raise ValueError(
'The value provided for csm_client_id: %s exceeds the '
'maximum length of %s characters' % (
csm_client_id, self._MAX_CLIENT_ID_LENGTH)
)
def serialize(self, event):
"""Serializes a monitor event to the CSM format
:type event: BaseMonitorEvent
:param event: The event to serialize to bytes
:rtype: bytes
:returns: The CSM serialized form of the event
"""
event_dict = self._get_base_event_dict(event)
event_type = self._get_event_type(event)
event_dict['Type'] = event_type
for attr in self._SERIALIZEABLE_EVENT_PROPERTIES:
value = getattr(event, attr, None)
if value is not None:
getattr(self, '_serialize_' + attr)(
value, event_dict, event_type=event_type)
return ensure_bytes(
json.dumps(event_dict, separators=(',', ':')))
def _get_base_event_dict(self, event):
return {
'Version': 1,
'ClientId': self.csm_client_id,
}
def _serialize_service(self, service, event_dict, **kwargs):
event_dict['Service'] = service
def _serialize_operation(self, operation, event_dict, **kwargs):
event_dict['Api'] = operation
def _serialize_timestamp(self, timestamp, event_dict, **kwargs):
event_dict['Timestamp'] = timestamp
def _serialize_attempts(self, attempts, event_dict, **kwargs):
event_dict['AttemptCount'] = len(attempts)
if attempts:
self._add_fields_from_last_attempt(event_dict, attempts[-1])
def _add_fields_from_last_attempt(self, event_dict, last_attempt):
if last_attempt.request_headers:
            # It does not matter which attempt we use to grab the region
            # for the ApiCall event, but SDKs typically use the last one.
region = self._get_region(last_attempt.request_headers)
if region is not None:
event_dict['Region'] = region
event_dict['UserAgent'] = self._get_user_agent(
last_attempt.request_headers)
if last_attempt.http_status_code is not None:
event_dict['FinalHttpStatusCode'] = last_attempt.http_status_code
if last_attempt.parsed_error is not None:
self._serialize_parsed_error(
last_attempt.parsed_error, event_dict, 'ApiCall')
if last_attempt.wire_exception is not None:
self._serialize_wire_exception(
last_attempt.wire_exception, event_dict, 'ApiCall')
def _serialize_latency(self, latency, event_dict, event_type):
if event_type == 'ApiCall':
event_dict['Latency'] = latency
elif event_type == 'ApiCallAttempt':
event_dict['AttemptLatency'] = latency
def _serialize_retries_exceeded(self, retries_exceeded, event_dict,
**kwargs):
event_dict['MaxRetriesExceeded'] = (1 if retries_exceeded else 0)
def _serialize_url(self, url, event_dict, **kwargs):
event_dict['Fqdn'] = urlparse(url).netloc
def _serialize_request_headers(self, request_headers, event_dict,
**kwargs):
event_dict['UserAgent'] = self._get_user_agent(request_headers)
if self._is_signed(request_headers):
event_dict['AccessKey'] = self._get_access_key(request_headers)
region = self._get_region(request_headers)
if region is not None:
event_dict['Region'] = region
if 'X-Amz-Security-Token' in request_headers:
event_dict['SessionToken'] = request_headers[
'X-Amz-Security-Token']
def _serialize_http_status_code(self, http_status_code, event_dict,
**kwargs):
event_dict['HttpStatusCode'] = http_status_code
def _serialize_response_headers(self, response_headers, event_dict,
**kwargs):
for header, entry in self._RESPONSE_HEADERS_TO_EVENT_ENTRIES.items():
if header in response_headers:
event_dict[entry] = response_headers[header]
def _serialize_parsed_error(self, parsed_error, event_dict, event_type,
**kwargs):
field_prefix = 'Final' if event_type == 'ApiCall' else ''
event_dict[field_prefix + 'AwsException'] = self._truncate(
parsed_error['Code'], self._MAX_ERROR_CODE_LENGTH)
event_dict[field_prefix + 'AwsExceptionMessage'] = self._truncate(
parsed_error['Message'], self._MAX_MESSAGE_LENGTH)
def _serialize_wire_exception(self, wire_exception, event_dict, event_type,
**kwargs):
field_prefix = 'Final' if event_type == 'ApiCall' else ''
event_dict[field_prefix + 'SdkException'] = self._truncate(
wire_exception.__class__.__name__,
self._MAX_EXCEPTION_CLASS_LENGTH)
event_dict[field_prefix + 'SdkExceptionMessage'] = self._truncate(
str(wire_exception), self._MAX_MESSAGE_LENGTH)
def _get_event_type(self, event):
if isinstance(event, APICallEvent):
return 'ApiCall'
elif isinstance(event, APICallAttemptEvent):
return 'ApiCallAttempt'
def _get_access_key(self, request_headers):
auth_val = self._get_auth_value(request_headers)
_, auth_match = self._get_auth_match(auth_val)
return auth_match.group('access_key')
def _get_region(self, request_headers):
if not self._is_signed(request_headers):
return None
auth_val = self._get_auth_value(request_headers)
signature_version, auth_match = self._get_auth_match(auth_val)
if signature_version != 'v4':
return None
return auth_match.group('signing_region')
def _get_user_agent(self, request_headers):
return self._truncate(
ensure_unicode(request_headers.get('User-Agent', '')),
self._MAX_USER_AGENT_LENGTH
)
def _is_signed(self, request_headers):
return 'Authorization' in request_headers
def _get_auth_value(self, request_headers):
return ensure_unicode(request_headers['Authorization'])
def _get_auth_match(self, auth_val):
for signature_version, regex in self._AUTH_REGEXS.items():
match = regex.match(auth_val)
if match:
return signature_version, match
return None, None
def _truncate(self, text, max_length):
if len(text) > max_length:
            logger.debug(
                'Truncating following value to maximum length of '
                '%s: %s', max_length, text)
return text[:max_length]
return text
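# Serialization sketch: CSMSerializer emits compact JSON bytes keyed by the
# CSM field names used above. The client id and timestamp are placeholders.
from botocore.monitoring import APICallEvent, CSMSerializer

serializer = CSMSerializer(csm_client_id='my-app')
event = APICallEvent(service='S3', operation='ListBuckets',
                     timestamp=1557000000000)
event.latency = 123
print(serializer.serialize(event))
# b'{"Version":1,"ClientId":"my-app","Type":"ApiCall","Service":"S3",...}'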
class SocketPublisher(object):
_MAX_MONITOR_EVENT_LENGTH = 8 * 1024
def __init__(self, socket, host, port, serializer):
"""Publishes monitor events to a socket
:type socket: socket.socket
:param socket: The socket object to use to publish events
:type host: string
:param host: The host to send events to
:type port: integer
:param port: The port on the host to send events to
:param serializer: The serializer to use to serialize the event
to a form that can be published to the socket. This must
have a `serialize()` method that accepts a monitor event
            and returns bytes
"""
self._socket = socket
self._address = (host, port)
self._serializer = serializer
def publish(self, event):
"""Publishes a specified monitor event
:type event: BaseMonitorEvent
:param event: The monitor event to be sent
over the publisher's socket to the desired address.
"""
serialized_event = self._serializer.serialize(event)
if len(serialized_event) > self._MAX_MONITOR_EVENT_LENGTH:
logger.debug(
'Serialized event of size %s exceeds the maximum length '
'allowed: %s. Not sending event to socket.',
len(serialized_event), self._MAX_MONITOR_EVENT_LENGTH
)
return
self._socket.sendto(serialized_event, self._address)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/client.py
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
import functools
from botocore import waiter, xform_name
from botocore.args import ClientArgsCreator
from botocore.auth import AUTH_TYPE_MAPS
from botocore.awsrequest import prepare_request_dict
from botocore.docs.docstring import ClientMethodDocstring
from botocore.docs.docstring import PaginatorDocstring
from botocore.exceptions import (
ClientError, DataNotFoundError, OperationNotPageableError,
UnknownSignatureVersionError, InvalidEndpointDiscoveryConfigurationError
)
from botocore.hooks import first_non_none_response
from botocore.model import ServiceModel
from botocore.paginate import Paginator
from botocore.utils import (
CachedProperty, get_service_module_name, S3RegionRedirector,
S3ArnParamHandler, S3EndpointSetter, ensure_boolean,
S3ControlArnParamHandler, S3ControlEndpointSetter,
)
from botocore import UNSIGNED
# Keep this imported. There's pre-existing code that uses
# "from botocore.client import Config".
from botocore.config import Config
from botocore.history import get_global_history_recorder
from botocore.discovery import (
EndpointDiscoveryHandler, EndpointDiscoveryManager,
block_endpoint_discovery_required_operations
)
from botocore.retries import standard
from botocore.retries import adaptive
logger = logging.getLogger(__name__)
history_recorder = get_global_history_recorder()
class ClientCreator(object):
"""Creates client objects for a service."""
def __init__(self, loader, endpoint_resolver, user_agent, event_emitter,
retry_handler_factory, retry_config_translator,
response_parser_factory=None, exceptions_factory=None,
config_store=None):
self._loader = loader
self._endpoint_resolver = endpoint_resolver
self._user_agent = user_agent
self._event_emitter = event_emitter
self._retry_handler_factory = retry_handler_factory
self._retry_config_translator = retry_config_translator
self._response_parser_factory = response_parser_factory
self._exceptions_factory = exceptions_factory
# TODO: Migrate things away from scoped_config in favor of the
# config_store. The config store can pull things from both the scoped
# config and environment variables (and potentially more in the
# future).
self._config_store = config_store
def create_client(self, service_name, region_name, is_secure=True,
endpoint_url=None, verify=None,
credentials=None, scoped_config=None,
api_version=None,
client_config=None):
responses = self._event_emitter.emit(
'choose-service-name', service_name=service_name)
service_name = first_non_none_response(responses, default=service_name)
service_model = self._load_service_model(service_name, api_version)
cls = self._create_client_class(service_name, service_model)
endpoint_bridge = ClientEndpointBridge(
self._endpoint_resolver, scoped_config, client_config,
service_signing_name=service_model.metadata.get('signingName'))
client_args = self._get_client_args(
service_model, region_name, is_secure, endpoint_url,
verify, credentials, scoped_config, client_config, endpoint_bridge)
service_client = cls(**client_args)
self._register_retries(service_client)
self._register_s3_events(
service_client, endpoint_bridge, endpoint_url, client_config,
scoped_config)
self._register_s3_control_events(
service_client, endpoint_bridge, endpoint_url, client_config,
scoped_config)
self._register_endpoint_discovery(
service_client, endpoint_url, client_config
)
return service_client
def create_client_class(self, service_name, api_version=None):
service_model = self._load_service_model(service_name, api_version)
return self._create_client_class(service_name, service_model)
def _create_client_class(self, service_name, service_model):
class_attributes = self._create_methods(service_model)
py_name_to_operation_name = self._create_name_mapping(service_model)
class_attributes['_PY_TO_OP_NAME'] = py_name_to_operation_name
bases = [BaseClient]
service_id = service_model.service_id.hyphenize()
self._event_emitter.emit(
'creating-client-class.%s' % service_id,
class_attributes=class_attributes,
base_classes=bases)
class_name = get_service_module_name(service_model)
cls = type(str(class_name), tuple(bases), class_attributes)
return cls
def _load_service_model(self, service_name, api_version=None):
json_model = self._loader.load_service_model(service_name, 'service-2',
api_version=api_version)
service_model = ServiceModel(json_model, service_name=service_name)
return service_model
def _register_retries(self, client):
retry_mode = client.meta.config.retries['mode']
if retry_mode == 'standard':
self._register_v2_standard_retries(client)
elif retry_mode == 'adaptive':
self._register_v2_standard_retries(client)
self._register_v2_adaptive_retries(client)
elif retry_mode == 'legacy':
self._register_legacy_retries(client)
def _register_v2_standard_retries(self, client):
max_attempts = client.meta.config.retries.get('total_max_attempts')
kwargs = {'client': client}
if max_attempts is not None:
kwargs['max_attempts'] = max_attempts
standard.register_retry_handler(**kwargs)
def _register_v2_adaptive_retries(self, client):
adaptive.register_retry_handler(client)
def _register_legacy_retries(self, client):
endpoint_prefix = client.meta.service_model.endpoint_prefix
service_id = client.meta.service_model.service_id
service_event_name = service_id.hyphenize()
# First, we load the entire retry config for all services,
# then pull out just the information we need.
original_config = self._loader.load_data('_retry')
if not original_config:
return
retries = self._transform_legacy_retries(client.meta.config.retries)
retry_config = self._retry_config_translator.build_retry_config(
endpoint_prefix, original_config.get('retry', {}),
original_config.get('definitions', {}),
retries
)
logger.debug("Registering retry handlers for service: %s",
client.meta.service_model.service_name)
handler = self._retry_handler_factory.create_retry_handler(
retry_config, endpoint_prefix)
unique_id = 'retry-config-%s' % service_event_name
client.meta.events.register(
'needs-retry.%s' % service_event_name, handler,
unique_id=unique_id
)
def _transform_legacy_retries(self, retries):
if retries is None:
return
copied_args = retries.copy()
if 'total_max_attempts' in retries:
copied_args = retries.copy()
copied_args['max_attempts'] = (
copied_args.pop('total_max_attempts') - 1)
return copied_args
def _get_retry_mode(self, client, config_store):
client_retries = client.meta.config.retries
if client_retries is not None and \
client_retries.get('mode') is not None:
return client_retries['mode']
return config_store.get_config_variable('retry_mode') or 'legacy'
def _register_endpoint_discovery(self, client, endpoint_url, config):
if endpoint_url is not None:
# Don't register any handlers in the case of a custom endpoint url
return
# Only attach handlers if the service supports discovery
if client.meta.service_model.endpoint_discovery_operation is None:
return
events = client.meta.events
service_id = client.meta.service_model.service_id.hyphenize()
enabled = False
if config and config.endpoint_discovery_enabled is not None:
enabled = config.endpoint_discovery_enabled
elif self._config_store:
enabled = self._config_store.get_config_variable(
'endpoint_discovery_enabled')
enabled = self._normalize_endpoint_discovery_config(enabled)
if enabled and self._requires_endpoint_discovery(client, enabled):
discover = enabled is True
manager = EndpointDiscoveryManager(client, always_discover=discover)
handler = EndpointDiscoveryHandler(manager)
handler.register(events, service_id)
else:
events.register('before-parameter-build',
block_endpoint_discovery_required_operations)
def _normalize_endpoint_discovery_config(self, enabled):
"""Config must either be a boolean-string or string-literal 'auto'"""
if isinstance(enabled, str):
enabled = enabled.lower().strip()
if enabled == 'auto':
return enabled
elif enabled in ('true', 'false'):
return ensure_boolean(enabled)
elif isinstance(enabled, bool):
return enabled
raise InvalidEndpointDiscoveryConfigurationError(config_value=enabled)
def _requires_endpoint_discovery(self, client, enabled):
if enabled == "auto":
return client.meta.service_model.endpoint_discovery_required
return enabled
def _register_s3_events(self, client, endpoint_bridge, endpoint_url,
client_config, scoped_config):
if client.meta.service_model.service_name != 's3':
return
S3RegionRedirector(endpoint_bridge, client).register()
S3ArnParamHandler().register(client.meta.events)
S3EndpointSetter(
endpoint_resolver=self._endpoint_resolver,
region=client.meta.region_name,
s3_config=client.meta.config.s3,
endpoint_url=endpoint_url,
partition=client.meta.partition
).register(client.meta.events)
self._set_s3_presign_signature_version(
client.meta, client_config, scoped_config)
def _register_s3_control_events(
self, client, endpoint_bridge,
endpoint_url, client_config, scoped_config
):
if client.meta.service_model.service_name != 's3control':
return
S3ControlArnParamHandler().register(client.meta.events)
S3ControlEndpointSetter(
endpoint_resolver=self._endpoint_resolver,
region=client.meta.region_name,
s3_config=client.meta.config.s3,
endpoint_url=endpoint_url,
partition=client.meta.partition
).register(client.meta.events)
def _set_s3_presign_signature_version(self, client_meta,
client_config, scoped_config):
# This will return the manually configured signature version, or None
# if none was manually set. If a customer manually sets the signature
# version, we always want to use what they set.
provided_signature_version = _get_configured_signature_version(
's3', client_config, scoped_config)
if provided_signature_version is not None:
return
# Check to see if the region is a region that we know about. If we
# don't know about a region, then we can safely assume it's a new
# region that is sigv4 only, since all new S3 regions only allow sigv4.
# The only exception is aws-global. This is a pseudo-region for the
# global endpoint, we should respect the signature versions it
# supports, which includes v2.
regions = self._endpoint_resolver.get_available_endpoints(
's3', client_meta.partition)
if client_meta.region_name != 'aws-global' and \
client_meta.region_name not in regions:
return
# If it is a region we know about, we want to default to sigv2, so here
# we check to see if it is available.
endpoint = self._endpoint_resolver.construct_endpoint(
's3', client_meta.region_name)
signature_versions = endpoint['signatureVersions']
if 's3' not in signature_versions:
return
# We now know that we're in a known region that supports sigv2 and
# the customer hasn't set a signature version so we default the
# signature version to sigv2.
client_meta.events.register(
'choose-signer.s3', self._default_s3_presign_to_sigv2)
def _default_s3_presign_to_sigv2(self, signature_version, **kwargs):
"""
Returns the 's3' (sigv2) signer if presigning an s3 request. This is
intended to be used to set the default signature version for the signer
to sigv2.
:type signature_version: str
:param signature_version: The current client signature version.
:type signing_name: str
:param signing_name: The signing name of the service.
        :return: The 's3'-prefixed signer name ('s3-query' or
            's3-presign-post') if the request is an s3 presign request,
            None otherwise
"""
for suffix in ['-query', '-presign-post']:
if signature_version.endswith(suffix):
return 's3' + suffix
def _get_client_args(self, service_model, region_name, is_secure,
endpoint_url, verify, credentials,
scoped_config, client_config, endpoint_bridge):
args_creator = ClientArgsCreator(
self._event_emitter, self._user_agent,
self._response_parser_factory, self._loader,
self._exceptions_factory, config_store=self._config_store)
return args_creator.get_client_args(
service_model, region_name, is_secure, endpoint_url,
verify, credentials, scoped_config, client_config, endpoint_bridge)
def _create_methods(self, service_model):
op_dict = {}
for operation_name in service_model.operation_names:
py_operation_name = xform_name(operation_name)
op_dict[py_operation_name] = self._create_api_method(
py_operation_name, operation_name, service_model)
return op_dict
def _create_name_mapping(self, service_model):
# py_name -> OperationName, for every operation available
# for a service.
mapping = {}
for operation_name in service_model.operation_names:
py_operation_name = xform_name(operation_name)
mapping[py_operation_name] = operation_name
return mapping
def _create_api_method(self, py_operation_name, operation_name,
service_model):
def _api_call(self, *args, **kwargs):
# We're accepting *args so that we can give a more helpful
# error message than TypeError: _api_call takes exactly
# 1 argument.
if args:
raise TypeError(
"%s() only accepts keyword arguments." % py_operation_name)
# The "self" in this scope is referring to the BaseClient.
return self._make_api_call(operation_name, kwargs)
_api_call.__name__ = str(py_operation_name)
# Add the docstring to the client method
operation_model = service_model.operation_model(operation_name)
docstring = ClientMethodDocstring(
operation_model=operation_model,
method_name=operation_name,
event_emitter=self._event_emitter,
method_description=operation_model.documentation,
example_prefix='response = client.%s' % py_operation_name,
include_signature=False
)
_api_call.__doc__ = docstring
return _api_call
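# Entry-point sketch: ClientCreator is driven by Session.create_client, so
# most callers never construct it directly. The region and bucket below are
# placeholders; credentials are assumed to be configured in the environment.
import botocore.session

session = botocore.session.get_session()
client = session.create_client('s3', region_name='us-east-1')
# Generated methods accept keyword arguments only, per _create_api_method:
response = client.list_objects_v2(Bucket='my-bucket')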
class ClientEndpointBridge(object):
"""Bridges endpoint data and client creation
This class handles taking out the relevant arguments from the endpoint
resolver and determining which values to use, taking into account any
client configuration options and scope configuration options.
This class also handles determining what, if any, region to use if no
    explicit region setting is provided. For example, the Amazon S3 client
    will use "us-east-1" by default if no region can be resolved."""
DEFAULT_ENDPOINT = '{service}.{region}.amazonaws.com'
_DUALSTACK_ENABLED_SERVICES = ['s3', 's3-control']
def __init__(self, endpoint_resolver, scoped_config=None,
client_config=None, default_endpoint=None,
service_signing_name=None):
self.service_signing_name = service_signing_name
self.endpoint_resolver = endpoint_resolver
self.scoped_config = scoped_config
self.client_config = client_config
self.default_endpoint = default_endpoint or self.DEFAULT_ENDPOINT
def resolve(self, service_name, region_name=None, endpoint_url=None,
is_secure=True):
region_name = self._check_default_region(service_name, region_name)
resolved = self.endpoint_resolver.construct_endpoint(
service_name, region_name)
# If we can't resolve the region, we'll attempt to get a global
# endpoint for non-regionalized services (iam, route53, etc)
if not resolved:
# TODO: fallback partition_name should be configurable in the
# future for users to define as needed.
resolved = self.endpoint_resolver.construct_endpoint(
service_name, region_name, partition_name='aws')
if resolved:
return self._create_endpoint(
resolved, service_name, region_name, endpoint_url, is_secure)
else:
return self._assume_endpoint(service_name, region_name,
endpoint_url, is_secure)
def _check_default_region(self, service_name, region_name):
if region_name is not None:
return region_name
# Use the client_config region if no explicit region was provided.
if self.client_config and self.client_config.region_name is not None:
return self.client_config.region_name
def _create_endpoint(self, resolved, service_name, region_name,
endpoint_url, is_secure):
explicit_region = region_name is not None
region_name, signing_region = self._pick_region_values(
resolved, region_name, endpoint_url)
if endpoint_url is None:
if self._is_s3_dualstack_mode(service_name):
endpoint_url = self._create_dualstack_endpoint(
service_name, region_name,
resolved['dnsSuffix'], is_secure, explicit_region)
else:
# Use the sslCommonName over the hostname for Python 2.6 compat.
hostname = resolved.get('sslCommonName', resolved.get('hostname'))
endpoint_url = self._make_url(hostname, is_secure,
resolved.get('protocols', []))
signature_version = self._resolve_signature_version(
service_name, resolved)
signing_name = self._resolve_signing_name(service_name, resolved)
return self._create_result(
service_name=service_name, region_name=region_name,
signing_region=signing_region, signing_name=signing_name,
endpoint_url=endpoint_url, metadata=resolved,
signature_version=signature_version)
def _is_s3_dualstack_mode(self, service_name):
if service_name not in self._DUALSTACK_ENABLED_SERVICES:
return False
# TODO: This normalization logic is duplicated from the
# ClientArgsCreator class. Consolidate everything to
# ClientArgsCreator. _resolve_signature_version also has similarly
# duplicated logic.
client_config = self.client_config
if client_config is not None and client_config.s3 is not None and \
'use_dualstack_endpoint' in client_config.s3:
# Client config trumps scoped config.
return client_config.s3['use_dualstack_endpoint']
if self.scoped_config is None:
return False
enabled = self.scoped_config.get('s3', {}).get(
'use_dualstack_endpoint', False)
if enabled in [True, 'True', 'true']:
return True
return False
def _create_dualstack_endpoint(self, service_name, region_name,
dns_suffix, is_secure, explicit_region):
if not explicit_region and region_name == 'aws-global':
# If the region_name passed was not explicitly set, default to
# us-east-1 instead of the modeled default aws-global. Dualstack
            # does not support aws-global.
region_name = 'us-east-1'
hostname = '{service}.dualstack.{region}.{dns_suffix}'.format(
service=service_name, region=region_name,
dns_suffix=dns_suffix)
        # Dualstack supports http and https, so we're hardcoding this value
        # for now. This can potentially move into the endpoints.json file.
return self._make_url(hostname, is_secure, ['http', 'https'])
def _assume_endpoint(self, service_name, region_name, endpoint_url,
is_secure):
if endpoint_url is None:
# Expand the default hostname URI template.
hostname = self.default_endpoint.format(
service=service_name, region=region_name)
endpoint_url = self._make_url(hostname, is_secure,
['http', 'https'])
logger.debug('Assuming an endpoint for %s, %s: %s',
service_name, region_name, endpoint_url)
# We still want to allow the user to provide an explicit version.
signature_version = self._resolve_signature_version(
service_name, {'signatureVersions': ['v4']})
signing_name = self._resolve_signing_name(service_name, resolved={})
return self._create_result(
service_name=service_name, region_name=region_name,
signing_region=region_name, signing_name=signing_name,
signature_version=signature_version, endpoint_url=endpoint_url,
metadata={})
def _create_result(self, service_name, region_name, signing_region,
signing_name, endpoint_url, signature_version,
metadata):
return {
'service_name': service_name,
'region_name': region_name,
'signing_region': signing_region,
'signing_name': signing_name,
'endpoint_url': endpoint_url,
'signature_version': signature_version,
'metadata': metadata
}
def _make_url(self, hostname, is_secure, supported_protocols):
if is_secure and 'https' in supported_protocols:
scheme = 'https'
else:
scheme = 'http'
return '%s://%s' % (scheme, hostname)
def _resolve_signing_name(self, service_name, resolved):
# CredentialScope overrides everything else.
if 'credentialScope' in resolved \
and 'service' in resolved['credentialScope']:
return resolved['credentialScope']['service']
# Use the signingName from the model if present.
if self.service_signing_name:
return self.service_signing_name
        # Just assume it is the same as the service name.
return service_name
def _pick_region_values(self, resolved, region_name, endpoint_url):
signing_region = region_name
if endpoint_url is None:
# Do not use the region name or signing name from the resolved
# endpoint if the user explicitly provides an endpoint_url. This
# would happen if we resolve to an endpoint where the service has
# a "defaults" section that overrides all endpoint with a single
# hostname and credentialScope. This has been the case historically
# for how STS has worked. The only way to resolve an STS endpoint
# was to provide a region_name and an endpoint_url. In that case,
# we would still resolve an endpoint, but we would not use the
# resolved endpointName or signingRegion because we want to allow
# custom endpoints.
region_name = resolved['endpointName']
signing_region = region_name
if 'credentialScope' in resolved \
and 'region' in resolved['credentialScope']:
signing_region = resolved['credentialScope']['region']
return region_name, signing_region
def _resolve_signature_version(self, service_name, resolved):
configured_version = _get_configured_signature_version(
service_name, self.client_config, self.scoped_config)
if configured_version is not None:
return configured_version
# Pick a signature version from the endpoint metadata if present.
if 'signatureVersions' in resolved:
potential_versions = resolved['signatureVersions']
if service_name == 's3':
return 's3v4'
if 'v4' in potential_versions:
return 'v4'
# Now just iterate over the signature versions in order until we
# find the first one that is known to Botocore.
for known in potential_versions:
if known in AUTH_TYPE_MAPS:
return known
raise UnknownSignatureVersionError(
signature_version=resolved.get('signatureVersions'))
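# Resolution sketch, assuming access to the session's endpoint resolver
# component (an internal interface, used here only for illustration).
import botocore.session
from botocore.client import ClientEndpointBridge

session = botocore.session.get_session()
bridge = ClientEndpointBridge(session.get_component('endpoint_resolver'))
resolved = bridge.resolve('s3', region_name='us-west-2')
# resolved includes service_name, region_name, signing_region, signing_name,
# endpoint_url, signature_version ('s3v4' for s3), and endpoint metadata.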
class BaseClient(object):
# This is actually reassigned with the py->op_name mapping
# when the client creator creates the subclass. This value is used
# because calls such as client.get_paginator('list_objects') use the
# snake_case name, but we need to know the ListObjects form.
# xform_name() does the ListObjects->list_objects conversion, but
# we need the reverse mapping here.
_PY_TO_OP_NAME = {}
def __init__(self, serializer, endpoint, response_parser,
event_emitter, request_signer, service_model, loader,
client_config, partition, exceptions_factory):
self._serializer = serializer
self._endpoint = endpoint
self._response_parser = response_parser
self._request_signer = request_signer
self._cache = {}
self._loader = loader
self._client_config = client_config
self.meta = ClientMeta(event_emitter, self._client_config,
endpoint.host, service_model,
self._PY_TO_OP_NAME, partition)
self._exceptions_factory = exceptions_factory
self._exceptions = None
self._register_handlers()
def __getattr__(self, item):
event_name = 'getattr.%s.%s' % (
self._service_model.service_id.hyphenize(), item
)
handler, event_response = self.meta.events.emit_until_response(
event_name, client=self)
if event_response is not None:
return event_response
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, item)
)
def _register_handlers(self):
# Register the handler required to sign requests.
service_id = self.meta.service_model.service_id.hyphenize()
self.meta.events.register(
'request-created.%s' % service_id,
self._request_signer.handler
)
@property
def _service_model(self):
return self.meta.service_model
def _make_api_call(self, operation_name, api_params):
operation_model = self._service_model.operation_model(operation_name)
service_name = self._service_model.service_name
history_recorder.record('API_CALL', {
'service': service_name,
'operation': operation_name,
'params': api_params,
})
if operation_model.deprecated:
logger.debug('Warning: %s.%s() is deprecated',
service_name, operation_name)
request_context = {
'client_region': self.meta.region_name,
'client_config': self.meta.config,
'has_streaming_input': operation_model.has_streaming_input,
'auth_type': operation_model.auth_type,
}
request_dict = self._convert_to_request_dict(
api_params, operation_model, context=request_context)
service_id = self._service_model.service_id.hyphenize()
handler, event_response = self.meta.events.emit_until_response(
'before-call.{service_id}.{operation_name}'.format(
service_id=service_id,
operation_name=operation_name),
model=operation_model, params=request_dict,
request_signer=self._request_signer, context=request_context)
if event_response is not None:
http, parsed_response = event_response
else:
http, parsed_response = self._make_request(
operation_model, request_dict, request_context)
self.meta.events.emit(
'after-call.{service_id}.{operation_name}'.format(
service_id=service_id,
operation_name=operation_name),
http_response=http, parsed=parsed_response,
model=operation_model, context=request_context
)
if http.status_code >= 300:
error_code = parsed_response.get("Error", {}).get("Code")
error_class = self.exceptions.from_code(error_code)
raise error_class(parsed_response, operation_name)
else:
return parsed_response
def _make_request(self, operation_model, request_dict, request_context):
try:
return self._endpoint.make_request(operation_model, request_dict)
except Exception as e:
self.meta.events.emit(
'after-call-error.{service_id}.{operation_name}'.format(
service_id=self._service_model.service_id.hyphenize(),
operation_name=operation_model.name),
exception=e, context=request_context
)
raise
def _convert_to_request_dict(self, api_params, operation_model,
context=None):
api_params = self._emit_api_params(
api_params, operation_model, context)
request_dict = self._serializer.serialize_to_request(
api_params, operation_model)
if not self._client_config.inject_host_prefix:
request_dict.pop('host_prefix', None)
prepare_request_dict(request_dict, endpoint_url=self._endpoint.host,
user_agent=self._client_config.user_agent,
context=context)
return request_dict
def _emit_api_params(self, api_params, operation_model, context):
# Given the API params provided by the user and the operation_model
# we can serialize the request to a request_dict.
operation_name = operation_model.name
# Emit an event that allows users to modify the parameters at the
# beginning of the method. It allows handlers to modify existing
# parameters or return a new set of parameters to use.
service_id = self._service_model.service_id.hyphenize()
responses = self.meta.events.emit(
'provide-client-params.{service_id}.{operation_name}'.format(
service_id=service_id,
operation_name=operation_name),
params=api_params, model=operation_model, context=context)
api_params = first_non_none_response(responses, default=api_params)
event_name = (
'before-parameter-build.{service_id}.{operation_name}')
self.meta.events.emit(
event_name.format(
service_id=service_id,
operation_name=operation_name),
params=api_params, model=operation_model, context=context)
return api_params
def get_paginator(self, operation_name):
"""Create a paginator for an operation.
:type operation_name: string
        :param operation_name: The operation name. This is the same name
            as the method name on the client. For example, if the
            method name is ``create_foo``, and you'd normally invoke the
            operation as ``client.create_foo(**kwargs)``, then, provided
            the ``create_foo`` operation can be paginated, you can use
            ``client.get_paginator("create_foo")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
"""
if not self.can_paginate(operation_name):
raise OperationNotPageableError(operation_name=operation_name)
else:
actual_operation_name = self._PY_TO_OP_NAME[operation_name]
# Create a new paginate method that will serve as a proxy to
# the underlying Paginator.paginate method. This is needed to
# attach a docstring to the method.
def paginate(self, **kwargs):
return Paginator.paginate(self, **kwargs)
paginator_config = self._cache['page_config'][
actual_operation_name]
# Add the docstring for the paginate method.
paginate.__doc__ = PaginatorDocstring(
paginator_name=actual_operation_name,
event_emitter=self.meta.events,
service_model=self.meta.service_model,
paginator_config=paginator_config,
include_signature=False
)
# Rename the paginator class based on the type of paginator.
paginator_class_name = str('%s.Paginator.%s' % (
get_service_module_name(self.meta.service_model),
actual_operation_name))
# Create the new paginator class
documented_paginator_cls = type(
paginator_class_name, (Paginator,), {'paginate': paginate})
operation_model = self._service_model.operation_model(actual_operation_name)
paginator = documented_paginator_cls(
getattr(self, operation_name),
paginator_config,
operation_model)
return paginator
def can_paginate(self, operation_name):
"""Check if an operation can be paginated.
:type operation_name: string
        :param operation_name: The operation name. This is the same name
            as the method name on the client. For example, if the
            method name is ``create_foo``, and you'd normally invoke the
            operation as ``client.create_foo(**kwargs)``, then, provided
            the ``create_foo`` operation can be paginated, you can use
            ``client.get_paginator("create_foo")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
"""
if 'page_config' not in self._cache:
try:
page_config = self._loader.load_service_model(
self._service_model.service_name,
'paginators-1',
self._service_model.api_version)['pagination']
self._cache['page_config'] = page_config
except DataNotFoundError:
self._cache['page_config'] = {}
actual_operation_name = self._PY_TO_OP_NAME[operation_name]
return actual_operation_name in self._cache['page_config']
def _get_waiter_config(self):
if 'waiter_config' not in self._cache:
try:
waiter_config = self._loader.load_service_model(
self._service_model.service_name,
'waiters-2',
self._service_model.api_version)
self._cache['waiter_config'] = waiter_config
except DataNotFoundError:
self._cache['waiter_config'] = {}
return self._cache['waiter_config']
def get_waiter(self, waiter_name):
"""Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
config = self._get_waiter_config()
if not config:
raise ValueError("Waiter does not exist: %s" % waiter_name)
model = waiter.WaiterModel(config)
mapping = {}
for name in model.waiter_names:
mapping[xform_name(name)] = name
if waiter_name not in mapping:
raise ValueError("Waiter does not exist: %s" % waiter_name)
return waiter.create_waiter_with_client(
mapping[waiter_name], model, self)
@CachedProperty
def waiter_names(self):
"""Returns a list of all available waiters."""
config = self._get_waiter_config()
if not config:
return []
model = waiter.WaiterModel(config)
        # The waiter config is a dict; we just want the waiter names,
        # which are the keys in the dict.
return [xform_name(name) for name in model.waiter_names]
@property
def exceptions(self):
if self._exceptions is None:
self._exceptions = self._load_exceptions()
return self._exceptions
def _load_exceptions(self):
return self._exceptions_factory.create_client_exceptions(
self._service_model)
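# Usage sketch for the paginator and waiter plumbing on BaseClient above;
# the bucket name is a placeholder and credentials are assumed to be
# configured in the environment.
import botocore.session

session = botocore.session.get_session()
client = session.create_client('s3', region_name='us-east-1')

paginator = client.get_paginator('list_objects_v2')
for page in paginator.paginate(Bucket='my-bucket'):
    for obj in page.get('Contents', []):
        print(obj['Key'])

waiter = client.get_waiter('bucket_exists')    # from the s3 waiters-2 model
waiter.wait(Bucket='my-bucket')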
class ClientMeta(object):
"""Holds additional client methods.
This class holds additional information for clients. It exists for
two reasons:
* To give advanced functionality to clients
* To namespace additional client attributes from the operation
names which are mapped to methods at runtime. This avoids
ever running into collisions with operation names.
"""
def __init__(self, events, client_config, endpoint_url, service_model,
method_to_api_mapping, partition):
self.events = events
self._client_config = client_config
self._endpoint_url = endpoint_url
self._service_model = service_model
self._method_to_api_mapping = method_to_api_mapping
self._partition = partition
@property
def service_model(self):
return self._service_model
@property
def region_name(self):
return self._client_config.region_name
@property
def endpoint_url(self):
return self._endpoint_url
@property
def config(self):
return self._client_config
@property
def method_to_api_mapping(self):
return self._method_to_api_mapping
@property
def partition(self):
return self._partition
def _get_configured_signature_version(service_name, client_config,
scoped_config):
"""
Gets the manually configured signature version.
:returns: the customer configured signature version, or None if no
signature version was configured.
"""
# Client config overrides everything.
if client_config and client_config.signature_version is not None:
return client_config.signature_version
# Scoped config overrides picking from the endpoint metadata.
if scoped_config is not None:
# A given service may have service specific configuration in the
# config file, so we need to check there as well.
service_config = scoped_config.get(service_name)
if service_config is not None and isinstance(service_config, dict):
version = service_config.get('signature_version')
if version:
logger.debug(
"Switching signature version for service %s "
"to version %s based on config file override.",
service_name, version)
return version
return None
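# Override sketch for the precedence described above: a signature version
# set on the client config wins over the scoped (file) config, which in
# turn wins over the endpoint metadata defaults.
import botocore.session
from botocore.config import Config

session = botocore.session.get_session()
client = session.create_client(
    's3', region_name='us-east-1',
    config=Config(signature_version='s3v4'))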
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/session.py
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
This module contains the main interface to the botocore package, the
Session object.
"""
import copy
import logging
import os
import platform
import re
import socket
import warnings
from botocore import __version__
from botocore import UNSIGNED
import botocore.configloader
import botocore.credentials
import botocore.client
from botocore.configprovider import ConfigValueStore
from botocore.configprovider import ConfigChainFactory
from botocore.configprovider import create_botocore_default_config_mapping
from botocore.configprovider import BOTOCORE_DEFAUT_SESSION_VARIABLES
from botocore.exceptions import (
ConfigNotFound, ProfileNotFound, UnknownServiceError,
PartialCredentialsError,
)
from botocore.errorfactory import ClientExceptionsFactory
from botocore import handlers
from botocore.hooks import HierarchicalEmitter, first_non_none_response
from botocore.hooks import EventAliaser
from botocore.loaders import create_loader
from botocore.parsers import ResponseParserFactory
from botocore.regions import EndpointResolver
from botocore.model import ServiceModel
from botocore import monitoring
from botocore import paginate
from botocore import waiter
from botocore import retryhandler, translate
from botocore import utils
from botocore.utils import EVENT_ALIASES, validate_region_name
from botocore.compat import MutableMapping
logger = logging.getLogger(__name__)
class Session(object):
"""
The Session object collects together useful functionality
from `botocore` as well as important data such as configuration
information and credentials into a single, easy-to-use object.
:ivar available_profiles: A list of profiles defined in the config
file associated with this session.
:ivar profile: The current profile.
"""
SESSION_VARIABLES = copy.copy(BOTOCORE_DEFAUT_SESSION_VARIABLES)
#: The default format string to use when configuring the botocore logger.
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
def __init__(self, session_vars=None, event_hooks=None,
include_builtin_handlers=True, profile=None):
"""
Create a new Session object.
:type session_vars: dict
:param session_vars: A dictionary that is used to override some or all
of the environment variables associated with this session. The
key/value pairs defined in this dictionary will override the
corresponding variables defined in ``SESSION_VARIABLES``.
:type event_hooks: BaseEventHooks
:param event_hooks: The event hooks object to use. If one is not
provided, an event hooks object will be automatically created
for you.
:type include_builtin_handlers: bool
:param include_builtin_handlers: Indicates whether or not to
automatically register builtin handlers.
:type profile: str
:param profile: The name of the profile to use for this
session. Note that the profile can only be set when
the session is created.
"""
if event_hooks is None:
self._original_handler = HierarchicalEmitter()
else:
self._original_handler = event_hooks
self._events = EventAliaser(self._original_handler)
if include_builtin_handlers:
self._register_builtin_handlers(self._events)
self.user_agent_name = 'Botocore'
self.user_agent_version = __version__
self.user_agent_extra = ''
# The _profile attribute is just used to cache the value
# of the current profile to avoid going through the normal
        # config lookup process on each access.
self._profile = None
self._config = None
self._credentials = None
self._profile_map = None
        # This is a dict that stores per-session specific config variable
        # overrides via set_config_variable().
self._session_instance_vars = {}
if profile is not None:
self._session_instance_vars['profile'] = profile
self._client_config = None
self._last_client_region_used = None
self._components = ComponentLocator()
self._internal_components = ComponentLocator()
self._register_components()
self.session_var_map = SessionVarDict(self, self.SESSION_VARIABLES)
if session_vars is not None:
self.session_var_map.update(session_vars)
def _register_components(self):
self._register_credential_provider()
self._register_data_loader()
self._register_endpoint_resolver()
self._register_event_emitter()
self._register_response_parser_factory()
self._register_exceptions_factory()
self._register_config_store()
self._register_monitor()
def _register_event_emitter(self):
self._components.register_component('event_emitter', self._events)
def _register_credential_provider(self):
self._components.lazy_register_component(
'credential_provider', self._create_credential_resolver)
def _create_credential_resolver(self):
return botocore.credentials.create_credential_resolver(
self, region_name=self._last_client_region_used
)
def _register_data_loader(self):
self._components.lazy_register_component(
'data_loader',
lambda: create_loader(self.get_config_variable('data_path')))
def _register_endpoint_resolver(self):
def create_default_resolver():
loader = self.get_component('data_loader')
endpoints = loader.load_data('endpoints')
return EndpointResolver(endpoints)
self._internal_components.lazy_register_component(
'endpoint_resolver', create_default_resolver)
def _register_response_parser_factory(self):
self._components.register_component('response_parser_factory',
ResponseParserFactory())
def _register_exceptions_factory(self):
self._internal_components.register_component(
'exceptions_factory', ClientExceptionsFactory())
def _register_builtin_handlers(self, events):
for spec in handlers.BUILTIN_HANDLERS:
if len(spec) == 2:
event_name, handler = spec
self.register(event_name, handler)
else:
event_name, handler, register_type = spec
if register_type is handlers.REGISTER_FIRST:
self._events.register_first(event_name, handler)
elif register_type is handlers.REGISTER_LAST:
self._events.register_last(event_name, handler)
def _register_config_store(self):
config_store_component = ConfigValueStore(
mapping=create_botocore_default_config_mapping(self)
)
self._components.register_component('config_store',
config_store_component)
def _register_monitor(self):
self._internal_components.lazy_register_component(
'monitor', self._create_csm_monitor)
def _create_csm_monitor(self):
if self.get_config_variable('csm_enabled'):
client_id = self.get_config_variable('csm_client_id')
host = self.get_config_variable('csm_host')
port = self.get_config_variable('csm_port')
handler = monitoring.Monitor(
adapter=monitoring.MonitorEventAdapter(),
publisher=monitoring.SocketPublisher(
socket=socket.socket(socket.AF_INET, socket.SOCK_DGRAM),
host=host,
port=port,
serializer=monitoring.CSMSerializer(
csm_client_id=client_id)
)
)
return handler
return None
@property
def available_profiles(self):
return list(self._build_profile_map().keys())
def _build_profile_map(self):
# This will build the profile map if it has not been created,
# otherwise it will return the cached value. The profile map
# is a mapping of profile names to the config values for each
# profile.
if self._profile_map is None:
self._profile_map = self.full_config['profiles']
return self._profile_map
@property
def profile(self):
if self._profile is None:
profile = self.get_config_variable('profile')
self._profile = profile
return self._profile
def get_config_variable(self, logical_name, methods=None):
if methods is not None:
return self._get_config_variable_with_custom_methods(
logical_name, methods)
return self.get_component('config_store').get_config_variable(
logical_name)
def _get_config_variable_with_custom_methods(self, logical_name, methods):
# If a custom list of methods was supplied we need to preserve the
# behavior with the new system. To do so a new chain that is a copy of
# the old one will be constructed, but only with the supplied methods
# being added to the chain. This chain will be consulted for a value
# and then thrown out. This is not efficient, and the methods arg is
# not used within botocore; it exists only for backwards compatibility.
chain_builder = SubsetChainConfigFactory(session=self, methods=methods)
mapping = create_botocore_default_config_mapping(self)
for name, config_options in self.session_var_map.items():
config_name, env_vars, default, typecast = config_options
build_chain_config_args = {
'conversion_func': typecast,
'default': default,
}
if 'instance' in methods:
build_chain_config_args['instance_name'] = name
if 'env' in methods:
build_chain_config_args['env_var_names'] = env_vars
if 'config' in methods:
build_chain_config_args['config_property_name'] = config_name
mapping[name] = chain_builder.create_config_chain(
**build_chain_config_args
)
config_store_component = ConfigValueStore(
mapping=mapping
)
value = config_store_component.get_config_variable(logical_name)
return value
def set_config_variable(self, logical_name, value):
"""Set a configuration variable to a specific value.
By using this method, you can override the normal lookup
process used in ``get_config_variable`` by explicitly setting
a value. Subsequent calls to ``get_config_variable`` will
use the ``value``. This gives you per-session specific
configuration values.
::
>>> # Assume logical name 'foo' maps to env var 'FOO'
>>> os.environ['FOO'] = 'myvalue'
>>> s.get_config_variable('foo')
'myvalue'
>>> s.set_config_variable('foo', 'othervalue')
>>> s.get_config_variable('foo')
'othervalue'
:type logical_name: str
:param logical_name: The logical name of the session variable
you want to set. These are the keys in ``SESSION_VARIABLES``.
:param value: The value to associate with the config variable.
"""
logger.debug(
"Setting config variable for %s to %r",
logical_name,
value,
)
self._session_instance_vars[logical_name] = value
def instance_variables(self):
return copy.copy(self._session_instance_vars)
def get_scoped_config(self):
"""
Returns the config values from the config file scoped to the current
profile.
The configuration data is loaded **only** from the config file.
It does not resolve variables based on different locations
(e.g. first from the session instance, then from environment
variables, then from the config file). If you want this lookup
behavior, use the ``get_config_variable`` method instead.
Note that this configuration is specific to a single profile (the
``profile`` session variable).
If the ``profile`` session variable is set and the profile does
not exist in the config file, a ``ProfileNotFound`` exception
will be raised.
:raises: ConfigNotFound, ConfigParseError, ProfileNotFound
:rtype: dict
"""
profile_name = self.get_config_variable('profile')
profile_map = self._build_profile_map()
# If a profile is not explicitly set return the default
# profile config or an empty config dict if we don't have
# a default profile.
if profile_name is None:
return profile_map.get('default', {})
elif profile_name not in profile_map:
# Otherwise if they specified a profile, it has to
# exist (even if it's the default profile) otherwise
# we complain.
raise ProfileNotFound(profile=profile_name)
else:
return profile_map[profile_name]
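# Editor's illustration (hedged, not part of botocore): with a profile
# named "dev" defined in ~/.aws/config, a scoped lookup might look like:
#     session.set_config_variable('profile', 'dev')
#     session.get_scoped_config().get('region')  # value from [profile dev]
# The profile name "dev" is hypothetical, used only for illustration.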
@property
def full_config(self):
"""Return the parsed config file.
The ``get_config`` method returns the config associated with the
specified profile. This property returns the contents of the
**entire** config file.
:rtype: dict
"""
if self._config is None:
try:
config_file = self.get_config_variable('config_file')
self._config = botocore.configloader.load_config(config_file)
except ConfigNotFound:
self._config = {'profiles': {}}
try:
# Now we need to inject the profiles from the
# credentials file. We don't actually need the values
# in the creds file, only the profile names so that we
# can validate the user is not referring to a nonexistent
# profile.
cred_file = self.get_config_variable('credentials_file')
cred_profiles = botocore.configloader.raw_config_parse(
cred_file)
for profile in cred_profiles:
cred_vars = cred_profiles[profile]
if profile not in self._config['profiles']:
self._config['profiles'][profile] = cred_vars
else:
self._config['profiles'][profile].update(cred_vars)
except ConfigNotFound:
pass
return self._config
def get_default_client_config(self):
"""Retrieves the default config for creating clients
:rtype: botocore.client.Config
:returns: The default client config object when creating clients. If
the value is ``None`` then there is no default config object
attached to the session.
"""
return self._client_config
def set_default_client_config(self, client_config):
"""Sets the default config for creating clients
:type client_config: botocore.client.Config
:param client_config: The default client config object when creating
clients. If the value is ``None`` then there is no default config
object attached to the session.
"""
self._client_config = client_config
def set_credentials(self, access_key, secret_key, token=None):
"""
Manually create credentials for this session. If you would
prefer to use botocore without a config file, environment variables,
or IAM roles, you can pass explicit credentials into this
method to establish credentials for this session.
:type access_key: str
:param access_key: The access key part of the credentials.
:type secret_key: str
:param secret_key: The secret key part of the credentials.
:type token: str
:param token: An optional session token used by STS session
credentials.
"""
self._credentials = botocore.credentials.Credentials(access_key,
secret_key,
token)
def get_credentials(self):
"""
Return the :class:`botocore.credentials.Credentials` object
associated with this session. If the credentials have not
yet been loaded, this will attempt to load them. If they
have already been loaded, this will return the cached
credentials.
"""
if self._credentials is None:
self._credentials = self._components.get_component(
'credential_provider').load_credentials()
return self._credentials
def user_agent(self):
"""
Return a string suitable for use as a User-Agent header.
The string will be of the form:
<agent_name>/<agent_version> Python/<py_ver> <plat_name>/<plat_ver> <exec_env>
Where:
- agent_name is the value of the `user_agent_name` attribute
of the session object (`Botocore` by default).
- agent_version is the value of the `user_agent_version`
attribute of the session object (the botocore version by default).
- py_ver is the version of the Python interpreter being used.
- plat_name is the name of the platform (e.g. Darwin)
- plat_ver is the version of the platform
- exec_env is exec-env/$AWS_EXECUTION_ENV
If ``user_agent_extra`` is not empty, then this value will be
appended to the end of the user agent string.
"""
base = '%s/%s Python/%s %s/%s' % (self.user_agent_name,
self.user_agent_version,
platform.python_version(),
platform.system(),
platform.release())
if os.environ.get('AWS_EXECUTION_ENV') is not None:
base += ' exec-env/%s' % os.environ.get('AWS_EXECUTION_ENV')
if self.user_agent_extra:
base += ' %s' % self.user_agent_extra
return base
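# Editor's illustration (hedged): a typical rendered value looks like
#     Botocore/1.12.0 Python/3.7.4 Linux/4.14.133 exec-env/AWS_Lambda
# where every version number above is a placeholder, not a pinned value.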
def get_data(self, data_path):
"""
Retrieve the data associated with `data_path`.
:type data_path: str
:param data_path: The path to the data you wish to retrieve.
"""
return self.get_component('data_loader').load_data(data_path)
def get_service_model(self, service_name, api_version=None):
"""Get the service model object.
:type service_name: string
:param service_name: The service name
:type api_version: string
:param api_version: The API version of the service. If none is
provided, then the latest API version will be used.
:rtype: L{botocore.model.ServiceModel}
:return: The botocore service model for the service.
"""
service_description = self.get_service_data(service_name, api_version)
return ServiceModel(service_description, service_name=service_name)
def get_waiter_model(self, service_name, api_version=None):
loader = self.get_component('data_loader')
waiter_config = loader.load_service_model(
service_name, 'waiters-2', api_version)
return waiter.WaiterModel(waiter_config)
def get_paginator_model(self, service_name, api_version=None):
loader = self.get_component('data_loader')
paginator_config = loader.load_service_model(
service_name, 'paginators-1', api_version)
return paginate.PaginatorModel(paginator_config)
def get_service_data(self, service_name, api_version=None):
"""
Retrieve the fully merged data associated with a service.
"""
data_path = service_name
service_data = self.get_component('data_loader').load_service_model(
data_path,
type_name='service-2',
api_version=api_version
)
service_id = EVENT_ALIASES.get(service_name, service_name)
self._events.emit('service-data-loaded.%s' % service_id,
service_data=service_data,
service_name=service_name, session=self)
return service_data
def get_available_services(self):
"""
Return a list of names of available services.
"""
return self.get_component('data_loader')\
.list_available_services(type_name='service-2')
def set_debug_logger(self, logger_name='botocore'):
"""
Convenience function to quickly configure full debug output
to go to the console.
"""
self.set_stream_logger(logger_name, logging.DEBUG)
def set_stream_logger(self, logger_name, log_level, stream=None,
format_string=None):
"""
Convenience method to configure a stream logger.
:type logger_name: str
:param logger_name: The name of the logger to configure
:type log_level: str
:param log_level: The log level to set for the logger. This
is any param supported by the ``.setLevel()`` method of
a ``Logger`` object.
:type stream: file
:param stream: A file like object to log to. If none is provided
then sys.stderr will be used.
:type format_string: str
:param format_string: The format string to use for the log
formatter. If none is provided this will default to
``self.LOG_FORMAT``.
"""
log = logging.getLogger(logger_name)
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(stream)
ch.setLevel(log_level)
# create formatter
if format_string is None:
format_string = self.LOG_FORMAT
formatter = logging.Formatter(format_string)
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
log.addHandler(ch)
def set_file_logger(self, log_level, path, logger_name='botocore'):
"""
Convenience function to quickly configure any level of logging
to a file.
:type log_level: int
:param log_level: A log level as specified in the `logging` module
:type path: string
:param path: Path to the log file. The file will be created
if it doesn't already exist.
"""
log = logging.getLogger(logger_name)
log.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.FileHandler(path)
ch.setLevel(log_level)
# create formatter
formatter = logging.Formatter(self.LOG_FORMAT)
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
log.addHandler(ch)
def register(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
"""Register a handler with an event.
:type event_name: str
:param event_name: The name of the event.
:type handler: callable
:param handler: The callback to invoke when the event
is emitted. This object must be callable, and must
accept ``**kwargs``. If either of these preconditions is
not met, a ``ValueError`` will be raised.
:type unique_id: str
:param unique_id: An optional identifier to associate with the
registration. A unique_id can only be used once for
the entire session registration (unless it is unregistered).
This can be used to prevent an event handler from being
registered twice.
:type unique_id_uses_count: boolean
:param unique_id_uses_count: Specifies if the event should maintain
a count when a ``unique_id`` is registered and unregistered. The
event can only be completely unregistered once every register call
using the unique id has been matched by an ``unregister`` call.
If ``unique_id`` is specified, subsequent ``register``
calls must use the same value for ``unique_id_uses_count``
as the ``register`` call that first registered the event.
:raises ValueError: If the call to ``register`` uses ``unique_id``
but the value for ``unique_id_uses_count`` differs from the
``unique_id_uses_count`` value declared by the very first
``register`` call for that ``unique_id``.
"""
self._events.register(event_name, handler, unique_id,
unique_id_uses_count=unique_id_uses_count)
def unregister(self, event_name, handler=None, unique_id=None,
unique_id_uses_count=False):
"""Unregister a handler with an event.
:type event_name: str
:param event_name: The name of the event.
:type handler: callable
:param handler: The callback to unregister.
:type unique_id: str
:param unique_id: A unique identifier identifying the callback
to unregister. You can provide either the handler or the
unique_id, you do not have to provide both.
:type unique_id_uses_count: boolean
:param unique_id_uses_count: Specifies if the event should maintain
a count when a ``unique_id`` is registered and unregistered. The
event can only be completely unregistered once every ``register``
call using the ``unique_id`` has been matched by an ``unregister``
call. If the ``unique_id`` is specified, subsequent
``unregister`` calls must use the same value for
``unique_id_uses_count`` as the ``register`` call that first
registered the event.
:raises ValueError: If the call to ``unregister`` uses ``unique_id``
but the value for ``unique_id_uses_count`` differs from the
``unique_id_uses_count`` value declared by the very first
``register`` call for that ``unique_id``.
"""
self._events.unregister(event_name, handler=handler,
unique_id=unique_id,
unique_id_uses_count=unique_id_uses_count)
def emit(self, event_name, **kwargs):
return self._events.emit(event_name, **kwargs)
def emit_first_non_none_response(self, event_name, **kwargs):
responses = self._events.emit(event_name, **kwargs)
return first_non_none_response(responses)
def get_component(self, name):
try:
return self._components.get_component(name)
except ValueError:
if name in ['endpoint_resolver', 'exceptions_factory']:
warnings.warn(
'Fetching the %s component with the get_component() '
'method is deprecated as the component has always been '
'considered an internal interface of botocore' % name,
DeprecationWarning)
return self._internal_components.get_component(name)
raise
def _get_internal_component(self, name):
# While this method may be called by botocore classes outside of the
# Session, this method should **never** be used by a class that lives
# outside of botocore.
return self._internal_components.get_component(name)
def _register_internal_component(self, name, component):
# While this method may be called by botocore classes outside of the
# Session, this method should **never** be used by a class that lives
# outside of botocore.
return self._internal_components.register_component(name, component)
def register_component(self, name, component):
self._components.register_component(name, component)
def lazy_register_component(self, name, component):
self._components.lazy_register_component(name, component)
def create_client(self, service_name, region_name=None, api_version=None,
use_ssl=True, verify=None, endpoint_url=None,
aws_access_key_id=None, aws_secret_access_key=None,
aws_session_token=None, config=None):
"""Create a botocore client.
:type service_name: string
:param service_name: The name of the service for which a client will
be created. You can use the ``Session.get_available_services()``
method to get a list of all available service names.
:type region_name: string
:param region_name: The name of the region associated with the client.
A client is associated with a single region.
:type api_version: string
:param api_version: The API version to use. By default, botocore will
use the latest API version when creating a client. You only need
to specify this parameter if you want to use a previous API version
of the client.
:type use_ssl: boolean
:param use_ssl: Whether or not to use SSL. By default, SSL is used.
Note that not all services support non-SSL connections.
:type verify: boolean/string
:param verify: Whether or not to verify SSL certificates.
By default SSL certificates are verified. You can provide the
following values:
* False - do not validate SSL certificates. SSL will still be
used (unless use_ssl is False), but SSL certificates
will not be verified.
* path/to/cert/bundle.pem - A filename of the CA cert bundle to
use. You can specify this argument if you want to use a
different CA cert bundle than the one used by botocore.
:type endpoint_url: string
:param endpoint_url: The complete URL to use for the constructed
client. Normally, botocore will automatically construct the
appropriate URL to use when communicating with a service. You can
specify a complete URL (including the "http/https" scheme) to
override this behavior. If this value is provided, then
``use_ssl`` is ignored.
:type aws_access_key_id: string
:param aws_access_key_id: The access key to use when creating
the client. This is entirely optional, and if not provided,
the credentials configured for the session will automatically
be used. You only need to provide this argument if you want
to override the credentials used for this specific client.
:type aws_secret_access_key: string
:param aws_secret_access_key: The secret key to use when creating
the client. Same semantics as aws_access_key_id above.
:type aws_session_token: string
:param aws_session_token: The session token to use when creating
the client. Same semantics as aws_access_key_id above.
:type config: botocore.client.Config
:param config: Advanced client configuration options. If a value
is specified in the client config, its value will take precedence
over environment variables and configuration values, but not over
a value passed explicitly to the method. If a default config
object is set on the session, the config object used when creating
the client will be the result of calling ``merge()`` on the
default config with the config provided to this call.
:rtype: botocore.client.BaseClient
:return: A botocore client instance
"""
default_client_config = self.get_default_client_config()
# If a config is provided and a default config is set, then
# use the config resulting from merging the two.
if config is not None and default_client_config is not None:
config = default_client_config.merge(config)
# If a config was not provided then use the default
# client config from the session
elif default_client_config is not None:
config = default_client_config
region_name = self._resolve_region_name(region_name, config)
# Figure out the verify value based on the various
# configuration options.
if verify is None:
verify = self.get_config_variable('ca_bundle')
if api_version is None:
api_version = self.get_config_variable('api_versions').get(
service_name, None)
loader = self.get_component('data_loader')
event_emitter = self.get_component('event_emitter')
response_parser_factory = self.get_component(
'response_parser_factory')
if config is not None and config.signature_version is UNSIGNED:
credentials = None
elif aws_access_key_id is not None and aws_secret_access_key is not None:
credentials = botocore.credentials.Credentials(
access_key=aws_access_key_id,
secret_key=aws_secret_access_key,
token=aws_session_token)
elif self._missing_cred_vars(aws_access_key_id,
aws_secret_access_key):
raise PartialCredentialsError(
provider='explicit',
cred_var=self._missing_cred_vars(aws_access_key_id,
aws_secret_access_key))
else:
credentials = self.get_credentials()
endpoint_resolver = self._get_internal_component('endpoint_resolver')
exceptions_factory = self._get_internal_component('exceptions_factory')
config_store = self.get_component('config_store')
client_creator = botocore.client.ClientCreator(
loader, endpoint_resolver, self.user_agent(), event_emitter,
retryhandler, translate, response_parser_factory,
exceptions_factory, config_store)
client = client_creator.create_client(
service_name=service_name, region_name=region_name,
is_secure=use_ssl, endpoint_url=endpoint_url, verify=verify,
credentials=credentials, scoped_config=self.get_scoped_config(),
client_config=config, api_version=api_version)
monitor = self._get_internal_component('monitor')
if monitor is not None:
monitor.register(client.meta.events)
return client
def _resolve_region_name(self, region_name, config):
# Figure out the user-provided region based on the various
# configuration options.
if region_name is None:
if config and config.region_name is not None:
region_name = config.region_name
else:
region_name = self.get_config_variable('region')
validate_region_name(region_name)
# For any client that we create in retrieving credentials
# we want to create it using the same region as specified in
# creating this client. It is important to note though that the
# credentials client is only created once per session. So if a new
# client is created with a different region, its credential resolver
# will use the region of the first client. However, that is not an
# issue as of now because the credential resolver uses only STS and
# the credentials returned at regional endpoints are valid across
# all regions in the partition.
self._last_client_region_used = region_name
return region_name
def _missing_cred_vars(self, access_key, secret_key):
if access_key is not None and secret_key is None:
return 'aws_secret_access_key'
if secret_key is not None and access_key is None:
return 'aws_access_key_id'
return None
def get_available_partitions(self):
"""Lists the available partitions found on disk
:rtype: list
:return: Returns a list of partition names (e.g., ["aws", "aws-cn"])
"""
resolver = self._get_internal_component('endpoint_resolver')
return resolver.get_available_partitions()
def get_available_regions(self, service_name, partition_name='aws',
allow_non_regional=False):
"""Lists the region and endpoint names of a particular partition.
:type service_name: string
:param service_name: Name of a service to list endpoint for (e.g., s3).
This parameter accepts a service name (e.g., "elb") or endpoint
prefix (e.g., "elasticloadbalancing").
:type partition_name: string
:param partition_name: Name of the partition to limit endpoints to.
(e.g., aws for the public AWS endpoints, aws-cn for AWS China
endpoints, aws-us-gov for AWS GovCloud (US) endpoints, etc.)
:type allow_non_regional: bool
:param allow_non_regional: Set to True to include endpoints that are
not regional endpoints (e.g., s3-external-1,
fips-us-gov-west-1, etc).
:return: Returns a list of endpoint names (e.g., ["us-east-1"]).
"""
resolver = self._get_internal_component('endpoint_resolver')
results = []
try:
service_data = self.get_service_data(service_name)
endpoint_prefix = service_data['metadata'].get(
'endpointPrefix', service_name)
results = resolver.get_available_endpoints(
endpoint_prefix, partition_name, allow_non_regional)
except UnknownServiceError:
pass
return results
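# Editor's usage sketch (hedged, not part of botocore): a minimal end-to-end
# flow through the Session above. The service and region names here are
# illustrative, and credentials are assumed to be resolvable through the
# standard lookup chain.
def _example_session_usage():
    session = Session()
    session.set_config_variable('region', 'us-east-1')
    # create_client() resolves region, credentials, endpoint and config
    # through the components registered in _register_components().
    return session.create_client('s3')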
class ComponentLocator(object):
"""Service locator for session components."""
def __init__(self):
self._components = {}
self._deferred = {}
def get_component(self, name):
if name in self._deferred:
factory = self._deferred[name]
self._components[name] = factory()
# Only delete the component from the deferred dict after
# successfully creating the object from the factory as well as
# injecting the instantiated value into the _components dict.
del self._deferred[name]
try:
return self._components[name]
except KeyError:
raise ValueError("Unknown component: %s" % name)
def register_component(self, name, component):
self._components[name] = component
try:
del self._deferred[name]
except KeyError:
pass
def lazy_register_component(self, name, no_arg_factory):
self._deferred[name] = no_arg_factory
try:
del self._components[name]
except KeyError:
pass
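# Editor's sketch (hedged): how the service locator above behaves. The
# component names 'config' and 'data' are made up for illustration only.
def _example_component_locator():
    locator = ComponentLocator()
    # Eagerly registered components are returned as-is.
    locator.register_component('config', {'region': 'us-west-2'})
    # Lazily registered components are built on first access, then cached.
    locator.lazy_register_component('data', lambda: {'loaded': True})
    assert locator.get_component('config') == {'region': 'us-west-2'}
    assert locator.get_component('data') == {'loaded': True}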
class SessionVarDict(MutableMapping):
def __init__(self, session, session_vars):
self._session = session
self._store = copy.copy(session_vars)
def __getitem__(self, key):
return self._store[key]
def __setitem__(self, key, value):
self._store[key] = value
self._update_config_store_from_session_vars(key, value)
def __delitem__(self, key):
del self._store[key]
def __iter__(self):
return iter(self._store)
def __len__(self):
return len(self._store)
def _update_config_store_from_session_vars(self, logical_name,
config_options):
# This is for backwards compatibility. The new preferred way to
# modify configuration logic is to use the component system to get
# the config_store component from the session, and then update
# a key with one or more custom config providers.
# This backwards compatibility method takes the old session_vars
# list of tuples and transforms that into a set of updates to
# the config_store component.
config_chain_builder = ConfigChainFactory(session=self._session)
config_name, env_vars, default, typecast = config_options
config_store = self._session.get_component('config_store')
config_store.set_config_provider(
logical_name,
config_chain_builder.create_config_chain(
instance_name=logical_name,
env_var_names=env_vars,
config_property_names=config_name,
default=default,
conversion_func=typecast,
)
)
class SubsetChainConfigFactory(object):
"""A class for creating backwards compatible configuration chains.
This class can be used instead of
:class:`botocore.configprovider.ConfigChainFactory` to make it honor the
methods argument to get_config_variable. This class can be used to filter
out providers that are not in the methods tuple when creating a new config
chain.
"""
def __init__(self, session, methods, environ=None):
self._factory = ConfigChainFactory(session, environ)
self._supported_methods = methods
def create_config_chain(self, instance_name=None, env_var_names=None,
config_property_name=None, default=None,
conversion_func=None):
"""Build a config chain following the standard botocore pattern.
This config chain factory will omit any providers not in the methods
tuple provided at initialization. For example if given the tuple
('instance', 'config',) it will not inject the environment provider
into the standard config chain. This lets the botocore session support
the custom ``methods`` argument for all the default botocore config
variables when calling ``get_config_variable``.
"""
if 'instance' not in self._supported_methods:
instance_name = None
if 'env' not in self._supported_methods:
env_var_names = None
if 'config' not in self._supported_methods:
config_property_name = None
return self._factory.create_config_chain(
instance_name=instance_name,
env_var_names=env_var_names,
config_property_names=config_property_name,
default=default,
conversion_func=conversion_func,
)
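# Editor's sketch (hedged): the legacy ``methods`` argument in action.
# Restricting the lookup to the environment means instance variables and
# the config file are skipped for this one call.
def _example_methods_filtering():
    session = Session()
    return session.get_config_variable('region', methods=('env',))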
def get_session(env_vars=None):
"""
Return a new session object.
"""
return Session(env_vars)
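# Editor's note (hedged): typical callers do
#     session = get_session()
#     client = session.create_client('ec2', region_name='us-west-2')
# rather than instantiating Session directly.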
| 43,433 | Python | 40.803657 | 86 | 0.623627 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/compat.py | # Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
import datetime
import sys
import inspect
import warnings
import hashlib
import logging
import shlex
from math import floor
from botocore.vendored import six
from botocore.exceptions import MD5UnavailableError
from dateutil.tz import tzlocal
from urllib3 import exceptions
logger = logging.getLogger(__name__)
if six.PY3:
from botocore.vendored.six.moves import http_client
class HTTPHeaders(http_client.HTTPMessage):
pass
from urllib.parse import quote
from urllib.parse import urlencode
from urllib.parse import unquote
from urllib.parse import unquote_plus
from urllib.parse import urlparse
from urllib.parse import urlsplit
from urllib.parse import urlunsplit
from urllib.parse import urljoin
from urllib.parse import parse_qsl
from urllib.parse import parse_qs
from http.client import HTTPResponse
from io import IOBase as _IOBase
from base64 import encodebytes
from email.utils import formatdate
from itertools import zip_longest
file_type = _IOBase
zip = zip
# In python3, unquote takes a str() object, url decodes it,
# then takes the bytestring and decodes it to utf-8.
# In Python 2 we'll have to do this ourselves (see below).
unquote_str = unquote_plus
def set_socket_timeout(http_response, timeout):
"""Set the timeout of the socket from an HTTPResponse.
:param http_response: An instance of ``httplib.HTTPResponse``
"""
http_response._fp.fp.raw._sock.settimeout(timeout)
def accepts_kwargs(func):
# In python3.4.1, there are backwards incompatible
# changes when using getargspec with functools.partial objects.
return inspect.getfullargspec(func)[2]
def ensure_unicode(s, encoding=None, errors=None):
# NOOP in Python 3, because every string is already unicode
return s
def ensure_bytes(s, encoding='utf-8', errors='strict'):
if isinstance(s, str):
return s.encode(encoding, errors)
if isinstance(s, bytes):
return s
raise ValueError("Expected str or bytes, received %s." % type(s))
else:
from urllib import quote
from urllib import urlencode
from urllib import unquote
from urllib import unquote_plus
from urlparse import urlparse
from urlparse import urlsplit
from urlparse import urlunsplit
from urlparse import urljoin
from urlparse import parse_qsl
from urlparse import parse_qs
from email.message import Message
from email.Utils import formatdate
file_type = file
from itertools import izip as zip
from itertools import izip_longest as zip_longest
from httplib import HTTPResponse
from base64 import encodestring as encodebytes
class HTTPHeaders(Message):
# The __iter__ method is not available in python2.x, so we have
# to port the py3 version.
def __iter__(self):
for field, value in self._headers:
yield field
def unquote_str(value, encoding='utf-8'):
# In python2, unquote() gives us a string back that has the urldecoded
# bits, but not the unicode parts. We need to decode this manually.
# unquote has special logic in which if it receives a unicode object it
# will decode it to latin1. This is hard coded. To avoid this, we'll
# encode the string with the passed in encoding before trying to
# unquote it.
byte_string = value.encode(encoding)
return unquote_plus(byte_string).decode(encoding)
def set_socket_timeout(http_response, timeout):
"""Set the timeout of the socket from an HTTPResponse.
:param http_response: An instance of ``httplib.HTTPResponse``
"""
http_response._fp.fp._sock.settimeout(timeout)
def accepts_kwargs(func):
return inspect.getargspec(func)[2]
def ensure_unicode(s, encoding='utf-8', errors='strict'):
if isinstance(s, six.text_type):
return s
return unicode(s, encoding, errors)
def ensure_bytes(s, encoding='utf-8', errors='strict'):
if isinstance(s, unicode):
return s.encode(encoding, errors)
if isinstance(s, str):
return s
raise ValueError("Expected str or unicode, received %s." % type(s))
from collections import OrderedDict
try:
import xml.etree.cElementTree as ETree
except ImportError:
# cElementTree was removed in Python 3.9.
import xml.etree.ElementTree as ETree
XMLParseError = ETree.ParseError
import json
def filter_ssl_warnings():
# Ignore warnings related to SNI as it is not being used in validations.
warnings.filterwarnings(
'ignore',
message="A true SSLContext object is not available.*",
category=exceptions.InsecurePlatformWarning,
module=r".*urllib3\.util\.ssl_")
@classmethod
def from_dict(cls, d):
new_instance = cls()
for key, value in d.items():
new_instance[key] = value
return new_instance
@classmethod
def from_pairs(cls, pairs):
new_instance = cls()
for key, value in pairs:
new_instance[key] = value
return new_instance
HTTPHeaders.from_dict = from_dict
HTTPHeaders.from_pairs = from_pairs
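# Editor's sketch (hedged): the classmethods attached above let callers
# build header objects from plain dicts or pair lists.
def _example_http_headers():
    headers = HTTPHeaders.from_dict({'Content-Type': 'application/json'})
    return headers['Content-Type']  # 'application/json'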
def copy_kwargs(kwargs):
"""
This used to be a compat shim for 2.6 but is now just an alias.
"""
copy_kwargs = copy.copy(kwargs)
return copy_kwargs
def total_seconds(delta):
"""
Returns the total seconds in a ``datetime.timedelta``.
This used to be a compat shim for 2.6 but is now just an alias.
:param delta: The timedelta object
:type delta: ``datetime.timedelta``
"""
return delta.total_seconds()
# Checks to see if md5 is available on this system. A given system might not
# have access to it for various reasons, such as FIPS mode being enabled.
try:
hashlib.md5()
MD5_AVAILABLE = True
except ValueError:
MD5_AVAILABLE = False
def get_md5(*args, **kwargs):
"""
Attempts to get an md5 hashing object.
:param args: Args to pass to the MD5 constructor
:param kwargs: Key word arguments to pass to the MD5 constructor
:return: An MD5 hashing object if available.
:raises MD5UnavailableError: if md5 is unavailable on this system,
for example because FIPS mode is enabled.
"""
if MD5_AVAILABLE:
return hashlib.md5(*args, **kwargs)
else:
raise MD5UnavailableError()
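# Editor's sketch (hedged): hashing some bytes with the guarded helper.
# On FIPS-enabled hosts this raises MD5UnavailableError instead.
def _example_get_md5():
    return get_md5(b'hello world').hexdigest()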
def compat_shell_split(s, platform=None):
if platform is None:
platform = sys.platform
if platform == "win32":
return _windows_shell_split(s)
else:
return shlex.split(s)
def _windows_shell_split(s):
"""Splits up a windows command as the built-in command parser would.
Windows has potentially bizarre rules depending on where you look. When
spawning a process via the Windows C runtime (which is what python does
when you call popen) the rules are as follows:
https://docs.microsoft.com/en-us/cpp/cpp/parsing-cpp-command-line-arguments
To summarize:
* Only space and tab are valid delimiters
* Double quotes are the only valid quotes
* Backslash is interpreted literally unless it is part of a chain that
leads up to a double quote. Then the backslashes escape the backslashes,
and if there is an odd number the final backslash escapes the quote.
:param s: The command string to split up into parts.
:return: A list of command components.
"""
if not s:
return []
components = []
buff = []
is_quoted = False
num_backslashes = 0
for character in s:
if character == '\\':
# We can't simply append backslashes because we don't know if
# they are being used as escape characters or not. Instead we
# keep track of how many we've encountered and handle them when
# we encounter a different character.
num_backslashes += 1
elif character == '"':
if num_backslashes > 0:
# The backslashes are in a chain leading up to a double
# quote, so they are escaping each other.
buff.append('\\' * int(floor(num_backslashes / 2)))
remainder = num_backslashes % 2
num_backslashes = 0
if remainder == 1:
# The number of backslashes is uneven, so they are also
# escaping the double quote, so it needs to be added to
# the current component buffer.
buff.append('"')
continue
# We've encountered a double quote that is not escaped,
# so we toggle is_quoted.
is_quoted = not is_quoted
# If there are quotes, then we may want an empty string. To be
# safe, we add an empty string to the buffer so that we make
# sure it sticks around if there's nothing else between quotes.
# If there is other stuff between quotes, the empty string will
# disappear during the joining process.
buff.append('')
elif character in [' ', '\t'] and not is_quoted:
# Since the backslashes aren't leading up to a quote, we put in
# the exact number of backslashes.
if num_backslashes > 0:
buff.append('\\' * num_backslashes)
num_backslashes = 0
# Excess whitespace is ignored, so only add the components list
# if there is anything in the buffer.
if buff:
components.append(''.join(buff))
buff = []
else:
# Since the backslashes aren't leading up to a quote, we put in
# the exact number of backslashes.
if num_backslashes > 0:
buff.append('\\' * num_backslashes)
num_backslashes = 0
buff.append(character)
# Quotes must be terminated.
if is_quoted:
raise ValueError('No closing quotation in string: %s' % s)
# There may be some leftover backslashes, so we need to add them in.
# There's no quote so we add the exact number.
if num_backslashes > 0:
buff.append('\\' * num_backslashes)
# Add the final component in if there is anything in the buffer.
if buff:
components.append(''.join(buff))
return components
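# Editor's sketch (hedged): a few behaviors of the splitter above.
def _example_windows_shell_split():
    # Double quotes group words; whitespace outside quotes delimits.
    assert _windows_shell_split('a "b c" d') == ['a', 'b c', 'd']
    # An odd backslash chain escapes the following double quote.
    assert _windows_shell_split('say \\"hi\\"') == ['say', '"hi"']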
def get_tzinfo_options():
# Due to dateutil/dateutil#197, Windows may fail to parse times in the past
# with the system clock. We can alternatively fall back to tzwinlocal when
# this happens, which will get time info from the Windows registry.
if sys.platform == 'win32':
from dateutil.tz import tzwinlocal
return (tzlocal, tzwinlocal)
else:
return (tzlocal,)
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
| 11,715 | Python | 32.284091 | 79 | 0.656594 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/regions.py | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Resolves regions and endpoints.
This module implements endpoint resolution, including resolving endpoints for a
given service and region and resolving the available endpoints for a service
in a specific AWS partition.
"""
import logging
import re
from botocore.exceptions import NoRegionError
LOG = logging.getLogger(__name__)
DEFAULT_URI_TEMPLATE = '{service}.{region}.{dnsSuffix}'
DEFAULT_SERVICE_DATA = {'endpoints': {}}
class BaseEndpointResolver(object):
"""Resolves regions and endpoints. Must be subclassed."""
def construct_endpoint(self, service_name, region_name=None):
"""Resolves an endpoint for a service and region combination.
:type service_name: string
:param service_name: Name of the service to resolve an endpoint for
(e.g., s3)
:type region_name: string
:param region_name: Region/endpoint name to resolve (e.g., us-east-1)
if no region is provided, the first found partition-wide endpoint
will be used if available.
:rtype: dict
:return: Returns a dict containing the following keys:
- partition: (string, required) Resolved partition name
- endpointName: (string, required) Resolved endpoint name
- hostname: (string, required) Hostname to use for this endpoint
- sslCommonName: (string) sslCommonName to use for this endpoint.
- credentialScope: (dict) Signature version 4 credential scope
- region: (string) region name override when signing.
- service: (string) service name override when signing.
- signatureVersions: (list<string>) A list of possible signature
versions, including s3, v4, v2, and s3v4
- protocols: (list<string>) A list of supported protocols
(e.g., http, https)
- ...: Other keys may be included as well based on the metadata
"""
raise NotImplementedError
def get_available_partitions(self):
"""Lists the partitions available to the endpoint resolver.
:return: Returns a list of partition names (e.g., ["aws", "aws-cn"]).
"""
raise NotImplementedError
def get_available_endpoints(self, service_name, partition_name='aws',
allow_non_regional=False):
"""Lists the endpoint names of a particular partition.
:type service_name: string
:param service_name: Name of a service to list endpoint for (e.g., s3)
:type partition_name: string
:param partition_name: Name of the partition to limit endpoints to.
(e.g., aws for the public AWS endpoints, aws-cn for AWS China
endpoints, aws-us-gov for AWS GovCloud (US) endpoints, etc.)
:type allow_non_regional: bool
:param allow_non_regional: Set to True to include endpoints that are
not regional endpoints (e.g., s3-external-1,
fips-us-gov-west-1, etc).
:return: Returns a list of endpoint names (e.g., ["us-east-1"]).
"""
raise NotImplementedError
class EndpointResolver(BaseEndpointResolver):
"""Resolves endpoints based on partition endpoint metadata"""
def __init__(self, endpoint_data):
"""
:param endpoint_data: A dict of partition data.
"""
if 'partitions' not in endpoint_data:
raise ValueError('Missing "partitions" in endpoint data')
self._endpoint_data = endpoint_data
def get_available_partitions(self):
result = []
for partition in self._endpoint_data['partitions']:
result.append(partition['partition'])
return result
def get_available_endpoints(self, service_name, partition_name='aws',
allow_non_regional=False):
result = []
for partition in self._endpoint_data['partitions']:
if partition['partition'] != partition_name:
continue
services = partition['services']
if service_name not in services:
continue
for endpoint_name in services[service_name]['endpoints']:
if allow_non_regional or endpoint_name in partition['regions']:
result.append(endpoint_name)
return result
def construct_endpoint(self, service_name, region_name=None, partition_name=None):
if partition_name is not None:
valid_partition = None
for partition in self._endpoint_data['partitions']:
if partition['partition'] == partition_name:
valid_partition = partition
if valid_partition is not None:
result = self._endpoint_for_partition(valid_partition, service_name,
region_name, True)
return result
return None
# Iterate over each partition until a match is found.
for partition in self._endpoint_data['partitions']:
result = self._endpoint_for_partition(
partition, service_name, region_name)
if result:
return result
def _endpoint_for_partition(self, partition, service_name, region_name,
force_partition=False):
# Get the service from the partition, or an empty template.
service_data = partition['services'].get(
service_name, DEFAULT_SERVICE_DATA)
# Use the partition endpoint if no region is supplied.
if region_name is None:
if 'partitionEndpoint' in service_data:
region_name = service_data['partitionEndpoint']
else:
raise NoRegionError()
# Attempt to resolve the exact region for this partition.
if region_name in service_data['endpoints']:
return self._resolve(
partition, service_name, service_data, region_name)
# Check to see if the endpoint provided is valid for the partition.
if self._region_match(partition, region_name) or force_partition:
# Use the partition endpoint if set and not regionalized.
partition_endpoint = service_data.get('partitionEndpoint')
is_regionalized = service_data.get('isRegionalized', True)
if partition_endpoint and not is_regionalized:
LOG.debug('Using partition endpoint for %s, %s: %s',
service_name, region_name, partition_endpoint)
return self._resolve(
partition, service_name, service_data, partition_endpoint)
LOG.debug('Creating a regex based endpoint for %s, %s',
service_name, region_name)
return self._resolve(
partition, service_name, service_data, region_name)
def _region_match(self, partition, region_name):
if region_name in partition['regions']:
return True
if 'regionRegex' in partition:
return re.compile(partition['regionRegex']).match(region_name)
return False
def _resolve(self, partition, service_name, service_data, endpoint_name):
result = service_data['endpoints'].get(endpoint_name, {})
result['partition'] = partition['partition']
result['endpointName'] = endpoint_name
# Merge in the service defaults then the partition defaults.
self._merge_keys(service_data.get('defaults', {}), result)
self._merge_keys(partition.get('defaults', {}), result)
# Fall back to the default URI template when the endpoint entry
# does not supply its own hostname.
hostname = result.get('hostname', DEFAULT_URI_TEMPLATE)
result['hostname'] = self._expand_template(
partition, hostname, service_name, endpoint_name)
if 'sslCommonName' in result:
result['sslCommonName'] = self._expand_template(
partition, result['sslCommonName'], service_name,
endpoint_name)
result['dnsSuffix'] = partition['dnsSuffix']
return result
def _merge_keys(self, from_data, result):
for key in from_data:
if key not in result:
result[key] = from_data[key]
def _expand_template(self, partition, template, service_name,
endpoint_name):
return template.format(
service=service_name, region=endpoint_name,
dnsSuffix=partition['dnsSuffix'])
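# Editor's sketch (hedged): resolving against a minimal hand-written
# endpoint document. Real botocore loads this data from endpoints.json;
# the partition/service entries below are trimmed down for illustration.
def _example_endpoint_resolution():
    endpoint_data = {
        'partitions': [{
            'partition': 'aws',
            'dnsSuffix': 'amazonaws.com',
            'regions': {'us-east-1': {}},
            'services': {'s3': {'endpoints': {'us-east-1': {}}}},
        }]
    }
    resolver = EndpointResolver(endpoint_data)
    result = resolver.construct_endpoint('s3', 'us-east-1')
    # With no explicit hostname, DEFAULT_URI_TEMPLATE is expanded.
    assert result['hostname'] == 's3.us-east-1.amazonaws.com'
    return result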
| 8,975 | Python | 43.435643 | 86 | 0.62585 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/parsers.py | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Response parsers for the various protocol types.
The module contains classes that can take an HTTP response, and given
an output shape, parse the response into a dict according to the
rules in the output shape.
There are many similarities amongst the different protocols with regard
to response parsing, and the code is structured in a way to avoid
code duplication when possible. The diagram below is a diagram
showing the inheritance hierarchy of the response classes.
::
+--------------+
|ResponseParser|
+--------------+
^ ^ ^
+--------------------+ | +-------------------+
| | |
+----------+----------+ +------+-------+ +-------+------+
|BaseXMLResponseParser| |BaseRestParser| |BaseJSONParser|
+---------------------+ +--------------+ +--------------+
^ ^ ^ ^ ^ ^
| | | | | |
| | | | | |
| ++----------+-+ +-+-----------++ |
| |RestXMLParser| |RestJSONParser| |
+-----+-----+ +-------------+ +--------------+ +----+-----+
|QueryParser| |JSONParser|
+-----------+ +----------+
The diagram above shows that there is a base class, ``ResponseParser`` that
contains logic that is similar amongst all the different protocols (``query``,
``json``, ``rest-json``, ``rest-xml``). Amongst the various protocols there
is shared logic that can be grouped several ways:
* The ``query`` and ``rest-xml`` both have XML bodies that are parsed in the
same way.
* The ``json`` and ``rest-json`` protocols both have JSON bodies that are
parsed in the same way.
* The ``rest-json`` and ``rest-xml`` protocols have additional attributes
besides body parameters that are parsed the same (headers, query string,
status code).
This is reflected in the class diagram above. The ``BaseXMLResponseParser``
and the BaseJSONParser contain logic for parsing the XML/JSON body,
and the BaseRestParser contains logic for parsing out attributes that
come from other parts of the HTTP response. Classes like the
``RestXMLParser`` inherit from the ``BaseXMLResponseParser`` to get the
XML body parsing logic and the ``BaseRestParser`` to get the HTTP
header/status code/query string parsing.
Additionally, there are event stream parsers that are used by the other parsers
to wrap streaming bodies that represent a stream of events. The
BaseEventStreamParser extends from ResponseParser and defines the logic for
parsing values from the headers and payload of a message from the underlying
binary encoding protocol. Currently, event streams support parsing bodies
encoded as JSON and XML through the following hierarchy.
+--------------+
|ResponseParser|
+--------------+
^ ^ ^
+--------------------+ | +------------------+
| | |
+----------+----------+ +----------+----------+ +-------+------+
|BaseXMLResponseParser| |BaseEventStreamParser| |BaseJSONParser|
+---------------------+ +---------------------+ +--------------+
^ ^ ^ ^
| | | |
| | | |
+-+----------------+-+ +-+-----------------+-+
|EventStreamXMLParser| |EventStreamJSONParser|
+--------------------+ +---------------------+
Return Values
=============
Each call to ``parse()`` returns a dict that has this form::
Standard Response
{
"ResponseMetadata": {"RequestId": <requestid>}
<response keys>
}
Error response
{
"ResponseMetadata": {"RequestId": <requestid>}
"Error": {
"Code": <string>,
"Message": <string>,
"Type": <string>,
<additional keys>
}
}
"""
import re
import base64
import json
import logging
from botocore.compat import six, ETree, XMLParseError
from botocore.eventstream import EventStream, NoInitialResponseError
from botocore.utils import parse_timestamp, merge_dicts, \
is_json_value_header, lowercase_dict
LOG = logging.getLogger(__name__)
DEFAULT_TIMESTAMP_PARSER = parse_timestamp
class ResponseParserFactory(object):
def __init__(self):
self._defaults = {}
def set_parser_defaults(self, **kwargs):
"""Set default arguments when a parser instance is created.
You can specify any kwargs that are allowed by a ResponseParser
class. There are currently two arguments:
* timestamp_parser - A callable that can parse a timestamp string
* blob_parser - A callable that can parse a blob type
"""
self._defaults.update(kwargs)
def create_parser(self, protocol_name):
parser_cls = PROTOCOL_PARSERS[protocol_name]
return parser_cls(**self._defaults)
def create_parser(protocol):
return ResponseParserFactory().create_parser(protocol)
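# Editor's sketch (hedged): picking a parser by protocol name. The
# PROTOCOL_PARSERS mapping is defined further down in this module.
def _example_create_parser():
    parser = create_parser('query')
    return isinstance(parser, ResponseParser)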
def _text_content(func):
# This decorator hides the difference between
# an XML node with text or a plain string. It's used
# to ensure that scalar processing operates only on text
# strings, which allows the same scalar handlers to be used
# for XML nodes from the body and HTTP headers.
def _get_text_content(self, shape, node_or_string):
if hasattr(node_or_string, 'text'):
text = node_or_string.text
if text is None:
# If an XML node is empty <foo></foo>,
# we want to parse that as an empty string,
# not as a null/None value.
text = ''
else:
text = node_or_string
return func(self, shape, text)
return _get_text_content
class ResponseParserError(Exception):
pass
class ResponseParser(object):
"""Base class for response parsing.
This class represents the interface that all ResponseParsers for the
various protocols must implement.
This class will take an HTTP response and a model shape and parse the
HTTP response into a dictionary.
There is a single public method exposed: ``parse``. See the ``parse``
docstring for more info.
"""
DEFAULT_ENCODING = 'utf-8'
EVENT_STREAM_PARSER_CLS = None
def __init__(self, timestamp_parser=None, blob_parser=None):
if timestamp_parser is None:
timestamp_parser = DEFAULT_TIMESTAMP_PARSER
self._timestamp_parser = timestamp_parser
if blob_parser is None:
blob_parser = self._default_blob_parser
self._blob_parser = blob_parser
self._event_stream_parser = None
if self.EVENT_STREAM_PARSER_CLS is not None:
self._event_stream_parser = self.EVENT_STREAM_PARSER_CLS(
timestamp_parser, blob_parser)
def _default_blob_parser(self, value):
# Blobs are always returned as bytes type (this matters on python3).
# We don't decode this to a str because it's entirely possible that the
# blob contains binary data that actually can't be decoded.
return base64.b64decode(value)
def parse(self, response, shape):
"""Parse the HTTP response given a shape.
:param response: The HTTP response dictionary. This is a dictionary
that represents the HTTP response. The dictionary must have the
following keys, ``body``, ``headers``, and ``status_code``.
:param shape: The model shape describing the expected output.
:return: Returns a dictionary representing the parsed response
described by the model. In addition to the shape described from
the model, each response will also have a ``ResponseMetadata`` key,
which contains metadata about the response, including at least the
two keys ``RequestId`` and ``HTTPStatusCode``. Some
responses may populate additional keys, but ``RequestId`` will
always be present.
"""
LOG.debug('Response headers: %s', response['headers'])
LOG.debug('Response body:\n%s', response['body'])
if response['status_code'] >= 301:
if self._is_generic_error_response(response):
parsed = self._do_generic_error_parse(response)
elif self._is_modeled_error_shape(shape):
parsed = self._do_modeled_error_parse(response, shape)
# We don't want to decorate the modeled fields with metadata
return parsed
else:
parsed = self._do_error_parse(response, shape)
else:
parsed = self._do_parse(response, shape)
# We don't want to decorate event stream responses with metadata
if shape and shape.serialization.get('eventstream'):
return parsed
# Add ResponseMetadata if it doesn't exist and inject the HTTP
# status code and headers from the response.
if isinstance(parsed, dict):
response_metadata = parsed.get('ResponseMetadata', {})
response_metadata['HTTPStatusCode'] = response['status_code']
# Ensure that the http header keys are all lower cased. Older
# versions of urllib3 (< 1.11) would unintentionally do this for us
# (see urllib3#633). We need to do this conversion manually now.
headers = response['headers']
response_metadata['HTTPHeaders'] = lowercase_dict(headers)
parsed['ResponseMetadata'] = response_metadata
return parsed
def _is_modeled_error_shape(self, shape):
return shape is not None and shape.metadata.get('exception', False)
def _is_generic_error_response(self, response):
# There are times when a service will respond with a generic
# error response such as:
# '<html><body><b>Http/1.1 Service Unavailable</b></body></html>'
#
# This can also happen if you're going through a proxy.
# In this case the protocol specific _do_error_parse will either
# fail to parse the response (in the best case) or silently succeed
# and treat the HTML above as an XML response and return
# nonsensical parsed data.
# To prevent this case from happening we first need to check
# whether or not this response looks like the generic response.
if response['status_code'] >= 500:
if 'body' not in response or response['body'] is None:
return True
body = response['body'].strip()
return body.startswith(b'<html>') or not body
def _do_generic_error_parse(self, response):
# There's not really much we can do when we get a generic
# html response.
LOG.debug("Received a non protocol specific error response from the "
"service, unable to populate error code and message.")
return {
'Error': {'Code': str(response['status_code']),
'Message': six.moves.http_client.responses.get(
response['status_code'], '')},
'ResponseMetadata': {},
}
def _do_parse(self, response, shape):
raise NotImplementedError("%s._do_parse" % self.__class__.__name__)
def _do_error_parse(self, response, shape):
raise NotImplementedError(
"%s._do_error_parse" % self.__class__.__name__)
def _do_modeled_error_parse(self, response, shape, parsed):
raise NotImplementedError(
"%s._do_modeled_error_parse" % self.__class__.__name__)
def _parse_shape(self, shape, node):
handler = getattr(self, '_handle_%s' % shape.type_name,
self._default_handle)
return handler(shape, node)
def _handle_list(self, shape, node):
# Enough implementations share list serialization that it's moved
# up here in the base class.
parsed = []
member_shape = shape.member
for item in node:
parsed.append(self._parse_shape(member_shape, item))
return parsed
def _default_handle(self, shape, value):
return value
def _create_event_stream(self, response, shape):
parser = self._event_stream_parser
name = response['context'].get('operation_name')
return EventStream(response['body'], shape, parser, name)
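# Illustrative sketch, not part of botocore: the minimal ``response`` dict
# a parser expects, per the parse() docstring above. All values below are
# hypothetical; a real ``shape`` would come from a loaded service model.
def _example_drive_parser():
    parser = JSONParser()  # defined later in this module
    response = {
        'status_code': 200,
        'headers': {'x-amzn-requestid': 'example-request-id'},
        'body': b'{}',
        'context': {},
    }
    # Even with shape=None, ResponseMetadata is injected with the HTTP
    # status code and lower-cased header keys.
    return parser.parse(response, shape=None)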
class BaseXMLResponseParser(ResponseParser):
def __init__(self, timestamp_parser=None, blob_parser=None):
super(BaseXMLResponseParser, self).__init__(timestamp_parser,
blob_parser)
self._namespace_re = re.compile('{.*}')
def _handle_map(self, shape, node):
parsed = {}
key_shape = shape.key
value_shape = shape.value
key_location_name = key_shape.serialization.get('name') or 'key'
value_location_name = value_shape.serialization.get('name') or 'value'
if shape.serialization.get('flattened') and not isinstance(node, list):
node = [node]
for keyval_node in node:
for single_pair in keyval_node:
# Within each <entry> there's a <key> and a <value>
tag_name = self._node_tag(single_pair)
if tag_name == key_location_name:
key_name = self._parse_shape(key_shape, single_pair)
elif tag_name == value_location_name:
val_name = self._parse_shape(value_shape, single_pair)
else:
raise ResponseParserError("Unknown tag: %s" % tag_name)
parsed[key_name] = val_name
return parsed
def _node_tag(self, node):
return self._namespace_re.sub('', node.tag)
def _handle_list(self, shape, node):
# When we use _build_name_to_xml_node, repeated elements are aggregated
# into a list. However, we can't tell the difference between a scalar
        # value and a single-element flattened list. So before calling the
        # real _handle_list, if the shape is flattened and "node" is not
        # already a list, we wrap it in a one-element list.
if shape.serialization.get('flattened') and not isinstance(node, list):
node = [node]
return super(BaseXMLResponseParser, self)._handle_list(shape, node)
def _handle_structure(self, shape, node):
parsed = {}
members = shape.members
if shape.metadata.get('exception', False):
node = self._get_error_root(node)
xml_dict = self._build_name_to_xml_node(node)
for member_name in members:
member_shape = members[member_name]
if 'location' in member_shape.serialization or \
member_shape.serialization.get('eventheader'):
# All members with locations have already been handled,
# so we don't need to parse these members.
continue
xml_name = self._member_key_name(member_shape, member_name)
member_node = xml_dict.get(xml_name)
if member_node is not None:
parsed[member_name] = self._parse_shape(
member_shape, member_node)
elif member_shape.serialization.get('xmlAttribute'):
attribs = {}
location_name = member_shape.serialization['name']
for key, value in node.attrib.items():
new_key = self._namespace_re.sub(
location_name.split(':')[0] + ':', key)
attribs[new_key] = value
if location_name in attribs:
parsed[member_name] = attribs[location_name]
return parsed
def _get_error_root(self, original_root):
if self._node_tag(original_root) == 'ErrorResponse':
for child in original_root:
if self._node_tag(child) == 'Error':
return child
return original_root
def _member_key_name(self, shape, member_name):
        # This method is needed because we have to special-case flattened
        # lists with a serialization name. If this is the case we use the
# locationName from the list's member shape as the key name for the
# surrounding structure.
if shape.type_name == 'list' and shape.serialization.get('flattened'):
list_member_serialized_name = shape.member.serialization.get(
'name')
if list_member_serialized_name is not None:
return list_member_serialized_name
serialized_name = shape.serialization.get('name')
if serialized_name is not None:
return serialized_name
return member_name
def _build_name_to_xml_node(self, parent_node):
        # If the parent node is actually a list, we should not be trying
        # to serialize it to a dictionary. Instead, return the first element
        # in the list.
if isinstance(parent_node, list):
return self._build_name_to_xml_node(parent_node[0])
xml_dict = {}
for item in parent_node:
key = self._node_tag(item)
if key in xml_dict:
# If the key already exists, the most natural
# way to handle this is to aggregate repeated
# keys into a single list.
# <foo>1</foo><foo>2</foo> -> {'foo': [Node(1), Node(2)]}
if isinstance(xml_dict[key], list):
xml_dict[key].append(item)
else:
# Convert from a scalar to a list.
xml_dict[key] = [xml_dict[key], item]
else:
xml_dict[key] = item
return xml_dict
def _parse_xml_string_to_dom(self, xml_string):
try:
parser = ETree.XMLParser(
target=ETree.TreeBuilder(),
encoding=self.DEFAULT_ENCODING)
parser.feed(xml_string)
root = parser.close()
except XMLParseError as e:
raise ResponseParserError(
"Unable to parse response (%s), "
"invalid XML received. Further retries may succeed:\n%s" %
(e, xml_string))
return root
def _replace_nodes(self, parsed):
for key, value in parsed.items():
if list(value):
sub_dict = self._build_name_to_xml_node(value)
parsed[key] = self._replace_nodes(sub_dict)
else:
parsed[key] = value.text
return parsed
@_text_content
def _handle_boolean(self, shape, text):
if text == 'true':
return True
else:
return False
@_text_content
def _handle_float(self, shape, text):
return float(text)
@_text_content
def _handle_timestamp(self, shape, text):
return self._timestamp_parser(text)
@_text_content
def _handle_integer(self, shape, text):
return int(text)
@_text_content
def _handle_string(self, shape, text):
return text
@_text_content
def _handle_blob(self, shape, text):
return self._blob_parser(text)
_handle_character = _handle_string
_handle_double = _handle_float
_handle_long = _handle_integer
class QueryParser(BaseXMLResponseParser):
def _do_error_parse(self, response, shape):
xml_contents = response['body']
root = self._parse_xml_string_to_dom(xml_contents)
parsed = self._build_name_to_xml_node(root)
self._replace_nodes(parsed)
# Once we've converted xml->dict, we need to make one or two
# more adjustments to extract nested errors and to be consistent
# with ResponseMetadata for non-error responses:
# 1. {"Errors": {"Error": {...}}} -> {"Error": {...}}
# 2. {"RequestId": "id"} -> {"ResponseMetadata": {"RequestId": "id"}}
if 'Errors' in parsed:
parsed.update(parsed.pop('Errors'))
if 'RequestId' in parsed:
parsed['ResponseMetadata'] = {'RequestId': parsed.pop('RequestId')}
return parsed
def _do_modeled_error_parse(self, response, shape):
return self._parse_body_as_xml(response, shape, inject_metadata=False)
def _do_parse(self, response, shape):
return self._parse_body_as_xml(response, shape, inject_metadata=True)
def _parse_body_as_xml(self, response, shape, inject_metadata=True):
xml_contents = response['body']
root = self._parse_xml_string_to_dom(xml_contents)
parsed = {}
if shape is not None:
start = root
if 'resultWrapper' in shape.serialization:
start = self._find_result_wrapped_shape(
shape.serialization['resultWrapper'],
root)
parsed = self._parse_shape(shape, start)
if inject_metadata:
self._inject_response_metadata(root, parsed)
return parsed
def _find_result_wrapped_shape(self, element_name, xml_root_node):
mapping = self._build_name_to_xml_node(xml_root_node)
return mapping[element_name]
def _inject_response_metadata(self, node, inject_into):
mapping = self._build_name_to_xml_node(node)
child_node = mapping.get('ResponseMetadata')
if child_node is not None:
sub_mapping = self._build_name_to_xml_node(child_node)
for key, value in sub_mapping.items():
sub_mapping[key] = value.text
inject_into['ResponseMetadata'] = sub_mapping
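# Illustrative sketch, not part of botocore: a hypothetical query-protocol
# error body, driving the internal _do_error_parse above directly purely
# for illustration.
def _example_query_error_parse():
    parser = QueryParser()
    body = (b'<ErrorResponse><Error><Code>Throttling</Code>'
            b'<Message>Rate exceeded</Message></Error>'
            b'<RequestId>req-1</RequestId></ErrorResponse>')
    # RequestId is hoisted into ResponseMetadata; the Error mapping keeps
    # its Code and Message keys.
    return parser._do_error_parse({'body': body}, shape=None)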
class EC2QueryParser(QueryParser):
def _inject_response_metadata(self, node, inject_into):
mapping = self._build_name_to_xml_node(node)
child_node = mapping.get('requestId')
if child_node is not None:
inject_into['ResponseMetadata'] = {'RequestId': child_node.text}
def _do_error_parse(self, response, shape):
# EC2 errors look like:
# <Response>
# <Errors>
# <Error>
# <Code>InvalidInstanceID.Malformed</Code>
# <Message>Invalid id: "1343124"</Message>
# </Error>
# </Errors>
# <RequestID>12345</RequestID>
# </Response>
# This is different from QueryParser in that it's RequestID,
# not RequestId
original = super(EC2QueryParser, self)._do_error_parse(response, shape)
if 'RequestID' in original:
original['ResponseMetadata'] = {
'RequestId': original.pop('RequestID')
}
return original
def _get_error_root(self, original_root):
for child in original_root:
if self._node_tag(child) == 'Errors':
for errors_child in child:
if self._node_tag(errors_child) == 'Error':
return errors_child
return original_root
class BaseJSONParser(ResponseParser):
def _handle_structure(self, shape, value):
member_shapes = shape.members
if value is None:
            # If the value comes across the wire as "null" (None in Python),
# we should be returning this unchanged, instead of as an
# empty dict.
return None
final_parsed = {}
for member_name in member_shapes:
member_shape = member_shapes[member_name]
json_name = member_shape.serialization.get('name', member_name)
raw_value = value.get(json_name)
if raw_value is not None:
final_parsed[member_name] = self._parse_shape(
member_shapes[member_name],
raw_value)
return final_parsed
def _handle_map(self, shape, value):
parsed = {}
key_shape = shape.key
value_shape = shape.value
for key, value in value.items():
actual_key = self._parse_shape(key_shape, key)
actual_value = self._parse_shape(value_shape, value)
parsed[actual_key] = actual_value
return parsed
def _handle_blob(self, shape, value):
return self._blob_parser(value)
def _handle_timestamp(self, shape, value):
return self._timestamp_parser(value)
def _do_error_parse(self, response, shape):
body = self._parse_body_as_json(response['body'])
error = {"Error": {"Message": '', "Code": ''}, "ResponseMetadata": {}}
# Error responses can have slightly different structures for json.
# The basic structure is:
#
# {"__type":"ConnectClientException",
# "message":"The error message."}
# The error message can either come in the 'message' or 'Message' key
# so we need to check for both.
error['Error']['Message'] = body.get('message',
body.get('Message', ''))
        # If the body did not contain an error code,
        # fall back to the response status code.
response_code = response.get('status_code')
code = body.get('__type', response_code and str(response_code))
if code is not None:
# code has a couple forms as well:
# * "com.aws.dynamodb.vAPI#ProvisionedThroughputExceededException"
# * "ResourceNotFoundException"
if '#' in code:
code = code.rsplit('#', 1)[1]
error['Error']['Code'] = code
self._inject_response_metadata(error, response['headers'])
return error
def _inject_response_metadata(self, parsed, headers):
if 'x-amzn-requestid' in headers:
parsed.setdefault('ResponseMetadata', {})['RequestId'] = (
headers['x-amzn-requestid'])
def _parse_body_as_json(self, body_contents):
if not body_contents:
return {}
body = body_contents.decode(self.DEFAULT_ENCODING)
try:
original_parsed = json.loads(body)
return original_parsed
except ValueError:
            # If the body cannot be parsed, include
            # the literal string as the message.
            return {'message': body}
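# Illustrative sketch, not part of botocore: a hypothetical JSON error
# body and the normalized structure _do_error_parse above produces.
def _example_json_error_parse():
    parser = JSONParser()  # defined later in this module
    response = {
        'status_code': 400,
        'headers': {},
        'body': b'{"__type": "com.example#NotFound", "message": "missing"}',
    }
    # The service prefix before '#' is stripped, so Error.Code here would
    # be 'NotFound' and Error.Message 'missing'.
    return parser._do_error_parse(response, shape=None)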
class BaseEventStreamParser(ResponseParser):
def _do_parse(self, response, shape):
final_parsed = {}
if shape.serialization.get('eventstream'):
event_type = response['headers'].get(':event-type')
event_shape = shape.members.get(event_type)
if event_shape:
final_parsed[event_type] = self._do_parse(response, event_shape)
else:
self._parse_non_payload_attrs(response, shape,
shape.members, final_parsed)
self._parse_payload(response, shape, shape.members, final_parsed)
return final_parsed
def _do_error_parse(self, response, shape):
exception_type = response['headers'].get(':exception-type')
exception_shape = shape.members.get(exception_type)
if exception_shape is not None:
original_parsed = self._initial_body_parse(response['body'])
body = self._parse_shape(exception_shape, original_parsed)
error = {
'Error': {
'Code': exception_type,
'Message': body.get('Message', body.get('message', ''))
}
}
else:
error = {
'Error': {
'Code': response['headers'].get(':error-code', ''),
'Message': response['headers'].get(':error-message', ''),
}
}
return error
def _parse_payload(self, response, shape, member_shapes, final_parsed):
if shape.serialization.get('event'):
for name in member_shapes:
member_shape = member_shapes[name]
if member_shape.serialization.get('eventpayload'):
body = response['body']
if member_shape.type_name == 'blob':
parsed_body = body
elif member_shape.type_name == 'string':
parsed_body = body.decode(self.DEFAULT_ENCODING)
else:
raw_parse = self._initial_body_parse(body)
parsed_body = self._parse_shape(member_shape, raw_parse)
final_parsed[name] = parsed_body
return
# If we didn't find an explicit payload, use the current shape
original_parsed = self._initial_body_parse(response['body'])
body_parsed = self._parse_shape(shape, original_parsed)
final_parsed.update(body_parsed)
def _parse_non_payload_attrs(self, response, shape,
member_shapes, final_parsed):
headers = response['headers']
for name in member_shapes:
member_shape = member_shapes[name]
if member_shape.serialization.get('eventheader'):
if name in headers:
value = headers[name]
if member_shape.type_name == 'timestamp':
                        # Event stream timestamps are in milliseconds, so we
                        # divide by 1000 to convert to seconds.
value = self._timestamp_parser(value / 1000.0)
final_parsed[name] = value
def _initial_body_parse(self, body_contents):
# This method should do the initial xml/json parsing of the
        # body. We still need to walk the parsed body in order
# to convert types, but this method will do the first round
# of parsing.
raise NotImplementedError("_initial_body_parse")
class EventStreamJSONParser(BaseEventStreamParser, BaseJSONParser):
def _initial_body_parse(self, body_contents):
return self._parse_body_as_json(body_contents)
class EventStreamXMLParser(BaseEventStreamParser, BaseXMLResponseParser):
def _initial_body_parse(self, xml_string):
if not xml_string:
return ETree.Element('')
return self._parse_xml_string_to_dom(xml_string)
class JSONParser(BaseJSONParser):
    """Response parser for the "json" protocol."""
    EVENT_STREAM_PARSER_CLS = EventStreamJSONParser
def _do_parse(self, response, shape):
parsed = {}
if shape is not None:
event_name = shape.event_stream_name
if event_name:
parsed = self._handle_event_stream(response, shape, event_name)
else:
parsed = self._handle_json_body(response['body'], shape)
self._inject_response_metadata(parsed, response['headers'])
return parsed
def _do_modeled_error_parse(self, response, shape):
return self._handle_json_body(response['body'], shape)
def _handle_event_stream(self, response, shape, event_name):
event_stream_shape = shape.members[event_name]
event_stream = self._create_event_stream(response, event_stream_shape)
try:
event = event_stream.get_initial_response()
except NoInitialResponseError:
error_msg = 'First event was not of type initial-response'
raise ResponseParserError(error_msg)
parsed = self._handle_json_body(event.payload, shape)
parsed[event_name] = event_stream
return parsed
def _handle_json_body(self, raw_body, shape):
        # json.loads() gives us the primitive JSON types, but we need
        # to traverse the parsed JSON data to convert to richer types
        # (blobs, timestamps, etc.).
parsed_json = self._parse_body_as_json(raw_body)
return self._parse_shape(shape, parsed_json)
class BaseRestParser(ResponseParser):
def _do_parse(self, response, shape):
final_parsed = {}
final_parsed['ResponseMetadata'] = self._populate_response_metadata(
response)
self._add_modeled_parse(response, shape, final_parsed)
return final_parsed
def _add_modeled_parse(self, response, shape, final_parsed):
if shape is None:
return final_parsed
member_shapes = shape.members
self._parse_non_payload_attrs(response, shape,
member_shapes, final_parsed)
self._parse_payload(response, shape, member_shapes, final_parsed)
def _do_modeled_error_parse(self, response, shape):
final_parsed = {}
self._add_modeled_parse(response, shape, final_parsed)
return final_parsed
def _populate_response_metadata(self, response):
metadata = {}
headers = response['headers']
if 'x-amzn-requestid' in headers:
metadata['RequestId'] = headers['x-amzn-requestid']
elif 'x-amz-request-id' in headers:
metadata['RequestId'] = headers['x-amz-request-id']
        # HostId is what it's called whenever this value is returned
        # in an XML response body, so to be consistent, we'll always
        # call it HostId.
metadata['HostId'] = headers.get('x-amz-id-2', '')
return metadata
def _parse_payload(self, response, shape, member_shapes, final_parsed):
if 'payload' in shape.serialization:
# If a payload is specified in the output shape, then only that
# shape is used for the body payload.
payload_member_name = shape.serialization['payload']
body_shape = member_shapes[payload_member_name]
if body_shape.serialization.get('eventstream'):
body = self._create_event_stream(response, body_shape)
final_parsed[payload_member_name] = body
elif body_shape.type_name in ['string', 'blob']:
# This is a stream
body = response['body']
if isinstance(body, bytes):
body = body.decode(self.DEFAULT_ENCODING)
final_parsed[payload_member_name] = body
else:
original_parsed = self._initial_body_parse(response['body'])
final_parsed[payload_member_name] = self._parse_shape(
body_shape, original_parsed)
else:
original_parsed = self._initial_body_parse(response['body'])
body_parsed = self._parse_shape(shape, original_parsed)
final_parsed.update(body_parsed)
def _parse_non_payload_attrs(self, response, shape,
member_shapes, final_parsed):
headers = response['headers']
for name in member_shapes:
member_shape = member_shapes[name]
location = member_shape.serialization.get('location')
if location is None:
continue
elif location == 'statusCode':
final_parsed[name] = self._parse_shape(
member_shape, response['status_code'])
elif location == 'headers':
final_parsed[name] = self._parse_header_map(member_shape,
headers)
elif location == 'header':
header_name = member_shape.serialization.get('name', name)
if header_name in headers:
final_parsed[name] = self._parse_shape(
member_shape, headers[header_name])
def _parse_header_map(self, shape, headers):
# Note that headers are case insensitive, so we .lower()
# all header names and header prefixes.
parsed = {}
prefix = shape.serialization.get('name', '').lower()
for header_name in headers:
if header_name.lower().startswith(prefix):
# The key name inserted into the parsed hash
# strips off the prefix.
name = header_name[len(prefix):]
parsed[name] = headers[header_name]
return parsed
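    # Illustrative note, not an authoritative example: with a map shape
    # whose serialization name is the (hypothetical) prefix 'x-amz-meta-',
    # a header 'x-amz-meta-color: blue' would parse to {'color': 'blue'}
    # because the prefix is stripped from the key.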
def _initial_body_parse(self, body_contents):
# This method should do the initial xml/json parsing of the
        # body. We still need to walk the parsed body in order
# to convert types, but this method will do the first round
# of parsing.
raise NotImplementedError("_initial_body_parse")
def _handle_string(self, shape, value):
parsed = value
if is_json_value_header(shape):
decoded = base64.b64decode(value).decode(self.DEFAULT_ENCODING)
parsed = json.loads(decoded)
return parsed
class RestJSONParser(BaseRestParser, BaseJSONParser):
EVENT_STREAM_PARSER_CLS = EventStreamJSONParser
def _initial_body_parse(self, body_contents):
return self._parse_body_as_json(body_contents)
def _do_error_parse(self, response, shape):
error = super(RestJSONParser, self)._do_error_parse(response, shape)
self._inject_error_code(error, response)
return error
def _inject_error_code(self, error, response):
# The "Code" value can come from either a response
# header or a value in the JSON body.
body = self._initial_body_parse(response['body'])
if 'x-amzn-errortype' in response['headers']:
code = response['headers']['x-amzn-errortype']
# Could be:
# x-amzn-errortype: ValidationException:
code = code.split(':')[0]
error['Error']['Code'] = code
elif 'code' in body or 'Code' in body:
error['Error']['Code'] = body.get(
'code', body.get('Code', ''))
class RestXMLParser(BaseRestParser, BaseXMLResponseParser):
EVENT_STREAM_PARSER_CLS = EventStreamXMLParser
def _initial_body_parse(self, xml_string):
if not xml_string:
return ETree.Element('')
return self._parse_xml_string_to_dom(xml_string)
def _do_error_parse(self, response, shape):
# We're trying to be service agnostic here, but S3 does have a slightly
# different response structure for its errors compared to other
        # rest-xml services (route53/cloudfront). We handle this by just
# trying to parse both forms.
# First:
# <ErrorResponse xmlns="...">
# <Error>
# <Type>Sender</Type>
# <Code>InvalidInput</Code>
# <Message>Invalid resource type: foo</Message>
# </Error>
# <RequestId>request-id</RequestId>
# </ErrorResponse>
if response['body']:
            # If the body ends up being invalid XML, the XML parser should
            # not blow up. It should at least try to pull information about
            # the error response from other sources like the HTTP status code.
try:
return self._parse_error_from_body(response)
except ResponseParserError as e:
LOG.debug(
'Exception caught when parsing error response body:',
exc_info=True)
return self._parse_error_from_http_status(response)
def _parse_error_from_http_status(self, response):
return {
'Error': {
'Code': str(response['status_code']),
'Message': six.moves.http_client.responses.get(
response['status_code'], ''),
},
'ResponseMetadata': {
'RequestId': response['headers'].get('x-amz-request-id', ''),
'HostId': response['headers'].get('x-amz-id-2', ''),
}
}
def _parse_error_from_body(self, response):
xml_contents = response['body']
root = self._parse_xml_string_to_dom(xml_contents)
parsed = self._build_name_to_xml_node(root)
self._replace_nodes(parsed)
if root.tag == 'Error':
# This is an S3 error response. First we'll populate the
# response metadata.
metadata = self._populate_response_metadata(response)
# The RequestId and the HostId are already in the
# ResponseMetadata, but are also duplicated in the XML
# body. We don't need these values in both places,
# we'll just remove them from the parsed XML body.
parsed.pop('RequestId', '')
parsed.pop('HostId', '')
return {'Error': parsed, 'ResponseMetadata': metadata}
elif 'RequestId' in parsed:
            # Other rest-xml services:
parsed['ResponseMetadata'] = {'RequestId': parsed.pop('RequestId')}
default = {'Error': {'Message': '', 'Code': ''}}
merge_dicts(default, parsed)
return default
@_text_content
def _handle_string(self, shape, text):
text = super(RestXMLParser, self)._handle_string(shape, text)
return text
PROTOCOL_PARSERS = {
'ec2': EC2QueryParser,
'query': QueryParser,
'json': JSONParser,
'rest-json': RestJSONParser,
'rest-xml': RestXMLParser,
}
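# Illustrative sketch, not part of botocore's public API: a service
# model's protocol string selects the parser class.
def _example_select_parser(protocol='query'):
    parser_cls = PROTOCOL_PARSERS[protocol]
    return parser_cls()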
| 42,301 | Python | 40.431929 | 80 | 0.574431 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/retries/throttling.py | from collections import namedtuple
CubicParams = namedtuple('CubicParams', ['w_max', 'k', 'last_fail'])
class CubicCalculator(object):
_SCALE_CONSTANT = 0.4
_BETA = 0.7
def __init__(self, starting_max_rate,
start_time,
scale_constant=_SCALE_CONSTANT, beta=_BETA):
self._w_max = starting_max_rate
self._scale_constant = scale_constant
self._beta = beta
self._k = self._calculate_zero_point()
self._last_fail = start_time
def _calculate_zero_point(self):
k = ((self._w_max * (1 - self._beta)) / self._scale_constant) ** (1 / 3.0)
return k
def success_received(self, timestamp):
dt = timestamp - self._last_fail
new_rate = (
self._scale_constant * (dt - self._k) ** 3 + self._w_max
)
return new_rate
def error_received(self, current_rate, timestamp):
# Consider not having this be the current measured rate.
# We have a new max rate, which is the current rate we were sending
# at when we received an error response.
self._w_max = current_rate
self._k = self._calculate_zero_point()
self._last_fail = timestamp
return current_rate * self._beta
def get_params_snapshot(self):
"""Return a read-only object of the current cubic parameters.
These parameters are intended to be used for debug/troubleshooting
        purposes. This object is a read-only snapshot and cannot be used
to modify the behavior of the CUBIC calculations.
New parameters may be added to this object in the future.
"""
return CubicParams(
w_max=self._w_max,
k=self._k,
last_fail=self._last_fail
)
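# Illustrative sketch, not part of botocore: driving the CUBIC curve with
# hypothetical timestamps (seconds).
def _example_cubic():
    calc = CubicCalculator(starting_max_rate=10.0, start_time=0.0)
    # On error the rate is cut multiplicatively: 10.0 * 0.7 == 7.0.
    reduced = calc.error_received(current_rate=10.0, timestamp=1.0)
    # Subsequent successes grow the rate back along the cubic curve
    # toward (and eventually past) the previous w_max of 10.0.
    recovered = calc.success_received(timestamp=5.0)
    return reduced, recovered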
| 1,788 | Python | 31.527272 | 82 | 0.595638 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/retries/quota.py | """Retry quota implementation.
"""
import threading
class RetryQuota(object):
INITIAL_CAPACITY = 500
def __init__(self, initial_capacity=INITIAL_CAPACITY, lock=None):
self._max_capacity = initial_capacity
self._available_capacity = initial_capacity
if lock is None:
lock = threading.Lock()
self._lock = lock
def acquire(self, capacity_amount):
"""Attempt to aquire a certain amount of capacity.
If there's not sufficient amount of capacity available, ``False``
is returned. Otherwise, ``True`` is returned, which indicates that
capacity was successfully allocated.
"""
# The acquire() is only called when we encounter a retryable
# response so we aren't worried about locking the entire method.
with self._lock:
if capacity_amount > self._available_capacity:
return False
self._available_capacity -= capacity_amount
return True
def release(self, capacity_amount):
"""Release capacity back to the retry quota.
The capacity being released will be truncated if necessary
to ensure the max capacity is never exceeded.
"""
# Implementation note: The release() method is called as part
# of the "after-call" event, which means it gets invoked for
# every API call. In the common case where the request is
# successful and we're at full capacity, we can avoid locking.
# We can't exceed max capacity so there's no work we have to do.
if self._max_capacity == self._available_capacity:
return
with self._lock:
amount = min(
self._max_capacity - self._available_capacity,
capacity_amount
)
self._available_capacity += amount
@property
def available_capacity(self):
return self._available_capacity
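# Illustrative sketch, not part of botocore: the acquire/release pairing.
# The amount of 5 mirrors the retry cost used by the standard retry
# handler (an assumption; see botocore/retries/standard.py).
def _example_quota():
    q = RetryQuota()
    if q.acquire(5):   # pay capacity to attempt a retry
        q.release(5)   # refund it once a successful response arrives
    return q.available_capacity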
| 1,963 | Python | 32.862068 | 75 | 0.621498 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/retries/base.py | class BaseRetryBackoff(object):
def delay_amount(self, context):
"""Calculate how long we should delay before retrying.
:type context: RetryContext
"""
raise NotImplementedError("delay_amount")
class BaseRetryableChecker(object):
"""Base class for determining if a retry should happen.
This base class checks for specific retryable conditions.
A single retryable checker doesn't necessarily indicate a retry
will happen. It's up to the ``RetryPolicy`` to use its
``BaseRetryableCheckers`` to make the final decision on whether a retry
should happen.
"""
def is_retryable(self, context):
"""Returns True if retryable, False if not.
:type context: RetryContext
"""
raise NotImplementedError("is_retryable") | 813 | Python | 29.148147 | 75 | 0.681427 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/retries/adaptive.py | import math
import logging
import threading
from botocore.retries import bucket
from botocore.retries import throttling
from botocore.retries import standard
logger = logging.getLogger(__name__)
def register_retry_handler(client):
clock = bucket.Clock()
rate_adjustor = throttling.CubicCalculator(starting_max_rate=0,
start_time=clock.current_time())
token_bucket = bucket.TokenBucket(max_rate=1, clock=clock)
rate_clocker = RateClocker(clock)
throttling_detector = standard.ThrottlingErrorDetector(
retry_event_adapter=standard.RetryEventAdapter(),
)
limiter = ClientRateLimiter(
rate_adjustor=rate_adjustor,
rate_clocker=rate_clocker,
token_bucket=token_bucket,
throttling_detector=throttling_detector,
clock=clock,
)
client.meta.events.register(
'before-send', limiter.on_sending_request,
)
client.meta.events.register(
'needs-retry', limiter.on_receiving_response,
)
return limiter
class ClientRateLimiter(object):
_MAX_RATE_ADJUST_SCALE = 2.0
def __init__(self, rate_adjustor, rate_clocker, token_bucket,
throttling_detector, clock):
self._rate_adjustor = rate_adjustor
self._rate_clocker = rate_clocker
self._token_bucket = token_bucket
self._throttling_detector = throttling_detector
self._clock = clock
self._enabled = False
self._lock = threading.Lock()
def on_sending_request(self, request, **kwargs):
if self._enabled:
self._token_bucket.acquire()
# Hooked up to needs-retry.
def on_receiving_response(self, **kwargs):
measured_rate = self._rate_clocker.record()
timestamp = self._clock.current_time()
with self._lock:
if not self._throttling_detector.is_throttling_error(**kwargs):
throttling = False
new_rate = self._rate_adjustor.success_received(timestamp)
else:
throttling = True
if not self._enabled:
rate_to_use = measured_rate
else:
rate_to_use = min(measured_rate, self._token_bucket.max_rate)
new_rate = self._rate_adjustor.error_received(
rate_to_use, timestamp)
logger.debug("Throttling response received, new send rate: %s "
"measured rate: %s, token bucket capacity "
"available: %s", new_rate, measured_rate,
self._token_bucket.available_capacity)
self._enabled = True
self._token_bucket.max_rate = min(
new_rate, self._MAX_RATE_ADJUST_SCALE * measured_rate)
class RateClocker(object):
"""Tracks the rate at which a client is sending a request."""
_DEFAULT_SMOOTHING = 0.8
# Update the rate every _TIME_BUCKET_RANGE seconds.
_TIME_BUCKET_RANGE = 0.5
def __init__(self, clock, smoothing=_DEFAULT_SMOOTHING,
time_bucket_range=_TIME_BUCKET_RANGE):
self._clock = clock
self._measured_rate = 0
self._smoothing = smoothing
self._last_bucket = math.floor(self._clock.current_time())
self._time_bucket_scale = 1 / self._TIME_BUCKET_RANGE
self._count = 0
self._lock = threading.Lock()
def record(self, amount=1):
with self._lock:
t = self._clock.current_time()
bucket = math.floor(
t * self._time_bucket_scale) / self._time_bucket_scale
self._count += amount
if bucket > self._last_bucket:
current_rate = self._count / float(
bucket - self._last_bucket)
self._measured_rate = (
(current_rate * self._smoothing) +
(self._measured_rate * (1 - self._smoothing))
)
self._count = 0
self._last_bucket = bucket
return self._measured_rate
@property
def measured_rate(self):
return self._measured_rate
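# Illustrative sketch, not part of botocore: observing RateClocker's
# smoothing with a controllable fake clock (a test-only stand-in).
class _ExampleFakeClock(object):
    def __init__(self):
        self.now = 0.0
    def current_time(self):
        return self.now
def _example_rate_clocker():
    clock = _ExampleFakeClock()
    clocker = RateClocker(clock)
    for _ in range(10):   # burst of requests in the first half-second bucket
        clocker.record()
    clock.now = 0.6       # cross into the next time bucket
    # The first record() in the new bucket folds the rate observed over
    # the first bucket into the smoothed measured_rate.
    return clocker.record()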
| 4,191 | Python | 34.525423 | 81 | 0.580768 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/retries/__init__.py | """New retry v2 handlers.
This package obsoletes the botocore/retryhandler.py module and contains
new retry logic.
"""
| 121 | Python | 16.428569 | 71 | 0.768595 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/retries/bucket.py | """This module implements token buckets used for client side throttling."""
import time
import threading
from botocore.exceptions import CapacityNotAvailableError
class Clock(object):
def __init__(self):
pass
def sleep(self, amount):
time.sleep(amount)
def current_time(self):
return time.time()
class TokenBucket(object):
_MIN_RATE = 0.5
def __init__(self, max_rate, clock, min_rate=_MIN_RATE):
self._fill_rate = None
self._max_capacity = None
self._current_capacity = 0
self._clock = clock
self._last_timestamp = None
self._min_rate = min_rate
self._lock = threading.Lock()
self._new_fill_rate_condition = threading.Condition(self._lock)
self.max_rate = max_rate
@property
def max_rate(self):
return self._fill_rate
@max_rate.setter
def max_rate(self, value):
with self._new_fill_rate_condition:
# Before we can change the rate we need to fill any pending
# tokens we might have based on the current rate. If we don't
# do this it means everything since the last recorded timestamp
# will accumulate at the rate we're about to set which isn't
# correct.
self._refill()
self._fill_rate = max(value, self._min_rate)
if value >= 1:
self._max_capacity = value
else:
self._max_capacity = 1
# If we're scaling down, we also can't have a capacity that's
# more than our max_capacity.
self._current_capacity = min(self._current_capacity,
self._max_capacity)
self._new_fill_rate_condition.notify()
@property
def max_capacity(self):
return self._max_capacity
@property
def available_capacity(self):
return self._current_capacity
def acquire(self, amount=1, block=True):
"""Acquire token or return amount of time until next token available.
If block is True, then this method will block until there's sufficient
capacity to acquire the desired amount.
        If block is False, then this method will return True if capacity
        was successfully acquired; otherwise a ``CapacityNotAvailableError``
        is raised.
"""
with self._new_fill_rate_condition:
return self._acquire(amount=amount, block=block)
def _acquire(self, amount, block):
self._refill()
if amount <= self._current_capacity:
self._current_capacity -= amount
return True
else:
if not block:
raise CapacityNotAvailableError()
# Not enough capacity.
sleep_amount = self._sleep_amount(amount)
while sleep_amount > 0:
                # Before Python 3.2, wait() always returned None, so we
                # can't tell if a timeout occurred while waiting on the
                # condition variable. Because of this we unconditionally
                # call _refill(). The downside is that if we were woken up
                # via a notify(), we end up calling _refill() one extra
                # time unnecessarily.
self._new_fill_rate_condition.wait(sleep_amount)
self._refill()
sleep_amount = self._sleep_amount(amount)
self._current_capacity -= amount
return True
def _sleep_amount(self, amount):
return (amount - self._current_capacity) / self._fill_rate
def _refill(self):
timestamp = self._clock.current_time()
if self._last_timestamp is None:
self._last_timestamp = timestamp
return
current_capacity = self._current_capacity
fill_amount = (timestamp - self._last_timestamp) * self._fill_rate
new_capacity = min(self._max_capacity, current_capacity + fill_amount)
self._current_capacity = new_capacity
self._last_timestamp = timestamp
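# Illustrative sketch, not part of botocore: non-blocking acquisition.
# Note that with block=False an unsatisfiable acquire raises
# CapacityNotAvailableError rather than returning False.
def _example_token_bucket():
    token_bucket = TokenBucket(max_rate=10, clock=Clock())
    try:
        return token_bucket.acquire(amount=1, block=False)
    except CapacityNotAvailableError:
        return False  # no capacity has accrued yet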
| 4,020 | Python | 33.965217 | 78 | 0.590299 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/retries/standard.py | """Standard retry behavior.
This contains the default standard retry behavior.
It provides consistent behavior with other AWS SDKs.
The key base classes used for retries:
* ``BaseRetryableChecker`` - Use to check a specific condition that
indicates a retry should happen. This can include things like
max attempts, HTTP status code checks, error code checks etc.
* ``RetryBackoff`` - Use to determine how long we should backoff until
we retry a request. This is the class that will implement delay such
as exponential backoff.
* ``RetryPolicy`` - Main class that determines if a retry should
happen. It can combine data from a various BaseRetryableCheckers
to make a final call as to whether or not a retry should happen.
It then uses a ``BaseRetryBackoff`` to determine how long to delay.
* ``RetryHandler`` - The bridge between botocore's event system
used by endpoint.py to manage retries and the interfaces defined
in this module.
This allows us to define an API that has minimal coupling to the event
based API used by botocore.
"""
import random
import logging
from botocore.exceptions import ConnectionError, HTTPClientError
from botocore.exceptions import ReadTimeoutError, ConnectTimeoutError
from botocore.retries import quota
from botocore.retries import special
from botocore.retries.base import BaseRetryBackoff, BaseRetryableChecker
DEFAULT_MAX_ATTEMPTS = 3
logger = logging.getLogger(__name__)
def register_retry_handler(client, max_attempts=DEFAULT_MAX_ATTEMPTS):
retry_quota = RetryQuotaChecker(quota.RetryQuota())
service_id = client.meta.service_model.service_id
service_event_name = service_id.hyphenize()
client.meta.events.register('after-call.%s' % service_event_name,
retry_quota.release_retry_quota)
handler = RetryHandler(
retry_policy=RetryPolicy(
retry_checker=StandardRetryConditions(max_attempts=max_attempts),
retry_backoff=ExponentialBackoff(),
),
retry_event_adapter=RetryEventAdapter(),
retry_quota=retry_quota,
)
unique_id = 'retry-config-%s' % service_event_name
client.meta.events.register(
'needs-retry.%s' % service_event_name, handler.needs_retry,
unique_id=unique_id
)
return handler
class RetryHandler(object):
"""Bridge between botocore's event system and this module.
This class is intended to be hooked to botocore's event system
as an event handler.
"""
def __init__(self, retry_policy, retry_event_adapter, retry_quota):
self._retry_policy = retry_policy
self._retry_event_adapter = retry_event_adapter
self._retry_quota = retry_quota
def needs_retry(self, **kwargs):
"""Connect as a handler to the needs-retry event."""
retry_delay = None
context = self._retry_event_adapter.create_retry_context(**kwargs)
if self._retry_policy.should_retry(context):
# Before we can retry we need to ensure we have sufficient
# capacity in our retry quota.
if self._retry_quota.acquire_retry_quota(context):
retry_delay = self._retry_policy.compute_retry_delay(context)
logger.debug("Retry needed, retrying request after "
"delay of: %s", retry_delay)
else:
logger.debug("Retry needed but retry quota reached, "
"not retrying request.")
else:
logger.debug("Not retrying request.")
self._retry_event_adapter.adapt_retry_response_from_context(
context)
return retry_delay
class RetryEventAdapter(object):
"""Adapter to existing retry interface used in the endpoints layer.
This existing interface for determining if a retry needs to happen
is event based and used in ``botocore.endpoint``. The interface has
grown organically over the years and could use some cleanup. This
adapter converts that interface into the interface used by the
new retry strategies.
"""
def create_retry_context(self, **kwargs):
"""Create context based on needs-retry kwargs."""
response = kwargs['response']
if response is None:
# If response is None it means that an exception was raised
# because we never received a response from the service. This
# could be something like a ConnectionError we get from our
# http layer.
http_response = None
parsed_response = None
else:
http_response, parsed_response = response
# This provides isolation between the kwargs emitted in the
# needs-retry event, and what this module uses to check for
# retries.
context = RetryContext(
attempt_number=kwargs['attempts'],
operation_model=kwargs['operation'],
http_response=http_response,
parsed_response=parsed_response,
caught_exception=kwargs['caught_exception'],
request_context=kwargs['request_dict']['context'],
)
return context
def adapt_retry_response_from_context(self, context):
"""Modify response back to user back from context."""
# This will mutate attributes that are returned back to the end
# user. We do it this way so that all the various retry classes
# don't mutate any input parameters from the needs-retry event.
metadata = context.get_retry_metadata()
if context.parsed_response is not None:
context.parsed_response.setdefault(
'ResponseMetadata', {}).update(metadata)
# Implementation note: this is meant to encapsulate all the misc stuff
# that gets sent in the needs-retry event. This is mapped so that params
# are more clear and explicit.
class RetryContext(object):
"""Normalize a response that we use to check if a retry should occur.
This class smoothes over the different types of responses we may get
from a service including:
* A modeled error response from the service that contains a service
code and error message.
* A raw HTTP response that doesn't contain service protocol specific
error keys.
* An exception received while attempting to retrieve a response.
This could be a ConnectionError we receive from our HTTP layer which
could represent that we weren't able to receive a response from
the service.
This class guarantees that at least one of the above attributes will be
non None.
This class is meant to provide a read-only view into the properties
associated with a possible retryable response. None of the properties
are meant to be modified directly.
"""
def __init__(self, attempt_number, operation_model=None,
parsed_response=None, http_response=None,
caught_exception=None, request_context=None):
# 1-based attempt number.
self.attempt_number = attempt_number
self.operation_model = operation_model
# This is the parsed response dictionary we get from parsing
# the HTTP response from the service.
self.parsed_response = parsed_response
# This is an instance of botocore.awsrequest.AWSResponse.
self.http_response = http_response
# This is a subclass of Exception that will be non None if
# an exception was raised when retrying to retrieve a response.
self.caught_exception = caught_exception
# This is the request context dictionary that's added to the
        # request dict. This is used to store any additional state
# about the request. We use this for storing retry quota
# capacity.
if request_context is None:
request_context = {}
self.request_context = request_context
self._retry_metadata = {}
# These are misc helper methods to avoid duplication in the various
# checkers.
def get_error_code(self):
"""Check if there was a parsed response with an error code.
If we could not find any error codes, ``None`` is returned.
"""
if self.parsed_response is None:
return
error = self.parsed_response.get('Error', {})
if not isinstance(error, dict):
return
return error.get('Code')
def add_retry_metadata(self, **kwargs):
"""Add key/value pairs to the retry metadata.
This allows any objects during the retry process to add
metadata about any checks/validations that happened.
This gets added to the response metadata in the retry handler.
"""
self._retry_metadata.update(**kwargs)
def get_retry_metadata(self):
return self._retry_metadata.copy()
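# Illustrative sketch, not part of botocore: get_error_code() reads the
# standard parsed-error layout. The response dict below is hypothetical.
def _example_error_code():
    context = RetryContext(
        attempt_number=1,
        parsed_response={'Error': {'Code': 'Throttling', 'Message': ''}},
    )
    return context.get_error_code()  # 'Throttling'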
class RetryPolicy(object):
def __init__(self, retry_checker, retry_backoff):
self._retry_checker = retry_checker
self._retry_backoff = retry_backoff
def should_retry(self, context):
return self._retry_checker.is_retryable(context)
def compute_retry_delay(self, context):
return self._retry_backoff.delay_amount(context)
class ExponentialBackoff(BaseRetryBackoff):
_BASE = 2
_MAX_BACKOFF = 20
def __init__(self, max_backoff=20, random=random.random):
self._base = self._BASE
self._max_backoff = max_backoff
self._random = random
def delay_amount(self, context):
"""Calculates delay based on exponential backoff.
This class implements truncated binary exponential backoff
with jitter::
t_i = min(rand(0, 1) * 2 ** attempt, MAX_BACKOFF)
where ``i`` is the request attempt (0 based).
"""
# The context.attempt_number is a 1-based value, but we have
        # to calculate the delay based on a 0-based value. We
# want the first delay to just be ``rand(0, 1)``.
return min(
self._random() * (self._base ** (context.attempt_number - 1)),
self._max_backoff
)
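# Illustrative sketch, not part of botocore: pinning the random draw to
# its maximum shows the delay ceiling per attempt.
def _example_backoff_delays():
    backoff = ExponentialBackoff(random=lambda: 1.0)
    # min(1.0 * 2 ** (attempt - 1), 20) gives 1, 2, 4, 8, 16, 20, 20, ...
    return [backoff.delay_amount(RetryContext(attempt_number=i))
            for i in range(1, 8)]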
class MaxAttemptsChecker(BaseRetryableChecker):
def __init__(self, max_attempts):
self._max_attempts = max_attempts
def is_retryable(self, context):
under_max_attempts = context.attempt_number < self._max_attempts
if not under_max_attempts:
logger.debug("Max attempts of %s reached.", self._max_attempts)
context.add_retry_metadata(MaxAttemptsReached=True)
return under_max_attempts
class TransientRetryableChecker(BaseRetryableChecker):
_TRANSIENT_ERROR_CODES = [
'RequestTimeout',
'RequestTimeoutException',
'PriorRequestNotComplete',
]
_TRANSIENT_STATUS_CODES = [500, 502, 503, 504]
_TRANSIENT_EXCEPTION_CLS = (
ConnectionError,
HTTPClientError,
)
def __init__(self, transient_error_codes=None,
transient_status_codes=None,
transient_exception_cls=None):
if transient_error_codes is None:
transient_error_codes = self._TRANSIENT_ERROR_CODES[:]
if transient_status_codes is None:
transient_status_codes = self._TRANSIENT_STATUS_CODES[:]
if transient_exception_cls is None:
transient_exception_cls = self._TRANSIENT_EXCEPTION_CLS
self._transient_error_codes = transient_error_codes
self._transient_status_codes = transient_status_codes
self._transient_exception_cls = transient_exception_cls
def is_retryable(self, context):
if context.get_error_code() in self._transient_error_codes:
return True
if context.http_response is not None:
if context.http_response.status_code in \
self._transient_status_codes:
return True
if context.caught_exception is not None:
return isinstance(context.caught_exception,
self._transient_exception_cls)
return False
class ThrottledRetryableChecker(BaseRetryableChecker):
# This is the union of all error codes we've seen that represent
# a throttled error.
_THROTTLED_ERROR_CODES = [
'Throttling',
'ThrottlingException',
'ThrottledException',
'RequestThrottledException',
'TooManyRequestsException',
'ProvisionedThroughputExceededException',
'TransactionInProgressException',
'RequestLimitExceeded',
'BandwidthLimitExceeded',
'LimitExceededException',
'RequestThrottled',
'SlowDown',
'PriorRequestNotComplete',
'EC2ThrottledException',
]
def __init__(self, throttled_error_codes=None):
if throttled_error_codes is None:
throttled_error_codes = self._THROTTLED_ERROR_CODES[:]
self._throttled_error_codes = throttled_error_codes
def is_retryable(self, context):
# Only the error code from a parsed service response is used
# to determine if the response is a throttled response.
return context.get_error_code() in self._throttled_error_codes
class ModeledRetryableChecker(BaseRetryableChecker):
"""Check if an error has been modeled as retryable."""
def __init__(self):
self._error_detector = ModeledRetryErrorDetector()
def is_retryable(self, context):
error_code = context.get_error_code()
if error_code is None:
return False
return self._error_detector.detect_error_type(context) is not None
class ModeledRetryErrorDetector(object):
"""Checks whether or not an error is a modeled retryable error."""
    # These are the return values from the detect_error_type() method.
TRANSIENT_ERROR = 'TRANSIENT_ERROR'
THROTTLING_ERROR = 'THROTTLING_ERROR'
# This class is lower level than ModeledRetryableChecker, which
# implements BaseRetryableChecker. This object allows you to distinguish
# between the various types of retryable errors.
def detect_error_type(self, context):
"""Detect the error type associated with an error code and model.
This will either return:
* ``self.TRANSIENT_ERROR`` - If the error is a transient error
* ``self.THROTTLING_ERROR`` - If the error is a throttling error
* ``None`` - If the error is neither type of error.
"""
error_code = context.get_error_code()
op_model = context.operation_model
if op_model is None or not op_model.error_shapes:
return
for shape in op_model.error_shapes:
if shape.metadata.get('retryable') is not None:
# Check if this error code matches the shape. This can
# be either by name or by a modeled error code.
error_code_to_check = (
shape.metadata.get('error', {}).get('code')
or shape.name
)
if error_code == error_code_to_check:
if shape.metadata['retryable'].get('throttling'):
return self.THROTTLING_ERROR
return self.TRANSIENT_ERROR
class ThrottlingErrorDetector(object):
def __init__(self, retry_event_adapter):
self._modeled_error_detector = ModeledRetryErrorDetector()
self._fixed_error_code_detector = ThrottledRetryableChecker()
self._retry_event_adapter = retry_event_adapter
# This expects the kwargs from needs-retry to be passed through.
def is_throttling_error(self, **kwargs):
context = self._retry_event_adapter.create_retry_context(**kwargs)
if self._fixed_error_code_detector.is_retryable(context):
return True
error_type = self._modeled_error_detector.detect_error_type(context)
return error_type == self._modeled_error_detector.THROTTLING_ERROR
class StandardRetryConditions(BaseRetryableChecker):
"""Concrete class that implements the standard retry policy checks.
Specifically:
not max_attempts and (transient or throttled or modeled_retry)
"""
def __init__(self, max_attempts=DEFAULT_MAX_ATTEMPTS):
# Note: This class is for convenience so you can have the
# standard retry condition in a single class.
self._max_attempts_checker = MaxAttemptsChecker(max_attempts)
self._additional_checkers = OrRetryChecker([
TransientRetryableChecker(),
ThrottledRetryableChecker(),
ModeledRetryableChecker(),
OrRetryChecker([
special.RetryIDPCommunicationError(),
special.RetryDDBChecksumError(),
])
])
def is_retryable(self, context):
return (self._max_attempts_checker.is_retryable(context) and
self._additional_checkers.is_retryable(context))
class OrRetryChecker(BaseRetryableChecker):
def __init__(self, checkers):
self._checkers = checkers
def is_retryable(self, context):
return any(checker.is_retryable(context) for checker in self._checkers)
class RetryQuotaChecker(object):
_RETRY_COST = 5
_NO_RETRY_INCREMENT = 1
_TIMEOUT_RETRY_REQUEST = 10
_TIMEOUT_EXCEPTIONS = (ConnectTimeoutError, ReadTimeoutError)
# Implementation note: We're not making this a BaseRetryableChecker
# because this isn't just a check if we can retry. This also changes
    # state so we have to be careful when/how we call this. Making it
# a BaseRetryableChecker implies you can call .is_retryable(context)
# as many times as you want and not affect anything.
def __init__(self, quota):
self._quota = quota
        # This tracks the last amount of capacity acquired.
self._last_amount_acquired = None
def acquire_retry_quota(self, context):
if self._is_timeout_error(context):
capacity_amount = self._TIMEOUT_RETRY_REQUEST
else:
capacity_amount = self._RETRY_COST
success = self._quota.acquire(capacity_amount)
if success:
# We add the capacity amount to the request context so we know
# how much to release later. The capacity amount can vary based
# on the error.
context.request_context['retry_quota_capacity'] = capacity_amount
return True
context.add_retry_metadata(RetryQuotaReached=True)
return False
def _is_timeout_error(self, context):
return isinstance(context.caught_exception, self._TIMEOUT_EXCEPTIONS)
# This is intended to be hooked up to ``after-call``.
def release_retry_quota(self, context, http_response, **kwargs):
        # There are three possible options:
# 1. The HTTP response did not have a 2xx response. In that case we
# give no quota back.
# 2. The HTTP request was successful and was never retried. In
# that case we give _NO_RETRY_INCREMENT back.
# 3. The API call had retries, and we eventually receive an HTTP
# response with a 2xx status code. In that case we give back
# whatever quota was associated with the last acquisition.
if http_response is None:
return
status_code = http_response.status_code
if 200 <= status_code < 300:
if 'retry_quota_capacity' not in context:
self._quota.release(self._NO_RETRY_INCREMENT)
else:
capacity_amount = context['retry_quota_capacity']
self._quota.release(capacity_amount)
| 19,655 | Python | 38.390781 | 79 | 0.651793 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/retries/special.py | """Special cased retries.
These are additional retry cases we still have to handle from the legacy
retry handler. They don't make sense as part of the standard mode retry
module. Ideally we should be able to remove this module.
"""
import logging
from binascii import crc32
from botocore.retries.base import BaseRetryableChecker
logger = logging.getLogger(__name__)
# TODO: This is an ideal candidate for the retryable trait once that's
# available.
class RetryIDPCommunicationError(BaseRetryableChecker):
_SERVICE_NAME = 'sts'
def is_retryable(self, context):
service_name = context.operation_model.service_model.service_name
if service_name != self._SERVICE_NAME:
return False
error_code = context.get_error_code()
return error_code == 'IDPCommunicationError'
class RetryDDBChecksumError(BaseRetryableChecker):
_CHECKSUM_HEADER = 'x-amz-crc32'
_SERVICE_NAME = 'dynamodb'
def is_retryable(self, context):
service_name = context.operation_model.service_model.service_name
if service_name != self._SERVICE_NAME:
return False
if context.http_response is None:
return False
checksum = context.http_response.headers.get(self._CHECKSUM_HEADER)
if checksum is None:
return False
actual_crc32 = crc32(context.http_response.content) & 0xffffffff
if actual_crc32 != int(checksum):
logger.debug("DynamoDB crc32 checksum does not match, "
"expected: %s, actual: %s", checksum, actual_crc32)
return True
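# Illustrative sketch, not part of botocore: the checksum compared above
# is a plain CRC32 of the response body, masked to an unsigned 32-bit
# value. The body below is hypothetical.
def _example_crc32():
    body = b'{"Item": {}}'
    return crc32(body) & 0xffffffff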
| 1,611 | Python | 31.897959 | 76 | 0.680323 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/sharedexample.py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
import numbers
from botocore.utils import parse_timestamp
from botocore.docs.utils import escape_controls
from botocore.compat import six
class SharedExampleDocumenter(object):
def document_shared_example(self, example, prefix, section,
operation_model):
"""Documents a single shared example based on its definition.
:param example: The model of the example
:param prefix: The prefix to use in the method example.
:param section: The section to write to.
:param operation_model: The model of the operation used in the example
"""
section.style.new_paragraph()
section.write(example.get('description'))
section.style.new_line()
self.document_input(section, example, prefix,
operation_model.input_shape)
self.document_output(section, example, operation_model.output_shape)
def document_input(self, section, example, prefix, shape):
input_section = section.add_new_section('input')
input_section.style.start_codeblock()
if prefix is not None:
input_section.write(prefix)
params = example.get('input', {})
comments = example.get('comments')
if comments:
comments = comments.get('input')
param_section = input_section.add_new_section('parameters')
self._document_params(param_section, params, comments, [], shape)
closing_section = input_section.add_new_section('input-close')
closing_section.style.new_line()
closing_section.style.new_line()
closing_section.write('print(response)')
closing_section.style.end_codeblock()
def document_output(self, section, example, shape):
output_section = section.add_new_section('output')
output_section.style.new_line()
output_section.write('Expected Output:')
output_section.style.new_line()
output_section.style.start_codeblock()
params = example.get('output', {})
# There might not be an output, but we will return metadata anyway
params['ResponseMetadata'] = {"...": "..."}
comments = example.get('comments')
if comments:
comments = comments.get('output')
self._document_dict(output_section, params, comments, [], shape, True)
closing_section = output_section.add_new_section('output-close')
closing_section.style.end_codeblock()
def _document(self, section, value, comments, path, shape):
"""
:param section: The section to add the docs to.
:param value: The input / output values representing the parameters that
are included in the example.
:param comments: The dictionary containing all the comments to be
applied to the example.
:param path: A list describing where the documenter is in traversing the
parameters. This is used to find the equivalent location
in the comments dictionary.
"""
if isinstance(value, dict):
self._document_dict(section, value, comments, path, shape)
elif isinstance(value, list):
self._document_list(section, value, comments, path, shape)
elif isinstance(value, numbers.Number):
self._document_number(section, value, path)
elif shape and shape.type_name == 'timestamp':
self._document_datetime(section, value, path)
else:
self._document_str(section, value, path)
def _document_dict(self, section, value, comments, path, shape,
top_level=False):
dict_section = section.add_new_section('dict-value')
self._start_nested_value(dict_section, '{')
for key, val in value.items():
path.append('.%s' % key)
item_section = dict_section.add_new_section(key)
item_section.style.new_line()
item_comment = self._get_comment(path, comments)
if item_comment:
item_section.write(item_comment)
item_section.style.new_line()
item_section.write("'%s': " % key)
            # Shape could be None if there is no output besides ResponseMetadata.
item_shape = None
if shape:
if shape.type_name == 'structure':
item_shape = shape.members.get(key)
elif shape.type_name == 'map':
item_shape = shape.value
self._document(item_section, val, comments, path, item_shape)
path.pop()
dict_section_end = dict_section.add_new_section('ending-brace')
self._end_nested_value(dict_section_end, '}')
if not top_level:
dict_section_end.write(',')
def _document_params(self, section, value, comments, path, shape):
param_section = section.add_new_section('param-values')
self._start_nested_value(param_section, '(')
for key, val in value.items():
path.append('.%s' % key)
item_section = param_section.add_new_section(key)
item_section.style.new_line()
item_comment = self._get_comment(path, comments)
if item_comment:
item_section.write(item_comment)
item_section.style.new_line()
item_section.write(key + '=')
# Shape could be None if there are no input parameters
item_shape = None
if shape:
item_shape = shape.members.get(key)
self._document(item_section, val, comments, path, item_shape)
path.pop()
param_section_end = param_section.add_new_section('ending-parenthesis')
self._end_nested_value(param_section_end, ')')
def _document_list(self, section, value, comments, path, shape):
list_section = section.add_new_section('list-section')
self._start_nested_value(list_section, '[')
item_shape = shape.member
for index, val in enumerate(value):
item_section = list_section.add_new_section(index)
item_section.style.new_line()
path.append('[%s]' % index)
item_comment = self._get_comment(path, comments)
if item_comment:
item_section.write(item_comment)
item_section.style.new_line()
self._document(item_section, val, comments, path, item_shape)
path.pop()
list_section_end = list_section.add_new_section('ending-bracket')
self._end_nested_value(list_section_end, '],')
def _document_str(self, section, value, path):
# We do the string conversion because this might accept a type that
# we don't specifically address.
safe_value = escape_controls(value)
section.write(u"'%s'," % six.text_type(safe_value))
def _document_number(self, section, value, path):
section.write("%s," % str(value))
def _document_datetime(self, section, value, path):
datetime_tuple = parse_timestamp(value).timetuple()
datetime_str = str(datetime_tuple[0])
for i in range(1, len(datetime_tuple)):
datetime_str += ", " + str(datetime_tuple[i])
section.write("datetime(%s)," % datetime_str)
def _get_comment(self, path, comments):
key = re.sub(r'^\.', '', ''.join(path))
if comments and key in comments:
return '# ' + comments[key]
else:
return ''
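# Illustrative sketch (hypothetical path): the traversal path
# ['.Users', '[0]', '.Name'] joins to '.Users[0].Name'; the leading dot is
# stripped, so the comment is looked up under the key 'Users[0].Name':
#
#     comments = {'Users[0].Name': 'The first user name'}
#     # -> returns '# The first user name'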
def _start_nested_value(self, section, start):
section.write(start)
section.style.indent()
section.style.indent()
def _end_nested_value(self, section, end):
section.style.dedent()
section.style.dedent()
section.style.new_line()
section.write(end)
def document_shared_examples(section, operation_model, example_prefix,
shared_examples):
"""Documents the shared examples
:param section: The section to write to.
:param operation_model: The model of the operation.
:param example_prefix: The prefix to use in the method example.
:param shared_examples: The shared JSON examples from the model.
"""
container_section = section.add_new_section('shared-examples')
container_section.style.new_paragraph()
container_section.style.bold('Examples')
documenter = SharedExampleDocumenter()
for example in shared_examples:
documenter.document_shared_example(
example=example,
section=container_section.add_new_section(example['id']),
prefix=example_prefix,
operation_model=operation_model
)
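# Illustrative sketch (hypothetical entry): each shared example is a dict
# shaped roughly like the entries consumed above:
#
#     {
#         'id': 'to-create-a-bucket-example',
#         'description': 'Creates a bucket.',
#         'input': {'Bucket': 'mybucket'},
#         'output': {'Location': '/mybucket'},
#         'comments': {'input': {}, 'output': {}},
#     }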
| 9,326 | Python | 40.638393 | 80 | 0.615698 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/method.py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import inspect
from botocore.docs.params import RequestParamsDocumenter
from botocore.docs.params import ResponseParamsDocumenter
from botocore.docs.example import ResponseExampleDocumenter
from botocore.docs.example import RequestExampleDocumenter
AWS_DOC_BASE = 'https://docs.aws.amazon.com/goto/WebAPI'
def get_instance_public_methods(instance):
"""Retrieves an objects public methods
:param instance: The instance of the class to inspect
:rtype: dict
:returns: A dictionary that represents an instance's methods where
the keys are the name of the methods and the
values are the handler to the method.
"""
instance_members = inspect.getmembers(instance)
instance_methods = {}
for name, member in instance_members:
if not name.startswith('_'):
if inspect.ismethod(member):
instance_methods[name] = member
return instance_methods
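# Illustrative sketch (hypothetical class): only public bound methods are
# returned, keyed by name; underscore-prefixed names are filtered out:
#
#     class _Greeter(object):
#         def hello(self):
#             return 'hi'
#         def _internal(self):
#             return 'hidden'
#
#     get_instance_public_methods(_Greeter())
#     # -> {'hello': <bound method _Greeter.hello ...>}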
def document_model_driven_signature(section, name, operation_model,
include=None, exclude=None):
"""Documents the signature of a model-driven method
:param section: The section to write the documentation to.
:param name: The name of the method
:param operation_model: The operation model for the method
:type include: Dictionary where keys are parameter names and
values are the shapes of the parameter names.
:param include: The parameter shapes to include in the documentation.
:type exclude: List of the names of the parameters to exclude.
:param exclude: The names of the parameters to exclude from
documentation.
"""
params = {}
if operation_model.input_shape:
params = operation_model.input_shape.members
parameter_names = list(params.keys())
if include is not None:
for member in include:
parameter_names.append(member.name)
if exclude is not None:
for member in exclude:
if member in parameter_names:
parameter_names.remove(member)
signature_params = ''
if parameter_names:
signature_params = '**kwargs'
section.style.start_sphinx_py_method(name, signature_params)
def document_custom_signature(section, name, method,
include=None, exclude=None):
"""Documents the signature of a custom method
:param section: The section to write the documentation to.
:param name: The name of the method
:param method: The handle to the method being documented
:type include: Dictionary where keys are parameter names and
values are the shapes of the parameter names.
:param include: The parameter shapes to include in the documentation.
:type exclude: List of the names of the parameters to exclude.
:param exclude: The names of the parameters to exclude from
documentation.
"""
args, varargs, keywords, defaults = inspect.getargspec(method)
args = args[1:]
signature_params = inspect.formatargspec(
args, varargs, keywords, defaults)
signature_params = signature_params.lstrip('(')
signature_params = signature_params.rstrip(')')
section.style.start_sphinx_py_method(name, signature_params)
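# Illustrative sketch (hypothetical handler): for a method defined as
#
#     def create_thing(self, name, region=None):
#         ...
#
# ``getargspec`` drops ``self``, the parentheses are stripped, and the
# rendered signature becomes ``create_thing(name, region=None)``.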
def document_custom_method(section, method_name, method):
"""Documents a non-data driven method
:param section: The section to write the documentation to.
:param method_name: The name of the method
:param method: The handle to the method being documented
"""
document_custom_signature(
section, method_name, method)
method_intro_section = section.add_new_section('method-intro')
method_intro_section.writeln('')
doc_string = inspect.getdoc(method)
if doc_string is not None:
method_intro_section.style.write_py_doc_string(doc_string)
def document_model_driven_method(section, method_name, operation_model,
event_emitter, method_description=None,
example_prefix=None, include_input=None,
include_output=None, exclude_input=None,
exclude_output=None, document_output=True,
include_signature=True):
"""Documents an individual method
:param section: The section to write to
:param method_name: The name of the method
:param operation_model: The model of the operation
:param event_emitter: The event emitter to use to emit events
:param example_prefix: The prefix to use in the method example.
:type include_input: Dictionary where keys are parameter names and
values are the shapes of the parameter names.
:param include_input: The parameter shapes to include in the
input documentation.
:type include_output: Dictionary where keys are parameter names and
values are the shapes of the parameter names.
:param include_output: The parameter shapes to include in the
output documentation.
:type exclude_input: List of the names of the parameters to exclude.
:param exclude_input: The names of the parameters to exclude from
input documentation.
:type exclude_output: List of the names of the parameters to exclude.
:param exclude_output: The names of the parameters to exclude from
output documentation.
:param document_output: A boolean flag to indicate whether to
document the output.
:param include_signature: Whether or not to include the signature.
It is useful for generating docstrings.
"""
# Add the signature if specified.
if include_signature:
document_model_driven_signature(
section, method_name, operation_model, include=include_input,
exclude=exclude_input)
# Add the description for the method.
method_intro_section = section.add_new_section('method-intro')
method_intro_section.include_doc_string(method_description)
if operation_model.deprecated:
method_intro_section.style.start_danger()
method_intro_section.writeln(
'This operation is deprecated and may not function as '
'expected. This operation should not be used going forward '
'and is only kept for the purpose of backwards compatibility.')
method_intro_section.style.end_danger()
service_uid = operation_model.service_model.metadata.get('uid')
if service_uid is not None:
method_intro_section.style.new_paragraph()
method_intro_section.write("See also: ")
link = '%s/%s/%s' % (AWS_DOC_BASE, service_uid,
operation_model.name)
method_intro_section.style.external_link(title="AWS API Documentation",
link=link)
method_intro_section.writeln('')
# Add the example section.
example_section = section.add_new_section('example')
example_section.style.new_paragraph()
example_section.style.bold('Request Syntax')
context = {
'special_shape_types': {
'streaming_input_shape': operation_model.get_streaming_input(),
'streaming_output_shape': operation_model.get_streaming_output(),
'eventstream_output_shape': operation_model.get_event_stream_output(),
},
}
if operation_model.input_shape:
RequestExampleDocumenter(
service_name=operation_model.service_model.service_name,
operation_name=operation_model.name,
event_emitter=event_emitter, context=context).document_example(
example_section, operation_model.input_shape,
prefix=example_prefix, include=include_input,
exclude=exclude_input)
else:
example_section.style.new_paragraph()
example_section.style.start_codeblock()
example_section.write(example_prefix + '()')
# Add the request parameter documentation.
request_params_section = section.add_new_section('request-params')
if operation_model.input_shape:
RequestParamsDocumenter(
service_name=operation_model.service_model.service_name,
operation_name=operation_model.name,
event_emitter=event_emitter, context=context).document_params(
request_params_section, operation_model.input_shape,
include=include_input, exclude=exclude_input)
# Add the return value documentation
return_section = section.add_new_section('return')
return_section.style.new_line()
if operation_model.output_shape is not None and document_output:
return_section.write(':rtype: dict')
return_section.style.new_line()
return_section.write(':returns: ')
return_section.style.indent()
return_section.style.new_line()
# If the operation is an event stream, describe the tagged union
event_stream_output = operation_model.get_event_stream_output()
if event_stream_output:
event_section = return_section.add_new_section('event-stream')
event_section.style.new_paragraph()
event_section.write(
'The response of this operation contains an '
':class:`.EventStream` member. When iterated, the '
':class:`.EventStream` will yield events based on the '
'structure below, where only one of the top level keys '
'will be present for any given event.'
)
event_section.style.new_line()
# Add an example return value
return_example_section = return_section.add_new_section('example')
return_example_section.style.new_line()
return_example_section.style.bold('Response Syntax')
return_example_section.style.new_paragraph()
ResponseExampleDocumenter(
service_name=operation_model.service_model.service_name,
operation_name=operation_model.name,
event_emitter=event_emitter,
context=context).document_example(
return_example_section, operation_model.output_shape,
include=include_output, exclude=exclude_output)
# Add a description for the return value
return_description_section = return_section.add_new_section(
'description')
return_description_section.style.new_line()
return_description_section.style.bold('Response Structure')
return_description_section.style.new_paragraph()
ResponseParamsDocumenter(
service_name=operation_model.service_model.service_name,
operation_name=operation_model.name,
event_emitter=event_emitter,
context=context).document_params(
return_description_section, operation_model.output_shape,
include=include_output, exclude=exclude_output)
else:
return_section.write(':returns: None')
| 11,548 | Python | 39.953901 | 82 | 0.668081 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/service.py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.exceptions import DataNotFoundError
from botocore.docs.utils import get_official_service_name
from botocore.docs.client import ClientDocumenter
from botocore.docs.client import ClientExceptionsDocumenter
from botocore.docs.waiter import WaiterDocumenter
from botocore.docs.paginator import PaginatorDocumenter
from botocore.docs.bcdoc.restdoc import DocumentStructure
class ServiceDocumenter(object):
def __init__(self, service_name, session):
self._session = session
self._service_name = service_name
self._client = self._session.create_client(
service_name, region_name='us-east-1', aws_access_key_id='foo',
aws_secret_access_key='bar')
self._event_emitter = self._client.meta.events
self.sections = [
'title',
'table-of-contents',
'client-api',
'client-exceptions',
'paginator-api',
'waiter-api'
]
def document_service(self):
"""Documents an entire service.
:returns: The reStructured text of the documented service.
"""
doc_structure = DocumentStructure(
self._service_name, section_names=self.sections,
target='html')
self.title(doc_structure.get_section('title'))
self.table_of_contents(doc_structure.get_section('table-of-contents'))
self.client_api(doc_structure.get_section('client-api'))
self.client_exceptions(doc_structure.get_section('client-exceptions'))
self.paginator_api(doc_structure.get_section('paginator-api'))
self.waiter_api(doc_structure.get_section('waiter-api'))
return doc_structure.flush_structure()
def title(self, section):
section.style.h1(self._client.__class__.__name__)
self._event_emitter.emit(
'docs.%s.%s' % ('title',
self._service_name),
section=section
)
def table_of_contents(self, section):
section.style.table_of_contents(title='Table of Contents', depth=2)
def client_api(self, section):
examples = None
try:
examples = self.get_examples(self._service_name)
except DataNotFoundError:
pass
ClientDocumenter(self._client, examples).document_client(section)
def client_exceptions(self, section):
ClientExceptionsDocumenter(self._client).document_exceptions(section)
def paginator_api(self, section):
try:
service_paginator_model = self._session.get_paginator_model(
self._service_name)
except DataNotFoundError:
return
paginator_documenter = PaginatorDocumenter(
self._client, service_paginator_model)
paginator_documenter.document_paginators(section)
def waiter_api(self, section):
if self._client.waiter_names:
service_waiter_model = self._session.get_waiter_model(
self._service_name)
waiter_documenter = WaiterDocumenter(
self._client, service_waiter_model)
waiter_documenter.document_waiters(section)
def get_examples(self, service_name, api_version=None):
loader = self._session.get_component('data_loader')
examples = loader.load_service_model(
service_name, 'examples-1', api_version)
return examples['examples']
| 3,986 | Python | 37.708737 | 78 | 0.655294 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/__init__.py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
from botocore.docs.service import ServiceDocumenter
def generate_docs(root_dir, session):
"""Generates the reference documentation for botocore
This will go through every available AWS service and output ReSTructured
text files documenting each service.
:param root_dir: The directory to write the reference files to. Each
service's reference documentation is located at
root_dir/reference/services/service-name.rst
"""
services_doc_path = os.path.join(root_dir, 'reference', 'services')
if not os.path.exists(services_doc_path):
os.makedirs(services_doc_path)
# Generate reference docs and write them out.
for service_name in session.get_available_services():
docs = ServiceDocumenter(service_name, session).document_service()
service_doc_path = os.path.join(
services_doc_path, service_name + '.rst')
with open(service_doc_path, 'wb') as f:
f.write(docs)
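# Usage sketch (hypothetical output directory, assuming a default session):
#
#     import botocore.session
#     generate_docs('/tmp/botocore-docs', botocore.session.get_session())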
| 1,543 | Python | 38.589743 | 76 | 0.71873 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/waiter.py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore import xform_name
from botocore.compat import OrderedDict
from botocore.docs.utils import DocumentedShape
from botocore.utils import get_service_module_name
from botocore.docs.method import document_model_driven_method
class WaiterDocumenter(object):
def __init__(self, client, service_waiter_model):
self._client = client
self._service_name = self._client.meta.service_model.service_name
self._service_waiter_model = service_waiter_model
def document_waiters(self, section):
"""Documents the various waiters for a service.
:param section: The section to write to.
"""
section.style.h2('Waiters')
section.style.new_line()
section.writeln('The available waiters are:')
for waiter_name in self._service_waiter_model.waiter_names:
section.style.li(
':py:class:`%s.Waiter.%s`' % (
self._client.__class__.__name__, waiter_name))
self._add_single_waiter(section, waiter_name)
def _add_single_waiter(self, section, waiter_name):
section = section.add_new_section(waiter_name)
section.style.start_sphinx_py_class(
class_name='%s.Waiter.%s' % (
self._client.__class__.__name__, waiter_name))
# Add example on how to instantiate waiter.
section.style.start_codeblock()
section.style.new_line()
section.write(
'waiter = client.get_waiter(\'%s\')' % xform_name(waiter_name)
)
section.style.end_codeblock()
# Add information on the wait() method
section.style.new_line()
document_wait_method(
section=section,
waiter_name=waiter_name,
event_emitter=self._client.meta.events,
service_model=self._client.meta.service_model,
service_waiter_model=self._service_waiter_model
)
def document_wait_method(section, waiter_name, event_emitter,
service_model, service_waiter_model,
include_signature=True):
"""Documents a the wait method of a waiter
:param section: The section to write to
:param waiter_name: The name of the waiter
:param event_emitter: The event emitter to use to emit events
:param service_model: The service model
:param service_waiter_model: The waiter model associated to the service
:param include_signature: Whether or not to include the signature.
It is useful for generating docstrings.
"""
waiter_model = service_waiter_model.get_waiter(waiter_name)
operation_model = service_model.operation_model(
waiter_model.operation)
waiter_config_members = OrderedDict()
waiter_config_members['Delay'] = DocumentedShape(
name='Delay', type_name='integer',
documentation=(
'<p>The amount of time in seconds to wait between '
'attempts. Default: {0}</p>'.format(waiter_model.delay)))
waiter_config_members['MaxAttempts'] = DocumentedShape(
name='MaxAttempts', type_name='integer',
documentation=(
'<p>The maximum number of attempts to be made. '
'Default: {0}</p>'.format(waiter_model.max_attempts)))
botocore_waiter_params = [
DocumentedShape(
name='WaiterConfig', type_name='structure',
documentation=(
'<p>A dictionary that provides parameters to control '
'waiting behavior.</p>'),
members=waiter_config_members)
]
wait_description = (
'Polls :py:meth:`{0}.Client.{1}` every {2} '
'seconds until a successful state is reached. An error is '
'returned after {3} failed checks.'.format(
get_service_module_name(service_model),
xform_name(waiter_model.operation),
waiter_model.delay, waiter_model.max_attempts)
)
document_model_driven_method(
section, 'wait', operation_model,
event_emitter=event_emitter,
method_description=wait_description,
example_prefix='waiter.wait',
include_input=botocore_waiter_params,
document_output=False,
include_signature=include_signature
)
| 4,823 | Python | 36.6875 | 75 | 0.642131 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/example.py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.docs.shape import ShapeDocumenter
from botocore.docs.utils import py_default
class BaseExampleDocumenter(ShapeDocumenter):
def document_example(self, section, shape, prefix=None, include=None,
exclude=None):
"""Generates an example based on a shape
:param section: The section to write the documentation to.
:param shape: The shape of the operation.
:param prefix: Anything to be included before the example
:type include: Dictionary where keys are parameter names and
values are the shapes of the parameter names.
:param include: The parameter shapes to include in the documentation.
:type exclude: List of the names of the parameters to exclude.
:param exclude: The names of the parameters to exclude from
documentation.
"""
history = []
section.style.new_line()
section.style.start_codeblock()
if prefix is not None:
section.write(prefix)
self.traverse_and_document_shape(
section=section, shape=shape, history=history,
include=include, exclude=exclude)
def document_recursive_shape(self, section, shape, **kwargs):
section.write('{\'... recursive ...\'}')
def document_shape_default(self, section, shape, history, include=None,
exclude=None, **kwargs):
py_type = self._get_special_py_default(shape)
if py_type is None:
py_type = py_default(shape.type_name)
if self._context.get('streaming_shape') == shape:
py_type = 'StreamingBody()'
section.write(py_type)
def document_shape_type_string(self, section, shape, history,
include=None, exclude=None, **kwargs):
if 'enum' in shape.metadata:
for i, enum in enumerate(shape.metadata['enum']):
section.write('\'%s\'' % enum)
if i < len(shape.metadata['enum']) - 1:
section.write('|')
else:
self.document_shape_default(section, shape, history)
def document_shape_type_list(self, section, shape, history, include=None,
exclude=None, **kwargs):
param_shape = shape.member
list_section = section.add_new_section('list-value')
self._start_nested_param(list_section, '[')
param_section = list_section.add_new_section(
'member', context={'shape': param_shape.name})
self.traverse_and_document_shape(
section=param_section, shape=param_shape, history=history)
ending_comma_section = list_section.add_new_section('ending-comma')
ending_comma_section.write(',')
ending_bracket_section = list_section.add_new_section(
'ending-bracket')
self._end_nested_param(ending_bracket_section, ']')
def document_shape_type_structure(self, section, shape, history,
include=None, exclude=None, **kwargs):
if not shape.members:
section.write('{}')
return
section = section.add_new_section('structure-value')
self._start_nested_param(section, '{')
input_members = self._add_members_to_shape(shape.members, include)
for i, param in enumerate(input_members):
if exclude and param in exclude:
continue
param_section = section.add_new_section(param)
param_section.write('\'%s\': ' % param)
param_shape = input_members[param]
param_value_section = param_section.add_new_section(
'member-value', context={'shape': param_shape.name})
self.traverse_and_document_shape(
section=param_value_section, shape=param_shape,
history=history, name=param)
if i < len(input_members) - 1:
ending_comma_section = param_section.add_new_section(
'ending-comma')
ending_comma_section.write(',')
ending_comma_section.style.new_line()
self._end_structure(section, '{', '}')
def document_shape_type_map(self, section, shape, history,
include=None, exclude=None, **kwargs):
map_section = section.add_new_section('map-value')
self._start_nested_param(map_section, '{')
value_shape = shape.value
key_section = map_section.add_new_section(
'key', context={'shape': shape.key.name})
key_section.write('\'string\': ')
value_section = map_section.add_new_section(
'value', context={'shape': value_shape.name})
self.traverse_and_document_shape(
section=value_section, shape=value_shape, history=history)
end_bracket_section = map_section.add_new_section('ending-bracket')
self._end_nested_param(end_bracket_section, '}')
def _add_members_to_shape(self, members, include):
if include:
members = members.copy()
for param in include:
members[param.name] = param
return members
def _start_nested_param(self, section, start=None):
if start is not None:
section.write(start)
section.style.indent()
section.style.indent()
section.style.new_line()
def _end_nested_param(self, section, end=None):
section.style.dedent()
section.style.dedent()
section.style.new_line()
if end is not None:
section.write(end)
def _end_structure(self, section, start, end):
# If there are no members in the structure, then make sure the
# start and the end bracket are on the same line, by removing all
# previous text and writing the start and end.
if not section.available_sections:
section.clear_text()
section.write(start + end)
self._end_nested_param(section)
else:
end_bracket_section = section.add_new_section('ending-bracket')
self._end_nested_param(end_bracket_section, end)
class ResponseExampleDocumenter(BaseExampleDocumenter):
EVENT_NAME = 'response-example'
def document_shape_type_event_stream(self, section, shape, history,
**kwargs):
section.write('EventStream(')
self.document_shape_type_structure(section, shape, history, **kwargs)
end_section = section.add_new_section('event-stream-end')
end_section.write(')')
class RequestExampleDocumenter(BaseExampleDocumenter):
EVENT_NAME = 'request-example'
def document_shape_type_structure(self, section, shape, history,
include=None, exclude=None, **kwargs):
param_format = '\'%s\''
operator = ': '
start = '{'
end = '}'
if len(history) <= 1:
operator = '='
start = '('
end = ')'
param_format = '%s'
section = section.add_new_section('structure-value')
self._start_nested_param(section, start)
input_members = self._add_members_to_shape(shape.members, include)
for i, param in enumerate(input_members):
if exclude and param in exclude:
continue
param_section = section.add_new_section(param)
param_section.write(param_format % param)
param_section.write(operator)
param_shape = input_members[param]
param_value_section = param_section.add_new_section(
'member-value', context={'shape': param_shape.name})
self.traverse_and_document_shape(
section=param_value_section, shape=param_shape,
history=history, name=param)
if i < len(input_members) - 1:
ending_comma_section = param_section.add_new_section(
'ending-comma')
ending_comma_section.write(',')
ending_comma_section.style.new_line()
self._end_structure(section, start, end)
| 8,751 | Python | 40.875598 | 77 | 0.597989 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/docstring.py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.docs.method import document_model_driven_method
from botocore.docs.waiter import document_wait_method
from botocore.docs.paginator import document_paginate_method
from botocore.docs.bcdoc.restdoc import DocumentStructure
class LazyLoadedDocstring(str):
"""Used for lazily loading docstrings
You can instantiate this class and assign it to a __doc__ value.
The docstring will not be generated until it is accessed via __doc__ or
help(). Note that all docstring classes **must** subclass from
this class. It cannot be used directly as a docstring.
"""
def __init__(self, *args, **kwargs):
"""
The args and kwargs are the same as the underlying document
generation function. These just get proxied to the underlying
function.
"""
super(LazyLoadedDocstring, self).__init__()
self._gen_args = args
self._gen_kwargs = kwargs
self._docstring = None
def __new__(cls, *args, **kwargs):
# Needed in order to subclass from str with args and kwargs
return super(LazyLoadedDocstring, cls).__new__(cls)
def _write_docstring(self, *args, **kwargs):
raise NotImplementedError(
'_write_docstring is not implemented. Please subclass from '
'this class and provide your own _write_docstring method'
)
def expandtabs(self, tabsize=8):
"""Expands tabs to spaces
This is a hack to make lazily loaded docstrings work with
``help()``. In the ``help()`` function, ``pydoc`` and
``inspect`` are used. At some point the ``inspect.cleandoc``
method is called. To clean the docs ``expandtabs`` is called
and that is where we override the method to generate and return the
docstrings.
"""
if self._docstring is None:
self._generate()
return self._docstring.expandtabs(tabsize)
def __str__(self):
return self._generate()
# __doc__ of target will use either __repr__ or __str__ of this class.
__repr__ = __str__
def _generate(self):
# Generate the docstring if it is not already cached.
if self._docstring is None:
self._docstring = self._create_docstring()
return self._docstring
def _create_docstring(self):
docstring_structure = DocumentStructure('docstring', target='html')
# Call the document method function with the args and kwargs
# passed to the class.
self._write_docstring(
docstring_structure, *self._gen_args,
**self._gen_kwargs)
return docstring_structure.flush_structure().decode('utf-8')
class ClientMethodDocstring(LazyLoadedDocstring):
def _write_docstring(self, *args, **kwargs):
document_model_driven_method(*args, **kwargs)
class WaiterDocstring(LazyLoadedDocstring):
def _write_docstring(self, *args, **kwargs):
document_wait_method(*args, **kwargs)
class PaginatorDocstring(LazyLoadedDocstring):
def _write_docstring(self, *args, **kwargs):
document_paginate_method(*args, **kwargs)
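# Illustrative sketch (hypothetical subclass, not part of botocore): a
# docstring subclass defers all rendering until the docstring is read:
#
#     class EchoDocstring(LazyLoadedDocstring):
#         def _write_docstring(self, structure, text):
#             structure.write(text)
#
#     def op(): pass
#     op.__doc__ = EchoDocstring('rendered on first access')
#     help(op)  # generation happens here, not at assignment time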
| 3,699 | Python | 37.14433 | 75 | 0.666126 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/utils.py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
from collections import namedtuple
def py_type_name(type_name):
"""Get the Python type name for a given model type.
>>> py_type_name('list')
'list'
>>> py_type_name('structure')
'dict'
:rtype: string
"""
return {
'blob': 'bytes',
'character': 'string',
'double': 'float',
'long': 'integer',
'map': 'dict',
'structure': 'dict',
'timestamp': 'datetime',
}.get(type_name, type_name)
def py_default(type_name):
"""Get the Python default value for a given model type.
>>> py_default('string')
'\'string\''
>>> py_default('list')
'[...]'
>>> py_default('unknown')
'...'
:rtype: string
"""
return {
'double': '123.0',
'long': '123',
'integer': '123',
'string': "'string'",
'blob': "b'bytes'",
'boolean': 'True|False',
'list': '[...]',
'map': '{...}',
'structure': '{...}',
'timestamp': 'datetime(2015, 1, 1)',
}.get(type_name, '...')
def get_official_service_name(service_model):
"""Generate the official name of an AWS Service
:param service_model: The service model representing the service
"""
official_name = service_model.metadata.get('serviceFullName')
short_name = service_model.metadata.get('serviceAbbreviation', '')
if short_name.startswith('Amazon'):
short_name = short_name[7:]
if short_name.startswith('AWS'):
short_name = short_name[4:]
if short_name and short_name.lower() not in official_name.lower():
official_name += ' ({0})'.format(short_name)
return official_name
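# Illustrative sketch: for a model with serviceFullName
# 'Amazon Simple Storage Service' and serviceAbbreviation 'Amazon S3', the
# 'Amazon ' prefix is dropped from the abbreviation, 'S3' does not appear
# in the full name, and the result is
# 'Amazon Simple Storage Service (S3)'.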
_DocumentedShape = namedtuple(
'DocumentedShape', ['name', 'type_name', 'documentation', 'metadata',
'members', 'required_members'])
class DocumentedShape(_DocumentedShape):
"""Use this class to inject new shapes into a model for documentation"""
def __new__(cls, name, type_name, documentation, metadata=None,
members=None, required_members=None):
if metadata is None:
metadata = []
if members is None:
members = []
if required_members is None:
required_members = []
return super(DocumentedShape, cls).__new__(
cls, name, type_name, documentation, metadata, members,
required_members)
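# Illustrative sketch (hypothetical shape): injecting an extra boolean
# parameter into the generated documentation:
#
#     extra = DocumentedShape(
#         name='DryRun', type_name='boolean',
#         documentation='<p>Validate the request without running it.</p>')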
class AutoPopulatedParam(object):
def __init__(self, name, param_description=None):
self.name = name
self.param_description = param_description
if param_description is None:
self.param_description = (
'Please note that this parameter is automatically populated '
'if it is not provided. Including this parameter is not '
'required\n')
def document_auto_populated_param(self, event_name, section, **kwargs):
"""Documents auto populated parameters
It will remove any required marks for the parameter, remove the
parameter from the example, and add a snippet about the parameter
being autopopulated in the description.
"""
if event_name.startswith('docs.request-params'):
if self.name in section.available_sections:
section = section.get_section(self.name)
if 'is-required' in section.available_sections:
section.delete_section('is-required')
description_section = section.get_section(
'param-documentation')
description_section.writeln(self.param_description)
elif event_name.startswith('docs.request-example'):
section = section.get_section('structure-value')
if self.name in section.available_sections:
section.delete_section(self.name)
class HideParamFromOperations(object):
"""Hides a single parameter from multiple operations.
It removes the parameter from documentation and from
examples. This is typically used for things that are
automatically populated because a user would be unable to provide
a value (e.g., a checksum of a serialized XML request body)."""
def __init__(self, service_name, parameter_name, operation_names):
"""
:type service_name: str
:param service_name: Name of the service to modify.
:type parameter_name: str
:param parameter_name: Name of the parameter to modify.
:type operation_names: list
:param operation_names: Operation names to modify.
"""
self._parameter_name = parameter_name
self._params_events = set()
self._example_events = set()
# Build up the sets of relevant event names.
param_template = 'docs.request-params.%s.%s.complete-section'
example_template = 'docs.request-example.%s.%s.complete-section'
for name in operation_names:
self._params_events.add(param_template % (service_name, name))
self._example_events.add(example_template % (service_name, name))
def hide_param(self, event_name, section, **kwargs):
if event_name in self._example_events:
# Modify the structure value for example events.
section = section.get_section('structure-value')
elif event_name not in self._params_events:
return
if self._parameter_name in section.available_sections:
section.delete_section(self._parameter_name)
class AppendParamDocumentation(object):
"""Appends documentation to a specific parameter"""
def __init__(self, parameter_name, doc_string):
self._parameter_name = parameter_name
self._doc_string = doc_string
def append_documentation(self, event_name, section, **kwargs):
if self._parameter_name in section.available_sections:
section = section.get_section(self._parameter_name)
description_section = section.get_section(
'param-documentation')
description_section.writeln(self._doc_string)
_CONTROLS = {
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
'\b': '\\b',
'\f': '\\f',
}
# Combines all CONTROLS keys into a big or regular expression
_ESCAPE_CONTROLS_RE = re.compile('|'.join(map(re.escape, _CONTROLS)))
# Based on the match get the appropriate replacement from CONTROLS
_CONTROLS_MATCH_HANDLER = lambda match: _CONTROLS[match.group(0)]
def escape_controls(value):
return _ESCAPE_CONTROLS_RE.sub(_CONTROLS_MATCH_HANDLER, value)
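# Illustrative sketch: control characters are replaced with their escaped
# representations so example values render on a single line:
#
#     escape_controls('line1\nline2')  # -> 'line1\\nline2'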
| 7,176 | Python | 35.247475 | 77 | 0.621098 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/shape.py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# NOTE: This class should not be instantiated and its
# ``traverse_and_document_shape`` method called directly. It should be
# inherited from a Documenter class with the appropriate methods
# and attributes.
from botocore.utils import is_json_value_header
class ShapeDocumenter(object):
EVENT_NAME = ''
def __init__(self, service_name, operation_name, event_emitter,
context=None):
self._service_name = service_name
self._operation_name = operation_name
self._event_emitter = event_emitter
self._context = context
if context is None:
self._context = {
'special_shape_types': {}
}
def traverse_and_document_shape(self, section, shape, history,
include=None, exclude=None, name=None,
is_required=False):
"""Traverses and documents a shape
Will take a self class and call its appropriate methods as a shape
is traversed.
:param section: The section to document.
:param history: A list of the names of the shapes that have been
traversed.
:type include: Dictionary where keys are parameter names and
values are the shapes of the parameter names.
:param include: The parameter shapes to include in the documentation.
:type exclude: List of the names of the parameters to exclude.
:param exclude: The names of the parameters to exclude from
documentation.
:param name: The name of the shape.
:param is_required: If the shape is a required member.
"""
param_type = shape.type_name
if getattr(shape, 'serialization', {}).get('eventstream'):
param_type = 'event_stream'
if shape.name in history:
self.document_recursive_shape(section, shape, name=name)
else:
history.append(shape.name)
is_top_level_param = (len(history) == 2)
getattr(self, 'document_shape_type_%s' % param_type,
self.document_shape_default)(
section, shape, history=history, name=name,
include=include, exclude=exclude,
is_top_level_param=is_top_level_param,
is_required=is_required)
if is_top_level_param:
self._event_emitter.emit(
'docs.%s.%s.%s.%s' % (self.EVENT_NAME,
self._service_name,
self._operation_name,
name),
section=section)
at_overlying_method_section = (len(history) == 1)
if at_overlying_method_section:
self._event_emitter.emit(
'docs.%s.%s.%s.complete-section' % (self.EVENT_NAME,
self._service_name,
self._operation_name),
section=section)
history.pop()
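# Illustrative note: dispatch is by shape type name, e.g. a 'structure'
# shape is routed to ``document_shape_type_structure`` and any type
# without a dedicated handler falls back to ``document_shape_default``.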
def _get_special_py_default(self, shape):
special_defaults = {
'jsonvalue_header': '{...}|[...]|123|123.4|\'string\'|True|None',
'streaming_input_shape': 'b\'bytes\'|file',
'streaming_output_shape': 'StreamingBody()',
'eventstream_output_shape': 'EventStream()',
}
return self._get_value_for_special_type(shape, special_defaults)
def _get_special_py_type_name(self, shape):
special_type_names = {
'jsonvalue_header': 'JSON serializable',
'streaming_input_shape': 'bytes or seekable file-like object',
'streaming_output_shape': ':class:`.StreamingBody`',
'eventstream_output_shape': ':class:`.EventStream`',
}
return self._get_value_for_special_type(shape, special_type_names)
def _get_value_for_special_type(self, shape, special_type_map):
if is_json_value_header(shape):
return special_type_map['jsonvalue_header']
for special_type, marked_shape in self._context[
'special_shape_types'].items():
if special_type in special_type_map:
if shape == marked_shape:
return special_type_map[special_type]
return None
| 4,994 | Python | 41.330508 | 78 | 0.572887 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/paginator.py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore import xform_name
from botocore.compat import OrderedDict
from botocore.docs.utils import DocumentedShape
from botocore.utils import get_service_module_name
from botocore.docs.method import document_model_driven_method
class PaginatorDocumenter(object):
def __init__(self, client, service_paginator_model):
self._client = client
self._service_name = self._client.meta.service_model.service_name
self._service_paginator_model = service_paginator_model
def document_paginators(self, section):
"""Documents the various paginators for a service
:param section: The section to write to.
"""
section.style.h2('Paginators')
section.style.new_line()
section.writeln('The available paginators are:')
paginator_names = sorted(
self._service_paginator_model._paginator_config)
# List the available paginators and then document each paginator.
for paginator_name in paginator_names:
section.style.li(
':py:class:`%s.Paginator.%s`' % (
self._client.__class__.__name__, paginator_name))
self._add_paginator(section, paginator_name)
def _add_paginator(self, section, paginator_name):
section = section.add_new_section(paginator_name)
# Document the paginator class
section.style.start_sphinx_py_class(
class_name='%s.Paginator.%s' % (
self._client.__class__.__name__, paginator_name))
section.style.start_codeblock()
section.style.new_line()
# Document how to instantiate the paginator.
section.write(
'paginator = client.get_paginator(\'%s\')' % xform_name(
paginator_name)
)
section.style.end_codeblock()
section.style.new_line()
# Get the pagination model for the particular paginator.
paginator_config = self._service_paginator_model.get_paginator(
paginator_name)
document_paginate_method(
section=section,
paginator_name=paginator_name,
event_emitter=self._client.meta.events,
service_model=self._client.meta.service_model,
paginator_config=paginator_config
)
def document_paginate_method(section, paginator_name, event_emitter,
service_model, paginator_config,
include_signature=True):
"""Documents the paginate method of a paginator
:param section: The section to write to
:param paginator_name: The name of the paginator. It is snake cased.
:param event_emitter: The event emitter to use to emit events
:param service_model: The service model
:param paginator_config: The paginator config associated to a particular
paginator.
:param include_signature: Whether or not to include the signature.
It is useful for generating docstrings.
"""
# Retrieve the operation model of the underlying operation.
operation_model = service_model.operation_model(
paginator_name)
# Add representations of the request and response parameters
# we want to include in the description of the paginate method.
# These are parameters we expose via the botocore interface.
pagination_config_members = OrderedDict()
pagination_config_members['MaxItems'] = DocumentedShape(
name='MaxItems', type_name='integer',
documentation=(
'<p>The total number of items to return. If the total '
'number of items available is more than the value '
'specified in max-items then a <code>NextToken</code> '
'will be provided in the output that you can use to '
'resume pagination.</p>'))
if paginator_config.get('limit_key', None):
pagination_config_members['PageSize'] = DocumentedShape(
name='PageSize', type_name='integer',
documentation='<p>The size of each page.</p>')
pagination_config_members['StartingToken'] = DocumentedShape(
name='StartingToken', type_name='string',
documentation=(
'<p>A token to specify where to start paginating. '
'This is the <code>NextToken</code> from a previous '
'response.</p>'))
botocore_pagination_params = [
DocumentedShape(
name='PaginationConfig', type_name='structure',
documentation=(
'<p>A dictionary that provides parameters to control '
'pagination.</p>'),
members=pagination_config_members)
]
botocore_pagination_response_params = [
DocumentedShape(
name='NextToken', type_name='string',
documentation=(
'<p>A token to resume pagination.</p>'))
]
service_pagination_params = []
# Add the normal input token of the method to a list
# of input parameters that we wish to hide since we expose our own.
if isinstance(paginator_config['input_token'], list):
service_pagination_params += paginator_config['input_token']
else:
service_pagination_params.append(paginator_config['input_token'])
# Hide the limit key in the documentation.
if paginator_config.get('limit_key', None):
service_pagination_params.append(paginator_config['limit_key'])
# Hide the output tokens in the documentation.
service_pagination_response_params = []
if isinstance(paginator_config['output_token'], list):
service_pagination_response_params += paginator_config[
'output_token']
else:
service_pagination_response_params.append(paginator_config[
'output_token'])
paginate_description = (
'Creates an iterator that will paginate through responses '
'from :py:meth:`{0}.Client.{1}`.'.format(
get_service_module_name(service_model), xform_name(paginator_name))
)
document_model_driven_method(
section, 'paginate', operation_model,
event_emitter=event_emitter,
method_description=paginate_description,
example_prefix='response_iterator = paginator.paginate',
include_input=botocore_pagination_params,
include_output=botocore_pagination_response_params,
exclude_input=service_pagination_params,
exclude_output=service_pagination_response_params,
include_signature=include_signature
)
| 7,046 | Python | 38.589887 | 79 | 0.654272 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/client.py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import inspect
from botocore.docs.utils import get_official_service_name
from botocore.docs.method import document_custom_method
from botocore.docs.method import document_model_driven_method
from botocore.docs.method import get_instance_public_methods
from botocore.docs.sharedexample import document_shared_examples
from botocore.docs.example import ResponseExampleDocumenter
from botocore.docs.params import ResponseParamsDocumenter
from botocore.docs.utils import DocumentedShape
from botocore.compat import OrderedDict
class ClientDocumenter(object):
def __init__(self, client, shared_examples=None):
self._client = client
self._shared_examples = shared_examples
if self._shared_examples is None:
self._shared_examples = {}
self._service_name = self._client.meta.service_model.service_name
def document_client(self, section):
"""Documents a client and its methods
:param section: The section to write to.
"""
self._add_title(section)
self._add_class_signature(section)
client_methods = get_instance_public_methods(self._client)
self._add_client_intro(section, client_methods)
self._add_client_methods(section, client_methods)
def _add_title(self, section):
section.style.h2('Client')
def _add_client_intro(self, section, client_methods):
section = section.add_new_section('intro')
# Write out the top level description for the client.
official_service_name = get_official_service_name(
self._client.meta.service_model)
section.write(
'A low-level client representing %s' % official_service_name)
section.style.new_line()
section.include_doc_string(self._client.meta.service_model.documentation)
# Write out the client example instantiation.
self._add_client_creation_example(section)
# List out all of the possible client methods.
section.style.new_line()
section.write('These are the available methods:')
section.style.new_line()
class_name = self._client.__class__.__name__
for method_name in sorted(client_methods):
section.style.li(':py:meth:`~%s.Client.%s`' % (
class_name, method_name))
def _add_class_signature(self, section):
section.style.start_sphinx_py_class(
class_name='%s.Client' % self._client.__class__.__name__)
def _add_client_creation_example(self, section):
section.style.start_codeblock()
section.style.new_line()
section.write(
'client = session.create_client(\'{service}\')'.format(
service=self._service_name)
)
section.style.end_codeblock()
def _add_client_methods(self, section, client_methods):
section = section.add_new_section('methods')
for method_name in sorted(client_methods):
self._add_client_method(
section, method_name, client_methods[method_name])
def _add_client_method(self, section, method_name, method):
section = section.add_new_section(method_name)
if self._is_custom_method(method_name):
self._add_custom_method(section, method_name, method)
else:
self._add_model_driven_method(section, method_name)
def _is_custom_method(self, method_name):
return method_name not in self._client.meta.method_to_api_mapping
def _add_custom_method(self, section, method_name, method):
document_custom_method(section, method_name, method)
def _add_method_exceptions_list(self, section, operation_model):
error_section = section.add_new_section('exceptions')
error_section.style.new_line()
error_section.style.bold('Exceptions')
error_section.style.new_line()
client_name = self._client.__class__.__name__
for error in operation_model.error_shapes:
class_name = '%s.Client.exceptions.%s' % (client_name, error.name)
error_section.style.li(':py:class:`%s`' % class_name)
def _add_model_driven_method(self, section, method_name):
service_model = self._client.meta.service_model
operation_name = self._client.meta.method_to_api_mapping[method_name]
operation_model = service_model.operation_model(operation_name)
example_prefix = 'response = client.%s' % method_name
document_model_driven_method(
section, method_name, operation_model,
event_emitter=self._client.meta.events,
method_description=operation_model.documentation,
example_prefix=example_prefix,
)
# Add any modeled exceptions
if operation_model.error_shapes:
self._add_method_exceptions_list(section, operation_model)
# Add the shared examples
shared_examples = self._shared_examples.get(operation_name)
if shared_examples:
document_shared_examples(
section, operation_model, example_prefix, shared_examples)
class ClientExceptionsDocumenter(object):
_USER_GUIDE_LINK = (
'https://boto3.amazonaws.com/'
'v1/documentation/api/latest/guide/error-handling.html'
)
_GENERIC_ERROR_SHAPE = DocumentedShape(
name='Error',
type_name='structure',
documentation=(
'Normalized access to common exception attributes.'
),
members=OrderedDict([
('Code', DocumentedShape(
name='Code',
type_name='string',
documentation=(
'An identifier specifying the exception type.'
),
)),
('Message', DocumentedShape(
name='Message',
type_name='string',
documentation=(
'A descriptive message explaining why the exception '
'occurred.'
),
)),
]),
)
def __init__(self, client):
self._client = client
self._service_name = self._client.meta.service_model.service_name
def document_exceptions(self, section):
self._add_title(section)
self._add_overview(section)
self._add_exceptions_list(section)
self._add_exception_classes(section)
def _add_title(self, section):
section.style.h2('Client Exceptions')
def _add_overview(self, section):
section.style.new_line()
section.write(
'Client exceptions are available on a client instance '
'via the ``exceptions`` property. For more detailed instructions '
'and examples on the exact usage of client exceptions, see the '
'error handling '
)
section.style.external_link(
title='user guide',
link=self._USER_GUIDE_LINK,
)
section.write('.')
section.style.new_line()
def _exception_class_name(self, shape):
cls_name = self._client.__class__.__name__
return '%s.Client.exceptions.%s' % (cls_name, shape.name)
def _add_exceptions_list(self, section):
error_shapes = self._client.meta.service_model.error_shapes
if not error_shapes:
section.style.new_line()
section.write('This client has no modeled exception classes.')
section.style.new_line()
return
section.style.new_line()
section.write('The available client exceptions are:')
section.style.new_line()
for shape in error_shapes:
class_name = self._exception_class_name(shape)
section.style.li(':py:class:`%s`' % class_name)
def _add_exception_classes(self, section):
for shape in self._client.meta.service_model.error_shapes:
self._add_exception_class(section, shape)
def _add_exception_class(self, section, shape):
class_section = section.add_new_section(shape.name)
class_name = self._exception_class_name(shape)
class_section.style.start_sphinx_py_class(class_name=class_name)
self._add_top_level_documentation(class_section, shape)
self._add_exception_catch_example(class_section, shape)
self._add_response_attr(class_section, shape)
class_section.style.end_sphinx_py_class()
def _add_top_level_documentation(self, section, shape):
if shape.documentation:
section.style.new_line()
section.include_doc_string(shape.documentation)
section.style.new_line()
def _add_exception_catch_example(self, section, shape):
section.style.new_line()
section.style.bold('Example')
section.style.start_codeblock()
section.write('try:')
section.style.indent()
section.style.new_line()
section.write('...')
section.style.dedent()
section.style.new_line()
section.write('except client.exceptions.%s as e:' % shape.name)
section.style.indent()
section.style.new_line()
section.write('print(e.response)')
section.style.dedent()
section.style.end_codeblock()
def _add_response_attr(self, section, shape):
response_section = section.add_new_section('response')
response_section.style.start_sphinx_py_attr('response')
self._add_response_attr_description(response_section)
self._add_response_example(response_section, shape)
self._add_response_params(response_section, shape)
response_section.style.end_sphinx_py_attr()
def _add_response_attr_description(self, section):
section.style.new_line()
section.include_doc_string(
'The parsed error response. All exceptions have a top level '
'``Error`` key that provides normalized access to common '
'exception attributes. All other keys are specific to this '
'service or exception class.'
)
section.style.new_line()
def _add_response_example(self, section, shape):
example_section = section.add_new_section('syntax')
example_section.style.new_line()
example_section.style.bold('Syntax')
example_section.style.new_paragraph()
documenter = ResponseExampleDocumenter(
service_name=self._service_name,
operation_name=None,
event_emitter=self._client.meta.events,
)
documenter.document_example(
example_section, shape, include=[self._GENERIC_ERROR_SHAPE],
)
def _add_response_params(self, section, shape):
params_section = section.add_new_section('Structure')
params_section.style.new_line()
params_section.style.bold('Structure')
params_section.style.new_paragraph()
documenter = ResponseParamsDocumenter(
service_name=self._service_name,
operation_name=None,
event_emitter=self._client.meta.events,
)
documenter.document_params(
params_section, shape, include=[self._GENERIC_ERROR_SHAPE],
)
| 11,726 | Python | 39.023891 | 81 | 0.634232 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/params.py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.docs.shape import ShapeDocumenter
from botocore.docs.utils import py_type_name
class BaseParamsDocumenter(ShapeDocumenter):
def document_params(self, section, shape, include=None, exclude=None):
"""Fills out the documentation for a section given a model shape.
:param section: The section to write the documentation to.
:param shape: The shape of the operation.
:type include: Dictionary where keys are parameter names and
values are the shapes of the parameter names.
:param include: The parameter shapes to include in the documentation.
:type exclude: List of the names of the parameters to exclude.
:param exclude: The names of the parameters to exclude from
documentation.
"""
history = []
self.traverse_and_document_shape(
section=section, shape=shape, history=history,
name=None, include=include, exclude=exclude)
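# A minimal usage sketch (hypothetical shapes and emitter; the
# constructor signature comes from ShapeDocumenter):
#
#   documenter = ResponseParamsDocumenter(
#       service_name='myservice', operation_name='MyOperation',
#       event_emitter=event_emitter)
#   documenter.document_params(
#       section, output_shape,
#       include=[extra_documented_shape],  # DocumentedShape objects
#       exclude=['ResponseMetadata'])      # member names to skip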
def document_recursive_shape(self, section, shape, **kwargs):
self._add_member_documentation(section, shape, **kwargs)
def document_shape_default(self, section, shape, history, include=None,
exclude=None, **kwargs):
self._add_member_documentation(section, shape, **kwargs)
def document_shape_type_list(self, section, shape, history, include=None,
exclude=None, **kwargs):
self._add_member_documentation(section, shape, **kwargs)
param_shape = shape.member
param_section = section.add_new_section(
param_shape.name, context={'shape': shape.member.name})
self._start_nested_param(param_section)
self.traverse_and_document_shape(
section=param_section, shape=param_shape,
history=history, name=None)
section = section.add_new_section('end-list')
self._end_nested_param(section)
def document_shape_type_map(self, section, shape, history, include=None,
exclude=None, **kwargs):
self._add_member_documentation(section, shape, **kwargs)
key_section = section.add_new_section(
'key', context={'shape': shape.key.name})
self._start_nested_param(key_section)
self._add_member_documentation(key_section, shape.key)
param_section = section.add_new_section(
shape.value.name, context={'shape': shape.value.name})
param_section.style.indent()
self._start_nested_param(param_section)
self.traverse_and_document_shape(
section=param_section, shape=shape.value,
history=history, name=None)
end_section = section.add_new_section('end-map')
self._end_nested_param(end_section)
self._end_nested_param(end_section)
def document_shape_type_structure(self, section, shape, history,
include=None, exclude=None,
name=None, **kwargs):
members = self._add_members_to_shape(shape.members, include)
self._add_member_documentation(section, shape, name=name)
for param in members:
if exclude and param in exclude:
continue
param_shape = members[param]
param_section = section.add_new_section(
param, context={'shape': param_shape.name})
self._start_nested_param(param_section)
self.traverse_and_document_shape(
section=param_section, shape=param_shape,
history=history, name=param)
section = section.add_new_section('end-structure')
self._end_nested_param(section)
def _add_member_documentation(self, section, shape, **kwargs):
pass
def _add_members_to_shape(self, members, include):
if include:
members = members.copy()
for param in include:
members[param.name] = param
return members
def _document_non_top_level_param_type(self, type_section, shape):
special_py_type = self._get_special_py_type_name(shape)
py_type = py_type_name(shape.type_name)
type_format = '(%s) -- '
if special_py_type is not None:
# Special type can reference a linked class.
# Italicizing it blows away the link.
type_section.write(type_format % special_py_type)
else:
type_section.style.italics(type_format % py_type)
def _start_nested_param(self, section):
section.style.indent()
section.style.new_line()
def _end_nested_param(self, section):
section.style.dedent()
section.style.new_line()
class ResponseParamsDocumenter(BaseParamsDocumenter):
"""Generates the description for the response parameters"""
EVENT_NAME = 'response-params'
def _add_member_documentation(self, section, shape, name=None, **kwargs):
name_section = section.add_new_section('param-name')
name_section.write('- ')
if name is not None:
name_section.style.bold('%s ' % name)
type_section = section.add_new_section('param-type')
self._document_non_top_level_param_type(type_section, shape)
documentation_section = section.add_new_section('param-documentation')
if shape.documentation:
documentation_section.style.indent()
documentation_section.include_doc_string(shape.documentation)
section.style.new_paragraph()
def document_shape_type_event_stream(self, section, shape, history,
**kwargs):
self.document_shape_type_structure(section, shape, history, **kwargs)
class RequestParamsDocumenter(BaseParamsDocumenter):
"""Generates the description for the request parameters"""
EVENT_NAME = 'request-params'
def document_shape_type_structure(self, section, shape, history,
include=None, exclude=None, **kwargs):
if len(history) > 1:
self._add_member_documentation(section, shape, **kwargs)
section.style.indent()
members = self._add_members_to_shape(shape.members, include)
for i, param in enumerate(members):
if exclude and param in exclude:
continue
param_shape = members[param]
param_section = section.add_new_section(
param, context={'shape': param_shape.name})
param_section.style.new_line()
is_required = param in shape.required_members
self.traverse_and_document_shape(
section=param_section, shape=param_shape,
history=history, name=param, is_required=is_required)
section = section.add_new_section('end-structure')
if len(history) > 1:
section.style.dedent()
section.style.new_line()
def _add_member_documentation(self, section, shape, name=None,
is_top_level_param=False, is_required=False,
**kwargs):
py_type = self._get_special_py_type_name(shape)
if py_type is None:
py_type = py_type_name(shape.type_name)
if is_top_level_param:
type_section = section.add_new_section('param-type')
type_section.write(':type %s: %s' % (name, py_type))
end_type_section = type_section.add_new_section('end-param-type')
end_type_section.style.new_line()
name_section = section.add_new_section('param-name')
name_section.write(':param %s: ' % name)
else:
name_section = section.add_new_section('param-name')
name_section.write('- ')
if name is not None:
name_section.style.bold('%s ' % name)
type_section = section.add_new_section('param-type')
self._document_non_top_level_param_type(type_section, shape)
if is_required:
is_required_section = section.add_new_section('is-required')
is_required_section.style.indent()
is_required_section.style.bold('[REQUIRED] ')
if shape.documentation:
documentation_section = section.add_new_section(
'param-documentation')
documentation_section.style.indent()
documentation_section.include_doc_string(shape.documentation)
self._add_special_trait_documentation(documentation_section, shape)
end_param_section = section.add_new_section('end-param')
end_param_section.style.new_paragraph()
def _add_special_trait_documentation(self, section, shape):
if 'idempotencyToken' in shape.metadata:
self._append_idempotency_documentation(section)
def _append_idempotency_documentation(self, section):
docstring = 'This field is autopopulated if not provided.'
section.write(docstring)
| 9,561 | Python | 42.266968 | 79 | 0.622843 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/bcdoc/docstringparser.py | # Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.compat import six
class DocStringParser(six.moves.html_parser.HTMLParser):
"""
A simple HTML parser. Focused on converting the subset of HTML
that appears in the documentation strings of the JSON models into
simple ReST format.
"""
def __init__(self, doc):
self.tree = None
self.doc = doc
six.moves.html_parser.HTMLParser.__init__(self)
def reset(self):
six.moves.html_parser.HTMLParser.reset(self)
self.tree = HTMLTree(self.doc)
def feed(self, data):
# HTMLParser is an old style class, so the super() method will not work.
six.moves.html_parser.HTMLParser.feed(self, data)
self.tree.write()
self.tree = HTMLTree(self.doc)
def close(self):
six.moves.html_parser.HTMLParser.close(self)
# Write if there is anything remaining.
self.tree.write()
self.tree = HTMLTree(self.doc)
def handle_starttag(self, tag, attrs):
self.tree.add_tag(tag, attrs=attrs)
def handle_endtag(self, tag):
self.tree.add_tag(tag, is_start=False)
def handle_data(self, data):
self.tree.add_data(data)
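# A minimal sketch of driving this parser (assuming a ReSTDocument
# instance from botocore.docs.bcdoc.restdoc as the target):
#
#   doc = ReSTDocument()
#   parser = DocStringParser(doc)
#   parser.feed('<p>Creates a <b>new</b> resource.</p>')
#   parser.close()
#   # doc now holds the ReST translation, e.g. a paragraph with **new**.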
class HTMLTree(object):
"""
A tree which handles HTML nodes. Designed to work with a python HTML parser,
meaning that the current_node will be the most recently opened tag. When
a tag is closed, the current_node moves up to the parent node.
"""
def __init__(self, doc):
self.doc = doc
self.head = StemNode()
self.current_node = self.head
self.unhandled_tags = []
def add_tag(self, tag, attrs=None, is_start=True):
if not self._doc_has_handler(tag, is_start):
self.unhandled_tags.append(tag)
return
if is_start:
if tag == 'li':
node = LineItemNode(attrs)
else:
node = TagNode(tag, attrs)
self.current_node.add_child(node)
self.current_node = node
else:
self.current_node = self.current_node.parent
def _doc_has_handler(self, tag, is_start):
if is_start:
handler_name = 'start_%s' % tag
else:
handler_name = 'end_%s' % tag
return hasattr(self.doc.style, handler_name)
def add_data(self, data):
self.current_node.add_child(DataNode(data))
def write(self):
self.head.write(self.doc)
class Node(object):
def __init__(self, parent=None):
self.parent = parent
def write(self, doc):
raise NotImplementedError
class StemNode(Node):
def __init__(self, parent=None):
super(StemNode, self).__init__(parent)
self.children = []
def add_child(self, child):
child.parent = self
self.children.append(child)
def write(self, doc):
self._write_children(doc)
def _write_children(self, doc):
for child in self.children:
child.write(doc)
class TagNode(StemNode):
"""
A generic Tag node. It will verify that handlers exist before writing.
"""
def __init__(self, tag, attrs=None, parent=None):
super(TagNode, self).__init__(parent)
self.attrs = attrs
self.tag = tag
def write(self, doc):
self._write_start(doc)
self._write_children(doc)
self._write_end(doc)
def _write_start(self, doc):
handler_name = 'start_%s' % self.tag
if hasattr(doc.style, handler_name):
getattr(doc.style, handler_name)(self.attrs)
def _write_end(self, doc):
handler_name = 'end_%s' % self.tag
if hasattr(doc.style, handler_name):
getattr(doc.style, handler_name)()
class LineItemNode(TagNode):
def __init__(self, attrs=None, parent=None):
super(LineItemNode, self).__init__('li', attrs, parent)
def write(self, doc):
self._lstrip(self)
super(LineItemNode, self).write(doc)
def _lstrip(self, node):
"""
Traverses the tree, stripping out whitespace until text data is found
:param node: The node to strip
:return: True if non-whitespace data was found, False otherwise
"""
for child in node.children:
if isinstance(child, DataNode):
child.lstrip()
if child.data:
return True
else:
found = self._lstrip(child)
if found:
return True
return False
class DataNode(Node):
"""
A Node that contains only string data.
"""
def __init__(self, data, parent=None):
super(DataNode, self).__init__(parent)
if not isinstance(data, six.string_types):
raise ValueError("Expecting string type, %s given." % type(data))
self.data = data
def lstrip(self):
self.data = self.data.lstrip()
def write(self, doc):
if not self.data:
return
if self.data.isspace():
str_data = ' '
else:
end_space = self.data[-1].isspace()
words = self.data.split()
words = doc.translate_words(words)
str_data = ' '.join(words)
if end_space:
str_data += ' '
doc.handle_data(str_data)
| 5,889 | Python | 28.303482 | 80 | 0.591272 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/bcdoc/style.py | # Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
logger = logging.getLogger('bcdocs')
class BaseStyle(object):
def __init__(self, doc, indent_width=2):
self.doc = doc
self.indent_width = indent_width
self._indent = 0
self.keep_data = True
@property
def indentation(self):
return self._indent
@indentation.setter
def indentation(self, value):
self._indent = value
def new_paragraph(self):
return '\n%s' % self.spaces()
def indent(self):
self._indent += 1
def dedent(self):
if self._indent > 0:
self._indent -= 1
def spaces(self):
return ' ' * (self._indent * self.indent_width)
def bold(self, s):
return s
def ref(self, link, title=None):
return link
def h2(self, s):
return s
def h3(self, s):
return s
def underline(self, s):
return s
def italics(self, s):
return s
class ReSTStyle(BaseStyle):
def __init__(self, doc, indent_width=2):
BaseStyle.__init__(self, doc, indent_width)
self.do_p = True
self.a_href = None
self.list_depth = 0
def new_paragraph(self):
self.doc.write('\n\n%s' % self.spaces())
def new_line(self):
self.doc.write('\n%s' % self.spaces())
def _start_inline(self, markup):
self.doc.write(markup)
def _end_inline(self, markup):
# Sometimes the HTML markup has whitespace between the end
# of the text inside the inline markup and the closing element
# (e.g. <b>foobar </b>). This trailing space will cause
# problems in the ReST inline markup so we remove it here
# by popping the last item written off the stack, stripping
# the whitespace and then pushing it back on the stack.
last_write = self.doc.pop_write().rstrip(' ')
# Sometimes, for whatever reason, a tag like <b/> is present. This
# is problematic because if we simply translate that directly then
# we end up with something like ****, which rst will assume is a
# heading instead of an empty bold.
if last_write == markup:
return
self.doc.push_write(last_write)
self.doc.write(markup + ' ')
def start_bold(self, attrs=None):
self._start_inline('**')
def end_bold(self):
self._end_inline('**')
def start_b(self, attrs=None):
self.doc.do_translation = True
self.start_bold(attrs)
def end_b(self):
self.doc.do_translation = False
self.end_bold()
def bold(self, s):
if s:
self.start_bold()
self.doc.write(s)
self.end_bold()
def ref(self, title, link=None):
if link is None:
link = title
self.doc.write(':doc:`%s <%s>`' % (title, link))
def _heading(self, s, border_char):
border = border_char * len(s)
self.new_paragraph()
self.doc.write('%s\n%s\n%s' % (border, s, border))
self.new_paragraph()
def h1(self, s):
self._heading(s, '*')
def h2(self, s):
self._heading(s, '=')
def h3(self, s):
self._heading(s, '-')
def start_italics(self, attrs=None):
self._start_inline('*')
def end_italics(self):
self._end_inline('*')
def italics(self, s):
if s:
self.start_italics()
self.doc.write(s)
self.end_italics()
def start_p(self, attrs=None):
if self.do_p:
self.doc.write('\n\n%s' % self.spaces())
def end_p(self):
if self.do_p:
self.doc.write('\n\n%s' % self.spaces())
def start_code(self, attrs=None):
self.doc.do_translation = True
self._start_inline('``')
def end_code(self):
self.doc.do_translation = False
self._end_inline('``')
def code(self, s):
if s:
self.start_code()
self.doc.write(s)
self.end_code()
def start_note(self, attrs=None):
self.new_paragraph()
self.doc.write('.. note::')
self.indent()
self.new_paragraph()
def end_note(self):
self.dedent()
self.new_paragraph()
def start_important(self, attrs=None):
self.new_paragraph()
self.doc.write('.. warning::')
self.indent()
self.new_paragraph()
def end_important(self):
self.dedent()
self.new_paragraph()
def start_danger(self, attrs=None):
self.new_paragraph()
self.doc.write('.. danger::')
self.indent()
self.new_paragraph()
def end_danger(self):
self.dedent()
self.new_paragraph()
def start_a(self, attrs=None):
if attrs:
for attr_key, attr_value in attrs:
if attr_key == 'href':
self.a_href = attr_value
self.doc.write('`')
else:
# There are some model documentation strings
# that look like this: <a>DescribeInstances</a>.
# In this case we just write out an empty
# string.
self.doc.write(' ')
self.doc.do_translation = True
def link_target_definition(self, refname, link):
self.doc.writeln('.. _%s: %s' % (refname, link))
def sphinx_reference_label(self, label, text=None):
if text is None:
text = label
if self.doc.target == 'html':
self.doc.write(':ref:`%s <%s>`' % (text, label))
else:
self.doc.write(text)
def end_a(self):
self.doc.do_translation = False
if self.a_href:
last_write = self.doc.pop_write()
last_write = last_write.rstrip(' ')
if last_write and last_write != '`':
if ':' in last_write:
last_write = last_write.replace(':', r'\:')
self.doc.push_write(last_write)
self.doc.push_write(' <%s>`__' % self.a_href)
elif last_write == '`':
# Look at start_a(). It will do a self.doc.write('`')
# which is the start of the link title. If that is the
# case then there was no link text. We should just
# use an inline link. The syntax of this is
# `<http://url>`_
self.doc.push_write('`<%s>`__' % self.a_href)
else:
self.doc.push_write(self.a_href)
self.doc.hrefs[self.a_href] = self.a_href
self.doc.write('`__')
self.a_href = None
self.doc.write(' ')
def start_i(self, attrs=None):
self.doc.do_translation = True
self.start_italics()
def end_i(self):
self.doc.do_translation = False
self.end_italics()
def start_li(self, attrs=None):
self.new_line()
self.do_p = False
self.doc.write('* ')
def end_li(self):
self.do_p = True
self.new_line()
def li(self, s):
if s:
self.start_li()
self.doc.writeln(s)
self.end_li()
def start_ul(self, attrs=None):
if self.list_depth != 0:
self.indent()
self.list_depth += 1
self.new_paragraph()
def end_ul(self):
self.list_depth -= 1
if self.list_depth != 0:
self.dedent()
self.new_paragraph()
def start_ol(self, attrs=None):
# TODO: Need to control the bullets used for LI items
if self.list_depth != 0:
self.indent()
self.list_depth += 1
self.new_paragraph()
def end_ol(self):
self.list_depth -= 1
if self.list_depth != 0:
self.dedent()
self.new_paragraph()
def start_examples(self, attrs=None):
self.doc.keep_data = False
def end_examples(self):
self.doc.keep_data = True
def start_fullname(self, attrs=None):
self.doc.keep_data = False
def end_fullname(self):
self.doc.keep_data = True
def start_codeblock(self, attrs=None):
self.doc.write('::')
self.indent()
self.new_paragraph()
def end_codeblock(self):
self.dedent()
self.new_paragraph()
def codeblock(self, code):
"""
Literal code blocks are introduced by ending a paragraph with
the special marker ::. The literal block must be indented
(and, like all paragraphs, separated from the surrounding
ones by blank lines).
"""
self.start_codeblock()
self.doc.writeln(code)
self.end_codeblock()
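# For example, codeblock('print(123)') writes roughly:
#
#   ::
#
#     print(123)
#
# with the literal block indented one level past the surrounding text.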
def toctree(self):
if self.doc.target == 'html':
self.doc.write('\n.. toctree::\n')
self.doc.write(' :maxdepth: 1\n')
self.doc.write(' :titlesonly:\n\n')
else:
self.start_ul()
def tocitem(self, item, file_name=None):
if self.doc.target == 'man':
self.li(item)
else:
if file_name:
self.doc.writeln(' %s' % file_name)
else:
self.doc.writeln(' %s' % item)
def hidden_toctree(self):
if self.doc.target == 'html':
self.doc.write('\n.. toctree::\n')
self.doc.write(' :maxdepth: 1\n')
self.doc.write(' :hidden:\n\n')
def hidden_tocitem(self, item):
if self.doc.target == 'html':
self.tocitem(item)
def table_of_contents(self, title=None, depth=None):
self.doc.write('.. contents:: ')
if title is not None:
self.doc.writeln(title)
if depth is not None:
self.doc.writeln(' :depth: %s' % depth)
def start_sphinx_py_class(self, class_name):
self.new_paragraph()
self.doc.write('.. py:class:: %s' % class_name)
self.indent()
self.new_paragraph()
def end_sphinx_py_class(self):
self.dedent()
self.new_paragraph()
def start_sphinx_py_method(self, method_name, parameters=None):
self.new_paragraph()
content = '.. py:method:: %s' % method_name
if parameters is not None:
content += '(%s)' % parameters
self.doc.write(content)
self.indent()
self.new_paragraph()
def end_sphinx_py_method(self):
self.dedent()
self.new_paragraph()
def start_sphinx_py_attr(self, attr_name):
self.new_paragraph()
self.doc.write('.. py:attribute:: %s' % attr_name)
self.indent()
self.new_paragraph()
def end_sphinx_py_attr(self):
self.dedent()
self.new_paragraph()
def write_py_doc_string(self, docstring):
docstring_lines = docstring.splitlines()
for docstring_line in docstring_lines:
self.doc.writeln(docstring_line)
def external_link(self, title, link):
if self.doc.target == 'html':
self.doc.write('`%s <%s>`_' % (title, link))
else:
self.doc.write(title)
def internal_link(self, title, page):
if self.doc.target == 'html':
self.doc.write(':doc:`%s <%s>`' % (title, page))
else:
self.doc.write(title)
| 11,833 | Python | 27.243437 | 78 | 0.542381 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/bcdoc/__init__.py | # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
__version__ = '0.16.0'
| 588 | Python | 41.071426 | 73 | 0.741497 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/bcdoc/restdoc.py | # Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
from botocore.compat import OrderedDict
from botocore.docs.bcdoc.docstringparser import DocStringParser
from botocore.docs.bcdoc.style import ReSTStyle
LOG = logging.getLogger('bcdocs')
class ReSTDocument(object):
def __init__(self, target='man'):
self.style = ReSTStyle(self)
self.target = target
self.parser = DocStringParser(self)
self.keep_data = True
self.do_translation = False
self.translation_map = {}
self.hrefs = {}
self._writes = []
self._last_doc_string = None
def _write(self, s):
if self.keep_data and s is not None:
self._writes.append(s)
def write(self, content):
"""
Write content into the document.
"""
self._write(content)
def writeln(self, content):
"""
Write content on a newline.
"""
self._write('%s%s\n' % (self.style.spaces(), content))
def peek_write(self):
"""
Returns the last content written to the document without
removing it from the stack.
"""
return self._writes[-1]
def pop_write(self):
"""
Removes and returns the last content written to the stack.
"""
return self._writes.pop()
def push_write(self, s):
"""
Places new content on the stack.
"""
self._writes.append(s)
def getvalue(self):
"""
Returns the current content of the document as a string.
"""
if self.hrefs:
self.style.new_paragraph()
for refname, link in self.hrefs.items():
self.style.link_target_definition(refname, link)
return ''.join(self._writes).encode('utf-8')
def translate_words(self, words):
return [self.translation_map.get(w, w) for w in words]
def handle_data(self, data):
if data and self.keep_data:
self._write(data)
def include_doc_string(self, doc_string):
if doc_string:
try:
start = len(self._writes)
self.parser.feed(doc_string)
self.parser.close()
end = len(self._writes)
self._last_doc_string = (start, end)
except Exception:
LOG.debug('Error parsing doc string', exc_info=True)
LOG.debug(doc_string)
def remove_last_doc_string(self):
# Removes all writes inserted by last doc string
if self._last_doc_string is not None:
start, end = self._last_doc_string
del self._writes[start:end]
class DocumentStructure(ReSTDocument):
def __init__(self, name, section_names=None, target='man', context=None):
"""Provides a Hierarichial structure to a ReSTDocument
You can write to it similiar to as you can to a ReSTDocument but
has an innate structure for more orginaztion and abstraction.
:param name: The name of the document
:param section_names: A list of sections to be included
in the document.
:param target: The target documentation of the Document structure
:param context: A dictionary of data to store with the structure. These
are only stored per section, not the entire structure.
"""
super(DocumentStructure, self).__init__(target=target)
self._name = name
self._structure = OrderedDict()
self._path = [self._name]
self._context = {}
if context is not None:
self._context = context
if section_names is not None:
self._generate_structure(section_names)
@property
def name(self):
"""The name of the document structure"""
return self._name
@property
def path(self):
"""
A list of where to find a particular document structure in the
overlying document structure.
"""
return self._path
@path.setter
def path(self, value):
self._path = value
@property
def available_sections(self):
return list(self._structure)
@property
def context(self):
return self._context
def _generate_structure(self, section_names):
for section_name in section_names:
self.add_new_section(section_name)
def add_new_section(self, name, context=None):
"""Adds a new section to the current document structure
This document structure will be considered a section to the
current document structure but will in itself be an entirely
new document structure that can be written to and have sections
as well
:param name: The name of the section.
:param context: A dictionary of data to store with the structure. These
are only stored per section, not the entire structure.
:rtype: DocumentStructure
:returns: A new document structure that can be written to, and that
lives as a section of the document structure it was instantiated from.
"""
# Add a new section
section = self.__class__(name=name, target=self.target,
context=context)
section.path = self.path + [name]
# Indent the section appropriately as well
section.style.indentation = self.style.indentation
section.translation_map = self.translation_map
section.hrefs = self.hrefs
self._structure[name] = section
return section
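# A short usage sketch (names are illustrative only):
#
#   doc = DocumentStructure('client', target='html')
#   intro = doc.add_new_section('intro')
#   intro.style.h2('Overview')
#   intro.write('Some introductory text.')
#   rest_bytes = doc.flush_structure()  # DFS flush of all sections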
def get_section(self, name):
"""Retrieve a section"""
return self._structure[name]
def delete_section(self, name):
"""Delete a section"""
del self._structure[name]
def flush_structure(self):
"""Flushes a doc structure to a ReSTructed string
The document is flushed out in a DFS style where sections and their
subsections' values are added to the string as they are visited.
"""
# We are at the root flush the links at the beginning of the
# document
if len(self.path) == 1:
if self.hrefs:
self.style.new_paragraph()
for refname, link in self.hrefs.items():
self.style.link_target_definition(refname, link)
value = self.getvalue()
for name, section in self._structure.items():
value += section.flush_structure()
return value
def getvalue(self):
return ''.join(self._writes).encode('utf-8')
def remove_all_sections(self):
self._structure = OrderedDict()
def clear_text(self):
self._writes = []
| 7,226 | Python | 32 | 80 | 0.608912 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/vengine_gen.py | #
# DEPRECATED: implementation for ffi.verify()
#
import sys, os
import types
from . import model
from .error import VerificationError
class VGenericEngine(object):
_class_key = 'g'
_gen_python_module = False
def __init__(self, verifier):
self.verifier = verifier
self.ffi = verifier.ffi
self.export_symbols = []
self._struct_pending_verification = {}
def patch_extension_kwds(self, kwds):
# add 'export_symbols' to the dictionary. Note that we add the
# list before filling it. When we fill it, it will thus also show
# up in kwds['export_symbols'].
kwds.setdefault('export_symbols', self.export_symbols)
def find_module(self, module_name, path, so_suffixes):
for so_suffix in so_suffixes:
basename = module_name + so_suffix
if path is None:
path = sys.path
for dirname in path:
filename = os.path.join(dirname, basename)
if os.path.isfile(filename):
return filename
def collect_types(self):
pass # not needed in the generic engine
def _prnt(self, what=''):
self._f.write(what + '\n')
def write_source_to_f(self):
prnt = self._prnt
# first paste some standard set of lines that are mostly '#include'
prnt(cffimod_header)
# then paste the C source given by the user, verbatim.
prnt(self.verifier.preamble)
#
# call generate_gen_xxx_decl(), for every xxx found from
# ffi._parser._declarations. This generates all the functions.
self._generate('decl')
#
# on Windows, distutils insists on putting init_cffi_xyz in
# 'export_symbols', so instead of fighting it, just give up and
# give it one
if sys.platform == 'win32':
if sys.version_info >= (3,):
prefix = 'PyInit_'
else:
prefix = 'init'
modname = self.verifier.get_module_name()
prnt("void %s%s(void) { }\n" % (prefix, modname))
def load_library(self, flags=0):
# import it with the CFFI backend
backend = self.ffi._backend
# needs to make a path that contains '/', on Posix
filename = os.path.join(os.curdir, self.verifier.modulefilename)
module = backend.load_library(filename, flags)
#
# call loading_gen_struct() to get the struct layout inferred by
# the C compiler
self._load(module, 'loading')
# build the FFILibrary class and instance, this is a module subclass
# because modules are expected to have usually-constant-attributes and
# in PyPy this means the JIT is able to treat attributes as constant,
# which we want.
class FFILibrary(types.ModuleType):
_cffi_generic_module = module
_cffi_ffi = self.ffi
_cffi_dir = []
def __dir__(self):
return FFILibrary._cffi_dir
library = FFILibrary("")
#
# finally, call the loaded_gen_xxx() functions. This will set
# up the 'library' object.
self._load(module, 'loaded', library=library)
return library
def _get_declarations(self):
lst = [(key, tp) for (key, (tp, qual)) in
self.ffi._parser._declarations.items()]
lst.sort()
return lst
def _generate(self, step_name):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
try:
method = getattr(self, '_generate_gen_%s_%s' % (kind,
step_name))
except AttributeError:
raise VerificationError(
"not implemented in verify(): %r" % name)
try:
method(tp, realname)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _load(self, module, step_name, **kwds):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
method = getattr(self, '_%s_gen_%s' % (step_name, kind))
try:
method(tp, realname, module, **kwds)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _generate_nothing(self, tp, name):
pass
def _loaded_noop(self, tp, name, module, **kwds):
pass
# ----------
# typedefs: generates no code so far
_generate_gen_typedef_decl = _generate_nothing
_loading_gen_typedef = _loaded_noop
_loaded_gen_typedef = _loaded_noop
# ----------
# function declarations
def _generate_gen_function_decl(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
# cannot support vararg functions better than this: check for its
# exact type (including the fixed arguments), and build it as a
# constant function pointer (no _cffi_f_%s wrapper)
self._generate_gen_const(False, name, tp)
return
prnt = self._prnt
numargs = len(tp.args)
argnames = []
for i, type in enumerate(tp.args):
indirection = ''
if isinstance(type, model.StructOrUnion):
indirection = '*'
argnames.append('%sx%d' % (indirection, i))
context = 'argument of %s' % name
arglist = [type.get_c_name(' %s' % arg, context)
for type, arg in zip(tp.args, argnames)]
tpresult = tp.result
if isinstance(tpresult, model.StructOrUnion):
arglist.insert(0, tpresult.get_c_name(' *r', context))
tpresult = model.void_type
arglist = ', '.join(arglist) or 'void'
wrappername = '_cffi_f_%s' % name
self.export_symbols.append(wrappername)
if tp.abi:
abi = tp.abi + ' '
else:
abi = ''
funcdecl = ' %s%s(%s)' % (abi, wrappername, arglist)
context = 'result of %s' % name
prnt(tpresult.get_c_name(funcdecl, context))
prnt('{')
#
if isinstance(tp.result, model.StructOrUnion):
result_code = '*r = '
elif not isinstance(tp.result, model.VoidType):
result_code = 'return '
else:
result_code = ''
prnt(' %s%s(%s);' % (result_code, name, ', '.join(argnames)))
prnt('}')
prnt()
_loading_gen_function = _loaded_noop
def _loaded_gen_function(self, tp, name, module, library):
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
newfunction = self._load_constant(False, tp, name, module)
else:
indirections = []
base_tp = tp
if (any(isinstance(typ, model.StructOrUnion) for typ in tp.args)
or isinstance(tp.result, model.StructOrUnion)):
indirect_args = []
for i, typ in enumerate(tp.args):
if isinstance(typ, model.StructOrUnion):
typ = model.PointerType(typ)
indirections.append((i, typ))
indirect_args.append(typ)
indirect_result = tp.result
if isinstance(indirect_result, model.StructOrUnion):
if indirect_result.fldtypes is None:
raise TypeError("'%s' is used as result type, "
"but is opaque" % (
indirect_result._get_c_name(),))
indirect_result = model.PointerType(indirect_result)
indirect_args.insert(0, indirect_result)
indirections.insert(0, ("result", indirect_result))
indirect_result = model.void_type
tp = model.FunctionPtrType(tuple(indirect_args),
indirect_result, tp.ellipsis)
BFunc = self.ffi._get_cached_btype(tp)
wrappername = '_cffi_f_%s' % name
newfunction = module.load_function(BFunc, wrappername)
for i, typ in indirections:
newfunction = self._make_struct_wrapper(newfunction, i, typ,
base_tp)
setattr(library, name, newfunction)
type(library)._cffi_dir.append(name)
def _make_struct_wrapper(self, oldfunc, i, tp, base_tp):
backend = self.ffi._backend
BType = self.ffi._get_cached_btype(tp)
if i == "result":
ffi = self.ffi
def newfunc(*args):
res = ffi.new(BType)
oldfunc(res, *args)
return res[0]
else:
def newfunc(*args):
args = args[:i] + (backend.newp(BType, args[i]),) + args[i+1:]
return oldfunc(*args)
newfunc._cffi_base_type = base_tp
return newfunc
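# Illustration (hypothetical C signature): for 'struct s f(int)', the
# generated wrapper is 'void _cffi_f_f(struct s *r, int x0)'; the
# i == "result" branch above rebuilds the original behavior by
# allocating 'struct s *' with ffi.new(), calling the wrapper, and
# returning res[0].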
# ----------
# named structs
def _generate_gen_struct_decl(self, tp, name):
assert name == tp.name
self._generate_struct_or_union_decl(tp, 'struct', name)
def _loading_gen_struct(self, tp, name, module):
self._loading_struct_or_union(tp, 'struct', name, module)
def _loaded_gen_struct(self, tp, name, module, **kwds):
self._loaded_struct_or_union(tp)
def _generate_gen_union_decl(self, tp, name):
assert name == tp.name
self._generate_struct_or_union_decl(tp, 'union', name)
def _loading_gen_union(self, tp, name, module):
self._loading_struct_or_union(tp, 'union', name, module)
def _loaded_gen_union(self, tp, name, module, **kwds):
self._loaded_struct_or_union(tp)
def _generate_struct_or_union_decl(self, tp, prefix, name):
if tp.fldnames is None:
return # nothing to do with opaque structs
checkfuncname = '_cffi_check_%s_%s' % (prefix, name)
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
cname = ('%s %s' % (prefix, name)).strip()
#
prnt = self._prnt
prnt('static void %s(%s *p)' % (checkfuncname, cname))
prnt('{')
prnt(' /* only to generate compile-time warnings or errors */')
prnt(' (void)p;')
for fname, ftype, fbitsize, fqual in tp.enumfields():
if (isinstance(ftype, model.PrimitiveType)
and ftype.is_integer_type()) or fbitsize >= 0:
# accept all integers, but complain on float or double
prnt(' (void)((p->%s) << 1);' % fname)
else:
# only accept exactly the type declared.
try:
prnt(' { %s = &p->%s; (void)tmp; }' % (
ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual),
fname))
except VerificationError as e:
prnt(' /* %s */' % str(e)) # cannot verify it, ignore
prnt('}')
self.export_symbols.append(layoutfuncname)
prnt('intptr_t %s(intptr_t i)' % (layoutfuncname,))
prnt('{')
prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname)
prnt(' static intptr_t nums[] = {')
prnt(' sizeof(%s),' % cname)
prnt(' offsetof(struct _cffi_aligncheck, y),')
for fname, ftype, fbitsize, fqual in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
prnt(' offsetof(%s, %s),' % (cname, fname))
if isinstance(ftype, model.ArrayType) and ftype.length is None:
prnt(' 0, /* %s */' % ftype._get_c_name())
else:
prnt(' sizeof(((%s *)0)->%s),' % (cname, fname))
prnt(' -1')
prnt(' };')
prnt(' return nums[i];')
prnt(' /* the next line is not executed, but compiled */')
prnt(' %s(0);' % (checkfuncname,))
prnt('}')
prnt()
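# For a hypothetical 'struct point { int x; int y; };', the code above
# emits roughly this C (whitespace simplified):
#
#   static void _cffi_check_struct_point(struct point *p) {
#       (void)p;
#       (void)((p->x) << 1);
#       (void)((p->y) << 1);
#   }
#   intptr_t _cffi_layout_struct_point(intptr_t i) {
#       struct _cffi_aligncheck { char x; struct point y; };
#       static intptr_t nums[] = {
#           sizeof(struct point),
#           offsetof(struct _cffi_aligncheck, y),
#           offsetof(struct point, x), sizeof(((struct point *)0)->x),
#           offsetof(struct point, y), sizeof(((struct point *)0)->y),
#           -1 };
#       return nums[i];
#   }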
def _loading_struct_or_union(self, tp, prefix, name, module):
if tp.fldnames is None:
return # nothing to do with opaque structs
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
#
BFunc = self.ffi._typeof_locked("intptr_t(*)(intptr_t)")[0]
function = module.load_function(BFunc, layoutfuncname)
layout = []
num = 0
while True:
x = function(num)
if x < 0: break
layout.append(x)
num += 1
if isinstance(tp, model.StructOrUnion) and tp.partial:
# use the function()'s sizes and offsets to guide the
# layout of the struct
totalsize = layout[0]
totalalignment = layout[1]
fieldofs = layout[2::2]
fieldsize = layout[3::2]
tp.force_flatten()
assert len(fieldofs) == len(fieldsize) == len(tp.fldnames)
tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment
else:
cname = ('%s %s' % (prefix, name)).strip()
self._struct_pending_verification[tp] = layout, cname
def _loaded_struct_or_union(self, tp):
if tp.fldnames is None:
return # nothing to do with opaque structs
self.ffi._get_cached_btype(tp) # force 'fixedlayout' to be considered
if tp in self._struct_pending_verification:
# check that the layout sizes and offsets match the real ones
def check(realvalue, expectedvalue, msg):
if realvalue != expectedvalue:
raise VerificationError(
"%s (we have %d, but C compiler says %d)"
% (msg, expectedvalue, realvalue))
ffi = self.ffi
BStruct = ffi._get_cached_btype(tp)
layout, cname = self._struct_pending_verification.pop(tp)
check(layout[0], ffi.sizeof(BStruct), "wrong total size")
check(layout[1], ffi.alignof(BStruct), "wrong total alignment")
i = 2
for fname, ftype, fbitsize, fqual in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
check(layout[i], ffi.offsetof(BStruct, fname),
"wrong offset for field %r" % (fname,))
if layout[i+1] != 0:
BField = ffi._get_cached_btype(ftype)
check(layout[i+1], ffi.sizeof(BField),
"wrong size for field %r" % (fname,))
i += 2
assert i == len(layout)
# ----------
# 'anonymous' declarations. These are produced for anonymous structs
# or unions; the 'name' is obtained by a typedef.
def _generate_gen_anonymous_decl(self, tp, name):
if isinstance(tp, model.EnumType):
self._generate_gen_enum_decl(tp, name, '')
else:
self._generate_struct_or_union_decl(tp, '', name)
def _loading_gen_anonymous(self, tp, name, module):
if isinstance(tp, model.EnumType):
self._loading_gen_enum(tp, name, module, '')
else:
self._loading_struct_or_union(tp, '', name, module)
def _loaded_gen_anonymous(self, tp, name, module, **kwds):
if isinstance(tp, model.EnumType):
self._loaded_gen_enum(tp, name, module, **kwds)
else:
self._loaded_struct_or_union(tp)
# ----------
# constants, likely declared with '#define'
def _generate_gen_const(self, is_int, name, tp=None, category='const',
check_value=None):
prnt = self._prnt
funcname = '_cffi_%s_%s' % (category, name)
self.export_symbols.append(funcname)
if check_value is not None:
assert is_int
assert category == 'const'
prnt('int %s(char *out_error)' % funcname)
prnt('{')
self._check_int_constant_value(name, check_value)
prnt(' return 0;')
prnt('}')
elif is_int:
assert category == 'const'
prnt('int %s(long long *out_value)' % funcname)
prnt('{')
prnt(' *out_value = (long long)(%s);' % (name,))
prnt(' return (%s) <= 0;' % (name,))
prnt('}')
else:
assert tp is not None
assert check_value is None
if category == 'var':
ampersand = '&'
else:
ampersand = ''
extra = ''
if category == 'const' and isinstance(tp, model.StructOrUnion):
extra = 'const *'
ampersand = '&'
prnt(tp.get_c_name(' %s%s(void)' % (extra, funcname), name))
prnt('{')
prnt(' return (%s%s);' % (ampersand, name))
prnt('}')
prnt()
def _generate_gen_constant_decl(self, tp, name):
is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
self._generate_gen_const(is_int, name, tp)
_loading_gen_constant = _loaded_noop
def _load_constant(self, is_int, tp, name, module, check_value=None):
funcname = '_cffi_const_%s' % name
if check_value is not None:
assert is_int
self._load_known_int_constant(module, funcname)
value = check_value
elif is_int:
BType = self.ffi._typeof_locked("long long*")[0]
BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0]
function = module.load_function(BFunc, funcname)
p = self.ffi.new(BType)
negative = function(p)
value = int(p[0])
if value < 0 and not negative:
BLongLong = self.ffi._typeof_locked("long long")[0]
value += (1 << (8*self.ffi.sizeof(BLongLong)))
else:
assert check_value is None
fntypeextra = '(*)(void)'
if isinstance(tp, model.StructOrUnion):
fntypeextra = '*' + fntypeextra
BFunc = self.ffi._typeof_locked(tp.get_c_name(fntypeextra, name))[0]
function = module.load_function(BFunc, funcname)
value = function()
if isinstance(tp, model.StructOrUnion):
value = value[0]
return value
def _loaded_gen_constant(self, tp, name, module, library):
is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
value = self._load_constant(is_int, tp, name, module)
setattr(library, name, value)
type(library)._cffi_dir.append(name)
# ----------
# enums
def _check_int_constant_value(self, name, value):
prnt = self._prnt
if value <= 0:
prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % (
name, name, value))
else:
prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % (
name, name, value))
prnt(' char buf[64];')
prnt(' if ((%s) <= 0)' % name)
prnt(' sprintf(buf, "%%ld", (long)(%s));' % name)
prnt(' else')
prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' %
name)
prnt(' sprintf(out_error, "%s has the real value %s, not %s",')
prnt(' "%s", buf, "%d");' % (name[:100], value))
prnt(' return -1;')
prnt(' }')
def _load_known_int_constant(self, module, funcname):
BType = self.ffi._typeof_locked("char[]")[0]
BFunc = self.ffi._typeof_locked("int(*)(char*)")[0]
function = module.load_function(BFunc, funcname)
p = self.ffi.new(BType, 256)
if function(p) < 0:
error = self.ffi.string(p)
if sys.version_info >= (3,):
error = str(error, 'utf-8')
raise VerificationError(error)
def _enum_funcname(self, prefix, name):
# "$enum_$1" => "___D_enum____D_1"
name = name.replace('$', '___D_')
return '_cffi_e_%s_%s' % (prefix, name)
def _generate_gen_enum_decl(self, tp, name, prefix='enum'):
if tp.partial:
for enumerator in tp.enumerators:
self._generate_gen_const(True, enumerator)
return
#
funcname = self._enum_funcname(prefix, name)
self.export_symbols.append(funcname)
prnt = self._prnt
prnt('int %s(char *out_error)' % funcname)
prnt('{')
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
self._check_int_constant_value(enumerator, enumvalue)
prnt(' return 0;')
prnt('}')
prnt()
def _loading_gen_enum(self, tp, name, module, prefix='enum'):
if tp.partial:
enumvalues = [self._load_constant(True, tp, enumerator, module)
for enumerator in tp.enumerators]
tp.enumvalues = tuple(enumvalues)
tp.partial_resolved = True
else:
funcname = self._enum_funcname(prefix, name)
self._load_known_int_constant(module, funcname)
def _loaded_gen_enum(self, tp, name, module, library):
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
setattr(library, enumerator, enumvalue)
type(library)._cffi_dir.append(enumerator)
# ----------
# macros: for now only for integers
def _generate_gen_macro_decl(self, tp, name):
if tp == '...':
check_value = None
else:
check_value = tp # an integer
self._generate_gen_const(True, name, check_value=check_value)
_loading_gen_macro = _loaded_noop
def _loaded_gen_macro(self, tp, name, module, library):
if tp == '...':
check_value = None
else:
check_value = tp # an integer
value = self._load_constant(True, tp, name, module,
check_value=check_value)
setattr(library, name, value)
type(library)._cffi_dir.append(name)
# ----------
# global variables
def _generate_gen_variable_decl(self, tp, name):
if isinstance(tp, model.ArrayType):
if tp.length_is_unknown():
prnt = self._prnt
funcname = '_cffi_sizeof_%s' % (name,)
self.export_symbols.append(funcname)
prnt("size_t %s(void)" % funcname)
prnt("{")
prnt(" return sizeof(%s);" % (name,))
prnt("}")
tp_ptr = model.PointerType(tp.item)
self._generate_gen_const(False, name, tp_ptr)
else:
tp_ptr = model.PointerType(tp)
self._generate_gen_const(False, name, tp_ptr, category='var')
_loading_gen_variable = _loaded_noop
def _loaded_gen_variable(self, tp, name, module, library):
if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the
# sense that "a=..." is forbidden
if tp.length_is_unknown():
funcname = '_cffi_sizeof_%s' % (name,)
BFunc = self.ffi._typeof_locked('size_t(*)(void)')[0]
function = module.load_function(BFunc, funcname)
size = function()
BItemType = self.ffi._get_cached_btype(tp.item)
length, rest = divmod(size, self.ffi.sizeof(BItemType))
if rest != 0:
raise VerificationError(
"bad size: %r does not seem to be an array of %s" %
(name, tp.item))
tp = tp.resolve_length(length)
tp_ptr = model.PointerType(tp.item)
value = self._load_constant(False, tp_ptr, name, module)
# 'value' is a <cdata 'type *'> which we have to replace with
# a <cdata 'type[N]'> if the N is actually known
if tp.length is not None:
BArray = self.ffi._get_cached_btype(tp)
value = self.ffi.cast(BArray, value)
setattr(library, name, value)
type(library)._cffi_dir.append(name)
return
# remove ptr=<cdata 'int *'> from the library instance, and replace
# it by a property on the class, which reads/writes into ptr[0].
funcname = '_cffi_var_%s' % name
BFunc = self.ffi._typeof_locked(tp.get_c_name('*(*)(void)', name))[0]
function = module.load_function(BFunc, funcname)
ptr = function()
def getter(library):
return ptr[0]
def setter(library, value):
ptr[0] = value
setattr(type(library), name, property(getter, setter))
type(library)._cffi_dir.append(name)
cffimod_header = r'''
#include <stdio.h>
#include <stddef.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/types.h> /* XXX for ssize_t on some platforms */
/* this block of #ifs should be kept exactly identical between
c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py
and cffi/_cffi_include.h */
#if defined(_MSC_VER)
# include <malloc.h> /* for alloca() */
# if _MSC_VER < 1600 /* MSVC < 2010 */
typedef __int8 int8_t;
typedef __int16 int16_t;
typedef __int32 int32_t;
typedef __int64 int64_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
typedef __int8 int_least8_t;
typedef __int16 int_least16_t;
typedef __int32 int_least32_t;
typedef __int64 int_least64_t;
typedef unsigned __int8 uint_least8_t;
typedef unsigned __int16 uint_least16_t;
typedef unsigned __int32 uint_least32_t;
typedef unsigned __int64 uint_least64_t;
typedef __int8 int_fast8_t;
typedef __int16 int_fast16_t;
typedef __int32 int_fast32_t;
typedef __int64 int_fast64_t;
typedef unsigned __int8 uint_fast8_t;
typedef unsigned __int16 uint_fast16_t;
typedef unsigned __int32 uint_fast32_t;
typedef unsigned __int64 uint_fast64_t;
typedef __int64 intmax_t;
typedef unsigned __int64 uintmax_t;
# else
# include <stdint.h>
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
# ifndef __cplusplus
typedef unsigned char _Bool;
# endif
# endif
#else
# include <stdint.h>
# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux)
# include <alloca.h>
# endif
#endif
'''
| 26,684 | Python | 38.474852 | 80 | 0.532716 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/pkgconfig.py | # pkg-config, https://www.freedesktop.org/wiki/Software/pkg-config/ integration for cffi
import sys, os, subprocess
from .error import PkgConfigError
def merge_flags(cfg1, cfg2):
"""Merge values from cffi config flags cfg2 to cf1
Example:
merge_flags({"libraries": ["one"]}, {"libraries": ["two"]})
{"libraries": ["one", "two"]}
"""
for key, value in cfg2.items():
if key not in cfg1:
cfg1[key] = value
else:
if not isinstance(cfg1[key], list):
raise TypeError("cfg1[%r] should be a list of strings" % (key,))
if not isinstance(value, list):
raise TypeError("cfg2[%r] should be a list of strings" % (key,))
cfg1[key].extend(value)
return cfg1
def call(libname, flag, encoding=sys.getfilesystemencoding()):
"""Calls pkg-config and returns the output if found
"""
a = ["pkg-config", "--print-errors"]
a.append(flag)
a.append(libname)
try:
pc = subprocess.Popen(a, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except EnvironmentError as e:
raise PkgConfigError("cannot run pkg-config: %s" % (str(e).strip(),))
bout, berr = pc.communicate()
if pc.returncode != 0:
try:
berr = berr.decode(encoding)
except Exception:
pass
raise PkgConfigError(berr.strip())
if sys.version_info >= (3,) and not isinstance(bout, str): # Python 3.x
try:
bout = bout.decode(encoding)
except UnicodeDecodeError:
raise PkgConfigError("pkg-config %s %s returned bytes that cannot "
"be decoded with encoding %r:\n%r" %
(flag, libname, encoding, bout))
if os.altsep != '\\' and '\\' in bout:
raise PkgConfigError("pkg-config %s %s returned an unsupported "
"backslash-escaped output:\n%r" %
(flag, libname, bout))
return bout
def flags_from_pkgconfig(libs):
r"""Return compiler line flags for FFI.set_source based on pkg-config output
Usage
...
ffibuilder.set_source("_foo", pkgconfig = ["libfoo", "libbar >= 1.8.3"])
If pkg-config is installed on build machine, then arguments include_dirs,
library_dirs, libraries, define_macros, extra_compile_args and
extra_link_args are extended with an output of pkg-config for libfoo and
libbar.
Raises PkgConfigError in case the pkg-config call fails.
"""
def get_include_dirs(string):
return [x[2:] for x in string.split() if x.startswith("-I")]
def get_library_dirs(string):
return [x[2:] for x in string.split() if x.startswith("-L")]
def get_libraries(string):
return [x[2:] for x in string.split() if x.startswith("-l")]
# convert -Dfoo=bar to list of tuples [("foo", "bar")] expected by distutils
def get_macros(string):
def _macro(x):
x = x[2:] # drop "-D"
if '=' in x:
return tuple(x.split("=", 1)) # "-Dfoo=bar" => ("foo", "bar")
else:
return (x, None) # "-Dfoo" => ("foo", None)
return [_macro(x) for x in string.split() if x.startswith("-D")]
def get_other_cflags(string):
return [x for x in string.split() if not x.startswith("-I") and
not x.startswith("-D")]
def get_other_libs(string):
return [x for x in string.split() if not x.startswith("-L") and
not x.startswith("-l")]
# return kwargs for given libname
def kwargs(libname):
fse = sys.getfilesystemencoding()
all_cflags = call(libname, "--cflags")
all_libs = call(libname, "--libs")
return {
"include_dirs": get_include_dirs(all_cflags),
"library_dirs": get_library_dirs(all_libs),
"libraries": get_libraries(all_libs),
"define_macros": get_macros(all_cflags),
"extra_compile_args": get_other_cflags(all_cflags),
"extra_link_args": get_other_libs(all_libs),
}
# merge all arguments together
ret = {}
for libname in libs:
lib_flags = kwargs(libname)
merge_flags(ret, lib_flags)
return ret
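# A minimal sketch of consuming these flags ('libfoo' is a placeholder):
#
#   try:
#       kwds = flags_from_pkgconfig(["libfoo >= 1.0"])
#   except PkgConfigError:
#       kwds = {}  # fall back to defaults if pkg-config is unavailable
#   # ffibuilder.set_source("_foo", "#include <foo.h>", **kwds)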
| 4,374 | Python | 34.860655 | 88 | 0.562414 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/_cffi_include.h | #define _CFFI_
/* We try to define Py_LIMITED_API before including Python.h.
Mess: we can only define it if Py_DEBUG, Py_TRACE_REFS and
Py_REF_DEBUG are not defined. This is a best-effort approximation:
we can learn about Py_DEBUG from pyconfig.h, but it is unclear if
the same works for the other two macros. Py_DEBUG implies them,
but not the other way around.
The implementation is messy (issue #350): on Windows, with _MSC_VER,
we have to define Py_LIMITED_API even before including pyconfig.h.
In that case, we guess what pyconfig.h will do to the macros above,
and check our guess after the #include.
Note that on Windows, with CPython 3.x, you need >= 3.5 and virtualenv
version >= 16.0.0. With older versions of either, you don't get a
copy of PYTHON3.DLL in the virtualenv. We can't check the version of
CPython *before* we even include pyconfig.h. ffi.set_source() puts
a ``#define _CFFI_NO_LIMITED_API'' at the start of this file if it is
running on Windows < 3.5, as an attempt at fixing it, but that's
arguably wrong because it may not be the target version of Python.
Still better than nothing I guess. As another workaround, you can
remove the definition of Py_LIMITED_API here.
See also 'py_limited_api' in cffi/setuptools_ext.py.
*/
#if !defined(_CFFI_USE_EMBEDDING) && !defined(Py_LIMITED_API)
# ifdef _MSC_VER
# if !defined(_DEBUG) && !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG) && !defined(_CFFI_NO_LIMITED_API)
# define Py_LIMITED_API
# endif
# include <pyconfig.h>
/* sanity-check: Py_LIMITED_API will cause crashes if any of these
are also defined. Normally, the Python file PC/pyconfig.h does not
cause any of these to be defined, with the exception that _DEBUG
causes Py_DEBUG. Double-check that. */
# ifdef Py_LIMITED_API
# if defined(Py_DEBUG)
# error "pyconfig.h unexpectedly defines Py_DEBUG, but Py_LIMITED_API is set"
# endif
# if defined(Py_TRACE_REFS)
# error "pyconfig.h unexpectedly defines Py_TRACE_REFS, but Py_LIMITED_API is set"
# endif
# if defined(Py_REF_DEBUG)
# error "pyconfig.h unexpectedly defines Py_REF_DEBUG, but Py_LIMITED_API is set"
# endif
# endif
# else
# include <pyconfig.h>
# if !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG) && !defined(_CFFI_NO_LIMITED_API)
# define Py_LIMITED_API
# endif
# endif
#endif
#include <Python.h>
#ifdef __cplusplus
extern "C" {
#endif
#include <stddef.h>
#include "parse_c_type.h"
/* this block of #ifs should be kept exactly identical between
c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py
and cffi/_cffi_include.h */
#if defined(_MSC_VER)
# include <malloc.h> /* for alloca() */
# if _MSC_VER < 1600 /* MSVC < 2010 */
typedef __int8 int8_t;
typedef __int16 int16_t;
typedef __int32 int32_t;
typedef __int64 int64_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
typedef __int8 int_least8_t;
typedef __int16 int_least16_t;
typedef __int32 int_least32_t;
typedef __int64 int_least64_t;
typedef unsigned __int8 uint_least8_t;
typedef unsigned __int16 uint_least16_t;
typedef unsigned __int32 uint_least32_t;
typedef unsigned __int64 uint_least64_t;
typedef __int8 int_fast8_t;
typedef __int16 int_fast16_t;
typedef __int32 int_fast32_t;
typedef __int64 int_fast64_t;
typedef unsigned __int8 uint_fast8_t;
typedef unsigned __int16 uint_fast16_t;
typedef unsigned __int32 uint_fast32_t;
typedef unsigned __int64 uint_fast64_t;
typedef __int64 intmax_t;
typedef unsigned __int64 uintmax_t;
# else
# include <stdint.h>
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
# ifndef __cplusplus
typedef unsigned char _Bool;
# endif
# endif
#else
# include <stdint.h>
# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux)
# include <alloca.h>
# endif
#endif
#ifdef __GNUC__
# define _CFFI_UNUSED_FN __attribute__((unused))
#else
# define _CFFI_UNUSED_FN /* nothing */
#endif
#ifdef __cplusplus
# ifndef _Bool
typedef bool _Bool; /* semi-hackish: C++ has no _Bool; bool is builtin */
# endif
#endif
/********** CPython-specific section **********/
#ifndef PYPY_VERSION
#if PY_MAJOR_VERSION >= 3
# define PyInt_FromLong PyLong_FromLong
#endif
#define _cffi_from_c_double PyFloat_FromDouble
#define _cffi_from_c_float PyFloat_FromDouble
#define _cffi_from_c_long PyInt_FromLong
#define _cffi_from_c_ulong PyLong_FromUnsignedLong
#define _cffi_from_c_longlong PyLong_FromLongLong
#define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong
#define _cffi_from_c__Bool PyBool_FromLong
#define _cffi_to_c_double PyFloat_AsDouble
#define _cffi_to_c_float PyFloat_AsDouble
#define _cffi_from_c_int(x, type) \
(((type)-1) > 0 ? /* unsigned */ \
(sizeof(type) < sizeof(long) ? \
PyInt_FromLong((long)x) : \
sizeof(type) == sizeof(long) ? \
PyLong_FromUnsignedLong((unsigned long)x) : \
PyLong_FromUnsignedLongLong((unsigned long long)x)) : \
(sizeof(type) <= sizeof(long) ? \
PyInt_FromLong((long)x) : \
PyLong_FromLongLong((long long)x)))
#define _cffi_to_c_int(o, type) \
((type)( \
sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \
: (type)_cffi_to_c_i8(o)) : \
sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \
: (type)_cffi_to_c_i16(o)) : \
sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \
: (type)_cffi_to_c_i32(o)) : \
sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \
: (type)_cffi_to_c_i64(o)) : \
(Py_FatalError("unsupported size for type " #type), (type)0)))
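/* Illustrative note (added): for example, _cffi_from_c_int(x, short)
   folds at compile time to PyInt_FromLong((long)x), because 'short' is
   signed and no larger than 'long'; _cffi_to_c_int(o, short) likewise
   folds to (short)_cffi_to_c_i16(o).  Every sizeof() comparison above is
   a compile-time constant, so the chains cost nothing at runtime. */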
#define _cffi_to_c_i8 \
((int(*)(PyObject *))_cffi_exports[1])
#define _cffi_to_c_u8 \
((int(*)(PyObject *))_cffi_exports[2])
#define _cffi_to_c_i16 \
((int(*)(PyObject *))_cffi_exports[3])
#define _cffi_to_c_u16 \
((int(*)(PyObject *))_cffi_exports[4])
#define _cffi_to_c_i32 \
((int(*)(PyObject *))_cffi_exports[5])
#define _cffi_to_c_u32 \
((unsigned int(*)(PyObject *))_cffi_exports[6])
#define _cffi_to_c_i64 \
((long long(*)(PyObject *))_cffi_exports[7])
#define _cffi_to_c_u64 \
((unsigned long long(*)(PyObject *))_cffi_exports[8])
#define _cffi_to_c_char \
((int(*)(PyObject *))_cffi_exports[9])
#define _cffi_from_c_pointer \
((PyObject *(*)(char *, struct _cffi_ctypedescr *))_cffi_exports[10])
#define _cffi_to_c_pointer \
((char *(*)(PyObject *, struct _cffi_ctypedescr *))_cffi_exports[11])
#define _cffi_get_struct_layout \
not used any more
#define _cffi_restore_errno \
((void(*)(void))_cffi_exports[13])
#define _cffi_save_errno \
((void(*)(void))_cffi_exports[14])
#define _cffi_from_c_char \
((PyObject *(*)(char))_cffi_exports[15])
#define _cffi_from_c_deref \
((PyObject *(*)(char *, struct _cffi_ctypedescr *))_cffi_exports[16])
#define _cffi_to_c \
((int(*)(char *, struct _cffi_ctypedescr *, PyObject *))_cffi_exports[17])
#define _cffi_from_c_struct \
((PyObject *(*)(char *, struct _cffi_ctypedescr *))_cffi_exports[18])
#define _cffi_to_c_wchar_t \
((_cffi_wchar_t(*)(PyObject *))_cffi_exports[19])
#define _cffi_from_c_wchar_t \
((PyObject *(*)(_cffi_wchar_t))_cffi_exports[20])
#define _cffi_to_c_long_double \
((long double(*)(PyObject *))_cffi_exports[21])
#define _cffi_to_c__Bool \
((_Bool(*)(PyObject *))_cffi_exports[22])
#define _cffi_prepare_pointer_call_argument \
((Py_ssize_t(*)(struct _cffi_ctypedescr *, \
PyObject *, char **))_cffi_exports[23])
#define _cffi_convert_array_from_object \
((int(*)(char *, struct _cffi_ctypedescr *, PyObject *))_cffi_exports[24])
#define _CFFI_CPIDX 25
#define _cffi_call_python \
((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[_CFFI_CPIDX])
#define _cffi_to_c_wchar3216_t \
((int(*)(PyObject *))_cffi_exports[26])
#define _cffi_from_c_wchar3216_t \
((PyObject *(*)(int))_cffi_exports[27])
#define _CFFI_NUM_EXPORTS 28
struct _cffi_ctypedescr;
static void *_cffi_exports[_CFFI_NUM_EXPORTS];
#define _cffi_type(index) ( \
assert((((uintptr_t)_cffi_types[index]) & 1) == 0), \
(struct _cffi_ctypedescr *)_cffi_types[index])
static PyObject *_cffi_init(const char *module_name, Py_ssize_t version,
const struct _cffi_type_context_s *ctx)
{
PyObject *module, *o_arg, *new_module;
void *raw[] = {
(void *)module_name,
(void *)version,
(void *)_cffi_exports,
(void *)ctx,
};
module = PyImport_ImportModule("_cffi_backend");
if (module == NULL)
goto failure;
o_arg = PyLong_FromVoidPtr((void *)raw);
if (o_arg == NULL)
goto failure;
new_module = PyObject_CallMethod(
module, (char *)"_init_cffi_1_0_external_module", (char *)"O", o_arg);
Py_DECREF(o_arg);
Py_DECREF(module);
return new_module;
failure:
Py_XDECREF(module);
return NULL;
}
#ifdef HAVE_WCHAR_H
typedef wchar_t _cffi_wchar_t;
#else
typedef uint16_t _cffi_wchar_t; /* same random pick as _cffi_backend.c */
#endif
_CFFI_UNUSED_FN static uint16_t _cffi_to_c_char16_t(PyObject *o)
{
if (sizeof(_cffi_wchar_t) == 2)
return (uint16_t)_cffi_to_c_wchar_t(o);
else
return (uint16_t)_cffi_to_c_wchar3216_t(o);
}
_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char16_t(uint16_t x)
{
if (sizeof(_cffi_wchar_t) == 2)
return _cffi_from_c_wchar_t((_cffi_wchar_t)x);
else
return _cffi_from_c_wchar3216_t((int)x);
}
_CFFI_UNUSED_FN static int _cffi_to_c_char32_t(PyObject *o)
{
if (sizeof(_cffi_wchar_t) == 4)
return (int)_cffi_to_c_wchar_t(o);
else
return (int)_cffi_to_c_wchar3216_t(o);
}
_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char32_t(unsigned int x)
{
if (sizeof(_cffi_wchar_t) == 4)
return _cffi_from_c_wchar_t((_cffi_wchar_t)x);
else
return _cffi_from_c_wchar3216_t((int)x);
}
union _cffi_union_alignment_u {
unsigned char m_char;
unsigned short m_short;
unsigned int m_int;
unsigned long m_long;
unsigned long long m_longlong;
float m_float;
double m_double;
long double m_longdouble;
};
struct _cffi_freeme_s {
struct _cffi_freeme_s *next;
union _cffi_union_alignment_u alignment;
};
_CFFI_UNUSED_FN static int
_cffi_convert_array_argument(struct _cffi_ctypedescr *ctptr, PyObject *arg,
char **output_data, Py_ssize_t datasize,
struct _cffi_freeme_s **freeme)
{
char *p;
if (datasize < 0)
return -1;
p = *output_data;
if (p == NULL) {
struct _cffi_freeme_s *fp = (struct _cffi_freeme_s *)PyObject_Malloc(
offsetof(struct _cffi_freeme_s, alignment) + (size_t)datasize);
if (fp == NULL)
return -1;
fp->next = *freeme;
*freeme = fp;
p = *output_data = (char *)&fp->alignment;
}
memset((void *)p, 0, (size_t)datasize);
return _cffi_convert_array_from_object(p, ctptr, arg);
}
_CFFI_UNUSED_FN static void
_cffi_free_array_arguments(struct _cffi_freeme_s *freeme)
{
do {
void *p = (void *)freeme;
freeme = freeme->next;
PyObject_Free(p);
} while (freeme != NULL);
}
/********** end CPython-specific section **********/
#else
_CFFI_UNUSED_FN
static void (*_cffi_call_python_org)(struct _cffi_externpy_s *, char *);
# define _cffi_call_python _cffi_call_python_org
#endif
#define _cffi_array_len(array) (sizeof(array) / sizeof((array)[0]))
#define _cffi_prim_int(size, sign) \
((size) == 1 ? ((sign) ? _CFFI_PRIM_INT8 : _CFFI_PRIM_UINT8) : \
(size) == 2 ? ((sign) ? _CFFI_PRIM_INT16 : _CFFI_PRIM_UINT16) : \
(size) == 4 ? ((sign) ? _CFFI_PRIM_INT32 : _CFFI_PRIM_UINT32) : \
(size) == 8 ? ((sign) ? _CFFI_PRIM_INT64 : _CFFI_PRIM_UINT64) : \
_CFFI__UNKNOWN_PRIM)
#define _cffi_prim_float(size) \
((size) == sizeof(float) ? _CFFI_PRIM_FLOAT : \
(size) == sizeof(double) ? _CFFI_PRIM_DOUBLE : \
(size) == sizeof(long double) ? _CFFI__UNKNOWN_LONG_DOUBLE : \
_CFFI__UNKNOWN_FLOAT_PRIM)
#define _cffi_check_int(got, got_nonpos, expected) \
((got_nonpos) == (expected <= 0) && \
(got) == (unsigned long long)expected)
#ifdef MS_WIN32
# define _cffi_stdcall __stdcall
#else
# define _cffi_stdcall /* nothing */
#endif
#ifdef __cplusplus
}
#endif
| 14,800 | C | 37.344559 | 133 | 0.53473 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/cparser.py | from . import model
from .commontypes import COMMON_TYPES, resolve_common_type
from .error import FFIError, CDefError
try:
from . import _pycparser as pycparser
except ImportError:
import pycparser
import weakref, re, sys
try:
if sys.version_info < (3,):
import thread as _thread
else:
import _thread
lock = _thread.allocate_lock()
except ImportError:
lock = None
def _workaround_for_static_import_finders():
    # Issue #392: packaging tools like cx_Freeze cannot find these
    # modules because pycparser imports them dynamically via exec.  This
    # is an obscure workaround.  This function is never called.
import pycparser.yacctab
import pycparser.lextab
CDEF_SOURCE_STRING = "<cdef source string>"
_r_comment = re.compile(r"/\*.*?\*/|//([^\n\\]|\\.)*?$",
re.DOTALL | re.MULTILINE)
_r_define = re.compile(r"^\s*#\s*define\s+([A-Za-z_][A-Za-z_0-9]*)"
r"\b((?:[^\n\\]|\\.)*?)$",
re.DOTALL | re.MULTILINE)
_r_line_directive = re.compile(r"^[ \t]*#[ \t]*(?:line|\d+)\b.*$", re.MULTILINE)
_r_partial_enum = re.compile(r"=\s*\.\.\.\s*[,}]|\.\.\.\s*\}")
_r_enum_dotdotdot = re.compile(r"__dotdotdot\d+__$")
_r_partial_array = re.compile(r"\[\s*\.\.\.\s*\]")
_r_words = re.compile(r"\w+|\S")
_parser_cache = None
_r_int_literal = re.compile(r"-?0?x?[0-9a-f]+[lu]*$", re.IGNORECASE)
_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b")
_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b")
_r_cdecl = re.compile(r"\b__cdecl\b")
_r_extern_python = re.compile(r'\bextern\s*"'
r'(Python|Python\s*\+\s*C|C\s*\+\s*Python)"\s*.')
_r_star_const_space = re.compile( # matches "* const "
r"[*]\s*((const|volatile|restrict)\b\s*)+")
_r_int_dotdotdot = re.compile(r"(\b(int|long|short|signed|unsigned|char)\s*)+"
r"\.\.\.")
_r_float_dotdotdot = re.compile(r"\b(double|float)\s*\.\.\.")
def _get_parser():
global _parser_cache
if _parser_cache is None:
_parser_cache = pycparser.CParser()
return _parser_cache
def _workaround_for_old_pycparser(csource):
# Workaround for a pycparser issue (fixed between pycparser 2.10 and
# 2.14): "char*const***" gives us a wrong syntax tree, the same as
# for "char***(*const)". This means we can't tell the difference
# afterwards. But "char(*const(***))" gives us the right syntax
# tree. The issue only occurs if there are several stars in
    # sequence with no parentheses in between, just possibly qualifiers.
# Attempt to fix it by adding some parentheses in the source: each
# time we see "* const" or "* const *", we add an opening
# parenthesis before each star---the hard part is figuring out where
# to close them.
parts = []
while True:
match = _r_star_const_space.search(csource)
if not match:
break
#print repr(''.join(parts)+csource), '=>',
parts.append(csource[:match.start()])
parts.append('('); closing = ')'
parts.append(match.group()) # e.g. "* const "
endpos = match.end()
if csource.startswith('*', endpos):
parts.append('('); closing += ')'
level = 0
i = endpos
while i < len(csource):
c = csource[i]
if c == '(':
level += 1
elif c == ')':
if level == 0:
break
level -= 1
elif c in ',;=':
if level == 0:
break
i += 1
csource = csource[endpos:i] + closing + csource[i:]
#print repr(''.join(parts)+csource)
parts.append(csource)
return ''.join(parts)
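# Example of the rewriting above (illustrative): the declaration
#     char * const ***x;
# becomes
#     char (* const (***x));
# which old pycparser versions parse into the intended syntax tree.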
def _preprocess_extern_python(csource):
# input: `extern "Python" int foo(int);` or
# `extern "Python" { int foo(int); }`
# output:
# void __cffi_extern_python_start;
# int foo(int);
# void __cffi_extern_python_stop;
#
# input: `extern "Python+C" int foo(int);`
# output:
# void __cffi_extern_python_plus_c_start;
# int foo(int);
# void __cffi_extern_python_stop;
parts = []
while True:
match = _r_extern_python.search(csource)
if not match:
break
endpos = match.end() - 1
#print
#print ''.join(parts)+csource
#print '=>'
parts.append(csource[:match.start()])
if 'C' in match.group(1):
parts.append('void __cffi_extern_python_plus_c_start; ')
else:
parts.append('void __cffi_extern_python_start; ')
if csource[endpos] == '{':
# grouping variant
closing = csource.find('}', endpos)
if closing < 0:
raise CDefError("'extern \"Python\" {': no '}' found")
if csource.find('{', endpos + 1, closing) >= 0:
raise NotImplementedError("cannot use { } inside a block "
"'extern \"Python\" { ... }'")
parts.append(csource[endpos+1:closing])
csource = csource[closing+1:]
else:
# non-grouping variant
semicolon = csource.find(';', endpos)
if semicolon < 0:
                raise CDefError("'extern \"Python\"': no ';' found")
parts.append(csource[endpos:semicolon+1])
csource = csource[semicolon+1:]
parts.append(' void __cffi_extern_python_stop;')
#print ''.join(parts)+csource
#print
parts.append(csource)
return ''.join(parts)
def _warn_for_string_literal(csource):
if '"' not in csource:
return
for line in csource.splitlines():
if '"' in line and not line.lstrip().startswith('#'):
import warnings
warnings.warn("String literal found in cdef() or type source. "
"String literals are ignored here, but you should "
"remove them anyway because some character sequences "
"confuse pre-parsing.")
break
def _warn_for_non_extern_non_static_global_variable(decl):
if not decl.storage:
import warnings
warnings.warn("Global variable '%s' in cdef(): for consistency "
"with C it should have a storage class specifier "
"(usually 'extern')" % (decl.name,))
def _remove_line_directives(csource):
# _r_line_directive matches whole lines, without the final \n, if they
# start with '#line' with some spacing allowed, or '#NUMBER'. This
# function stores them away and replaces them with exactly the string
# '#line@N', where N is the index in the list 'line_directives'.
line_directives = []
def replace(m):
i = len(line_directives)
line_directives.append(m.group())
return '#line@%d' % i
csource = _r_line_directive.sub(replace, csource)
return csource, line_directives
def _put_back_line_directives(csource, line_directives):
def replace(m):
s = m.group()
if not s.startswith('#line@'):
raise AssertionError("unexpected #line directive "
"(should have been processed and removed")
return line_directives[int(s[6:])]
return _r_line_directive.sub(replace, csource)
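# Small worked example (illustrative): for the input
#     '#line 5 "foo.h"\nint a;\n# 42\nint b;'
# _remove_line_directives() returns
#     ('#line@0\nint a;\n#line@1\nint b;', ['#line 5 "foo.h"', '# 42'])
# and _put_back_line_directives() applied to that pair restores the
# original text.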
def _preprocess(csource):
# First, remove the lines of the form '#line N "filename"' because
# the "filename" part could confuse the rest
csource, line_directives = _remove_line_directives(csource)
    # Remove comments.  NOTE: this only works because the cdef() section
# should not contain any string literals (except in line directives)!
def replace_keeping_newlines(m):
return ' ' + m.group().count('\n') * '\n'
csource = _r_comment.sub(replace_keeping_newlines, csource)
# Remove the "#define FOO x" lines
macros = {}
for match in _r_define.finditer(csource):
macroname, macrovalue = match.groups()
macrovalue = macrovalue.replace('\\\n', '').strip()
macros[macroname] = macrovalue
csource = _r_define.sub('', csource)
#
if pycparser.__version__ < '2.14':
csource = _workaround_for_old_pycparser(csource)
#
# BIG HACK: replace WINAPI or __stdcall with "volatile const".
# It doesn't make sense for the return type of a function to be
# "volatile volatile const", so we abuse it to detect __stdcall...
# Hack number 2 is that "int(volatile *fptr)();" is not valid C
# syntax, so we place the "volatile" before the opening parenthesis.
csource = _r_stdcall2.sub(' volatile volatile const(', csource)
csource = _r_stdcall1.sub(' volatile volatile const ', csource)
csource = _r_cdecl.sub(' ', csource)
#
# Replace `extern "Python"` with start/end markers
csource = _preprocess_extern_python(csource)
#
# Now there should not be any string literal left; warn if we get one
_warn_for_string_literal(csource)
#
# Replace "[...]" with "[__dotdotdotarray__]"
csource = _r_partial_array.sub('[__dotdotdotarray__]', csource)
#
# Replace "...}" with "__dotdotdotNUM__}". This construction should
# occur only at the end of enums; at the end of structs we have "...;}"
# and at the end of vararg functions "...);". Also replace "=...[,}]"
# with ",__dotdotdotNUM__[,}]": this occurs in the enums too, when
# giving an unknown value.
matches = list(_r_partial_enum.finditer(csource))
for number, match in enumerate(reversed(matches)):
p = match.start()
if csource[p] == '=':
p2 = csource.find('...', p, match.end())
assert p2 > p
csource = '%s,__dotdotdot%d__ %s' % (csource[:p], number,
csource[p2+3:])
else:
assert csource[p:p+3] == '...'
csource = '%s __dotdotdot%d__ %s' % (csource[:p], number,
csource[p+3:])
# Replace "int ..." or "unsigned long int..." with "__dotdotdotint__"
csource = _r_int_dotdotdot.sub(' __dotdotdotint__ ', csource)
# Replace "float ..." or "double..." with "__dotdotdotfloat__"
csource = _r_float_dotdotdot.sub(' __dotdotdotfloat__ ', csource)
# Replace all remaining "..." with the same name, "__dotdotdot__",
# which is declared with a typedef for the purpose of C parsing.
csource = csource.replace('...', ' __dotdotdot__ ')
# Finally, put back the line directives
csource = _put_back_line_directives(csource, line_directives)
return csource, macros
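# End-to-end illustration (not from the original source): preprocessing
#     '#define FOO 0x10\nstruct s { int a; ...; };'
# returns macros == {'FOO': '0x10'} and a csource in which the "#define"
# line has been stripped and the "...;" inside the struct has become
# " __dotdotdot__ ;", i.e. an ordinary identifier that pycparser accepts.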
def _common_type_names(csource):
# Look in the source for what looks like usages of types from the
# list of common types. A "usage" is approximated here as the
# appearance of the word, minus a "definition" of the type, which
    # is the last word in a "typedef" statement.  This is approximate
    # only, but should be fine for all the common types.
look_for_words = set(COMMON_TYPES)
look_for_words.add(';')
look_for_words.add(',')
look_for_words.add('(')
look_for_words.add(')')
look_for_words.add('typedef')
words_used = set()
is_typedef = False
paren = 0
previous_word = ''
for word in _r_words.findall(csource):
if word in look_for_words:
if word == ';':
if is_typedef:
words_used.discard(previous_word)
look_for_words.discard(previous_word)
is_typedef = False
elif word == 'typedef':
is_typedef = True
paren = 0
elif word == '(':
paren += 1
elif word == ')':
paren -= 1
elif word == ',':
if is_typedef and paren == 0:
words_used.discard(previous_word)
look_for_words.discard(previous_word)
else: # word in COMMON_TYPES
words_used.add(word)
previous_word = word
return words_used
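# For instance (illustrative): on "typedef int size_t; int32_t f(size_t);"
# this returns {'int32_t'}.  'size_t' is seen as well, but because it is
# the name being (re)defined by the typedef, it is discarded again when
# the ';' is reached.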
class Parser(object):
def __init__(self):
self._declarations = {}
self._included_declarations = set()
self._anonymous_counter = 0
self._structnode2type = weakref.WeakKeyDictionary()
self._options = {}
self._int_constants = {}
self._recomplete = []
self._uses_new_feature = None
def _parse(self, csource):
csource, macros = _preprocess(csource)
# XXX: for more efficiency we would need to poke into the
# internals of CParser... the following registers the
# typedefs, because their presence or absence influences the
# parsing itself (but what they are typedef'ed to plays no role)
ctn = _common_type_names(csource)
typenames = []
for name in sorted(self._declarations):
if name.startswith('typedef '):
name = name[8:]
typenames.append(name)
ctn.discard(name)
typenames += sorted(ctn)
#
csourcelines = []
csourcelines.append('# 1 "<cdef automatic initialization code>"')
for typename in typenames:
csourcelines.append('typedef int %s;' % typename)
csourcelines.append('typedef int __dotdotdotint__, __dotdotdotfloat__,'
' __dotdotdot__;')
# this forces pycparser to consider the following in the file
# called <cdef source string> from line 1
csourcelines.append('# 1 "%s"' % (CDEF_SOURCE_STRING,))
csourcelines.append(csource)
fullcsource = '\n'.join(csourcelines)
if lock is not None:
lock.acquire() # pycparser is not thread-safe...
try:
ast = _get_parser().parse(fullcsource)
except pycparser.c_parser.ParseError as e:
self.convert_pycparser_error(e, csource)
finally:
if lock is not None:
lock.release()
# csource will be used to find buggy source text
return ast, macros, csource
def _convert_pycparser_error(self, e, csource):
# xxx look for "<cdef source string>:NUM:" at the start of str(e)
# and interpret that as a line number. This will not work if
# the user gives explicit ``# NUM "FILE"`` directives.
line = None
msg = str(e)
match = re.match(r"%s:(\d+):" % (CDEF_SOURCE_STRING,), msg)
if match:
linenum = int(match.group(1), 10)
csourcelines = csource.splitlines()
if 1 <= linenum <= len(csourcelines):
line = csourcelines[linenum-1]
return line
def convert_pycparser_error(self, e, csource):
line = self._convert_pycparser_error(e, csource)
msg = str(e)
if line:
msg = 'cannot parse "%s"\n%s' % (line.strip(), msg)
else:
msg = 'parse error\n%s' % (msg,)
raise CDefError(msg)
def parse(self, csource, override=False, packed=False, pack=None,
dllexport=False):
if packed:
if packed != True:
raise ValueError("'packed' should be False or True; use "
"'pack' to give another value")
if pack:
raise ValueError("cannot give both 'pack' and 'packed'")
pack = 1
elif pack:
if pack & (pack - 1):
raise ValueError("'pack' must be a power of two, not %r" %
(pack,))
else:
pack = 0
prev_options = self._options
try:
self._options = {'override': override,
'packed': pack,
'dllexport': dllexport}
self._internal_parse(csource)
finally:
self._options = prev_options
def _internal_parse(self, csource):
ast, macros, csource = self._parse(csource)
# add the macros
self._process_macros(macros)
# find the first "__dotdotdot__" and use that as a separator
# between the repeated typedefs and the real csource
iterator = iter(ast.ext)
for decl in iterator:
if decl.name == '__dotdotdot__':
break
else:
assert 0
current_decl = None
#
try:
self._inside_extern_python = '__cffi_extern_python_stop'
for decl in iterator:
current_decl = decl
if isinstance(decl, pycparser.c_ast.Decl):
self._parse_decl(decl)
elif isinstance(decl, pycparser.c_ast.Typedef):
if not decl.name:
raise CDefError("typedef does not declare any name",
decl)
quals = 0
if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) and
decl.type.type.names[-1].startswith('__dotdotdot')):
realtype = self._get_unknown_type(decl)
elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and
isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and
isinstance(decl.type.type.type,
pycparser.c_ast.IdentifierType) and
decl.type.type.type.names[-1].startswith('__dotdotdot')):
realtype = self._get_unknown_ptr_type(decl)
else:
realtype, quals = self._get_type_and_quals(
decl.type, name=decl.name, partial_length_ok=True,
typedef_example="*(%s *)0" % (decl.name,))
self._declare('typedef ' + decl.name, realtype, quals=quals)
elif decl.__class__.__name__ == 'Pragma':
pass # skip pragma, only in pycparser 2.15
else:
raise CDefError("unexpected <%s>: this construct is valid "
"C but not valid in cdef()" %
decl.__class__.__name__, decl)
except CDefError as e:
if len(e.args) == 1:
e.args = e.args + (current_decl,)
raise
except FFIError as e:
msg = self._convert_pycparser_error(e, csource)
if msg:
e.args = (e.args[0] + "\n *** Err: %s" % msg,)
raise
def _add_constants(self, key, val):
if key in self._int_constants:
if self._int_constants[key] == val:
return # ignore identical double declarations
raise FFIError(
"multiple declarations of constant: %s" % (key,))
self._int_constants[key] = val
def _add_integer_constant(self, name, int_str):
int_str = int_str.lower().rstrip("ul")
neg = int_str.startswith('-')
if neg:
int_str = int_str[1:]
# "010" is not valid oct in py3
if (int_str.startswith("0") and int_str != '0'
and not int_str.startswith("0x")):
int_str = "0o" + int_str[1:]
pyvalue = int(int_str, 0)
if neg:
pyvalue = -pyvalue
self._add_constants(name, pyvalue)
self._declare('macro ' + name, pyvalue)
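    # Worked example (illustrative): for name="X" and int_str="-010UL",
    # the sign and the "UL" suffix are stripped, "010" is rewritten to
    # "0o10" (a plain leading-zero octal literal is invalid in Python 3),
    # and X is registered with the value -8.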
def _process_macros(self, macros):
for key, value in macros.items():
value = value.strip()
if _r_int_literal.match(value):
self._add_integer_constant(key, value)
elif value == '...':
self._declare('macro ' + key, value)
else:
raise CDefError(
'only supports one of the following syntax:\n'
' #define %s ... (literally dot-dot-dot)\n'
' #define %s NUMBER (with NUMBER an integer'
' constant, decimal/hex/octal)\n'
'got:\n'
' #define %s %s'
% (key, key, key, value))
def _declare_function(self, tp, quals, decl):
tp = self._get_type_pointer(tp, quals)
if self._options.get('dllexport'):
tag = 'dllexport_python '
elif self._inside_extern_python == '__cffi_extern_python_start':
tag = 'extern_python '
elif self._inside_extern_python == '__cffi_extern_python_plus_c_start':
tag = 'extern_python_plus_c '
else:
tag = 'function '
self._declare(tag + decl.name, tp)
def _parse_decl(self, decl):
node = decl.type
if isinstance(node, pycparser.c_ast.FuncDecl):
tp, quals = self._get_type_and_quals(node, name=decl.name)
assert isinstance(tp, model.RawFunctionType)
self._declare_function(tp, quals, decl)
else:
if isinstance(node, pycparser.c_ast.Struct):
self._get_struct_union_enum_type('struct', node)
elif isinstance(node, pycparser.c_ast.Union):
self._get_struct_union_enum_type('union', node)
elif isinstance(node, pycparser.c_ast.Enum):
self._get_struct_union_enum_type('enum', node)
elif not decl.name:
raise CDefError("construct does not declare any variable",
decl)
#
if decl.name:
tp, quals = self._get_type_and_quals(node,
partial_length_ok=True)
if tp.is_raw_function:
self._declare_function(tp, quals, decl)
elif (tp.is_integer_type() and
hasattr(decl, 'init') and
hasattr(decl.init, 'value') and
_r_int_literal.match(decl.init.value)):
self._add_integer_constant(decl.name, decl.init.value)
elif (tp.is_integer_type() and
isinstance(decl.init, pycparser.c_ast.UnaryOp) and
decl.init.op == '-' and
hasattr(decl.init.expr, 'value') and
_r_int_literal.match(decl.init.expr.value)):
self._add_integer_constant(decl.name,
'-' + decl.init.expr.value)
elif (tp is model.void_type and
decl.name.startswith('__cffi_extern_python_')):
# hack: `extern "Python"` in the C source is replaced
# with "void __cffi_extern_python_start;" and
# "void __cffi_extern_python_stop;"
self._inside_extern_python = decl.name
else:
                if self._inside_extern_python != '__cffi_extern_python_stop':
raise CDefError(
"cannot declare constants or "
"variables with 'extern \"Python\"'")
if (quals & model.Q_CONST) and not tp.is_array_type:
self._declare('constant ' + decl.name, tp, quals=quals)
else:
_warn_for_non_extern_non_static_global_variable(decl)
self._declare('variable ' + decl.name, tp, quals=quals)
def parse_type(self, cdecl):
return self.parse_type_and_quals(cdecl)[0]
def parse_type_and_quals(self, cdecl):
ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2]
assert not macros
exprnode = ast.ext[-1].type.args.params[0]
if isinstance(exprnode, pycparser.c_ast.ID):
raise CDefError("unknown identifier '%s'" % (exprnode.name,))
return self._get_type_and_quals(exprnode.type)
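    # Illustrative example (not in the original source): parse_type("int *")
    # parses the dummy source 'void __dummy(\nint *\n);', extracts the type
    # of its single parameter, and returns roughly a model.PointerType
    # around the primitive 'int'; parse_type_and_quals() additionally
    # returns the qualifier bits (0 here).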
def _declare(self, name, obj, included=False, quals=0):
if name in self._declarations:
prevobj, prevquals = self._declarations[name]
if prevobj is obj and prevquals == quals:
return
if not self._options.get('override'):
raise FFIError(
"multiple declarations of %s (for interactive usage, "
"try cdef(xx, override=True))" % (name,))
assert '__dotdotdot__' not in name.split()
self._declarations[name] = (obj, quals)
if included:
self._included_declarations.add(obj)
def _extract_quals(self, type):
quals = 0
if isinstance(type, (pycparser.c_ast.TypeDecl,
pycparser.c_ast.PtrDecl)):
if 'const' in type.quals:
quals |= model.Q_CONST
if 'volatile' in type.quals:
quals |= model.Q_VOLATILE
if 'restrict' in type.quals:
quals |= model.Q_RESTRICT
return quals
def _get_type_pointer(self, type, quals, declname=None):
if isinstance(type, model.RawFunctionType):
return type.as_function_pointer()
if (isinstance(type, model.StructOrUnionOrEnum) and
type.name.startswith('$') and type.name[1:].isdigit() and
type.forcename is None and declname is not None):
return model.NamedPointerType(type, declname, quals)
return model.PointerType(type, quals)
def _get_type_and_quals(self, typenode, name=None, partial_length_ok=False,
typedef_example=None):
        # first, dereference typedefs; if we have it already parsed, we're good
if (isinstance(typenode, pycparser.c_ast.TypeDecl) and
isinstance(typenode.type, pycparser.c_ast.IdentifierType) and
len(typenode.type.names) == 1 and
('typedef ' + typenode.type.names[0]) in self._declarations):
tp, quals = self._declarations['typedef ' + typenode.type.names[0]]
quals |= self._extract_quals(typenode)
return tp, quals
#
if isinstance(typenode, pycparser.c_ast.ArrayDecl):
# array type
if typenode.dim is None:
length = None
else:
length = self._parse_constant(
typenode.dim, partial_length_ok=partial_length_ok)
# a hack: in 'typedef int foo_t[...][...];', don't use '...' as
            # the length but directly use the C expression that would be
# generated by recompiler.py. This lets the typedef be used in
# many more places within recompiler.py
if typedef_example is not None:
if length == '...':
length = '_cffi_array_len(%s)' % (typedef_example,)
typedef_example = "*" + typedef_example
#
tp, quals = self._get_type_and_quals(typenode.type,
partial_length_ok=partial_length_ok,
typedef_example=typedef_example)
return model.ArrayType(tp, length), quals
#
if isinstance(typenode, pycparser.c_ast.PtrDecl):
# pointer type
itemtype, itemquals = self._get_type_and_quals(typenode.type)
tp = self._get_type_pointer(itemtype, itemquals, declname=name)
quals = self._extract_quals(typenode)
return tp, quals
#
if isinstance(typenode, pycparser.c_ast.TypeDecl):
quals = self._extract_quals(typenode)
type = typenode.type
if isinstance(type, pycparser.c_ast.IdentifierType):
# assume a primitive type. get it from .names, but reduce
# synonyms to a single chosen combination
names = list(type.names)
if names != ['signed', 'char']: # keep this unmodified
prefixes = {}
while names:
name = names[0]
if name in ('short', 'long', 'signed', 'unsigned'):
prefixes[name] = prefixes.get(name, 0) + 1
del names[0]
else:
break
# ignore the 'signed' prefix below, and reorder the others
newnames = []
for prefix in ('unsigned', 'short', 'long'):
for i in range(prefixes.get(prefix, 0)):
newnames.append(prefix)
if not names:
names = ['int'] # implicitly
if names == ['int']: # but kill it if 'short' or 'long'
if 'short' in prefixes or 'long' in prefixes:
names = []
names = newnames + names
ident = ' '.join(names)
if ident == 'void':
return model.void_type, quals
if ident == '__dotdotdot__':
raise FFIError(':%d: bad usage of "..."' %
typenode.coord.line)
tp0, quals0 = resolve_common_type(self, ident)
return tp0, (quals | quals0)
#
if isinstance(type, pycparser.c_ast.Struct):
# 'struct foobar'
tp = self._get_struct_union_enum_type('struct', type, name)
return tp, quals
#
if isinstance(type, pycparser.c_ast.Union):
# 'union foobar'
tp = self._get_struct_union_enum_type('union', type, name)
return tp, quals
#
if isinstance(type, pycparser.c_ast.Enum):
# 'enum foobar'
tp = self._get_struct_union_enum_type('enum', type, name)
return tp, quals
#
if isinstance(typenode, pycparser.c_ast.FuncDecl):
# a function type
return self._parse_function_type(typenode, name), 0
#
# nested anonymous structs or unions end up here
if isinstance(typenode, pycparser.c_ast.Struct):
return self._get_struct_union_enum_type('struct', typenode, name,
nested=True), 0
if isinstance(typenode, pycparser.c_ast.Union):
return self._get_struct_union_enum_type('union', typenode, name,
nested=True), 0
#
raise FFIError(":%d: bad or unsupported type declaration" %
typenode.coord.line)
def _parse_function_type(self, typenode, funcname=None):
params = list(getattr(typenode.args, 'params', []))
for i, arg in enumerate(params):
if not hasattr(arg, 'type'):
raise CDefError("%s arg %d: unknown type '%s'"
" (if you meant to use the old C syntax of giving"
" untyped arguments, it is not supported)"
% (funcname or 'in expression', i + 1,
getattr(arg, 'name', '?')))
ellipsis = (
len(params) > 0 and
isinstance(params[-1].type, pycparser.c_ast.TypeDecl) and
isinstance(params[-1].type.type,
pycparser.c_ast.IdentifierType) and
params[-1].type.type.names == ['__dotdotdot__'])
if ellipsis:
params.pop()
if not params:
raise CDefError(
"%s: a function with only '(...)' as argument"
" is not correct C" % (funcname or 'in expression'))
args = [self._as_func_arg(*self._get_type_and_quals(argdeclnode.type))
for argdeclnode in params]
if not ellipsis and args == [model.void_type]:
args = []
result, quals = self._get_type_and_quals(typenode.type)
        # the 'quals' on the result type are ignored.  HACK: we abuse them
# to detect __stdcall functions: we textually replace "__stdcall"
# with "volatile volatile const" above.
abi = None
if hasattr(typenode.type, 'quals'): # else, probable syntax error anyway
if typenode.type.quals[-3:] == ['volatile', 'volatile', 'const']:
abi = '__stdcall'
return model.RawFunctionType(tuple(args), result, ellipsis, abi)
def _as_func_arg(self, type, quals):
if isinstance(type, model.ArrayType):
return model.PointerType(type.item, quals)
elif isinstance(type, model.RawFunctionType):
return type.as_function_pointer()
else:
return type
def _get_struct_union_enum_type(self, kind, type, name=None, nested=False):
# First, a level of caching on the exact 'type' node of the AST.
# This is obscure, but needed because pycparser "unrolls" declarations
# such as "typedef struct { } foo_t, *foo_p" and we end up with
# an AST that is not a tree, but a DAG, with the "type" node of the
# two branches foo_t and foo_p of the trees being the same node.
# It's a bit silly but detecting "DAG-ness" in the AST tree seems
# to be the only way to distinguish this case from two independent
# structs. See test_struct_with_two_usages.
try:
return self._structnode2type[type]
except KeyError:
pass
#
# Note that this must handle parsing "struct foo" any number of
# times and always return the same StructType object. Additionally,
# one of these times (not necessarily the first), the fields of
# the struct can be specified with "struct foo { ...fields... }".
# If no name is given, then we have to create a new anonymous struct
# with no caching; in this case, the fields are either specified
# right now or never.
#
force_name = name
name = type.name
#
# get the type or create it if needed
if name is None:
# 'force_name' is used to guess a more readable name for
# anonymous structs, for the common case "typedef struct { } foo".
if force_name is not None:
explicit_name = '$%s' % force_name
else:
self._anonymous_counter += 1
explicit_name = '$%d' % self._anonymous_counter
tp = None
else:
explicit_name = name
key = '%s %s' % (kind, name)
tp, _ = self._declarations.get(key, (None, None))
#
if tp is None:
if kind == 'struct':
tp = model.StructType(explicit_name, None, None, None)
elif kind == 'union':
tp = model.UnionType(explicit_name, None, None, None)
elif kind == 'enum':
if explicit_name == '__dotdotdot__':
raise CDefError("Enums cannot be declared with ...")
tp = self._build_enum_type(explicit_name, type.values)
else:
raise AssertionError("kind = %r" % (kind,))
if name is not None:
self._declare(key, tp)
else:
if kind == 'enum' and type.values is not None:
raise NotImplementedError(
"enum %s: the '{}' declaration should appear on the first "
"time the enum is mentioned, not later" % explicit_name)
if not tp.forcename:
tp.force_the_name(force_name)
if tp.forcename and '$' in tp.name:
self._declare('anonymous %s' % tp.forcename, tp)
#
self._structnode2type[type] = tp
#
# enums: done here
if kind == 'enum':
return tp
#
# is there a 'type.decls'? If yes, then this is the place in the
# C sources that declare the fields. If no, then just return the
# existing type, possibly still incomplete.
if type.decls is None:
return tp
#
if tp.fldnames is not None:
raise CDefError("duplicate declaration of struct %s" % name)
fldnames = []
fldtypes = []
fldbitsize = []
fldquals = []
for decl in type.decls:
if (isinstance(decl.type, pycparser.c_ast.IdentifierType) and
''.join(decl.type.names) == '__dotdotdot__'):
# XXX pycparser is inconsistent: 'names' should be a list
# of strings, but is sometimes just one string. Use
# str.join() as a way to cope with both.
self._make_partial(tp, nested)
continue
if decl.bitsize is None:
bitsize = -1
else:
bitsize = self._parse_constant(decl.bitsize)
self._partial_length = False
type, fqual = self._get_type_and_quals(decl.type,
partial_length_ok=True)
if self._partial_length:
self._make_partial(tp, nested)
if isinstance(type, model.StructType) and type.partial:
self._make_partial(tp, nested)
fldnames.append(decl.name or '')
fldtypes.append(type)
fldbitsize.append(bitsize)
fldquals.append(fqual)
tp.fldnames = tuple(fldnames)
tp.fldtypes = tuple(fldtypes)
tp.fldbitsize = tuple(fldbitsize)
tp.fldquals = tuple(fldquals)
if fldbitsize != [-1] * len(fldbitsize):
if isinstance(tp, model.StructType) and tp.partial:
raise NotImplementedError("%s: using both bitfields and '...;'"
% (tp,))
tp.packed = self._options.get('packed')
if tp.completed: # must be re-completed: it is not opaque any more
tp.completed = 0
self._recomplete.append(tp)
return tp
def _make_partial(self, tp, nested):
if not isinstance(tp, model.StructOrUnion):
raise CDefError("%s cannot be partial" % (tp,))
if not tp.has_c_name() and not nested:
            raise NotImplementedError("%s is partial but has no C name" % (tp,))
tp.partial = True
def _parse_constant(self, exprnode, partial_length_ok=False):
# for now, limited to expressions that are an immediate number
# or positive/negative number
if isinstance(exprnode, pycparser.c_ast.Constant):
s = exprnode.value
if '0' <= s[0] <= '9':
s = s.rstrip('uUlL')
try:
if s.startswith('0'):
return int(s, 8)
else:
return int(s, 10)
except ValueError:
if len(s) > 1:
if s.lower()[0:2] == '0x':
return int(s, 16)
elif s.lower()[0:2] == '0b':
return int(s, 2)
raise CDefError("invalid constant %r" % (s,))
elif s[0] == "'" and s[-1] == "'" and (
len(s) == 3 or (len(s) == 4 and s[1] == "\\")):
return ord(s[-2])
else:
raise CDefError("invalid constant %r" % (s,))
#
if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and
exprnode.op == '+'):
return self._parse_constant(exprnode.expr)
#
if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and
exprnode.op == '-'):
return -self._parse_constant(exprnode.expr)
# load previously defined int constant
if (isinstance(exprnode, pycparser.c_ast.ID) and
exprnode.name in self._int_constants):
return self._int_constants[exprnode.name]
#
if (isinstance(exprnode, pycparser.c_ast.ID) and
exprnode.name == '__dotdotdotarray__'):
if partial_length_ok:
self._partial_length = True
return '...'
raise FFIError(":%d: unsupported '[...]' here, cannot derive "
"the actual array length in this context"
% exprnode.coord.line)
#
if isinstance(exprnode, pycparser.c_ast.BinaryOp):
left = self._parse_constant(exprnode.left)
right = self._parse_constant(exprnode.right)
if exprnode.op == '+':
return left + right
elif exprnode.op == '-':
return left - right
elif exprnode.op == '*':
return left * right
elif exprnode.op == '/':
return self._c_div(left, right)
elif exprnode.op == '%':
return left - self._c_div(left, right) * right
elif exprnode.op == '<<':
return left << right
elif exprnode.op == '>>':
return left >> right
elif exprnode.op == '&':
return left & right
elif exprnode.op == '|':
return left | right
elif exprnode.op == '^':
return left ^ right
#
raise FFIError(":%d: unsupported expression: expected a "
"simple numeric constant" % exprnode.coord.line)
def _c_div(self, a, b):
result = a // b
if ((a < 0) ^ (b < 0)) and (a % b) != 0:
result += 1
return result
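    # _c_div() implements C's truncation-toward-zero division on Python
    # integers: e.g. _c_div(-7, 2) == -3, so the '%' case above computes
    # -7 - (-3)*2 == -1, matching C semantics, whereas Python itself gives
    # -7 // 2 == -4 and -7 % 2 == 1.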
def _build_enum_type(self, explicit_name, decls):
if decls is not None:
partial = False
enumerators = []
enumvalues = []
nextenumvalue = 0
for enum in decls.enumerators:
if _r_enum_dotdotdot.match(enum.name):
partial = True
continue
if enum.value is not None:
nextenumvalue = self._parse_constant(enum.value)
enumerators.append(enum.name)
enumvalues.append(nextenumvalue)
self._add_constants(enum.name, nextenumvalue)
nextenumvalue += 1
enumerators = tuple(enumerators)
enumvalues = tuple(enumvalues)
tp = model.EnumType(explicit_name, enumerators, enumvalues)
tp.partial = partial
else: # opaque enum
tp = model.EnumType(explicit_name, (), ())
return tp
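    # Worked example (illustrative): "enum e { A, B=5, C, ... };" reaches
    # this point with "..." already renamed to a __dotdotdotNUM__
    # enumerator, and builds an EnumType with enumerators ('A', 'B', 'C'),
    # enumvalues (0, 5, 6) and partial=True.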
def include(self, other):
for name, (tp, quals) in other._declarations.items():
if name.startswith('anonymous $enum_$'):
continue # fix for test_anonymous_enum_include
kind = name.split(' ', 1)[0]
if kind in ('struct', 'union', 'enum', 'anonymous', 'typedef'):
self._declare(name, tp, included=True, quals=quals)
for k, v in other._int_constants.items():
self._add_constants(k, v)
def _get_unknown_type(self, decl):
typenames = decl.type.type.names
if typenames == ['__dotdotdot__']:
return model.unknown_type(decl.name)
if typenames == ['__dotdotdotint__']:
if self._uses_new_feature is None:
self._uses_new_feature = "'typedef int... %s'" % decl.name
return model.UnknownIntegerType(decl.name)
if typenames == ['__dotdotdotfloat__']:
# note: not for 'long double' so far
if self._uses_new_feature is None:
self._uses_new_feature = "'typedef float... %s'" % decl.name
return model.UnknownFloatType(decl.name)
raise FFIError(':%d: unsupported usage of "..." in typedef'
% decl.coord.line)
def _get_unknown_ptr_type(self, decl):
if decl.type.type.type.names == ['__dotdotdot__']:
return model.unknown_ptr_type(decl.name)
raise FFIError(':%d: unsupported usage of "..." in typedef'
% decl.coord.line)
| 44,231 | Python | 42.924528 | 86 | 0.525989 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/recompiler.py | import os, sys, io
from . import ffiplatform, model
from .error import VerificationError
from .cffi_opcode import *
VERSION_BASE = 0x2601
VERSION_EMBEDDED = 0x2701
VERSION_CHAR16CHAR32 = 0x2801
USE_LIMITED_API = (sys.platform != 'win32' or sys.version_info < (3, 0) or
sys.version_info >= (3, 5))
class GlobalExpr:
def __init__(self, name, address, type_op, size=0, check_value=0):
self.name = name
self.address = address
self.type_op = type_op
self.size = size
self.check_value = check_value
def as_c_expr(self):
return ' { "%s", (void *)%s, %s, (void *)%s },' % (
self.name, self.address, self.type_op.as_c_expr(), self.size)
def as_python_expr(self):
return "b'%s%s',%d" % (self.type_op.as_python_bytes(), self.name,
self.check_value)
class FieldExpr:
def __init__(self, name, field_offset, field_size, fbitsize, field_type_op):
self.name = name
self.field_offset = field_offset
self.field_size = field_size
self.fbitsize = fbitsize
self.field_type_op = field_type_op
def as_c_expr(self):
spaces = " " * len(self.name)
return (' { "%s", %s,\n' % (self.name, self.field_offset) +
' %s %s,\n' % (spaces, self.field_size) +
' %s %s },' % (spaces, self.field_type_op.as_c_expr()))
def as_python_expr(self):
raise NotImplementedError
def as_field_python_expr(self):
if self.field_type_op.op == OP_NOOP:
size_expr = ''
elif self.field_type_op.op == OP_BITFIELD:
size_expr = format_four_bytes(self.fbitsize)
else:
raise NotImplementedError
return "b'%s%s%s'" % (self.field_type_op.as_python_bytes(),
size_expr,
self.name)
class StructUnionExpr:
def __init__(self, name, type_index, flags, size, alignment, comment,
first_field_index, c_fields):
self.name = name
self.type_index = type_index
self.flags = flags
self.size = size
self.alignment = alignment
self.comment = comment
self.first_field_index = first_field_index
self.c_fields = c_fields
def as_c_expr(self):
return (' { "%s", %d, %s,' % (self.name, self.type_index, self.flags)
+ '\n %s, %s, ' % (self.size, self.alignment)
+ '%d, %d ' % (self.first_field_index, len(self.c_fields))
+ ('/* %s */ ' % self.comment if self.comment else '')
+ '},')
def as_python_expr(self):
flags = eval(self.flags, G_FLAGS)
fields_expr = [c_field.as_field_python_expr()
for c_field in self.c_fields]
return "(b'%s%s%s',%s)" % (
format_four_bytes(self.type_index),
format_four_bytes(flags),
self.name,
','.join(fields_expr))
class EnumExpr:
def __init__(self, name, type_index, size, signed, allenums):
self.name = name
self.type_index = type_index
self.size = size
self.signed = signed
self.allenums = allenums
def as_c_expr(self):
return (' { "%s", %d, _cffi_prim_int(%s, %s),\n'
' "%s" },' % (self.name, self.type_index,
self.size, self.signed, self.allenums))
def as_python_expr(self):
prim_index = {
(1, 0): PRIM_UINT8, (1, 1): PRIM_INT8,
(2, 0): PRIM_UINT16, (2, 1): PRIM_INT16,
(4, 0): PRIM_UINT32, (4, 1): PRIM_INT32,
(8, 0): PRIM_UINT64, (8, 1): PRIM_INT64,
}[self.size, self.signed]
return "b'%s%s%s\\x00%s'" % (format_four_bytes(self.type_index),
format_four_bytes(prim_index),
self.name, self.allenums)
class TypenameExpr:
def __init__(self, name, type_index):
self.name = name
self.type_index = type_index
def as_c_expr(self):
return ' { "%s", %d },' % (self.name, self.type_index)
def as_python_expr(self):
return "b'%s%s'" % (format_four_bytes(self.type_index), self.name)
# ____________________________________________________________
class Recompiler:
_num_externpy = 0
def __init__(self, ffi, module_name, target_is_python=False):
self.ffi = ffi
self.module_name = module_name
self.target_is_python = target_is_python
self._version = VERSION_BASE
def needs_version(self, ver):
self._version = max(self._version, ver)
def collect_type_table(self):
self._typesdict = {}
self._generate("collecttype")
#
all_decls = sorted(self._typesdict, key=str)
#
# prepare all FUNCTION bytecode sequences first
self.cffi_types = []
for tp in all_decls:
if tp.is_raw_function:
assert self._typesdict[tp] is None
self._typesdict[tp] = len(self.cffi_types)
self.cffi_types.append(tp) # placeholder
for tp1 in tp.args:
assert isinstance(tp1, (model.VoidType,
model.BasePrimitiveType,
model.PointerType,
model.StructOrUnionOrEnum,
model.FunctionPtrType))
if self._typesdict[tp1] is None:
self._typesdict[tp1] = len(self.cffi_types)
self.cffi_types.append(tp1) # placeholder
self.cffi_types.append('END') # placeholder
#
# prepare all OTHER bytecode sequences
for tp in all_decls:
if not tp.is_raw_function and self._typesdict[tp] is None:
self._typesdict[tp] = len(self.cffi_types)
self.cffi_types.append(tp) # placeholder
if tp.is_array_type and tp.length is not None:
self.cffi_types.append('LEN') # placeholder
assert None not in self._typesdict.values()
#
# collect all structs and unions and enums
self._struct_unions = {}
self._enums = {}
for tp in all_decls:
if isinstance(tp, model.StructOrUnion):
self._struct_unions[tp] = None
elif isinstance(tp, model.EnumType):
self._enums[tp] = None
for i, tp in enumerate(sorted(self._struct_unions,
key=lambda tp: tp.name)):
self._struct_unions[tp] = i
for i, tp in enumerate(sorted(self._enums,
key=lambda tp: tp.name)):
self._enums[tp] = i
#
# emit all bytecode sequences now
for tp in all_decls:
method = getattr(self, '_emit_bytecode_' + tp.__class__.__name__)
method(tp, self._typesdict[tp])
#
# consistency check
for op in self.cffi_types:
assert isinstance(op, CffiOp)
self.cffi_types = tuple(self.cffi_types) # don't change any more
def _enum_fields(self, tp):
# When producing C, expand all anonymous struct/union fields.
# That's necessary to have C code checking the offsets of the
# individual fields contained in them. When producing Python,
# don't do it and instead write it like it is, with the
# corresponding fields having an empty name. Empty names are
# recognized at runtime when we import the generated Python
# file.
expand_anonymous_struct_union = not self.target_is_python
return tp.enumfields(expand_anonymous_struct_union)
def _do_collect_type(self, tp):
if not isinstance(tp, model.BaseTypeByIdentity):
if isinstance(tp, tuple):
for x in tp:
self._do_collect_type(x)
return
if tp not in self._typesdict:
self._typesdict[tp] = None
if isinstance(tp, model.FunctionPtrType):
self._do_collect_type(tp.as_raw_function())
elif isinstance(tp, model.StructOrUnion):
if tp.fldtypes is not None and (
tp not in self.ffi._parser._included_declarations):
for name1, tp1, _, _ in self._enum_fields(tp):
self._do_collect_type(self._field_type(tp, name1, tp1))
else:
for _, x in tp._get_items():
self._do_collect_type(x)
def _generate(self, step_name):
lst = self.ffi._parser._declarations.items()
for name, (tp, quals) in sorted(lst):
kind, realname = name.split(' ', 1)
try:
method = getattr(self, '_generate_cpy_%s_%s' % (kind,
step_name))
except AttributeError:
raise VerificationError(
"not implemented in recompile(): %r" % name)
try:
self._current_quals = quals
method(tp, realname)
except Exception as e:
model.attach_exception_info(e, name)
raise
# ----------
ALL_STEPS = ["global", "field", "struct_union", "enum", "typename"]
def collect_step_tables(self):
# collect the declarations for '_cffi_globals', '_cffi_typenames', etc.
self._lsts = {}
for step_name in self.ALL_STEPS:
self._lsts[step_name] = []
self._seen_struct_unions = set()
self._generate("ctx")
self._add_missing_struct_unions()
#
for step_name in self.ALL_STEPS:
lst = self._lsts[step_name]
if step_name != "field":
lst.sort(key=lambda entry: entry.name)
self._lsts[step_name] = tuple(lst) # don't change any more
#
# check for a possible internal inconsistency: _cffi_struct_unions
# should have been generated with exactly self._struct_unions
lst = self._lsts["struct_union"]
for tp, i in self._struct_unions.items():
assert i < len(lst)
assert lst[i].name == tp.name
assert len(lst) == len(self._struct_unions)
# same with enums
lst = self._lsts["enum"]
for tp, i in self._enums.items():
assert i < len(lst)
assert lst[i].name == tp.name
assert len(lst) == len(self._enums)
# ----------
def _prnt(self, what=''):
self._f.write(what + '\n')
def write_source_to_f(self, f, preamble):
if self.target_is_python:
assert preamble is None
self.write_py_source_to_f(f)
else:
assert preamble is not None
self.write_c_source_to_f(f, preamble)
def _rel_readlines(self, filename):
g = open(os.path.join(os.path.dirname(__file__), filename), 'r')
lines = g.readlines()
g.close()
return lines
def write_c_source_to_f(self, f, preamble):
self._f = f
prnt = self._prnt
if self.ffi._embedding is not None:
prnt('#define _CFFI_USE_EMBEDDING')
if not USE_LIMITED_API:
prnt('#define _CFFI_NO_LIMITED_API')
#
# first the '#include' (actually done by inlining the file's content)
lines = self._rel_readlines('_cffi_include.h')
i = lines.index('#include "parse_c_type.h"\n')
lines[i:i+1] = self._rel_readlines('parse_c_type.h')
prnt(''.join(lines))
#
# if we have ffi._embedding != None, we give it here as a macro
# and include an extra file
base_module_name = self.module_name.split('.')[-1]
if self.ffi._embedding is not None:
prnt('#define _CFFI_MODULE_NAME "%s"' % (self.module_name,))
prnt('static const char _CFFI_PYTHON_STARTUP_CODE[] = {')
self._print_string_literal_in_array(self.ffi._embedding)
prnt('0 };')
prnt('#ifdef PYPY_VERSION')
prnt('# define _CFFI_PYTHON_STARTUP_FUNC _cffi_pypyinit_%s' % (
base_module_name,))
prnt('#elif PY_MAJOR_VERSION >= 3')
prnt('# define _CFFI_PYTHON_STARTUP_FUNC PyInit_%s' % (
base_module_name,))
prnt('#else')
prnt('# define _CFFI_PYTHON_STARTUP_FUNC init%s' % (
base_module_name,))
prnt('#endif')
lines = self._rel_readlines('_embedding.h')
i = lines.index('#include "_cffi_errors.h"\n')
lines[i:i+1] = self._rel_readlines('_cffi_errors.h')
prnt(''.join(lines))
self.needs_version(VERSION_EMBEDDED)
#
# then paste the C source given by the user, verbatim.
prnt('/************************************************************/')
prnt()
prnt(preamble)
prnt()
prnt('/************************************************************/')
prnt()
#
# the declaration of '_cffi_types'
prnt('static void *_cffi_types[] = {')
typeindex2type = dict([(i, tp) for (tp, i) in self._typesdict.items()])
for i, op in enumerate(self.cffi_types):
comment = ''
if i in typeindex2type:
comment = ' // ' + typeindex2type[i]._get_c_name()
prnt('/* %2d */ %s,%s' % (i, op.as_c_expr(), comment))
if not self.cffi_types:
prnt(' 0')
prnt('};')
prnt()
#
# call generate_cpy_xxx_decl(), for every xxx found from
# ffi._parser._declarations. This generates all the functions.
self._seen_constants = set()
self._generate("decl")
#
# the declaration of '_cffi_globals' and '_cffi_typenames'
nums = {}
for step_name in self.ALL_STEPS:
lst = self._lsts[step_name]
nums[step_name] = len(lst)
if nums[step_name] > 0:
prnt('static const struct _cffi_%s_s _cffi_%ss[] = {' % (
step_name, step_name))
for entry in lst:
prnt(entry.as_c_expr())
prnt('};')
prnt()
#
# the declaration of '_cffi_includes'
if self.ffi._included_ffis:
prnt('static const char * const _cffi_includes[] = {')
for ffi_to_include in self.ffi._included_ffis:
try:
included_module_name, included_source = (
ffi_to_include._assigned_source[:2])
except AttributeError:
raise VerificationError(
"ffi object %r includes %r, but the latter has not "
"been prepared with set_source()" % (
self.ffi, ffi_to_include,))
if included_source is None:
raise VerificationError(
"not implemented yet: ffi.include() of a Python-based "
"ffi inside a C-based ffi")
prnt(' "%s",' % (included_module_name,))
prnt(' NULL')
prnt('};')
prnt()
#
# the declaration of '_cffi_type_context'
prnt('static const struct _cffi_type_context_s _cffi_type_context = {')
prnt(' _cffi_types,')
for step_name in self.ALL_STEPS:
if nums[step_name] > 0:
prnt(' _cffi_%ss,' % step_name)
else:
prnt(' NULL, /* no %ss */' % step_name)
for step_name in self.ALL_STEPS:
if step_name != "field":
prnt(' %d, /* num_%ss */' % (nums[step_name], step_name))
if self.ffi._included_ffis:
prnt(' _cffi_includes,')
else:
prnt(' NULL, /* no includes */')
prnt(' %d, /* num_types */' % (len(self.cffi_types),))
flags = 0
if self._num_externpy > 0 or self.ffi._embedding is not None:
flags |= 1 # set to mean that we use extern "Python"
prnt(' %d, /* flags */' % flags)
prnt('};')
prnt()
#
# the init function
prnt('#ifdef __GNUC__')
prnt('# pragma GCC visibility push(default) /* for -fvisibility= */')
prnt('#endif')
prnt()
prnt('#ifdef PYPY_VERSION')
prnt('PyMODINIT_FUNC')
prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,))
prnt('{')
if flags & 1:
prnt(' if (((intptr_t)p[0]) >= 0x0A03) {')
prnt(' _cffi_call_python_org = '
'(void(*)(struct _cffi_externpy_s *, char *))p[1];')
prnt(' }')
prnt(' p[0] = (const void *)0x%x;' % self._version)
prnt(' p[1] = &_cffi_type_context;')
prnt('#if PY_MAJOR_VERSION >= 3')
prnt(' return NULL;')
prnt('#endif')
prnt('}')
# on Windows, distutils insists on putting init_cffi_xyz in
# 'export_symbols', so instead of fighting it, just give up and
# give it one
prnt('# ifdef _MSC_VER')
prnt(' PyMODINIT_FUNC')
prnt('# if PY_MAJOR_VERSION >= 3')
prnt(' PyInit_%s(void) { return NULL; }' % (base_module_name,))
prnt('# else')
prnt(' init%s(void) { }' % (base_module_name,))
prnt('# endif')
prnt('# endif')
prnt('#elif PY_MAJOR_VERSION >= 3')
prnt('PyMODINIT_FUNC')
prnt('PyInit_%s(void)' % (base_module_name,))
prnt('{')
prnt(' return _cffi_init("%s", 0x%x, &_cffi_type_context);' % (
self.module_name, self._version))
prnt('}')
prnt('#else')
prnt('PyMODINIT_FUNC')
prnt('init%s(void)' % (base_module_name,))
prnt('{')
prnt(' _cffi_init("%s", 0x%x, &_cffi_type_context);' % (
self.module_name, self._version))
prnt('}')
prnt('#endif')
prnt()
prnt('#ifdef __GNUC__')
prnt('# pragma GCC visibility pop')
prnt('#endif')
self._version = None
def _to_py(self, x):
if isinstance(x, str):
return "b'%s'" % (x,)
if isinstance(x, (list, tuple)):
rep = [self._to_py(item) for item in x]
if len(rep) == 1:
rep.append('')
return "(%s)" % (','.join(rep),)
        return x.as_python_expr()  # Py2: unicode unexpected; Py3: bytes unexpected
def write_py_source_to_f(self, f):
self._f = f
prnt = self._prnt
#
# header
prnt("# auto-generated file")
prnt("import _cffi_backend")
#
# the 'import' of the included ffis
num_includes = len(self.ffi._included_ffis or ())
for i in range(num_includes):
ffi_to_include = self.ffi._included_ffis[i]
try:
included_module_name, included_source = (
ffi_to_include._assigned_source[:2])
except AttributeError:
raise VerificationError(
"ffi object %r includes %r, but the latter has not "
"been prepared with set_source()" % (
self.ffi, ffi_to_include,))
if included_source is not None:
raise VerificationError(
"not implemented yet: ffi.include() of a C-based "
"ffi inside a Python-based ffi")
prnt('from %s import ffi as _ffi%d' % (included_module_name, i))
prnt()
prnt("ffi = _cffi_backend.FFI('%s'," % (self.module_name,))
prnt(" _version = 0x%x," % (self._version,))
self._version = None
#
# the '_types' keyword argument
self.cffi_types = tuple(self.cffi_types) # don't change any more
types_lst = [op.as_python_bytes() for op in self.cffi_types]
prnt(' _types = %s,' % (self._to_py(''.join(types_lst)),))
typeindex2type = dict([(i, tp) for (tp, i) in self._typesdict.items()])
#
# the keyword arguments from ALL_STEPS
for step_name in self.ALL_STEPS:
lst = self._lsts[step_name]
if len(lst) > 0 and step_name != "field":
prnt(' _%ss = %s,' % (step_name, self._to_py(lst)))
#
# the '_includes' keyword argument
if num_includes > 0:
prnt(' _includes = (%s,),' % (
', '.join(['_ffi%d' % i for i in range(num_includes)]),))
#
# the footer
prnt(')')
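    # --- editor's note: illustrative sketch, not part of cffi -------------
    # The method above produces an ABI-mode module of roughly this shape
    # (module name and byte string are made-up examples):
    #
    #     # auto-generated file
    #     import _cffi_backend
    #     ffi = _cffi_backend.FFI('mymod',
    #         _version = 0x2601,
    #         _types = b'\x00\x00\x01\x0f',
    #         _globals = (...),
    #     )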
# ----------
def _gettypenum(self, type):
# a KeyError here is a bug. please report it! :-)
return self._typesdict[type]
def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode):
extraarg = ''
if isinstance(tp, model.BasePrimitiveType) and not tp.is_complex_type():
if tp.is_integer_type() and tp.name != '_Bool':
converter = '_cffi_to_c_int'
extraarg = ', %s' % tp.name
elif isinstance(tp, model.UnknownFloatType):
# don't check with is_float_type(): it may be a 'long
                # double' here, and _cffi_to_c_double would lose precision
converter = '(%s)_cffi_to_c_double' % (tp.get_c_name(''),)
else:
cname = tp.get_c_name('')
converter = '(%s)_cffi_to_c_%s' % (cname,
tp.name.replace(' ', '_'))
if cname in ('char16_t', 'char32_t'):
self.needs_version(VERSION_CHAR16CHAR32)
errvalue = '-1'
#
elif isinstance(tp, model.PointerType):
self._convert_funcarg_to_c_ptr_or_array(tp, fromvar,
tovar, errcode)
return
#
elif (isinstance(tp, model.StructOrUnionOrEnum) or
isinstance(tp, model.BasePrimitiveType)):
# a struct (not a struct pointer) as a function argument;
# or, a complex (the same code works)
self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)'
% (tovar, self._gettypenum(tp), fromvar))
self._prnt(' %s;' % errcode)
return
#
elif isinstance(tp, model.FunctionPtrType):
converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('')
extraarg = ', _cffi_type(%d)' % self._gettypenum(tp)
errvalue = 'NULL'
#
else:
raise NotImplementedError(tp)
#
self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg))
self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % (
tovar, tp.get_c_name(''), errvalue))
self._prnt(' %s;' % errcode)
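    # --- editor's note: illustrative sketch, not part of cffi -------------
    # For an 'int' argument arg0 converted into x0 with errcode
    # 'return NULL', the method above emits:
    #
    #     x0 = _cffi_to_c_int(arg0, int);
    #     if (x0 == (int)-1 && PyErr_Occurred())
    #       return NULL;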
def _extra_local_variables(self, tp, localvars, freelines):
if isinstance(tp, model.PointerType):
localvars.add('Py_ssize_t datasize')
localvars.add('struct _cffi_freeme_s *large_args_free = NULL')
freelines.add('if (large_args_free != NULL)'
' _cffi_free_array_arguments(large_args_free);')
def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode):
self._prnt(' datasize = _cffi_prepare_pointer_call_argument(')
self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % (
self._gettypenum(tp), fromvar, tovar))
self._prnt(' if (datasize != 0) {')
self._prnt(' %s = ((size_t)datasize) <= 640 ? '
'(%s)alloca((size_t)datasize) : NULL;' % (
tovar, tp.get_c_name('')))
self._prnt(' if (_cffi_convert_array_argument(_cffi_type(%d), %s, '
'(char **)&%s,' % (self._gettypenum(tp), fromvar, tovar))
self._prnt(' datasize, &large_args_free) < 0)')
self._prnt(' %s;' % errcode)
self._prnt(' }')
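    # --- editor's note: illustrative sketch, not part of cffi -------------
    # For a pointer argument the emitted C stack-allocates buffers of up to
    # 640 bytes with alloca() and otherwise lets the helper heap-allocate,
    # tracked via 'large_args_free' (type index 5 is a made-up example):
    #
    #     datasize = _cffi_prepare_pointer_call_argument(
    #         _cffi_type(5), arg0, (char **)&x0);
    #     if (datasize != 0) {
    #       x0 = ((size_t)datasize) <= 640 ? (char *)alloca((size_t)datasize) : NULL;
    #       if (_cffi_convert_array_argument(_cffi_type(5), arg0, (char **)&x0,
    #                                        datasize, &large_args_free) < 0)
    #         return NULL;
    #     }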
def _convert_expr_from_c(self, tp, var, context):
if isinstance(tp, model.BasePrimitiveType):
if tp.is_integer_type() and tp.name != '_Bool':
return '_cffi_from_c_int(%s, %s)' % (var, tp.name)
elif isinstance(tp, model.UnknownFloatType):
return '_cffi_from_c_double(%s)' % (var,)
elif tp.name != 'long double' and not tp.is_complex_type():
cname = tp.name.replace(' ', '_')
if cname in ('char16_t', 'char32_t'):
self.needs_version(VERSION_CHAR16CHAR32)
return '_cffi_from_c_%s(%s)' % (cname, var)
else:
return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, (model.PointerType, model.FunctionPtrType)):
return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, model.ArrayType):
return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
var, self._gettypenum(model.PointerType(tp.item)))
elif isinstance(tp, model.StructOrUnion):
if tp.fldnames is None:
raise TypeError("'%s' is used as %s, but is opaque" % (
tp._get_c_name(), context))
return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, model.EnumType):
return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
else:
raise NotImplementedError(tp)
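    # --- editor's note: illustrative examples, not part of cffi -----------
    # Sample results of _convert_expr_from_c() (type indexes made up):
    #     int          ->  _cffi_from_c_int(result, int)
    #     double       ->  _cffi_from_c_double(result)
    #     char *       ->  _cffi_from_c_pointer((char *)result, _cffi_type(3))
    #     struct foo   ->  _cffi_from_c_struct((char *)&result, _cffi_type(7))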
# ----------
# typedefs
def _typedef_type(self, tp, name):
return self._global_type(tp, "(*(%s *)0)" % (name,))
def _generate_cpy_typedef_collecttype(self, tp, name):
self._do_collect_type(self._typedef_type(tp, name))
def _generate_cpy_typedef_decl(self, tp, name):
pass
def _typedef_ctx(self, tp, name):
type_index = self._typesdict[tp]
self._lsts["typename"].append(TypenameExpr(name, type_index))
def _generate_cpy_typedef_ctx(self, tp, name):
tp = self._typedef_type(tp, name)
self._typedef_ctx(tp, name)
if getattr(tp, "origin", None) == "unknown_type":
self._struct_ctx(tp, tp.name, approxname=None)
elif isinstance(tp, model.NamedPointerType):
self._struct_ctx(tp.totype, tp.totype.name, approxname=tp.name,
named_ptr=tp)
# ----------
# function declarations
def _generate_cpy_function_collecttype(self, tp, name):
self._do_collect_type(tp.as_raw_function())
if tp.ellipsis and not self.target_is_python:
self._do_collect_type(tp)
def _generate_cpy_function_decl(self, tp, name):
assert not self.target_is_python
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
# cannot support vararg functions better than this: check for its
# exact type (including the fixed arguments), and build it as a
# constant function pointer (no CPython wrapper)
self._generate_cpy_constant_decl(tp, name)
return
prnt = self._prnt
numargs = len(tp.args)
if numargs == 0:
argname = 'noarg'
elif numargs == 1:
argname = 'arg0'
else:
argname = 'args'
#
# ------------------------------
# the 'd' version of the function, only for addressof(lib, 'func')
arguments = []
call_arguments = []
context = 'argument of %s' % name
for i, type in enumerate(tp.args):
arguments.append(type.get_c_name(' x%d' % i, context))
call_arguments.append('x%d' % i)
repr_arguments = ', '.join(arguments)
repr_arguments = repr_arguments or 'void'
if tp.abi:
abi = tp.abi + ' '
else:
abi = ''
name_and_arguments = '%s_cffi_d_%s(%s)' % (abi, name, repr_arguments)
prnt('static %s' % (tp.result.get_c_name(name_and_arguments),))
prnt('{')
call_arguments = ', '.join(call_arguments)
result_code = 'return '
if isinstance(tp.result, model.VoidType):
result_code = ''
prnt(' %s%s(%s);' % (result_code, name, call_arguments))
prnt('}')
#
prnt('#ifndef PYPY_VERSION') # ------------------------------
#
prnt('static PyObject *')
prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname))
prnt('{')
#
context = 'argument of %s' % name
for i, type in enumerate(tp.args):
arg = type.get_c_name(' x%d' % i, context)
prnt(' %s;' % arg)
#
localvars = set()
freelines = set()
for type in tp.args:
self._extra_local_variables(type, localvars, freelines)
for decl in sorted(localvars):
prnt(' %s;' % (decl,))
#
if not isinstance(tp.result, model.VoidType):
result_code = 'result = '
context = 'result of %s' % name
result_decl = ' %s;' % tp.result.get_c_name(' result', context)
prnt(result_decl)
prnt(' PyObject *pyresult;')
else:
result_decl = None
result_code = ''
#
if len(tp.args) > 1:
rng = range(len(tp.args))
for i in rng:
prnt(' PyObject *arg%d;' % i)
prnt()
prnt(' if (!PyArg_UnpackTuple(args, "%s", %d, %d, %s))' % (
name, len(rng), len(rng),
', '.join(['&arg%d' % i for i in rng])))
prnt(' return NULL;')
prnt()
#
for i, type in enumerate(tp.args):
self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i,
'return NULL')
prnt()
#
prnt(' Py_BEGIN_ALLOW_THREADS')
prnt(' _cffi_restore_errno();')
call_arguments = ['x%d' % i for i in range(len(tp.args))]
call_arguments = ', '.join(call_arguments)
prnt(' { %s%s(%s); }' % (result_code, name, call_arguments))
prnt(' _cffi_save_errno();')
prnt(' Py_END_ALLOW_THREADS')
prnt()
#
prnt(' (void)self; /* unused */')
if numargs == 0:
prnt(' (void)noarg; /* unused */')
if result_code:
prnt(' pyresult = %s;' %
self._convert_expr_from_c(tp.result, 'result', 'result type'))
for freeline in freelines:
prnt(' ' + freeline)
prnt(' return pyresult;')
else:
for freeline in freelines:
prnt(' ' + freeline)
prnt(' Py_INCREF(Py_None);')
prnt(' return Py_None;')
prnt('}')
#
prnt('#else') # ------------------------------
#
# the PyPy version: need to replace struct/union arguments with
# pointers, and if the result is a struct/union, insert a first
# arg that is a pointer to the result. We also do that for
# complex args and return type.
def need_indirection(type):
return (isinstance(type, model.StructOrUnion) or
(isinstance(type, model.PrimitiveType) and
type.is_complex_type()))
difference = False
arguments = []
call_arguments = []
context = 'argument of %s' % name
for i, type in enumerate(tp.args):
indirection = ''
if need_indirection(type):
indirection = '*'
difference = True
arg = type.get_c_name(' %sx%d' % (indirection, i), context)
arguments.append(arg)
call_arguments.append('%sx%d' % (indirection, i))
tp_result = tp.result
if need_indirection(tp_result):
context = 'result of %s' % name
arg = tp_result.get_c_name(' *result', context)
arguments.insert(0, arg)
tp_result = model.void_type
result_decl = None
result_code = '*result = '
difference = True
if difference:
repr_arguments = ', '.join(arguments)
repr_arguments = repr_arguments or 'void'
name_and_arguments = '%s_cffi_f_%s(%s)' % (abi, name,
repr_arguments)
prnt('static %s' % (tp_result.get_c_name(name_and_arguments),))
prnt('{')
if result_decl:
prnt(result_decl)
call_arguments = ', '.join(call_arguments)
prnt(' { %s%s(%s); }' % (result_code, name, call_arguments))
if result_decl:
prnt(' return result;')
prnt('}')
else:
prnt('# define _cffi_f_%s _cffi_d_%s' % (name, name))
#
prnt('#endif') # ------------------------------
prnt()
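    # --- editor's note: illustrative sketch, not part of cffi -------------
    # End-to-end, a hypothetical 'int add(int, int)' yields approximately
    # the following C in the CPython branch (condensed; the real output
    # declares each variable on its own line and also contains the '#else'
    # PyPy branch emitted above):
    #
    #     static int _cffi_d_add(int x0, int x1) { return add(x0, x1); }
    #
    #     static PyObject *
    #     _cffi_f_add(PyObject *self, PyObject *args)
    #     {
    #       int x0; int x1; int result; PyObject *pyresult;
    #       PyObject *arg0; PyObject *arg1;
    #       if (!PyArg_UnpackTuple(args, "add", 2, 2, &arg0, &arg1))
    #         return NULL;
    #       x0 = _cffi_to_c_int(arg0, int);
    #       if (x0 == (int)-1 && PyErr_Occurred()) return NULL;
    #       x1 = _cffi_to_c_int(arg1, int);
    #       if (x1 == (int)-1 && PyErr_Occurred()) return NULL;
    #       Py_BEGIN_ALLOW_THREADS
    #       _cffi_restore_errno();
    #       { result = add(x0, x1); }
    #       _cffi_save_errno();
    #       Py_END_ALLOW_THREADS
    #       (void)self; /* unused */
    #       pyresult = _cffi_from_c_int(result, int);
    #       return pyresult;
    #     }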
def _generate_cpy_function_ctx(self, tp, name):
if tp.ellipsis and not self.target_is_python:
self._generate_cpy_constant_ctx(tp, name)
return
type_index = self._typesdict[tp.as_raw_function()]
numargs = len(tp.args)
if self.target_is_python:
meth_kind = OP_DLOPEN_FUNC
elif numargs == 0:
meth_kind = OP_CPYTHON_BLTN_N # 'METH_NOARGS'
elif numargs == 1:
meth_kind = OP_CPYTHON_BLTN_O # 'METH_O'
else:
meth_kind = OP_CPYTHON_BLTN_V # 'METH_VARARGS'
self._lsts["global"].append(
GlobalExpr(name, '_cffi_f_%s' % name,
CffiOp(meth_kind, type_index),
size='_cffi_d_%s' % name))
# ----------
# named structs or unions
def _field_type(self, tp_struct, field_name, tp_field):
if isinstance(tp_field, model.ArrayType):
actual_length = tp_field.length
if actual_length == '...':
ptr_struct_name = tp_struct.get_c_name('*')
actual_length = '_cffi_array_len(((%s)0)->%s)' % (
ptr_struct_name, field_name)
tp_item = self._field_type(tp_struct, '%s[0]' % field_name,
tp_field.item)
tp_field = model.ArrayType(tp_item, actual_length)
return tp_field
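    # --- editor's note: illustrative example, not part of cffi ------------
    # For a hypothetical 'struct foo { int arr[...]; }', the '...' length
    # of the field is replaced by a C expression evaluated by the compiler:
    #     _cffi_array_len(((struct foo *)0)->arr)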
def _struct_collecttype(self, tp):
self._do_collect_type(tp)
if self.target_is_python:
# also requires nested anon struct/unions in ABI mode, recursively
for fldtype in tp.anonymous_struct_fields():
self._struct_collecttype(fldtype)
def _struct_decl(self, tp, cname, approxname):
if tp.fldtypes is None:
return
prnt = self._prnt
checkfuncname = '_cffi_checkfld_%s' % (approxname,)
prnt('_CFFI_UNUSED_FN')
prnt('static void %s(%s *p)' % (checkfuncname, cname))
prnt('{')
prnt(' /* only to generate compile-time warnings or errors */')
prnt(' (void)p;')
for fname, ftype, fbitsize, fqual in self._enum_fields(tp):
try:
if ftype.is_integer_type() or fbitsize >= 0:
# accept all integers, but complain on float or double
if fname != '':
prnt(" (void)((p->%s) | 0); /* check that '%s.%s' is "
"an integer */" % (fname, cname, fname))
continue
# only accept exactly the type declared, except that '[]'
# is interpreted as a '*' and so will match any array length.
# (It would also match '*', but that's harder to detect...)
while (isinstance(ftype, model.ArrayType)
and (ftype.length is None or ftype.length == '...')):
ftype = ftype.item
fname = fname + '[0]'
prnt(' { %s = &p->%s; (void)tmp; }' % (
ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual),
fname))
except VerificationError as e:
prnt(' /* %s */' % str(e)) # cannot verify it, ignore
prnt('}')
prnt('struct _cffi_align_%s { char x; %s y; };' % (approxname, cname))
prnt()
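    # --- editor's note: illustrative sketch, not part of cffi -------------
    # For a hypothetical 'struct pt { int x; double y; }' this emits:
    #
    #     _CFFI_UNUSED_FN
    #     static void _cffi_checkfld_struct_pt(struct pt *p)
    #     {
    #       /* only to generate compile-time warnings or errors */
    #       (void)p;
    #       (void)((p->x) | 0);  /* check that 'struct pt.x' is an integer */
    #       { double *tmp = &p->y; (void)tmp; }
    #     }
    #     struct _cffi_align_struct_pt { char x; struct pt y; };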
def _struct_ctx(self, tp, cname, approxname, named_ptr=None):
type_index = self._typesdict[tp]
reason_for_not_expanding = None
flags = []
if isinstance(tp, model.UnionType):
flags.append("_CFFI_F_UNION")
if tp.fldtypes is None:
flags.append("_CFFI_F_OPAQUE")
reason_for_not_expanding = "opaque"
if (tp not in self.ffi._parser._included_declarations and
(named_ptr is None or
named_ptr not in self.ffi._parser._included_declarations)):
if tp.fldtypes is None:
pass # opaque
elif tp.partial or any(tp.anonymous_struct_fields()):
pass # field layout obtained silently from the C compiler
else:
flags.append("_CFFI_F_CHECK_FIELDS")
if tp.packed:
if tp.packed > 1:
raise NotImplementedError(
"%r is declared with 'pack=%r'; only 0 or 1 are "
"supported in API mode (try to use \"...;\", which "
"does not require a 'pack' declaration)" %
(tp, tp.packed))
flags.append("_CFFI_F_PACKED")
else:
flags.append("_CFFI_F_EXTERNAL")
reason_for_not_expanding = "external"
flags = '|'.join(flags) or '0'
c_fields = []
if reason_for_not_expanding is None:
enumfields = list(self._enum_fields(tp))
for fldname, fldtype, fbitsize, fqual in enumfields:
fldtype = self._field_type(tp, fldname, fldtype)
self._check_not_opaque(fldtype,
"field '%s.%s'" % (tp.name, fldname))
# cname is None for _add_missing_struct_unions() only
op = OP_NOOP
if fbitsize >= 0:
op = OP_BITFIELD
size = '%d /* bits */' % fbitsize
elif cname is None or (
isinstance(fldtype, model.ArrayType) and
fldtype.length is None):
size = '(size_t)-1'
else:
size = 'sizeof(((%s)0)->%s)' % (
tp.get_c_name('*') if named_ptr is None
else named_ptr.name,
fldname)
if cname is None or fbitsize >= 0:
offset = '(size_t)-1'
elif named_ptr is not None:
offset = '((char *)&((%s)0)->%s) - (char *)0' % (
named_ptr.name, fldname)
else:
offset = 'offsetof(%s, %s)' % (tp.get_c_name(''), fldname)
c_fields.append(
FieldExpr(fldname, offset, size, fbitsize,
CffiOp(op, self._typesdict[fldtype])))
first_field_index = len(self._lsts["field"])
self._lsts["field"].extend(c_fields)
#
if cname is None: # unknown name, for _add_missing_struct_unions
size = '(size_t)-2'
align = -2
comment = "unnamed"
else:
if named_ptr is not None:
size = 'sizeof(*(%s)0)' % (named_ptr.name,)
align = '-1 /* unknown alignment */'
else:
size = 'sizeof(%s)' % (cname,)
align = 'offsetof(struct _cffi_align_%s, y)' % (approxname,)
comment = None
else:
size = '(size_t)-1'
align = -1
first_field_index = -1
comment = reason_for_not_expanding
self._lsts["struct_union"].append(
StructUnionExpr(tp.name, type_index, flags, size, align, comment,
first_field_index, c_fields))
self._seen_struct_unions.add(tp)
def _check_not_opaque(self, tp, location):
while isinstance(tp, model.ArrayType):
tp = tp.item
if isinstance(tp, model.StructOrUnion) and tp.fldtypes is None:
raise TypeError(
"%s is of an opaque type (not declared in cdef())" % location)
def _add_missing_struct_unions(self):
# not very nice, but some struct declarations might be missing
# because they don't have any known C name. Check that they are
# not partial (we can't complete or verify them!) and emit them
# anonymously.
lst = list(self._struct_unions.items())
lst.sort(key=lambda tp_order: tp_order[1])
for tp, order in lst:
if tp not in self._seen_struct_unions:
if tp.partial:
raise NotImplementedError("internal inconsistency: %r is "
"partial but was not seen at "
"this point" % (tp,))
if tp.name.startswith('$') and tp.name[1:].isdigit():
approxname = tp.name[1:]
elif tp.name == '_IO_FILE' and tp.forcename == 'FILE':
approxname = 'FILE'
self._typedef_ctx(tp, 'FILE')
else:
raise NotImplementedError("internal inconsistency: %r" %
(tp,))
self._struct_ctx(tp, None, approxname)
def _generate_cpy_struct_collecttype(self, tp, name):
self._struct_collecttype(tp)
_generate_cpy_union_collecttype = _generate_cpy_struct_collecttype
def _struct_names(self, tp):
cname = tp.get_c_name('')
if ' ' in cname:
return cname, cname.replace(' ', '_')
else:
return cname, '_' + cname
def _generate_cpy_struct_decl(self, tp, name):
self._struct_decl(tp, *self._struct_names(tp))
_generate_cpy_union_decl = _generate_cpy_struct_decl
def _generate_cpy_struct_ctx(self, tp, name):
self._struct_ctx(tp, *self._struct_names(tp))
_generate_cpy_union_ctx = _generate_cpy_struct_ctx
# ----------
# 'anonymous' declarations. These are produced for anonymous structs
# or unions; the 'name' is obtained by a typedef.
def _generate_cpy_anonymous_collecttype(self, tp, name):
if isinstance(tp, model.EnumType):
self._generate_cpy_enum_collecttype(tp, name)
else:
self._struct_collecttype(tp)
def _generate_cpy_anonymous_decl(self, tp, name):
if isinstance(tp, model.EnumType):
self._generate_cpy_enum_decl(tp)
else:
self._struct_decl(tp, name, 'typedef_' + name)
def _generate_cpy_anonymous_ctx(self, tp, name):
if isinstance(tp, model.EnumType):
self._enum_ctx(tp, name)
else:
self._struct_ctx(tp, name, 'typedef_' + name)
# ----------
# constants, declared with "static const ..."
def _generate_cpy_const(self, is_int, name, tp=None, category='const',
check_value=None):
if (category, name) in self._seen_constants:
raise VerificationError(
"duplicate declaration of %s '%s'" % (category, name))
self._seen_constants.add((category, name))
#
prnt = self._prnt
funcname = '_cffi_%s_%s' % (category, name)
if is_int:
prnt('static int %s(unsigned long long *o)' % funcname)
prnt('{')
prnt(' int n = (%s) <= 0;' % (name,))
prnt(' *o = (unsigned long long)((%s) | 0);'
' /* check that %s is an integer */' % (name, name))
if check_value is not None:
if check_value > 0:
check_value = '%dU' % (check_value,)
prnt(' if (!_cffi_check_int(*o, n, %s))' % (check_value,))
prnt(' n |= 2;')
prnt(' return n;')
prnt('}')
else:
assert check_value is None
prnt('static void %s(char *o)' % funcname)
prnt('{')
prnt(' *(%s)o = %s;' % (tp.get_c_name('*'), name))
prnt('}')
prnt()
def _generate_cpy_constant_collecttype(self, tp, name):
is_int = tp.is_integer_type()
if not is_int or self.target_is_python:
self._do_collect_type(tp)
def _generate_cpy_constant_decl(self, tp, name):
is_int = tp.is_integer_type()
self._generate_cpy_const(is_int, name, tp)
def _generate_cpy_constant_ctx(self, tp, name):
if not self.target_is_python and tp.is_integer_type():
type_op = CffiOp(OP_CONSTANT_INT, -1)
else:
if self.target_is_python:
const_kind = OP_DLOPEN_CONST
else:
const_kind = OP_CONSTANT
type_index = self._typesdict[tp]
type_op = CffiOp(const_kind, type_index)
self._lsts["global"].append(
GlobalExpr(name, '_cffi_const_%s' % name, type_op))
# ----------
# enums
def _generate_cpy_enum_collecttype(self, tp, name):
self._do_collect_type(tp)
def _generate_cpy_enum_decl(self, tp, name=None):
for enumerator in tp.enumerators:
self._generate_cpy_const(True, enumerator)
def _enum_ctx(self, tp, cname):
type_index = self._typesdict[tp]
type_op = CffiOp(OP_ENUM, -1)
if self.target_is_python:
tp.check_not_partial()
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
self._lsts["global"].append(
GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op,
check_value=enumvalue))
#
if cname is not None and '$' not in cname and not self.target_is_python:
size = "sizeof(%s)" % cname
signed = "((%s)-1) <= 0" % cname
else:
basetp = tp.build_baseinttype(self.ffi, [])
size = self.ffi.sizeof(basetp)
signed = int(int(self.ffi.cast(basetp, -1)) < 0)
allenums = ",".join(tp.enumerators)
self._lsts["enum"].append(
EnumExpr(tp.name, type_index, size, signed, allenums))
def _generate_cpy_enum_ctx(self, tp, name):
self._enum_ctx(tp, tp._get_c_name())
# ----------
# macros: for now only for integers
def _generate_cpy_macro_collecttype(self, tp, name):
pass
def _generate_cpy_macro_decl(self, tp, name):
if tp == '...':
check_value = None
else:
check_value = tp # an integer
self._generate_cpy_const(True, name, check_value=check_value)
def _generate_cpy_macro_ctx(self, tp, name):
if tp == '...':
if self.target_is_python:
raise VerificationError(
"cannot use the syntax '...' in '#define %s ...' when "
"using the ABI mode" % (name,))
check_value = None
else:
check_value = tp # an integer
type_op = CffiOp(OP_CONSTANT_INT, -1)
self._lsts["global"].append(
GlobalExpr(name, '_cffi_const_%s' % name, type_op,
check_value=check_value))
# ----------
# global variables
def _global_type(self, tp, global_name):
if isinstance(tp, model.ArrayType):
actual_length = tp.length
if actual_length == '...':
actual_length = '_cffi_array_len(%s)' % (global_name,)
tp_item = self._global_type(tp.item, '%s[0]' % global_name)
tp = model.ArrayType(tp_item, actual_length)
return tp
def _generate_cpy_variable_collecttype(self, tp, name):
self._do_collect_type(self._global_type(tp, name))
def _generate_cpy_variable_decl(self, tp, name):
prnt = self._prnt
tp = self._global_type(tp, name)
if isinstance(tp, model.ArrayType) and tp.length is None:
tp = tp.item
ampersand = ''
else:
ampersand = '&'
        # This code assumes that casts from "tp *" to "void *" are
        # no-ops, i.e. a function that returns a "tp *" can be called
# as if it returned a "void *". This should be generally true
# on any modern machine. The only exception to that rule (on
# uncommon architectures, and as far as I can tell) might be
# if 'tp' were a function type, but that is not possible here.
# (If 'tp' is a function _pointer_ type, then casts from "fn_t
# **" to "void *" are again no-ops, as far as I can tell.)
decl = '*_cffi_var_%s(void)' % (name,)
prnt('static ' + tp.get_c_name(decl, quals=self._current_quals))
prnt('{')
prnt(' return %s(%s);' % (ampersand, name))
prnt('}')
prnt()
def _generate_cpy_variable_ctx(self, tp, name):
tp = self._global_type(tp, name)
type_index = self._typesdict[tp]
if self.target_is_python:
op = OP_GLOBAL_VAR
else:
op = OP_GLOBAL_VAR_F
self._lsts["global"].append(
GlobalExpr(name, '_cffi_var_%s' % name, CffiOp(op, type_index)))
# ----------
# extern "Python"
def _generate_cpy_extern_python_collecttype(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
self._do_collect_type(tp)
_generate_cpy_dllexport_python_collecttype = \
_generate_cpy_extern_python_plus_c_collecttype = \
_generate_cpy_extern_python_collecttype
def _extern_python_decl(self, tp, name, tag_and_space):
prnt = self._prnt
if isinstance(tp.result, model.VoidType):
size_of_result = '0'
else:
context = 'result of %s' % name
size_of_result = '(int)sizeof(%s)' % (
tp.result.get_c_name('', context),)
prnt('static struct _cffi_externpy_s _cffi_externpy__%s =' % name)
prnt(' { "%s.%s", %s, 0, 0 };' % (
self.module_name, name, size_of_result))
prnt()
#
arguments = []
context = 'argument of %s' % name
for i, type in enumerate(tp.args):
arg = type.get_c_name(' a%d' % i, context)
arguments.append(arg)
#
repr_arguments = ', '.join(arguments)
repr_arguments = repr_arguments or 'void'
name_and_arguments = '%s(%s)' % (name, repr_arguments)
if tp.abi == "__stdcall":
name_and_arguments = '_cffi_stdcall ' + name_and_arguments
#
def may_need_128_bits(tp):
return (isinstance(tp, model.PrimitiveType) and
tp.name == 'long double')
#
size_of_a = max(len(tp.args)*8, 8)
if may_need_128_bits(tp.result):
size_of_a = max(size_of_a, 16)
if isinstance(tp.result, model.StructOrUnion):
size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % (
tp.result.get_c_name(''), size_of_a,
tp.result.get_c_name(''), size_of_a)
prnt('%s%s' % (tag_and_space, tp.result.get_c_name(name_and_arguments)))
prnt('{')
prnt(' char a[%s];' % size_of_a)
prnt(' char *p = a;')
for i, type in enumerate(tp.args):
arg = 'a%d' % i
if (isinstance(type, model.StructOrUnion) or
may_need_128_bits(type)):
arg = '&' + arg
type = model.PointerType(type)
prnt(' *(%s)(p + %d) = %s;' % (type.get_c_name('*'), i*8, arg))
prnt(' _cffi_call_python(&_cffi_externpy__%s, p);' % name)
if not isinstance(tp.result, model.VoidType):
prnt(' return *(%s)p;' % (tp.result.get_c_name('*'),))
prnt('}')
prnt()
self._num_externpy += 1
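    # --- editor's note: illustrative sketch, not part of cffi -------------
    # For a hypothetical 'extern "Python" int cb(int, int);' the emitted
    # trampoline packs each argument into an 8-byte slot of a local buffer,
    # calls back into the interpreter, and reads the result from slot 0:
    #
    #     static int cb(int a0, int a1)
    #     {
    #       char a[16];
    #       char *p = a;
    #       *(int *)(p + 0) = a0;
    #       *(int *)(p + 8) = a1;
    #       _cffi_call_python(&_cffi_externpy__cb, p);
    #       return *(int *)p;
    #     }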
def _generate_cpy_extern_python_decl(self, tp, name):
self._extern_python_decl(tp, name, 'static ')
def _generate_cpy_dllexport_python_decl(self, tp, name):
self._extern_python_decl(tp, name, 'CFFI_DLLEXPORT ')
def _generate_cpy_extern_python_plus_c_decl(self, tp, name):
self._extern_python_decl(tp, name, '')
def _generate_cpy_extern_python_ctx(self, tp, name):
if self.target_is_python:
raise VerificationError(
"cannot use 'extern \"Python\"' in the ABI mode")
if tp.ellipsis:
raise NotImplementedError("a vararg function is extern \"Python\"")
type_index = self._typesdict[tp]
type_op = CffiOp(OP_EXTERN_PYTHON, type_index)
self._lsts["global"].append(
GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name))
_generate_cpy_dllexport_python_ctx = \
_generate_cpy_extern_python_plus_c_ctx = \
_generate_cpy_extern_python_ctx
def _print_string_literal_in_array(self, s):
prnt = self._prnt
prnt('// # NB. this is not a string because of a size limit in MSVC')
if not isinstance(s, bytes): # unicode
s = s.encode('utf-8') # -> bytes
else:
s.decode('utf-8') # got bytes, check for valid utf-8
try:
s.decode('ascii')
except UnicodeDecodeError:
s = b'# -*- encoding: utf8 -*-\n' + s
for line in s.splitlines(True):
comment = line
if type('//') is bytes: # python2
line = map(ord, line) # make a list of integers
else: # python3
# type(line) is bytes, which enumerates like a list of integers
comment = ascii(comment)[1:-1]
prnt(('// ' + comment).rstrip())
printed_line = ''
for c in line:
if len(printed_line) >= 76:
prnt(printed_line)
printed_line = ''
printed_line += '%d,' % (c,)
prnt(printed_line)
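    # --- editor's note: illustrative example, not part of cffi ------------
    # Each source line is echoed as a '//' comment (its ascii() repr) and
    # then spelled out as decimal byte values; for the 3-byte input b"hi\n"
    # the output is approximately:
    #     // 'hi\n
    #     104,105,10,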
# ----------
# emitting the opcodes for individual types
def _emit_bytecode_VoidType(self, tp, index):
self.cffi_types[index] = CffiOp(OP_PRIMITIVE, PRIM_VOID)
def _emit_bytecode_PrimitiveType(self, tp, index):
prim_index = PRIMITIVE_TO_INDEX[tp.name]
self.cffi_types[index] = CffiOp(OP_PRIMITIVE, prim_index)
def _emit_bytecode_UnknownIntegerType(self, tp, index):
s = ('_cffi_prim_int(sizeof(%s), (\n'
' ((%s)-1) | 0 /* check that %s is an integer type */\n'
' ) <= 0)' % (tp.name, tp.name, tp.name))
self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s)
def _emit_bytecode_UnknownFloatType(self, tp, index):
s = ('_cffi_prim_float(sizeof(%s) *\n'
' (((%s)1) / 2) * 2 /* integer => 0, float => 1 */\n'
' )' % (tp.name, tp.name))
self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s)
def _emit_bytecode_RawFunctionType(self, tp, index):
self.cffi_types[index] = CffiOp(OP_FUNCTION, self._typesdict[tp.result])
index += 1
for tp1 in tp.args:
realindex = self._typesdict[tp1]
if index != realindex:
if isinstance(tp1, model.PrimitiveType):
self._emit_bytecode_PrimitiveType(tp1, index)
else:
self.cffi_types[index] = CffiOp(OP_NOOP, realindex)
index += 1
flags = int(tp.ellipsis)
if tp.abi is not None:
if tp.abi == '__stdcall':
flags |= 2
else:
raise NotImplementedError("abi=%r" % (tp.abi,))
self.cffi_types[index] = CffiOp(OP_FUNCTION_END, flags)
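    # --- editor's note: illustrative example, not part of cffi ------------
    # For 'int f(long)' the function occupies consecutive opcode slots:
    #     cffi_types[i]   = OP_FUNCTION <index of the 'int' result>
    #     cffi_types[i+1] = <opcode of 'long', or OP_NOOP pointing at it>
    #     cffi_types[i+2] = OP_FUNCTION_END <flags: bit 0 ellipsis,
    #                                        bit 1 __stdcall>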
def _emit_bytecode_PointerType(self, tp, index):
self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[tp.totype])
_emit_bytecode_ConstPointerType = _emit_bytecode_PointerType
_emit_bytecode_NamedPointerType = _emit_bytecode_PointerType
def _emit_bytecode_FunctionPtrType(self, tp, index):
raw = tp.as_raw_function()
self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[raw])
def _emit_bytecode_ArrayType(self, tp, index):
item_index = self._typesdict[tp.item]
if tp.length is None:
self.cffi_types[index] = CffiOp(OP_OPEN_ARRAY, item_index)
elif tp.length == '...':
raise VerificationError(
"type %s badly placed: the '...' array length can only be "
"used on global arrays or on fields of structures" % (
str(tp).replace('/*...*/', '...'),))
else:
assert self.cffi_types[index + 1] == 'LEN'
self.cffi_types[index] = CffiOp(OP_ARRAY, item_index)
self.cffi_types[index + 1] = CffiOp(None, str(tp.length))
def _emit_bytecode_StructType(self, tp, index):
struct_index = self._struct_unions[tp]
self.cffi_types[index] = CffiOp(OP_STRUCT_UNION, struct_index)
_emit_bytecode_UnionType = _emit_bytecode_StructType
def _emit_bytecode_EnumType(self, tp, index):
enum_index = self._enums[tp]
self.cffi_types[index] = CffiOp(OP_ENUM, enum_index)
if sys.version_info >= (3,):
NativeIO = io.StringIO
else:
class NativeIO(io.BytesIO):
def write(self, s):
if isinstance(s, unicode):
s = s.encode('ascii')
super(NativeIO, self).write(s)
def _make_c_or_py_source(ffi, module_name, preamble, target_file, verbose):
if verbose:
print("generating %s" % (target_file,))
recompiler = Recompiler(ffi, module_name,
target_is_python=(preamble is None))
recompiler.collect_type_table()
recompiler.collect_step_tables()
f = NativeIO()
recompiler.write_source_to_f(f, preamble)
output = f.getvalue()
try:
with open(target_file, 'r') as f1:
if f1.read(len(output) + 1) != output:
raise IOError
if verbose:
print("(already up-to-date)")
return False # already up-to-date
except IOError:
tmp_file = '%s.~%d' % (target_file, os.getpid())
with open(tmp_file, 'w') as f1:
f1.write(output)
try:
os.rename(tmp_file, target_file)
except OSError:
os.unlink(target_file)
os.rename(tmp_file, target_file)
return True
def make_c_source(ffi, module_name, preamble, target_c_file, verbose=False):
assert preamble is not None
return _make_c_or_py_source(ffi, module_name, preamble, target_c_file,
verbose)
def make_py_source(ffi, module_name, target_py_file, verbose=False):
return _make_c_or_py_source(ffi, module_name, None, target_py_file,
verbose)
def _modname_to_file(outputdir, modname, extension):
parts = modname.split('.')
try:
os.makedirs(os.path.join(outputdir, *parts[:-1]))
except OSError:
pass
parts[-1] += extension
return os.path.join(outputdir, *parts), parts
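# --- editor's note: illustrative example, not part of cffi -----------------
# _modname_to_file('build', 'pkg.sub.mod', '.c') creates build/pkg/sub/ if
# needed and returns
#     (os.path.join('build', 'pkg', 'sub', 'mod.c'), ['pkg', 'sub', 'mod.c'])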
# Aaargh. Distutils is not tested at all for the purpose of compiling
# DLLs that are not extension modules. Here are some hacks to work
# around that, in the _patch_for_*() functions...
def _patch_meth(patchlist, cls, name, new_meth):
old = getattr(cls, name)
patchlist.append((cls, name, old))
setattr(cls, name, new_meth)
return old
def _unpatch_meths(patchlist):
for cls, name, old_meth in reversed(patchlist):
setattr(cls, name, old_meth)
def _patch_for_embedding(patchlist):
if sys.platform == 'win32':
# we must not remove the manifest when building for embedding!
from distutils.msvc9compiler import MSVCCompiler
_patch_meth(patchlist, MSVCCompiler, '_remove_visual_c_ref',
lambda self, manifest_file: manifest_file)
if sys.platform == 'darwin':
# we must not make a '-bundle', but a '-dynamiclib' instead
from distutils.ccompiler import CCompiler
def my_link_shared_object(self, *args, **kwds):
if '-bundle' in self.linker_so:
self.linker_so = list(self.linker_so)
i = self.linker_so.index('-bundle')
self.linker_so[i] = '-dynamiclib'
return old_link_shared_object(self, *args, **kwds)
old_link_shared_object = _patch_meth(patchlist, CCompiler,
'link_shared_object',
my_link_shared_object)
def _patch_for_target(patchlist, target):
from distutils.command.build_ext import build_ext
# if 'target' is different from '*', we need to patch some internal
# method to just return this 'target' value, instead of having it
# built from module_name
if target.endswith('.*'):
target = target[:-2]
if sys.platform == 'win32':
target += '.dll'
elif sys.platform == 'darwin':
target += '.dylib'
else:
target += '.so'
_patch_meth(patchlist, build_ext, 'get_ext_filename',
lambda self, ext_name: target)
def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True,
c_file=None, source_extension='.c', extradir=None,
compiler_verbose=1, target=None, debug=None, **kwds):
if not isinstance(module_name, str):
module_name = module_name.encode('ascii')
if ffi._windows_unicode:
ffi._apply_windows_unicode(kwds)
if preamble is not None:
embedding = (ffi._embedding is not None)
if embedding:
ffi._apply_embedding_fix(kwds)
if c_file is None:
c_file, parts = _modname_to_file(tmpdir, module_name,
source_extension)
if extradir:
parts = [extradir] + parts
ext_c_file = os.path.join(*parts)
else:
ext_c_file = c_file
#
if target is None:
if embedding:
target = '%s.*' % module_name
else:
target = '*'
#
ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds)
updated = make_c_source(ffi, module_name, preamble, c_file,
verbose=compiler_verbose)
if call_c_compiler:
patchlist = []
cwd = os.getcwd()
try:
if embedding:
_patch_for_embedding(patchlist)
if target != '*':
_patch_for_target(patchlist, target)
if compiler_verbose:
if tmpdir == '.':
msg = 'the current directory is'
else:
msg = 'setting the current directory to'
print('%s %r' % (msg, os.path.abspath(tmpdir)))
os.chdir(tmpdir)
outputfilename = ffiplatform.compile('.', ext,
compiler_verbose, debug)
finally:
os.chdir(cwd)
_unpatch_meths(patchlist)
return outputfilename
else:
return ext, updated
else:
if c_file is None:
c_file, _ = _modname_to_file(tmpdir, module_name, '.py')
updated = make_py_source(ffi, module_name, c_file,
verbose=compiler_verbose)
if call_c_compiler:
return c_file
else:
return None, updated
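# --- editor's note: illustrative usage, not part of this module ------------
# recompile() is normally reached through the public API, e.g.:
#
#     import cffi
#     ffibuilder = cffi.FFI()
#     ffibuilder.cdef("int add(int, int);")
#     ffibuilder.set_source("_example",
#                           "int add(int a, int b) { return a + b; }")
#     ffibuilder.compile(tmpdir=".", verbose=True)   # ends up in recompile()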
| 64,598 | Python | 39.833755 | 80 | 0.507601 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/backend_ctypes.py | import ctypes, ctypes.util, operator, sys
from . import model
if sys.version_info < (3,):
bytechr = chr
else:
unicode = str
long = int
xrange = range
bytechr = lambda num: bytes([num])
class CTypesType(type):
pass
class CTypesData(object):
__metaclass__ = CTypesType
__slots__ = ['__weakref__']
__name__ = '<cdata>'
def __init__(self, *args):
raise TypeError("cannot instantiate %r" % (self.__class__,))
@classmethod
def _newp(cls, init):
raise TypeError("expected a pointer or array ctype, got '%s'"
% (cls._get_c_name(),))
@staticmethod
def _to_ctypes(value):
raise TypeError
@classmethod
def _arg_to_ctypes(cls, *value):
try:
ctype = cls._ctype
except AttributeError:
raise TypeError("cannot create an instance of %r" % (cls,))
if value:
res = cls._to_ctypes(*value)
if not isinstance(res, ctype):
res = cls._ctype(res)
else:
res = cls._ctype()
return res
@classmethod
def _create_ctype_obj(cls, init):
if init is None:
return cls._arg_to_ctypes()
else:
return cls._arg_to_ctypes(init)
@staticmethod
def _from_ctypes(ctypes_value):
raise TypeError
@classmethod
def _get_c_name(cls, replace_with=''):
return cls._reftypename.replace(' &', replace_with)
@classmethod
def _fix_class(cls):
cls.__name__ = 'CData<%s>' % (cls._get_c_name(),)
cls.__qualname__ = 'CData<%s>' % (cls._get_c_name(),)
cls.__module__ = 'ffi'
def _get_own_repr(self):
raise NotImplementedError
def _addr_repr(self, address):
if address == 0:
return 'NULL'
else:
if address < 0:
address += 1 << (8*ctypes.sizeof(ctypes.c_void_p))
return '0x%x' % address
def __repr__(self, c_name=None):
own = self._get_own_repr()
return '<cdata %r %s>' % (c_name or self._get_c_name(), own)
def _convert_to_address(self, BClass):
if BClass is None:
raise TypeError("cannot convert %r to an address" % (
self._get_c_name(),))
else:
raise TypeError("cannot convert %r to %r" % (
self._get_c_name(), BClass._get_c_name()))
@classmethod
def _get_size(cls):
return ctypes.sizeof(cls._ctype)
def _get_size_of_instance(self):
return ctypes.sizeof(self._ctype)
@classmethod
def _cast_from(cls, source):
raise TypeError("cannot cast to %r" % (cls._get_c_name(),))
def _cast_to_integer(self):
return self._convert_to_address(None)
@classmethod
def _alignment(cls):
return ctypes.alignment(cls._ctype)
def __iter__(self):
raise TypeError("cdata %r does not support iteration" % (
self._get_c_name()),)
def _make_cmp(name):
cmpfunc = getattr(operator, name)
def cmp(self, other):
v_is_ptr = not isinstance(self, CTypesGenericPrimitive)
w_is_ptr = (isinstance(other, CTypesData) and
not isinstance(other, CTypesGenericPrimitive))
if v_is_ptr and w_is_ptr:
return cmpfunc(self._convert_to_address(None),
other._convert_to_address(None))
elif v_is_ptr or w_is_ptr:
return NotImplemented
else:
if isinstance(self, CTypesGenericPrimitive):
self = self._value
if isinstance(other, CTypesGenericPrimitive):
other = other._value
return cmpfunc(self, other)
cmp.func_name = name
return cmp
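    # --- editor's note: summary comment, not part of cffi -----------------
    # _make_cmp() builds the six rich comparisons below: two pointer-like
    # cdata objects compare by address, two primitives compare by value,
    # and mixed pointer/primitive comparisons return NotImplemented so
    # Python falls back to the other operand.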
__eq__ = _make_cmp('__eq__')
__ne__ = _make_cmp('__ne__')
__lt__ = _make_cmp('__lt__')
__le__ = _make_cmp('__le__')
__gt__ = _make_cmp('__gt__')
__ge__ = _make_cmp('__ge__')
def __hash__(self):
return hash(self._convert_to_address(None))
def _to_string(self, maxlen):
raise TypeError("string(): %r" % (self,))
class CTypesGenericPrimitive(CTypesData):
__slots__ = []
def __hash__(self):
return hash(self._value)
def _get_own_repr(self):
return repr(self._from_ctypes(self._value))
class CTypesGenericArray(CTypesData):
__slots__ = []
@classmethod
def _newp(cls, init):
return cls(init)
def __iter__(self):
for i in xrange(len(self)):
yield self[i]
def _get_own_repr(self):
return self._addr_repr(ctypes.addressof(self._blob))
class CTypesGenericPtr(CTypesData):
__slots__ = ['_address', '_as_ctype_ptr']
_automatic_casts = False
kind = "pointer"
@classmethod
def _newp(cls, init):
return cls(init)
@classmethod
def _cast_from(cls, source):
if source is None:
address = 0
elif isinstance(source, CTypesData):
address = source._cast_to_integer()
elif isinstance(source, (int, long)):
address = source
else:
raise TypeError("bad type for cast to %r: %r" %
(cls, type(source).__name__))
return cls._new_pointer_at(address)
@classmethod
def _new_pointer_at(cls, address):
self = cls.__new__(cls)
self._address = address
self._as_ctype_ptr = ctypes.cast(address, cls._ctype)
return self
def _get_own_repr(self):
try:
return self._addr_repr(self._address)
except AttributeError:
return '???'
def _cast_to_integer(self):
return self._address
def __nonzero__(self):
return bool(self._address)
__bool__ = __nonzero__
@classmethod
def _to_ctypes(cls, value):
if not isinstance(value, CTypesData):
raise TypeError("unexpected %s object" % type(value).__name__)
address = value._convert_to_address(cls)
return ctypes.cast(address, cls._ctype)
@classmethod
def _from_ctypes(cls, ctypes_ptr):
address = ctypes.cast(ctypes_ptr, ctypes.c_void_p).value or 0
return cls._new_pointer_at(address)
@classmethod
def _initialize(cls, ctypes_ptr, value):
if value:
ctypes_ptr.contents = cls._to_ctypes(value).contents
def _convert_to_address(self, BClass):
if (BClass in (self.__class__, None) or BClass._automatic_casts
or self._automatic_casts):
return self._address
else:
return CTypesData._convert_to_address(self, BClass)
class CTypesBaseStructOrUnion(CTypesData):
__slots__ = ['_blob']
@classmethod
def _create_ctype_obj(cls, init):
# may be overridden
raise TypeError("cannot instantiate opaque type %s" % (cls,))
def _get_own_repr(self):
return self._addr_repr(ctypes.addressof(self._blob))
@classmethod
def _offsetof(cls, fieldname):
return getattr(cls._ctype, fieldname).offset
def _convert_to_address(self, BClass):
if getattr(BClass, '_BItem', None) is self.__class__:
return ctypes.addressof(self._blob)
else:
return CTypesData._convert_to_address(self, BClass)
@classmethod
def _from_ctypes(cls, ctypes_struct_or_union):
self = cls.__new__(cls)
self._blob = ctypes_struct_or_union
return self
@classmethod
def _to_ctypes(cls, value):
return value._blob
def __repr__(self, c_name=None):
return CTypesData.__repr__(self, c_name or self._get_c_name(' &'))
class CTypesBackend(object):
PRIMITIVE_TYPES = {
'char': ctypes.c_char,
'short': ctypes.c_short,
'int': ctypes.c_int,
'long': ctypes.c_long,
'long long': ctypes.c_longlong,
'signed char': ctypes.c_byte,
'unsigned char': ctypes.c_ubyte,
'unsigned short': ctypes.c_ushort,
'unsigned int': ctypes.c_uint,
'unsigned long': ctypes.c_ulong,
'unsigned long long': ctypes.c_ulonglong,
'float': ctypes.c_float,
'double': ctypes.c_double,
'_Bool': ctypes.c_bool,
}
for _name in ['unsigned long long', 'unsigned long',
'unsigned int', 'unsigned short', 'unsigned char']:
_size = ctypes.sizeof(PRIMITIVE_TYPES[_name])
PRIMITIVE_TYPES['uint%d_t' % (8*_size)] = PRIMITIVE_TYPES[_name]
if _size == ctypes.sizeof(ctypes.c_void_p):
PRIMITIVE_TYPES['uintptr_t'] = PRIMITIVE_TYPES[_name]
if _size == ctypes.sizeof(ctypes.c_size_t):
PRIMITIVE_TYPES['size_t'] = PRIMITIVE_TYPES[_name]
for _name in ['long long', 'long', 'int', 'short', 'signed char']:
_size = ctypes.sizeof(PRIMITIVE_TYPES[_name])
PRIMITIVE_TYPES['int%d_t' % (8*_size)] = PRIMITIVE_TYPES[_name]
if _size == ctypes.sizeof(ctypes.c_void_p):
PRIMITIVE_TYPES['intptr_t'] = PRIMITIVE_TYPES[_name]
PRIMITIVE_TYPES['ptrdiff_t'] = PRIMITIVE_TYPES[_name]
if _size == ctypes.sizeof(ctypes.c_size_t):
PRIMITIVE_TYPES['ssize_t'] = PRIMITIVE_TYPES[_name]
def __init__(self):
self.RTLD_LAZY = 0 # not supported anyway by ctypes
self.RTLD_NOW = 0
self.RTLD_GLOBAL = ctypes.RTLD_GLOBAL
self.RTLD_LOCAL = ctypes.RTLD_LOCAL
def set_ffi(self, ffi):
self.ffi = ffi
def _get_types(self):
return CTypesData, CTypesType
def load_library(self, path, flags=0):
cdll = ctypes.CDLL(path, flags)
return CTypesLibrary(self, cdll)
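    # --- editor's note: illustrative usage, not part of cffi --------------
    # This pure-Python backend is selected explicitly, e.g. (POSIX assumed
    # for dlopen(None)):
    #     from cffi import FFI
    #     from cffi.backend_ctypes import CTypesBackend
    #     ffi = FFI(backend=CTypesBackend())
    #     C = ffi.dlopen(None)   # the standard C library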
def new_void_type(self):
class CTypesVoid(CTypesData):
__slots__ = []
_reftypename = 'void &'
@staticmethod
def _from_ctypes(novalue):
return None
@staticmethod
def _to_ctypes(novalue):
if novalue is not None:
raise TypeError("None expected, got %s object" %
(type(novalue).__name__,))
return None
CTypesVoid._fix_class()
return CTypesVoid
def new_primitive_type(self, name):
if name == 'wchar_t':
raise NotImplementedError(name)
ctype = self.PRIMITIVE_TYPES[name]
if name == 'char':
kind = 'char'
elif name in ('float', 'double'):
kind = 'float'
else:
if name in ('signed char', 'unsigned char'):
kind = 'byte'
elif name == '_Bool':
kind = 'bool'
else:
kind = 'int'
is_signed = (ctype(-1).value == -1)
#
def _cast_source_to_int(source):
if isinstance(source, (int, long, float)):
source = int(source)
elif isinstance(source, CTypesData):
source = source._cast_to_integer()
elif isinstance(source, bytes):
source = ord(source)
elif source is None:
source = 0
else:
raise TypeError("bad type for cast to %r: %r" %
(CTypesPrimitive, type(source).__name__))
return source
#
kind1 = kind
class CTypesPrimitive(CTypesGenericPrimitive):
__slots__ = ['_value']
_ctype = ctype
_reftypename = '%s &' % name
kind = kind1
def __init__(self, value):
self._value = value
@staticmethod
def _create_ctype_obj(init):
if init is None:
return ctype()
return ctype(CTypesPrimitive._to_ctypes(init))
if kind == 'int' or kind == 'byte':
@classmethod
def _cast_from(cls, source):
source = _cast_source_to_int(source)
source = ctype(source).value # cast within range
return cls(source)
def __int__(self):
return self._value
if kind == 'bool':
@classmethod
def _cast_from(cls, source):
if not isinstance(source, (int, long, float)):
source = _cast_source_to_int(source)
return cls(bool(source))
def __int__(self):
return int(self._value)
if kind == 'char':
@classmethod
def _cast_from(cls, source):
source = _cast_source_to_int(source)
source = bytechr(source & 0xFF)
return cls(source)
def __int__(self):
return ord(self._value)
if kind == 'float':
@classmethod
def _cast_from(cls, source):
if isinstance(source, float):
pass
elif isinstance(source, CTypesGenericPrimitive):
if hasattr(source, '__float__'):
source = float(source)
else:
source = int(source)
else:
source = _cast_source_to_int(source)
source = ctype(source).value # fix precision
return cls(source)
def __int__(self):
return int(self._value)
def __float__(self):
return self._value
_cast_to_integer = __int__
if kind == 'int' or kind == 'byte' or kind == 'bool':
@staticmethod
def _to_ctypes(x):
if not isinstance(x, (int, long)):
if isinstance(x, CTypesData):
x = int(x)
else:
raise TypeError("integer expected, got %s" %
type(x).__name__)
if ctype(x).value != x:
if not is_signed and x < 0:
raise OverflowError("%s: negative integer" % name)
else:
raise OverflowError("%s: integer out of bounds"
% name)
return x
if kind == 'char':
@staticmethod
def _to_ctypes(x):
if isinstance(x, bytes) and len(x) == 1:
return x
if isinstance(x, CTypesPrimitive): # <CData <char>>
return x._value
raise TypeError("character expected, got %s" %
type(x).__name__)
def __nonzero__(self):
return ord(self._value) != 0
else:
def __nonzero__(self):
return self._value != 0
__bool__ = __nonzero__
if kind == 'float':
@staticmethod
def _to_ctypes(x):
if not isinstance(x, (int, long, float, CTypesData)):
raise TypeError("float expected, got %s" %
type(x).__name__)
return ctype(x).value
@staticmethod
def _from_ctypes(value):
return getattr(value, 'value', value)
@staticmethod
def _initialize(blob, init):
blob.value = CTypesPrimitive._to_ctypes(init)
if kind == 'char':
def _to_string(self, maxlen):
return self._value
if kind == 'byte':
def _to_string(self, maxlen):
return chr(self._value & 0xff)
#
CTypesPrimitive._fix_class()
return CTypesPrimitive
def new_pointer_type(self, BItem):
getbtype = self.ffi._get_cached_btype
if BItem is getbtype(model.PrimitiveType('char')):
kind = 'charp'
elif BItem in (getbtype(model.PrimitiveType('signed char')),
getbtype(model.PrimitiveType('unsigned char'))):
kind = 'bytep'
elif BItem is getbtype(model.void_type):
kind = 'voidp'
else:
kind = 'generic'
#
class CTypesPtr(CTypesGenericPtr):
__slots__ = ['_own']
if kind == 'charp':
__slots__ += ['__as_strbuf']
_BItem = BItem
if hasattr(BItem, '_ctype'):
_ctype = ctypes.POINTER(BItem._ctype)
_bitem_size = ctypes.sizeof(BItem._ctype)
else:
_ctype = ctypes.c_void_p
if issubclass(BItem, CTypesGenericArray):
_reftypename = BItem._get_c_name('(* &)')
else:
_reftypename = BItem._get_c_name(' * &')
def __init__(self, init):
ctypeobj = BItem._create_ctype_obj(init)
if kind == 'charp':
self.__as_strbuf = ctypes.create_string_buffer(
ctypeobj.value + b'\x00')
self._as_ctype_ptr = ctypes.cast(
self.__as_strbuf, self._ctype)
else:
self._as_ctype_ptr = ctypes.pointer(ctypeobj)
self._address = ctypes.cast(self._as_ctype_ptr,
ctypes.c_void_p).value
self._own = True
def __add__(self, other):
if isinstance(other, (int, long)):
return self._new_pointer_at(self._address +
other * self._bitem_size)
else:
return NotImplemented
def __sub__(self, other):
if isinstance(other, (int, long)):
return self._new_pointer_at(self._address -
other * self._bitem_size)
elif type(self) is type(other):
return (self._address - other._address) // self._bitem_size
else:
return NotImplemented
def __getitem__(self, index):
if getattr(self, '_own', False) and index != 0:
raise IndexError
return BItem._from_ctypes(self._as_ctype_ptr[index])
def __setitem__(self, index, value):
self._as_ctype_ptr[index] = BItem._to_ctypes(value)
if kind == 'charp' or kind == 'voidp':
@classmethod
def _arg_to_ctypes(cls, *value):
if value and isinstance(value[0], bytes):
return ctypes.c_char_p(value[0])
else:
return super(CTypesPtr, cls)._arg_to_ctypes(*value)
if kind == 'charp' or kind == 'bytep':
def _to_string(self, maxlen):
if maxlen < 0:
maxlen = sys.maxsize
p = ctypes.cast(self._as_ctype_ptr,
ctypes.POINTER(ctypes.c_char))
n = 0
while n < maxlen and p[n] != b'\x00':
n += 1
return b''.join([p[i] for i in range(n)])
def _get_own_repr(self):
if getattr(self, '_own', False):
return 'owning %d bytes' % (
ctypes.sizeof(self._as_ctype_ptr.contents),)
return super(CTypesPtr, self)._get_own_repr()
#
if (BItem is self.ffi._get_cached_btype(model.void_type) or
BItem is self.ffi._get_cached_btype(model.PrimitiveType('char'))):
CTypesPtr._automatic_casts = True
#
CTypesPtr._fix_class()
return CTypesPtr
def new_array_type(self, CTypesPtr, length):
if length is None:
brackets = ' &[]'
else:
brackets = ' &[%d]' % length
BItem = CTypesPtr._BItem
getbtype = self.ffi._get_cached_btype
if BItem is getbtype(model.PrimitiveType('char')):
kind = 'char'
elif BItem in (getbtype(model.PrimitiveType('signed char')),
getbtype(model.PrimitiveType('unsigned char'))):
kind = 'byte'
else:
kind = 'generic'
#
class CTypesArray(CTypesGenericArray):
__slots__ = ['_blob', '_own']
if length is not None:
_ctype = BItem._ctype * length
else:
__slots__.append('_ctype')
_reftypename = BItem._get_c_name(brackets)
_declared_length = length
_CTPtr = CTypesPtr
def __init__(self, init):
if length is None:
if isinstance(init, (int, long)):
len1 = init
init = None
elif kind == 'char' and isinstance(init, bytes):
len1 = len(init) + 1 # extra null
else:
init = tuple(init)
len1 = len(init)
self._ctype = BItem._ctype * len1
self._blob = self._ctype()
self._own = True
if init is not None:
self._initialize(self._blob, init)
@staticmethod
def _initialize(blob, init):
if isinstance(init, bytes):
init = [init[i:i+1] for i in range(len(init))]
else:
if isinstance(init, CTypesGenericArray):
if (len(init) != len(blob) or
not isinstance(init, CTypesArray)):
raise TypeError("length/type mismatch: %s" % (init,))
init = tuple(init)
if len(init) > len(blob):
raise IndexError("too many initializers")
addr = ctypes.cast(blob, ctypes.c_void_p).value
PTR = ctypes.POINTER(BItem._ctype)
itemsize = ctypes.sizeof(BItem._ctype)
for i, value in enumerate(init):
p = ctypes.cast(addr + i * itemsize, PTR)
BItem._initialize(p.contents, value)
def __len__(self):
return len(self._blob)
def __getitem__(self, index):
if not (0 <= index < len(self._blob)):
raise IndexError
return BItem._from_ctypes(self._blob[index])
def __setitem__(self, index, value):
if not (0 <= index < len(self._blob)):
raise IndexError
self._blob[index] = BItem._to_ctypes(value)
if kind == 'char' or kind == 'byte':
def _to_string(self, maxlen):
if maxlen < 0:
maxlen = len(self._blob)
p = ctypes.cast(self._blob,
ctypes.POINTER(ctypes.c_char))
n = 0
while n < maxlen and p[n] != b'\x00':
n += 1
return b''.join([p[i] for i in range(n)])
def _get_own_repr(self):
if getattr(self, '_own', False):
return 'owning %d bytes' % (ctypes.sizeof(self._blob),)
return super(CTypesArray, self)._get_own_repr()
def _convert_to_address(self, BClass):
if BClass in (CTypesPtr, None) or BClass._automatic_casts:
return ctypes.addressof(self._blob)
else:
return CTypesData._convert_to_address(self, BClass)
@staticmethod
def _from_ctypes(ctypes_array):
self = CTypesArray.__new__(CTypesArray)
self._blob = ctypes_array
return self
@staticmethod
def _arg_to_ctypes(value):
return CTypesPtr._arg_to_ctypes(value)
def __add__(self, other):
if isinstance(other, (int, long)):
return CTypesPtr._new_pointer_at(
ctypes.addressof(self._blob) +
other * ctypes.sizeof(BItem._ctype))
else:
return NotImplemented
@classmethod
def _cast_from(cls, source):
raise NotImplementedError("casting to %r" % (
cls._get_c_name(),))
#
CTypesArray._fix_class()
return CTypesArray
def _new_struct_or_union(self, kind, name, base_ctypes_class):
#
class struct_or_union(base_ctypes_class):
pass
struct_or_union.__name__ = '%s_%s' % (kind, name)
kind1 = kind
#
class CTypesStructOrUnion(CTypesBaseStructOrUnion):
__slots__ = ['_blob']
_ctype = struct_or_union
_reftypename = '%s &' % (name,)
_kind = kind = kind1
#
CTypesStructOrUnion._fix_class()
return CTypesStructOrUnion
def new_struct_type(self, name):
return self._new_struct_or_union('struct', name, ctypes.Structure)
def new_union_type(self, name):
return self._new_struct_or_union('union', name, ctypes.Union)
def complete_struct_or_union(self, CTypesStructOrUnion, fields, tp,
totalsize=-1, totalalignment=-1, sflags=0,
pack=0):
if totalsize >= 0 or totalalignment >= 0:
raise NotImplementedError("the ctypes backend of CFFI does not support "
"structures completed by verify(); please "
"compile and install the _cffi_backend module.")
struct_or_union = CTypesStructOrUnion._ctype
fnames = [fname for (fname, BField, bitsize) in fields]
btypes = [BField for (fname, BField, bitsize) in fields]
bitfields = [bitsize for (fname, BField, bitsize) in fields]
#
bfield_types = {}
cfields = []
for (fname, BField, bitsize) in fields:
if bitsize < 0:
cfields.append((fname, BField._ctype))
bfield_types[fname] = BField
else:
cfields.append((fname, BField._ctype, bitsize))
bfield_types[fname] = Ellipsis
if sflags & 8:
struct_or_union._pack_ = 1
elif pack:
struct_or_union._pack_ = pack
struct_or_union._fields_ = cfields
CTypesStructOrUnion._bfield_types = bfield_types
#
@staticmethod
def _create_ctype_obj(init):
result = struct_or_union()
if init is not None:
initialize(result, init)
return result
CTypesStructOrUnion._create_ctype_obj = _create_ctype_obj
#
def initialize(blob, init):
if is_union:
if len(init) > 1:
raise ValueError("union initializer: %d items given, but "
"only one supported (use a dict if needed)"
% (len(init),))
if not isinstance(init, dict):
if isinstance(init, (bytes, unicode)):
raise TypeError("union initializer: got a str")
init = tuple(init)
if len(init) > len(fnames):
raise ValueError("too many values for %s initializer" %
CTypesStructOrUnion._get_c_name())
init = dict(zip(fnames, init))
addr = ctypes.addressof(blob)
for fname, value in init.items():
BField, bitsize = name2fieldtype[fname]
assert bitsize < 0, \
"not implemented: initializer with bit fields"
offset = CTypesStructOrUnion._offsetof(fname)
PTR = ctypes.POINTER(BField._ctype)
p = ctypes.cast(addr + offset, PTR)
BField._initialize(p.contents, value)
is_union = CTypesStructOrUnion._kind == 'union'
name2fieldtype = dict(zip(fnames, zip(btypes, bitfields)))
#
for fname, BField, bitsize in fields:
if fname == '':
raise NotImplementedError("nested anonymous structs/unions")
if hasattr(CTypesStructOrUnion, fname):
raise ValueError("the field name %r conflicts in "
"the ctypes backend" % fname)
if bitsize < 0:
def getter(self, fname=fname, BField=BField,
offset=CTypesStructOrUnion._offsetof(fname),
PTR=ctypes.POINTER(BField._ctype)):
addr = ctypes.addressof(self._blob)
p = ctypes.cast(addr + offset, PTR)
return BField._from_ctypes(p.contents)
def setter(self, value, fname=fname, BField=BField):
setattr(self._blob, fname, BField._to_ctypes(value))
#
if issubclass(BField, CTypesGenericArray):
setter = None
if BField._declared_length == 0:
def getter(self, fname=fname, BFieldPtr=BField._CTPtr,
offset=CTypesStructOrUnion._offsetof(fname),
PTR=ctypes.POINTER(BField._ctype)):
addr = ctypes.addressof(self._blob)
p = ctypes.cast(addr + offset, PTR)
return BFieldPtr._from_ctypes(p)
#
else:
def getter(self, fname=fname, BField=BField):
return BField._from_ctypes(getattr(self._blob, fname))
def setter(self, value, fname=fname, BField=BField):
# xxx obscure workaround
value = BField._to_ctypes(value)
oldvalue = getattr(self._blob, fname)
setattr(self._blob, fname, value)
if value != getattr(self._blob, fname):
setattr(self._blob, fname, oldvalue)
raise OverflowError("value too large for bitfield")
setattr(CTypesStructOrUnion, fname, property(getter, setter))
#
CTypesPtr = self.ffi._get_cached_btype(model.PointerType(tp))
for fname in fnames:
if hasattr(CTypesPtr, fname):
raise ValueError("the field name %r conflicts in "
"the ctypes backend" % fname)
def getter(self, fname=fname):
return getattr(self[0], fname)
def setter(self, value, fname=fname):
setattr(self[0], fname, value)
setattr(CTypesPtr, fname, property(getter, setter))
def new_function_type(self, BArgs, BResult, has_varargs):
nameargs = [BArg._get_c_name() for BArg in BArgs]
if has_varargs:
nameargs.append('...')
nameargs = ', '.join(nameargs)
#
class CTypesFunctionPtr(CTypesGenericPtr):
__slots__ = ['_own_callback', '_name']
_ctype = ctypes.CFUNCTYPE(getattr(BResult, '_ctype', None),
*[BArg._ctype for BArg in BArgs],
use_errno=True)
_reftypename = BResult._get_c_name('(* &)(%s)' % (nameargs,))
def __init__(self, init, error=None):
# create a callback to the Python callable init()
import traceback
assert not has_varargs, "varargs not supported for callbacks"
if getattr(BResult, '_ctype', None) is not None:
error = BResult._from_ctypes(
BResult._create_ctype_obj(error))
else:
error = None
def callback(*args):
args2 = []
for arg, BArg in zip(args, BArgs):
args2.append(BArg._from_ctypes(arg))
try:
res2 = init(*args2)
res2 = BResult._to_ctypes(res2)
except:
traceback.print_exc()
res2 = error
if issubclass(BResult, CTypesGenericPtr):
if res2:
res2 = ctypes.cast(res2, ctypes.c_void_p).value
# .value: http://bugs.python.org/issue1574593
else:
res2 = None
#print repr(res2)
return res2
if issubclass(BResult, CTypesGenericPtr):
# The only pointers callbacks can return are void*s:
# http://bugs.python.org/issue5710
callback_ctype = ctypes.CFUNCTYPE(
ctypes.c_void_p,
*[BArg._ctype for BArg in BArgs],
use_errno=True)
else:
callback_ctype = CTypesFunctionPtr._ctype
self._as_ctype_ptr = callback_ctype(callback)
self._address = ctypes.cast(self._as_ctype_ptr,
ctypes.c_void_p).value
self._own_callback = init
@staticmethod
def _initialize(ctypes_ptr, value):
if value:
raise NotImplementedError("ctypes backend: not supported: "
"initializers for function pointers")
def __repr__(self):
c_name = getattr(self, '_name', None)
if c_name:
i = self._reftypename.index('(* &)')
if self._reftypename[i-1] not in ' )*':
c_name = ' ' + c_name
c_name = self._reftypename.replace('(* &)', c_name)
return CTypesData.__repr__(self, c_name)
def _get_own_repr(self):
if getattr(self, '_own_callback', None) is not None:
return 'calling %r' % (self._own_callback,)
return super(CTypesFunctionPtr, self)._get_own_repr()
def __call__(self, *args):
if has_varargs:
assert len(args) >= len(BArgs)
extraargs = args[len(BArgs):]
args = args[:len(BArgs)]
else:
assert len(args) == len(BArgs)
ctypes_args = []
for arg, BArg in zip(args, BArgs):
ctypes_args.append(BArg._arg_to_ctypes(arg))
if has_varargs:
for i, arg in enumerate(extraargs):
if arg is None:
ctypes_args.append(ctypes.c_void_p(0)) # NULL
continue
if not isinstance(arg, CTypesData):
raise TypeError(
"argument %d passed in the variadic part "
"needs to be a cdata object (got %s)" %
(1 + len(BArgs) + i, type(arg).__name__))
ctypes_args.append(arg._arg_to_ctypes(arg))
result = self._as_ctype_ptr(*ctypes_args)
return BResult._from_ctypes(result)
#
CTypesFunctionPtr._fix_class()
return CTypesFunctionPtr
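        # Usage sketch (illustrative; BInt stands for a hypothetical integer
        # ctype previously built by this backend):
        #   BFunc = backend.new_function_type((BInt, BInt), BInt, False)
        #   cb = BFunc(lambda a, b: a + b)   # Python callable -> CFUNCTYPE
        #   assert cb(2, 3) == 5             # args/result converted per BArg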
def new_enum_type(self, name, enumerators, enumvalues, CTypesInt):
assert isinstance(name, str)
reverse_mapping = dict(zip(reversed(enumvalues),
reversed(enumerators)))
#
class CTypesEnum(CTypesInt):
__slots__ = []
_reftypename = '%s &' % name
def _get_own_repr(self):
value = self._value
try:
return '%d: %s' % (value, reverse_mapping[value])
except KeyError:
return str(value)
def _to_string(self, maxlen):
value = self._value
try:
return reverse_mapping[value]
except KeyError:
return str(value)
#
CTypesEnum._fix_class()
return CTypesEnum
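        # e.g. (illustrative) for ``enum color { RED=0, GREEN=1 }`` the repr
        # shows '0: RED' via reverse_mapping; values without a known
        # enumerator fall back to plain str(value).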
def get_errno(self):
return ctypes.get_errno()
def set_errno(self, value):
ctypes.set_errno(value)
def string(self, b, maxlen=-1):
return b._to_string(maxlen)
def buffer(self, bptr, size=-1):
raise NotImplementedError("buffer() with ctypes backend")
def sizeof(self, cdata_or_BType):
if isinstance(cdata_or_BType, CTypesData):
return cdata_or_BType._get_size_of_instance()
else:
assert issubclass(cdata_or_BType, CTypesData)
return cdata_or_BType._get_size()
def alignof(self, BType):
assert issubclass(BType, CTypesData)
return BType._alignment()
def newp(self, BType, source):
if not issubclass(BType, CTypesData):
raise TypeError
return BType._newp(source)
def cast(self, BType, source):
return BType._cast_from(source)
def callback(self, BType, source, error, onerror):
assert onerror is None # XXX not implemented
return BType(source, error)
_weakref_cache_ref = None
def gcp(self, cdata, destructor, size=0):
if self._weakref_cache_ref is None:
import weakref
class MyRef(weakref.ref):
def __eq__(self, other):
myref = self()
return self is other or (
myref is not None and myref is other())
def __ne__(self, other):
return not (self == other)
def __hash__(self):
try:
return self._hash
except AttributeError:
self._hash = hash(self())
return self._hash
self._weakref_cache_ref = {}, MyRef
weak_cache, MyRef = self._weakref_cache_ref
if destructor is None:
try:
del weak_cache[MyRef(cdata)]
except KeyError:
raise TypeError("Can remove destructor only on a object "
"previously returned by ffi.gc()")
return None
def remove(k):
cdata, destructor = weak_cache.pop(k, (None, None))
if destructor is not None:
destructor(cdata)
new_cdata = self.cast(self.typeof(cdata), cdata)
assert new_cdata is not cdata
weak_cache[MyRef(new_cdata, remove)] = (cdata, destructor)
return new_cdata
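        # ffi.gc() usage sketch (illustrative; lib.malloc/lib.free are
        # hypothetical):
        #   p = ffi.gc(lib.malloc(42), lib.free)   # free() runs when p dies
        #   ffi.gc(p, None)                        # detaches the destructor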
typeof = type
def getcname(self, BType, replace_with):
return BType._get_c_name(replace_with)
def typeoffsetof(self, BType, fieldname, num=0):
if isinstance(fieldname, str):
if num == 0 and issubclass(BType, CTypesGenericPtr):
BType = BType._BItem
if not issubclass(BType, CTypesBaseStructOrUnion):
raise TypeError("expected a struct or union ctype")
BField = BType._bfield_types[fieldname]
if BField is Ellipsis:
raise TypeError("not supported for bitfields")
return (BField, BType._offsetof(fieldname))
elif isinstance(fieldname, (int, long)):
if issubclass(BType, CTypesGenericArray):
BType = BType._CTPtr
if not issubclass(BType, CTypesGenericPtr):
raise TypeError("expected an array or ptr ctype")
BItem = BType._BItem
offset = BItem._get_size() * fieldname
if offset > sys.maxsize:
raise OverflowError
return (BItem, offset)
else:
raise TypeError(type(fieldname))
def rawaddressof(self, BTypePtr, cdata, offset=None):
if isinstance(cdata, CTypesBaseStructOrUnion):
ptr = ctypes.pointer(type(cdata)._to_ctypes(cdata))
elif isinstance(cdata, CTypesGenericPtr):
if offset is None or not issubclass(type(cdata)._BItem,
CTypesBaseStructOrUnion):
raise TypeError("unexpected cdata type")
ptr = type(cdata)._to_ctypes(cdata)
elif isinstance(cdata, CTypesGenericArray):
ptr = type(cdata)._to_ctypes(cdata)
else:
raise TypeError("expected a <cdata 'struct-or-union'>")
if offset:
ptr = ctypes.cast(
ctypes.c_void_p(
ctypes.cast(ptr, ctypes.c_void_p).value + offset),
type(ptr))
return BTypePtr._from_ctypes(ptr)
class CTypesLibrary(object):
def __init__(self, backend, cdll):
self.backend = backend
self.cdll = cdll
def load_function(self, BType, name):
c_func = getattr(self.cdll, name)
funcobj = BType._from_ctypes(c_func)
funcobj._name = name
return funcobj
def read_variable(self, BType, name):
try:
ctypes_obj = BType._ctype.in_dll(self.cdll, name)
except AttributeError as e:
raise NotImplementedError(e)
return BType._from_ctypes(ctypes_obj)
def write_variable(self, BType, name, value):
new_ctypes_obj = BType._to_ctypes(value)
ctypes_obj = BType._ctype.in_dll(self.cdll, name)
ctypes.memmove(ctypes.addressof(ctypes_obj),
ctypes.addressof(new_ctypes_obj),
ctypes.sizeof(BType._ctype))
| 42,454 | Python | 36.838681 | 86 | 0.49046 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/vengine_cpy.py | #
# DEPRECATED: implementation for ffi.verify()
#
import sys, imp
from . import model
from .error import VerificationError
class VCPythonEngine(object):
_class_key = 'x'
_gen_python_module = True
def __init__(self, verifier):
self.verifier = verifier
self.ffi = verifier.ffi
self._struct_pending_verification = {}
self._types_of_builtin_functions = {}
def patch_extension_kwds(self, kwds):
pass
def find_module(self, module_name, path, so_suffixes):
try:
f, filename, descr = imp.find_module(module_name, path)
except ImportError:
return None
if f is not None:
f.close()
# Note that after a setuptools installation, there are both .py
# and .so files with the same basename. The code here relies on
# imp.find_module() locating the .so in priority.
if descr[0] not in so_suffixes:
return None
return filename
def collect_types(self):
self._typesdict = {}
self._generate("collecttype")
def _prnt(self, what=''):
self._f.write(what + '\n')
def _gettypenum(self, type):
# a KeyError here is a bug. please report it! :-)
return self._typesdict[type]
def _do_collect_type(self, tp):
if ((not isinstance(tp, model.PrimitiveType)
or tp.name == 'long double')
and tp not in self._typesdict):
num = len(self._typesdict)
self._typesdict[tp] = num
def write_source_to_f(self):
self.collect_types()
#
# The new module will have a _cffi_setup() function that receives
# objects from the ffi world, and that calls some setup code in
# the module. This setup code is split in several independent
# functions, e.g. one per constant. The functions are "chained"
# by ending in a tail call to each other.
#
        # This is further split in two chained lists, depending on whether
        # we can do it at import-time or must wait for _cffi_setup() to
# provide us with the <ctype> objects. This is needed because we
# need the values of the enum constants in order to build the
# <ctype 'enum'> that we may have to pass to _cffi_setup().
#
        # The following two 'chained_list_constants' items contain
        # the heads of these two chained lists, as strings that give the
        # call to do, if any.
self._chained_list_constants = ['((void)lib,0)', '((void)lib,0)']
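        # Illustrative shape of the generated chain, assuming two delayed
        # constants FOO and BAR (hypothetical names); each new function
        # tail-calls the previous head and becomes the new head:
        #   static int _cffi_const_BAR(PyObject *lib) { ...; return _cffi_const_FOO(lib); }
        #   static int _cffi_const_FOO(PyObject *lib) { ...; return ((void)lib,0); }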
#
prnt = self._prnt
# first paste some standard set of lines that are mostly '#define'
prnt(cffimod_header)
prnt()
# then paste the C source given by the user, verbatim.
prnt(self.verifier.preamble)
prnt()
#
# call generate_cpy_xxx_decl(), for every xxx found from
# ffi._parser._declarations. This generates all the functions.
self._generate("decl")
#
# implement the function _cffi_setup_custom() as calling the
# head of the chained list.
self._generate_setup_custom()
prnt()
#
# produce the method table, including the entries for the
# generated Python->C function wrappers, which are done
# by generate_cpy_function_method().
prnt('static PyMethodDef _cffi_methods[] = {')
self._generate("method")
prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},')
prnt(' {NULL, NULL, 0, NULL} /* Sentinel */')
prnt('};')
prnt()
#
# standard init.
modname = self.verifier.get_module_name()
constants = self._chained_list_constants[False]
prnt('#if PY_MAJOR_VERSION >= 3')
prnt()
prnt('static struct PyModuleDef _cffi_module_def = {')
prnt(' PyModuleDef_HEAD_INIT,')
prnt(' "%s",' % modname)
prnt(' NULL,')
prnt(' -1,')
prnt(' _cffi_methods,')
prnt(' NULL, NULL, NULL, NULL')
prnt('};')
prnt()
prnt('PyMODINIT_FUNC')
prnt('PyInit_%s(void)' % modname)
prnt('{')
prnt(' PyObject *lib;')
prnt(' lib = PyModule_Create(&_cffi_module_def);')
prnt(' if (lib == NULL)')
prnt(' return NULL;')
prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,))
prnt(' Py_DECREF(lib);')
prnt(' return NULL;')
prnt(' }')
prnt(' return lib;')
prnt('}')
prnt()
prnt('#else')
prnt()
prnt('PyMODINIT_FUNC')
prnt('init%s(void)' % modname)
prnt('{')
prnt(' PyObject *lib;')
prnt(' lib = Py_InitModule("%s", _cffi_methods);' % modname)
prnt(' if (lib == NULL)')
prnt(' return;')
prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,))
prnt(' return;')
        prnt('  return;')
prnt('}')
prnt()
prnt('#endif')
def load_library(self, flags=None):
# XXX review all usages of 'self' here!
# import it as a new extension module
imp.acquire_lock()
try:
if hasattr(sys, "getdlopenflags"):
previous_flags = sys.getdlopenflags()
try:
if hasattr(sys, "setdlopenflags") and flags is not None:
sys.setdlopenflags(flags)
module = imp.load_dynamic(self.verifier.get_module_name(),
self.verifier.modulefilename)
except ImportError as e:
error = "importing %r: %s" % (self.verifier.modulefilename, e)
raise VerificationError(error)
finally:
if hasattr(sys, "setdlopenflags"):
sys.setdlopenflags(previous_flags)
finally:
imp.release_lock()
#
# call loading_cpy_struct() to get the struct layout inferred by
# the C compiler
self._load(module, 'loading')
#
# the C code will need the <ctype> objects. Collect them in
# order in a list.
revmapping = dict([(value, key)
for (key, value) in self._typesdict.items()])
lst = [revmapping[i] for i in range(len(revmapping))]
lst = list(map(self.ffi._get_cached_btype, lst))
#
# build the FFILibrary class and instance and call _cffi_setup().
# this will set up some fields like '_cffi_types', and only then
# it will invoke the chained list of functions that will really
# build (notably) the constant objects, as <cdata> if they are
# pointers, and store them as attributes on the 'library' object.
class FFILibrary(object):
_cffi_python_module = module
_cffi_ffi = self.ffi
_cffi_dir = []
def __dir__(self):
return FFILibrary._cffi_dir + list(self.__dict__)
library = FFILibrary()
if module._cffi_setup(lst, VerificationError, library):
import warnings
warnings.warn("reimporting %r might overwrite older definitions"
% (self.verifier.get_module_name()))
#
# finally, call the loaded_cpy_xxx() functions. This will perform
# the final adjustments, like copying the Python->C wrapper
# functions from the module to the 'library' object, and setting
# up the FFILibrary class with properties for the global C variables.
self._load(module, 'loaded', library=library)
module._cffi_original_ffi = self.ffi
module._cffi_types_of_builtin_funcs = self._types_of_builtin_functions
return library
def _get_declarations(self):
lst = [(key, tp) for (key, (tp, qual)) in
self.ffi._parser._declarations.items()]
lst.sort()
return lst
def _generate(self, step_name):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
try:
method = getattr(self, '_generate_cpy_%s_%s' % (kind,
step_name))
except AttributeError:
raise VerificationError(
"not implemented in verify(): %r" % name)
try:
method(tp, realname)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _load(self, module, step_name, **kwds):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
method = getattr(self, '_%s_cpy_%s' % (step_name, kind))
try:
method(tp, realname, module, **kwds)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _generate_nothing(self, tp, name):
pass
def _loaded_noop(self, tp, name, module, **kwds):
pass
# ----------
def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode):
extraarg = ''
if isinstance(tp, model.PrimitiveType):
if tp.is_integer_type() and tp.name != '_Bool':
converter = '_cffi_to_c_int'
extraarg = ', %s' % tp.name
else:
converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''),
tp.name.replace(' ', '_'))
errvalue = '-1'
#
elif isinstance(tp, model.PointerType):
self._convert_funcarg_to_c_ptr_or_array(tp, fromvar,
tovar, errcode)
return
#
elif isinstance(tp, (model.StructOrUnion, model.EnumType)):
# a struct (not a struct pointer) as a function argument
self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)'
% (tovar, self._gettypenum(tp), fromvar))
self._prnt(' %s;' % errcode)
return
#
elif isinstance(tp, model.FunctionPtrType):
converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('')
extraarg = ', _cffi_type(%d)' % self._gettypenum(tp)
errvalue = 'NULL'
#
else:
raise NotImplementedError(tp)
#
self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg))
self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % (
tovar, tp.get_c_name(''), errvalue))
self._prnt(' %s;' % errcode)
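    # e.g. for a plain ``int`` argument named arg0 this emits (roughly):
    #   x0 = _cffi_to_c_int(arg0, int);
    #   if (x0 == (int)-1 && PyErr_Occurred())
    #     return NULL;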
def _extra_local_variables(self, tp, localvars, freelines):
if isinstance(tp, model.PointerType):
localvars.add('Py_ssize_t datasize')
localvars.add('struct _cffi_freeme_s *large_args_free = NULL')
freelines.add('if (large_args_free != NULL)'
' _cffi_free_array_arguments(large_args_free);')
def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode):
self._prnt(' datasize = _cffi_prepare_pointer_call_argument(')
self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % (
self._gettypenum(tp), fromvar, tovar))
self._prnt(' if (datasize != 0) {')
self._prnt(' %s = ((size_t)datasize) <= 640 ? '
'alloca((size_t)datasize) : NULL;' % (tovar,))
self._prnt(' if (_cffi_convert_array_argument(_cffi_type(%d), %s, '
'(char **)&%s,' % (self._gettypenum(tp), fromvar, tovar))
self._prnt(' datasize, &large_args_free) < 0)')
self._prnt(' %s;' % errcode)
self._prnt(' }')
def _convert_expr_from_c(self, tp, var, context):
if isinstance(tp, model.PrimitiveType):
if tp.is_integer_type() and tp.name != '_Bool':
return '_cffi_from_c_int(%s, %s)' % (var, tp.name)
elif tp.name != 'long double':
return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var)
else:
return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, (model.PointerType, model.FunctionPtrType)):
return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, model.ArrayType):
return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
var, self._gettypenum(model.PointerType(tp.item)))
elif isinstance(tp, model.StructOrUnion):
if tp.fldnames is None:
raise TypeError("'%s' is used as %s, but is opaque" % (
tp._get_c_name(), context))
return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, model.EnumType):
return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
else:
raise NotImplementedError(tp)
# ----------
# typedefs: generates no code so far
_generate_cpy_typedef_collecttype = _generate_nothing
_generate_cpy_typedef_decl = _generate_nothing
_generate_cpy_typedef_method = _generate_nothing
_loading_cpy_typedef = _loaded_noop
_loaded_cpy_typedef = _loaded_noop
# ----------
# function declarations
def _generate_cpy_function_collecttype(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
self._do_collect_type(tp)
else:
# don't call _do_collect_type(tp) in this common case,
# otherwise test_autofilled_struct_as_argument fails
for type in tp.args:
self._do_collect_type(type)
self._do_collect_type(tp.result)
def _generate_cpy_function_decl(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
# cannot support vararg functions better than this: check for its
# exact type (including the fixed arguments), and build it as a
# constant function pointer (no CPython wrapper)
self._generate_cpy_const(False, name, tp)
return
prnt = self._prnt
numargs = len(tp.args)
if numargs == 0:
argname = 'noarg'
elif numargs == 1:
argname = 'arg0'
else:
argname = 'args'
prnt('static PyObject *')
prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname))
prnt('{')
#
context = 'argument of %s' % name
for i, type in enumerate(tp.args):
prnt(' %s;' % type.get_c_name(' x%d' % i, context))
#
localvars = set()
freelines = set()
for type in tp.args:
self._extra_local_variables(type, localvars, freelines)
for decl in sorted(localvars):
prnt(' %s;' % (decl,))
#
if not isinstance(tp.result, model.VoidType):
result_code = 'result = '
context = 'result of %s' % name
prnt(' %s;' % tp.result.get_c_name(' result', context))
prnt(' PyObject *pyresult;')
else:
result_code = ''
#
if len(tp.args) > 1:
rng = range(len(tp.args))
for i in rng:
prnt(' PyObject *arg%d;' % i)
prnt()
prnt(' if (!PyArg_ParseTuple(args, "%s:%s", %s))' % (
'O' * numargs, name, ', '.join(['&arg%d' % i for i in rng])))
prnt(' return NULL;')
prnt()
#
for i, type in enumerate(tp.args):
self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i,
'return NULL')
prnt()
#
prnt(' Py_BEGIN_ALLOW_THREADS')
prnt(' _cffi_restore_errno();')
prnt(' { %s%s(%s); }' % (
result_code, name,
', '.join(['x%d' % i for i in range(len(tp.args))])))
prnt(' _cffi_save_errno();')
prnt(' Py_END_ALLOW_THREADS')
prnt()
#
prnt(' (void)self; /* unused */')
if numargs == 0:
prnt(' (void)noarg; /* unused */')
if result_code:
prnt(' pyresult = %s;' %
self._convert_expr_from_c(tp.result, 'result', 'result type'))
for freeline in freelines:
prnt(' ' + freeline)
prnt(' return pyresult;')
else:
for freeline in freelines:
prnt(' ' + freeline)
prnt(' Py_INCREF(Py_None);')
prnt(' return Py_None;')
prnt('}')
prnt()
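    # Sketch of the wrapper generated for ``int add(int, int)``
    # (illustrative, abbreviated):
    #   static PyObject *_cffi_f_add(PyObject *self, PyObject *args) {
    #     int x0; int x1; int result; PyObject *pyresult;
    #     PyObject *arg0; PyObject *arg1;
    #     if (!PyArg_ParseTuple(args, "OO:add", &arg0, &arg1)) return NULL;
    #     /* convert args, call add(x0, x1) with the GIL released */
    #     pyresult = _cffi_from_c_int(result, int);
    #     return pyresult;
    #   }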
def _generate_cpy_function_method(self, tp, name):
if tp.ellipsis:
return
numargs = len(tp.args)
if numargs == 0:
meth = 'METH_NOARGS'
elif numargs == 1:
meth = 'METH_O'
else:
meth = 'METH_VARARGS'
self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth))
_loading_cpy_function = _loaded_noop
def _loaded_cpy_function(self, tp, name, module, library):
if tp.ellipsis:
return
func = getattr(module, name)
setattr(library, name, func)
self._types_of_builtin_functions[func] = tp
# ----------
# named structs
_generate_cpy_struct_collecttype = _generate_nothing
def _generate_cpy_struct_decl(self, tp, name):
assert name == tp.name
self._generate_struct_or_union_decl(tp, 'struct', name)
def _generate_cpy_struct_method(self, tp, name):
self._generate_struct_or_union_method(tp, 'struct', name)
def _loading_cpy_struct(self, tp, name, module):
self._loading_struct_or_union(tp, 'struct', name, module)
def _loaded_cpy_struct(self, tp, name, module, **kwds):
self._loaded_struct_or_union(tp)
_generate_cpy_union_collecttype = _generate_nothing
def _generate_cpy_union_decl(self, tp, name):
assert name == tp.name
self._generate_struct_or_union_decl(tp, 'union', name)
def _generate_cpy_union_method(self, tp, name):
self._generate_struct_or_union_method(tp, 'union', name)
def _loading_cpy_union(self, tp, name, module):
self._loading_struct_or_union(tp, 'union', name, module)
def _loaded_cpy_union(self, tp, name, module, **kwds):
self._loaded_struct_or_union(tp)
def _generate_struct_or_union_decl(self, tp, prefix, name):
if tp.fldnames is None:
return # nothing to do with opaque structs
checkfuncname = '_cffi_check_%s_%s' % (prefix, name)
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
cname = ('%s %s' % (prefix, name)).strip()
#
prnt = self._prnt
prnt('static void %s(%s *p)' % (checkfuncname, cname))
prnt('{')
prnt(' /* only to generate compile-time warnings or errors */')
prnt(' (void)p;')
for fname, ftype, fbitsize, fqual in tp.enumfields():
if (isinstance(ftype, model.PrimitiveType)
and ftype.is_integer_type()) or fbitsize >= 0:
# accept all integers, but complain on float or double
prnt(' (void)((p->%s) << 1);' % fname)
else:
# only accept exactly the type declared.
try:
prnt(' { %s = &p->%s; (void)tmp; }' % (
ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual),
fname))
except VerificationError as e:
prnt(' /* %s */' % str(e)) # cannot verify it, ignore
prnt('}')
prnt('static PyObject *')
prnt('%s(PyObject *self, PyObject *noarg)' % (layoutfuncname,))
prnt('{')
prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname)
prnt(' static Py_ssize_t nums[] = {')
prnt(' sizeof(%s),' % cname)
prnt(' offsetof(struct _cffi_aligncheck, y),')
for fname, ftype, fbitsize, fqual in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
prnt(' offsetof(%s, %s),' % (cname, fname))
if isinstance(ftype, model.ArrayType) and ftype.length is None:
prnt(' 0, /* %s */' % ftype._get_c_name())
else:
prnt(' sizeof(((%s *)0)->%s),' % (cname, fname))
prnt(' -1')
prnt(' };')
prnt(' (void)self; /* unused */')
prnt(' (void)noarg; /* unused */')
prnt(' return _cffi_get_struct_layout(nums);')
prnt(' /* the next line is not executed, but compiled */')
prnt(' %s(0);' % (checkfuncname,))
prnt('}')
prnt()
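    # For ``struct pt { int x; int y; };`` this emits roughly (illustrative):
    #   static void _cffi_check_struct_pt(struct pt *p) {
    #     (void)p; (void)((p->x) << 1); (void)((p->y) << 1);
    #   }
    # plus _cffi_layout_struct_pt(), which reports sizeof, alignment, and
    # each field's offset and size through _cffi_get_struct_layout().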
def _generate_struct_or_union_method(self, tp, prefix, name):
if tp.fldnames is None:
return # nothing to do with opaque structs
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname,
layoutfuncname))
def _loading_struct_or_union(self, tp, prefix, name, module):
if tp.fldnames is None:
return # nothing to do with opaque structs
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
#
function = getattr(module, layoutfuncname)
layout = function()
if isinstance(tp, model.StructOrUnion) and tp.partial:
# use the function()'s sizes and offsets to guide the
# layout of the struct
totalsize = layout[0]
totalalignment = layout[1]
fieldofs = layout[2::2]
fieldsize = layout[3::2]
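            # e.g. (illustrative) ``struct { int x; int y; }`` with 4-byte
            # ints yields layout == [8, 4, 0, 4, 4, 4]: total size, total
            # alignment, then an (offset, size) pair per field.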
tp.force_flatten()
assert len(fieldofs) == len(fieldsize) == len(tp.fldnames)
tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment
else:
cname = ('%s %s' % (prefix, name)).strip()
self._struct_pending_verification[tp] = layout, cname
def _loaded_struct_or_union(self, tp):
if tp.fldnames is None:
return # nothing to do with opaque structs
self.ffi._get_cached_btype(tp) # force 'fixedlayout' to be considered
if tp in self._struct_pending_verification:
# check that the layout sizes and offsets match the real ones
def check(realvalue, expectedvalue, msg):
if realvalue != expectedvalue:
raise VerificationError(
"%s (we have %d, but C compiler says %d)"
% (msg, expectedvalue, realvalue))
ffi = self.ffi
BStruct = ffi._get_cached_btype(tp)
layout, cname = self._struct_pending_verification.pop(tp)
check(layout[0], ffi.sizeof(BStruct), "wrong total size")
check(layout[1], ffi.alignof(BStruct), "wrong total alignment")
i = 2
for fname, ftype, fbitsize, fqual in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
check(layout[i], ffi.offsetof(BStruct, fname),
"wrong offset for field %r" % (fname,))
if layout[i+1] != 0:
BField = ffi._get_cached_btype(ftype)
check(layout[i+1], ffi.sizeof(BField),
"wrong size for field %r" % (fname,))
i += 2
assert i == len(layout)
# ----------
# 'anonymous' declarations. These are produced for anonymous structs
# or unions; the 'name' is obtained by a typedef.
_generate_cpy_anonymous_collecttype = _generate_nothing
def _generate_cpy_anonymous_decl(self, tp, name):
if isinstance(tp, model.EnumType):
self._generate_cpy_enum_decl(tp, name, '')
else:
self._generate_struct_or_union_decl(tp, '', name)
def _generate_cpy_anonymous_method(self, tp, name):
if not isinstance(tp, model.EnumType):
self._generate_struct_or_union_method(tp, '', name)
def _loading_cpy_anonymous(self, tp, name, module):
if isinstance(tp, model.EnumType):
self._loading_cpy_enum(tp, name, module)
else:
self._loading_struct_or_union(tp, '', name, module)
def _loaded_cpy_anonymous(self, tp, name, module, **kwds):
if isinstance(tp, model.EnumType):
self._loaded_cpy_enum(tp, name, module, **kwds)
else:
self._loaded_struct_or_union(tp)
# ----------
# constants, likely declared with '#define'
def _generate_cpy_const(self, is_int, name, tp=None, category='const',
vartp=None, delayed=True, size_too=False,
check_value=None):
prnt = self._prnt
funcname = '_cffi_%s_%s' % (category, name)
prnt('static int %s(PyObject *lib)' % funcname)
prnt('{')
prnt(' PyObject *o;')
prnt(' int res;')
if not is_int:
prnt(' %s;' % (vartp or tp).get_c_name(' i', name))
else:
assert category == 'const'
#
if check_value is not None:
self._check_int_constant_value(name, check_value)
#
if not is_int:
if category == 'var':
realexpr = '&' + name
else:
realexpr = name
prnt(' i = (%s);' % (realexpr,))
prnt(' o = %s;' % (self._convert_expr_from_c(tp, 'i',
'variable type'),))
assert delayed
else:
prnt(' o = _cffi_from_c_int_const(%s);' % name)
prnt(' if (o == NULL)')
prnt(' return -1;')
if size_too:
prnt(' {')
prnt(' PyObject *o1 = o;')
prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));'
% (name,))
prnt(' Py_DECREF(o1);')
prnt(' if (o == NULL)')
prnt(' return -1;')
prnt(' }')
prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name)
prnt(' Py_DECREF(o);')
prnt(' if (res < 0)')
prnt(' return -1;')
prnt(' return %s;' % self._chained_list_constants[delayed])
self._chained_list_constants[delayed] = funcname + '(lib)'
prnt('}')
prnt()
def _generate_cpy_constant_collecttype(self, tp, name):
is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
if not is_int:
self._do_collect_type(tp)
def _generate_cpy_constant_decl(self, tp, name):
is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
self._generate_cpy_const(is_int, name, tp)
_generate_cpy_constant_method = _generate_nothing
_loading_cpy_constant = _loaded_noop
_loaded_cpy_constant = _loaded_noop
# ----------
# enums
def _check_int_constant_value(self, name, value, err_prefix=''):
prnt = self._prnt
if value <= 0:
prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % (
name, name, value))
else:
prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % (
name, name, value))
prnt(' char buf[64];')
prnt(' if ((%s) <= 0)' % name)
prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % name)
prnt(' else')
prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' %
name)
prnt(' PyErr_Format(_cffi_VerificationError,')
prnt(' "%s%s has the real value %s, not %s",')
prnt(' "%s", "%s", buf, "%d");' % (
err_prefix, name, value))
prnt(' return -1;')
prnt(' }')
def _enum_funcname(self, prefix, name):
# "$enum_$1" => "___D_enum____D_1"
name = name.replace('$', '___D_')
return '_cffi_e_%s_%s' % (prefix, name)
def _generate_cpy_enum_decl(self, tp, name, prefix='enum'):
if tp.partial:
for enumerator in tp.enumerators:
self._generate_cpy_const(True, enumerator, delayed=False)
return
#
funcname = self._enum_funcname(prefix, name)
prnt = self._prnt
prnt('static int %s(PyObject *lib)' % funcname)
prnt('{')
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
self._check_int_constant_value(enumerator, enumvalue,
"enum %s: " % name)
prnt(' return %s;' % self._chained_list_constants[True])
self._chained_list_constants[True] = funcname + '(lib)'
prnt('}')
prnt()
_generate_cpy_enum_collecttype = _generate_nothing
_generate_cpy_enum_method = _generate_nothing
def _loading_cpy_enum(self, tp, name, module):
if tp.partial:
enumvalues = [getattr(module, enumerator)
for enumerator in tp.enumerators]
tp.enumvalues = tuple(enumvalues)
tp.partial_resolved = True
def _loaded_cpy_enum(self, tp, name, module, library):
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
setattr(library, enumerator, enumvalue)
# ----------
# macros: for now only for integers
def _generate_cpy_macro_decl(self, tp, name):
if tp == '...':
check_value = None
else:
check_value = tp # an integer
self._generate_cpy_const(True, name, check_value=check_value)
_generate_cpy_macro_collecttype = _generate_nothing
_generate_cpy_macro_method = _generate_nothing
_loading_cpy_macro = _loaded_noop
_loaded_cpy_macro = _loaded_noop
# ----------
# global variables
def _generate_cpy_variable_collecttype(self, tp, name):
if isinstance(tp, model.ArrayType):
tp_ptr = model.PointerType(tp.item)
else:
tp_ptr = model.PointerType(tp)
self._do_collect_type(tp_ptr)
def _generate_cpy_variable_decl(self, tp, name):
if isinstance(tp, model.ArrayType):
tp_ptr = model.PointerType(tp.item)
self._generate_cpy_const(False, name, tp, vartp=tp_ptr,
size_too = tp.length_is_unknown())
else:
tp_ptr = model.PointerType(tp)
self._generate_cpy_const(False, name, tp_ptr, category='var')
_generate_cpy_variable_method = _generate_nothing
_loading_cpy_variable = _loaded_noop
def _loaded_cpy_variable(self, tp, name, module, library):
value = getattr(library, name)
if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the
# sense that "a=..." is forbidden
if tp.length_is_unknown():
assert isinstance(value, tuple)
(value, size) = value
BItemType = self.ffi._get_cached_btype(tp.item)
length, rest = divmod(size, self.ffi.sizeof(BItemType))
if rest != 0:
raise VerificationError(
"bad size: %r does not seem to be an array of %s" %
(name, tp.item))
tp = tp.resolve_length(length)
# 'value' is a <cdata 'type *'> which we have to replace with
# a <cdata 'type[N]'> if the N is actually known
if tp.length is not None:
BArray = self.ffi._get_cached_btype(tp)
value = self.ffi.cast(BArray, value)
setattr(library, name, value)
return
# remove ptr=<cdata 'int *'> from the library instance, and replace
# it by a property on the class, which reads/writes into ptr[0].
ptr = value
delattr(library, name)
def getter(library):
return ptr[0]
def setter(library, value):
ptr[0] = value
setattr(type(library), name, property(getter, setter))
type(library)._cffi_dir.append(name)
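        # Net effect (sketch, with a hypothetical global 'myvar'):
        #   library.myvar        # reads ptr[0], the live C value
        #   library.myvar = 42   # stores into ptr[0]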
# ----------
def _generate_setup_custom(self):
prnt = self._prnt
prnt('static int _cffi_setup_custom(PyObject *lib)')
prnt('{')
prnt(' return %s;' % self._chained_list_constants[True])
prnt('}')
cffimod_header = r'''
#include <Python.h>
#include <stddef.h>
/* this block of #ifs should be kept exactly identical between
c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py
and cffi/_cffi_include.h */
#if defined(_MSC_VER)
# include <malloc.h> /* for alloca() */
# if _MSC_VER < 1600 /* MSVC < 2010 */
typedef __int8 int8_t;
typedef __int16 int16_t;
typedef __int32 int32_t;
typedef __int64 int64_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
typedef __int8 int_least8_t;
typedef __int16 int_least16_t;
typedef __int32 int_least32_t;
typedef __int64 int_least64_t;
typedef unsigned __int8 uint_least8_t;
typedef unsigned __int16 uint_least16_t;
typedef unsigned __int32 uint_least32_t;
typedef unsigned __int64 uint_least64_t;
typedef __int8 int_fast8_t;
typedef __int16 int_fast16_t;
typedef __int32 int_fast32_t;
typedef __int64 int_fast64_t;
typedef unsigned __int8 uint_fast8_t;
typedef unsigned __int16 uint_fast16_t;
typedef unsigned __int32 uint_fast32_t;
typedef unsigned __int64 uint_fast64_t;
typedef __int64 intmax_t;
typedef unsigned __int64 uintmax_t;
# else
# include <stdint.h>
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
# ifndef __cplusplus
typedef unsigned char _Bool;
# endif
# endif
#else
# include <stdint.h>
# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux)
# include <alloca.h>
# endif
#endif
#if PY_MAJOR_VERSION < 3
# undef PyCapsule_CheckExact
# undef PyCapsule_GetPointer
# define PyCapsule_CheckExact(capsule) (PyCObject_Check(capsule))
# define PyCapsule_GetPointer(capsule, name) \
(PyCObject_AsVoidPtr(capsule))
#endif
#if PY_MAJOR_VERSION >= 3
# define PyInt_FromLong PyLong_FromLong
#endif
#define _cffi_from_c_double PyFloat_FromDouble
#define _cffi_from_c_float PyFloat_FromDouble
#define _cffi_from_c_long PyInt_FromLong
#define _cffi_from_c_ulong PyLong_FromUnsignedLong
#define _cffi_from_c_longlong PyLong_FromLongLong
#define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong
#define _cffi_from_c__Bool PyBool_FromLong
#define _cffi_to_c_double PyFloat_AsDouble
#define _cffi_to_c_float PyFloat_AsDouble
#define _cffi_from_c_int_const(x) \
(((x) > 0) ? \
((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \
PyInt_FromLong((long)(x)) : \
PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \
((long long)(x) >= (long long)LONG_MIN) ? \
PyInt_FromLong((long)(x)) : \
PyLong_FromLongLong((long long)(x)))
#define _cffi_from_c_int(x, type) \
(((type)-1) > 0 ? /* unsigned */ \
(sizeof(type) < sizeof(long) ? \
PyInt_FromLong((long)x) : \
sizeof(type) == sizeof(long) ? \
PyLong_FromUnsignedLong((unsigned long)x) : \
PyLong_FromUnsignedLongLong((unsigned long long)x)) : \
(sizeof(type) <= sizeof(long) ? \
PyInt_FromLong((long)x) : \
PyLong_FromLongLong((long long)x)))
#define _cffi_to_c_int(o, type) \
((type)( \
sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \
: (type)_cffi_to_c_i8(o)) : \
sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \
: (type)_cffi_to_c_i16(o)) : \
sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \
: (type)_cffi_to_c_i32(o)) : \
sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \
: (type)_cffi_to_c_i64(o)) : \
(Py_FatalError("unsupported size for type " #type), (type)0)))
#define _cffi_to_c_i8 \
((int(*)(PyObject *))_cffi_exports[1])
#define _cffi_to_c_u8 \
((int(*)(PyObject *))_cffi_exports[2])
#define _cffi_to_c_i16 \
((int(*)(PyObject *))_cffi_exports[3])
#define _cffi_to_c_u16 \
((int(*)(PyObject *))_cffi_exports[4])
#define _cffi_to_c_i32 \
((int(*)(PyObject *))_cffi_exports[5])
#define _cffi_to_c_u32 \
((unsigned int(*)(PyObject *))_cffi_exports[6])
#define _cffi_to_c_i64 \
((long long(*)(PyObject *))_cffi_exports[7])
#define _cffi_to_c_u64 \
((unsigned long long(*)(PyObject *))_cffi_exports[8])
#define _cffi_to_c_char \
((int(*)(PyObject *))_cffi_exports[9])
#define _cffi_from_c_pointer \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[10])
#define _cffi_to_c_pointer \
((char *(*)(PyObject *, CTypeDescrObject *))_cffi_exports[11])
#define _cffi_get_struct_layout \
((PyObject *(*)(Py_ssize_t[]))_cffi_exports[12])
#define _cffi_restore_errno \
((void(*)(void))_cffi_exports[13])
#define _cffi_save_errno \
((void(*)(void))_cffi_exports[14])
#define _cffi_from_c_char \
((PyObject *(*)(char))_cffi_exports[15])
#define _cffi_from_c_deref \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[16])
#define _cffi_to_c \
((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[17])
#define _cffi_from_c_struct \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[18])
#define _cffi_to_c_wchar_t \
((wchar_t(*)(PyObject *))_cffi_exports[19])
#define _cffi_from_c_wchar_t \
((PyObject *(*)(wchar_t))_cffi_exports[20])
#define _cffi_to_c_long_double \
((long double(*)(PyObject *))_cffi_exports[21])
#define _cffi_to_c__Bool \
((_Bool(*)(PyObject *))_cffi_exports[22])
#define _cffi_prepare_pointer_call_argument \
((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23])
#define _cffi_convert_array_from_object \
((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24])
#define _CFFI_NUM_EXPORTS 25
typedef struct _ctypedescr CTypeDescrObject;
static void *_cffi_exports[_CFFI_NUM_EXPORTS];
static PyObject *_cffi_types, *_cffi_VerificationError;
static int _cffi_setup_custom(PyObject *lib); /* forward */
static PyObject *_cffi_setup(PyObject *self, PyObject *args)
{
PyObject *library;
int was_alive = (_cffi_types != NULL);
(void)self; /* unused */
if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError,
&library))
return NULL;
Py_INCREF(_cffi_types);
Py_INCREF(_cffi_VerificationError);
if (_cffi_setup_custom(library) < 0)
return NULL;
return PyBool_FromLong(was_alive);
}
union _cffi_union_alignment_u {
unsigned char m_char;
unsigned short m_short;
unsigned int m_int;
unsigned long m_long;
unsigned long long m_longlong;
float m_float;
double m_double;
long double m_longdouble;
};
struct _cffi_freeme_s {
struct _cffi_freeme_s *next;
union _cffi_union_alignment_u alignment;
};
#ifdef __GNUC__
__attribute__((unused))
#endif
static int _cffi_convert_array_argument(CTypeDescrObject *ctptr, PyObject *arg,
char **output_data, Py_ssize_t datasize,
struct _cffi_freeme_s **freeme)
{
char *p;
if (datasize < 0)
return -1;
p = *output_data;
if (p == NULL) {
struct _cffi_freeme_s *fp = (struct _cffi_freeme_s *)PyObject_Malloc(
offsetof(struct _cffi_freeme_s, alignment) + (size_t)datasize);
if (fp == NULL)
return -1;
fp->next = *freeme;
*freeme = fp;
p = *output_data = (char *)&fp->alignment;
}
memset((void *)p, 0, (size_t)datasize);
return _cffi_convert_array_from_object(p, ctptr, arg);
}
#ifdef __GNUC__
__attribute__((unused))
#endif
static void _cffi_free_array_arguments(struct _cffi_freeme_s *freeme)
{
do {
void *p = (void *)freeme;
freeme = freeme->next;
PyObject_Free(p);
} while (freeme != NULL);
}
static int _cffi_init(void)
{
PyObject *module, *c_api_object = NULL;
module = PyImport_ImportModule("_cffi_backend");
if (module == NULL)
goto failure;
c_api_object = PyObject_GetAttrString(module, "_C_API");
if (c_api_object == NULL)
goto failure;
if (!PyCapsule_CheckExact(c_api_object)) {
PyErr_SetNone(PyExc_ImportError);
goto failure;
}
memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"),
_CFFI_NUM_EXPORTS * sizeof(void *));
Py_DECREF(module);
Py_DECREF(c_api_object);
return 0;
failure:
Py_XDECREF(module);
Py_XDECREF(c_api_object);
return -1;
}
#define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num))
/**********/
'''
| 43,320 | Python | 39.22377 | 80 | 0.514358 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/verifier.py | #
# DEPRECATED: implementation for ffi.verify()
#
import sys, os, binascii, shutil, io
from . import __version_verifier_modules__
from . import ffiplatform
from .error import VerificationError
if sys.version_info >= (3, 3):
import importlib.machinery
def _extension_suffixes():
return importlib.machinery.EXTENSION_SUFFIXES[:]
else:
import imp
def _extension_suffixes():
return [suffix for suffix, _, type in imp.get_suffixes()
if type == imp.C_EXTENSION]
if sys.version_info >= (3,):
NativeIO = io.StringIO
else:
class NativeIO(io.BytesIO):
def write(self, s):
if isinstance(s, unicode):
s = s.encode('ascii')
super(NativeIO, self).write(s)
class Verifier(object):
def __init__(self, ffi, preamble, tmpdir=None, modulename=None,
ext_package=None, tag='', force_generic_engine=False,
source_extension='.c', flags=None, relative_to=None, **kwds):
if ffi._parser._uses_new_feature:
raise VerificationError(
"feature not supported with ffi.verify(), but only "
"with ffi.set_source(): %s" % (ffi._parser._uses_new_feature,))
self.ffi = ffi
self.preamble = preamble
if not modulename:
flattened_kwds = ffiplatform.flatten(kwds)
vengine_class = _locate_engine_class(ffi, force_generic_engine)
self._vengine = vengine_class(self)
self._vengine.patch_extension_kwds(kwds)
self.flags = flags
self.kwds = self.make_relative_to(kwds, relative_to)
#
if modulename:
if tag:
raise TypeError("can't specify both 'modulename' and 'tag'")
else:
key = '\x00'.join(['%d.%d' % sys.version_info[:2],
__version_verifier_modules__,
preamble, flattened_kwds] +
ffi._cdefsources)
if sys.version_info >= (3,):
key = key.encode('utf-8')
k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff)
k1 = k1.lstrip('0x').rstrip('L')
k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff)
k2 = k2.lstrip('0').rstrip('L')
modulename = '_cffi_%s_%s%s%s' % (tag, self._vengine._class_key,
k1, k2)
suffix = _get_so_suffixes()[0]
self.tmpdir = tmpdir or _caller_dir_pycache()
self.sourcefilename = os.path.join(self.tmpdir, modulename + source_extension)
self.modulefilename = os.path.join(self.tmpdir, modulename + suffix)
self.ext_package = ext_package
self._has_source = False
self._has_module = False
def write_source(self, file=None):
"""Write the C source code. It is produced in 'self.sourcefilename',
which can be tweaked beforehand."""
with self.ffi._lock:
if self._has_source and file is None:
raise VerificationError(
"source code already written")
self._write_source(file)
def compile_module(self):
"""Write the C source code (if not done already) and compile it.
This produces a dynamic link library in 'self.modulefilename'."""
with self.ffi._lock:
if self._has_module:
raise VerificationError("module already compiled")
if not self._has_source:
self._write_source()
self._compile_module()
def load_library(self):
"""Get a C module from this Verifier instance.
Returns an instance of a FFILibrary class that behaves like the
objects returned by ffi.dlopen(), but that delegates all
operations to the C module. If necessary, the C code is written
and compiled first.
"""
with self.ffi._lock:
if not self._has_module:
self._locate_module()
if not self._has_module:
if not self._has_source:
self._write_source()
self._compile_module()
return self._load_library()
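    # Typical flow (illustrative; the C snippet is hypothetical):
    #   ffi = FFI(); ffi.cdef("int add(int, int);")
    #   lib = ffi.verify("int add(int a, int b) { return a + b; }")
    #   lib.add(2, 3)   # -> 5; verify() drives a Verifier like this one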
def get_module_name(self):
basename = os.path.basename(self.modulefilename)
# kill both the .so extension and the other .'s, as introduced
# by Python 3: 'basename.cpython-33m.so'
basename = basename.split('.', 1)[0]
# and the _d added in Python 2 debug builds --- but try to be
# conservative and not kill a legitimate _d
if basename.endswith('_d') and hasattr(sys, 'gettotalrefcount'):
basename = basename[:-2]
return basename
def get_extension(self):
ffiplatform._hack_at_distutils() # backward compatibility hack
if not self._has_source:
with self.ffi._lock:
if not self._has_source:
self._write_source()
sourcename = ffiplatform.maybe_relative_path(self.sourcefilename)
modname = self.get_module_name()
return ffiplatform.get_extension(sourcename, modname, **self.kwds)
def generates_python_module(self):
return self._vengine._gen_python_module
def make_relative_to(self, kwds, relative_to):
if relative_to and os.path.dirname(relative_to):
dirname = os.path.dirname(relative_to)
kwds = kwds.copy()
for key in ffiplatform.LIST_OF_FILE_NAMES:
if key in kwds:
lst = kwds[key]
if not isinstance(lst, (list, tuple)):
raise TypeError("keyword '%s' should be a list or tuple"
% (key,))
lst = [os.path.join(dirname, fn) for fn in lst]
kwds[key] = lst
return kwds
# ----------
def _locate_module(self):
if not os.path.isfile(self.modulefilename):
if self.ext_package:
try:
pkg = __import__(self.ext_package, None, None, ['__doc__'])
except ImportError:
return # cannot import the package itself, give up
# (e.g. it might be called differently before installation)
path = pkg.__path__
else:
path = None
filename = self._vengine.find_module(self.get_module_name(), path,
_get_so_suffixes())
if filename is None:
return
self.modulefilename = filename
self._vengine.collect_types()
self._has_module = True
def _write_source_to(self, file):
self._vengine._f = file
try:
self._vengine.write_source_to_f()
finally:
del self._vengine._f
def _write_source(self, file=None):
if file is not None:
self._write_source_to(file)
else:
            # Write our source file to an in-memory file.
f = NativeIO()
self._write_source_to(f)
source_data = f.getvalue()
# Determine if this matches the current file
if os.path.exists(self.sourcefilename):
with open(self.sourcefilename, "r") as fp:
needs_written = not (fp.read() == source_data)
else:
needs_written = True
# Actually write the file out if it doesn't match
if needs_written:
_ensure_dir(self.sourcefilename)
with open(self.sourcefilename, "w") as fp:
fp.write(source_data)
            # Either way, the source is now up to date on disk.
self._has_source = True
def _compile_module(self):
# compile this C source
tmpdir = os.path.dirname(self.sourcefilename)
outputfilename = ffiplatform.compile(tmpdir, self.get_extension())
try:
same = ffiplatform.samefile(outputfilename, self.modulefilename)
except OSError:
same = False
if not same:
_ensure_dir(self.modulefilename)
shutil.move(outputfilename, self.modulefilename)
self._has_module = True
def _load_library(self):
assert self._has_module
if self.flags is not None:
return self._vengine.load_library(self.flags)
else:
return self._vengine.load_library()
# ____________________________________________________________
_FORCE_GENERIC_ENGINE = False # for tests
def _locate_engine_class(ffi, force_generic_engine):
if _FORCE_GENERIC_ENGINE:
force_generic_engine = True
if not force_generic_engine:
if '__pypy__' in sys.builtin_module_names:
force_generic_engine = True
else:
try:
import _cffi_backend
except ImportError:
_cffi_backend = '?'
if ffi._backend is not _cffi_backend:
force_generic_engine = True
if force_generic_engine:
from . import vengine_gen
return vengine_gen.VGenericEngine
else:
from . import vengine_cpy
return vengine_cpy.VCPythonEngine
# ____________________________________________________________
_TMPDIR = None
def _caller_dir_pycache():
if _TMPDIR:
return _TMPDIR
result = os.environ.get('CFFI_TMPDIR')
if result:
return result
filename = sys._getframe(2).f_code.co_filename
return os.path.abspath(os.path.join(os.path.dirname(filename),
'__pycache__'))
def set_tmpdir(dirname):
"""Set the temporary directory to use instead of __pycache__."""
global _TMPDIR
_TMPDIR = dirname
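# e.g. set_tmpdir('/tmp/cffi-cache') makes the generated _cffi_*.c/.so files
# land there instead of a __pycache__ next to the caller (path illustrative).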
def cleanup_tmpdir(tmpdir=None, keep_so=False):
"""Clean up the temporary directory by removing all files in it
called `_cffi_*.{c,so}` as well as the `build` subdirectory."""
tmpdir = tmpdir or _caller_dir_pycache()
try:
filelist = os.listdir(tmpdir)
except OSError:
return
if keep_so:
suffix = '.c' # only remove .c files
else:
suffix = _get_so_suffixes()[0].lower()
for fn in filelist:
if fn.lower().startswith('_cffi_') and (
fn.lower().endswith(suffix) or fn.lower().endswith('.c')):
try:
os.unlink(os.path.join(tmpdir, fn))
except OSError:
pass
clean_dir = [os.path.join(tmpdir, 'build')]
for dir in clean_dir:
try:
for fn in os.listdir(dir):
fn = os.path.join(dir, fn)
if os.path.isdir(fn):
clean_dir.append(fn)
else:
os.unlink(fn)
except OSError:
pass
def _get_so_suffixes():
suffixes = _extension_suffixes()
if not suffixes:
# bah, no C_EXTENSION available. Occurs on pypy without cpyext
if sys.platform == 'win32':
suffixes = [".pyd"]
else:
suffixes = [".so"]
return suffixes
def _ensure_dir(filename):
dirname = os.path.dirname(filename)
if dirname and not os.path.isdir(dirname):
os.makedirs(dirname)
| 11,253 | Python | 35.538961 | 86 | 0.545455 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/parse_c_type.h |
/* This part is from file 'cffi/parse_c_type.h'. It is copied at the
beginning of C sources generated by CFFI's ffi.set_source(). */
typedef void *_cffi_opcode_t;
#define _CFFI_OP(opcode, arg) (_cffi_opcode_t)(opcode | (((uintptr_t)(arg)) << 8))
#define _CFFI_GETOP(cffi_opcode) ((unsigned char)(uintptr_t)cffi_opcode)
#define _CFFI_GETARG(cffi_opcode) (((intptr_t)cffi_opcode) >> 8)
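/* Example (illustrative): _CFFI_OP(_CFFI_OP_POINTER, 42) packs opcode 3 and
   argument 42 into one pointer-sized word, (42 << 8) | 3; _CFFI_GETOP()
   recovers the 3 and _CFFI_GETARG() the 42. */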
#define _CFFI_OP_PRIMITIVE 1
#define _CFFI_OP_POINTER 3
#define _CFFI_OP_ARRAY 5
#define _CFFI_OP_OPEN_ARRAY 7
#define _CFFI_OP_STRUCT_UNION 9
#define _CFFI_OP_ENUM 11
#define _CFFI_OP_FUNCTION 13
#define _CFFI_OP_FUNCTION_END 15
#define _CFFI_OP_NOOP 17
#define _CFFI_OP_BITFIELD 19
#define _CFFI_OP_TYPENAME 21
#define _CFFI_OP_CPYTHON_BLTN_V 23 // varargs
#define _CFFI_OP_CPYTHON_BLTN_N 25 // noargs
#define _CFFI_OP_CPYTHON_BLTN_O 27 // O (i.e. a single arg)
#define _CFFI_OP_CONSTANT 29
#define _CFFI_OP_CONSTANT_INT 31
#define _CFFI_OP_GLOBAL_VAR 33
#define _CFFI_OP_DLOPEN_FUNC 35
#define _CFFI_OP_DLOPEN_CONST 37
#define _CFFI_OP_GLOBAL_VAR_F 39
#define _CFFI_OP_EXTERN_PYTHON 41
#define _CFFI_PRIM_VOID 0
#define _CFFI_PRIM_BOOL 1
#define _CFFI_PRIM_CHAR 2
#define _CFFI_PRIM_SCHAR 3
#define _CFFI_PRIM_UCHAR 4
#define _CFFI_PRIM_SHORT 5
#define _CFFI_PRIM_USHORT 6
#define _CFFI_PRIM_INT 7
#define _CFFI_PRIM_UINT 8
#define _CFFI_PRIM_LONG 9
#define _CFFI_PRIM_ULONG 10
#define _CFFI_PRIM_LONGLONG 11
#define _CFFI_PRIM_ULONGLONG 12
#define _CFFI_PRIM_FLOAT 13
#define _CFFI_PRIM_DOUBLE 14
#define _CFFI_PRIM_LONGDOUBLE 15
#define _CFFI_PRIM_WCHAR 16
#define _CFFI_PRIM_INT8 17
#define _CFFI_PRIM_UINT8 18
#define _CFFI_PRIM_INT16 19
#define _CFFI_PRIM_UINT16 20
#define _CFFI_PRIM_INT32 21
#define _CFFI_PRIM_UINT32 22
#define _CFFI_PRIM_INT64 23
#define _CFFI_PRIM_UINT64 24
#define _CFFI_PRIM_INTPTR 25
#define _CFFI_PRIM_UINTPTR 26
#define _CFFI_PRIM_PTRDIFF 27
#define _CFFI_PRIM_SIZE 28
#define _CFFI_PRIM_SSIZE 29
#define _CFFI_PRIM_INT_LEAST8 30
#define _CFFI_PRIM_UINT_LEAST8 31
#define _CFFI_PRIM_INT_LEAST16 32
#define _CFFI_PRIM_UINT_LEAST16 33
#define _CFFI_PRIM_INT_LEAST32 34
#define _CFFI_PRIM_UINT_LEAST32 35
#define _CFFI_PRIM_INT_LEAST64 36
#define _CFFI_PRIM_UINT_LEAST64 37
#define _CFFI_PRIM_INT_FAST8 38
#define _CFFI_PRIM_UINT_FAST8 39
#define _CFFI_PRIM_INT_FAST16 40
#define _CFFI_PRIM_UINT_FAST16 41
#define _CFFI_PRIM_INT_FAST32 42
#define _CFFI_PRIM_UINT_FAST32 43
#define _CFFI_PRIM_INT_FAST64 44
#define _CFFI_PRIM_UINT_FAST64 45
#define _CFFI_PRIM_INTMAX 46
#define _CFFI_PRIM_UINTMAX 47
#define _CFFI_PRIM_FLOATCOMPLEX 48
#define _CFFI_PRIM_DOUBLECOMPLEX 49
#define _CFFI_PRIM_CHAR16 50
#define _CFFI_PRIM_CHAR32 51
#define _CFFI__NUM_PRIM 52
#define _CFFI__UNKNOWN_PRIM (-1)
#define _CFFI__UNKNOWN_FLOAT_PRIM (-2)
#define _CFFI__UNKNOWN_LONG_DOUBLE (-3)
#define _CFFI__IO_FILE_STRUCT (-1)
struct _cffi_global_s {
const char *name;
void *address;
_cffi_opcode_t type_op;
void *size_or_direct_fn; // OP_GLOBAL_VAR: size, or 0 if unknown
// OP_CPYTHON_BLTN_*: addr of direct function
};
struct _cffi_getconst_s {
unsigned long long value;
const struct _cffi_type_context_s *ctx;
int gindex;
};
struct _cffi_struct_union_s {
const char *name;
int type_index; // -> _cffi_types, on a OP_STRUCT_UNION
int flags; // _CFFI_F_* flags below
size_t size;
int alignment;
int first_field_index; // -> _cffi_fields array
int num_fields;
};
#define _CFFI_F_UNION 0x01 // is a union, not a struct
#define _CFFI_F_CHECK_FIELDS 0x02 // complain if fields are not in the
// "standard layout" or if some are missing
#define _CFFI_F_PACKED 0x04 // for CHECK_FIELDS, assume a packed struct
#define _CFFI_F_EXTERNAL 0x08 // in some other ffi.include()
#define _CFFI_F_OPAQUE 0x10 // opaque
struct _cffi_field_s {
const char *name;
size_t field_offset;
size_t field_size;
_cffi_opcode_t field_type_op;
};
struct _cffi_enum_s {
const char *name;
int type_index; // -> _cffi_types, on a OP_ENUM
int type_prim; // _CFFI_PRIM_xxx
const char *enumerators; // comma-delimited string
};
struct _cffi_typename_s {
const char *name;
int type_index; /* if opaque, points to a possibly artificial
OP_STRUCT which is itself opaque */
};
struct _cffi_type_context_s {
_cffi_opcode_t *types;
const struct _cffi_global_s *globals;
const struct _cffi_field_s *fields;
const struct _cffi_struct_union_s *struct_unions;
const struct _cffi_enum_s *enums;
const struct _cffi_typename_s *typenames;
int num_globals;
int num_struct_unions;
int num_enums;
int num_typenames;
const char *const *includes;
int num_types;
int flags; /* future extension */
};
struct _cffi_parse_info_s {
const struct _cffi_type_context_s *ctx;
_cffi_opcode_t *output;
unsigned int output_size;
size_t error_location;
const char *error_message;
};
struct _cffi_externpy_s {
const char *name;
size_t size_of_result;
void *reserved1, *reserved2;
};
#ifdef _CFFI_INTERNAL
static int parse_c_type(struct _cffi_parse_info_s *info, const char *input);
static int search_in_globals(const struct _cffi_type_context_s *ctx,
const char *search, size_t search_len);
static int search_in_struct_unions(const struct _cffi_type_context_s *ctx,
const char *search, size_t search_len);
#endif
| 5,976 | C | 31.840659 | 84 | 0.63002 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/__init__.py | __all__ = ['FFI', 'VerificationError', 'VerificationMissing', 'CDefError',
'FFIError']
from .api import FFI
from .error import CDefError, FFIError, VerificationError, VerificationMissing
from .error import PkgConfigError
__version__ = "1.15.1"
__version_info__ = (1, 15, 1)
# The verifier module file names are based on the CRC32 of a string that
# contains the following version number. It may be older than __version__
# if nothing is clearly incompatible.
__version_verifier_modules__ = "0.8.6"
| 513 | Python | 33.266664 | 78 | 0.71345 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/commontypes.py | import sys
from . import model
from .error import FFIError
COMMON_TYPES = {}
try:
# fetch "bool" and all simple Windows types
from _cffi_backend import _get_common_types
_get_common_types(COMMON_TYPES)
except ImportError:
pass
COMMON_TYPES['FILE'] = model.unknown_type('FILE', '_IO_FILE')
COMMON_TYPES['bool'] = '_Bool' # in case we got ImportError above
for _type in model.PrimitiveType.ALL_PRIMITIVE_TYPES:
if _type.endswith('_t'):
COMMON_TYPES[_type] = _type
del _type
_CACHE = {}
def resolve_common_type(parser, commontype):
try:
return _CACHE[commontype]
except KeyError:
cdecl = COMMON_TYPES.get(commontype, commontype)
if not isinstance(cdecl, str):
result, quals = cdecl, 0 # cdecl is already a BaseType
elif cdecl in model.PrimitiveType.ALL_PRIMITIVE_TYPES:
result, quals = model.PrimitiveType(cdecl), 0
elif cdecl == 'set-unicode-needed':
raise FFIError("The Windows type %r is only available after "
"you call ffi.set_unicode()" % (commontype,))
else:
if commontype == cdecl:
raise FFIError(
"Unsupported type: %r. Please look at "
"http://cffi.readthedocs.io/en/latest/cdef.html#ffi-cdef-limitations "
"and file an issue if you think this type should really "
"be supported." % (commontype,))
result, quals = parser.parse_type_and_quals(cdecl) # recursive
assert isinstance(result, model.BaseTypeByIdentity)
_CACHE[commontype] = result, quals
return result, quals
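# e.g. (illustrative) resolve_common_type(parser, 'FILE') returns the opaque
# '_IO_FILE' type registered in COMMON_TYPES above (paired with quals 0),
# memoized in _CACHE for subsequent lookups.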
# ____________________________________________________________
# extra types for Windows (most of them are in commontypes.c)
def win_common_types():
return {
"UNICODE_STRING": model.StructType(
"_UNICODE_STRING",
["Length",
"MaximumLength",
"Buffer"],
[model.PrimitiveType("unsigned short"),
model.PrimitiveType("unsigned short"),
model.PointerType(model.PrimitiveType("wchar_t"))],
[-1, -1, -1]),
"PUNICODE_STRING": "UNICODE_STRING *",
"PCUNICODE_STRING": "const UNICODE_STRING *",
"TBYTE": "set-unicode-needed",
"TCHAR": "set-unicode-needed",
"LPCTSTR": "set-unicode-needed",
"PCTSTR": "set-unicode-needed",
"LPTSTR": "set-unicode-needed",
"PTSTR": "set-unicode-needed",
"PTBYTE": "set-unicode-needed",
"PTCHAR": "set-unicode-needed",
}
if sys.platform == 'win32':
COMMON_TYPES.update(win_common_types())
| 2,689 | Python | 32.209876 | 78 | 0.57791 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/model.py | import types
import weakref
from .lock import allocate_lock
from .error import CDefError, VerificationError, VerificationMissing
# type qualifiers
Q_CONST = 0x01
Q_RESTRICT = 0x02
Q_VOLATILE = 0x04
def qualify(quals, replace_with):
if quals & Q_CONST:
replace_with = ' const ' + replace_with.lstrip()
if quals & Q_VOLATILE:
replace_with = ' volatile ' + replace_with.lstrip()
if quals & Q_RESTRICT:
# It seems that __restrict is supported by gcc and msvc.
# If you hit some different compiler, add a #define in
# _cffi_include.h for it (and in its copies, documented there)
replace_with = ' __restrict ' + replace_with.lstrip()
return replace_with
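# For instance, qualify(Q_CONST | Q_VOLATILE, 'x') returns ' volatile const x':
# each set flag prepends its keyword to the stripped replacement string.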
class BaseTypeByIdentity(object):
is_array_type = False
is_raw_function = False
def get_c_name(self, replace_with='', context='a C file', quals=0):
result = self.c_name_with_marker
assert result.count('&') == 1
# some logic duplication with ffi.getctype()... :-(
replace_with = replace_with.strip()
if replace_with:
if replace_with.startswith('*') and '&[' in result:
replace_with = '(%s)' % replace_with
elif not replace_with[0] in '[(':
replace_with = ' ' + replace_with
replace_with = qualify(quals, replace_with)
result = result.replace('&', replace_with)
if '$' in result:
raise VerificationError(
"cannot generate '%s' in %s: unknown type name"
% (self._get_c_name(), context))
return result
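    # e.g. (illustrative) a pointer-to-int type has c_name_with_marker ==
    # 'int *&', so get_c_name('p') -> 'int * p' and get_c_name() -> 'int *'.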
def _get_c_name(self):
return self.c_name_with_marker.replace('&', '')
def has_c_name(self):
return '$' not in self._get_c_name()
def is_integer_type(self):
return False
def get_cached_btype(self, ffi, finishlist, can_delay=False):
try:
BType = ffi._cached_btypes[self]
except KeyError:
BType = self.build_backend_type(ffi, finishlist)
BType2 = ffi._cached_btypes.setdefault(self, BType)
assert BType2 is BType
return BType
def __repr__(self):
return '<%s>' % (self._get_c_name(),)
def _get_items(self):
return [(name, getattr(self, name)) for name in self._attrs_]
class BaseType(BaseTypeByIdentity):
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self._get_items() == other._get_items())
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.__class__, tuple(self._get_items())))
class VoidType(BaseType):
_attrs_ = ()
def __init__(self):
self.c_name_with_marker = 'void&'
def build_backend_type(self, ffi, finishlist):
return global_cache(self, ffi, 'new_void_type')
void_type = VoidType()
class BasePrimitiveType(BaseType):
def is_complex_type(self):
return False
class PrimitiveType(BasePrimitiveType):
_attrs_ = ('name',)
ALL_PRIMITIVE_TYPES = {
'char': 'c',
'short': 'i',
'int': 'i',
'long': 'i',
'long long': 'i',
'signed char': 'i',
'unsigned char': 'i',
'unsigned short': 'i',
'unsigned int': 'i',
'unsigned long': 'i',
'unsigned long long': 'i',
'float': 'f',
'double': 'f',
'long double': 'f',
'float _Complex': 'j',
'double _Complex': 'j',
'_Bool': 'i',
# the following types are not primitive in the C sense
'wchar_t': 'c',
'char16_t': 'c',
'char32_t': 'c',
'int8_t': 'i',
'uint8_t': 'i',
'int16_t': 'i',
'uint16_t': 'i',
'int32_t': 'i',
'uint32_t': 'i',
'int64_t': 'i',
'uint64_t': 'i',
'int_least8_t': 'i',
'uint_least8_t': 'i',
'int_least16_t': 'i',
'uint_least16_t': 'i',
'int_least32_t': 'i',
'uint_least32_t': 'i',
'int_least64_t': 'i',
'uint_least64_t': 'i',
'int_fast8_t': 'i',
'uint_fast8_t': 'i',
'int_fast16_t': 'i',
'uint_fast16_t': 'i',
'int_fast32_t': 'i',
'uint_fast32_t': 'i',
'int_fast64_t': 'i',
'uint_fast64_t': 'i',
'intptr_t': 'i',
'uintptr_t': 'i',
'intmax_t': 'i',
'uintmax_t': 'i',
'ptrdiff_t': 'i',
'size_t': 'i',
'ssize_t': 'i',
}
def __init__(self, name):
assert name in self.ALL_PRIMITIVE_TYPES
self.name = name
self.c_name_with_marker = name + '&'
def is_char_type(self):
return self.ALL_PRIMITIVE_TYPES[self.name] == 'c'
def is_integer_type(self):
return self.ALL_PRIMITIVE_TYPES[self.name] == 'i'
def is_float_type(self):
return self.ALL_PRIMITIVE_TYPES[self.name] == 'f'
def is_complex_type(self):
return self.ALL_PRIMITIVE_TYPES[self.name] == 'j'
def build_backend_type(self, ffi, finishlist):
return global_cache(self, ffi, 'new_primitive_type', self.name)
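# Illustrative: the one-letter codes in ALL_PRIMITIVE_TYPES drive the
# predicates above, e.g.
#
#     PrimitiveType('int').is_integer_type()      # True  ('i')
#     PrimitiveType('double').is_float_type()     # True  ('f')
#     PrimitiveType('wchar_t').is_char_type()     # True  ('c')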
class UnknownIntegerType(BasePrimitiveType):
_attrs_ = ('name',)
def __init__(self, name):
self.name = name
self.c_name_with_marker = name + '&'
def is_integer_type(self):
return True
def build_backend_type(self, ffi, finishlist):
raise NotImplementedError("integer type '%s' can only be used after "
"compilation" % self.name)
class UnknownFloatType(BasePrimitiveType):
_attrs_ = ('name', )
def __init__(self, name):
self.name = name
self.c_name_with_marker = name + '&'
def build_backend_type(self, ffi, finishlist):
raise NotImplementedError("float type '%s' can only be used after "
"compilation" % self.name)
class BaseFunctionType(BaseType):
_attrs_ = ('args', 'result', 'ellipsis', 'abi')
def __init__(self, args, result, ellipsis, abi=None):
self.args = args
self.result = result
self.ellipsis = ellipsis
self.abi = abi
#
reprargs = [arg._get_c_name() for arg in self.args]
if self.ellipsis:
reprargs.append('...')
reprargs = reprargs or ['void']
replace_with = self._base_pattern % (', '.join(reprargs),)
if abi is not None:
replace_with = replace_with[:1] + abi + ' ' + replace_with[1:]
self.c_name_with_marker = (
self.result.c_name_with_marker.replace('&', replace_with))
class RawFunctionType(BaseFunctionType):
# Corresponds to a C type like 'int(int)', which is the C type of
# a function, but not a pointer-to-function. The backend has no
# notion of such a type; it's used temporarily by parsing.
_base_pattern = '(&)(%s)'
is_raw_function = True
def build_backend_type(self, ffi, finishlist):
raise CDefError("cannot render the type %r: it is a function "
"type, not a pointer-to-function type" % (self,))
def as_function_pointer(self):
return FunctionPtrType(self.args, self.result, self.ellipsis, self.abi)
class FunctionPtrType(BaseFunctionType):
_base_pattern = '(*&)(%s)'
def build_backend_type(self, ffi, finishlist):
result = self.result.get_cached_btype(ffi, finishlist)
args = []
for tp in self.args:
args.append(tp.get_cached_btype(ffi, finishlist))
abi_args = ()
if self.abi == "__stdcall":
if not self.ellipsis: # __stdcall ignored for variadic funcs
try:
abi_args = (ffi._backend.FFI_STDCALL,)
except AttributeError:
pass
return global_cache(self, ffi, 'new_function_type',
tuple(args), result, self.ellipsis, *abi_args)
def as_raw_function(self):
return RawFunctionType(self.args, self.result, self.ellipsis, self.abi)
class PointerType(BaseType):
_attrs_ = ('totype', 'quals')
def __init__(self, totype, quals=0):
self.totype = totype
self.quals = quals
extra = qualify(quals, " *&")
if totype.is_array_type:
extra = "(%s)" % (extra.lstrip(),)
self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra)
def build_backend_type(self, ffi, finishlist):
BItem = self.totype.get_cached_btype(ffi, finishlist, can_delay=True)
return global_cache(self, ffi, 'new_pointer_type', BItem)
voidp_type = PointerType(void_type)
def ConstPointerType(totype):
return PointerType(totype, Q_CONST)
const_voidp_type = ConstPointerType(void_type)
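# Illustrative rendering (a sketch, traced through get_c_name()):
#
#     PointerType(PrimitiveType('int')).get_c_name('p')   # -> 'int * p'
#     const_voidp_type.get_c_name()                       # -> 'void const *'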
class NamedPointerType(PointerType):
_attrs_ = ('totype', 'name')
def __init__(self, totype, name, quals=0):
PointerType.__init__(self, totype, quals)
self.name = name
self.c_name_with_marker = name + '&'
class ArrayType(BaseType):
_attrs_ = ('item', 'length')
is_array_type = True
def __init__(self, item, length):
self.item = item
self.length = length
#
if length is None:
brackets = '&[]'
elif length == '...':
brackets = '&[/*...*/]'
else:
brackets = '&[%s]' % length
self.c_name_with_marker = (
self.item.c_name_with_marker.replace('&', brackets))
def length_is_unknown(self):
return isinstance(self.length, str)
def resolve_length(self, newlength):
return ArrayType(self.item, newlength)
def build_backend_type(self, ffi, finishlist):
if self.length_is_unknown():
raise CDefError("cannot render the type %r: unknown length" %
(self,))
self.item.get_cached_btype(ffi, finishlist) # force the item BType
BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist)
return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length)
char_array_type = ArrayType(PrimitiveType('char'), None)
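# Illustrative: the brackets stay to the right of the '&' marker, so nested
# declarators come out in C order:
#
#     ArrayType(PrimitiveType('int'), 10).get_c_name('x')   # -> 'int x[10]'
#     PointerType(ArrayType(PrimitiveType('int'), 10)).get_c_name('p')
#                                                     # -> 'int(* p)[10]'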
class StructOrUnionOrEnum(BaseTypeByIdentity):
_attrs_ = ('name',)
forcename = None
def build_c_name_with_marker(self):
name = self.forcename or '%s %s' % (self.kind, self.name)
self.c_name_with_marker = name + '&'
def force_the_name(self, forcename):
self.forcename = forcename
self.build_c_name_with_marker()
def get_official_name(self):
assert self.c_name_with_marker.endswith('&')
return self.c_name_with_marker[:-1]
class StructOrUnion(StructOrUnionOrEnum):
fixedlayout = None
completed = 0
partial = False
packed = 0
def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None):
self.name = name
self.fldnames = fldnames
self.fldtypes = fldtypes
self.fldbitsize = fldbitsize
self.fldquals = fldquals
self.build_c_name_with_marker()
def anonymous_struct_fields(self):
if self.fldtypes is not None:
for name, type in zip(self.fldnames, self.fldtypes):
if name == '' and isinstance(type, StructOrUnion):
yield type
def enumfields(self, expand_anonymous_struct_union=True):
fldquals = self.fldquals
if fldquals is None:
fldquals = (0,) * len(self.fldnames)
for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes,
self.fldbitsize, fldquals):
if (name == '' and isinstance(type, StructOrUnion)
and expand_anonymous_struct_union):
# nested anonymous struct/union
for result in type.enumfields():
yield result
else:
yield (name, type, bitsize, quals)
def force_flatten(self):
# force the struct or union to have a declaration that lists
# directly all fields returned by enumfields(), flattening
# nested anonymous structs/unions.
names = []
types = []
bitsizes = []
fldquals = []
for name, type, bitsize, quals in self.enumfields():
names.append(name)
types.append(type)
bitsizes.append(bitsize)
fldquals.append(quals)
self.fldnames = tuple(names)
self.fldtypes = tuple(types)
self.fldbitsize = tuple(bitsizes)
self.fldquals = tuple(fldquals)
def get_cached_btype(self, ffi, finishlist, can_delay=False):
BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist,
can_delay)
if not can_delay:
self.finish_backend_type(ffi, finishlist)
return BType
def finish_backend_type(self, ffi, finishlist):
if self.completed:
if self.completed != 2:
raise NotImplementedError("recursive structure declaration "
"for '%s'" % (self.name,))
return
BType = ffi._cached_btypes[self]
#
self.completed = 1
#
if self.fldtypes is None:
pass # not completing it: it's an opaque struct
#
elif self.fixedlayout is None:
fldtypes = [tp.get_cached_btype(ffi, finishlist)
for tp in self.fldtypes]
lst = list(zip(self.fldnames, fldtypes, self.fldbitsize))
extra_flags = ()
if self.packed:
if self.packed == 1:
extra_flags = (8,) # SF_PACKED
else:
extra_flags = (0, self.packed)
ffi._backend.complete_struct_or_union(BType, lst, self,
-1, -1, *extra_flags)
#
else:
fldtypes = []
fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout
for i in range(len(self.fldnames)):
fsize = fieldsize[i]
ftype = self.fldtypes[i]
#
if isinstance(ftype, ArrayType) and ftype.length_is_unknown():
# fix the length to match the total size
BItemType = ftype.item.get_cached_btype(ffi, finishlist)
nlen, nrest = divmod(fsize, ffi.sizeof(BItemType))
if nrest != 0:
self._verification_error(
"field '%s.%s' has a bogus size?" % (
self.name, self.fldnames[i] or '{}'))
ftype = ftype.resolve_length(nlen)
self.fldtypes = (self.fldtypes[:i] + (ftype,) +
self.fldtypes[i+1:])
#
BFieldType = ftype.get_cached_btype(ffi, finishlist)
if isinstance(ftype, ArrayType) and ftype.length is None:
assert fsize == 0
else:
bitemsize = ffi.sizeof(BFieldType)
if bitemsize != fsize:
self._verification_error(
"field '%s.%s' is declared as %d bytes, but is "
"really %d bytes" % (self.name,
self.fldnames[i] or '{}',
bitemsize, fsize))
fldtypes.append(BFieldType)
#
lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs))
ffi._backend.complete_struct_or_union(BType, lst, self,
totalsize, totalalignment)
self.completed = 2
def _verification_error(self, msg):
raise VerificationError(msg)
def check_not_partial(self):
if self.partial and self.fixedlayout is None:
raise VerificationMissing(self._get_c_name())
def build_backend_type(self, ffi, finishlist):
self.check_not_partial()
finishlist.append(self)
#
return global_cache(self, ffi, 'new_%s_type' % self.kind,
self.get_official_name(), key=self)
class StructType(StructOrUnion):
kind = 'struct'
class UnionType(StructOrUnion):
kind = 'union'
class EnumType(StructOrUnionOrEnum):
kind = 'enum'
partial = False
partial_resolved = False
def __init__(self, name, enumerators, enumvalues, baseinttype=None):
self.name = name
self.enumerators = enumerators
self.enumvalues = enumvalues
self.baseinttype = baseinttype
self.build_c_name_with_marker()
def force_the_name(self, forcename):
StructOrUnionOrEnum.force_the_name(self, forcename)
if self.forcename is None:
name = self.get_official_name()
self.forcename = '$' + name.replace(' ', '_')
def check_not_partial(self):
if self.partial and not self.partial_resolved:
raise VerificationMissing(self._get_c_name())
def build_backend_type(self, ffi, finishlist):
self.check_not_partial()
base_btype = self.build_baseinttype(ffi, finishlist)
return global_cache(self, ffi, 'new_enum_type',
self.get_official_name(),
self.enumerators, self.enumvalues,
base_btype, key=self)
def build_baseinttype(self, ffi, finishlist):
if self.baseinttype is not None:
return self.baseinttype.get_cached_btype(ffi, finishlist)
#
if self.enumvalues:
smallest_value = min(self.enumvalues)
largest_value = max(self.enumvalues)
else:
import warnings
try:
# XXX! The goal is to ensure that the warnings.warn()
# will not suppress the warning. We want to get it
# several times if we reach this point several times.
__warningregistry__.clear()
except NameError:
pass
warnings.warn("%r has no values explicitly defined; "
"guessing that it is equivalent to 'unsigned int'"
% self._get_c_name())
smallest_value = largest_value = 0
if smallest_value < 0: # needs a signed type
sign = 1
candidate1 = PrimitiveType("int")
candidate2 = PrimitiveType("long")
else:
sign = 0
candidate1 = PrimitiveType("unsigned int")
candidate2 = PrimitiveType("unsigned long")
btype1 = candidate1.get_cached_btype(ffi, finishlist)
btype2 = candidate2.get_cached_btype(ffi, finishlist)
size1 = ffi.sizeof(btype1)
size2 = ffi.sizeof(btype2)
if (smallest_value >= ((-1) << (8*size1-1)) and
largest_value < (1 << (8*size1-sign))):
return btype1
if (smallest_value >= ((-1) << (8*size2-1)) and
largest_value < (1 << (8*size2-sign))):
return btype2
raise CDefError("%s values don't all fit into either 'long' "
"or 'unsigned long'" % self._get_c_name())
def unknown_type(name, structname=None):
if structname is None:
structname = '$%s' % name
tp = StructType(structname, None, None, None)
tp.force_the_name(name)
tp.origin = "unknown_type"
return tp
def unknown_ptr_type(name, structname=None):
if structname is None:
structname = '$$%s' % name
tp = StructType(structname, None, None, None)
return NamedPointerType(tp, name)
global_lock = allocate_lock()
_typecache_cffi_backend = weakref.WeakValueDictionary()
def get_typecache(backend):
# returns _typecache_cffi_backend if backend is the _cffi_backend
# module, or type(backend).__typecache if backend is an instance of
# CTypesBackend (or some FakeBackend class during tests)
if isinstance(backend, types.ModuleType):
return _typecache_cffi_backend
with global_lock:
if not hasattr(type(backend), '__typecache'):
type(backend).__typecache = weakref.WeakValueDictionary()
return type(backend).__typecache
def global_cache(srctype, ffi, funcname, *args, **kwds):
key = kwds.pop('key', (funcname, args))
assert not kwds
try:
return ffi._typecache[key]
except KeyError:
pass
try:
res = getattr(ffi._backend, funcname)(*args)
except NotImplementedError as e:
raise NotImplementedError("%s: %r: %s" % (funcname, srctype, e))
# note that setdefault() on WeakValueDictionary is not atomic
# and contains a rare bug (http://bugs.python.org/issue19542);
# we have to use a lock and do it ourselves
cache = ffi._typecache
with global_lock:
res1 = cache.get(key)
if res1 is None:
cache[key] = res
return res
else:
return res1
def pointer_cache(ffi, BType):
return global_cache('?', ffi, 'new_pointer_type', BType)
def attach_exception_info(e, name):
if e.args and type(e.args[0]) is str:
e.args = ('%s: %s' % (name, e.args[0]),) + e.args[1:]
| 21,768 | Python | 34.224919 | 79 | 0.543275 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/error.py |
class FFIError(Exception):
__module__ = 'cffi'
class CDefError(Exception):
__module__ = 'cffi'
def __str__(self):
try:
current_decl = self.args[1]
filename = current_decl.coord.file
linenum = current_decl.coord.line
prefix = '%s:%d: ' % (filename, linenum)
except (AttributeError, TypeError, IndexError):
prefix = ''
return '%s%s' % (prefix, self.args[0])
class VerificationError(Exception):
""" An error raised when verification fails
"""
__module__ = 'cffi'
class VerificationMissing(Exception):
""" An error raised when incomplete structures are passed into
cdef, but no verification has been done
"""
__module__ = 'cffi'
class PkgConfigError(Exception):
""" An error raised for missing modules in pkg-config
"""
__module__ = 'cffi'
| 877 | Python | 26.437499 | 66 | 0.596351 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/cffi_opcode.py | from .error import VerificationError
class CffiOp(object):
def __init__(self, op, arg):
self.op = op
self.arg = arg
def as_c_expr(self):
if self.op is None:
assert isinstance(self.arg, str)
return '(_cffi_opcode_t)(%s)' % (self.arg,)
classname = CLASS_NAME[self.op]
return '_CFFI_OP(_CFFI_OP_%s, %s)' % (classname, self.arg)
def as_python_bytes(self):
if self.op is None and self.arg.isdigit():
value = int(self.arg) # non-negative: '-' not in self.arg
if value >= 2**31:
raise OverflowError("cannot emit %r: limited to 2**31-1"
% (self.arg,))
return format_four_bytes(value)
if isinstance(self.arg, str):
raise VerificationError("cannot emit to Python: %r" % (self.arg,))
return format_four_bytes((self.arg << 8) | self.op)
def __str__(self):
classname = CLASS_NAME.get(self.op, self.op)
return '(%s %s)' % (classname, self.arg)
def format_four_bytes(num):
return '\\x%02X\\x%02X\\x%02X\\x%02X' % (
(num >> 24) & 0xFF,
(num >> 16) & 0xFF,
(num >> 8) & 0xFF,
(num ) & 0xFF)
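# Illustrative: format_four_bytes(0x01020304) returns the 16-character string
# '\x01\x02\x03\x04' spelled with literal backslashes, ready to paste into
# generated source.  as_python_bytes() packs (arg << 8) | op the same way, so
# CffiOp(OP_POINTER, 3).as_python_bytes() yields '\x00\x00\x03\x03'.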
OP_PRIMITIVE = 1
OP_POINTER = 3
OP_ARRAY = 5
OP_OPEN_ARRAY = 7
OP_STRUCT_UNION = 9
OP_ENUM = 11
OP_FUNCTION = 13
OP_FUNCTION_END = 15
OP_NOOP = 17
OP_BITFIELD = 19
OP_TYPENAME = 21
OP_CPYTHON_BLTN_V = 23 # varargs
OP_CPYTHON_BLTN_N = 25 # noargs
OP_CPYTHON_BLTN_O = 27 # O (i.e. a single arg)
OP_CONSTANT = 29
OP_CONSTANT_INT = 31
OP_GLOBAL_VAR = 33
OP_DLOPEN_FUNC = 35
OP_DLOPEN_CONST = 37
OP_GLOBAL_VAR_F = 39
OP_EXTERN_PYTHON = 41
PRIM_VOID = 0
PRIM_BOOL = 1
PRIM_CHAR = 2
PRIM_SCHAR = 3
PRIM_UCHAR = 4
PRIM_SHORT = 5
PRIM_USHORT = 6
PRIM_INT = 7
PRIM_UINT = 8
PRIM_LONG = 9
PRIM_ULONG = 10
PRIM_LONGLONG = 11
PRIM_ULONGLONG = 12
PRIM_FLOAT = 13
PRIM_DOUBLE = 14
PRIM_LONGDOUBLE = 15
PRIM_WCHAR = 16
PRIM_INT8 = 17
PRIM_UINT8 = 18
PRIM_INT16 = 19
PRIM_UINT16 = 20
PRIM_INT32 = 21
PRIM_UINT32 = 22
PRIM_INT64 = 23
PRIM_UINT64 = 24
PRIM_INTPTR = 25
PRIM_UINTPTR = 26
PRIM_PTRDIFF = 27
PRIM_SIZE = 28
PRIM_SSIZE = 29
PRIM_INT_LEAST8 = 30
PRIM_UINT_LEAST8 = 31
PRIM_INT_LEAST16 = 32
PRIM_UINT_LEAST16 = 33
PRIM_INT_LEAST32 = 34
PRIM_UINT_LEAST32 = 35
PRIM_INT_LEAST64 = 36
PRIM_UINT_LEAST64 = 37
PRIM_INT_FAST8 = 38
PRIM_UINT_FAST8 = 39
PRIM_INT_FAST16 = 40
PRIM_UINT_FAST16 = 41
PRIM_INT_FAST32 = 42
PRIM_UINT_FAST32 = 43
PRIM_INT_FAST64 = 44
PRIM_UINT_FAST64 = 45
PRIM_INTMAX = 46
PRIM_UINTMAX = 47
PRIM_FLOATCOMPLEX = 48
PRIM_DOUBLECOMPLEX = 49
PRIM_CHAR16 = 50
PRIM_CHAR32 = 51
_NUM_PRIM = 52
_UNKNOWN_PRIM = -1
_UNKNOWN_FLOAT_PRIM = -2
_UNKNOWN_LONG_DOUBLE = -3
_IO_FILE_STRUCT = -1
PRIMITIVE_TO_INDEX = {
'char': PRIM_CHAR,
'short': PRIM_SHORT,
'int': PRIM_INT,
'long': PRIM_LONG,
'long long': PRIM_LONGLONG,
'signed char': PRIM_SCHAR,
'unsigned char': PRIM_UCHAR,
'unsigned short': PRIM_USHORT,
'unsigned int': PRIM_UINT,
'unsigned long': PRIM_ULONG,
'unsigned long long': PRIM_ULONGLONG,
'float': PRIM_FLOAT,
'double': PRIM_DOUBLE,
'long double': PRIM_LONGDOUBLE,
'float _Complex': PRIM_FLOATCOMPLEX,
'double _Complex': PRIM_DOUBLECOMPLEX,
'_Bool': PRIM_BOOL,
'wchar_t': PRIM_WCHAR,
'char16_t': PRIM_CHAR16,
'char32_t': PRIM_CHAR32,
'int8_t': PRIM_INT8,
'uint8_t': PRIM_UINT8,
'int16_t': PRIM_INT16,
'uint16_t': PRIM_UINT16,
'int32_t': PRIM_INT32,
'uint32_t': PRIM_UINT32,
'int64_t': PRIM_INT64,
'uint64_t': PRIM_UINT64,
'intptr_t': PRIM_INTPTR,
'uintptr_t': PRIM_UINTPTR,
'ptrdiff_t': PRIM_PTRDIFF,
'size_t': PRIM_SIZE,
'ssize_t': PRIM_SSIZE,
'int_least8_t': PRIM_INT_LEAST8,
'uint_least8_t': PRIM_UINT_LEAST8,
'int_least16_t': PRIM_INT_LEAST16,
'uint_least16_t': PRIM_UINT_LEAST16,
'int_least32_t': PRIM_INT_LEAST32,
'uint_least32_t': PRIM_UINT_LEAST32,
'int_least64_t': PRIM_INT_LEAST64,
'uint_least64_t': PRIM_UINT_LEAST64,
'int_fast8_t': PRIM_INT_FAST8,
'uint_fast8_t': PRIM_UINT_FAST8,
'int_fast16_t': PRIM_INT_FAST16,
'uint_fast16_t': PRIM_UINT_FAST16,
'int_fast32_t': PRIM_INT_FAST32,
'uint_fast32_t': PRIM_UINT_FAST32,
'int_fast64_t': PRIM_INT_FAST64,
'uint_fast64_t': PRIM_UINT_FAST64,
'intmax_t': PRIM_INTMAX,
'uintmax_t': PRIM_UINTMAX,
}
F_UNION = 0x01
F_CHECK_FIELDS = 0x02
F_PACKED = 0x04
F_EXTERNAL = 0x08
F_OPAQUE = 0x10
G_FLAGS = dict([('_CFFI_' + _key, globals()[_key])
for _key in ['F_UNION', 'F_CHECK_FIELDS', 'F_PACKED',
'F_EXTERNAL', 'F_OPAQUE']])
CLASS_NAME = {}
for _name, _value in list(globals().items()):
if _name.startswith('OP_') and isinstance(_value, int):
CLASS_NAME[_value] = _name[3:]
| 5,724 | Python | 29.452128 | 78 | 0.513277 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/api.py | import sys, types
from .lock import allocate_lock
from .error import CDefError
from . import model
try:
callable
except NameError:
# Python 3.1
from collections import Callable
callable = lambda x: isinstance(x, Callable)
try:
basestring
except NameError:
# Python 3.x
basestring = str
_unspecified = object()
class FFI(object):
r'''
The main top-level class that you instantiate once, or once per module.
Example usage:
ffi = FFI()
ffi.cdef("""
int printf(const char *, ...);
""")
C = ffi.dlopen(None) # standard library
-or-
C = ffi.verify() # use a C compiler: verify the decl above is right
C.printf("hello, %s!\n", ffi.new("char[]", "world"))
'''
def __init__(self, backend=None):
"""Create an FFI instance. The 'backend' argument is used to
select a non-default backend, mostly for tests.
"""
if backend is None:
# You need PyPy (>= 2.0 beta), or a CPython (>= 2.6) with
# _cffi_backend.so compiled.
import _cffi_backend as backend
from . import __version__
if backend.__version__ != __version__:
# bad version! Try to be as explicit as possible.
if hasattr(backend, '__file__'):
# CPython
raise Exception("Version mismatch: this is the 'cffi' package version %s, located in %r. When we import the top-level '_cffi_backend' extension module, we get version %s, located in %r. The two versions should be equal; check your installation." % (
__version__, __file__,
backend.__version__, backend.__file__))
else:
# PyPy
raise Exception("Version mismatch: this is the 'cffi' package version %s, located in %r. This interpreter comes with a built-in '_cffi_backend' module, which is version %s. The two versions should be equal; check your installation." % (
__version__, __file__, backend.__version__))
# (If you insist you can also try to pass the option
# 'backend=backend_ctypes.CTypesBackend()', but don't
# rely on it! It's probably not going to work well.)
from . import cparser
self._backend = backend
self._lock = allocate_lock()
self._parser = cparser.Parser()
self._cached_btypes = {}
self._parsed_types = types.ModuleType('parsed_types').__dict__
self._new_types = types.ModuleType('new_types').__dict__
self._function_caches = []
self._libraries = []
self._cdefsources = []
self._included_ffis = []
self._windows_unicode = None
self._init_once_cache = {}
self._cdef_version = None
self._embedding = None
self._typecache = model.get_typecache(backend)
if hasattr(backend, 'set_ffi'):
backend.set_ffi(self)
for name in list(backend.__dict__):
if name.startswith('RTLD_'):
setattr(self, name, getattr(backend, name))
#
with self._lock:
self.BVoidP = self._get_cached_btype(model.voidp_type)
self.BCharA = self._get_cached_btype(model.char_array_type)
if isinstance(backend, types.ModuleType):
# _cffi_backend: attach these constants to the class
if not hasattr(FFI, 'NULL'):
FFI.NULL = self.cast(self.BVoidP, 0)
FFI.CData, FFI.CType = backend._get_types()
else:
# ctypes backend: attach these constants to the instance
self.NULL = self.cast(self.BVoidP, 0)
self.CData, self.CType = backend._get_types()
self.buffer = backend.buffer
def cdef(self, csource, override=False, packed=False, pack=None):
"""Parse the given C source. This registers all declared functions,
types, and global variables. The functions and global variables can
then be accessed via either 'ffi.dlopen()' or 'ffi.verify()'.
The types can be used in 'ffi.new()' and other functions.
If 'packed' is specified as True, all structs declared inside this
cdef are packed, i.e. laid out without any field alignment at all.
Alternatively, 'pack' can be a small integer, and requests for
alignment greater than that are ignored (pack=1 is equivalent to
packed=True).
"""
self._cdef(csource, override=override, packed=packed, pack=pack)
def embedding_api(self, csource, packed=False, pack=None):
self._cdef(csource, packed=packed, pack=pack, dllexport=True)
if self._embedding is None:
self._embedding = ''
def _cdef(self, csource, override=False, **options):
if not isinstance(csource, str): # unicode, on Python 2
if not isinstance(csource, basestring):
raise TypeError("cdef() argument must be a string")
csource = csource.encode('ascii')
with self._lock:
self._cdef_version = object()
self._parser.parse(csource, override=override, **options)
self._cdefsources.append(csource)
if override:
for cache in self._function_caches:
cache.clear()
finishlist = self._parser._recomplete
if finishlist:
self._parser._recomplete = []
for tp in finishlist:
tp.finish_backend_type(self, finishlist)
def dlopen(self, name, flags=0):
"""Load and return a dynamic library identified by 'name'.
The standard C library can be loaded by passing None.
Note that functions and types declared by 'ffi.cdef()' are not
linked to a particular library, just like C headers; in the
library we only look for the actual (untyped) symbols.
"""
if not (isinstance(name, basestring) or
name is None or
isinstance(name, self.CData)):
raise TypeError("dlopen(name): name must be a file name, None, "
"or an already-opened 'void *' handle")
with self._lock:
lib, function_cache = _make_ffi_library(self, name, flags)
self._function_caches.append(function_cache)
self._libraries.append(lib)
return lib
def dlclose(self, lib):
"""Close a library obtained with ffi.dlopen(). After this call,
access to functions or variables from the library will fail
(possibly with a segmentation fault).
"""
type(lib).__cffi_close__(lib)
def _typeof_locked(self, cdecl):
# call me with the lock!
key = cdecl
if key in self._parsed_types:
return self._parsed_types[key]
#
if not isinstance(cdecl, str): # unicode, on Python 2
cdecl = cdecl.encode('ascii')
#
type = self._parser.parse_type(cdecl)
really_a_function_type = type.is_raw_function
if really_a_function_type:
type = type.as_function_pointer()
btype = self._get_cached_btype(type)
result = btype, really_a_function_type
self._parsed_types[key] = result
return result
def _typeof(self, cdecl, consider_function_as_funcptr=False):
# string -> ctype object
try:
result = self._parsed_types[cdecl]
except KeyError:
with self._lock:
result = self._typeof_locked(cdecl)
#
btype, really_a_function_type = result
if really_a_function_type and not consider_function_as_funcptr:
raise CDefError("the type %r is a function type, not a "
"pointer-to-function type" % (cdecl,))
return btype
def typeof(self, cdecl):
"""Parse the C type given as a string and return the
corresponding <ctype> object.
It can also be used on 'cdata' instance to get its C type.
"""
if isinstance(cdecl, basestring):
return self._typeof(cdecl)
if isinstance(cdecl, self.CData):
return self._backend.typeof(cdecl)
if isinstance(cdecl, types.BuiltinFunctionType):
res = _builtin_function_type(cdecl)
if res is not None:
return res
if (isinstance(cdecl, types.FunctionType)
and hasattr(cdecl, '_cffi_base_type')):
with self._lock:
return self._get_cached_btype(cdecl._cffi_base_type)
raise TypeError(type(cdecl))
def sizeof(self, cdecl):
"""Return the size in bytes of the argument. It can be a
string naming a C type, or a 'cdata' instance.
"""
if isinstance(cdecl, basestring):
BType = self._typeof(cdecl)
return self._backend.sizeof(BType)
else:
return self._backend.sizeof(cdecl)
def alignof(self, cdecl):
"""Return the natural alignment size in bytes of the C type
given as a string.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return self._backend.alignof(cdecl)
def offsetof(self, cdecl, *fields_or_indexes):
"""Return the offset of the named field inside the given
structure or array, which must be given as a C type name.
You can give several field names in case of nested structures.
You can also give numeric values which correspond to array
items, in case of an array type.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return self._typeoffsetof(cdecl, *fields_or_indexes)[1]
def new(self, cdecl, init=None):
"""Allocate an instance according to the specified C type and
return a pointer to it. The specified C type must be either a
pointer or an array: ``new('X *')`` allocates an X and returns
a pointer to it, whereas ``new('X[n]')`` allocates an array of
n X'es and returns an array referencing it (which works
mostly like a pointer, like in C). You can also use
``new('X[]', n)`` to allocate an array of a non-constant
length n.
The memory is initialized following the rules of declaring a
global variable in C: by default it is zero-initialized, but
an explicit initializer can be given which can be used to
fill all or part of the memory.
When the returned <cdata> object goes out of scope, the memory
is freed. In other words the returned <cdata> object has
ownership of the value of type 'cdecl' that it points to. This
means that the raw data can be used as long as this object is
kept alive, but must not be used for a longer time. Be careful
about that when copying the pointer to the memory somewhere
else, e.g. into another structure.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return self._backend.newp(cdecl, init)
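    # Illustrative usage (a sketch, not part of the API):
    #
    #     p = ffi.new("int *", 42)       # one int, initialized to 42
    #     a = ffi.new("int[]", 5)        # five zero-initialized ints
    #     s = ffi.new("char[]", b"hi")   # 3 chars, final '\0' included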
def new_allocator(self, alloc=None, free=None,
should_clear_after_alloc=True):
"""Return a new allocator, i.e. a function that behaves like ffi.new()
but uses the provided low-level 'alloc' and 'free' functions.
'alloc' is called with the size as argument. If it returns NULL, a
MemoryError is raised. 'free' is called with the result of 'alloc'
as argument. Both can be either Python function or directly C
functions. If 'free' is None, then no free function is called.
If both 'alloc' and 'free' are None, the default is used.
If 'should_clear_after_alloc' is set to False, then the memory
returned by 'alloc' is assumed to be already cleared (or you are
fine with garbage); otherwise CFFI will clear it.
"""
compiled_ffi = self._backend.FFI()
allocator = compiled_ffi.new_allocator(alloc, free,
should_clear_after_alloc)
def allocate(cdecl, init=None):
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return allocator(cdecl, init)
return allocate
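    # Illustrative sketch: route allocations through libc functions that are
    # assumed to have been declared with ffi.cdef() and loaded into 'lib':
    #
    #     alloc = ffi.new_allocator(lib.malloc, lib.free,
    #                               should_clear_after_alloc=False)
    #     buf = alloc("char[]", 1024)    # freed via lib.free at GC time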
def cast(self, cdecl, source):
"""Similar to a C cast: returns an instance of the named C
type initialized with the given 'source'. The source is
        cast between integers or pointers of any type.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return self._backend.cast(cdecl, source)
def string(self, cdata, maxlen=-1):
"""Return a Python string (or unicode string) from the 'cdata'.
If 'cdata' is a pointer or array of characters or bytes, returns
the null-terminated string. The returned string extends until
the first null character, or at most 'maxlen' characters. If
'cdata' is an array then 'maxlen' defaults to its length.
If 'cdata' is a pointer or array of wchar_t, returns a unicode
string following the same rules.
If 'cdata' is a single character or byte or a wchar_t, returns
it as a string or unicode string.
If 'cdata' is an enum, returns the value of the enumerator as a
string, or 'NUMBER' if the value is out of range.
"""
return self._backend.string(cdata, maxlen)
def unpack(self, cdata, length):
"""Unpack an array of C data of the given length,
returning a Python string/unicode/list.
If 'cdata' is a pointer to 'char', returns a byte string.
It does not stop at the first null. This is equivalent to:
ffi.buffer(cdata, length)[:]
If 'cdata' is a pointer to 'wchar_t', returns a unicode string.
'length' is measured in wchar_t's; it is not the size in bytes.
If 'cdata' is a pointer to anything else, returns a list of
'length' items. This is a faster equivalent to:
[cdata[i] for i in range(length)]
"""
return self._backend.unpack(cdata, length)
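    # Illustrative:
    #
    #     p = ffi.new("char[]", b"hello")
    #     ffi.string(p)      # -> b'hello'  (stops at the first null)
    #     ffi.unpack(p, 6)   # -> b'hello\x00'  (exactly 6 bytes, nulls kept)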
#def buffer(self, cdata, size=-1):
# """Return a read-write buffer object that references the raw C data
# pointed to by the given 'cdata'. The 'cdata' must be a pointer or
# an array. Can be passed to functions expecting a buffer, or directly
# manipulated with:
#
# buf[:] get a copy of it in a regular string, or
# buf[idx] as a single character
# buf[:] = ...
# buf[idx] = ... change the content
# """
# note that 'buffer' is a type, set on this instance by __init__
def from_buffer(self, cdecl, python_buffer=_unspecified,
require_writable=False):
"""Return a cdata of the given type pointing to the data of the
given Python object, which must support the buffer interface.
Note that this is not meant to be used on the built-in types
str or unicode (you can build 'char[]' arrays explicitly)
but only on objects containing large quantities of raw data
in some other format, like 'array.array' or numpy arrays.
        The first argument is optional and defaults to 'char[]'.
"""
if python_buffer is _unspecified:
cdecl, python_buffer = self.BCharA, cdecl
elif isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return self._backend.from_buffer(cdecl, python_buffer,
require_writable)
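    # Illustrative (assuming the 'i' typecode is a C int on this platform):
    #
    #     import array
    #     buf = array.array("i", [1, 2, 3])
    #     p = ffi.from_buffer("int[]", buf)   # shares memory with 'buf'
    #     p[0]                                # -> 1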
def memmove(self, dest, src, n):
"""ffi.memmove(dest, src, n) copies n bytes of memory from src to dest.
Like the C function memmove(), the memory areas may overlap;
apart from that it behaves like the C function memcpy().
'src' can be any cdata ptr or array, or any Python buffer object.
'dest' can be any cdata ptr or array, or a writable Python buffer
object. The size to copy, 'n', is always measured in bytes.
        Unlike other methods, this one supports all Python buffers,
        including byte strings and bytearrays; it still does not support
        non-contiguous buffers.
"""
return self._backend.memmove(dest, src, n)
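    # Illustrative:
    #
    #     dst = ffi.new("unsigned char[4]")
    #     ffi.memmove(dst, b"\x01\x02\x03\x04", 4)   # copy 4 raw bytes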
def callback(self, cdecl, python_callable=None, error=None, onerror=None):
"""Return a callback object or a decorator making such a
callback object. 'cdecl' must name a C function pointer type.
The callback invokes the specified 'python_callable' (which may
be provided either directly or via a decorator). Important: the
callback object must be manually kept alive for as long as the
callback may be invoked from the C level.
"""
def callback_decorator_wrap(python_callable):
if not callable(python_callable):
raise TypeError("the 'python_callable' argument "
"is not callable")
return self._backend.callback(cdecl, python_callable,
error, onerror)
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl, consider_function_as_funcptr=True)
if python_callable is None:
return callback_decorator_wrap # decorator mode
else:
return callback_decorator_wrap(python_callable) # direct mode
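    # Illustrative decorator-mode sketch; the callback object must be kept
    # alive for as long as C code may invoke it:
    #
    #     @ffi.callback("int(int, int)")
    #     def my_add(x, y):
    #         return x + y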
def getctype(self, cdecl, replace_with=''):
"""Return a string giving the C type 'cdecl', which may be itself
a string or a <ctype> object. If 'replace_with' is given, it gives
extra text to append (or insert for more complicated C types), like
a variable name, or '*' to get actually the C type 'pointer-to-cdecl'.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
replace_with = replace_with.strip()
if (replace_with.startswith('*')
and '&[' in self._backend.getcname(cdecl, '&')):
replace_with = '(%s)' % replace_with
        elif replace_with and replace_with[0] not in '[(':
replace_with = ' ' + replace_with
return self._backend.getcname(cdecl, replace_with)
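    # Illustrative:
    #
    #     ffi.getctype("int", "*")       # -> 'int *'
    #     ffi.getctype("int[10]", "*")   # -> 'int(*)[10]'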
def gc(self, cdata, destructor, size=0):
"""Return a new cdata object that points to the same
data. Later, when this new cdata object is garbage-collected,
'destructor(old_cdata_object)' will be called.
The optional 'size' gives an estimate of the size, used to
trigger the garbage collection more eagerly. So far only used
on PyPy. It tells the GC that the returned object keeps alive
roughly 'size' bytes of external memory.
"""
return self._backend.gcp(cdata, destructor, size)
def _get_cached_btype(self, type):
assert self._lock.acquire(False) is False
# call me with the lock!
try:
BType = self._cached_btypes[type]
except KeyError:
finishlist = []
BType = type.get_cached_btype(self, finishlist)
for type in finishlist:
type.finish_backend_type(self, finishlist)
return BType
def verify(self, source='', tmpdir=None, **kwargs):
"""Verify that the current ffi signatures compile on this
machine, and return a dynamic library object. The dynamic
library can be used to call functions and access global
variables declared in this 'ffi'. The library is compiled
by the C compiler: it gives you C-level API compatibility
(including calling macros). This is unlike 'ffi.dlopen()',
which requires binary compatibility in the signatures.
"""
from .verifier import Verifier, _caller_dir_pycache
#
# If set_unicode(True) was called, insert the UNICODE and
# _UNICODE macro declarations
if self._windows_unicode:
self._apply_windows_unicode(kwargs)
#
# Set the tmpdir here, and not in Verifier.__init__: it picks
# up the caller's directory, which we want to be the caller of
        # ffi.verify(), as opposed to the caller of Verifier().
tmpdir = tmpdir or _caller_dir_pycache()
#
# Make a Verifier() and use it to load the library.
self.verifier = Verifier(self, source, tmpdir, **kwargs)
lib = self.verifier.load_library()
#
# Save the loaded library for keep-alive purposes, even
# if the caller doesn't keep it alive itself (it should).
self._libraries.append(lib)
return lib
def _get_errno(self):
return self._backend.get_errno()
def _set_errno(self, errno):
self._backend.set_errno(errno)
errno = property(_get_errno, _set_errno, None,
"the value of 'errno' from/to the C calls")
def getwinerror(self, code=-1):
return self._backend.getwinerror(code)
def _pointer_to(self, ctype):
with self._lock:
return model.pointer_cache(self, ctype)
def addressof(self, cdata, *fields_or_indexes):
"""Return the address of a <cdata 'struct-or-union'>.
If 'fields_or_indexes' are given, returns the address of that
field or array item in the structure or array, recursively in
case of nested structures.
"""
try:
ctype = self._backend.typeof(cdata)
except TypeError:
if '__addressof__' in type(cdata).__dict__:
return type(cdata).__addressof__(cdata, *fields_or_indexes)
raise
if fields_or_indexes:
ctype, offset = self._typeoffsetof(ctype, *fields_or_indexes)
else:
if ctype.kind == "pointer":
raise TypeError("addressof(pointer)")
offset = 0
ctypeptr = self._pointer_to(ctype)
return self._backend.rawaddressof(ctypeptr, cdata, offset)
def _typeoffsetof(self, ctype, field_or_index, *fields_or_indexes):
ctype, offset = self._backend.typeoffsetof(ctype, field_or_index)
for field1 in fields_or_indexes:
ctype, offset1 = self._backend.typeoffsetof(ctype, field1, 1)
offset += offset1
return ctype, offset
def include(self, ffi_to_include):
"""Includes the typedefs, structs, unions and enums defined
in another FFI instance. Usage is similar to a #include in C,
where a part of the program might include types defined in
another part for its own usage. Note that the include()
method has no effect on functions, constants and global
variables, which must anyway be accessed directly from the
lib object returned by the original FFI instance.
"""
if not isinstance(ffi_to_include, FFI):
raise TypeError("ffi.include() expects an argument that is also of"
" type cffi.FFI, not %r" % (
type(ffi_to_include).__name__,))
if ffi_to_include is self:
raise ValueError("self.include(self)")
with ffi_to_include._lock:
with self._lock:
self._parser.include(ffi_to_include._parser)
self._cdefsources.append('[')
self._cdefsources.extend(ffi_to_include._cdefsources)
self._cdefsources.append(']')
self._included_ffis.append(ffi_to_include)
def new_handle(self, x):
return self._backend.newp_handle(self.BVoidP, x)
def from_handle(self, x):
return self._backend.from_handle(x)
def release(self, x):
self._backend.release(x)
def set_unicode(self, enabled_flag):
"""Windows: if 'enabled_flag' is True, enable the UNICODE and
_UNICODE defines in C, and declare the types like TCHAR and LPTCSTR
to be (pointers to) wchar_t. If 'enabled_flag' is False,
declare these types to be (pointers to) plain 8-bit characters.
This is mostly for backward compatibility; you usually want True.
"""
if self._windows_unicode is not None:
raise ValueError("set_unicode() can only be called once")
enabled_flag = bool(enabled_flag)
if enabled_flag:
self.cdef("typedef wchar_t TBYTE;"
"typedef wchar_t TCHAR;"
"typedef const wchar_t *LPCTSTR;"
"typedef const wchar_t *PCTSTR;"
"typedef wchar_t *LPTSTR;"
"typedef wchar_t *PTSTR;"
"typedef TBYTE *PTBYTE;"
"typedef TCHAR *PTCHAR;")
else:
self.cdef("typedef char TBYTE;"
"typedef char TCHAR;"
"typedef const char *LPCTSTR;"
"typedef const char *PCTSTR;"
"typedef char *LPTSTR;"
"typedef char *PTSTR;"
"typedef TBYTE *PTBYTE;"
"typedef TCHAR *PTCHAR;")
self._windows_unicode = enabled_flag
def _apply_windows_unicode(self, kwds):
defmacros = kwds.get('define_macros', ())
if not isinstance(defmacros, (list, tuple)):
raise TypeError("'define_macros' must be a list or tuple")
defmacros = list(defmacros) + [('UNICODE', '1'),
('_UNICODE', '1')]
kwds['define_macros'] = defmacros
def _apply_embedding_fix(self, kwds):
# must include an argument like "-lpython2.7" for the compiler
def ensure(key, value):
lst = kwds.setdefault(key, [])
if value not in lst:
lst.append(value)
#
if '__pypy__' in sys.builtin_module_names:
import os
if sys.platform == "win32":
# we need 'libpypy-c.lib'. Current distributions of
# pypy (>= 4.1) contain it as 'libs/python27.lib'.
pythonlib = "python{0[0]}{0[1]}".format(sys.version_info)
if hasattr(sys, 'prefix'):
ensure('library_dirs', os.path.join(sys.prefix, 'libs'))
else:
# we need 'libpypy-c.{so,dylib}', which should be by
# default located in 'sys.prefix/bin' for installed
# systems.
if sys.version_info < (3,):
pythonlib = "pypy-c"
else:
pythonlib = "pypy3-c"
if hasattr(sys, 'prefix'):
ensure('library_dirs', os.path.join(sys.prefix, 'bin'))
# On uninstalled pypy's, the libpypy-c is typically found in
# .../pypy/goal/.
if hasattr(sys, 'prefix'):
ensure('library_dirs', os.path.join(sys.prefix, 'pypy', 'goal'))
else:
if sys.platform == "win32":
template = "python%d%d"
if hasattr(sys, 'gettotalrefcount'):
template += '_d'
else:
try:
import sysconfig
except ImportError: # 2.6
from distutils import sysconfig
template = "python%d.%d"
if sysconfig.get_config_var('DEBUG_EXT'):
template += sysconfig.get_config_var('DEBUG_EXT')
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
if hasattr(sys, 'abiflags'):
pythonlib += sys.abiflags
ensure('libraries', pythonlib)
if sys.platform == "win32":
ensure('extra_link_args', '/MANIFEST')
def set_source(self, module_name, source, source_extension='.c', **kwds):
import os
if hasattr(self, '_assigned_source'):
raise ValueError("set_source() cannot be called several times "
"per ffi object")
if not isinstance(module_name, basestring):
raise TypeError("'module_name' must be a string")
if os.sep in module_name or (os.altsep and os.altsep in module_name):
raise ValueError("'module_name' must not contain '/': use a dotted "
"name to make a 'package.module' location")
self._assigned_source = (str(module_name), source,
source_extension, kwds)
def set_source_pkgconfig(self, module_name, pkgconfig_libs, source,
source_extension='.c', **kwds):
from . import pkgconfig
if not isinstance(pkgconfig_libs, list):
raise TypeError("the pkgconfig_libs argument must be a list "
"of package names")
kwds2 = pkgconfig.flags_from_pkgconfig(pkgconfig_libs)
pkgconfig.merge_flags(kwds, kwds2)
self.set_source(module_name, source, source_extension, **kwds)
def distutils_extension(self, tmpdir='build', verbose=True):
from distutils.dir_util import mkpath
from .recompiler import recompile
#
if not hasattr(self, '_assigned_source'):
if hasattr(self, 'verifier'): # fallback, 'tmpdir' ignored
return self.verifier.get_extension()
raise ValueError("set_source() must be called before"
" distutils_extension()")
module_name, source, source_extension, kwds = self._assigned_source
if source is None:
raise TypeError("distutils_extension() is only for C extension "
"modules, not for dlopen()-style pure Python "
"modules")
mkpath(tmpdir)
ext, updated = recompile(self, module_name,
source, tmpdir=tmpdir, extradir=tmpdir,
source_extension=source_extension,
call_c_compiler=False, **kwds)
if verbose:
if updated:
sys.stderr.write("regenerated: %r\n" % (ext.sources[0],))
else:
sys.stderr.write("not modified: %r\n" % (ext.sources[0],))
return ext
def emit_c_code(self, filename):
from .recompiler import recompile
#
if not hasattr(self, '_assigned_source'):
raise ValueError("set_source() must be called before emit_c_code()")
module_name, source, source_extension, kwds = self._assigned_source
if source is None:
raise TypeError("emit_c_code() is only for C extension modules, "
"not for dlopen()-style pure Python modules")
recompile(self, module_name, source,
c_file=filename, call_c_compiler=False, **kwds)
def emit_python_code(self, filename):
from .recompiler import recompile
#
if not hasattr(self, '_assigned_source'):
raise ValueError("set_source() must be called before emit_c_code()")
module_name, source, source_extension, kwds = self._assigned_source
if source is not None:
raise TypeError("emit_python_code() is only for dlopen()-style "
"pure Python modules, not for C extension modules")
recompile(self, module_name, source,
c_file=filename, call_c_compiler=False, **kwds)
def compile(self, tmpdir='.', verbose=0, target=None, debug=None):
"""The 'target' argument gives the final file name of the
compiled DLL. Use '*' to force distutils' choice, suitable for
regular CPython C API modules. Use a file name ending in '.*'
to ask for the system's default extension for dynamic libraries
(.so/.dll/.dylib).
The default is '*' when building a non-embedded C API extension,
and (module_name + '.*') when building an embedded library.
"""
from .recompiler import recompile
#
if not hasattr(self, '_assigned_source'):
raise ValueError("set_source() must be called before compile()")
module_name, source, source_extension, kwds = self._assigned_source
return recompile(self, module_name, source, tmpdir=tmpdir,
target=target, source_extension=source_extension,
compiler_verbose=verbose, debug=debug, **kwds)
def init_once(self, func, tag):
# Read _init_once_cache[tag], which is either (False, lock) if
# we're calling the function now in some thread, or (True, result).
# Don't call setdefault() in most cases, to avoid allocating and
        # immediately freeing a lock; but still use setdefault() to avoid
# races.
try:
x = self._init_once_cache[tag]
except KeyError:
x = self._init_once_cache.setdefault(tag, (False, allocate_lock()))
# Common case: we got (True, result), so we return the result.
if x[0]:
return x[1]
# Else, it's a lock. Acquire it to serialize the following tests.
with x[1]:
# Read again from _init_once_cache the current status.
x = self._init_once_cache[tag]
if x[0]:
return x[1]
# Call the function and store the result back.
result = func()
self._init_once_cache[tag] = (True, result)
return result
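    # Illustrative sketch ('lib.init_library' is an assumed one-time C call):
    #
    #     def _setup():
    #         lib.init_library()
    #         return True
    #     ffi.init_once(_setup, "init_library")   # runs _setup at most once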
def embedding_init_code(self, pysource):
if self._embedding:
raise ValueError("embedding_init_code() can only be called once")
# fix 'pysource' before it gets dumped into the C file:
# - remove empty lines at the beginning, so it starts at "line 1"
# - dedent, if all non-empty lines are indented
# - check for SyntaxErrors
import re
match = re.match(r'\s*\n', pysource)
if match:
pysource = pysource[match.end():]
lines = pysource.splitlines() or ['']
prefix = re.match(r'\s*', lines[0]).group()
for i in range(1, len(lines)):
line = lines[i]
if line.rstrip():
while not line.startswith(prefix):
prefix = prefix[:-1]
i = len(prefix)
lines = [line[i:]+'\n' for line in lines]
pysource = ''.join(lines)
#
compile(pysource, "cffi_init", "exec")
#
self._embedding = pysource
def def_extern(self, *args, **kwds):
raise ValueError("ffi.def_extern() is only available on API-mode FFI "
"objects")
def list_types(self):
"""Returns the user type names known to this FFI instance.
This returns a tuple containing three lists of names:
(typedef_names, names_of_structs, names_of_unions)
"""
typedefs = []
structs = []
unions = []
for key in self._parser._declarations:
if key.startswith('typedef '):
typedefs.append(key[8:])
elif key.startswith('struct '):
structs.append(key[7:])
elif key.startswith('union '):
unions.append(key[6:])
typedefs.sort()
structs.sort()
unions.sort()
return (typedefs, structs, unions)
def _load_backend_lib(backend, name, flags):
import os
if not isinstance(name, basestring):
if sys.platform != "win32" or name is not None:
return backend.load_library(name, flags)
name = "c" # Windows: load_library(None) fails, but this works
# on Python 2 (backward compatibility hack only)
first_error = None
if '.' in name or '/' in name or os.sep in name:
try:
return backend.load_library(name, flags)
except OSError as e:
first_error = e
import ctypes.util
path = ctypes.util.find_library(name)
if path is None:
if name == "c" and sys.platform == "win32" and sys.version_info >= (3,):
raise OSError("dlopen(None) cannot work on Windows for Python 3 "
"(see http://bugs.python.org/issue23606)")
msg = ("ctypes.util.find_library() did not manage "
"to locate a library called %r" % (name,))
if first_error is not None:
msg = "%s. Additionally, %s" % (first_error, msg)
raise OSError(msg)
return backend.load_library(path, flags)
def _make_ffi_library(ffi, libname, flags):
backend = ffi._backend
backendlib = _load_backend_lib(backend, libname, flags)
#
def accessor_function(name):
key = 'function ' + name
tp, _ = ffi._parser._declarations[key]
BType = ffi._get_cached_btype(tp)
value = backendlib.load_function(BType, name)
library.__dict__[name] = value
#
def accessor_variable(name):
key = 'variable ' + name
tp, _ = ffi._parser._declarations[key]
BType = ffi._get_cached_btype(tp)
read_variable = backendlib.read_variable
write_variable = backendlib.write_variable
setattr(FFILibrary, name, property(
lambda self: read_variable(BType, name),
lambda self, value: write_variable(BType, name, value)))
#
def addressof_var(name):
try:
return addr_variables[name]
except KeyError:
with ffi._lock:
if name not in addr_variables:
key = 'variable ' + name
tp, _ = ffi._parser._declarations[key]
BType = ffi._get_cached_btype(tp)
if BType.kind != 'array':
BType = model.pointer_cache(ffi, BType)
p = backendlib.load_function(BType, name)
addr_variables[name] = p
return addr_variables[name]
#
def accessor_constant(name):
raise NotImplementedError("non-integer constant '%s' cannot be "
"accessed from a dlopen() library" % (name,))
#
def accessor_int_constant(name):
library.__dict__[name] = ffi._parser._int_constants[name]
#
accessors = {}
accessors_version = [False]
addr_variables = {}
#
def update_accessors():
if accessors_version[0] is ffi._cdef_version:
return
#
for key, (tp, _) in ffi._parser._declarations.items():
if not isinstance(tp, model.EnumType):
tag, name = key.split(' ', 1)
if tag == 'function':
accessors[name] = accessor_function
elif tag == 'variable':
accessors[name] = accessor_variable
elif tag == 'constant':
accessors[name] = accessor_constant
else:
for i, enumname in enumerate(tp.enumerators):
def accessor_enum(name, tp=tp, i=i):
tp.check_not_partial()
library.__dict__[name] = tp.enumvalues[i]
accessors[enumname] = accessor_enum
for name in ffi._parser._int_constants:
accessors.setdefault(name, accessor_int_constant)
accessors_version[0] = ffi._cdef_version
#
def make_accessor(name):
with ffi._lock:
if name in library.__dict__ or name in FFILibrary.__dict__:
return # added by another thread while waiting for the lock
if name not in accessors:
update_accessors()
if name not in accessors:
raise AttributeError(name)
accessors[name](name)
#
class FFILibrary(object):
def __getattr__(self, name):
make_accessor(name)
return getattr(self, name)
def __setattr__(self, name, value):
try:
property = getattr(self.__class__, name)
except AttributeError:
make_accessor(name)
setattr(self, name, value)
else:
property.__set__(self, value)
def __dir__(self):
with ffi._lock:
update_accessors()
return accessors.keys()
def __addressof__(self, name):
if name in library.__dict__:
return library.__dict__[name]
if name in FFILibrary.__dict__:
return addressof_var(name)
make_accessor(name)
if name in library.__dict__:
return library.__dict__[name]
if name in FFILibrary.__dict__:
return addressof_var(name)
raise AttributeError("cffi library has no function or "
"global variable named '%s'" % (name,))
def __cffi_close__(self):
backendlib.close_lib()
self.__dict__.clear()
#
if isinstance(libname, basestring):
try:
if not isinstance(libname, str): # unicode, on Python 2
libname = libname.encode('utf-8')
FFILibrary.__name__ = 'FFILibrary_%s' % libname
except UnicodeError:
pass
library = FFILibrary()
return library, library.__dict__
def _builtin_function_type(func):
# a hack to make at least ffi.typeof(builtin_function) work,
# if the builtin function was obtained by 'vengine_cpy'.
import sys
try:
module = sys.modules[func.__module__]
ffi = module._cffi_original_ffi
types_of_builtin_funcs = module._cffi_types_of_builtin_funcs
tp = types_of_builtin_funcs[func]
except (KeyError, AttributeError, TypeError):
return None
else:
with ffi._lock:
return ffi._get_cached_btype(tp)
| 42,064 | Python | 42.545549 | 271 | 0.574315 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/setuptools_ext.py | import os
import sys
try:
basestring
except NameError:
# Python 3.x
basestring = str
def error(msg):
from distutils.errors import DistutilsSetupError
raise DistutilsSetupError(msg)
def execfile(filename, glob):
# We use execfile() (here rewritten for Python 3) instead of
# __import__() to load the build script. The problem with
# a normal import is that in some packages, the intermediate
# __init__.py files may already try to import the file that
# we are generating.
with open(filename) as f:
src = f.read()
src += '\n' # Python 2.6 compatibility
code = compile(src, filename, 'exec')
exec(code, glob, glob)
def add_cffi_module(dist, mod_spec):
from cffi.api import FFI
if not isinstance(mod_spec, basestring):
error("argument to 'cffi_modules=...' must be a str or a list of str,"
" not %r" % (type(mod_spec).__name__,))
mod_spec = str(mod_spec)
try:
build_file_name, ffi_var_name = mod_spec.split(':')
except ValueError:
error("%r must be of the form 'path/build.py:ffi_variable'" %
(mod_spec,))
if not os.path.exists(build_file_name):
ext = ''
rewritten = build_file_name.replace('.', '/') + '.py'
if os.path.exists(rewritten):
ext = ' (rewrite cffi_modules to [%r])' % (
rewritten + ':' + ffi_var_name,)
error("%r does not name an existing file%s" % (build_file_name, ext))
mod_vars = {'__name__': '__cffi__', '__file__': build_file_name}
execfile(build_file_name, mod_vars)
try:
ffi = mod_vars[ffi_var_name]
except KeyError:
error("%r: object %r not found in module" % (mod_spec,
ffi_var_name))
if not isinstance(ffi, FFI):
ffi = ffi() # maybe it's a function instead of directly an ffi
if not isinstance(ffi, FFI):
error("%r is not an FFI instance (got %r)" % (mod_spec,
type(ffi).__name__))
if not hasattr(ffi, '_assigned_source'):
error("%r: the set_source() method was not called" % (mod_spec,))
module_name, source, source_extension, kwds = ffi._assigned_source
if ffi._windows_unicode:
kwds = kwds.copy()
ffi._apply_windows_unicode(kwds)
if source is None:
_add_py_module(dist, ffi, module_name)
else:
_add_c_module(dist, ffi, module_name, source, source_extension, kwds)
def _set_py_limited_api(Extension, kwds):
"""
Add py_limited_api to kwds if setuptools >= 26 is in use.
Do not alter the setting if it already exists.
Setuptools takes care of ignoring the flag on Python 2 and PyPy.
CPython itself should ignore the flag in a debugging version
(by not listing .abi3.so in the extensions it supports), but
    it doesn't so far, which causes trouble.  That's why we check
for "not hasattr(sys, 'gettotalrefcount')" (the 2.7 compatible equivalent
of 'd' not in sys.abiflags). (http://bugs.python.org/issue28401)
On Windows, with CPython <= 3.4, it's better not to use py_limited_api
because virtualenv *still* doesn't copy PYTHON3.DLL on these versions.
Recently (2020) we started shipping only >= 3.5 wheels, though. So
we'll give it another try and set py_limited_api on Windows >= 3.5.
"""
from cffi import recompiler
if ('py_limited_api' not in kwds and not hasattr(sys, 'gettotalrefcount')
and recompiler.USE_LIMITED_API):
import setuptools
try:
setuptools_major_version = int(setuptools.__version__.partition('.')[0])
if setuptools_major_version >= 26:
kwds['py_limited_api'] = True
except ValueError: # certain development versions of setuptools
# If we don't know the version number of setuptools, we
# try to set 'py_limited_api' anyway. At worst, we get a
# warning.
kwds['py_limited_api'] = True
return kwds
def _add_c_module(dist, ffi, module_name, source, source_extension, kwds):
from distutils.core import Extension
# We are a setuptools extension. Need this build_ext for py_limited_api.
from setuptools.command.build_ext import build_ext
from distutils.dir_util import mkpath
from distutils import log
from cffi import recompiler
allsources = ['$PLACEHOLDER']
allsources.extend(kwds.pop('sources', []))
kwds = _set_py_limited_api(Extension, kwds)
ext = Extension(name=module_name, sources=allsources, **kwds)
def make_mod(tmpdir, pre_run=None):
c_file = os.path.join(tmpdir, module_name + source_extension)
log.info("generating cffi module %r" % c_file)
mkpath(tmpdir)
# a setuptools-only, API-only hook: called with the "ext" and "ffi"
# arguments just before we turn the ffi into C code. To use it,
# subclass the 'distutils.command.build_ext.build_ext' class and
# add a method 'def pre_run(self, ext, ffi)'.
if pre_run is not None:
pre_run(ext, ffi)
updated = recompiler.make_c_source(ffi, module_name, source, c_file)
if not updated:
log.info("already up-to-date")
return c_file
if dist.ext_modules is None:
dist.ext_modules = []
dist.ext_modules.append(ext)
base_class = dist.cmdclass.get('build_ext', build_ext)
class build_ext_make_mod(base_class):
def run(self):
if ext.sources[0] == '$PLACEHOLDER':
pre_run = getattr(self, 'pre_run', None)
ext.sources[0] = make_mod(self.build_temp, pre_run)
base_class.run(self)
dist.cmdclass['build_ext'] = build_ext_make_mod
# NB. multiple runs here will create multiple 'build_ext_make_mod'
# classes. Even in this case the 'build_ext' command should be
# run once; but just in case, the logic above does nothing if
# called again.
def _add_py_module(dist, ffi, module_name):
from distutils.dir_util import mkpath
from setuptools.command.build_py import build_py
from setuptools.command.build_ext import build_ext
from distutils import log
from cffi import recompiler
def generate_mod(py_file):
log.info("generating cffi module %r" % py_file)
mkpath(os.path.dirname(py_file))
updated = recompiler.make_py_source(ffi, module_name, py_file)
if not updated:
log.info("already up-to-date")
base_class = dist.cmdclass.get('build_py', build_py)
class build_py_make_mod(base_class):
def run(self):
base_class.run(self)
module_path = module_name.split('.')
module_path[-1] += '.py'
generate_mod(os.path.join(self.build_lib, *module_path))
def get_source_files(self):
# This is called from 'setup.py sdist' only. Exclude
# the generated .py module in this case.
saved_py_modules = self.py_modules
try:
if saved_py_modules:
self.py_modules = [m for m in saved_py_modules
if m != module_name]
return base_class.get_source_files(self)
finally:
self.py_modules = saved_py_modules
dist.cmdclass['build_py'] = build_py_make_mod
# distutils and setuptools have no notion I could find of a
# generated python module. If we don't add module_name to
# dist.py_modules, then things mostly work but there are some
# combination of options (--root and --record) that will miss
# the module. So we add it here, which gives a few apparently
# harmless warnings about not finding the file outside the
# build directory.
# Then we need to hack more in get_source_files(); see above.
if dist.py_modules is None:
dist.py_modules = []
dist.py_modules.append(module_name)
# the following is only for "build_ext -i"
base_class_2 = dist.cmdclass.get('build_ext', build_ext)
class build_ext_make_mod(base_class_2):
def run(self):
base_class_2.run(self)
if self.inplace:
# from get_ext_fullpath() in distutils/command/build_ext.py
module_path = module_name.split('.')
package = '.'.join(module_path[:-1])
build_py = self.get_finalized_command('build_py')
package_dir = build_py.get_package_dir(package)
file_name = module_path[-1] + '.py'
generate_mod(os.path.join(package_dir, file_name))
dist.cmdclass['build_ext'] = build_ext_make_mod
def cffi_modules(dist, attr, value):
assert attr == 'cffi_modules'
if isinstance(value, basestring):
value = [value]
for cffi_module in value:
add_cffi_module(dist, cffi_module)
| 8,931 | Python | 39.6 | 84 | 0.610122 |
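A minimal sketch of how the cffi_modules keyword handled above is typically wired up from a package's setup.py. Everything here is illustrative (the package name, _example_build.py, and the ffibuilder variable are assumptions, not part of this repository); it simply exercises the 'path/build.py:ffi_variable' form that add_cffi_module() parses.
# setup.py -- hypothetical, for illustration only
from setuptools import setup

setup(
    name="example-pkg",
    setup_requires=["cffi>=1.0.0"],
    install_requires=["cffi>=1.0.0"],
    # parsed by add_cffi_module() as 'path/build.py:ffi_variable'
    cffi_modules=["_example_build.py:ffibuilder"],
)

# _example_build.py -- the build script named above
import cffi

ffibuilder = cffi.FFI()
ffibuilder.cdef("int add(int, int);")
# set_source() must be called, or add_cffi_module() raises an error
ffibuilder.set_source("_example", "static int add(int a, int b) { return a + b; }")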
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/_cffi_errors.h | #ifndef CFFI_MESSAGEBOX
# ifdef _MSC_VER
# define CFFI_MESSAGEBOX 1
# else
# define CFFI_MESSAGEBOX 0
# endif
#endif
#if CFFI_MESSAGEBOX
/* Windows only: logic to take the Python-CFFI embedding logic
initialization errors and display them in a background thread
with MessageBox. The idea is that if the whole program closes
as a result of this problem, then likely it is already a console
program and you can read the stderr output in the console too.
If it is not a console program, then it will likely show its own
dialog to complain, or generally not abruptly close, and for this
case the background thread should stay alive.
*/
static void *volatile _cffi_bootstrap_text;
static PyObject *_cffi_start_error_capture(void)
{
PyObject *result = NULL;
PyObject *x, *m, *bi;
if (InterlockedCompareExchangePointer(&_cffi_bootstrap_text,
(void *)1, NULL) != NULL)
return (PyObject *)1;
m = PyImport_AddModule("_cffi_error_capture");
if (m == NULL)
goto error;
result = PyModule_GetDict(m);
if (result == NULL)
goto error;
#if PY_MAJOR_VERSION >= 3
bi = PyImport_ImportModule("builtins");
#else
bi = PyImport_ImportModule("__builtin__");
#endif
if (bi == NULL)
goto error;
PyDict_SetItemString(result, "__builtins__", bi);
Py_DECREF(bi);
x = PyRun_String(
"import sys\n"
"class FileLike:\n"
" def write(self, x):\n"
" try:\n"
" of.write(x)\n"
" except: pass\n"
" self.buf += x\n"
" def flush(self):\n"
" pass\n"
"fl = FileLike()\n"
"fl.buf = ''\n"
"of = sys.stderr\n"
"sys.stderr = fl\n"
"def done():\n"
" sys.stderr = of\n"
" return fl.buf\n", /* make sure the returned value stays alive */
Py_file_input,
result, result);
Py_XDECREF(x);
error:
if (PyErr_Occurred())
{
PyErr_WriteUnraisable(Py_None);
PyErr_Clear();
}
return result;
}
#pragma comment(lib, "user32.lib")
static DWORD WINAPI _cffi_bootstrap_dialog(LPVOID ignored)
{
Sleep(666); /* may be interrupted if the whole process is closing */
#if PY_MAJOR_VERSION >= 3
MessageBoxW(NULL, (wchar_t *)_cffi_bootstrap_text,
L"Python-CFFI error",
MB_OK | MB_ICONERROR);
#else
MessageBoxA(NULL, (char *)_cffi_bootstrap_text,
"Python-CFFI error",
MB_OK | MB_ICONERROR);
#endif
_cffi_bootstrap_text = NULL;
return 0;
}
static void _cffi_stop_error_capture(PyObject *ecap)
{
PyObject *s;
void *text;
if (ecap == (PyObject *)1)
return;
if (ecap == NULL)
goto error;
s = PyRun_String("done()", Py_eval_input, ecap, ecap);
if (s == NULL)
goto error;
/* Show a dialog box, but in a background thread, and
never show multiple dialog boxes at once. */
#if PY_MAJOR_VERSION >= 3
text = PyUnicode_AsWideCharString(s, NULL);
#else
text = PyString_AsString(s);
#endif
_cffi_bootstrap_text = text;
if (text != NULL)
{
HANDLE h;
h = CreateThread(NULL, 0, _cffi_bootstrap_dialog,
NULL, 0, NULL);
if (h != NULL)
CloseHandle(h);
}
/* decref the string, but it should stay alive as 'fl.buf'
in the small module above. It will really be freed only if
we later get another similar error. So it's a leak of at
most one copy of the small module. That's fine for this
situation which is usually a "fatal error" anyway. */
Py_DECREF(s);
PyErr_Clear();
return;
error:
_cffi_bootstrap_text = NULL;
PyErr_Clear();
}
#else
static PyObject *_cffi_start_error_capture(void) { return NULL; }
static void _cffi_stop_error_capture(PyObject *ecap) { }
#endif
| 3,908 | C | 25.06 | 77 | 0.594933 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/lock.py | import sys
if sys.version_info < (3,):
try:
from thread import allocate_lock
except ImportError:
from dummy_thread import allocate_lock
else:
try:
from _thread import allocate_lock
except ImportError:
from _dummy_thread import allocate_lock
##import sys
##l1 = allocate_lock
##class allocate_lock(object):
## def __init__(self):
## self._real = l1()
## def __enter__(self):
## for i in range(4, 0, -1):
## print sys._getframe(i).f_code
## print
## return self._real.__enter__()
## def __exit__(self, *args):
## return self._real.__exit__(*args)
## def acquire(self, f):
## assert f is False
## return self._real.acquire(f)
| 747 | Python | 23.129032 | 47 | 0.56091 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/ffiplatform.py | import sys, os
from .error import VerificationError
LIST_OF_FILE_NAMES = ['sources', 'include_dirs', 'library_dirs',
'extra_objects', 'depends']
def get_extension(srcfilename, modname, sources=(), **kwds):
_hack_at_distutils()
from distutils.core import Extension
allsources = [srcfilename]
for src in sources:
allsources.append(os.path.normpath(src))
return Extension(name=modname, sources=allsources, **kwds)
def compile(tmpdir, ext, compiler_verbose=0, debug=None):
"""Compile a C extension module using distutils."""
_hack_at_distutils()
saved_environ = os.environ.copy()
try:
outputfilename = _build(tmpdir, ext, compiler_verbose, debug)
outputfilename = os.path.abspath(outputfilename)
finally:
# workaround for a distutils bug where some env vars can
# become longer and longer every time it is used
for key, value in saved_environ.items():
if os.environ.get(key) != value:
os.environ[key] = value
return outputfilename
def _build(tmpdir, ext, compiler_verbose=0, debug=None):
# XXX compact but horrible :-(
from distutils.core import Distribution
import distutils.errors, distutils.log
#
dist = Distribution({'ext_modules': [ext]})
dist.parse_config_files()
options = dist.get_option_dict('build_ext')
if debug is None:
debug = sys.flags.debug
options['debug'] = ('ffiplatform', debug)
options['force'] = ('ffiplatform', True)
options['build_lib'] = ('ffiplatform', tmpdir)
options['build_temp'] = ('ffiplatform', tmpdir)
#
try:
old_level = distutils.log.set_threshold(0) or 0
try:
distutils.log.set_verbosity(compiler_verbose)
dist.run_command('build_ext')
cmd_obj = dist.get_command_obj('build_ext')
[soname] = cmd_obj.get_outputs()
finally:
distutils.log.set_threshold(old_level)
except (distutils.errors.CompileError,
distutils.errors.LinkError) as e:
raise VerificationError('%s: %s' % (e.__class__.__name__, e))
#
return soname
try:
from os.path import samefile
except ImportError:
def samefile(f1, f2):
return os.path.abspath(f1) == os.path.abspath(f2)
def maybe_relative_path(path):
if not os.path.isabs(path):
return path # already relative
dir = path
names = []
while True:
prevdir = dir
dir, name = os.path.split(prevdir)
if dir == prevdir or not dir:
return path # failed to make it relative
names.append(name)
try:
if samefile(dir, os.curdir):
names.reverse()
return os.path.join(*names)
except OSError:
pass
# ____________________________________________________________
try:
int_or_long = (int, long)
import cStringIO
except NameError:
int_or_long = int # Python 3
import io as cStringIO
def _flatten(x, f):
if isinstance(x, str):
f.write('%ds%s' % (len(x), x))
elif isinstance(x, dict):
keys = sorted(x.keys())
f.write('%dd' % len(keys))
for key in keys:
_flatten(key, f)
_flatten(x[key], f)
elif isinstance(x, (list, tuple)):
f.write('%dl' % len(x))
for value in x:
_flatten(value, f)
elif isinstance(x, int_or_long):
f.write('%di' % (x,))
else:
raise TypeError(
"the keywords to verify() contain an unsupported object %r" % (x,))
def flatten(x):
f = cStringIO.StringIO()
_flatten(x, f)
return f.getvalue()
def _hack_at_distutils():
# Windows-only workaround for some configurations: see
# https://bugs.python.org/issue23246 (Python 2.7 with
# a specific MS compiler suite download)
if sys.platform == "win32":
try:
import setuptools # for side-effects, patches distutils
except ImportError:
pass
| 4,046 | Python | 30.617187 | 77 | 0.587988 |
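As a small illustration of the deterministic serialization implemented by _flatten()/flatten() above (used to hash the keyword arguments of verify()), here is a quick sketch; it assumes cffi is importable and only demonstrates the encoding:
from cffi.ffiplatform import flatten

# dict keys are sorted, so the output is stable across runs
print(flatten({"libraries": ["m"], "define_macros": [("NDEBUG", 1)]}))
# -> '2d13sdefine_macros1l2l6sNDEBUG1i9slibraries1l1sm'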
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/_embedding.h |
/***** Support code for embedding *****/
#ifdef __cplusplus
extern "C" {
#endif
#if defined(_WIN32)
# define CFFI_DLLEXPORT __declspec(dllexport)
#elif defined(__GNUC__)
# define CFFI_DLLEXPORT __attribute__((visibility("default")))
#else
# define CFFI_DLLEXPORT /* nothing */
#endif
/* There are two global variables of type _cffi_call_python_fnptr:
* _cffi_call_python, which we declare just below, is the one called
by ``extern "Python"`` implementations.
* _cffi_call_python_org, which on CPython is actually part of the
_cffi_exports[] array, is the function pointer copied from
_cffi_backend. If _cffi_start_python() fails, then this is set
to NULL; otherwise, it should never be NULL.
After initialization is complete, both are equal. However, the
first one remains equal to &_cffi_start_and_call_python until the
very end of initialization, when we are (or should be) sure that
concurrent threads also see a completely initialized world, and
only then is it changed.
*/
#undef _cffi_call_python
typedef void (*_cffi_call_python_fnptr)(struct _cffi_externpy_s *, char *);
static void _cffi_start_and_call_python(struct _cffi_externpy_s *, char *);
static _cffi_call_python_fnptr _cffi_call_python = &_cffi_start_and_call_python;
#ifndef _MSC_VER
/* --- Assuming a GCC not infinitely old --- */
# define cffi_compare_and_swap(l,o,n) __sync_bool_compare_and_swap(l,o,n)
# define cffi_write_barrier() __sync_synchronize()
# if !defined(__amd64__) && !defined(__x86_64__) && \
!defined(__i386__) && !defined(__i386)
# define cffi_read_barrier() __sync_synchronize()
# else
# define cffi_read_barrier() (void)0
# endif
#else
/* --- Windows threads version --- */
# include <Windows.h>
# define cffi_compare_and_swap(l,o,n) \
(InterlockedCompareExchangePointer(l,n,o) == (o))
# define cffi_write_barrier() InterlockedCompareExchange(&_cffi_dummy,0,0)
# define cffi_read_barrier() (void)0
static volatile LONG _cffi_dummy;
#endif
#ifdef WITH_THREAD
# ifndef _MSC_VER
# include <pthread.h>
static pthread_mutex_t _cffi_embed_startup_lock;
# else
static CRITICAL_SECTION _cffi_embed_startup_lock;
# endif
static char _cffi_embed_startup_lock_ready = 0;
#endif
static void _cffi_acquire_reentrant_mutex(void)
{
static void *volatile lock = NULL;
while (!cffi_compare_and_swap(&lock, NULL, (void *)1)) {
/* should ideally do a spin loop instruction here, but
hard to do it portably and doesn't really matter I
think: pthread_mutex_init() should be very fast, and
this is only run at start-up anyway. */
}
#ifdef WITH_THREAD
if (!_cffi_embed_startup_lock_ready) {
# ifndef _MSC_VER
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
pthread_mutex_init(&_cffi_embed_startup_lock, &attr);
# else
InitializeCriticalSection(&_cffi_embed_startup_lock);
# endif
_cffi_embed_startup_lock_ready = 1;
}
#endif
while (!cffi_compare_and_swap(&lock, (void *)1, NULL))
;
#ifndef _MSC_VER
pthread_mutex_lock(&_cffi_embed_startup_lock);
#else
EnterCriticalSection(&_cffi_embed_startup_lock);
#endif
}
static void _cffi_release_reentrant_mutex(void)
{
#ifndef _MSC_VER
pthread_mutex_unlock(&_cffi_embed_startup_lock);
#else
LeaveCriticalSection(&_cffi_embed_startup_lock);
#endif
}
/********** CPython-specific section **********/
#ifndef PYPY_VERSION
#include "_cffi_errors.h"
#define _cffi_call_python_org _cffi_exports[_CFFI_CPIDX]
PyMODINIT_FUNC _CFFI_PYTHON_STARTUP_FUNC(void); /* forward */
static void _cffi_py_initialize(void)
{
/* XXX use initsigs=0, which "skips initialization registration of
signal handlers, which might be useful when Python is
embedded" according to the Python docs. But review and think
if it should be a user-controllable setting.
XXX we should also give a way to write errors to a buffer
instead of to stderr.
XXX if importing 'site' fails, CPython (any version) calls
exit(). Should we try to work around this behavior here?
*/
Py_InitializeEx(0);
}
static int _cffi_initialize_python(void)
{
/* This initializes Python, imports _cffi_backend, and then the
present .dll/.so is set up as a CPython C extension module.
*/
int result;
PyGILState_STATE state;
PyObject *pycode=NULL, *global_dict=NULL, *x;
PyObject *builtins;
state = PyGILState_Ensure();
/* Call the initxxx() function from the present module. It will
create and initialize us as a CPython extension module, instead
of letting the startup Python code do it---it might reimport
the same .dll/.so and get maybe confused on some platforms.
It might also have troubles locating the .dll/.so again for all
I know.
*/
(void)_CFFI_PYTHON_STARTUP_FUNC();
if (PyErr_Occurred())
goto error;
/* Now run the Python code provided to ffi.embedding_init_code().
*/
pycode = Py_CompileString(_CFFI_PYTHON_STARTUP_CODE,
"<init code for '" _CFFI_MODULE_NAME "'>",
Py_file_input);
if (pycode == NULL)
goto error;
global_dict = PyDict_New();
if (global_dict == NULL)
goto error;
builtins = PyEval_GetBuiltins();
if (builtins == NULL)
goto error;
if (PyDict_SetItemString(global_dict, "__builtins__", builtins) < 0)
goto error;
x = PyEval_EvalCode(
#if PY_MAJOR_VERSION < 3
(PyCodeObject *)
#endif
pycode, global_dict, global_dict);
if (x == NULL)
goto error;
Py_DECREF(x);
/* Done! Now if we've been called from
_cffi_start_and_call_python() in an ``extern "Python"``, we can
only hope that the Python code did correctly set up the
corresponding @ffi.def_extern() function. Otherwise, the
general logic of ``extern "Python"`` functions (inside the
_cffi_backend module) will find that the reference is still
missing and print an error.
*/
result = 0;
done:
Py_XDECREF(pycode);
Py_XDECREF(global_dict);
PyGILState_Release(state);
return result;
error:;
{
/* Print as much information as potentially useful.
Debugging load-time failures with embedding is not fun
*/
PyObject *ecap;
PyObject *exception, *v, *tb, *f, *modules, *mod;
PyErr_Fetch(&exception, &v, &tb);
ecap = _cffi_start_error_capture();
f = PySys_GetObject((char *)"stderr");
if (f != NULL && f != Py_None) {
PyFile_WriteString(
"Failed to initialize the Python-CFFI embedding logic:\n\n", f);
}
if (exception != NULL) {
PyErr_NormalizeException(&exception, &v, &tb);
PyErr_Display(exception, v, tb);
}
Py_XDECREF(exception);
Py_XDECREF(v);
Py_XDECREF(tb);
if (f != NULL && f != Py_None) {
PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
"\ncompiled with cffi version: 1.15.1"
"\n_cffi_backend module: ", f);
modules = PyImport_GetModuleDict();
mod = PyDict_GetItemString(modules, "_cffi_backend");
if (mod == NULL) {
PyFile_WriteString("not loaded", f);
}
else {
v = PyObject_GetAttrString(mod, "__file__");
PyFile_WriteObject(v, f, 0);
Py_XDECREF(v);
}
PyFile_WriteString("\nsys.path: ", f);
PyFile_WriteObject(PySys_GetObject((char *)"path"), f, 0);
PyFile_WriteString("\n\n", f);
}
_cffi_stop_error_capture(ecap);
}
result = -1;
goto done;
}
#if PY_VERSION_HEX < 0x03080000
PyAPI_DATA(char *) _PyParser_TokenNames[]; /* from CPython */
#endif
static int _cffi_carefully_make_gil(void)
{
/* This does the basic initialization of Python. It can be called
completely concurrently from unrelated threads. It assumes
that we don't hold the GIL before (if it exists), and we don't
hold it afterwards.
(What it really does used to be completely different in Python 2
and Python 3, with the Python 2 solution avoiding the spin-lock
around the Py_InitializeEx() call. However, after recent changes
to CPython 2.7 (issue #358) it no longer works. So we use the
Python 3 solution everywhere.)
This initializes Python by calling Py_InitializeEx().
Important: this must not be called concurrently at all.
So we use a global variable as a simple spin lock. This global
variable must be from 'libpythonX.Y.so', not from this
cffi-based extension module, because it must be shared from
different cffi-based extension modules.
In Python < 3.8, we choose
_PyParser_TokenNames[0] as a completely arbitrary pointer value
that is never written to. The default is to point to the
string "ENDMARKER". We change it temporarily to point to the
next character in that string. (Yes, I know it's REALLY
obscure.)
In Python >= 3.8, this string array is no longer writable, so
instead we pick PyCapsuleType.tp_version_tag. We can't change
Python < 3.8 because someone might use a mixture of cffi
embedded modules, some of which were compiled before this file
changed.
*/
#ifdef WITH_THREAD
# if PY_VERSION_HEX < 0x03080000
char *volatile *lock = (char *volatile *)_PyParser_TokenNames;
char *old_value, *locked_value;
while (1) { /* spin loop */
old_value = *lock;
locked_value = old_value + 1;
if (old_value[0] == 'E') {
assert(old_value[1] == 'N');
if (cffi_compare_and_swap(lock, old_value, locked_value))
break;
}
else {
assert(old_value[0] == 'N');
/* should ideally do a spin loop instruction here, but
hard to do it portably and doesn't really matter I
think: PyEval_InitThreads() should be very fast, and
this is only run at start-up anyway. */
}
}
# else
int volatile *lock = (int volatile *)&PyCapsule_Type.tp_version_tag;
int old_value, locked_value;
assert(!(PyCapsule_Type.tp_flags & Py_TPFLAGS_HAVE_VERSION_TAG));
while (1) { /* spin loop */
old_value = *lock;
locked_value = -42;
if (old_value == 0) {
if (cffi_compare_and_swap(lock, old_value, locked_value))
break;
}
else {
assert(old_value == locked_value);
/* should ideally do a spin loop instruction here, but
hard to do it portably and doesn't really matter I
think: PyEval_InitThreads() should be very fast, and
this is only run at start-up anyway. */
}
}
# endif
#endif
/* call Py_InitializeEx() */
if (!Py_IsInitialized()) {
_cffi_py_initialize();
#if PY_VERSION_HEX < 0x03070000
PyEval_InitThreads();
#endif
PyEval_SaveThread(); /* release the GIL */
/* the returned tstate must be the one that has been stored into the
autoTLSkey by _PyGILState_Init() called from Py_Initialize(). */
}
else {
#if PY_VERSION_HEX < 0x03070000
/* PyEval_InitThreads() is always a no-op from CPython 3.7 */
PyGILState_STATE state = PyGILState_Ensure();
PyEval_InitThreads();
PyGILState_Release(state);
#endif
}
#ifdef WITH_THREAD
/* release the lock */
while (!cffi_compare_and_swap(lock, locked_value, old_value))
;
#endif
return 0;
}
/********** end CPython-specific section **********/
#else
/********** PyPy-specific section **********/
PyMODINIT_FUNC _CFFI_PYTHON_STARTUP_FUNC(const void *[]); /* forward */
static struct _cffi_pypy_init_s {
const char *name;
void *func; /* function pointer */
const char *code;
} _cffi_pypy_init = {
_CFFI_MODULE_NAME,
_CFFI_PYTHON_STARTUP_FUNC,
_CFFI_PYTHON_STARTUP_CODE,
};
extern int pypy_carefully_make_gil(const char *);
extern int pypy_init_embedded_cffi_module(int, struct _cffi_pypy_init_s *);
static int _cffi_carefully_make_gil(void)
{
return pypy_carefully_make_gil(_CFFI_MODULE_NAME);
}
static int _cffi_initialize_python(void)
{
return pypy_init_embedded_cffi_module(0xB011, &_cffi_pypy_init);
}
/********** end PyPy-specific section **********/
#endif
#ifdef __GNUC__
__attribute__((noinline))
#endif
static _cffi_call_python_fnptr _cffi_start_python(void)
{
/* Delicate logic to initialize Python. This function can be
called multiple times concurrently, e.g. when the process calls
its first ``extern "Python"`` functions in multiple threads at
once. It can also be called recursively, in which case we must
ignore it. We also have to consider what occurs if several
different cffi-based extensions reach this code in parallel
threads---it is a different copy of the code, then, and we
can't have any shared global variable unless it comes from
'libpythonX.Y.so'.
Idea:
* _cffi_carefully_make_gil(): "carefully" call
PyEval_InitThreads() (possibly with Py_InitializeEx() first).
* then we use a (local) custom lock to make sure that a call to this
cffi-based extension will wait if another call to the *same*
extension is running the initialization in another thread.
It is reentrant, so that a recursive call will not block, but
only one from a different thread.
* then we grab the GIL and (Python 2) we call Py_InitializeEx().
At this point, concurrent calls to Py_InitializeEx() are not
possible: we have the GIL.
* do the rest of the specific initialization, which may
temporarily release the GIL but not the custom lock.
Only release the custom lock when we are done.
*/
static char called = 0;
if (_cffi_carefully_make_gil() != 0)
return NULL;
_cffi_acquire_reentrant_mutex();
/* Here the GIL exists, but we don't have it. We're only protected
from concurrency by the reentrant mutex. */
/* This file only initializes the embedded module once, the first
time this is called, even if there are subinterpreters. */
if (!called) {
called = 1; /* invoke _cffi_initialize_python() only once,
but don't set '_cffi_call_python' right now,
otherwise concurrent threads won't call
this function at all (we need them to wait) */
if (_cffi_initialize_python() == 0) {
/* now initialization is finished. Switch to the fast-path. */
/* We would like nobody to see the new value of
'_cffi_call_python' without also seeing the rest of the
data initialized. However, this is not possible. But
the new value of '_cffi_call_python' is the function
'cffi_call_python()' from _cffi_backend. So: */
cffi_write_barrier();
/* ^^^ we put a write barrier here, and a corresponding
read barrier at the start of cffi_call_python(). This
ensures that after that read barrier, we see everything
done here before the write barrier.
*/
assert(_cffi_call_python_org != NULL);
_cffi_call_python = (_cffi_call_python_fnptr)_cffi_call_python_org;
}
else {
/* initialization failed. Reset this to NULL, even if it was
already set to some other value. Future calls to
_cffi_start_python() are still forced to occur, and will
always return NULL from now on. */
_cffi_call_python_org = NULL;
}
}
_cffi_release_reentrant_mutex();
return (_cffi_call_python_fnptr)_cffi_call_python_org;
}
static
void _cffi_start_and_call_python(struct _cffi_externpy_s *externpy, char *args)
{
_cffi_call_python_fnptr fnptr;
int current_err = errno;
#ifdef _MSC_VER
int current_lasterr = GetLastError();
#endif
fnptr = _cffi_start_python();
if (fnptr == NULL) {
fprintf(stderr, "function %s() called, but initialization code "
"failed. Returning 0.\n", externpy->name);
memset(args, 0, externpy->size_of_result);
}
#ifdef _MSC_VER
SetLastError(current_lasterr);
#endif
errno = current_err;
if (fnptr != NULL)
fnptr(externpy, args);
}
/* The cffi_start_python() function makes sure Python is initialized
and our cffi module is set up. It can be called manually from the
user C code. The same effect is obtained automatically from any
dll-exported ``extern "Python"`` function. This function returns
-1 if initialization failed, 0 if all is OK. */
_CFFI_UNUSED_FN
static int cffi_start_python(void)
{
if (_cffi_call_python == &_cffi_start_and_call_python) {
if (_cffi_start_python() == NULL)
return -1;
}
cffi_read_barrier();
return 0;
}
#undef cffi_compare_and_swap
#undef cffi_write_barrier
#undef cffi_read_barrier
#ifdef __cplusplus
}
#endif
| 17,680 | C | 32.42344 | 80 | 0.616686 |
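For context, a hedged sketch of the workflow that _embedding.h supports, using cffi's documented embedding_api()/embedding_init_code() interface; the module name _embedding_demo and the do_stuff function are illustrative assumptions:
# build_embedding.py -- compiles a DLL/.so whose exported do_stuff()
# triggers _cffi_start_python() above on its first call from C
import cffi

ffibuilder = cffi.FFI()
ffibuilder.embedding_api("int do_stuff(int, int);")
ffibuilder.set_source("_embedding_demo", "")
ffibuilder.embedding_init_code("""
    from _embedding_demo import ffi

    @ffi.def_extern()
    def do_stuff(x, y):
        # runs inside the interpreter initialized lazily by the C side
        return x + y
""")
ffibuilder.compile(target="embedding_demo.*", verbose=True)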
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycares/_version.py |
__version__ = '3.1.1'
| 23 | Python | 6.999998 | 21 | 0.434783 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycares/__init__.py |
from ._cares import ffi as _ffi, lib as _lib
import _cffi_backend # hint for bundler tools
if _lib.ARES_SUCCESS != _lib.ares_library_init(_lib.ARES_LIB_INIT_ALL):
raise RuntimeError('Could not initialize c-ares')
from . import errno
from .utils import ascii_bytes, maybe_str, parse_name
from ._version import __version__
import collections.abc
import socket
import math
import functools
import sys
exported_pycares_symbols = [
# Flag values
'ARES_FLAG_USEVC',
'ARES_FLAG_PRIMARY',
'ARES_FLAG_IGNTC',
'ARES_FLAG_NORECURSE',
'ARES_FLAG_STAYOPEN',
'ARES_FLAG_NOSEARCH',
'ARES_FLAG_NOALIASES',
'ARES_FLAG_NOCHECKRESP',
# Nameinfo flag values
'ARES_NI_NOFQDN',
'ARES_NI_NUMERICHOST',
'ARES_NI_NAMEREQD',
'ARES_NI_NUMERICSERV',
'ARES_NI_DGRAM',
'ARES_NI_TCP',
'ARES_NI_UDP',
'ARES_NI_SCTP',
'ARES_NI_DCCP',
'ARES_NI_NUMERICSCOPE',
'ARES_NI_LOOKUPHOST',
'ARES_NI_LOOKUPSERVICE',
'ARES_NI_IDN',
'ARES_NI_IDN_ALLOW_UNASSIGNED',
'ARES_NI_IDN_USE_STD3_ASCII_RULES',
# Bad socket
'ARES_SOCKET_BAD',
]
for symbol in exported_pycares_symbols:
globals()[symbol] = getattr(_lib, symbol)
exported_pycares_symbols_map = {
# Query types
"QUERY_TYPE_A" : "T_A",
"QUERY_TYPE_AAAA" : "T_AAAA",
"QUERY_TYPE_ANY" : "T_ANY",
"QUERY_TYPE_CNAME" : "T_CNAME",
"QUERY_TYPE_MX" : "T_MX",
"QUERY_TYPE_NAPTR" : "T_NAPTR",
"QUERY_TYPE_NS" : "T_NS",
"QUERY_TYPE_PTR" : "T_PTR",
"QUERY_TYPE_SOA" : "T_SOA",
"QUERY_TYPE_SRV" : "T_SRV",
"QUERY_TYPE_TXT" : "T_TXT",
}
for k, v in exported_pycares_symbols_map.items():
globals()[k] = getattr(_lib, v)
globals()['ARES_VERSION'] = maybe_str(_ffi.string(_lib.ares_version(_ffi.NULL)))
PYCARES_ADDRTTL_SIZE = 256
class AresError(Exception):
pass
# callback helpers
_global_set = set()
@_ffi.def_extern()
def _sock_state_cb(data, socket_fd, readable, writable):
sock_state_cb = _ffi.from_handle(data)
sock_state_cb(socket_fd, readable, writable)
@_ffi.def_extern()
def _host_cb(arg, status, timeouts, hostent):
callback = _ffi.from_handle(arg)
_global_set.discard(arg)
if status != _lib.ARES_SUCCESS:
result = None
else:
result = ares_host_result(hostent)
status = None
callback(result, status)
@_ffi.def_extern()
def _nameinfo_cb(arg, status, timeouts, node, service):
callback = _ffi.from_handle(arg)
_global_set.discard(arg)
if status != _lib.ARES_SUCCESS:
result = None
else:
result = ares_nameinfo_result(node, service)
status = None
callback(result, status)
@_ffi.def_extern()
def _query_cb(arg, status, timeouts, abuf, alen):
callback, query_type = _ffi.from_handle(arg)
_global_set.discard(arg)
if status == _lib.ARES_SUCCESS:
if query_type == _lib.T_ANY:
result = []
for qtype in (_lib.T_A, _lib.T_AAAA, _lib.T_CNAME, _lib.T_MX, _lib.T_NAPTR, _lib.T_NS, _lib.T_PTR, _lib.T_SOA, _lib.T_SRV, _lib.T_TXT):
r, status = parse_result(qtype, abuf, alen)
if status not in (None, _lib.ARES_ENODATA, _lib.ARES_EBADRESP):
result = None
break
if r is not None:
if isinstance(r, collections.abc.Iterable):
result.extend(r)
else:
result.append(r)
else:
status = None
else:
result, status = parse_result(query_type, abuf, alen)
else:
result = None
callback(result, status)
def parse_result(query_type, abuf, alen):
if query_type == _lib.T_A:
addrttls = _ffi.new("struct ares_addrttl[]", PYCARES_ADDRTTL_SIZE)
naddrttls = _ffi.new("int*", PYCARES_ADDRTTL_SIZE)
parse_status = _lib.ares_parse_a_reply(abuf, alen, _ffi.NULL, addrttls, naddrttls)
if parse_status != _lib.ARES_SUCCESS:
result = None
status = parse_status
else:
result = [ares_query_a_result(addrttls[i]) for i in range(naddrttls[0])]
status = None
elif query_type == _lib.T_AAAA:
addrttls = _ffi.new("struct ares_addr6ttl[]", PYCARES_ADDRTTL_SIZE)
naddrttls = _ffi.new("int*", PYCARES_ADDRTTL_SIZE)
parse_status = _lib.ares_parse_aaaa_reply(abuf, alen, _ffi.NULL, addrttls, naddrttls)
if parse_status != _lib.ARES_SUCCESS:
result = None
status = parse_status
else:
result = [ares_query_aaaa_result(addrttls[i]) for i in range(naddrttls[0])]
status = None
elif query_type == _lib.T_CNAME:
host = _ffi.new("struct hostent **")
parse_status = _lib.ares_parse_a_reply(abuf, alen, host, _ffi.NULL, _ffi.NULL)
if parse_status != _lib.ARES_SUCCESS:
result = None
status = parse_status
else:
result = ares_query_cname_result(host[0])
_lib.ares_free_hostent(host[0])
status = None
elif query_type == _lib.T_MX:
mx_reply = _ffi.new("struct ares_mx_reply **")
parse_status = _lib.ares_parse_mx_reply(abuf, alen, mx_reply)
if parse_status != _lib.ARES_SUCCESS:
result = None
status = parse_status
else:
result = []
mx_reply_ptr = mx_reply[0]
while mx_reply_ptr != _ffi.NULL:
result.append(ares_query_mx_result(mx_reply_ptr))
mx_reply_ptr = mx_reply_ptr.next
_lib.ares_free_data(mx_reply[0])
status = None
elif query_type == _lib.T_NAPTR:
naptr_reply = _ffi.new("struct ares_naptr_reply **")
parse_status = _lib.ares_parse_naptr_reply(abuf, alen, naptr_reply)
if parse_status != _lib.ARES_SUCCESS:
result = None
status = parse_status
else:
result = []
naptr_reply_ptr = naptr_reply[0]
while naptr_reply_ptr != _ffi.NULL:
result.append(ares_query_naptr_result(naptr_reply_ptr))
naptr_reply_ptr = naptr_reply_ptr.next
_lib.ares_free_data(naptr_reply[0])
status = None
elif query_type == _lib.T_NS:
hostent = _ffi.new("struct hostent **")
parse_status = _lib.ares_parse_ns_reply(abuf, alen, hostent)
if parse_status != _lib.ARES_SUCCESS:
result = None
status = parse_status
else:
result = []
host = hostent[0]
i = 0
while host.h_aliases[i] != _ffi.NULL:
result.append(ares_query_ns_result(host.h_aliases[i]))
i += 1
_lib.ares_free_hostent(host)
status = None
elif query_type == _lib.T_PTR:
hostent = _ffi.new("struct hostent **")
hostttl = _ffi.new("int*", PYCARES_ADDRTTL_SIZE)
parse_status = _lib.ares_parse_ptr_reply(abuf, alen, _ffi.NULL, 0, socket.AF_UNSPEC, hostent, hostttl)
if parse_status != _lib.ARES_SUCCESS:
result = None
status = parse_status
else:
aliases = []
host = hostent[0]
i = 0
while host.h_aliases[i] != _ffi.NULL:
aliases.append(maybe_str(_ffi.string(host.h_aliases[i])))
i += 1
result = ares_query_ptr_result(host, hostttl[0], aliases)
_lib.ares_free_hostent(host)
status = None
elif query_type == _lib.T_SOA:
soa_reply = _ffi.new("struct ares_soa_reply **")
parse_status = _lib.ares_parse_soa_reply(abuf, alen, soa_reply)
if parse_status != _lib.ARES_SUCCESS:
result = None
status = parse_status
else:
result = ares_query_soa_result(soa_reply[0])
_lib.ares_free_data(soa_reply[0])
status = None
elif query_type == _lib.T_SRV:
srv_reply = _ffi.new("struct ares_srv_reply **")
parse_status = _lib.ares_parse_srv_reply(abuf, alen, srv_reply)
if parse_status != _lib.ARES_SUCCESS:
result = None
status = parse_status
else:
result = []
srv_reply_ptr = srv_reply[0]
while srv_reply_ptr != _ffi.NULL:
result.append(ares_query_srv_result(srv_reply_ptr))
srv_reply_ptr = srv_reply_ptr.next
_lib.ares_free_data(srv_reply[0])
status = None
elif query_type == _lib.T_TXT:
txt_reply = _ffi.new("struct ares_txt_ext **")
parse_status = _lib.ares_parse_txt_reply_ext(abuf, alen, txt_reply)
if parse_status != _lib.ARES_SUCCESS:
result = None
status = parse_status
else:
result = []
txt_reply_ptr = txt_reply[0]
tmp_obj = None
while True:
if txt_reply_ptr == _ffi.NULL:
if tmp_obj is not None:
result.append(ares_query_txt_result(tmp_obj))
break
if txt_reply_ptr.record_start == 1:
if tmp_obj is not None:
result.append(ares_query_txt_result(tmp_obj))
tmp_obj = ares_query_txt_result_chunk(txt_reply_ptr)
else:
new_chunk = ares_query_txt_result_chunk(txt_reply_ptr)
tmp_obj.text += new_chunk.text
txt_reply_ptr = txt_reply_ptr.next
_lib.ares_free_data(txt_reply[0])
status = None
else:
raise ValueError("invalid query type specified")
return result, status
class Channel:
__qtypes__ = (_lib.T_A, _lib.T_AAAA, _lib.T_ANY, _lib.T_CNAME, _lib.T_MX, _lib.T_NAPTR, _lib.T_NS, _lib.T_PTR, _lib.T_SOA, _lib.T_SRV, _lib.T_TXT)
def __init__(self,
flags = None,
timeout = None,
tries = None,
ndots = None,
tcp_port = None,
udp_port = None,
servers = None,
domains = None,
lookups = None,
sock_state_cb = None,
socket_send_buffer_size = None,
socket_receive_buffer_size = None,
rotate = False,
local_ip = None,
local_dev = None,
resolvconf_path = None):
channel = _ffi.new("ares_channel *")
options = _ffi.new("struct ares_options *")
optmask = 0
if flags is not None:
options.flags = flags
optmask = optmask | _lib.ARES_OPT_FLAGS
if timeout is not None:
options.timeout = int(timeout * 1000)
optmask = optmask | _lib.ARES_OPT_TIMEOUTMS
if tries is not None:
options.tries = tries
optmask = optmask | _lib.ARES_OPT_TRIES
if ndots is not None:
options.ndots = ndots
optmask = optmask | _lib.ARES_OPT_NDOTS
if tcp_port is not None:
options.tcp_port = tcp_port
optmask = optmask | _lib.ARES_OPT_TCP_PORT
if udp_port is not None:
options.udp_port = udp_port
optmask = optmask | _lib.ARES_OPT_UDP_PORT
if socket_send_buffer_size is not None:
options.socket_send_buffer_size = socket_send_buffer_size
optmask = optmask | _lib.ARES_OPT_SOCK_SNDBUF
if socket_receive_buffer_size is not None:
options.socket_receive_buffer_size = socket_receive_buffer_size
optmask = optmask | _lib.ARES_OPT_SOCK_RCVBUF
if sock_state_cb:
if not callable(sock_state_cb):
raise TypeError("sock_state_cb is not callable")
userdata = _ffi.new_handle(sock_state_cb)
# This must be kept alive while the channel is alive.
self._sock_state_cb_handle = userdata
options.sock_state_cb = _lib._sock_state_cb
options.sock_state_cb_data = userdata
optmask = optmask | _lib.ARES_OPT_SOCK_STATE_CB
if lookups:
options.lookups = _ffi.new('char[]', ascii_bytes(lookups))
optmask = optmask | _lib.ARES_OPT_LOOKUPS
if domains:
strs = [_ffi.new("char[]", ascii_bytes(i)) for i in domains]
c = _ffi.new("char *[%d]" % (len(domains) + 1))
for i in range(len(domains)):
c[i] = strs[i]
options.domains = c
options.ndomains = len(domains)
optmask = optmask | _lib.ARES_OPT_DOMAINS
if rotate:
optmask = optmask | _lib.ARES_OPT_ROTATE
if resolvconf_path is not None:
optmask = optmask | _lib.ARES_OPT_RESOLVCONF
options.resolvconf_path = _ffi.new('char[]', ascii_bytes(resolvconf_path))
r = _lib.ares_init_options(channel, options, optmask)
if r != _lib.ARES_SUCCESS:
raise AresError('Failed to initialize c-ares channel')
self._channel = _ffi.gc(channel, lambda x: _lib.ares_destroy(x[0]))
if servers:
self.servers = servers
if local_ip:
self.set_local_ip(local_ip)
if local_dev:
self.set_local_dev(local_dev)
def cancel(self):
_lib.ares_cancel(self._channel[0])
@property
def servers(self):
servers = _ffi.new("struct ares_addr_node **")
r = _lib.ares_get_servers(self._channel[0], servers)
if r != _lib.ARES_SUCCESS:
raise AresError(r, errno.strerror(r))
server_list = []
server = _ffi.new("struct ares_addr_node **", servers[0])
while True:
if server == _ffi.NULL:
break
ip = _ffi.new("char []", _lib.INET6_ADDRSTRLEN)
s = server[0]
if _ffi.NULL != _lib.ares_inet_ntop(s.family, _ffi.addressof(s.addr), ip, _lib.INET6_ADDRSTRLEN):
server_list.append(maybe_str(_ffi.string(ip, _lib.INET6_ADDRSTRLEN)))
server = s.next
return server_list
@servers.setter
def servers(self, servers):
c = _ffi.new("struct ares_addr_node[%d]" % len(servers))
for i, server in enumerate(servers):
if _lib.ares_inet_pton(socket.AF_INET, ascii_bytes(server), _ffi.addressof(c[i].addr.addr4)) == 1:
c[i].family = socket.AF_INET
elif _lib.ares_inet_pton(socket.AF_INET6, ascii_bytes(server), _ffi.addressof(c[i].addr.addr6)) == 1:
c[i].family = socket.AF_INET6
else:
raise ValueError("invalid IP address")
if i > 0:
c[i - 1].next = _ffi.addressof(c[i])
r = _lib.ares_set_servers(self._channel[0], c)
if r != _lib.ARES_SUCCESS:
raise AresError(r, errno.strerror(r))
def getsock(self):
rfds = []
wfds = []
socks = _ffi.new("ares_socket_t [%d]" % _lib.ARES_GETSOCK_MAXNUM)
bitmask = _lib.ares_getsock(self._channel[0], socks, _lib.ARES_GETSOCK_MAXNUM)
for i in range(_lib.ARES_GETSOCK_MAXNUM):
if _lib.ARES_GETSOCK_READABLE(bitmask, i):
rfds.append(socks[i])
if _lib.ARES_GETSOCK_WRITABLE(bitmask, i):
wfds.append(socks[i])
return rfds, wfds
def process_fd(self, read_fd, write_fd):
_lib.ares_process_fd(self._channel[0], _ffi.cast("ares_socket_t", read_fd), _ffi.cast("ares_socket_t", write_fd))
def timeout(self, t = None):
maxtv = _ffi.NULL
tv = _ffi.new("struct timeval*")
if t is not None:
if t >= 0.0:
maxtv = _ffi.new("struct timeval*")
maxtv.tv_sec = int(math.floor(t))
maxtv.tv_usec = int(math.fmod(t, 1.0) * 1000000)
else:
raise ValueError("timeout needs to be a positive number or None")
_lib.ares_timeout(self._channel[0], maxtv, tv)
if tv == _ffi.NULL:
return 0.0
return (tv.tv_sec + tv.tv_usec / 1000000.0)
def gethostbyaddr(self, addr, callback):
if not callable(callback):
raise TypeError("a callable is required")
addr4 = _ffi.new("struct in_addr*")
addr6 = _ffi.new("struct ares_in6_addr*")
if _lib.ares_inet_pton(socket.AF_INET, ascii_bytes(addr), (addr4)) == 1:
address = addr4
family = socket.AF_INET
elif _lib.ares_inet_pton(socket.AF_INET6, ascii_bytes(addr), (addr6)) == 1:
address = addr6
family = socket.AF_INET6
else:
raise ValueError("invalid IP address")
userdata = _ffi.new_handle(callback)
_global_set.add(userdata)
_lib.ares_gethostbyaddr(self._channel[0], address, _ffi.sizeof(address[0]), family, _lib._host_cb, userdata)
def gethostbyname(self, name, family, callback):
if not callable(callback):
raise TypeError("a callable is required")
userdata = _ffi.new_handle(callback)
_global_set.add(userdata)
_lib.ares_gethostbyname(self._channel[0], parse_name(name), family, _lib._host_cb, userdata)
def query(self, name, query_type, callback):
self._do_query(_lib.ares_query, name, query_type, callback)
def search(self, name, query_type, callback):
self._do_query(_lib.ares_search, name, query_type, callback)
def _do_query(self, func, name, query_type, callback):
if not callable(callback):
raise TypeError('a callable is required')
if query_type not in self.__qtypes__:
raise ValueError('invalid query type specified')
userdata = _ffi.new_handle((callback, query_type))
_global_set.add(userdata)
func(self._channel[0], parse_name(name), _lib.C_IN, query_type, _lib._query_cb, userdata)
def set_local_ip(self, ip):
addr4 = _ffi.new("struct in_addr*")
addr6 = _ffi.new("struct ares_in6_addr*")
if _lib.ares_inet_pton(socket.AF_INET, ascii_bytes(ip), addr4) == 1:
_lib.ares_set_local_ip4(self._channel[0], socket.ntohl(addr4.s_addr))
elif _lib.ares_inet_pton(socket.AF_INET6, ascii_bytes(ip), addr6) == 1:
_lib.ares_set_local_ip6(self._channel[0], addr6)
else:
raise ValueError("invalid IP address")
def getnameinfo(self, ip_port, flags, callback):
if not callable(callback):
raise TypeError("a callable is required")
ip, port = ip_port
if port < 0 or port > 65535:
raise ValueError("port must be between 0 and 65535")
sa4 = _ffi.new("struct sockaddr_in*")
sa6 = _ffi.new("struct sockaddr_in6*")
if _lib.ares_inet_pton(socket.AF_INET, ascii_bytes(ip), _ffi.addressof(sa4.sin_addr)) == 1:
sa4.sin_family = socket.AF_INET
sa4.sin_port = socket.htons(port)
sa = sa4
elif _lib.ares_inet_pton(socket.AF_INET6, ascii_bytes(ip), _ffi.addressof(sa6.sin6_addr)) == 1:
sa6.sin6_family = socket.AF_INET6
sa6.sin6_port = socket.htons(port)
sa = sa6
else:
raise ValueError("invalid IP address")
userdata = _ffi.new_handle(callback)
_global_set.add(userdata)
_lib.ares_getnameinfo(self._channel[0], _ffi.cast("struct sockaddr*", sa), _ffi.sizeof(sa[0]), flags, _lib._nameinfo_cb, userdata)
def set_local_dev(self, dev):
_lib.ares_set_local_dev(self._channel[0], dev)
class AresResult:
__slots__ = ()
def __repr__(self):
attrs = ['%s=%s' % (a, getattr(self, a)) for a in self.__slots__]
return '<%s> %s' % (self.__class__.__name__, ', '.join(attrs))
# DNS query result types
#
class ares_query_a_result(AresResult):
__slots__ = ('host', 'ttl')
type = 'A'
def __init__(self, ares_addrttl):
buf = _ffi.new("char[]", _lib.INET6_ADDRSTRLEN)
_lib.ares_inet_ntop(socket.AF_INET, _ffi.addressof(ares_addrttl.ipaddr), buf, _lib.INET6_ADDRSTRLEN)
self.host = maybe_str(_ffi.string(buf, _lib.INET6_ADDRSTRLEN))
self.ttl = ares_addrttl.ttl
class ares_query_aaaa_result(AresResult):
__slots__ = ('host', 'ttl')
type = 'AAAA'
def __init__(self, ares_addrttl):
buf = _ffi.new("char[]", _lib.INET6_ADDRSTRLEN)
_lib.ares_inet_ntop(socket.AF_INET6, _ffi.addressof(ares_addrttl.ip6addr), buf, _lib.INET6_ADDRSTRLEN)
self.host = maybe_str(_ffi.string(buf, _lib.INET6_ADDRSTRLEN))
self.ttl = ares_addrttl.ttl
class ares_query_cname_result(AresResult):
__slots__ = ('cname', 'ttl')
type = 'CNAME'
def __init__(self, host):
self.cname = maybe_str(_ffi.string(host.h_name))
self.ttl = -1
class ares_query_mx_result(AresResult):
__slots__ = ('host', 'priority', 'ttl')
type = 'MX'
def __init__(self, mx):
self.host = maybe_str(_ffi.string(mx.host))
self.priority = mx.priority
self.ttl = mx.ttl
class ares_query_naptr_result(AresResult):
__slots__ = ('order', 'preference', 'flags', 'service', 'regex', 'replacement', 'ttl')
type = 'NAPTR'
def __init__(self, naptr):
self.order = naptr.order
self.preference = naptr.preference
self.flags = maybe_str(_ffi.string(naptr.flags))
self.service = maybe_str(_ffi.string(naptr.service))
self.regex = maybe_str(_ffi.string(naptr.regexp))
self.replacement = maybe_str(_ffi.string(naptr.replacement))
self.ttl = naptr.ttl
class ares_query_ns_result(AresResult):
__slots__ = ('host', 'ttl')
type = 'NS'
def __init__(self, ns):
self.host = maybe_str(_ffi.string(ns))
self.ttl = -1
class ares_query_ptr_result(AresResult):
__slots__ = ('name', 'ttl', 'aliases')
type = 'PTR'
def __init__(self, hostent, ttl, aliases):
self.name = maybe_str(_ffi.string(hostent.h_name))
self.ttl = ttl
self.aliases = aliases
class ares_query_soa_result(AresResult):
__slots__ = ('nsname', 'hostmaster', 'serial', 'refresh', 'retry', 'expires', 'minttl', 'ttl')
type = 'SOA'
def __init__(self, soa):
self.nsname = maybe_str(_ffi.string(soa.nsname))
self.hostmaster = maybe_str(_ffi.string(soa.hostmaster))
self.serial = soa.serial
self.refresh = soa.refresh
self.retry = soa.retry
self.expires = soa.expire
self.minttl = soa.minttl
self.ttl = soa.ttl
class ares_query_srv_result(AresResult):
__slots__ = ('host', 'port', 'priority', 'weight', 'ttl')
type = 'SRV'
def __init__(self, srv):
self.host = maybe_str(_ffi.string(srv.host))
self.port = srv.port
self.priority = srv.priority
self.weight = srv.weight
self.ttl = srv.ttl
class ares_query_txt_result(AresResult):
__slots__ = ('text', 'ttl')
type = 'TXT'
def __init__(self, txt_chunk):
self.text = maybe_str(txt_chunk.text)
self.ttl = txt_chunk.ttl
class ares_query_txt_result_chunk(AresResult):
__slots__ = ('text', 'ttl')
type = 'TXT'
def __init__(self, txt):
self.text = _ffi.string(txt.txt)
self.ttl = txt.ttl
# Other result types
#
class ares_host_result(AresResult):
__slots__ = ('name', 'aliases', 'addresses')
def __init__(self, hostent):
self.name = maybe_str(_ffi.string(hostent.h_name))
self.aliases = []
self.addresses = []
i = 0
while hostent.h_aliases[i] != _ffi.NULL:
self.aliases.append(maybe_str(_ffi.string(hostent.h_aliases[i])))
i += 1
i = 0
while hostent.h_addr_list[i] != _ffi.NULL:
buf = _ffi.new("char[]", _lib.INET6_ADDRSTRLEN)
if _ffi.NULL != _lib.ares_inet_ntop(hostent.h_addrtype, hostent.h_addr_list[i], buf, _lib.INET6_ADDRSTRLEN):
self.addresses.append(maybe_str(_ffi.string(buf, _lib.INET6_ADDRSTRLEN)))
i += 1
class ares_nameinfo_result(AresResult):
__slots__ = ('node', 'service')
def __init__(self, node, service):
self.node = maybe_str(_ffi.string(node))
self.service = maybe_str(_ffi.string(service)) if service != _ffi.NULL else None
__all__ = exported_pycares_symbols + list(exported_pycares_symbols_map.keys()) + ['AresError', 'Channel', 'errno', '__version__']
del exported_pycares_symbols, exported_pycares_symbols_map
| 24,660 | Python | 32.921596 | 150 | 0.559124 |
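A minimal event loop driving the Channel defined above with select(); this is a trimmed-down sketch of the same pattern that the package's own __main__ module (later in this listing) implements, and example.com is just a placeholder name:
import select
import socket
import pycares

def on_result(result, error):
    # result is an ares_host_result on success, otherwise None plus an errno
    print(result, error)

channel = pycares.Channel()
channel.gethostbyname("example.com", socket.AF_INET, on_result)

while True:
    read_fds, write_fds = channel.getsock()
    if not read_fds and not write_fds:
        break
    timeout = channel.timeout(1.0)
    if not timeout:
        # let c-ares process internal timeouts, then poll again
        channel.process_fd(pycares.ARES_SOCKET_BAD, pycares.ARES_SOCKET_BAD)
        continue
    rlist, wlist, _ = select.select(read_fds, write_fds, [], timeout)
    for fd in rlist:
        channel.process_fd(fd, pycares.ARES_SOCKET_BAD)
    for fd in wlist:
        channel.process_fd(pycares.ARES_SOCKET_BAD, fd)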
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycares/utils.py |
try:
import idna as idna2008
except ImportError:
idna2008 = None
def ascii_bytes(data):
if isinstance(data, str):
return data.encode('ascii')
if isinstance(data, bytes):
return data
raise TypeError('only str (ascii encoding) and bytes are supported')
def maybe_str(data):
if isinstance(data, str):
return data
if isinstance(data, bytes):
try:
return data.decode('ascii')
except UnicodeDecodeError:
return data
raise TypeError('only str (ascii encoding) and bytes are supported')
def is_all_ascii(text):
for c in text:
if ord(c) > 0x7f:
return False
return True
def parse_name_idna2008(name):
parts = name.split('.')
r = []
for part in parts:
if is_all_ascii(part):
r.append(part.encode('ascii'))
else:
r.append(idna2008.encode(part))
return b'.'.join(r)
def parse_name(name):
if isinstance(name, str):
if is_all_ascii(name):
return name.encode('ascii')
if idna2008 is not None:
return parse_name_idna2008(name)
return name.encode('idna')
if isinstance(name, bytes):
return name
raise TypeError('only str and bytes are supported')
__all__ = ['ascii_bytes', 'maybe_str', 'parse_name']
| 1,343 | Python | 22.578947 | 72 | 0.596426 |
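A quick illustration (not part of the module) of the fallback chain in parse_name() above -- pure-ASCII fast path, then the optional idna package, then Python's built-in 'idna' codec:
from pycares.utils import parse_name

print(parse_name("example.com"))     # b'example.com' (ASCII fast path)
print(parse_name("bücher.example"))  # IDNA-encoded: b'xn--bcher-kva.example'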
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycares/errno.py |
from ._cares import ffi as _ffi, lib as _lib
from .utils import maybe_str
exported_pycares_symbols = [
'ARES_SUCCESS',
# error codes
'ARES_ENODATA',
'ARES_EFORMERR',
'ARES_ESERVFAIL',
'ARES_ENOTFOUND',
'ARES_ENOTIMP',
'ARES_EREFUSED',
'ARES_EBADQUERY',
'ARES_EBADNAME',
'ARES_EBADFAMILY',
'ARES_EBADRESP',
'ARES_ECONNREFUSED',
'ARES_ETIMEOUT',
'ARES_EOF',
'ARES_EFILE',
'ARES_ENOMEM',
'ARES_EDESTRUCTION',
'ARES_EBADSTR',
'ARES_EBADFLAGS',
'ARES_ENONAME',
'ARES_EBADHINTS',
'ARES_ENOTINITIALIZED',
'ARES_ELOADIPHLPAPI',
'ARES_EADDRGETNETWORKPARAMS',
'ARES_ECANCELLED',
]
errorcode = {}
for symbol in exported_pycares_symbols:
value = getattr(_lib, symbol)
globals()[symbol] = value
globals()["errorcode"][value] = symbol
def strerror(code):
return maybe_str(_ffi.string(_lib.ares_strerror(code)))
__all__ = exported_pycares_symbols + ['errorcode', 'strerror']
del exported_pycares_symbols
| 1,019 | Python | 19.4 | 62 | 0.632974 |
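A short demonstration (illustrative, not part of the module) of the errorcode mapping and strerror() helper defined above:
from pycares import errno

print(errno.errorcode[errno.ARES_ENOTFOUND])  # 'ARES_ENOTFOUND'
print(errno.strerror(errno.ARES_ENOTFOUND))   # human-readable message from c-ares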
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycares/__main__.py |
import collections.abc
import pycares
import select
import socket
import sys
def wait_channel(channel):
while True:
read_fds, write_fds = channel.getsock()
if not read_fds and not write_fds:
break
timeout = channel.timeout()
if not timeout:
channel.process_fd(pycares.ARES_SOCKET_BAD, pycares.ARES_SOCKET_BAD)
continue
rlist, wlist, xlist = select.select(read_fds, write_fds, [], timeout)
for fd in rlist:
channel.process_fd(fd, pycares.ARES_SOCKET_BAD)
for fd in wlist:
channel.process_fd(pycares.ARES_SOCKET_BAD, fd)
def cb(result, error):
if error is not None:
print('Error: (%d) %s' % (error, pycares.errno.strerror(error)))
else:
parts = [
';; QUESTION SECTION:',
';%s\t\t\tIN\t%s' % (hostname, qtype.upper()),
'',
';; ANSWER SECTION:'
]
if not isinstance(result, collections.abc.Iterable):
result = [result]
for r in result:
txt = '%s\t\t%d\tIN\t%s' % (hostname, r.ttl, r.type)
if r.type in ('A', 'AAAA'):
parts.append('%s\t%s' % (txt, r.host))
elif r.type == 'CNAME':
parts.append('%s\t%s' % (txt, r.cname))
elif r.type == 'MX':
parts.append('%s\t%d %s' % (txt, r.priority, r.host))
elif r.type == 'NAPTR':
parts.append('%s\t%d %d "%s" "%s" "%s" %s' % (txt, r.order, r.preference, r.flags, r.service, r.regex, r.replacement))
elif r.type == 'NS':
parts.append('%s\t%s' % (txt, r.host))
elif r.type == 'PTR':
parts.append('%s\t%s' % (txt, r.name))
elif r.type == 'SOA':
parts.append('%s\t%s %s %d %d %d %d %d' % (txt, r.nsname, r.hostmaster, r.serial, r.refresh, r.retry, r.expires, r.minttl))
elif r.type == 'SRV':
parts.append('%s\t%d %d %d %s' % (txt, r.priority, r.weight, r.port, r.host))
elif r.type == 'TXT':
parts.append('%s\t"%s"' % (txt, r.text))
print('\n'.join(parts))
channel = pycares.Channel()
if len(sys.argv) not in (2, 3):
print('Invalid arguments! Usage: python -m pycares [query_type] hostname')
sys.exit(1)
if len(sys.argv) == 2:
_, hostname = sys.argv
qtype = 'A'
else:
_, qtype, hostname = sys.argv
try:
query_type = getattr(pycares, 'QUERY_TYPE_%s' % qtype.upper())
except Exception:
print('Invalid query type: %s' % qtype)
sys.exit(1)
channel.query(hostname, query_type, cb)
wait_channel(channel)
| 2,674 | Python | 31.228915 | 139 | 0.526178 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/bin/watchmedo-script.py | #!C:\buildAgent\work\kit\kit\_build\target-deps\python\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'watchdog==0.10.4','console_scripts','watchmedo'
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = 'watchdog==0.10.4'
try:
from importlib.metadata import distribution
except ImportError:
try:
from importlib_metadata import distribution
except ImportError:
from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
dist_name, _, _ = spec.partition('==')
matches = (
entry_point
for entry_point in distribution(dist_name).entry_points
if entry_point.group == group and entry_point.name == name
)
return next(matches).load()
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(load_entry_point('watchdog==0.10.4', 'console_scripts', 'watchmedo')())
| 1,015 | Python | 28.882352 | 84 | 0.66798 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/bin/jp.py | #!C:\buildAgent\work\kit\kit\_build\target-deps\python\python.exe
import sys
import json
import argparse
from pprint import pformat
import jmespath
from jmespath import exceptions
def main():
parser = argparse.ArgumentParser()
parser.add_argument('expression')
parser.add_argument('-f', '--filename',
help=('The filename containing the input data. '
'If a filename is not given then data is '
'read from stdin.'))
parser.add_argument('--ast', action='store_true',
help=('Pretty print the AST, do not search the data.'))
args = parser.parse_args()
expression = args.expression
if args.ast:
# Only print the AST
expression = jmespath.compile(args.expression)
sys.stdout.write(pformat(expression.parsed))
sys.stdout.write('\n')
return 0
if args.filename:
with open(args.filename, 'r') as f:
data = json.load(f)
else:
data = sys.stdin.read()
data = json.loads(data)
try:
sys.stdout.write(json.dumps(
jmespath.search(expression, data), indent=4, ensure_ascii=False))
sys.stdout.write('\n')
except exceptions.ArityError as e:
sys.stderr.write("invalid-arity: %s\n" % e)
return 1
except exceptions.JMESPathTypeError as e:
sys.stderr.write("invalid-type: %s\n" % e)
return 1
except exceptions.UnknownFunctionError as e:
sys.stderr.write("unknown-function: %s\n" % e)
return 1
except exceptions.ParseError as e:
sys.stderr.write("syntax-error: %s\n" % e)
return 1
if __name__ == '__main__':
sys.exit(main())
| 1,742 | Python | 30.690909 | 79 | 0.594719 |
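The jp.py script above is a thin CLI around the jmespath library; the same search can be performed directly, as in this small sketch (the expression and data are made up for illustration):
import jmespath

data = {"people": [{"name": "ada"}, {"name": "grace"}]}
print(jmespath.search("people[*].name", data))  # ['ada', 'grace']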
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/__init__.py | # -*- coding: utf-8 -*-
"""
Charset-Normalizer
~~~~~~~~~~~~~~
The Real First Universal Charset Detector.
A library that helps you read text from an unknown charset encoding.
Motivated by chardet, this package is trying to resolve the issue by taking a new approach.
All IANA character set names for which the Python core library provides codecs are supported.
Basic usage:
>>> from charset_normalizer import from_bytes
>>> results = from_bytes('Bсеки човек има право на образование. Oбразованието!'.encode('utf_8'))
>>> best_guess = results.best()
>>> str(best_guess)
'Bсеки човек има право на образование. Oбразованието!'
Other methods and usages are available - see the full documentation
at <https://github.com/Ousret/charset_normalizer>.
:copyright: (c) 2021 by Ahmed TAHRI
:license: MIT, see LICENSE for more details.
"""
import logging
from .api import from_bytes, from_fp, from_path, normalize
from .legacy import (
CharsetDetector,
CharsetDoctor,
CharsetNormalizerMatch,
CharsetNormalizerMatches,
detect,
)
from .models import CharsetMatch, CharsetMatches
from .utils import set_logging_handler
from .version import VERSION, __version__
__all__ = (
"from_fp",
"from_path",
"from_bytes",
"normalize",
"detect",
"CharsetMatch",
"CharsetMatches",
"CharsetNormalizerMatch",
"CharsetNormalizerMatches",
"CharsetDetector",
"CharsetDoctor",
"__version__",
"VERSION",
"set_logging_handler",
)
# Attach a NullHandler to the top level logger by default
# https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger("charset_normalizer").addHandler(logging.NullHandler())
| 1,706 | Python | 28.947368 | 99 | 0.716295 |
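Beyond the from_bytes() example in the docstring above, the chardet-compatible detect() re-export can be used as follows; a minimal sketch, and the exact confidence value will vary:
from charset_normalizer import detect

payload = "Bсеки човек има право на образование.".encode("utf_8")
print(detect(payload))  # e.g. {'encoding': 'utf-8', 'language': ..., 'confidence': ...}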
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/version.py | """
Expose version
"""
__version__ = "2.1.1"
VERSION = __version__.split(".")
| 79 | Python | 10.42857 | 32 | 0.531646 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/utils.py | try:
# WARNING: unicodedata2 support is going to be removed in 3.0
# Python is quickly catching up.
import unicodedata2 as unicodedata
except ImportError:
import unicodedata # type: ignore[no-redef]
import importlib
import logging
from codecs import IncrementalDecoder
from encodings.aliases import aliases
from functools import lru_cache
from re import findall
from typing import Generator, List, Optional, Set, Tuple, Union
from _multibytecodec import MultibyteIncrementalDecoder
from .constant import (
ENCODING_MARKS,
IANA_SUPPORTED_SIMILAR,
RE_POSSIBLE_ENCODING_INDICATION,
UNICODE_RANGES_COMBINED,
UNICODE_SECONDARY_RANGE_KEYWORD,
UTF8_MAXIMAL_ALLOCATION,
)
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_accentuated(character: str) -> bool:
try:
description: str = unicodedata.name(character)
except ValueError:
return False
return (
"WITH GRAVE" in description
or "WITH ACUTE" in description
or "WITH CEDILLA" in description
or "WITH DIAERESIS" in description
or "WITH CIRCUMFLEX" in description
or "WITH TILDE" in description
)
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def remove_accent(character: str) -> str:
decomposed: str = unicodedata.decomposition(character)
if not decomposed:
return character
codes: List[str] = decomposed.split(" ")
return chr(int(codes[0], 16))
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def unicode_range(character: str) -> Optional[str]:
"""
Retrieve the official Unicode range name for a single character.
"""
character_ord: int = ord(character)
for range_name, ord_range in UNICODE_RANGES_COMBINED.items():
if character_ord in ord_range:
return range_name
return None
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_latin(character: str) -> bool:
try:
description: str = unicodedata.name(character)
except ValueError:
return False
return "LATIN" in description
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_ascii(character: str) -> bool:
try:
character.encode("ascii")
except UnicodeEncodeError:
return False
return True
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_punctuation(character: str) -> bool:
character_category: str = unicodedata.category(character)
if "P" in character_category:
return True
character_range: Optional[str] = unicode_range(character)
if character_range is None:
return False
return "Punctuation" in character_range
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_symbol(character: str) -> bool:
character_category: str = unicodedata.category(character)
if "S" in character_category or "N" in character_category:
return True
character_range: Optional[str] = unicode_range(character)
if character_range is None:
return False
return "Forms" in character_range
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_emoticon(character: str) -> bool:
character_range: Optional[str] = unicode_range(character)
if character_range is None:
return False
return "Emoticons" in character_range
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_separator(character: str) -> bool:
if character.isspace() or character in {"|", "+", ",", ";", "<", ">"}:
return True
character_category: str = unicodedata.category(character)
return "Z" in character_category
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_case_variable(character: str) -> bool:
return character.islower() != character.isupper()
def is_private_use_only(character: str) -> bool:
character_category: str = unicodedata.category(character)
return character_category == "Co"
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_cjk(character: str) -> bool:
try:
character_name = unicodedata.name(character)
except ValueError:
return False
return "CJK" in character_name
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_hiragana(character: str) -> bool:
try:
character_name = unicodedata.name(character)
except ValueError:
return False
return "HIRAGANA" in character_name
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_katakana(character: str) -> bool:
try:
character_name = unicodedata.name(character)
except ValueError:
return False
return "KATAKANA" in character_name
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_hangul(character: str) -> bool:
try:
character_name = unicodedata.name(character)
except ValueError:
return False
return "HANGUL" in character_name
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_thai(character: str) -> bool:
try:
character_name = unicodedata.name(character)
except ValueError:
return False
return "THAI" in character_name
@lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))
def is_unicode_range_secondary(range_name: str) -> bool:
return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD)
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_unprintable(character: str) -> bool:
return (
character.isspace() is False # includes \n \t \r \v
and character.isprintable() is False
        and character != "\x1A"  # Why? It's the ASCII substitute character.
        and character != "\ufeff"  # bug discovered in Python: Zero Width No-Break Space,
        # located in Arabic Presentation Forms-B (a space in Unicode 1.1), is not acknowledged as a space.
)
def any_specified_encoding(sequence: bytes, search_zone: int = 4096) -> Optional[str]:
"""
    Extract, using an ASCII-only decoder, any specified encoding in the first n bytes.
"""
if not isinstance(sequence, bytes):
raise TypeError
seq_len: int = len(sequence)
results: List[str] = findall(
RE_POSSIBLE_ENCODING_INDICATION,
sequence[: min(seq_len, search_zone)].decode("ascii", errors="ignore"),
)
if len(results) == 0:
return None
for specified_encoding in results:
specified_encoding = specified_encoding.lower().replace("-", "_")
encoding_alias: str
encoding_iana: str
for encoding_alias, encoding_iana in aliases.items():
if encoding_alias == specified_encoding:
return encoding_iana
if encoding_iana == specified_encoding:
return encoding_iana
return None
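# Illustrative sketch (editor's addition, not upstream code): a declarative
# charset mark inside an HTML/XML-like payload is extracted and normalized to
# its IANA name; payloads without any declaration yield None.
#
#   >>> any_specified_encoding(b'<meta charset="utf-8">')
#   'utf_8'
#   >>> any_specified_encoding(b'no declaration here') is None
#   True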
@lru_cache(maxsize=128)
def is_multi_byte_encoding(name: str) -> bool:
"""
    Verify if a specific encoding is a multi-byte one based on its IANA name.
"""
return name in {
"utf_8",
"utf_8_sig",
"utf_16",
"utf_16_be",
"utf_16_le",
"utf_32",
"utf_32_le",
"utf_32_be",
"utf_7",
} or issubclass(
importlib.import_module("encodings.{}".format(name)).IncrementalDecoder,
MultibyteIncrementalDecoder,
)
def identify_sig_or_bom(sequence: bytes) -> Tuple[Optional[str], bytes]:
"""
    Identify and extract a SIG/BOM from the given sequence.
"""
for iana_encoding in ENCODING_MARKS:
marks: Union[bytes, List[bytes]] = ENCODING_MARKS[iana_encoding]
if isinstance(marks, bytes):
marks = [marks]
for mark in marks:
if sequence.startswith(mark):
return iana_encoding, mark
return None, b""
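# Illustrative sketch (editor's addition, not upstream code): a payload written
# with the UTF-8 signature ("utf_8_sig") is recognized by its leading mark.
#
#   >>> identify_sig_or_bom("héllo".encode("utf_8_sig"))
#   ('utf_8', b'\xef\xbb\xbf')
#   >>> identify_sig_or_bom(b"hello")
#   (None, b'')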
def should_strip_sig_or_bom(iana_encoding: str) -> bool:
return iana_encoding not in {"utf_16", "utf_32"}
def iana_name(cp_name: str, strict: bool = True) -> str:
cp_name = cp_name.lower().replace("-", "_")
encoding_alias: str
encoding_iana: str
for encoding_alias, encoding_iana in aliases.items():
if cp_name in [encoding_alias, encoding_iana]:
return encoding_iana
if strict:
raise ValueError("Unable to retrieve IANA for '{}'".format(cp_name))
return cp_name
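# Illustrative sketch (editor's addition, not upstream code): common spellings
# are normalized through the stdlib aliases table; unknown names pass through
# only when strict is False.
#
#   >>> iana_name("UTF-8")
#   'utf_8'
#   >>> iana_name("windows-1252")
#   'cp1252'
#   >>> iana_name("x-unknown", strict=False)
#   'x_unknown'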
def range_scan(decoded_sequence: str) -> List[str]:
ranges: Set[str] = set()
for character in decoded_sequence:
character_range: Optional[str] = unicode_range(character)
if character_range is None:
continue
ranges.add(character_range)
return list(ranges)
def cp_similarity(iana_name_a: str, iana_name_b: str) -> float:
if is_multi_byte_encoding(iana_name_a) or is_multi_byte_encoding(iana_name_b):
return 0.0
decoder_a = importlib.import_module(
"encodings.{}".format(iana_name_a)
).IncrementalDecoder
decoder_b = importlib.import_module(
"encodings.{}".format(iana_name_b)
).IncrementalDecoder
id_a: IncrementalDecoder = decoder_a(errors="ignore")
id_b: IncrementalDecoder = decoder_b(errors="ignore")
character_match_count: int = 0
for i in range(255):
to_be_decoded: bytes = bytes([i])
if id_a.decode(to_be_decoded) == id_b.decode(to_be_decoded):
character_match_count += 1
return character_match_count / 254
def is_cp_similar(iana_name_a: str, iana_name_b: str) -> bool:
"""
    Determine if two code pages are at least 80% similar. The IANA_SUPPORTED_SIMILAR dict was generated using
    the function cp_similarity.
"""
return (
iana_name_a in IANA_SUPPORTED_SIMILAR
and iana_name_b in IANA_SUPPORTED_SIMILAR[iana_name_a]
)
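# Illustrative sketch (editor's addition, not upstream code): latin_1 and
# cp1252 decode most single bytes identically (they differ mainly in the
# 0x80-0x9F area), so cp_similarity yields a high ratio; is_cp_similar should
# then return True, assuming the pre-generated IANA_SUPPORTED_SIMILAR table
# lists the pair.
#
#   >>> cp_similarity("latin_1", "cp1252") > 0.8
#   True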
def set_logging_handler(
name: str = "charset_normalizer",
level: int = logging.INFO,
format_string: str = "%(asctime)s | %(levelname)s | %(message)s",
) -> None:
logger = logging.getLogger(name)
logger.setLevel(level)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(format_string))
logger.addHandler(handler)
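# Illustrative usage sketch (editor's addition, not upstream code): attach a
# StreamHandler so the library's internal logs become visible during debugging.
#
#   >>> import logging
#   >>> set_logging_handler(level=logging.DEBUG)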
def cut_sequence_chunks(
sequences: bytes,
encoding_iana: str,
offsets: range,
chunk_size: int,
bom_or_sig_available: bool,
strip_sig_or_bom: bool,
sig_payload: bytes,
is_multi_byte_decoder: bool,
decoded_payload: Optional[str] = None,
) -> Generator[str, None, None]:
if decoded_payload and is_multi_byte_decoder is False:
for i in offsets:
chunk = decoded_payload[i : i + chunk_size]
if not chunk:
break
yield chunk
else:
for i in offsets:
chunk_end = i + chunk_size
if chunk_end > len(sequences) + 8:
continue
cut_sequence = sequences[i : i + chunk_size]
if bom_or_sig_available and strip_sig_or_bom is False:
cut_sequence = sig_payload + cut_sequence
chunk = cut_sequence.decode(
encoding_iana,
errors="ignore" if is_multi_byte_decoder else "strict",
)
# multi-byte bad cutting detector and adjustment
# not the cleanest way to perform that fix but clever enough for now.
if is_multi_byte_decoder and i > 0 and sequences[i] >= 0x80:
chunk_partial_size_chk: int = min(chunk_size, 16)
if (
decoded_payload
and chunk[:chunk_partial_size_chk] not in decoded_payload
):
for j in range(i, i - 4, -1):
cut_sequence = sequences[j:chunk_end]
if bom_or_sig_available and strip_sig_or_bom is False:
cut_sequence = sig_payload + cut_sequence
chunk = cut_sequence.decode(encoding_iana, errors="ignore")
if chunk[:chunk_partial_size_chk] in decoded_payload:
break
yield chunk
| 11,769 | Python | 26.694118 | 115 | 0.639477 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/md.py | from functools import lru_cache
from typing import List, Optional
from .constant import COMMON_SAFE_ASCII_CHARACTERS, UNICODE_SECONDARY_RANGE_KEYWORD
from .utils import (
is_accentuated,
is_ascii,
is_case_variable,
is_cjk,
is_emoticon,
is_hangul,
is_hiragana,
is_katakana,
is_latin,
is_punctuation,
is_separator,
is_symbol,
is_thai,
is_unprintable,
remove_accent,
unicode_range,
)
class MessDetectorPlugin:
"""
Base abstract class used for mess detection plugins.
    All detectors MUST extend and implement the given methods.
"""
def eligible(self, character: str) -> bool:
"""
        Determine if the given character should be fed in.
"""
raise NotImplementedError # pragma: nocover
def feed(self, character: str) -> None:
"""
        The main routine to be executed upon each character.
        Insert the logic in which the text would be considered chaotic.
"""
raise NotImplementedError # pragma: nocover
def reset(self) -> None: # pragma: no cover
"""
        Reset the plugin to its initial state.
"""
raise NotImplementedError
@property
def ratio(self) -> float:
"""
Compute the chaos ratio based on what your feed() has seen.
        Must NOT be lower than 0.0; there is no upper restriction.
"""
raise NotImplementedError # pragma: nocover
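# Illustrative sketch (editor's addition, not upstream code): a minimal custom
# detector honouring the contract above. mess_ratio() instantiates every direct
# subclass of MessDetectorPlugin, so defining a subclass in an importing module
# is enough to register it. Hypothetical example:
#
#   class ReplacementCharacterPlugin(MessDetectorPlugin):
#       def __init__(self) -> None:
#           self._count = 0
#           self._character_count = 0
#
#       def eligible(self, character: str) -> bool:
#           return True
#
#       def feed(self, character: str) -> None:
#           self._character_count += 1
#           if character == "\ufffd":  # U+FFFD REPLACEMENT CHARACTER
#               self._count += 1
#
#       def reset(self) -> None:
#           self._count = 0
#           self._character_count = 0
#
#       @property
#       def ratio(self) -> float:
#           if self._character_count == 0:
#               return 0.0
#           return self._count / self._character_count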
class TooManySymbolOrPunctuationPlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._punctuation_count: int = 0
self._symbol_count: int = 0
self._character_count: int = 0
self._last_printable_char: Optional[str] = None
self._frenzy_symbol_in_word: bool = False
def eligible(self, character: str) -> bool:
return character.isprintable()
def feed(self, character: str) -> None:
self._character_count += 1
if (
character != self._last_printable_char
and character not in COMMON_SAFE_ASCII_CHARACTERS
):
if is_punctuation(character):
self._punctuation_count += 1
elif (
character.isdigit() is False
and is_symbol(character)
and is_emoticon(character) is False
):
self._symbol_count += 2
self._last_printable_char = character
def reset(self) -> None: # pragma: no cover
self._punctuation_count = 0
self._character_count = 0
self._symbol_count = 0
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.0
ratio_of_punctuation: float = (
self._punctuation_count + self._symbol_count
) / self._character_count
return ratio_of_punctuation if ratio_of_punctuation >= 0.3 else 0.0
class TooManyAccentuatedPlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._character_count: int = 0
self._accentuated_count: int = 0
def eligible(self, character: str) -> bool:
return character.isalpha()
def feed(self, character: str) -> None:
self._character_count += 1
if is_accentuated(character):
self._accentuated_count += 1
def reset(self) -> None: # pragma: no cover
self._character_count = 0
self._accentuated_count = 0
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.0
ratio_of_accentuation: float = self._accentuated_count / self._character_count
return ratio_of_accentuation if ratio_of_accentuation >= 0.35 else 0.0
class UnprintablePlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._unprintable_count: int = 0
self._character_count: int = 0
def eligible(self, character: str) -> bool:
return True
def feed(self, character: str) -> None:
if is_unprintable(character):
self._unprintable_count += 1
self._character_count += 1
def reset(self) -> None: # pragma: no cover
self._unprintable_count = 0
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.0
return (self._unprintable_count * 8) / self._character_count
class SuspiciousDuplicateAccentPlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._successive_count: int = 0
self._character_count: int = 0
self._last_latin_character: Optional[str] = None
def eligible(self, character: str) -> bool:
return character.isalpha() and is_latin(character)
def feed(self, character: str) -> None:
self._character_count += 1
if (
self._last_latin_character is not None
and is_accentuated(character)
and is_accentuated(self._last_latin_character)
):
if character.isupper() and self._last_latin_character.isupper():
self._successive_count += 1
            # Worse if it's the same char duplicated with a different accent.
if remove_accent(character) == remove_accent(self._last_latin_character):
self._successive_count += 1
self._last_latin_character = character
def reset(self) -> None: # pragma: no cover
self._successive_count = 0
self._character_count = 0
self._last_latin_character = None
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.0
return (self._successive_count * 2) / self._character_count
class SuspiciousRange(MessDetectorPlugin):
def __init__(self) -> None:
self._suspicious_successive_range_count: int = 0
self._character_count: int = 0
self._last_printable_seen: Optional[str] = None
def eligible(self, character: str) -> bool:
return character.isprintable()
def feed(self, character: str) -> None:
self._character_count += 1
if (
character.isspace()
or is_punctuation(character)
or character in COMMON_SAFE_ASCII_CHARACTERS
):
self._last_printable_seen = None
return
if self._last_printable_seen is None:
self._last_printable_seen = character
return
unicode_range_a: Optional[str] = unicode_range(self._last_printable_seen)
unicode_range_b: Optional[str] = unicode_range(character)
if is_suspiciously_successive_range(unicode_range_a, unicode_range_b):
self._suspicious_successive_range_count += 1
self._last_printable_seen = character
def reset(self) -> None: # pragma: no cover
self._character_count = 0
self._suspicious_successive_range_count = 0
self._last_printable_seen = None
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.0
ratio_of_suspicious_range_usage: float = (
self._suspicious_successive_range_count * 2
) / self._character_count
if ratio_of_suspicious_range_usage < 0.1:
return 0.0
return ratio_of_suspicious_range_usage
class SuperWeirdWordPlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._word_count: int = 0
self._bad_word_count: int = 0
self._foreign_long_count: int = 0
self._is_current_word_bad: bool = False
self._foreign_long_watch: bool = False
self._character_count: int = 0
self._bad_character_count: int = 0
self._buffer: str = ""
self._buffer_accent_count: int = 0
def eligible(self, character: str) -> bool:
return True
def feed(self, character: str) -> None:
if character.isalpha():
self._buffer += character
if is_accentuated(character):
self._buffer_accent_count += 1
if (
self._foreign_long_watch is False
and (is_latin(character) is False or is_accentuated(character))
and is_cjk(character) is False
and is_hangul(character) is False
and is_katakana(character) is False
and is_hiragana(character) is False
and is_thai(character) is False
):
self._foreign_long_watch = True
return
if not self._buffer:
return
if (
character.isspace() or is_punctuation(character) or is_separator(character)
) and self._buffer:
self._word_count += 1
buffer_length: int = len(self._buffer)
self._character_count += buffer_length
if buffer_length >= 4:
if self._buffer_accent_count / buffer_length > 0.34:
self._is_current_word_bad = True
                # A word/buffer ending with an upper case accentuated letter is so rare
                # that we consider them all suspicious. Same weight as a foreign_long suspicion.
if is_accentuated(self._buffer[-1]) and self._buffer[-1].isupper():
self._foreign_long_count += 1
self._is_current_word_bad = True
if buffer_length >= 24 and self._foreign_long_watch:
self._foreign_long_count += 1
self._is_current_word_bad = True
if self._is_current_word_bad:
self._bad_word_count += 1
self._bad_character_count += len(self._buffer)
self._is_current_word_bad = False
self._foreign_long_watch = False
self._buffer = ""
self._buffer_accent_count = 0
elif (
character not in {"<", ">", "-", "=", "~", "|", "_"}
and character.isdigit() is False
and is_symbol(character)
):
self._is_current_word_bad = True
self._buffer += character
def reset(self) -> None: # pragma: no cover
self._buffer = ""
self._is_current_word_bad = False
self._foreign_long_watch = False
self._bad_word_count = 0
self._word_count = 0
self._character_count = 0
self._bad_character_count = 0
self._foreign_long_count = 0
@property
def ratio(self) -> float:
if self._word_count <= 10 and self._foreign_long_count == 0:
return 0.0
return self._bad_character_count / self._character_count
class CjkInvalidStopPlugin(MessDetectorPlugin):
"""
    GB (Chinese) based encodings often render the full stop incorrectly when the content does not fit, and
    this can be easily detected. We search for the overuse of '丅' and '丄'.
"""
def __init__(self) -> None:
self._wrong_stop_count: int = 0
self._cjk_character_count: int = 0
def eligible(self, character: str) -> bool:
return True
def feed(self, character: str) -> None:
if character in {"丅", "丄"}:
self._wrong_stop_count += 1
return
if is_cjk(character):
self._cjk_character_count += 1
def reset(self) -> None: # pragma: no cover
self._wrong_stop_count = 0
self._cjk_character_count = 0
@property
def ratio(self) -> float:
if self._cjk_character_count < 16:
return 0.0
return self._wrong_stop_count / self._cjk_character_count
class ArchaicUpperLowerPlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._buf: bool = False
self._character_count_since_last_sep: int = 0
self._successive_upper_lower_count: int = 0
self._successive_upper_lower_count_final: int = 0
self._character_count: int = 0
self._last_alpha_seen: Optional[str] = None
self._current_ascii_only: bool = True
def eligible(self, character: str) -> bool:
return True
def feed(self, character: str) -> None:
is_concerned = character.isalpha() and is_case_variable(character)
chunk_sep = is_concerned is False
if chunk_sep and self._character_count_since_last_sep > 0:
if (
self._character_count_since_last_sep <= 64
and character.isdigit() is False
and self._current_ascii_only is False
):
self._successive_upper_lower_count_final += (
self._successive_upper_lower_count
)
self._successive_upper_lower_count = 0
self._character_count_since_last_sep = 0
self._last_alpha_seen = None
self._buf = False
self._character_count += 1
self._current_ascii_only = True
return
if self._current_ascii_only is True and is_ascii(character) is False:
self._current_ascii_only = False
if self._last_alpha_seen is not None:
if (character.isupper() and self._last_alpha_seen.islower()) or (
character.islower() and self._last_alpha_seen.isupper()
):
if self._buf is True:
self._successive_upper_lower_count += 2
self._buf = False
else:
self._buf = True
else:
self._buf = False
self._character_count += 1
self._character_count_since_last_sep += 1
self._last_alpha_seen = character
def reset(self) -> None: # pragma: no cover
self._character_count = 0
self._character_count_since_last_sep = 0
self._successive_upper_lower_count = 0
self._successive_upper_lower_count_final = 0
self._last_alpha_seen = None
self._buf = False
self._current_ascii_only = True
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.0
return self._successive_upper_lower_count_final / self._character_count
@lru_cache(maxsize=1024)
def is_suspiciously_successive_range(
unicode_range_a: Optional[str], unicode_range_b: Optional[str]
) -> bool:
"""
    Determine if two Unicode ranges seen next to each other can be considered suspicious.
"""
if unicode_range_a is None or unicode_range_b is None:
return True
if unicode_range_a == unicode_range_b:
return False
if "Latin" in unicode_range_a and "Latin" in unicode_range_b:
return False
if "Emoticons" in unicode_range_a or "Emoticons" in unicode_range_b:
return False
# Latin characters can be accompanied with a combining diacritical mark
# eg. Vietnamese.
if ("Latin" in unicode_range_a or "Latin" in unicode_range_b) and (
"Combining" in unicode_range_a or "Combining" in unicode_range_b
):
return False
keywords_range_a, keywords_range_b = unicode_range_a.split(
" "
), unicode_range_b.split(" ")
for el in keywords_range_a:
if el in UNICODE_SECONDARY_RANGE_KEYWORD:
continue
if el in keywords_range_b:
return False
# Japanese Exception
range_a_jp_chars, range_b_jp_chars = (
unicode_range_a
in (
"Hiragana",
"Katakana",
),
unicode_range_b in ("Hiragana", "Katakana"),
)
if (range_a_jp_chars or range_b_jp_chars) and (
"CJK" in unicode_range_a or "CJK" in unicode_range_b
):
return False
if range_a_jp_chars and range_b_jp_chars:
return False
if "Hangul" in unicode_range_a or "Hangul" in unicode_range_b:
if "CJK" in unicode_range_a or "CJK" in unicode_range_b:
return False
if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin":
return False
# Chinese/Japanese use dedicated range for punctuation and/or separators.
if ("CJK" in unicode_range_a or "CJK" in unicode_range_b) or (
unicode_range_a in ["Katakana", "Hiragana"]
and unicode_range_b in ["Katakana", "Hiragana"]
):
if "Punctuation" in unicode_range_a or "Punctuation" in unicode_range_b:
return False
if "Forms" in unicode_range_a or "Forms" in unicode_range_b:
return False
return True
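# Illustrative sketch (editor's addition, not upstream code): range pairs that
# do or do not raise suspicion under the rules above.
#
#   >>> is_suspiciously_successive_range("Basic Latin", "Cyrillic")
#   True
#   >>> is_suspiciously_successive_range("Latin Extended-A", "Latin-1 Supplement")
#   False
#   >>> is_suspiciously_successive_range("Hiragana", "CJK Unified Ideographs")
#   False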
@lru_cache(maxsize=2048)
def mess_ratio(
decoded_sequence: str, maximum_threshold: float = 0.2, debug: bool = False
) -> float:
"""
    Compute a mess ratio given a decoded bytes sequence. The maximum threshold stops the computation early.
"""
detectors: List[MessDetectorPlugin] = [
md_class() for md_class in MessDetectorPlugin.__subclasses__()
]
length: int = len(decoded_sequence) + 1
mean_mess_ratio: float = 0.0
if length < 512:
intermediary_mean_mess_ratio_calc: int = 32
elif length <= 1024:
intermediary_mean_mess_ratio_calc = 64
else:
intermediary_mean_mess_ratio_calc = 128
for character, index in zip(decoded_sequence + "\n", range(length)):
for detector in detectors:
if detector.eligible(character):
detector.feed(character)
if (
index > 0 and index % intermediary_mean_mess_ratio_calc == 0
) or index == length - 1:
mean_mess_ratio = sum(dt.ratio for dt in detectors)
if mean_mess_ratio >= maximum_threshold:
break
if debug:
for dt in detectors: # pragma: nocover
print(dt.__class__, dt.ratio)
return round(mean_mess_ratio, 3)
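# Illustrative sketch (editor's addition, not upstream code): clean printable
# text scores zero, while typical mojibake accumulates plugin penalties and
# crosses the default 0.2 threshold.
#
#   >>> mess_ratio("This is a perfectly readable English sentence.")
#   0.0
#   >>> mess_ratio("ÃƒÂ©ÃƒÂ¨ÃƒÂ§ ÃƒÂ ÃƒÂ¹") > 0.2
#   True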
| 17,634 | Python | 30.83213 | 113 | 0.582568 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/legacy.py | import warnings
from typing import Dict, Optional, Union
from .api import from_bytes, from_fp, from_path, normalize
from .constant import CHARDET_CORRESPONDENCE
from .models import CharsetMatch, CharsetMatches
def detect(byte_str: bytes) -> Dict[str, Optional[Union[str, float]]]:
"""
    chardet legacy method.
    Detect the encoding of the given byte string. It should be mostly backward-compatible.
    The encoding name will match Chardet's own naming whenever possible (except for encoding names it does not support).
    This function is deprecated and should only be used to ease migration of your project; consult the documentation for
    further information. Not planned for removal.
:param byte_str: The byte sequence to examine.
"""
if not isinstance(byte_str, (bytearray, bytes)):
raise TypeError( # pragma: nocover
"Expected object of type bytes or bytearray, got: "
"{0}".format(type(byte_str))
)
if isinstance(byte_str, bytearray):
byte_str = bytes(byte_str)
r = from_bytes(byte_str).best()
encoding = r.encoding if r is not None else None
language = r.language if r is not None and r.language != "Unknown" else ""
confidence = 1.0 - r.chaos if r is not None else None
    # Note: CharsetNormalizer does not return 'UTF-8-SIG' as the sig gets stripped in the detection/normalization process
# but chardet does return 'utf-8-sig' and it is a valid codec name.
if r is not None and encoding == "utf_8" and r.bom:
encoding += "_sig"
return {
"encoding": encoding
if encoding not in CHARDET_CORRESPONDENCE
else CHARDET_CORRESPONDENCE[encoding],
"language": language,
"confidence": confidence,
}
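# Illustrative sketch (editor's addition, not upstream code): the returned
# mapping mirrors chardet's shape; exact values may vary between runs and
# library versions.
#
#   >>> detect("hello world".encode("ascii"))
#   {'encoding': 'ascii', 'language': 'English', 'confidence': 1.0}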
class CharsetNormalizerMatch(CharsetMatch):
pass
class CharsetNormalizerMatches(CharsetMatches):
@staticmethod
def from_fp(*args, **kwargs): # type: ignore
warnings.warn( # pragma: nocover
"staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
"and scheduled to be removed in 3.0",
DeprecationWarning,
)
return from_fp(*args, **kwargs) # pragma: nocover
@staticmethod
def from_bytes(*args, **kwargs): # type: ignore
warnings.warn( # pragma: nocover
"staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
"and scheduled to be removed in 3.0",
DeprecationWarning,
)
return from_bytes(*args, **kwargs) # pragma: nocover
@staticmethod
def from_path(*args, **kwargs): # type: ignore
warnings.warn( # pragma: nocover
"staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
"and scheduled to be removed in 3.0",
DeprecationWarning,
)
return from_path(*args, **kwargs) # pragma: nocover
@staticmethod
def normalize(*args, **kwargs): # type: ignore
warnings.warn( # pragma: nocover
"staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
"and scheduled to be removed in 3.0",
DeprecationWarning,
)
return normalize(*args, **kwargs) # pragma: nocover
class CharsetDetector(CharsetNormalizerMatches):
pass
class CharsetDoctor(CharsetNormalizerMatches):
pass
| 3,384 | Python | 34.260416 | 120 | 0.65396 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/api.py | import logging
import warnings
from os import PathLike
from os.path import basename, splitext
from typing import Any, BinaryIO, List, Optional, Set
from .cd import (
coherence_ratio,
encoding_languages,
mb_encoding_languages,
merge_coherence_ratios,
)
from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE
from .md import mess_ratio
from .models import CharsetMatch, CharsetMatches
from .utils import (
any_specified_encoding,
cut_sequence_chunks,
iana_name,
identify_sig_or_bom,
is_cp_similar,
is_multi_byte_encoding,
should_strip_sig_or_bom,
)
# Will most likely be controversial
# logging.addLevelName(TRACE, "TRACE")
logger = logging.getLogger("charset_normalizer")
explain_handler = logging.StreamHandler()
explain_handler.setFormatter(
logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
)
def from_bytes(
sequences: bytes,
steps: int = 5,
chunk_size: int = 512,
threshold: float = 0.2,
cp_isolation: Optional[List[str]] = None,
cp_exclusion: Optional[List[str]] = None,
preemptive_behaviour: bool = True,
explain: bool = False,
) -> CharsetMatches:
"""
    Given a raw bytes sequence, return the best possible charsets usable to render str objects.
    If there are no results, it is a strong indicator that the source is binary/not text.
    By default, the process will extract 5 blocks of 512 bytes each to assess the mess and coherence of a given sequence,
    and will give up on a particular code page after 20% of measured mess. Those criteria are customizable at will.
    The preemptive behaviour DOES NOT replace the traditional detection workflow; it prioritizes a particular code page
    but never takes it for granted. It can improve performance.
    You may want to focus your attention on some code pages and/or exclude others; use cp_isolation and cp_exclusion for
    that purpose.
    This function will strip the SIG from the payload/sequence every time except for UTF-16 and UTF-32.
    By default the library does not set up any handler other than the NullHandler; if you set the 'explain'
    toggle to True it will alter the logger configuration to add a StreamHandler suitable for debugging.
    A custom logging format and handler can be set manually.
"""
if not isinstance(sequences, (bytearray, bytes)):
raise TypeError(
"Expected object of type bytes or bytearray, got: {0}".format(
type(sequences)
)
)
if explain:
previous_logger_level: int = logger.level
logger.addHandler(explain_handler)
logger.setLevel(TRACE)
length: int = len(sequences)
if length == 0:
logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.")
if explain:
logger.removeHandler(explain_handler)
logger.setLevel(previous_logger_level or logging.WARNING)
return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")])
if cp_isolation is not None:
logger.log(
TRACE,
"cp_isolation is set. use this flag for debugging purpose. "
"limited list of encoding allowed : %s.",
", ".join(cp_isolation),
)
cp_isolation = [iana_name(cp, False) for cp in cp_isolation]
else:
cp_isolation = []
if cp_exclusion is not None:
logger.log(
TRACE,
"cp_exclusion is set. use this flag for debugging purpose. "
"limited list of encoding excluded : %s.",
", ".join(cp_exclusion),
)
cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion]
else:
cp_exclusion = []
if length <= (chunk_size * steps):
logger.log(
TRACE,
"override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.",
steps,
chunk_size,
length,
)
steps = 1
chunk_size = length
if steps > 1 and length / steps < chunk_size:
chunk_size = int(length / steps)
is_too_small_sequence: bool = len(sequences) < TOO_SMALL_SEQUENCE
is_too_large_sequence: bool = len(sequences) >= TOO_BIG_SEQUENCE
if is_too_small_sequence:
logger.log(
TRACE,
"Trying to detect encoding from a tiny portion of ({}) byte(s).".format(
length
),
)
elif is_too_large_sequence:
logger.log(
TRACE,
"Using lazy str decoding because the payload is quite large, ({}) byte(s).".format(
length
),
)
prioritized_encodings: List[str] = []
specified_encoding: Optional[str] = (
any_specified_encoding(sequences) if preemptive_behaviour else None
)
if specified_encoding is not None:
prioritized_encodings.append(specified_encoding)
logger.log(
TRACE,
"Detected declarative mark in sequence. Priority +1 given for %s.",
specified_encoding,
)
tested: Set[str] = set()
tested_but_hard_failure: List[str] = []
tested_but_soft_failure: List[str] = []
fallback_ascii: Optional[CharsetMatch] = None
fallback_u8: Optional[CharsetMatch] = None
fallback_specified: Optional[CharsetMatch] = None
results: CharsetMatches = CharsetMatches()
sig_encoding, sig_payload = identify_sig_or_bom(sequences)
if sig_encoding is not None:
prioritized_encodings.append(sig_encoding)
logger.log(
TRACE,
"Detected a SIG or BOM mark on first %i byte(s). Priority +1 given for %s.",
len(sig_payload),
sig_encoding,
)
prioritized_encodings.append("ascii")
if "utf_8" not in prioritized_encodings:
prioritized_encodings.append("utf_8")
for encoding_iana in prioritized_encodings + IANA_SUPPORTED:
if cp_isolation and encoding_iana not in cp_isolation:
continue
if cp_exclusion and encoding_iana in cp_exclusion:
continue
if encoding_iana in tested:
continue
tested.add(encoding_iana)
decoded_payload: Optional[str] = None
bom_or_sig_available: bool = sig_encoding == encoding_iana
strip_sig_or_bom: bool = bom_or_sig_available and should_strip_sig_or_bom(
encoding_iana
)
if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available:
logger.log(
TRACE,
"Encoding %s wont be tested as-is because it require a BOM. Will try some sub-encoder LE/BE.",
encoding_iana,
)
continue
try:
is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana)
except (ModuleNotFoundError, ImportError):
logger.log(
TRACE,
"Encoding %s does not provide an IncrementalDecoder",
encoding_iana,
)
continue
try:
if is_too_large_sequence and is_multi_byte_decoder is False:
str(
sequences[: int(50e4)]
if strip_sig_or_bom is False
else sequences[len(sig_payload) : int(50e4)],
encoding=encoding_iana,
)
else:
decoded_payload = str(
sequences
if strip_sig_or_bom is False
else sequences[len(sig_payload) :],
encoding=encoding_iana,
)
except (UnicodeDecodeError, LookupError) as e:
if not isinstance(e, LookupError):
logger.log(
TRACE,
"Code page %s does not fit given bytes sequence at ALL. %s",
encoding_iana,
str(e),
)
tested_but_hard_failure.append(encoding_iana)
continue
similar_soft_failure_test: bool = False
for encoding_soft_failed in tested_but_soft_failure:
if is_cp_similar(encoding_iana, encoding_soft_failed):
similar_soft_failure_test = True
break
if similar_soft_failure_test:
logger.log(
TRACE,
"%s is deemed too similar to code page %s and was consider unsuited already. Continuing!",
encoding_iana,
encoding_soft_failed,
)
continue
r_ = range(
0 if not bom_or_sig_available else len(sig_payload),
length,
int(length / steps),
)
multi_byte_bonus: bool = (
is_multi_byte_decoder
and decoded_payload is not None
and len(decoded_payload) < length
)
if multi_byte_bonus:
logger.log(
TRACE,
"Code page %s is a multi byte encoding table and it appear that at least one character "
"was encoded using n-bytes.",
encoding_iana,
)
max_chunk_gave_up: int = int(len(r_) / 4)
max_chunk_gave_up = max(max_chunk_gave_up, 2)
early_stop_count: int = 0
lazy_str_hard_failure = False
md_chunks: List[str] = []
md_ratios = []
try:
for chunk in cut_sequence_chunks(
sequences,
encoding_iana,
r_,
chunk_size,
bom_or_sig_available,
strip_sig_or_bom,
sig_payload,
is_multi_byte_decoder,
decoded_payload,
):
md_chunks.append(chunk)
md_ratios.append(mess_ratio(chunk, threshold))
if md_ratios[-1] >= threshold:
early_stop_count += 1
if (early_stop_count >= max_chunk_gave_up) or (
bom_or_sig_available and strip_sig_or_bom is False
):
break
except UnicodeDecodeError as e: # Lazy str loading may have missed something there
logger.log(
TRACE,
"LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s",
encoding_iana,
str(e),
)
early_stop_count = max_chunk_gave_up
lazy_str_hard_failure = True
# We might want to check the sequence again with the whole content
        # Only if the initial MD tests pass
if (
not lazy_str_hard_failure
and is_too_large_sequence
and not is_multi_byte_decoder
):
try:
sequences[int(50e3) :].decode(encoding_iana, errors="strict")
except UnicodeDecodeError as e:
logger.log(
TRACE,
"LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s",
encoding_iana,
str(e),
)
tested_but_hard_failure.append(encoding_iana)
continue
mean_mess_ratio: float = sum(md_ratios) / len(md_ratios) if md_ratios else 0.0
if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up:
tested_but_soft_failure.append(encoding_iana)
logger.log(
TRACE,
"%s was excluded because of initial chaos probing. Gave up %i time(s). "
"Computed mean chaos is %f %%.",
encoding_iana,
early_stop_count,
round(mean_mess_ratio * 100, ndigits=3),
)
# Preparing those fallbacks in case we got nothing.
if (
encoding_iana in ["ascii", "utf_8", specified_encoding]
and not lazy_str_hard_failure
):
fallback_entry = CharsetMatch(
sequences, encoding_iana, threshold, False, [], decoded_payload
)
if encoding_iana == specified_encoding:
fallback_specified = fallback_entry
elif encoding_iana == "ascii":
fallback_ascii = fallback_entry
else:
fallback_u8 = fallback_entry
continue
logger.log(
TRACE,
"%s passed initial chaos probing. Mean measured chaos is %f %%",
encoding_iana,
round(mean_mess_ratio * 100, ndigits=3),
)
if not is_multi_byte_decoder:
target_languages: List[str] = encoding_languages(encoding_iana)
else:
target_languages = mb_encoding_languages(encoding_iana)
if target_languages:
logger.log(
TRACE,
"{} should target any language(s) of {}".format(
encoding_iana, str(target_languages)
),
)
cd_ratios = []
        # We shall skip the CD when it's about ASCII
        # Most of the time it's not relevant to run "language-detection" on it.
if encoding_iana != "ascii":
for chunk in md_chunks:
chunk_languages = coherence_ratio(
chunk, 0.1, ",".join(target_languages) if target_languages else None
)
cd_ratios.append(chunk_languages)
cd_ratios_merged = merge_coherence_ratios(cd_ratios)
if cd_ratios_merged:
logger.log(
TRACE,
"We detected language {} using {}".format(
cd_ratios_merged, encoding_iana
),
)
results.append(
CharsetMatch(
sequences,
encoding_iana,
mean_mess_ratio,
bom_or_sig_available,
cd_ratios_merged,
decoded_payload,
)
)
if (
encoding_iana in [specified_encoding, "ascii", "utf_8"]
and mean_mess_ratio < 0.1
):
logger.debug(
"Encoding detection: %s is most likely the one.", encoding_iana
)
if explain:
logger.removeHandler(explain_handler)
logger.setLevel(previous_logger_level)
return CharsetMatches([results[encoding_iana]])
if encoding_iana == sig_encoding:
logger.debug(
"Encoding detection: %s is most likely the one as we detected a BOM or SIG within "
"the beginning of the sequence.",
encoding_iana,
)
if explain:
logger.removeHandler(explain_handler)
logger.setLevel(previous_logger_level)
return CharsetMatches([results[encoding_iana]])
if len(results) == 0:
if fallback_u8 or fallback_ascii or fallback_specified:
logger.log(
TRACE,
"Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback.",
)
if fallback_specified:
logger.debug(
"Encoding detection: %s will be used as a fallback match",
fallback_specified.encoding,
)
results.append(fallback_specified)
elif (
(fallback_u8 and fallback_ascii is None)
or (
fallback_u8
and fallback_ascii
and fallback_u8.fingerprint != fallback_ascii.fingerprint
)
or (fallback_u8 is not None)
):
logger.debug("Encoding detection: utf_8 will be used as a fallback match")
results.append(fallback_u8)
elif fallback_ascii:
logger.debug("Encoding detection: ascii will be used as a fallback match")
results.append(fallback_ascii)
if results:
logger.debug(
"Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.",
results.best().encoding, # type: ignore
len(results) - 1,
)
else:
logger.debug("Encoding detection: Unable to determine any suitable charset.")
if explain:
logger.removeHandler(explain_handler)
logger.setLevel(previous_logger_level)
return results
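# Illustrative usage sketch (editor's addition, not upstream code), assuming a
# UTF-8 payload; cp_isolation/cp_exclusion narrow the tested code pages and
# explain=True enables verbose logging.
#
#   >>> payload = "Ceci est un texte accentué, déjà vu.".encode("utf_8")
#   >>> best_guess = from_bytes(payload).best()
#   >>> best_guess.encoding
#   'utf_8'
#   >>> str(best_guess)
#   'Ceci est un texte accentué, déjà vu.'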
def from_fp(
fp: BinaryIO,
steps: int = 5,
chunk_size: int = 512,
threshold: float = 0.20,
cp_isolation: Optional[List[str]] = None,
cp_exclusion: Optional[List[str]] = None,
preemptive_behaviour: bool = True,
explain: bool = False,
) -> CharsetMatches:
"""
    Same thing as the function from_bytes but using a file pointer that is already ready.
Will not close the file pointer.
"""
return from_bytes(
fp.read(),
steps,
chunk_size,
threshold,
cp_isolation,
cp_exclusion,
preemptive_behaviour,
explain,
)
def from_path(
path: "PathLike[Any]",
steps: int = 5,
chunk_size: int = 512,
threshold: float = 0.20,
cp_isolation: Optional[List[str]] = None,
cp_exclusion: Optional[List[str]] = None,
preemptive_behaviour: bool = True,
explain: bool = False,
) -> CharsetMatches:
"""
    Same thing as the function from_bytes but with one extra step: opening and reading the given file path in binary mode.
Can raise IOError.
"""
with open(path, "rb") as fp:
return from_fp(
fp,
steps,
chunk_size,
threshold,
cp_isolation,
cp_exclusion,
preemptive_behaviour,
explain,
)
def normalize(
path: "PathLike[Any]",
steps: int = 5,
chunk_size: int = 512,
threshold: float = 0.20,
cp_isolation: Optional[List[str]] = None,
cp_exclusion: Optional[List[str]] = None,
preemptive_behaviour: bool = True,
) -> CharsetMatch:
"""
Take a (text-based) file path and try to create another file next to it, this time using UTF-8.
"""
warnings.warn(
"normalize is deprecated and will be removed in 3.0",
DeprecationWarning,
)
results = from_path(
path,
steps,
chunk_size,
threshold,
cp_isolation,
cp_exclusion,
preemptive_behaviour,
)
filename = basename(path)
target_extensions = list(splitext(filename))
if len(results) == 0:
raise IOError(
'Unable to normalize "{}", no encoding charset seems to fit.'.format(
filename
)
)
result = results.best()
target_extensions[0] += "-" + result.encoding # type: ignore
with open(
"{}".format(str(path).replace(filename, "".join(target_extensions))), "wb"
) as fp:
fp.write(result.output()) # type: ignore
return result # type: ignore
| 19,191 | Python | 31.806838 | 120 | 0.556094 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/models.py | import warnings
from collections import Counter
from encodings.aliases import aliases
from hashlib import sha256
from json import dumps
from re import sub
from typing import (
Any,
Counter as TypeCounter,
Dict,
Iterator,
List,
Optional,
Tuple,
Union,
)
from .constant import NOT_PRINTABLE_PATTERN, TOO_BIG_SEQUENCE
from .md import mess_ratio
from .utils import iana_name, is_multi_byte_encoding, unicode_range
class CharsetMatch:
def __init__(
self,
payload: bytes,
guessed_encoding: str,
mean_mess_ratio: float,
has_sig_or_bom: bool,
languages: "CoherenceMatches",
decoded_payload: Optional[str] = None,
):
self._payload: bytes = payload
self._encoding: str = guessed_encoding
self._mean_mess_ratio: float = mean_mess_ratio
self._languages: CoherenceMatches = languages
self._has_sig_or_bom: bool = has_sig_or_bom
self._unicode_ranges: Optional[List[str]] = None
self._leaves: List[CharsetMatch] = []
self._mean_coherence_ratio: float = 0.0
self._output_payload: Optional[bytes] = None
self._output_encoding: Optional[str] = None
self._string: Optional[str] = decoded_payload
def __eq__(self, other: object) -> bool:
if not isinstance(other, CharsetMatch):
raise TypeError(
"__eq__ cannot be invoked on {} and {}.".format(
str(other.__class__), str(self.__class__)
)
)
return self.encoding == other.encoding and self.fingerprint == other.fingerprint
def __lt__(self, other: object) -> bool:
"""
Implemented to make sorted available upon CharsetMatches items.
"""
if not isinstance(other, CharsetMatch):
raise ValueError
chaos_difference: float = abs(self.chaos - other.chaos)
coherence_difference: float = abs(self.coherence - other.coherence)
        # Below 1% difference --> Use Coherence
if chaos_difference < 0.01 and coherence_difference > 0.02:
# When having a tough decision, use the result that decoded as many multi-byte as possible.
if chaos_difference == 0.0 and self.coherence == other.coherence:
return self.multi_byte_usage > other.multi_byte_usage
return self.coherence > other.coherence
return self.chaos < other.chaos
@property
def multi_byte_usage(self) -> float:
return 1.0 - len(str(self)) / len(self.raw)
@property
def chaos_secondary_pass(self) -> float:
"""
        Check the chaos once more in the decoded text, except this time with the full content.
Use with caution, this can be very slow.
Notice: Will be removed in 3.0
"""
warnings.warn(
"chaos_secondary_pass is deprecated and will be removed in 3.0",
DeprecationWarning,
)
return mess_ratio(str(self), 1.0)
@property
def coherence_non_latin(self) -> float:
"""
Coherence ratio on the first non-latin language detected if ANY.
Notice: Will be removed in 3.0
"""
warnings.warn(
"coherence_non_latin is deprecated and will be removed in 3.0",
DeprecationWarning,
)
return 0.0
@property
def w_counter(self) -> TypeCounter[str]:
"""
Word counter instance on decoded text.
Notice: Will be removed in 3.0
"""
warnings.warn(
"w_counter is deprecated and will be removed in 3.0", DeprecationWarning
)
string_printable_only = sub(NOT_PRINTABLE_PATTERN, " ", str(self).lower())
return Counter(string_printable_only.split())
def __str__(self) -> str:
# Lazy Str Loading
if self._string is None:
self._string = str(self._payload, self._encoding, "strict")
return self._string
def __repr__(self) -> str:
return "<CharsetMatch '{}' bytes({})>".format(self.encoding, self.fingerprint)
def add_submatch(self, other: "CharsetMatch") -> None:
if not isinstance(other, CharsetMatch) or other == self:
raise ValueError(
"Unable to add instance <{}> as a submatch of a CharsetMatch".format(
other.__class__
)
)
other._string = None # Unload RAM usage; dirty trick.
self._leaves.append(other)
@property
def encoding(self) -> str:
return self._encoding
@property
def encoding_aliases(self) -> List[str]:
"""
        Encodings are known by many names; using this could help when searching for IBM855 when it's listed as CP855.
"""
also_known_as: List[str] = []
for u, p in aliases.items():
if self.encoding == u:
also_known_as.append(p)
elif self.encoding == p:
also_known_as.append(u)
return also_known_as
@property
def bom(self) -> bool:
return self._has_sig_or_bom
@property
def byte_order_mark(self) -> bool:
return self._has_sig_or_bom
@property
def languages(self) -> List[str]:
"""
        Return the complete list of possible languages found in the decoded sequence.
        Usually not really useful. The returned list may be empty even if the 'language' property returns something != 'Unknown'.
"""
return [e[0] for e in self._languages]
@property
def language(self) -> str:
"""
        Most probable language found in the decoded sequence. If none was detected or inferred, the property will return
        "Unknown".
"""
if not self._languages:
# Trying to infer the language based on the given encoding
            # It's either English or we should not commit ourselves in certain cases.
if "ascii" in self.could_be_from_charset:
return "English"
# doing it there to avoid circular import
from charset_normalizer.cd import encoding_languages, mb_encoding_languages
languages = (
mb_encoding_languages(self.encoding)
if is_multi_byte_encoding(self.encoding)
else encoding_languages(self.encoding)
)
if len(languages) == 0 or "Latin Based" in languages:
return "Unknown"
return languages[0]
return self._languages[0][0]
@property
def chaos(self) -> float:
return self._mean_mess_ratio
@property
def coherence(self) -> float:
if not self._languages:
return 0.0
return self._languages[0][1]
@property
def percent_chaos(self) -> float:
return round(self.chaos * 100, ndigits=3)
@property
def percent_coherence(self) -> float:
return round(self.coherence * 100, ndigits=3)
@property
def raw(self) -> bytes:
"""
Original untouched bytes.
"""
return self._payload
@property
def submatch(self) -> List["CharsetMatch"]:
return self._leaves
@property
def has_submatch(self) -> bool:
return len(self._leaves) > 0
@property
def alphabets(self) -> List[str]:
if self._unicode_ranges is not None:
return self._unicode_ranges
# list detected ranges
detected_ranges: List[Optional[str]] = [
unicode_range(char) for char in str(self)
]
# filter and sort
self._unicode_ranges = sorted(list({r for r in detected_ranges if r}))
return self._unicode_ranges
@property
def could_be_from_charset(self) -> List[str]:
"""
        The complete list of encodings that output the exact SAME str result and therefore could be the originating
        encoding.
        This list does include the encoding available in the property 'encoding'.
"""
return [self._encoding] + [m.encoding for m in self._leaves]
def first(self) -> "CharsetMatch":
"""
Kept for BC reasons. Will be removed in 3.0.
"""
return self
def best(self) -> "CharsetMatch":
"""
Kept for BC reasons. Will be removed in 3.0.
"""
return self
def output(self, encoding: str = "utf_8") -> bytes:
"""
        Method to get the re-encoded bytes payload using the given target encoding. Defaults to UTF-8.
        Encoding errors are handled with 'replace' by the encoder, so unencodable characters are substituted rather than raising.
"""
if self._output_encoding is None or self._output_encoding != encoding:
self._output_encoding = encoding
self._output_payload = str(self).encode(encoding, "replace")
return self._output_payload # type: ignore
@property
def fingerprint(self) -> str:
"""
Retrieve the unique SHA256 computed using the transformed (re-encoded) payload. Not the original one.
"""
return sha256(self.output()).hexdigest()
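# Illustrative sketch (editor's addition, not upstream code), assuming `match`
# is a CharsetMatch whose decoded form is 'héllo': output() re-encodes the
# decoded text (UTF-8 by default) and fingerprint hashes that re-encoded
# payload.
#
#   >>> match.output()
#   b'h\xc3\xa9llo'
#   >>> len(match.fingerprint)  # hex digest of a SHA256
#   64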
class CharsetMatches:
"""
    Container with every CharsetMatch item ordered by default from the most probable to the least probable.
    Acts like a list (iterable) but does not implement all related methods.
"""
def __init__(self, results: Optional[List[CharsetMatch]] = None):
self._results: List[CharsetMatch] = sorted(results) if results else []
def __iter__(self) -> Iterator[CharsetMatch]:
yield from self._results
def __getitem__(self, item: Union[int, str]) -> CharsetMatch:
"""
Retrieve a single item either by its position or encoding name (alias may be used here).
Raise KeyError upon invalid index or encoding not present in results.
"""
if isinstance(item, int):
return self._results[item]
if isinstance(item, str):
item = iana_name(item, False)
for result in self._results:
if item in result.could_be_from_charset:
return result
raise KeyError
def __len__(self) -> int:
return len(self._results)
def __bool__(self) -> bool:
return len(self._results) > 0
def append(self, item: CharsetMatch) -> None:
"""
        Insert a single match. It will be inserted so as to preserve the sort order.
        It may be attached as a submatch.
"""
if not isinstance(item, CharsetMatch):
raise ValueError(
"Cannot append instance '{}' to CharsetMatches".format(
str(item.__class__)
)
)
# We should disable the submatch factoring when the input file is too heavy (conserve RAM usage)
if len(item.raw) <= TOO_BIG_SEQUENCE:
for match in self._results:
if match.fingerprint == item.fingerprint and match.chaos == item.chaos:
match.add_submatch(item)
return
self._results.append(item)
self._results = sorted(self._results)
def best(self) -> Optional["CharsetMatch"]:
"""
Simply return the first match. Strict equivalent to matches[0].
"""
if not self._results:
return None
return self._results[0]
def first(self) -> Optional["CharsetMatch"]:
"""
Redundant method, call the method best(). Kept for BC reasons.
"""
return self.best()
CoherenceMatch = Tuple[str, float]
CoherenceMatches = List[CoherenceMatch]
class CliDetectionResult:
def __init__(
self,
path: str,
encoding: Optional[str],
encoding_aliases: List[str],
alternative_encodings: List[str],
language: str,
alphabets: List[str],
has_sig_or_bom: bool,
chaos: float,
coherence: float,
unicode_path: Optional[str],
is_preferred: bool,
):
self.path: str = path
self.unicode_path: Optional[str] = unicode_path
self.encoding: Optional[str] = encoding
self.encoding_aliases: List[str] = encoding_aliases
self.alternative_encodings: List[str] = alternative_encodings
self.language: str = language
self.alphabets: List[str] = alphabets
self.has_sig_or_bom: bool = has_sig_or_bom
self.chaos: float = chaos
self.coherence: float = coherence
self.is_preferred: bool = is_preferred
@property
def __dict__(self) -> Dict[str, Any]: # type: ignore
return {
"path": self.path,
"encoding": self.encoding,
"encoding_aliases": self.encoding_aliases,
"alternative_encodings": self.alternative_encodings,
"language": self.language,
"alphabets": self.alphabets,
"has_sig_or_bom": self.has_sig_or_bom,
"chaos": self.chaos,
"coherence": self.coherence,
"unicode_path": self.unicode_path,
"is_preferred": self.is_preferred,
}
def to_json(self) -> str:
return dumps(self.__dict__, ensure_ascii=True, indent=4)
| 13,167 | Python | 31.756219 | 120 | 0.587605 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/cd.py | import importlib
from codecs import IncrementalDecoder
from collections import Counter
from functools import lru_cache
from typing import Counter as TypeCounter, Dict, List, Optional, Tuple
from .assets import FREQUENCIES
from .constant import KO_NAMES, LANGUAGE_SUPPORTED_COUNT, TOO_SMALL_SEQUENCE, ZH_NAMES
from .md import is_suspiciously_successive_range
from .models import CoherenceMatches
from .utils import (
is_accentuated,
is_latin,
is_multi_byte_encoding,
is_unicode_range_secondary,
unicode_range,
)
def encoding_unicode_range(iana_name: str) -> List[str]:
"""
    Return the unicode ranges associated with a single-byte code page.
"""
if is_multi_byte_encoding(iana_name):
raise IOError("Function not supported on multi-byte code page")
decoder = importlib.import_module(
"encodings.{}".format(iana_name)
).IncrementalDecoder
p: IncrementalDecoder = decoder(errors="ignore")
seen_ranges: Dict[str, int] = {}
character_count: int = 0
for i in range(0x40, 0xFF):
chunk: str = p.decode(bytes([i]))
if chunk:
character_range: Optional[str] = unicode_range(chunk)
if character_range is None:
continue
if is_unicode_range_secondary(character_range) is False:
if character_range not in seen_ranges:
seen_ranges[character_range] = 0
seen_ranges[character_range] += 1
character_count += 1
return sorted(
[
character_range
for character_range in seen_ranges
if seen_ranges[character_range] / character_count >= 0.15
]
)
def unicode_range_languages(primary_range: str) -> List[str]:
"""
Return inferred languages used with a unicode range.
"""
languages: List[str] = []
for language, characters in FREQUENCIES.items():
for character in characters:
if unicode_range(character) == primary_range:
languages.append(language)
break
return languages
@lru_cache()
def encoding_languages(iana_name: str) -> List[str]:
"""
    Single-byte encoding language association. Some code pages are heavily linked to particular language(s).
This function does the correspondence.
"""
unicode_ranges: List[str] = encoding_unicode_range(iana_name)
primary_range: Optional[str] = None
for specified_range in unicode_ranges:
if "Latin" not in specified_range:
primary_range = specified_range
break
if primary_range is None:
return ["Latin Based"]
return unicode_range_languages(primary_range)
@lru_cache()
def mb_encoding_languages(iana_name: str) -> List[str]:
"""
    Multi-byte encoding language association. Some code pages are heavily linked to particular language(s).
This function does the correspondence.
"""
if (
iana_name.startswith("shift_")
or iana_name.startswith("iso2022_jp")
or iana_name.startswith("euc_j")
or iana_name == "cp932"
):
return ["Japanese"]
if iana_name.startswith("gb") or iana_name in ZH_NAMES:
return ["Chinese", "Classical Chinese"]
if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES:
return ["Korean"]
return []
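# Illustrative sketch (editor's addition, not upstream code): the
# correspondence follows directly from the prefix/name rules above.
#
#   >>> mb_encoding_languages("cp932")
#   ['Japanese']
#   >>> mb_encoding_languages("gb18030")
#   ['Chinese', 'Classical Chinese']
#   >>> mb_encoding_languages("utf_8")
#   []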
@lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT)
def get_target_features(language: str) -> Tuple[bool, bool]:
"""
    Determine the main aspects of a supported language: whether it contains accents and whether it is pure Latin.
"""
target_have_accents: bool = False
target_pure_latin: bool = True
for character in FREQUENCIES[language]:
if not target_have_accents and is_accentuated(character):
target_have_accents = True
if target_pure_latin and is_latin(character) is False:
target_pure_latin = False
return target_have_accents, target_pure_latin
def alphabet_languages(
characters: List[str], ignore_non_latin: bool = False
) -> List[str]:
"""
    Return the languages associated with the given characters.
"""
languages: List[Tuple[str, float]] = []
source_have_accents = any(is_accentuated(character) for character in characters)
for language, language_characters in FREQUENCIES.items():
target_have_accents, target_pure_latin = get_target_features(language)
if ignore_non_latin and target_pure_latin is False:
continue
if target_have_accents is False and source_have_accents:
continue
character_count: int = len(language_characters)
character_match_count: int = len(
[c for c in language_characters if c in characters]
)
ratio: float = character_match_count / character_count
if ratio >= 0.2:
languages.append((language, ratio))
languages = sorted(languages, key=lambda x: x[1], reverse=True)
return [compatible_language[0] for compatible_language in languages]
def characters_popularity_compare(
language: str, ordered_characters: List[str]
) -> float:
"""
    Determine if an ordered character list (by occurrence, from most frequent to rarest) matches a particular language.
    The result is a ratio between 0.0 (absolutely no correspondence) and 1.0 (near perfect fit).
    Beware that this function is not strict on the match in order to ease the detection (meaning a close match counts as 1).
"""
if language not in FREQUENCIES:
raise ValueError("{} not available".format(language))
character_approved_count: int = 0
FREQUENCIES_language_set = set(FREQUENCIES[language])
for character in ordered_characters:
if character not in FREQUENCIES_language_set:
continue
characters_before_source: List[str] = FREQUENCIES[language][
0 : FREQUENCIES[language].index(character)
]
characters_after_source: List[str] = FREQUENCIES[language][
FREQUENCIES[language].index(character) :
]
characters_before: List[str] = ordered_characters[
0 : ordered_characters.index(character)
]
characters_after: List[str] = ordered_characters[
ordered_characters.index(character) :
]
before_match_count: int = len(
set(characters_before) & set(characters_before_source)
)
after_match_count: int = len(
set(characters_after) & set(characters_after_source)
)
if len(characters_before_source) == 0 and before_match_count <= 4:
character_approved_count += 1
continue
if len(characters_after_source) == 0 and after_match_count <= 4:
character_approved_count += 1
continue
if (
before_match_count / len(characters_before_source) >= 0.4
or after_match_count / len(characters_after_source) >= 0.4
):
character_approved_count += 1
continue
return character_approved_count / len(ordered_characters)
def alpha_unicode_split(decoded_sequence: str) -> List[str]:
"""
    Given a decoded text sequence, return a list of str: a Unicode range / alphabet separation.
    E.g. a text containing English/Latin with a bit of Hebrew will return two items in the resulting list;
    one containing the Latin letters and the other the Hebrew ones.
"""
layers: Dict[str, str] = {}
for character in decoded_sequence:
if character.isalpha() is False:
continue
character_range: Optional[str] = unicode_range(character)
if character_range is None:
continue
layer_target_range: Optional[str] = None
for discovered_range in layers:
if (
is_suspiciously_successive_range(discovered_range, character_range)
is False
):
layer_target_range = discovered_range
break
if layer_target_range is None:
layer_target_range = character_range
if layer_target_range not in layers:
layers[layer_target_range] = character.lower()
continue
layers[layer_target_range] += character.lower()
return list(layers.values())
def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches:
"""
This function merges results previously produced by the function coherence_ratio.
The return type is the same as coherence_ratio.
"""
per_language_ratios: Dict[str, List[float]] = {}
for result in results:
for sub_result in result:
language, ratio = sub_result
if language not in per_language_ratios:
per_language_ratios[language] = [ratio]
continue
per_language_ratios[language].append(ratio)
merge = [
(
language,
round(
sum(per_language_ratios[language]) / len(per_language_ratios[language]),
4,
),
)
for language in per_language_ratios
]
return sorted(merge, key=lambda x: x[1], reverse=True)
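# Editorial sketch (not part of the library): merging two hypothetical layer
# results averages the ratios per language, e.g.
#
#     merge_coherence_ratios([[("English", 0.5)], [("English", 0.7)]])
#     # -> [("English", 0.6)]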
@lru_cache(maxsize=2048)
def coherence_ratio(
decoded_sequence: str, threshold: float = 0.1, lg_inclusion: Optional[str] = None
) -> CoherenceMatches:
"""
Detect ANY language that can be identified in the given sequence. The sequence will be analysed by layers.
A layer = Character extraction by alphabets/ranges.
"""
results: List[Tuple[str, float]] = []
ignore_non_latin: bool = False
sufficient_match_count: int = 0
lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else []
if "Latin Based" in lg_inclusion_list:
ignore_non_latin = True
lg_inclusion_list.remove("Latin Based")
for layer in alpha_unicode_split(decoded_sequence):
sequence_frequencies: TypeCounter[str] = Counter(layer)
most_common = sequence_frequencies.most_common()
character_count: int = sum(o for c, o in most_common)
if character_count <= TOO_SMALL_SEQUENCE:
continue
popular_character_ordered: List[str] = [c for c, o in most_common]
for language in lg_inclusion_list or alphabet_languages(
popular_character_ordered, ignore_non_latin
):
ratio: float = characters_popularity_compare(
language, popular_character_ordered
)
if ratio < threshold:
continue
elif ratio >= 0.8:
sufficient_match_count += 1
results.append((language, round(ratio, 4)))
if sufficient_match_count >= 3:
break
return sorted(results, key=lambda x: x[1], reverse=True)
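# Editorial sketch (not part of the library): a hypothetical invocation; the
# ratios shown are illustrative only.
#
#     coherence_ratio("Hello there, how are you doing today?")
#     # -> [("English", 0.4375), ("Dutch", 0.25)]  # (language, ratio) pairs,
#     # sorted by descending ratio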
| 10,811 | Python | 30.8 | 118 | 0.630746 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/constant.py | from codecs import BOM_UTF8, BOM_UTF16_BE, BOM_UTF16_LE, BOM_UTF32_BE, BOM_UTF32_LE
from encodings.aliases import aliases
from re import IGNORECASE, compile as re_compile
from typing import Dict, List, Set, Union
from .assets import FREQUENCIES
# For each eligible encoding, the BOM/SIG byte marks (a single item or a list of items)
ENCODING_MARKS: Dict[str, Union[bytes, List[bytes]]] = {
"utf_8": BOM_UTF8,
"utf_7": [
b"\x2b\x2f\x76\x38",
b"\x2b\x2f\x76\x39",
b"\x2b\x2f\x76\x2b",
b"\x2b\x2f\x76\x2f",
b"\x2b\x2f\x76\x38\x2d",
],
"gb18030": b"\x84\x31\x95\x33",
"utf_32": [BOM_UTF32_BE, BOM_UTF32_LE],
"utf_16": [BOM_UTF16_BE, BOM_UTF16_LE],
}
TOO_SMALL_SEQUENCE: int = 32
TOO_BIG_SEQUENCE: int = int(10e6)
UTF8_MAXIMAL_ALLOCATION: int = 1112064
UNICODE_RANGES_COMBINED: Dict[str, range] = {
"Control character": range(31 + 1),
"Basic Latin": range(32, 127 + 1),
"Latin-1 Supplement": range(128, 255 + 1),
"Latin Extended-A": range(256, 383 + 1),
"Latin Extended-B": range(384, 591 + 1),
"IPA Extensions": range(592, 687 + 1),
"Spacing Modifier Letters": range(688, 767 + 1),
"Combining Diacritical Marks": range(768, 879 + 1),
"Greek and Coptic": range(880, 1023 + 1),
"Cyrillic": range(1024, 1279 + 1),
"Cyrillic Supplement": range(1280, 1327 + 1),
"Armenian": range(1328, 1423 + 1),
"Hebrew": range(1424, 1535 + 1),
"Arabic": range(1536, 1791 + 1),
"Syriac": range(1792, 1871 + 1),
"Arabic Supplement": range(1872, 1919 + 1),
"Thaana": range(1920, 1983 + 1),
"NKo": range(1984, 2047 + 1),
"Samaritan": range(2048, 2111 + 1),
"Mandaic": range(2112, 2143 + 1),
"Syriac Supplement": range(2144, 2159 + 1),
"Arabic Extended-A": range(2208, 2303 + 1),
"Devanagari": range(2304, 2431 + 1),
"Bengali": range(2432, 2559 + 1),
"Gurmukhi": range(2560, 2687 + 1),
"Gujarati": range(2688, 2815 + 1),
"Oriya": range(2816, 2943 + 1),
"Tamil": range(2944, 3071 + 1),
"Telugu": range(3072, 3199 + 1),
"Kannada": range(3200, 3327 + 1),
"Malayalam": range(3328, 3455 + 1),
"Sinhala": range(3456, 3583 + 1),
"Thai": range(3584, 3711 + 1),
"Lao": range(3712, 3839 + 1),
"Tibetan": range(3840, 4095 + 1),
"Myanmar": range(4096, 4255 + 1),
"Georgian": range(4256, 4351 + 1),
"Hangul Jamo": range(4352, 4607 + 1),
"Ethiopic": range(4608, 4991 + 1),
"Ethiopic Supplement": range(4992, 5023 + 1),
"Cherokee": range(5024, 5119 + 1),
"Unified Canadian Aboriginal Syllabics": range(5120, 5759 + 1),
"Ogham": range(5760, 5791 + 1),
"Runic": range(5792, 5887 + 1),
"Tagalog": range(5888, 5919 + 1),
"Hanunoo": range(5920, 5951 + 1),
"Buhid": range(5952, 5983 + 1),
"Tagbanwa": range(5984, 6015 + 1),
"Khmer": range(6016, 6143 + 1),
"Mongolian": range(6144, 6319 + 1),
"Unified Canadian Aboriginal Syllabics Extended": range(6320, 6399 + 1),
"Limbu": range(6400, 6479 + 1),
"Tai Le": range(6480, 6527 + 1),
"New Tai Lue": range(6528, 6623 + 1),
"Khmer Symbols": range(6624, 6655 + 1),
"Buginese": range(6656, 6687 + 1),
"Tai Tham": range(6688, 6831 + 1),
"Combining Diacritical Marks Extended": range(6832, 6911 + 1),
"Balinese": range(6912, 7039 + 1),
"Sundanese": range(7040, 7103 + 1),
"Batak": range(7104, 7167 + 1),
"Lepcha": range(7168, 7247 + 1),
"Ol Chiki": range(7248, 7295 + 1),
"Cyrillic Extended C": range(7296, 7311 + 1),
"Sundanese Supplement": range(7360, 7375 + 1),
"Vedic Extensions": range(7376, 7423 + 1),
"Phonetic Extensions": range(7424, 7551 + 1),
"Phonetic Extensions Supplement": range(7552, 7615 + 1),
"Combining Diacritical Marks Supplement": range(7616, 7679 + 1),
"Latin Extended Additional": range(7680, 7935 + 1),
"Greek Extended": range(7936, 8191 + 1),
"General Punctuation": range(8192, 8303 + 1),
"Superscripts and Subscripts": range(8304, 8351 + 1),
"Currency Symbols": range(8352, 8399 + 1),
"Combining Diacritical Marks for Symbols": range(8400, 8447 + 1),
"Letterlike Symbols": range(8448, 8527 + 1),
"Number Forms": range(8528, 8591 + 1),
"Arrows": range(8592, 8703 + 1),
"Mathematical Operators": range(8704, 8959 + 1),
"Miscellaneous Technical": range(8960, 9215 + 1),
"Control Pictures": range(9216, 9279 + 1),
"Optical Character Recognition": range(9280, 9311 + 1),
"Enclosed Alphanumerics": range(9312, 9471 + 1),
"Box Drawing": range(9472, 9599 + 1),
"Block Elements": range(9600, 9631 + 1),
"Geometric Shapes": range(9632, 9727 + 1),
"Miscellaneous Symbols": range(9728, 9983 + 1),
"Dingbats": range(9984, 10175 + 1),
"Miscellaneous Mathematical Symbols-A": range(10176, 10223 + 1),
"Supplemental Arrows-A": range(10224, 10239 + 1),
"Braille Patterns": range(10240, 10495 + 1),
"Supplemental Arrows-B": range(10496, 10623 + 1),
"Miscellaneous Mathematical Symbols-B": range(10624, 10751 + 1),
"Supplemental Mathematical Operators": range(10752, 11007 + 1),
"Miscellaneous Symbols and Arrows": range(11008, 11263 + 1),
"Glagolitic": range(11264, 11359 + 1),
"Latin Extended-C": range(11360, 11391 + 1),
"Coptic": range(11392, 11519 + 1),
"Georgian Supplement": range(11520, 11567 + 1),
"Tifinagh": range(11568, 11647 + 1),
"Ethiopic Extended": range(11648, 11743 + 1),
"Cyrillic Extended-A": range(11744, 11775 + 1),
"Supplemental Punctuation": range(11776, 11903 + 1),
"CJK Radicals Supplement": range(11904, 12031 + 1),
"Kangxi Radicals": range(12032, 12255 + 1),
"Ideographic Description Characters": range(12272, 12287 + 1),
"CJK Symbols and Punctuation": range(12288, 12351 + 1),
"Hiragana": range(12352, 12447 + 1),
"Katakana": range(12448, 12543 + 1),
"Bopomofo": range(12544, 12591 + 1),
"Hangul Compatibility Jamo": range(12592, 12687 + 1),
"Kanbun": range(12688, 12703 + 1),
"Bopomofo Extended": range(12704, 12735 + 1),
"CJK Strokes": range(12736, 12783 + 1),
"Katakana Phonetic Extensions": range(12784, 12799 + 1),
"Enclosed CJK Letters and Months": range(12800, 13055 + 1),
"CJK Compatibility": range(13056, 13311 + 1),
"CJK Unified Ideographs Extension A": range(13312, 19903 + 1),
"Yijing Hexagram Symbols": range(19904, 19967 + 1),
"CJK Unified Ideographs": range(19968, 40959 + 1),
"Yi Syllables": range(40960, 42127 + 1),
"Yi Radicals": range(42128, 42191 + 1),
"Lisu": range(42192, 42239 + 1),
"Vai": range(42240, 42559 + 1),
"Cyrillic Extended-B": range(42560, 42655 + 1),
"Bamum": range(42656, 42751 + 1),
"Modifier Tone Letters": range(42752, 42783 + 1),
"Latin Extended-D": range(42784, 43007 + 1),
"Syloti Nagri": range(43008, 43055 + 1),
"Common Indic Number Forms": range(43056, 43071 + 1),
"Phags-pa": range(43072, 43135 + 1),
"Saurashtra": range(43136, 43231 + 1),
"Devanagari Extended": range(43232, 43263 + 1),
"Kayah Li": range(43264, 43311 + 1),
"Rejang": range(43312, 43359 + 1),
"Hangul Jamo Extended-A": range(43360, 43391 + 1),
"Javanese": range(43392, 43487 + 1),
"Myanmar Extended-B": range(43488, 43519 + 1),
"Cham": range(43520, 43615 + 1),
"Myanmar Extended-A": range(43616, 43647 + 1),
"Tai Viet": range(43648, 43743 + 1),
"Meetei Mayek Extensions": range(43744, 43775 + 1),
"Ethiopic Extended-A": range(43776, 43823 + 1),
"Latin Extended-E": range(43824, 43887 + 1),
"Cherokee Supplement": range(43888, 43967 + 1),
"Meetei Mayek": range(43968, 44031 + 1),
"Hangul Syllables": range(44032, 55215 + 1),
"Hangul Jamo Extended-B": range(55216, 55295 + 1),
"High Surrogates": range(55296, 56191 + 1),
"High Private Use Surrogates": range(56192, 56319 + 1),
"Low Surrogates": range(56320, 57343 + 1),
"Private Use Area": range(57344, 63743 + 1),
"CJK Compatibility Ideographs": range(63744, 64255 + 1),
"Alphabetic Presentation Forms": range(64256, 64335 + 1),
"Arabic Presentation Forms-A": range(64336, 65023 + 1),
"Variation Selectors": range(65024, 65039 + 1),
"Vertical Forms": range(65040, 65055 + 1),
"Combining Half Marks": range(65056, 65071 + 1),
"CJK Compatibility Forms": range(65072, 65103 + 1),
"Small Form Variants": range(65104, 65135 + 1),
"Arabic Presentation Forms-B": range(65136, 65279 + 1),
"Halfwidth and Fullwidth Forms": range(65280, 65519 + 1),
"Specials": range(65520, 65535 + 1),
"Linear B Syllabary": range(65536, 65663 + 1),
"Linear B Ideograms": range(65664, 65791 + 1),
"Aegean Numbers": range(65792, 65855 + 1),
"Ancient Greek Numbers": range(65856, 65935 + 1),
"Ancient Symbols": range(65936, 65999 + 1),
"Phaistos Disc": range(66000, 66047 + 1),
"Lycian": range(66176, 66207 + 1),
"Carian": range(66208, 66271 + 1),
"Coptic Epact Numbers": range(66272, 66303 + 1),
"Old Italic": range(66304, 66351 + 1),
"Gothic": range(66352, 66383 + 1),
"Old Permic": range(66384, 66431 + 1),
"Ugaritic": range(66432, 66463 + 1),
"Old Persian": range(66464, 66527 + 1),
"Deseret": range(66560, 66639 + 1),
"Shavian": range(66640, 66687 + 1),
"Osmanya": range(66688, 66735 + 1),
"Osage": range(66736, 66815 + 1),
"Elbasan": range(66816, 66863 + 1),
"Caucasian Albanian": range(66864, 66927 + 1),
"Linear A": range(67072, 67455 + 1),
"Cypriot Syllabary": range(67584, 67647 + 1),
"Imperial Aramaic": range(67648, 67679 + 1),
"Palmyrene": range(67680, 67711 + 1),
"Nabataean": range(67712, 67759 + 1),
"Hatran": range(67808, 67839 + 1),
"Phoenician": range(67840, 67871 + 1),
"Lydian": range(67872, 67903 + 1),
"Meroitic Hieroglyphs": range(67968, 67999 + 1),
"Meroitic Cursive": range(68000, 68095 + 1),
"Kharoshthi": range(68096, 68191 + 1),
"Old South Arabian": range(68192, 68223 + 1),
"Old North Arabian": range(68224, 68255 + 1),
"Manichaean": range(68288, 68351 + 1),
"Avestan": range(68352, 68415 + 1),
"Inscriptional Parthian": range(68416, 68447 + 1),
"Inscriptional Pahlavi": range(68448, 68479 + 1),
"Psalter Pahlavi": range(68480, 68527 + 1),
"Old Turkic": range(68608, 68687 + 1),
"Old Hungarian": range(68736, 68863 + 1),
"Rumi Numeral Symbols": range(69216, 69247 + 1),
"Brahmi": range(69632, 69759 + 1),
"Kaithi": range(69760, 69839 + 1),
"Sora Sompeng": range(69840, 69887 + 1),
"Chakma": range(69888, 69967 + 1),
"Mahajani": range(69968, 70015 + 1),
"Sharada": range(70016, 70111 + 1),
"Sinhala Archaic Numbers": range(70112, 70143 + 1),
"Khojki": range(70144, 70223 + 1),
"Multani": range(70272, 70319 + 1),
"Khudawadi": range(70320, 70399 + 1),
"Grantha": range(70400, 70527 + 1),
"Newa": range(70656, 70783 + 1),
"Tirhuta": range(70784, 70879 + 1),
"Siddham": range(71040, 71167 + 1),
"Modi": range(71168, 71263 + 1),
"Mongolian Supplement": range(71264, 71295 + 1),
"Takri": range(71296, 71375 + 1),
"Ahom": range(71424, 71487 + 1),
"Warang Citi": range(71840, 71935 + 1),
"Zanabazar Square": range(72192, 72271 + 1),
"Soyombo": range(72272, 72367 + 1),
"Pau Cin Hau": range(72384, 72447 + 1),
"Bhaiksuki": range(72704, 72815 + 1),
"Marchen": range(72816, 72895 + 1),
"Masaram Gondi": range(72960, 73055 + 1),
"Cuneiform": range(73728, 74751 + 1),
"Cuneiform Numbers and Punctuation": range(74752, 74879 + 1),
"Early Dynastic Cuneiform": range(74880, 75087 + 1),
"Egyptian Hieroglyphs": range(77824, 78895 + 1),
"Anatolian Hieroglyphs": range(82944, 83583 + 1),
"Bamum Supplement": range(92160, 92735 + 1),
"Mro": range(92736, 92783 + 1),
"Bassa Vah": range(92880, 92927 + 1),
"Pahawh Hmong": range(92928, 93071 + 1),
"Miao": range(93952, 94111 + 1),
"Ideographic Symbols and Punctuation": range(94176, 94207 + 1),
"Tangut": range(94208, 100351 + 1),
"Tangut Components": range(100352, 101119 + 1),
"Kana Supplement": range(110592, 110847 + 1),
"Kana Extended-A": range(110848, 110895 + 1),
"Nushu": range(110960, 111359 + 1),
"Duployan": range(113664, 113823 + 1),
"Shorthand Format Controls": range(113824, 113839 + 1),
"Byzantine Musical Symbols": range(118784, 119039 + 1),
"Musical Symbols": range(119040, 119295 + 1),
"Ancient Greek Musical Notation": range(119296, 119375 + 1),
"Tai Xuan Jing Symbols": range(119552, 119647 + 1),
"Counting Rod Numerals": range(119648, 119679 + 1),
"Mathematical Alphanumeric Symbols": range(119808, 120831 + 1),
"Sutton SignWriting": range(120832, 121519 + 1),
"Glagolitic Supplement": range(122880, 122927 + 1),
"Mende Kikakui": range(124928, 125151 + 1),
"Adlam": range(125184, 125279 + 1),
"Arabic Mathematical Alphabetic Symbols": range(126464, 126719 + 1),
"Mahjong Tiles": range(126976, 127023 + 1),
"Domino Tiles": range(127024, 127135 + 1),
"Playing Cards": range(127136, 127231 + 1),
"Enclosed Alphanumeric Supplement": range(127232, 127487 + 1),
"Enclosed Ideographic Supplement": range(127488, 127743 + 1),
"Miscellaneous Symbols and Pictographs": range(127744, 128511 + 1),
"Emoticons range(Emoji)": range(128512, 128591 + 1),
"Ornamental Dingbats": range(128592, 128639 + 1),
"Transport and Map Symbols": range(128640, 128767 + 1),
"Alchemical Symbols": range(128768, 128895 + 1),
"Geometric Shapes Extended": range(128896, 129023 + 1),
"Supplemental Arrows-C": range(129024, 129279 + 1),
"Supplemental Symbols and Pictographs": range(129280, 129535 + 1),
"CJK Unified Ideographs Extension B": range(131072, 173791 + 1),
"CJK Unified Ideographs Extension C": range(173824, 177983 + 1),
"CJK Unified Ideographs Extension D": range(177984, 178207 + 1),
"CJK Unified Ideographs Extension E": range(178208, 183983 + 1),
"CJK Unified Ideographs Extension F": range(183984, 191471 + 1),
"CJK Compatibility Ideographs Supplement": range(194560, 195103 + 1),
"Tags": range(917504, 917631 + 1),
"Variation Selectors Supplement": range(917760, 917999 + 1),
}
UNICODE_SECONDARY_RANGE_KEYWORD: List[str] = [
"Supplement",
"Extended",
"Extensions",
"Modifier",
"Marks",
"Punctuation",
"Symbols",
"Forms",
"Operators",
"Miscellaneous",
"Drawing",
"Block",
"Shapes",
"Supplemental",
"Tags",
]
RE_POSSIBLE_ENCODING_INDICATION = re_compile(
r"(?:(?:encoding)|(?:charset)|(?:coding))(?:[\:= ]{1,10})(?:[\"\']?)([a-zA-Z0-9\-_]+)(?:[\"\']?)",
IGNORECASE,
)
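# Editorial note: this pattern matches in-band encoding declarations such as
# 'charset="utf-8"' in markup or '# -*- coding: latin-1 -*-' in source files,
# capturing the encoding token that follows "encoding", "charset" or "coding".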
IANA_SUPPORTED: List[str] = sorted(
filter(
lambda x: x.endswith("_codec") is False
and x not in {"rot_13", "tactis", "mbcs"},
list(set(aliases.values())),
)
)
IANA_SUPPORTED_COUNT: int = len(IANA_SUPPORTED)
# Pre-computed code pages that are similar, according to the function cp_similarity.
IANA_SUPPORTED_SIMILAR: Dict[str, List[str]] = {
"cp037": ["cp1026", "cp1140", "cp273", "cp500"],
"cp1026": ["cp037", "cp1140", "cp273", "cp500"],
"cp1125": ["cp866"],
"cp1140": ["cp037", "cp1026", "cp273", "cp500"],
"cp1250": ["iso8859_2"],
"cp1251": ["kz1048", "ptcp154"],
"cp1252": ["iso8859_15", "iso8859_9", "latin_1"],
"cp1253": ["iso8859_7"],
"cp1254": ["iso8859_15", "iso8859_9", "latin_1"],
"cp1257": ["iso8859_13"],
"cp273": ["cp037", "cp1026", "cp1140", "cp500"],
"cp437": ["cp850", "cp858", "cp860", "cp861", "cp862", "cp863", "cp865"],
"cp500": ["cp037", "cp1026", "cp1140", "cp273"],
"cp850": ["cp437", "cp857", "cp858", "cp865"],
"cp857": ["cp850", "cp858", "cp865"],
"cp858": ["cp437", "cp850", "cp857", "cp865"],
"cp860": ["cp437", "cp861", "cp862", "cp863", "cp865"],
"cp861": ["cp437", "cp860", "cp862", "cp863", "cp865"],
"cp862": ["cp437", "cp860", "cp861", "cp863", "cp865"],
"cp863": ["cp437", "cp860", "cp861", "cp862", "cp865"],
"cp865": ["cp437", "cp850", "cp857", "cp858", "cp860", "cp861", "cp862", "cp863"],
"cp866": ["cp1125"],
"iso8859_10": ["iso8859_14", "iso8859_15", "iso8859_4", "iso8859_9", "latin_1"],
"iso8859_11": ["tis_620"],
"iso8859_13": ["cp1257"],
"iso8859_14": [
"iso8859_10",
"iso8859_15",
"iso8859_16",
"iso8859_3",
"iso8859_9",
"latin_1",
],
"iso8859_15": [
"cp1252",
"cp1254",
"iso8859_10",
"iso8859_14",
"iso8859_16",
"iso8859_3",
"iso8859_9",
"latin_1",
],
"iso8859_16": [
"iso8859_14",
"iso8859_15",
"iso8859_2",
"iso8859_3",
"iso8859_9",
"latin_1",
],
"iso8859_2": ["cp1250", "iso8859_16", "iso8859_4"],
"iso8859_3": ["iso8859_14", "iso8859_15", "iso8859_16", "iso8859_9", "latin_1"],
"iso8859_4": ["iso8859_10", "iso8859_2", "iso8859_9", "latin_1"],
"iso8859_7": ["cp1253"],
"iso8859_9": [
"cp1252",
"cp1254",
"cp1258",
"iso8859_10",
"iso8859_14",
"iso8859_15",
"iso8859_16",
"iso8859_3",
"iso8859_4",
"latin_1",
],
"kz1048": ["cp1251", "ptcp154"],
"latin_1": [
"cp1252",
"cp1254",
"cp1258",
"iso8859_10",
"iso8859_14",
"iso8859_15",
"iso8859_16",
"iso8859_3",
"iso8859_4",
"iso8859_9",
],
"mac_iceland": ["mac_roman", "mac_turkish"],
"mac_roman": ["mac_iceland", "mac_turkish"],
"mac_turkish": ["mac_iceland", "mac_roman"],
"ptcp154": ["cp1251", "kz1048"],
"tis_620": ["iso8859_11"],
}
CHARDET_CORRESPONDENCE: Dict[str, str] = {
"iso2022_kr": "ISO-2022-KR",
"iso2022_jp": "ISO-2022-JP",
"euc_kr": "EUC-KR",
"tis_620": "TIS-620",
"utf_32": "UTF-32",
"euc_jp": "EUC-JP",
"koi8_r": "KOI8-R",
"iso8859_1": "ISO-8859-1",
"iso8859_2": "ISO-8859-2",
"iso8859_5": "ISO-8859-5",
"iso8859_6": "ISO-8859-6",
"iso8859_7": "ISO-8859-7",
"iso8859_8": "ISO-8859-8",
"utf_16": "UTF-16",
"cp855": "IBM855",
"mac_cyrillic": "MacCyrillic",
"gb2312": "GB2312",
"gb18030": "GB18030",
"cp932": "CP932",
"cp866": "IBM866",
"utf_8": "utf-8",
"utf_8_sig": "UTF-8-SIG",
"shift_jis": "SHIFT_JIS",
"big5": "Big5",
"cp1250": "windows-1250",
"cp1251": "windows-1251",
"cp1252": "Windows-1252",
"cp1253": "windows-1253",
"cp1255": "windows-1255",
"cp1256": "windows-1256",
"cp1254": "Windows-1254",
"cp949": "CP949",
}
COMMON_SAFE_ASCII_CHARACTERS: Set[str] = {
"<",
">",
"=",
":",
"/",
"&",
";",
"{",
"}",
"[",
"]",
",",
"|",
'"',
"-",
}
KO_NAMES: Set[str] = {"johab", "cp949", "euc_kr"}
ZH_NAMES: Set[str] = {"big5", "cp950", "big5hkscs", "hz"}
NOT_PRINTABLE_PATTERN = re_compile(r"[0-9\W\n\r\t]+")
LANGUAGE_SUPPORTED_COUNT: int = len(FREQUENCIES)
# Logging LEVEL below DEBUG
TRACE: int = 5
| 19,157 | Python | 37.469879 | 102 | 0.59075 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/cli/normalizer.py | import argparse
import sys
from json import dumps
from os.path import abspath
from platform import python_version
from typing import List, Optional
try:
from unicodedata2 import unidata_version
except ImportError:
from unicodedata import unidata_version
from charset_normalizer import from_fp
from charset_normalizer.models import CliDetectionResult
from charset_normalizer.version import __version__
def query_yes_no(question: str, default: str = "yes") -> bool:
"""Ask a yes/no question via input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
Credit goes to (c) https://stackoverflow.com/questions/3041986/apt-command-line-interface-like-yes-no-input
"""
valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == "":
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n")
def cli_detect(argv: Optional[List[str]] = None) -> int:
"""
CLI assistant using ARGV and ArgumentParser
:param argv:
:return: 0 if everything is fine, anything else signals trouble
"""
parser = argparse.ArgumentParser(
description="The Real First Universal Charset Detector. "
"Discover originating encoding used on text file. "
"Normalize text to unicode."
)
parser.add_argument(
"files", type=argparse.FileType("rb"), nargs="+", help="File(s) to be analysed"
)
parser.add_argument(
"-v",
"--verbose",
action="store_true",
default=False,
dest="verbose",
help="Display complementary information about file if any. "
"Stdout will contain logs about the detection process.",
)
parser.add_argument(
"-a",
"--with-alternative",
action="store_true",
default=False,
dest="alternatives",
help="Output complementary possibilities if any. Top-level JSON WILL be a list.",
)
parser.add_argument(
"-n",
"--normalize",
action="store_true",
default=False,
dest="normalize",
help="Permit to normalize input file. If not set, program does not write anything.",
)
parser.add_argument(
"-m",
"--minimal",
action="store_true",
default=False,
dest="minimal",
help="Only output the charset detected to STDOUT. Disabling JSON output.",
)
parser.add_argument(
"-r",
"--replace",
action="store_true",
default=False,
dest="replace",
help="Replace file when trying to normalize it instead of creating a new one.",
)
parser.add_argument(
"-f",
"--force",
action="store_true",
default=False,
dest="force",
help="Replace file without asking if you are sure, use this flag with caution.",
)
parser.add_argument(
"-t",
"--threshold",
action="store",
default=0.2,
type=float,
dest="threshold",
help="Define a custom maximum amount of chaos allowed in decoded content. 0. <= chaos <= 1.",
)
parser.add_argument(
"--version",
action="version",
version="Charset-Normalizer {} - Python {} - Unicode {}".format(
__version__, python_version(), unidata_version
),
help="Show version information and exit.",
)
args = parser.parse_args(argv)
if args.replace is True and args.normalize is False:
print("Use --replace in addition of --normalize only.", file=sys.stderr)
return 1
if args.force is True and args.replace is False:
print("Use --force in addition of --replace only.", file=sys.stderr)
return 1
if args.threshold < 0.0 or args.threshold > 1.0:
print("--threshold VALUE should be between 0. AND 1.", file=sys.stderr)
return 1
x_ = []
for my_file in args.files:
matches = from_fp(my_file, threshold=args.threshold, explain=args.verbose)
best_guess = matches.best()
if best_guess is None:
print(
'Unable to identify originating encoding for "{}". {}'.format(
my_file.name,
"Maybe try increasing maximum amount of chaos."
if args.threshold < 1.0
else "",
),
file=sys.stderr,
)
x_.append(
CliDetectionResult(
abspath(my_file.name),
None,
[],
[],
"Unknown",
[],
False,
1.0,
0.0,
None,
True,
)
)
else:
x_.append(
CliDetectionResult(
abspath(my_file.name),
best_guess.encoding,
best_guess.encoding_aliases,
[
cp
for cp in best_guess.could_be_from_charset
if cp != best_guess.encoding
],
best_guess.language,
best_guess.alphabets,
best_guess.bom,
best_guess.percent_chaos,
best_guess.percent_coherence,
None,
True,
)
)
if len(matches) > 1 and args.alternatives:
for el in matches:
if el != best_guess:
x_.append(
CliDetectionResult(
abspath(my_file.name),
el.encoding,
el.encoding_aliases,
[
cp
for cp in el.could_be_from_charset
if cp != el.encoding
],
el.language,
el.alphabets,
el.bom,
el.percent_chaos,
el.percent_coherence,
None,
False,
)
)
if args.normalize is True:
if best_guess.encoding.startswith("utf") is True:
print(
'"{}" file does not need to be normalized, as it already came from unicode.'.format(
my_file.name
),
file=sys.stderr,
)
if my_file.closed is False:
my_file.close()
continue
o_: List[str] = my_file.name.split(".")
if args.replace is False:
o_.insert(-1, best_guess.encoding)
if my_file.closed is False:
my_file.close()
elif (
args.force is False
and query_yes_no(
'Are you sure you want to normalize "{}" by replacing it?'.format(
my_file.name
),
"no",
)
is False
):
if my_file.closed is False:
my_file.close()
continue
try:
x_[0].unicode_path = abspath("./{}".format(".".join(o_)))
with open(x_[0].unicode_path, "w", encoding="utf-8") as fp:
fp.write(str(best_guess))
except IOError as e:
print(str(e), file=sys.stderr)
if my_file.closed is False:
my_file.close()
return 2
if my_file.closed is False:
my_file.close()
if args.minimal is False:
print(
dumps(
[el.__dict__ for el in x_] if len(x_) > 1 else x_[0].__dict__,
ensure_ascii=True,
indent=4,
)
)
else:
for my_file in args.files:
print(
", ".join(
[
el.encoding or "undefined"
for el in x_
if el.path == abspath(my_file.name)
]
)
)
return 0
if __name__ == "__main__":
cli_detect()
| 9,521 | Python | 31.168919 | 111 | 0.461086 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiofiles-0.4.0.dist-info/DESCRIPTION.rst | aiofiles: file support for asyncio
==================================
.. image:: https://img.shields.io/pypi/v/aiofiles.svg
:target: https://pypi.python.org/pypi/aiofiles
.. image:: https://travis-ci.org/Tinche/aiofiles.svg?branch=master
:target: https://travis-ci.org/Tinche/aiofiles
.. image:: https://codecov.io/gh/Tinche/aiofiles/branch/master/graph/badge.svg
:target: https://codecov.io/gh/Tinche/aiofiles
**aiofiles** is an Apache2 licensed library, written in Python, for handling local
disk files in asyncio applications.
Ordinary local file IO is blocking, and cannot easily and portably be made
asynchronous. This means doing file IO may interfere with asyncio applications,
which shouldn't block the executing thread. aiofiles helps with this by
introducing asynchronous versions of files that support delegating operations to
a separate thread pool.
.. code-block:: python
async with aiofiles.open('filename', mode='r') as f:
contents = await f.read()
print(contents)
'My file contents'
Asynchronous iteration is also supported.
.. code-block:: python
async with aiofiles.open('filename') as f:
async for line in f:
...
Features
--------
- a file API very similar to Python's standard, blocking API
- support for buffered and unbuffered binary files, and buffered text files
- support for ``async``/``await`` (:PEP:`492`) constructs
Installation
------------
To install aiofiles, simply:
.. code-block:: bash
$ pip install aiofiles
Usage
-----
Files are opened using the ``aiofiles.open()`` coroutine, which in addition to
mirroring the builtin ``open`` accepts optional ``loop`` and ``executor``
arguments. If ``loop`` is absent, the default loop will be used, as per the
set asyncio policy. If ``executor`` is not specified, the default event loop
executor will be used.
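For example, to delegate file operations to a dedicated thread pool instead of
the default executor (a minimal sketch; the pool size is arbitrary):

.. code-block:: python

    from concurrent.futures import ThreadPoolExecutor

    import aiofiles

    pool = ThreadPoolExecutor(max_workers=4)

    async with aiofiles.open('filename', mode='r', executor=pool) as f:
        contents = await f.read()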
In case of success, an asynchronous file object is returned with an
API identical to an ordinary file, except the following methods are coroutines
and delegate to an executor:
* ``close``
* ``flush``
* ``isatty``
* ``read``
* ``readall``
* ``read1``
* ``readinto``
* ``readline``
* ``readlines``
* ``seek``
* ``seekable``
* ``tell``
* ``truncate``
* ``writable``
* ``write``
* ``writelines``
In case of failure, one of the usual exceptions will be raised.
The ``aiofiles.os`` module contains executor-enabled coroutine versions of
several useful ``os`` functions that deal with files:
* ``stat``
* ``sendfile``
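A minimal sketch of their usage (the path is a placeholder):

.. code-block:: python

    import aiofiles.os

    async def file_size(path):
        stat_result = await aiofiles.os.stat(path)
        return stat_result.st_size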
Writing tests for aiofiles
~~~~~~~~~~~~~~~~~~~~~~~~~~
Real file IO can be mocked by patching ``aiofiles.threadpool.sync_open``
as desired. The return type also needs to be registered with the
``aiofiles.threadpool.wrap`` dispatcher:
.. code-block:: python
aiofiles.threadpool.wrap.register(mock.MagicMock)(
lambda *args, **kwargs: threadpool.AsyncBufferedIOBase(*args, **kwargs))
async def test_stuff():
data = 'data'
mock_file = mock.MagicMock()
with mock.patch('aiofiles.threadpool.sync_open', return_value=mock_file) as mock_open:
async with aiofiles.open('filename', 'w') as f:
await f.write(data)
mock_file.write.assert_called_once_with(data)
History
~~~~~~~
0.4.0 (2018-08-11)
``````````````````
- Python 3.7 support.
- Removed Python 3.3/3.4 support. If you use these versions, stick to aiofiles 0.3.x.
0.3.2 (2017-09-23)
``````````````````
- The LICENSE is now included in the sdist.
`#31 <https://github.com/Tinche/aiofiles/pull/31>`_
0.3.1 (2017-03-10)
``````````````````
- Introduced a changelog.
- ``aiofiles.os.sendfile`` will now work if the standard ``os`` module contains a ``sendfile`` function.
Contributing
~~~~~~~~~~~~
Contributions are very welcome. Tests can be run with ``tox``, please ensure
the coverage at least stays the same before you submit a pull request.
| 3,873 | reStructuredText | 26.870503 | 104 | 0.675704 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/_endpoint_helpers.py | import aiohttp.http_exceptions
from aiohttp.client_reqrep import ClientResponse
import asyncio
import botocore.retryhandler
import wrapt
# Monkey patching: We need to insert the aiohttp exception equivalents
# The only other way to do this would be to have another config file :(
_aiohttp_retryable_exceptions = [
aiohttp.ClientConnectionError,
aiohttp.ClientPayloadError,
aiohttp.ServerDisconnectedError,
aiohttp.http_exceptions.HttpProcessingError,
asyncio.TimeoutError,
]
botocore.retryhandler.EXCEPTION_MAP['GENERAL_CONNECTION_ERROR'].extend(
_aiohttp_retryable_exceptions
)
def _text(s, encoding='utf-8', errors='strict'):
if isinstance(s, bytes):
return s.decode(encoding, errors)
return s # pragma: no cover
# Unfortunately aiohttp changed the behavior of streams:
# github.com/aio-libs/aiohttp/issues/1907
# We need this wrapper until we have a final resolution
class _IOBaseWrapper(wrapt.ObjectProxy):
def close(self):
# this stream should not be closed by aiohttp, like 1.x
pass
# This is similar to botocore.response.StreamingBody
class ClientResponseContentProxy(wrapt.ObjectProxy):
"""Proxy object for content stream of http response. This is here in case
you want to pass around the "Body" of the response without closing the
response itself."""
def __init__(self, response):
super().__init__(response.__wrapped__.content)
self._self_response = response
# Note: we don't have a __del__ method as the ClientResponse has a __del__
# which will warn the user if they didn't close/release the response
# explicitly. A release here would mean reading all the unread data
# (which could be very large), and a close would mean being unable to re-
# use the connection, so the user MUST choose. Default is to warn + close
async def __aenter__(self):
await self._self_response.__aenter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
return await self._self_response.__aexit__(exc_type, exc_val, exc_tb)
@property
def url(self):
return self._self_response.url
def close(self):
self._self_response.close()
class ClientResponseProxy(wrapt.ObjectProxy):
"""Proxy object for http response useful for porting from
botocore underlying http library."""
def __init__(self, *args, **kwargs):
super().__init__(ClientResponse(*args, **kwargs))
# this matches ClientResponse._body
self._self_body = None
@property
def status_code(self):
return self.status
@status_code.setter
def status_code(self, value):
# botocore tries to set this, see:
# https://github.com/aio-libs/aiobotocore/issues/190
# Luckily status is an attribute we can set
self.status = value
@property
def content(self):
return self._self_body
@property
def raw(self):
return ClientResponseContentProxy(self)
async def read(self):
self._self_body = await self.__wrapped__.read()
return self._self_body
| 3,127 | Python | 30.28 | 78 | 0.684362 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/config.py | import copy
import botocore.client
from botocore.exceptions import ParamValidationError
class AioConfig(botocore.client.Config):
def __init__(self, connector_args=None, **kwargs):
super().__init__(**kwargs)
self._validate_connector_args(connector_args)
self.connector_args = copy.copy(connector_args)
if not self.connector_args:
self.connector_args = dict()
if 'keepalive_timeout' not in self.connector_args:
# AWS has a 20 second idle timeout:
# https://forums.aws.amazon.com/message.jspa?messageID=215367
# and aiohttp default timeout is 30s so we set it to something
# reasonable here
self.connector_args['keepalive_timeout'] = 12
def merge(self, other_config):
# Adapted from parent class
config_options = copy.copy(self._user_provided_options)
config_options.update(other_config._user_provided_options)
return AioConfig(self.connector_args, **config_options)
@staticmethod
def _validate_connector_args(connector_args):
if connector_args is None:
return
for k, v in connector_args.items():
# verify_ssl is handled by verify parameter to create_client
if k == 'use_dns_cache':
if not isinstance(v, bool):
raise ParamValidationError(
report='{} value must be a boolean'.format(k))
elif k in ['keepalive_timeout']:
if not isinstance(v, (float, int)):
raise ParamValidationError(
report='{} value must be a float/int'.format(k))
elif k == 'force_close':
if not isinstance(v, bool):
raise ParamValidationError(
report='{} value must be a boolean'.format(k))
# limit is handled by max_pool_connections
elif k == 'ssl_context':
import ssl
if not isinstance(v, ssl.SSLContext):
raise ParamValidationError(
report='{} must be an SSLContext instance'.format(k))
else:
raise ParamValidationError(
report='invalid connector_arg:{}'.format(k))
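# Editorial sketch (not part of the library): constructing a config with the
# connector arguments validated above; the values shown are arbitrary.
#
#     config = AioConfig(connector_args={'keepalive_timeout': 30,
#                                        'use_dns_cache': True})
#     # then pass `config` as the `config=` argument when creating a client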
| 2,308 | Python | 38.810344 | 77 | 0.574523 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/response.py | import asyncio
import wrapt
from botocore.exceptions import IncompleteReadError, ReadTimeoutError
class AioReadTimeoutError(ReadTimeoutError, asyncio.TimeoutError):
pass
class StreamingBody(wrapt.ObjectProxy):
"""Wrapper class for an http response body.
This provides a few additional conveniences that do not exist
in the urllib3 model:
* Set the timeout on the socket (i.e read() timeouts)
* Auto validation of content length, if the amount of bytes
we read does not match the content length, an exception
is raised.
"""
_DEFAULT_CHUNK_SIZE = 1024
def __init__(self, raw_stream, content_length):
super().__init__(raw_stream)
self._self_content_length = content_length
self._self_amount_read = 0
# https://github.com/GrahamDumpleton/wrapt/issues/73
async def __aenter__(self):
return await self.__wrapped__.__aenter__()
async def __aexit__(self, exc_type, exc_val, exc_tb):
return await self.__wrapped__.__aexit__(exc_type, exc_val, exc_tb)
# NOTE: set_socket_timeout was only for when requests didn't support
# read timeouts, so not needed
def tell(self):
return self._self_amount_read
async def read(self, amt=None):
"""Read at most amt bytes from the stream.
If the amt argument is omitted, read all data.
"""
# botocore to aiohttp mapping
try:
chunk = await self.__wrapped__.read(amt if amt is not None else -1)
except asyncio.TimeoutError as e:
raise AioReadTimeoutError(endpoint_url=self.__wrapped__.url,
error=e)
self._self_amount_read += len(chunk)
if amt is None or (not chunk and amt > 0):
# If the server sends empty contents or
# we ask to read all of the contents, then we know
# we need to verify the content length.
self._verify_content_length()
return chunk
def __aiter__(self):
"""Return an iterator to yield 1k chunks from the raw stream.
"""
return self.iter_chunks(self._DEFAULT_CHUNK_SIZE)
async def __anext__(self):
"""Return the next 1k chunk from the raw stream.
"""
current_chunk = await self.read(self._DEFAULT_CHUNK_SIZE)
if current_chunk:
return current_chunk
raise StopAsyncIteration
anext = __anext__
async def iter_lines(self, chunk_size=1024, keepends=False):
"""Return an iterator to yield lines from the raw stream.
This is achieved by reading chunk of bytes (of size chunk_size) at a
time from the raw stream, and then yielding lines from there.
"""
pending = b''
async for chunk in self.iter_chunks(chunk_size):
lines = (pending + chunk).splitlines(True)
for line in lines[:-1]:
yield line.splitlines(keepends)[0]
pending = lines[-1]
if pending:
yield pending.splitlines(keepends)[0]
async def iter_chunks(self, chunk_size=_DEFAULT_CHUNK_SIZE):
"""Return an iterator to yield chunks of chunk_size bytes from the raw
stream.
"""
while True:
current_chunk = await self.read(chunk_size)
if current_chunk == b"":
break
yield current_chunk
def _verify_content_length(self):
# See: https://github.com/kennethreitz/requests/issues/1855
# Basically, our http library doesn't do this for us, so we have
# to do this ourselves.
if self._self_content_length is not None and \
self._self_amount_read != int(self._self_content_length):
raise IncompleteReadError(
actual_bytes=self._self_amount_read,
expected_bytes=int(self._self_content_length))
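# Editorial sketch (not part of the library): typical consumption of a
# StreamingBody from a service response, e.g. an S3 GetObject 'Body'.
# `process` is a hypothetical callback.
#
#     async with response['Body'] as stream:
#         async for chunk in stream.iter_chunks(chunk_size=4096):
#             process(chunk)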
| 3,916 | Python | 33.973214 | 79 | 0.607763 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/signers.py | import datetime
import botocore
import botocore.auth
from botocore.signers import RequestSigner, UnknownSignatureVersionError, \
UnsupportedSignatureVersionError, create_request_object, prepare_request_dict, \
_should_use_global_endpoint, S3PostPresigner
from botocore.exceptions import UnknownClientMethodError
class AioRequestSigner(RequestSigner):
async def handler(self, operation_name=None, request=None, **kwargs):
# This is typically hooked up to the "request-created" event
# from a client's event emitter. When a new request is created
# this method is invoked to sign the request.
# Don't call this method directly.
return await self.sign(operation_name, request)
async def sign(self, operation_name, request, region_name=None,
signing_type='standard', expires_in=None,
signing_name=None):
explicit_region_name = region_name
if region_name is None:
region_name = self._region_name
if signing_name is None:
signing_name = self._signing_name
signature_version = await self._choose_signer(
operation_name, signing_type, request.context)
# Allow mutating request before signing
await self._event_emitter.emit(
'before-sign.{0}.{1}'.format(
self._service_id.hyphenize(), operation_name),
request=request, signing_name=signing_name,
region_name=self._region_name,
signature_version=signature_version, request_signer=self,
operation_name=operation_name
)
if signature_version != botocore.UNSIGNED:
kwargs = {
'signing_name': signing_name,
'region_name': region_name,
'signature_version': signature_version
}
if expires_in is not None:
kwargs['expires'] = expires_in
signing_context = request.context.get('signing', {})
if not explicit_region_name and signing_context.get('region'):
kwargs['region_name'] = signing_context['region']
if signing_context.get('signing_name'):
kwargs['signing_name'] = signing_context['signing_name']
try:
auth = await self.get_auth_instance(**kwargs)
except UnknownSignatureVersionError as e:
if signing_type != 'standard':
raise UnsupportedSignatureVersionError(
signature_version=signature_version)
else:
raise e
auth.add_auth(request)
async def get_auth_instance(self, signing_name, region_name,
signature_version=None, **kwargs):
if signature_version is None:
signature_version = self._signature_version
cls = botocore.auth.AUTH_TYPE_MAPS.get(signature_version)
if cls is None:
raise UnknownSignatureVersionError(
signature_version=signature_version)
frozen_credentials = None
if self._credentials is not None:
frozen_credentials = await self._credentials.get_frozen_credentials()
kwargs['credentials'] = frozen_credentials
if cls.REQUIRES_REGION:
if self._region_name is None:
raise botocore.exceptions.NoRegionError()
kwargs['region_name'] = region_name
kwargs['service_name'] = signing_name
auth = cls(**kwargs)
return auth
# Alias get_auth for backwards compatibility.
get_auth = get_auth_instance
async def _choose_signer(self, operation_name, signing_type, context):
signing_type_suffix_map = {
'presign-post': '-presign-post',
'presign-url': '-query'
}
suffix = signing_type_suffix_map.get(signing_type, '')
signature_version = self._signature_version
if signature_version is not botocore.UNSIGNED and not \
signature_version.endswith(suffix):
signature_version += suffix
handler, response = await self._event_emitter.emit_until_response(
'choose-signer.{0}.{1}'.format(
self._service_id.hyphenize(), operation_name),
signing_name=self._signing_name, region_name=self._region_name,
signature_version=signature_version, context=context)
if response is not None:
signature_version = response
# The suffix needs to be checked again in case we get an improper
# signature version from choose-signer.
if signature_version is not botocore.UNSIGNED and not \
signature_version.endswith(suffix):
signature_version += suffix
return signature_version
async def generate_presigned_url(self, request_dict, operation_name,
expires_in=3600, region_name=None,
signing_name=None):
request = create_request_object(request_dict)
await self.sign(operation_name, request, region_name,
'presign-url', expires_in, signing_name)
request.prepare()
return request.url
def add_generate_db_auth_token(class_attributes, **kwargs):
class_attributes['generate_db_auth_token'] = generate_db_auth_token
async def generate_db_auth_token(self, DBHostname, Port, DBUsername, Region=None):
"""Generates an auth token used to connect to a db with IAM credentials.
:type DBHostname: str
:param DBHostname: The hostname of the database to connect to.
:type Port: int
:param Port: The port number the database is listening on.
:type DBUsername: str
:param DBUsername: The username to log in as.
:type Region: str
:param Region: The region the database is in. If None, the client
region will be used.
:return: A presigned url which can be used as an auth token.
"""
region = Region
if region is None:
region = self.meta.region_name
params = {
'Action': 'connect',
'DBUser': DBUsername,
}
request_dict = {
'url_path': '/',
'query_string': '',
'headers': {},
'body': params,
'method': 'GET'
}
# RDS requires that the scheme not be set when sent over. This can cause
# issues when signing because the Python url parsing libraries follow
# RFC 1808 closely, which states that a netloc must be introduced by `//`.
# Otherwise the url is presumed to be relative, and thus the whole
# netloc would be treated as a path component. To work around this we
# introduce https here and remove it once we're done processing it.
scheme = 'https://'
endpoint_url = '%s%s:%s' % (scheme, DBHostname, Port)
prepare_request_dict(request_dict, endpoint_url)
presigned_url = await self._request_signer.generate_presigned_url(
operation_name='connect', request_dict=request_dict,
region_name=region, expires_in=900, signing_name='rds-db'
)
return presigned_url[len(scheme):]
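# Editorial sketch (not part of the library): hypothetical usage from an RDS
# client created with aiobotocore; the host and user names are placeholders.
#
#     token = await rds_client.generate_db_auth_token(
#         DBHostname='mydb.abc123.us-east-1.rds.amazonaws.com',
#         Port=5432, DBUsername='iam_user')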
def add_generate_presigned_url(class_attributes, **kwargs):
class_attributes['generate_presigned_url'] = generate_presigned_url
async def generate_presigned_url(self, ClientMethod, Params=None, ExpiresIn=3600,
HttpMethod=None):
"""Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
``ClientMethod``.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method's model.
:returns: The presigned url
"""
client_method = ClientMethod
params = Params
if params is None:
params = {}
expires_in = ExpiresIn
http_method = HttpMethod
context = {
'is_presign_request': True,
'use_global_endpoint': _should_use_global_endpoint(self),
}
request_signer = self._request_signer
serializer = self._serializer
try:
operation_name = self._PY_TO_OP_NAME[client_method]
except KeyError:
raise UnknownClientMethodError(method_name=client_method)
operation_model = self.meta.service_model.operation_model(
operation_name)
params = await self._emit_api_params(params, operation_model, context)
# Create a request dict based on the params to serialize.
request_dict = serializer.serialize_to_request(
params, operation_model)
# Switch out the http method if user specified it.
if http_method is not None:
request_dict['method'] = http_method
# Prepare the request dict by including the client's endpoint url.
prepare_request_dict(
request_dict, endpoint_url=self.meta.endpoint_url, context=context)
# Generate the presigned url.
return await request_signer.generate_presigned_url(
request_dict=request_dict, expires_in=expires_in,
operation_name=operation_name)
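# Editorial sketch (not part of the library): hypothetical usage with an S3
# client; the bucket and key names are placeholders.
#
#     url = await s3_client.generate_presigned_url(
#         'get_object', Params={'Bucket': 'my-bucket', 'Key': 'my-key'},
#         ExpiresIn=300)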
class AioS3PostPresigner(S3PostPresigner):
async def generate_presigned_post(self, request_dict, fields=None,
conditions=None, expires_in=3600,
region_name=None):
if fields is None:
fields = {}
if conditions is None:
conditions = []
# Create the policy for the post.
policy = {}
# Create an expiration date for the policy
datetime_now = datetime.datetime.utcnow()
expire_date = datetime_now + datetime.timedelta(seconds=expires_in)
policy['expiration'] = expire_date.strftime(botocore.auth.ISO8601)
# Append all of the conditions that the user supplied.
policy['conditions'] = []
for condition in conditions:
policy['conditions'].append(condition)
# Store the policy and the fields in the request for signing
request = create_request_object(request_dict)
request.context['s3-presign-post-fields'] = fields
request.context['s3-presign-post-policy'] = policy
await self._request_signer.sign(
'PutObject', request, region_name, 'presign-post')
# Return the url and the fields for the form to post.
return {'url': request.url, 'fields': fields}
def add_generate_presigned_post(class_attributes, **kwargs):
class_attributes['generate_presigned_post'] = generate_presigned_post
async def generate_presigned_post(self, Bucket, Key, Fields=None, Conditions=None,
ExpiresIn=3600):
bucket = Bucket
key = Key
fields = Fields
conditions = Conditions
expires_in = ExpiresIn
if fields is None:
fields = {}
else:
fields = fields.copy()
if conditions is None:
conditions = []
post_presigner = AioS3PostPresigner(self._request_signer)
serializer = self._serializer
# We choose the CreateBucket operation model because its url gets
# serialized to what a presign post requires.
operation_model = self.meta.service_model.operation_model(
'CreateBucket')
# Create a request dict based on the params to serialize.
request_dict = serializer.serialize_to_request(
{'Bucket': bucket}, operation_model)
# Prepare the request dict by including the client's endpoint url.
prepare_request_dict(
request_dict, endpoint_url=self.meta.endpoint_url,
context={
'is_presign_request': True,
'use_global_endpoint': _should_use_global_endpoint(self),
},
)
# Append that the bucket name to the list of conditions.
conditions.append({'bucket': bucket})
# If the key ends with ${filename}, the only constraint that can be
# imposed is that it starts with the specified prefix.
if key.endswith('${filename}'):
conditions.append(["starts-with", '$key', key[:-len('${filename}')]])
else:
conditions.append({'key': key})
# Add the key to the fields.
fields['key'] = key
return await post_presigner.generate_presigned_post(
request_dict=request_dict, fields=fields, conditions=conditions,
expires_in=expires_in)
| 12,640 | Python | 35.961988 | 84 | 0.632832 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/hooks.py | import asyncio
from botocore.hooks import HierarchicalEmitter, logger
class AioHierarchicalEmitter(HierarchicalEmitter):
async def _emit(self, event_name, kwargs, stop_on_response=False):
responses = []
# Invoke the event handlers from most specific
# to least specific, each time stripping off a dot.
handlers_to_call = self._lookup_cache.get(event_name)
if handlers_to_call is None:
handlers_to_call = self._handlers.prefix_search(event_name)
self._lookup_cache[event_name] = handlers_to_call
elif not handlers_to_call:
# Short circuit and return an empty response if we have
# no handlers to call. This is the common case where
# for the majority of signals, nothing is listening.
return []
kwargs['event_name'] = event_name
responses = []
for handler in handlers_to_call:
logger.debug('Event %s: calling handler %s', event_name, handler)
# Await the handler if it's a coroutine.
if asyncio.iscoroutinefunction(handler):
response = await handler(**kwargs)
else:
response = handler(**kwargs)
responses.append((handler, response))
if stop_on_response and response is not None:
return responses
return responses
async def emit_until_response(self, event_name, **kwargs):
responses = await self._emit(event_name, kwargs, stop_on_response=True)
if responses:
return responses[-1]
else:
return None, None
| 1,636 | Python | 37.97619 | 79 | 0.614914 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/args.py | import copy
from botocore.args import ClientArgsCreator
import botocore.serialize
import botocore.parsers
from .config import AioConfig
from .endpoint import AioEndpointCreator
from .signers import AioRequestSigner
class AioClientArgsCreator(ClientArgsCreator):
# NOTE: we override this so we can pull out the custom AioConfig params and
# use an AioEndpointCreator
def get_client_args(self, service_model, region_name, is_secure,
endpoint_url, verify, credentials, scoped_config,
client_config, endpoint_bridge):
final_args = self.compute_client_args(
service_model, client_config, endpoint_bridge, region_name,
endpoint_url, is_secure, scoped_config)
# service_name = final_args['service_name']
parameter_validation = final_args['parameter_validation']
endpoint_config = final_args['endpoint_config']
protocol = final_args['protocol']
config_kwargs = final_args['config_kwargs']
s3_config = final_args['s3_config']
partition = endpoint_config['metadata'].get('partition', None)
socket_options = final_args['socket_options']
signing_region = endpoint_config['signing_region']
endpoint_region_name = endpoint_config['region_name']
event_emitter = copy.copy(self._event_emitter)
signer = AioRequestSigner(
service_model.service_id, signing_region,
endpoint_config['signing_name'],
endpoint_config['signature_version'],
credentials, event_emitter
)
config_kwargs['s3'] = s3_config
# aiobotocore addition
if isinstance(client_config, AioConfig):
connector_args = client_config.connector_args
else:
connector_args = None
new_config = AioConfig(connector_args, **config_kwargs)
endpoint_creator = AioEndpointCreator(event_emitter)
endpoint = endpoint_creator.create_endpoint(
service_model, region_name=endpoint_region_name,
endpoint_url=endpoint_config['endpoint_url'], verify=verify,
response_parser_factory=self._response_parser_factory,
max_pool_connections=new_config.max_pool_connections,
proxies=new_config.proxies,
timeout=(new_config.connect_timeout, new_config.read_timeout),
socket_options=socket_options,
client_cert=new_config.client_cert,
connector_args=new_config.connector_args)
serializer = botocore.serialize.create_serializer(
protocol, parameter_validation)
response_parser = botocore.parsers.create_parser(protocol)
return {
'serializer': serializer,
'endpoint': endpoint,
'response_parser': response_parser,
'event_emitter': event_emitter,
'request_signer': signer,
'service_model': service_model,
'loader': self._loader,
'client_config': new_config,
'partition': partition,
'exceptions_factory': self._exceptions_factory
}
| 3,155 | Python | 38.949367 | 79 | 0.639303 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/__init__.py | from .session import get_session, AioSession
__all__ = ['get_session', 'AioSession']
__version__ = '1.2.0'
| 108 | Python | 20.799996 | 44 | 0.648148 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/waiter.py | import asyncio
# WaiterModel is required for client.py import
from botocore.exceptions import ClientError
from botocore.waiter import WaiterModel # noqa: F401, lgtm[py/unused-import]
from botocore.waiter import Waiter, xform_name, logger, WaiterError, \
NormalizedOperationMethod as _NormalizedOperationMethod
from botocore.docs.docstring import WaiterDocstring
from botocore.utils import get_service_module_name
class NormalizedOperationMethod(_NormalizedOperationMethod):
async def __call__(self, **kwargs):
try:
return await self._client_method(**kwargs)
except ClientError as e:
return e.response
class AIOWaiter(Waiter):
async def wait(self, **kwargs):
acceptors = list(self.config.acceptors)
current_state = 'waiting'
# pop the invocation specific config
config = kwargs.pop('WaiterConfig', {})
sleep_amount = config.get('Delay', self.config.delay)
max_attempts = config.get('MaxAttempts', self.config.max_attempts)
last_matched_acceptor = None
num_attempts = 0
while True:
response = await self._operation_method(**kwargs)
num_attempts += 1
for acceptor in acceptors:
if acceptor.matcher_func(response):
last_matched_acceptor = acceptor
current_state = acceptor.state
break
else:
# If none of the acceptors matched, we should
# transition to the failure state if an error
# response was received.
if 'Error' in response:
# Transition to a failure state, which we
# can just handle here by raising an exception.
raise WaiterError(
name=self.name,
reason='An error occurred (%s): %s' % (
response['Error'].get('Code', 'Unknown'),
response['Error'].get('Message', 'Unknown'),
),
last_response=response,
)
if current_state == 'success':
logger.debug("Waiting complete, waiter matched the "
"success state.")
return
if current_state == 'failure':
reason = 'Waiter encountered a terminal failure state: %s' % (
acceptor.explanation
)
raise WaiterError(
name=self.name,
reason=reason,
last_response=response,
)
if num_attempts >= max_attempts:
if last_matched_acceptor is None:
reason = 'Max attempts exceeded'
else:
reason = 'Max attempts exceeded. Previously accepted state: %s' % (
acceptor.explanation
)
raise WaiterError(
name=self.name,
reason=reason,
last_response=response,
)
await asyncio.sleep(sleep_amount)
def create_waiter_with_client(waiter_name, waiter_model, client):
"""
:type waiter_name: str
:param waiter_name: The name of the waiter. The name should match
the name (including the casing) of the key name in the waiter
model file (typically this is CamelCasing).
:type waiter_model: botocore.waiter.WaiterModel
:param waiter_model: The model for the waiter configuration.
:type client: botocore.client.BaseClient
:param client: The botocore client associated with the service.
:rtype: botocore.waiter.Waiter
:return: The waiter object.
"""
single_waiter_config = waiter_model.get_waiter(waiter_name)
operation_name = xform_name(single_waiter_config.operation)
operation_method = NormalizedOperationMethod(
getattr(client, operation_name))
# Create a new wait method that will serve as a proxy to the underlying
# Waiter.wait method. This is needed to attach a docstring to the
# method.
async def wait(self, **kwargs):
await AIOWaiter.wait(self, **kwargs)
wait.__doc__ = WaiterDocstring(
waiter_name=waiter_name,
event_emitter=client.meta.events,
service_model=client.meta.service_model,
service_waiter_model=waiter_model,
include_signature=False
)
# Rename the waiter class based on the type of waiter.
waiter_class_name = str('%s.AIOWaiter.%s' % (
get_service_module_name(client.meta.service_model),
waiter_name))
# Create the new waiter class
documented_waiter_cls = type(
waiter_class_name, (AIOWaiter,), {'wait': wait})
# Return an instance of the new waiter class.
return documented_waiter_cls(
waiter_name, single_waiter_config, operation_method
)
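# Editorial sketch (not part of the library): hypothetical usage; the waiter
# name and parameters depend on the service model.
#
#     waiter = s3_client.get_waiter('bucket_exists')
#     await waiter.wait(Bucket='my-bucket')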
| 5,020 | Python | 37.037879 | 87 | 0.581275 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/endpoint.py | import aiohttp
import asyncio
import io
import ssl
import aiohttp.http_exceptions
from aiohttp.client import URL
from botocore.endpoint import EndpointCreator, Endpoint, DEFAULT_TIMEOUT, \
MAX_POOL_CONNECTIONS, logger, history_recorder, create_request_object
from botocore.exceptions import ConnectionClosedError
from botocore.hooks import first_non_none_response
from botocore.utils import is_valid_endpoint_url
from multidict import MultiDict
from urllib.parse import urlparse
from urllib3.response import HTTPHeaderDict
from aiobotocore.response import StreamingBody
from aiobotocore._endpoint_helpers import _text, _IOBaseWrapper, \
ClientResponseProxy
async def convert_to_response_dict(http_response, operation_model):
"""Convert an HTTP response object to a request dict.
This converts the requests library's HTTP response object to
a dictionary.
:type http_response: botocore.vendored.requests.model.Response
:param http_response: The HTTP response from an AWS service request.
:rtype: dict
:return: A response dictionary which will contain the following keys:
* headers (dict)
* status_code (int)
* body (string or file-like object)
"""
response_dict = {
# botocore converts keys to str, so make sure that they are in
# the expected case. See detailed discussion here:
# https://github.com/aio-libs/aiobotocore/pull/116
# aiohttp's CIMultiDict camel cases the headers :(
'headers': HTTPHeaderDict(
{k.decode('utf-8').lower(): v.decode('utf-8')
for k, v in http_response.raw_headers}),
'status_code': http_response.status_code,
'context': {
'operation_name': operation_model.name,
}
}
if response_dict['status_code'] >= 300:
response_dict['body'] = await http_response.read()
elif operation_model.has_event_stream_output:
response_dict['body'] = http_response.raw
elif operation_model.has_streaming_output:
length = response_dict['headers'].get('content-length')
response_dict['body'] = StreamingBody(http_response.raw, length)
else:
response_dict['body'] = await http_response.read()
return response_dict
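# Added shape sketch (illustrative only): for a non-streaming 200 response,
# the dict built above looks roughly like
#     {'headers': HTTPHeaderDict({'content-type': 'application/x-amz-json-1.1'}),
#      'status_code': 200,
#      'context': {'operation_name': 'ListBuckets'},
#      'body': b'...raw payload bytes...'}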
class AioEndpoint(Endpoint):
def __init__(self, *args, proxies=None, **kwargs):
super().__init__(*args, **kwargs)
self.proxies = proxies or {}
async def create_request(self, params, operation_model=None):
request = create_request_object(params)
if operation_model:
request.stream_output = any([
operation_model.has_streaming_output,
operation_model.has_event_stream_output
])
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'request-created.{service_id}.{op_name}'.format(
service_id=service_id,
op_name=operation_model.name)
await self._event_emitter.emit(event_name, request=request,
operation_name=operation_model.name)
prepared_request = self.prepare_request(request)
return prepared_request
async def _send_request(self, request_dict, operation_model):
attempts = 1
request = await self.create_request(request_dict, operation_model)
context = request_dict['context']
success_response, exception = await self._get_response(
request, operation_model, context)
while await self._needs_retry(attempts, operation_model,
request_dict, success_response,
exception):
attempts += 1
# If there is a stream associated with the request, we need
# to reset it before attempting to send the request again.
# This will ensure that we resend the entire contents of the
# body.
request.reset_stream()
# Create a new request when retried (including a new signature).
request = await self.create_request(
request_dict, operation_model)
success_response, exception = await self._get_response(
request, operation_model, context)
if success_response is not None and \
'ResponseMetadata' in success_response[1]:
# We want to share num retries, not num attempts.
total_retries = attempts - 1
success_response[1]['ResponseMetadata']['RetryAttempts'] = \
total_retries
if exception is not None:
raise exception
else:
return success_response
async def _get_response(self, request, operation_model, context):
# This will return a tuple of (success_response, exception)
# and success_response is itself a tuple of
# (http_response, parsed_dict).
# If an exception occurs then the success_response is None.
# If no exception occurs then exception is None.
success_response, exception = await self._do_get_response(
request, operation_model)
kwargs_to_emit = {
'response_dict': None,
'parsed_response': None,
'context': context,
'exception': exception,
}
if success_response is not None:
http_response, parsed_response = success_response
kwargs_to_emit['parsed_response'] = parsed_response
kwargs_to_emit['response_dict'] = await convert_to_response_dict(
http_response, operation_model)
service_id = operation_model.service_model.service_id.hyphenize()
await self._event_emitter.emit(
'response-received.%s.%s' % (
service_id, operation_model.name), **kwargs_to_emit)
return success_response, exception
async def _do_get_response(self, request, operation_model):
try:
logger.debug("Sending http request: %s", request)
history_recorder.record('HTTP_REQUEST', {
'method': request.method,
'headers': request.headers,
'streaming': operation_model.has_streaming_input,
'url': request.url,
'body': request.body
})
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'before-send.%s.%s' % (
service_id, operation_model.name)
responses = await self._event_emitter.emit(event_name,
request=request)
http_response = first_non_none_response(responses)
if http_response is None:
http_response = await self._send(request)
except aiohttp.ClientConnectionError as e:
e.request = request # botocore expects the request property
return None, e
except aiohttp.http_exceptions.BadStatusLine:
better_exception = ConnectionClosedError(
endpoint_url=request.url, request=request)
return None, better_exception
except Exception as e:
logger.debug("Exception received when sending HTTP request.",
exc_info=True)
return None, e
# This returns the http_response and the parsed_data.
response_dict = await convert_to_response_dict(http_response,
operation_model)
http_response_record_dict = response_dict.copy()
http_response_record_dict['streaming'] = \
operation_model.has_streaming_output
history_recorder.record('HTTP_RESPONSE', http_response_record_dict)
protocol = operation_model.metadata['protocol']
parser = self._response_parser_factory.create_parser(protocol)
parsed_response = parser.parse(
response_dict, operation_model.output_shape)
if http_response.status_code >= 300:
self._add_modeled_error_fields(
response_dict, parsed_response,
operation_model, parser,
)
history_recorder.record('PARSED_RESPONSE', parsed_response)
return (http_response, parsed_response), None
    # NOTE: the only change here from botocore is replacing time.sleep
    # with asyncio.sleep
async def _needs_retry(self, attempts, operation_model, request_dict,
response=None, caught_exception=None):
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'needs-retry.%s.%s' % (
service_id,
operation_model.name)
responses = await self._event_emitter.emit(
event_name, response=response, endpoint=self,
operation=operation_model, attempts=attempts,
caught_exception=caught_exception, request_dict=request_dict)
handler_response = first_non_none_response(responses)
if handler_response is None:
return False
else:
# Request needs to be retried, and we need to sleep
# for the specified number of times.
logger.debug("Response received to retry, sleeping for "
"%s seconds", handler_response)
await asyncio.sleep(handler_response)
return True
async def _send(self, request):
        # Note: when using aiobotocore with DynamoDB, requests fail on crc32
        # checksum computation as soon as the response data reaches ~5KB.
        # When the AWS response is gzip compressed:
        # 1. aiohttp automatically decompresses the data
        # (http://aiohttp.readthedocs.io/en/stable/client.html#binary-response-content)
        # 2. botocore then computes the crc32 on the decompressed bytes and
        # fails, because AWS computed the crc32 on the compressed data.
        # The Accept-Encoding header below forces AWS not to use gzip
        # compression; if aiohttp could be configured to skip decompression,
        # this could be removed to take advantage of AWS gzip compression.
        # https://github.com/boto/botocore/issues/1255
url = request.url
headers = request.headers
data = request.body
headers['Accept-Encoding'] = 'identity'
headers_ = MultiDict(
(z[0], _text(z[1], encoding='utf-8')) for z in headers.items())
# botocore does this during the request so we do this here as well
# TODO: this should be part of the ClientSession, perhaps make wrapper
proxy = self.proxies.get(urlparse(url.lower()).scheme)
if isinstance(data, io.IOBase):
data = _IOBaseWrapper(data)
url = URL(url, encoded=True)
resp = await self.http_session.request(
request.method, url=url, headers=headers_, data=data, proxy=proxy)
# If we're not streaming, read the content so we can retry any timeout
# errors, see:
# https://github.com/boto/botocore/blob/develop/botocore/vendored/requests/sessions.py#L604
if not request.stream_output:
await resp.read()
return resp
class AioEndpointCreator(EndpointCreator):
# TODO: handle socket_options
def create_endpoint(self, service_model, region_name, endpoint_url,
verify=None, response_parser_factory=None,
timeout=DEFAULT_TIMEOUT,
max_pool_connections=MAX_POOL_CONNECTIONS,
http_session_cls=aiohttp.ClientSession,
proxies=None,
socket_options=None,
client_cert=None,
connector_args=None):
if not is_valid_endpoint_url(endpoint_url):
raise ValueError("Invalid endpoint: %s" % endpoint_url)
if proxies is None:
proxies = self._get_proxies(endpoint_url)
endpoint_prefix = service_model.endpoint_prefix
logger.debug('Setting %s timeout as %s', endpoint_prefix, timeout)
if isinstance(timeout, (list, tuple)):
conn_timeout, read_timeout = timeout
else:
conn_timeout = read_timeout = timeout
if connector_args is None:
# AWS has a 20 second idle timeout:
# https://forums.aws.amazon.com/message.jspa?messageID=215367
# aiohttp default timeout is 30s so set something reasonable here
connector_args = dict(keepalive_timeout=12)
timeout = aiohttp.ClientTimeout(
sock_connect=conn_timeout,
sock_read=read_timeout
)
ssl_context = None
if client_cert:
if isinstance(client_cert, str):
key_file = None
cert_file = client_cert
elif isinstance(client_cert, tuple):
cert_file, key_file = client_cert
else:
                raise TypeError('client_cert must be str or tuple, '
                                'got %s' % type(client_cert).__name__)
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_context.load_cert_chain(cert_file, key_file)
connector = aiohttp.TCPConnector(
limit=max_pool_connections,
verify_ssl=self._get_verify_value(verify),
ssl_context=ssl_context,
**connector_args)
aio_session = http_session_cls(
connector=connector,
timeout=timeout,
skip_auto_headers={'CONTENT-TYPE'},
response_class=ClientResponseProxy,
auto_decompress=False)
return AioEndpoint(
endpoint_url,
endpoint_prefix=endpoint_prefix,
event_emitter=self._event_emitter,
response_parser_factory=response_parser_factory,
http_session=aio_session,
proxies=proxies)
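# Added usage sketch (not part of upstream aiobotocore): how an endpoint is
# normally built. `creator` is assumed to be an AioEndpointCreator and
# `service_model` a loaded botocore service model; the region, URL and cert
# paths are illustrative, and AioClientCreator usually wires this up for you.
def _example_create_endpoint(creator, service_model):
    return creator.create_endpoint(
        service_model,
        region_name='us-east-1',
        endpoint_url='https://s3.us-east-1.amazonaws.com',
        timeout=(10, 30),  # (connect, read) seconds, see tuple handling above
        client_cert=('client.crt', 'client.key'))  # or a single PEM file path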
| 13,904 | Python | 42.317757 | 99 | 0.611263 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/utils.py | import asyncio
import logging
import json
import aiohttp
import aiohttp.client_exceptions
from botocore.utils import ContainerMetadataFetcher, InstanceMetadataFetcher, \
IMDSFetcher, get_environ_proxies, BadIMDSRequestError, S3RegionRedirector, \
ClientError
from botocore.exceptions import (
InvalidIMDSEndpointError, MetadataRetrievalError,
)
import botocore.awsrequest
logger = logging.getLogger(__name__)
RETRYABLE_HTTP_ERRORS = (aiohttp.client_exceptions.ClientError, asyncio.TimeoutError)
class AioIMDSFetcher(IMDSFetcher):
class Response(object):
def __init__(self, status_code, text, url):
self.status_code = status_code
self.url = url
self.text = text
self.content = text
def __init__(self, *args, session=None, **kwargs):
super(AioIMDSFetcher, self).__init__(*args, **kwargs)
self._trust_env = bool(get_environ_proxies(self._base_url))
self._session = session or aiohttp.ClientSession
async def _fetch_metadata_token(self):
self._assert_enabled()
url = self._base_url + self._TOKEN_PATH
headers = {
'x-aws-ec2-metadata-token-ttl-seconds': self._TOKEN_TTL,
}
self._add_user_agent(headers)
request = botocore.awsrequest.AWSRequest(
method='PUT', url=url, headers=headers)
timeout = aiohttp.ClientTimeout(total=self._timeout)
async with self._session(timeout=timeout,
trust_env=self._trust_env) as session:
for i in range(self._num_attempts):
try:
async with session.put(url, headers=headers) as resp:
text = await resp.text()
if resp.status == 200:
return text
elif resp.status in (404, 403, 405):
return None
elif resp.status in (400,):
raise BadIMDSRequestError(request)
except asyncio.TimeoutError:
return None
except RETRYABLE_HTTP_ERRORS as e:
logger.debug(
"Caught retryable HTTP exception while making metadata "
"service request to %s: %s", url, e, exc_info=True)
except aiohttp.client_exceptions.ClientConnectorError as e:
if getattr(e, 'errno', None) == 8 or \
str(getattr(e, 'os_error', None)) == \
'Domain name not found': # threaded vs async resolver
raise InvalidIMDSEndpointError(endpoint=url, error=e)
else:
raise
return None
async def _get_request(self, url_path, retry_func, token=None):
self._assert_enabled()
if retry_func is None:
retry_func = self._default_retry
url = self._base_url + url_path
headers = {}
if token is not None:
headers['x-aws-ec2-metadata-token'] = token
self._add_user_agent(headers)
timeout = aiohttp.ClientTimeout(total=self._timeout)
async with self._session(timeout=timeout,
trust_env=self._trust_env) as session:
for i in range(self._num_attempts):
try:
async with session.get(url, headers=headers) as resp:
text = await resp.text()
response = self.Response(resp.status, text, resp.url)
if not retry_func(response):
return response
except RETRYABLE_HTTP_ERRORS as e:
logger.debug(
"Caught retryable HTTP exception while making metadata "
"service request to %s: %s", url, e, exc_info=True)
raise self._RETRIES_EXCEEDED_ERROR_CLS()
class AioInstanceMetadataFetcher(AioIMDSFetcher, InstanceMetadataFetcher):
async def retrieve_iam_role_credentials(self):
try:
token = await self._fetch_metadata_token()
role_name = await self._get_iam_role(token)
credentials = await self._get_credentials(role_name, token)
if self._contains_all_credential_fields(credentials):
return {
'role_name': role_name,
'access_key': credentials['AccessKeyId'],
'secret_key': credentials['SecretAccessKey'],
'token': credentials['Token'],
'expiry_time': credentials['Expiration'],
}
else:
if 'Code' in credentials and 'Message' in credentials:
                    logger.debug('Error response received when retrieving '
                                 'credentials: %s.', credentials)
return {}
except self._RETRIES_EXCEEDED_ERROR_CLS:
logger.debug("Max number of attempts exceeded (%s) when "
"attempting to retrieve data from metadata service.",
self._num_attempts)
except BadIMDSRequestError as e:
logger.debug("Bad IMDS request: %s", e.request)
return {}
async def _get_iam_role(self, token=None):
r = await self._get_request(
url_path=self._URL_PATH,
retry_func=self._needs_retry_for_role_name,
token=token
)
return r.text
async def _get_credentials(self, role_name, token=None):
r = await self._get_request(
url_path=self._URL_PATH + role_name,
retry_func=self._needs_retry_for_credentials,
token=token
)
return json.loads(r.text)
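# Added usage sketch (not part of upstream aiobotocore): fetching EC2
# instance-profile credentials; the timeout/attempt values are illustrative.
# retrieve_iam_role_credentials() returns {} when IMDS is unavailable.
async def _example_fetch_instance_credentials():
    fetcher = AioInstanceMetadataFetcher(timeout=1.0, num_attempts=2)
    return await fetcher.retrieve_iam_role_credentials()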
class AioS3RegionRedirector(S3RegionRedirector):
async def redirect_from_error(self, request_dict, response, operation, **kwargs):
if response is None:
# This could be none if there was a ConnectionError or other
# transport error.
return
if self._is_s3_accesspoint(request_dict.get('context', {})):
logger.debug(
'S3 request was previously to an accesspoint, not redirecting.'
)
return
if request_dict.get('context', {}).get('s3_redirected'):
logger.debug(
'S3 request was previously redirected, not redirecting.')
return
error = response[1].get('Error', {})
error_code = error.get('Code')
response_metadata = response[1].get('ResponseMetadata', {})
# We have to account for 400 responses because
# if we sign a Head* request with the wrong region,
# we'll get a 400 Bad Request but we won't get a
# body saying it's an "AuthorizationHeaderMalformed".
is_special_head_object = (
error_code in ['301', '400'] and
operation.name == 'HeadObject'
)
is_special_head_bucket = (
error_code in ['301', '400'] and
operation.name == 'HeadBucket' and
'x-amz-bucket-region' in response_metadata.get('HTTPHeaders', {})
)
is_wrong_signing_region = (
error_code == 'AuthorizationHeaderMalformed' and
'Region' in error
)
is_redirect_status = response[0] is not None and \
response[0].status_code in [301, 302, 307]
is_permanent_redirect = error_code == 'PermanentRedirect'
if not any([is_special_head_object, is_wrong_signing_region,
is_permanent_redirect, is_special_head_bucket,
is_redirect_status]):
return
bucket = request_dict['context']['signing']['bucket']
client_region = request_dict['context'].get('client_region')
new_region = await self.get_bucket_region(bucket, response)
if new_region is None:
logger.debug(
"S3 client configured for region %s but the bucket %s is not "
"in that region and the proper region could not be "
"automatically determined." % (client_region, bucket))
return
logger.debug(
"S3 client configured for region %s but the bucket %s is in region"
" %s; Please configure the proper region to avoid multiple "
"unnecessary redirects and signing attempts." % (
client_region, bucket, new_region))
endpoint = self._endpoint_resolver.resolve('s3', new_region)
endpoint = endpoint['endpoint_url']
signing_context = {
'region': new_region,
'bucket': bucket,
'endpoint': endpoint
}
request_dict['context']['signing'] = signing_context
self._cache[bucket] = signing_context
self.set_request_url(request_dict, request_dict['context'])
request_dict['context']['s3_redirected'] = True
# Return 0 so it doesn't wait to retry
return 0
async def get_bucket_region(self, bucket, response):
# First try to source the region from the headers.
service_response = response[1]
response_headers = service_response['ResponseMetadata']['HTTPHeaders']
if 'x-amz-bucket-region' in response_headers:
return response_headers['x-amz-bucket-region']
# Next, check the error body
region = service_response.get('Error', {}).get('Region', None)
if region is not None:
return region
# Finally, HEAD the bucket. No other choice sadly.
try:
response = await self._client.head_bucket(Bucket=bucket)
headers = response['ResponseMetadata']['HTTPHeaders']
except ClientError as e:
headers = e.response['ResponseMetadata']['HTTPHeaders']
region = headers.get('x-amz-bucket-region', None)
return region
class AioContainerMetadataFetcher(ContainerMetadataFetcher):
def __init__(self, session=None, sleep=asyncio.sleep):
if session is None:
session = aiohttp.ClientSession
super(AioContainerMetadataFetcher, self).__init__(session, sleep)
async def retrieve_full_uri(self, full_url, headers=None):
self._validate_allowed_url(full_url)
return await self._retrieve_credentials(full_url, headers)
async def retrieve_uri(self, relative_uri):
"""Retrieve JSON metadata from ECS metadata.
:type relative_uri: str
:param relative_uri: A relative URI, e.g "/foo/bar?id=123"
:return: The parsed JSON response.
"""
full_url = self.full_url(relative_uri)
return await self._retrieve_credentials(full_url)
async def _retrieve_credentials(self, full_url, extra_headers=None):
headers = {'Accept': 'application/json'}
if extra_headers is not None:
headers.update(extra_headers)
attempts = 0
while True:
try:
return await self._get_response(
full_url, headers, self.TIMEOUT_SECONDS)
except MetadataRetrievalError as e:
logger.debug("Received error when attempting to retrieve "
"container metadata: %s", e, exc_info=True)
await self._sleep(self.SLEEP_TIME)
attempts += 1
if attempts >= self.RETRY_ATTEMPTS:
raise
async def _get_response(self, full_url, headers, timeout):
try:
timeout = aiohttp.ClientTimeout(total=self.TIMEOUT_SECONDS)
async with self._session(timeout=timeout) as session:
async with session.get(full_url, headers=headers) as resp:
if resp.status != 200:
text = await resp.text()
raise MetadataRetrievalError(
error_msg=(
"Received non 200 response (%d) "
"from ECS metadata: %s"
) % (resp.status, text))
try:
return await resp.json()
except ValueError:
text = await resp.text()
error_msg = (
"Unable to parse JSON returned from ECS metadata services"
)
logger.debug('%s:%s', error_msg, text)
raise MetadataRetrievalError(error_msg=error_msg)
except RETRYABLE_HTTP_ERRORS as e:
error_msg = ("Received error when attempting to retrieve "
"ECS metadata: %s" % e)
raise MetadataRetrievalError(error_msg=error_msg)
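# Added usage sketch (not part of upstream aiobotocore): retrieve_uri() is
# how ECS task credentials are fetched; the relative URI normally comes from
# the AWS_CONTAINER_CREDENTIALS_RELATIVE_URI environment variable.
async def _example_fetch_container_credentials(relative_uri):
    fetcher = AioContainerMetadataFetcher()
    return await fetcher.retrieve_uri(relative_uri)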
| 12,902 | Python | 40.223642 | 86 | 0.557976 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/credentials.py | import asyncio
import datetime
import logging
import subprocess
import json
from copy import deepcopy
from typing import Optional
from hashlib import sha1
from dateutil.tz import tzutc
from botocore import UNSIGNED
from botocore.config import Config
import botocore.compat
from botocore.credentials import EnvProvider, Credentials, RefreshableCredentials, \
ReadOnlyCredentials, ContainerProvider, ContainerMetadataFetcher, \
_parse_if_needed, InstanceMetadataProvider, _get_client_creator, \
ProfileProviderBuilder, ConfigProvider, SharedCredentialProvider, \
ProcessProvider, AssumeRoleWithWebIdentityProvider, _local_now, \
CachedCredentialFetcher, _serialize_if_needed, BaseAssumeRoleCredentialFetcher, \
AssumeRoleProvider, AssumeRoleCredentialFetcher, CredentialResolver, \
CanonicalNameCredentialSourcer, BotoProvider, OriginalEC2Provider, \
SSOProvider
from botocore.exceptions import UnauthorizedSSOTokenError
from botocore.exceptions import MetadataRetrievalError, CredentialRetrievalError, \
InvalidConfigError, PartialCredentialsError, RefreshWithMFAUnsupportedError, \
UnknownCredentialError
from botocore.compat import compat_shell_split
from botocore.utils import SSOTokenLoader
from aiobotocore.utils import AioContainerMetadataFetcher, AioInstanceMetadataFetcher
from aiobotocore.config import AioConfig
logger = logging.getLogger(__name__)
def create_credential_resolver(session, cache=None, region_name=None):
"""Create a default credential resolver.
This creates a pre-configured credential resolver
that includes the default lookup chain for
credentials.
"""
profile_name = session.get_config_variable('profile') or 'default'
metadata_timeout = session.get_config_variable('metadata_service_timeout')
num_attempts = session.get_config_variable('metadata_service_num_attempts')
disable_env_vars = session.instance_variables().get('profile') is not None
imds_config = {
'ec2_metadata_service_endpoint': session.get_config_variable(
'ec2_metadata_service_endpoint'),
'imds_use_ipv6': session.get_config_variable('imds_use_ipv6')
}
if cache is None:
cache = {}
env_provider = AioEnvProvider()
container_provider = AioContainerProvider()
instance_metadata_provider = AioInstanceMetadataProvider(
iam_role_fetcher=AioInstanceMetadataFetcher(
timeout=metadata_timeout,
num_attempts=num_attempts,
user_agent=session.user_agent(),
config=imds_config)
)
profile_provider_builder = AioProfileProviderBuilder(
session, cache=cache, region_name=region_name)
assume_role_provider = AioAssumeRoleProvider(
load_config=lambda: session.full_config,
client_creator=_get_client_creator(session, region_name),
cache=cache,
profile_name=profile_name,
credential_sourcer=AioCanonicalNameCredentialSourcer([
env_provider, container_provider, instance_metadata_provider
]),
profile_provider_builder=profile_provider_builder,
)
pre_profile = [
env_provider,
assume_role_provider,
]
profile_providers = profile_provider_builder.providers(
profile_name=profile_name,
disable_env_vars=disable_env_vars,
)
post_profile = [
AioOriginalEC2Provider(),
AioBotoProvider(),
container_provider,
instance_metadata_provider,
]
providers = pre_profile + profile_providers + post_profile
if disable_env_vars:
# An explicitly provided profile will negate an EnvProvider.
# We will defer to providers that understand the "profile"
# concept to retrieve credentials.
        # The one edge case is if all three values are provided via
# env vars:
# export AWS_ACCESS_KEY_ID=foo
# export AWS_SECRET_ACCESS_KEY=bar
# export AWS_PROFILE=baz
# Then, just like our client() calls, the explicit credentials
# will take precedence.
#
# This precedence is enforced by leaving the EnvProvider in the chain.
# This means that the only way a "profile" would win is if the
# EnvProvider does not return credentials, which is what we want
# in this scenario.
providers.remove(env_provider)
logger.debug('Skipping environment variable credential check'
' because profile name was explicitly set.')
resolver = AioCredentialResolver(providers=providers)
return resolver
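# Added usage sketch (not part of upstream aiobotocore): resolving
# credentials through the default chain, assuming an aiobotocore session.
async def _example_resolve_credentials(session):
    resolver = create_credential_resolver(session)
    return await resolver.load_credentials()  # None if the chain is empty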
class AioProfileProviderBuilder(ProfileProviderBuilder):
def _create_process_provider(self, profile_name):
return AioProcessProvider(
profile_name=profile_name,
load_config=lambda: self._session.full_config,
)
def _create_shared_credential_provider(self, profile_name):
credential_file = self._session.get_config_variable('credentials_file')
return AioSharedCredentialProvider(
profile_name=profile_name,
creds_filename=credential_file,
)
def _create_config_provider(self, profile_name):
config_file = self._session.get_config_variable('config_file')
return AioConfigProvider(
profile_name=profile_name,
config_filename=config_file,
)
def _create_web_identity_provider(self, profile_name, disable_env_vars):
return AioAssumeRoleWithWebIdentityProvider(
load_config=lambda: self._session.full_config,
client_creator=_get_client_creator(
self._session, self._region_name),
cache=self._cache,
profile_name=profile_name,
disable_env_vars=disable_env_vars,
)
def _create_sso_provider(self, profile_name):
return AioSSOProvider(
load_config=lambda: self._session.full_config,
client_creator=self._session.create_client,
profile_name=profile_name,
cache=self._cache,
token_cache=self._sso_token_cache,
)
async def get_credentials(session):
resolver = create_credential_resolver(session)
return await resolver.load_credentials()
def create_assume_role_refresher(client, params):
async def refresh():
async with client as sts:
response = await sts.assume_role(**params)
credentials = response['Credentials']
# We need to normalize the credential names to
# the values expected by the refresh creds.
return {
'access_key': credentials['AccessKeyId'],
'secret_key': credentials['SecretAccessKey'],
'token': credentials['SessionToken'],
'expiry_time': _serialize_if_needed(credentials['Expiration']),
}
return refresh
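# Added usage sketch (not part of upstream aiobotocore): wiring the refresher
# into refreshable credentials. `sts_client` (an aiobotocore STS client),
# `params` (RoleArn/RoleSessionName kwargs) and `metadata` (an initial
# credential dict in the shape returned by the refresher) are assumed inputs.
def _example_refreshable_credentials(sts_client, params, metadata):
    refresher = create_assume_role_refresher(sts_client, params)
    return AioRefreshableCredentials.create_from_metadata(
        metadata, refresher, 'assume-role')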
def create_aio_mfa_serial_refresher(actual_refresh):
class _Refresher(object):
def __init__(self, refresh):
self._refresh = refresh
self._has_been_called = False
async def call(self):
if self._has_been_called:
# We can explore an option in the future to support
# reprompting for MFA, but for now we just error out
# when the temp creds expire.
raise RefreshWithMFAUnsupportedError()
self._has_been_called = True
return await self._refresh()
return _Refresher(actual_refresh).call
class AioCredentials(Credentials):
async def get_frozen_credentials(self):
return ReadOnlyCredentials(self.access_key,
self.secret_key,
self.token)
@classmethod
def from_credentials(cls, obj: Optional[Credentials]):
if obj is None:
return None
return cls(
obj.access_key, obj.secret_key,
obj.token, obj.method)
class AioRefreshableCredentials(RefreshableCredentials):
def __init__(self, *args, **kwargs):
super(AioRefreshableCredentials, self).__init__(*args, **kwargs)
self._refresh_lock = asyncio.Lock()
@classmethod
def from_refreshable_credentials(cls, obj: Optional[RefreshableCredentials]):
if obj is None:
return None
        return cls(  # Using internal values here to avoid the properties calling .refresh()
obj._access_key, obj._secret_key,
obj._token, obj._expiry_time,
obj._refresh_using, obj.method,
obj._time_fetcher
)
    # Redeclare the properties so they don't call refresh().
    # The setters must also be redeclared since we override the getters.
@property
def access_key(self):
# TODO: this needs to be resolved
raise NotImplementedError("missing call to self._refresh. "
"Use get_frozen_credentials instead")
return self._access_key
@access_key.setter
def access_key(self, value):
self._access_key = value
@property
def secret_key(self):
# TODO: this needs to be resolved
raise NotImplementedError("missing call to self._refresh. "
"Use get_frozen_credentials instead")
return self._secret_key
@secret_key.setter
def secret_key(self, value):
self._secret_key = value
@property
def token(self):
# TODO: this needs to be resolved
raise NotImplementedError("missing call to self._refresh. "
"Use get_frozen_credentials instead")
return self._token
@token.setter
def token(self, value):
self._token = value
async def _refresh(self):
if not self.refresh_needed(self._advisory_refresh_timeout):
return
        # By this point we need a refresh but it's not critical
if not self._refresh_lock.locked():
async with self._refresh_lock:
if not self.refresh_needed(self._advisory_refresh_timeout):
return
is_mandatory_refresh = self.refresh_needed(
self._mandatory_refresh_timeout)
await self._protected_refresh(is_mandatory=is_mandatory_refresh)
return
elif self.refresh_needed(self._mandatory_refresh_timeout):
# If we're here, we absolutely need a refresh and the
# lock is held so wait for it
async with self._refresh_lock:
# Might have refreshed by now
if not self.refresh_needed(self._mandatory_refresh_timeout):
return
await self._protected_refresh(is_mandatory=True)
async def _protected_refresh(self, is_mandatory):
try:
metadata = await self._refresh_using()
except Exception:
period_name = 'mandatory' if is_mandatory else 'advisory'
logger.warning("Refreshing temporary credentials failed "
"during %s refresh period.",
period_name, exc_info=True)
if is_mandatory:
# If this is a mandatory refresh, then
# all errors that occur when we attempt to refresh
# credentials are propagated back to the user.
raise
# Otherwise we'll just return.
# The end result will be that we'll use the current
# set of temporary credentials we have.
return
self._set_from_data(metadata)
self._frozen_credentials = ReadOnlyCredentials(
self._access_key, self._secret_key, self._token)
if self._is_expired():
msg = ("Credentials were refreshed, but the "
"refreshed credentials are still expired.")
logger.warning(msg)
raise RuntimeError(msg)
async def get_frozen_credentials(self):
await self._refresh()
return self._frozen_credentials
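# Added usage sketch (not part of upstream aiobotocore): the properties above
# intentionally raise, so keys must be read from a frozen snapshot.
async def _example_read_frozen_keys(creds):
    frozen = await creds.get_frozen_credentials()  # refreshes if needed
    return frozen.access_key, frozen.secret_key, frozen.token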
class AioDeferredRefreshableCredentials(AioRefreshableCredentials):
def __init__(self, refresh_using, method, time_fetcher=_local_now):
self._refresh_using = refresh_using
self._access_key = None
self._secret_key = None
self._token = None
self._expiry_time = None
self._time_fetcher = time_fetcher
self._refresh_lock = asyncio.Lock()
self.method = method
self._frozen_credentials = None
def refresh_needed(self, refresh_in=None):
if self._frozen_credentials is None:
return True
return super(AioDeferredRefreshableCredentials, self).refresh_needed(
refresh_in
)
class AioCachedCredentialFetcher(CachedCredentialFetcher):
async def _get_credentials(self):
raise NotImplementedError('_get_credentials()')
async def fetch_credentials(self):
return await self._get_cached_credentials()
async def _get_cached_credentials(self):
"""Get up-to-date credentials.
This will check the cache for up-to-date credentials, calling assume
role if none are available.
"""
response = self._load_from_cache()
if response is None:
response = await self._get_credentials()
self._write_to_cache(response)
else:
logger.debug("Credentials for role retrieved from cache.")
creds = response['Credentials']
expiration = _serialize_if_needed(creds['Expiration'], iso=True)
return {
'access_key': creds['AccessKeyId'],
'secret_key': creds['SecretAccessKey'],
'token': creds['SessionToken'],
'expiry_time': expiration,
}
class AioBaseAssumeRoleCredentialFetcher(BaseAssumeRoleCredentialFetcher,
AioCachedCredentialFetcher):
pass
class AioAssumeRoleCredentialFetcher(AssumeRoleCredentialFetcher,
AioBaseAssumeRoleCredentialFetcher):
async def _get_credentials(self):
"""Get credentials by calling assume role."""
kwargs = self._assume_role_kwargs()
client = await self._create_client()
async with client as sts:
return await sts.assume_role(**kwargs)
async def _create_client(self):
"""Create an STS client using the source credentials."""
frozen_credentials = await self._source_credentials.get_frozen_credentials()
return self._client_creator(
'sts',
aws_access_key_id=frozen_credentials.access_key,
aws_secret_access_key=frozen_credentials.secret_key,
aws_session_token=frozen_credentials.token,
)
class AioAssumeRoleWithWebIdentityCredentialFetcher(
AioBaseAssumeRoleCredentialFetcher
):
def __init__(self, client_creator, web_identity_token_loader, role_arn,
extra_args=None, cache=None, expiry_window_seconds=None):
self._web_identity_token_loader = web_identity_token_loader
super(AioAssumeRoleWithWebIdentityCredentialFetcher, self).__init__(
client_creator, role_arn, extra_args=extra_args,
cache=cache, expiry_window_seconds=expiry_window_seconds
)
async def _get_credentials(self):
"""Get credentials by calling assume role."""
kwargs = self._assume_role_kwargs()
# Assume role with web identity does not require credentials other than
        # the token, so explicitly configure the client not to sign requests.
config = AioConfig(signature_version=UNSIGNED)
async with self._client_creator('sts', config=config) as client:
return await client.assume_role_with_web_identity(**kwargs)
def _assume_role_kwargs(self):
"""Get the arguments for assume role based on current configuration."""
assume_role_kwargs = deepcopy(self._assume_kwargs)
identity_token = self._web_identity_token_loader()
assume_role_kwargs['WebIdentityToken'] = identity_token
return assume_role_kwargs
class AioProcessProvider(ProcessProvider):
def __init__(self, *args, popen=asyncio.create_subprocess_exec, **kwargs):
super(AioProcessProvider, self).__init__(*args, **kwargs, popen=popen)
async def load(self):
credential_process = self._credential_process
if credential_process is None:
return
creds_dict = await self._retrieve_credentials_using(credential_process)
if creds_dict.get('expiry_time') is not None:
return AioRefreshableCredentials.create_from_metadata(
creds_dict,
lambda: self._retrieve_credentials_using(credential_process),
self.METHOD
)
return AioCredentials(
access_key=creds_dict['access_key'],
secret_key=creds_dict['secret_key'],
token=creds_dict.get('token'),
method=self.METHOD
)
async def _retrieve_credentials_using(self, credential_process):
# We're not using shell=True, so we need to pass the
# command and all arguments as a list.
process_list = compat_shell_split(credential_process)
p = await self._popen(*process_list,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = await p.communicate()
if p.returncode != 0:
raise CredentialRetrievalError(
provider=self.METHOD, error_msg=stderr.decode('utf-8'))
parsed = botocore.compat.json.loads(stdout.decode('utf-8'))
version = parsed.get('Version', '<Version key not provided>')
if version != 1:
raise CredentialRetrievalError(
provider=self.METHOD,
error_msg=("Unsupported version '%s' for credential process "
"provider, supported versions: 1" % version))
try:
return {
'access_key': parsed['AccessKeyId'],
'secret_key': parsed['SecretAccessKey'],
'token': parsed.get('SessionToken'),
'expiry_time': parsed.get('Expiration'),
}
except KeyError as e:
raise CredentialRetrievalError(
provider=self.METHOD,
error_msg="Missing required key in response: %s" % e
)
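# Added protocol sketch (illustrative only): a credential_process command
# must print JSON of the following shape. Version must be 1; Expiration is
# optional and makes the resulting credentials refreshable.
#     {"Version": 1,
#      "AccessKeyId": "AKIA...",
#      "SecretAccessKey": "...",
#      "SessionToken": "...",
#      "Expiration": "2021-01-01T00:00:00Z"}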
class AioInstanceMetadataProvider(InstanceMetadataProvider):
async def load(self):
fetcher = self._role_fetcher
metadata = await fetcher.retrieve_iam_role_credentials()
if not metadata:
return None
logger.debug('Found credentials from IAM Role: %s',
metadata['role_name'])
creds = AioRefreshableCredentials.create_from_metadata(
metadata,
method=self.METHOD,
refresh_using=fetcher.retrieve_iam_role_credentials,
)
return creds
class AioEnvProvider(EnvProvider):
async def load(self):
# It gets credentials from an env var,
# so just convert the response to Aio variants
result = super().load()
if isinstance(result, RefreshableCredentials):
return AioRefreshableCredentials.\
from_refreshable_credentials(result)
elif isinstance(result, Credentials):
return AioCredentials.from_credentials(result)
return None
class AioOriginalEC2Provider(OriginalEC2Provider):
async def load(self):
result = super(AioOriginalEC2Provider, self).load()
if isinstance(result, Credentials):
result = AioCredentials.from_credentials(result)
return result
class AioSharedCredentialProvider(SharedCredentialProvider):
async def load(self):
result = super(AioSharedCredentialProvider, self).load()
if isinstance(result, Credentials):
result = AioCredentials.from_credentials(result)
return result
class AioConfigProvider(ConfigProvider):
async def load(self):
result = super(AioConfigProvider, self).load()
if isinstance(result, Credentials):
result = AioCredentials.from_credentials(result)
return result
class AioBotoProvider(BotoProvider):
async def load(self):
result = super(AioBotoProvider, self).load()
if isinstance(result, Credentials):
result = AioCredentials.from_credentials(result)
return result
class AioAssumeRoleProvider(AssumeRoleProvider):
async def load(self):
self._loaded_config = self._load_config()
profiles = self._loaded_config.get('profiles', {})
profile = profiles.get(self._profile_name, {})
if self._has_assume_role_config_vars(profile):
return await self._load_creds_via_assume_role(self._profile_name)
async def _load_creds_via_assume_role(self, profile_name):
role_config = self._get_role_config(profile_name)
source_credentials = await self._resolve_source_credentials(
role_config, profile_name
)
extra_args = {}
role_session_name = role_config.get('role_session_name')
if role_session_name is not None:
extra_args['RoleSessionName'] = role_session_name
external_id = role_config.get('external_id')
if external_id is not None:
extra_args['ExternalId'] = external_id
mfa_serial = role_config.get('mfa_serial')
if mfa_serial is not None:
extra_args['SerialNumber'] = mfa_serial
duration_seconds = role_config.get('duration_seconds')
if duration_seconds is not None:
extra_args['DurationSeconds'] = duration_seconds
fetcher = AioAssumeRoleCredentialFetcher(
client_creator=self._client_creator,
source_credentials=source_credentials,
role_arn=role_config['role_arn'],
extra_args=extra_args,
mfa_prompter=self._prompter,
cache=self.cache,
)
refresher = fetcher.fetch_credentials
if mfa_serial is not None:
refresher = create_aio_mfa_serial_refresher(refresher)
# The initial credentials are empty and the expiration time is set
# to now so that we can delay the call to assume role until it is
# strictly needed.
return AioDeferredRefreshableCredentials(
method=self.METHOD,
refresh_using=refresher,
time_fetcher=_local_now
)
async def _resolve_source_credentials(self, role_config, profile_name):
credential_source = role_config.get('credential_source')
if credential_source is not None:
return await self._resolve_credentials_from_source(
credential_source, profile_name
)
source_profile = role_config['source_profile']
self._visited_profiles.append(source_profile)
return await self._resolve_credentials_from_profile(source_profile)
async def _resolve_credentials_from_profile(self, profile_name):
profiles = self._loaded_config.get('profiles', {})
profile = profiles[profile_name]
if self._has_static_credentials(profile) and \
not self._profile_provider_builder:
return self._resolve_static_credentials_from_profile(profile)
elif self._has_static_credentials(profile) or \
not self._has_assume_role_config_vars(profile):
profile_providers = self._profile_provider_builder.providers(
profile_name=profile_name,
disable_env_vars=True,
)
profile_chain = AioCredentialResolver(profile_providers)
credentials = await profile_chain.load_credentials()
if credentials is None:
error_message = (
'The source profile "%s" must have credentials.'
)
raise InvalidConfigError(
error_msg=error_message % profile_name,
)
return credentials
        return await self._load_creds_via_assume_role(profile_name)
def _resolve_static_credentials_from_profile(self, profile):
try:
return AioCredentials(
access_key=profile['aws_access_key_id'],
secret_key=profile['aws_secret_access_key'],
token=profile.get('aws_session_token')
)
except KeyError as e:
raise PartialCredentialsError(
provider=self.METHOD, cred_var=str(e))
async def _resolve_credentials_from_source(self, credential_source,
profile_name):
credentials = await self._credential_sourcer.source_credentials(
credential_source)
if credentials is None:
raise CredentialRetrievalError(
provider=credential_source,
error_msg=(
'No credentials found in credential_source referenced '
'in profile %s' % profile_name
)
)
return credentials
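# Added config sketch (illustrative only): an assume-role profile as read by
# this provider, shown as a ~/.aws/config fragment with example values.
#     [profile assume]
#     role_arn = arn:aws:iam::123456789012:role/my-role
#     source_profile = default
#     external_id = my-external-id
#     mfa_serial = arn:aws:iam::123456789012:mfa/user
#     duration_seconds = 3600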
class AioAssumeRoleWithWebIdentityProvider(AssumeRoleWithWebIdentityProvider):
async def load(self):
return await self._assume_role_with_web_identity()
async def _assume_role_with_web_identity(self):
token_path = self._get_config('web_identity_token_file')
if not token_path:
return None
token_loader = self._token_loader_cls(token_path)
role_arn = self._get_config('role_arn')
if not role_arn:
error_msg = (
'The provided profile or the current environment is '
'configured to assume role with web identity but has no '
                'role ARN configured. Ensure that the profile has the '
                'role_arn configuration set or the AWS_ROLE_ARN env var is set.'
)
raise InvalidConfigError(error_msg=error_msg)
extra_args = {}
role_session_name = self._get_config('role_session_name')
if role_session_name is not None:
extra_args['RoleSessionName'] = role_session_name
fetcher = AioAssumeRoleWithWebIdentityCredentialFetcher(
client_creator=self._client_creator,
web_identity_token_loader=token_loader,
role_arn=role_arn,
extra_args=extra_args,
cache=self.cache,
)
# The initial credentials are empty and the expiration time is set
# to now so that we can delay the call to assume role until it is
# strictly needed.
return AioDeferredRefreshableCredentials(
method=self.METHOD,
refresh_using=fetcher.fetch_credentials,
)
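# Added config sketch (illustrative only): the profile keys this provider
# reads, shown as a ~/.aws/config fragment with example values.
#     [profile web-identity]
#     role_arn = arn:aws:iam::123456789012:role/my-role
#     web_identity_token_file = /var/run/secrets/token
#     role_session_name = my-session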
class AioCanonicalNameCredentialSourcer(CanonicalNameCredentialSourcer):
async def source_credentials(self, source_name):
"""Loads source credentials based on the provided configuration.
:type source_name: str
:param source_name: The value of credential_source in the config
file. This is the canonical name of the credential provider.
:rtype: Credentials
"""
source = self._get_provider(source_name)
if isinstance(source, AioCredentialResolver):
return await source.load_credentials()
return await source.load()
def _get_provider(self, canonical_name):
"""Return a credential provider by its canonical name.
:type canonical_name: str
:param canonical_name: The canonical name of the provider.
:raises UnknownCredentialError: Raised if no
credential provider by the provided name
is found.
"""
provider = self._get_provider_by_canonical_name(canonical_name)
# The AssumeRole provider should really be part of the SharedConfig
# provider rather than being its own thing, but it is not. It is
# effectively part of both the SharedConfig provider and the
# SharedCredentials provider now due to the way it behaves.
# Therefore if we want either of those providers we should return
# the AssumeRole provider with it.
if canonical_name.lower() in ['sharedconfig', 'sharedcredentials']:
assume_role_provider = self._get_provider_by_method('assume-role')
if assume_role_provider is not None:
# The SharedConfig or SharedCredentials provider may not be
# present if it was removed for some reason, but the
# AssumeRole provider could still be present. In that case,
# return the assume role provider by itself.
if provider is None:
return assume_role_provider
# If both are present, return them both as a
# CredentialResolver so that calling code can treat them as
# a single entity.
return AioCredentialResolver([assume_role_provider, provider])
if provider is None:
raise UnknownCredentialError(name=canonical_name)
return provider
class AioContainerProvider(ContainerProvider):
def __init__(self, *args, **kwargs):
super(AioContainerProvider, self).__init__(*args, **kwargs)
# This will always run if no fetcher arg is provided
if isinstance(self._fetcher, ContainerMetadataFetcher):
self._fetcher = AioContainerMetadataFetcher()
async def load(self):
if self.ENV_VAR in self._environ or self.ENV_VAR_FULL in self._environ:
return await self._retrieve_or_fail()
async def _retrieve_or_fail(self):
if self._provided_relative_uri():
full_uri = self._fetcher.full_url(self._environ[self.ENV_VAR])
else:
full_uri = self._environ[self.ENV_VAR_FULL]
headers = self._build_headers()
fetcher = self._create_fetcher(full_uri, headers)
creds = await fetcher()
return AioRefreshableCredentials(
access_key=creds['access_key'],
secret_key=creds['secret_key'],
token=creds['token'],
method=self.METHOD,
expiry_time=_parse_if_needed(creds['expiry_time']),
refresh_using=fetcher,
)
def _create_fetcher(self, full_uri, headers):
async def fetch_creds():
try:
response = await self._fetcher.retrieve_full_uri(
full_uri, headers=headers)
except MetadataRetrievalError as e:
logger.debug("Error retrieving container metadata: %s", e,
exc_info=True)
raise CredentialRetrievalError(provider=self.METHOD,
error_msg=str(e))
return {
'access_key': response['AccessKeyId'],
'secret_key': response['SecretAccessKey'],
'token': response['Token'],
'expiry_time': response['Expiration'],
}
return fetch_creds
class AioCredentialResolver(CredentialResolver):
async def load_credentials(self):
"""
Goes through the credentials chain, returning the first ``Credentials``
that could be loaded.
"""
# First provider to return a non-None response wins.
for provider in self.providers:
logger.debug("Looking for credentials via: %s", provider.METHOD)
creds = await provider.load()
if creds is not None:
return creds
# If we got here, no credentials could be found.
# This feels like it should be an exception, but historically, ``None``
# is returned.
#
# +1
# -js
return None
class AioSSOCredentialFetcher(AioCachedCredentialFetcher):
_UTC_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
def __init__(self, start_url, sso_region, role_name, account_id,
client_creator, token_loader=None, cache=None,
expiry_window_seconds=None):
self._client_creator = client_creator
self._sso_region = sso_region
self._role_name = role_name
self._account_id = account_id
self._start_url = start_url
self._token_loader = token_loader
super(AioSSOCredentialFetcher, self).__init__(
cache, expiry_window_seconds
)
def _create_cache_key(self):
args = {
'startUrl': self._start_url,
'roleName': self._role_name,
'accountId': self._account_id,
}
args = json.dumps(args, sort_keys=True, separators=(',', ':'))
argument_hash = sha1(args.encode('utf-8')).hexdigest()
return self._make_file_safe(argument_hash)
def _parse_timestamp(self, timestamp_ms):
# fromtimestamp expects seconds so: milliseconds / 1000 = seconds
timestamp_seconds = timestamp_ms / 1000.0
timestamp = datetime.datetime.fromtimestamp(timestamp_seconds, tzutc())
return timestamp.strftime(self._UTC_DATE_FORMAT)
async def _get_credentials(self):
"""Get credentials by calling SSO get role credentials."""
config = Config(
signature_version=UNSIGNED,
region_name=self._sso_region,
)
async with self._client_creator('sso', config=config) as client:
kwargs = {
'roleName': self._role_name,
'accountId': self._account_id,
'accessToken': self._token_loader(self._start_url),
}
try:
response = await client.get_role_credentials(**kwargs)
except client.exceptions.UnauthorizedException:
raise UnauthorizedSSOTokenError()
credentials = response['roleCredentials']
credentials = {
'ProviderType': 'sso',
'Credentials': {
'AccessKeyId': credentials['accessKeyId'],
'SecretAccessKey': credentials['secretAccessKey'],
'SessionToken': credentials['sessionToken'],
'Expiration': self._parse_timestamp(credentials['expiration']),
}
}
return credentials
class AioSSOProvider(SSOProvider):
async def load(self):
sso_config = self._load_sso_config()
if not sso_config:
return None
sso_fetcher = AioSSOCredentialFetcher(
sso_config['sso_start_url'],
sso_config['sso_region'],
sso_config['sso_role_name'],
sso_config['sso_account_id'],
self._client_creator,
token_loader=SSOTokenLoader(cache=self._token_cache),
cache=self.cache,
)
return AioDeferredRefreshableCredentials(
method=self.METHOD,
refresh_using=sso_fetcher.fetch_credentials,
)
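# Added config sketch (illustrative only): the sso_* profile keys read in
# load() above, shown as a ~/.aws/config fragment with example values.
#     [profile sso]
#     sso_start_url = https://my-org.awsapps.com/start
#     sso_region = us-east-1
#     sso_account_id = 123456789012
#     sso_role_name = MyRole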
| 35,111 | Python | 37.627063 | 85 | 0.620404 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/paginate.py | from botocore.exceptions import PaginationError
from botocore.paginate import Paginator, PageIterator
from botocore.utils import set_value_from_jmespath, merge_dicts
from botocore.compat import six
import jmespath
import aioitertools
class AioPageIterator(PageIterator):
def __aiter__(self):
return self.__anext__()
async def __anext__(self):
current_kwargs = self._op_kwargs
previous_next_token = None
next_token = dict((key, None) for key in self._input_token)
if self._starting_token is not None:
# If the starting token exists, populate the next_token with the
# values inside it. This ensures that we have the service's
# pagination token on hand if we need to truncate after the
# first response.
next_token = self._parse_starting_token()[0]
# The number of items from result_key we've seen so far.
total_items = 0
first_request = True
primary_result_key = self.result_keys[0]
starting_truncation = 0
self._inject_starting_params(current_kwargs)
while True:
response = await self._make_request(current_kwargs)
parsed = self._extract_parsed_response(response)
if first_request:
# The first request is handled differently. We could
# possibly have a resume/starting token that tells us where
# to index into the retrieved page.
if self._starting_token is not None:
starting_truncation = self._handle_first_request(
parsed, primary_result_key, starting_truncation)
first_request = False
self._record_non_aggregate_key_values(parsed)
else:
# If this isn't the first request, we have already sliced into
# the first request and had to make additional requests after.
# We no longer need to add this to truncation.
starting_truncation = 0
current_response = primary_result_key.search(parsed)
if current_response is None:
current_response = []
num_current_response = len(current_response)
truncate_amount = 0
if self._max_items is not None:
truncate_amount = (total_items + num_current_response) \
- self._max_items
if truncate_amount > 0:
self._truncate_response(parsed, primary_result_key,
truncate_amount, starting_truncation,
next_token)
yield response
break
else:
yield response
total_items += num_current_response
next_token = self._get_next_token(parsed)
if all(t is None for t in next_token.values()):
break
if self._max_items is not None and \
total_items == self._max_items:
# We're on a page boundary so we can set the current
# next token to be the resume token.
self.resume_token = next_token
break
if previous_next_token is not None and \
previous_next_token == next_token:
message = ("The same next token was received "
"twice: %s" % next_token)
raise PaginationError(message=message)
self._inject_token_into_kwargs(current_kwargs, next_token)
previous_next_token = next_token
def result_key_iters(self):
teed_results = aioitertools.tee(self, len(self.result_keys))
return [ResultKeyIterator(i, result_key) for i, result_key
in zip(teed_results, self.result_keys)]
async def build_full_result(self):
complete_result = {}
async for response in self:
page = response
# We want to try to catch operation object pagination
# and format correctly for those. They come in the form
            # of a tuple of two elements: (http_response, parsed_response).
# We want the parsed_response as that is what the page iterator
# uses. We can remove it though once operation objects are removed.
if isinstance(response, tuple) and len(response) == 2:
page = response[1]
# We're incrementally building the full response page
# by page. For each page in the response we need to
# inject the necessary components from the page
# into the complete_result.
for result_expression in self.result_keys:
# In order to incrementally update a result key
# we need to search the existing value from complete_result,
# then we need to search the _current_ page for the
# current result key value. Then we append the current
# value onto the existing value, and re-set that value
# as the new value.
result_value = result_expression.search(page)
if result_value is None:
continue
existing_value = result_expression.search(complete_result)
if existing_value is None:
# Set the initial result
set_value_from_jmespath(
complete_result, result_expression.expression,
result_value)
continue
# Now both result_value and existing_value contain something
if isinstance(result_value, list):
existing_value.extend(result_value)
elif isinstance(result_value, (int, float, six.string_types)):
# Modify the existing result with the sum or concatenation
set_value_from_jmespath(
complete_result, result_expression.expression,
existing_value + result_value)
merge_dicts(complete_result, self.non_aggregate_part)
if self.resume_token is not None:
complete_result['NextToken'] = self.resume_token
return complete_result
async def search(self, expression):
compiled = jmespath.compile(expression)
async for page in self:
results = compiled.search(page)
if isinstance(results, list):
for element in results:
yield element
else:
yield results
class AioPaginator(Paginator):
PAGE_ITERATOR_CLS = AioPageIterator
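# Added usage sketch (not part of upstream aiobotocore): paginating and
# JMESPath-filtering results; assumes an aiobotocore S3 client `client`,
# and the bucket/expression values are examples only.
async def _example_list_keys(client, bucket):
    paginator = client.get_paginator('list_objects_v2')
    keys = []
    async for key in paginator.paginate(Bucket=bucket).search('Contents[].Key'):
        keys.append(key)
    return keys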
class ResultKeyIterator:
"""Iterates over the results of paginated responses.
Each iterator is associated with a single result key.
Iterating over this object will give you each element in
the result key list.
:param pages_iterator: An iterator that will give you
pages of results (a ``PageIterator`` class).
:param result_key: The JMESPath expression representing
the result key.
"""
def __init__(self, pages_iterator, result_key):
self._pages_iterator = pages_iterator
self.result_key = result_key
def __aiter__(self):
return self.__anext__()
async def __anext__(self):
async for page in self._pages_iterator:
results = self.result_key.search(page)
if results is None:
results = []
for result in results:
yield result
| 7,699 | Python | 42.75 | 79 | 0.572022 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/eventstream.py | from botocore.eventstream import EventStream, EventStreamBuffer
class AioEventStream(EventStream):
async def _create_raw_event_generator(self):
event_stream_buffer = EventStreamBuffer()
async for chunk, _ in self._raw_stream.iter_chunks():
event_stream_buffer.add_data(chunk)
for event in event_stream_buffer:
yield event
def __iter__(self):
raise NotImplementedError('Use async-for instead')
def __aiter__(self):
return self.__anext__()
async def __anext__(self):
async for event in self._event_generator:
parsed_event = self._parse_event(event)
if parsed_event:
yield parsed_event
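# Added usage sketch (not part of upstream aiobotocore): event streams must
# be consumed with async-for (plain iteration raises above); assumes an S3
# select_object_content response whose 'Payload' is an AioEventStream.
async def _example_consume(event_stream):
    async for event in event_stream:
        if 'Records' in event:
            print(event['Records']['Payload'])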
| 724 | Python | 30.521738 | 63 | 0.620166 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/client.py | from botocore.awsrequest import prepare_request_dict
from botocore.client import logger, PaginatorDocstring, ClientCreator, \
BaseClient, ClientEndpointBridge, S3ArnParamHandler, S3EndpointSetter
from botocore.exceptions import OperationNotPageableError
from botocore.history import get_global_history_recorder
from botocore.utils import get_service_module_name
from botocore.waiter import xform_name
from botocore.hooks import first_non_none_response
from .paginate import AioPaginator
from .args import AioClientArgsCreator
from .utils import AioS3RegionRedirector
from . import waiter
history_recorder = get_global_history_recorder()
class AioClientCreator(ClientCreator):
async def create_client(self, service_name, region_name, is_secure=True,
endpoint_url=None, verify=None,
credentials=None, scoped_config=None,
api_version=None,
client_config=None):
responses = await self._event_emitter.emit(
'choose-service-name', service_name=service_name)
service_name = first_non_none_response(responses, default=service_name)
service_model = self._load_service_model(service_name, api_version)
cls = await self._create_client_class(service_name, service_model)
endpoint_bridge = ClientEndpointBridge(
self._endpoint_resolver, scoped_config, client_config,
service_signing_name=service_model.metadata.get('signingName'))
client_args = self._get_client_args(
service_model, region_name, is_secure, endpoint_url,
verify, credentials, scoped_config, client_config, endpoint_bridge)
service_client = cls(**client_args)
self._register_retries(service_client)
self._register_s3_events(
service_client, endpoint_bridge, endpoint_url, client_config,
scoped_config)
self._register_endpoint_discovery(
service_client, endpoint_url, client_config
)
return service_client
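    # Added usage sketch (illustrative only): callers normally reach
    # create_client() through an aiobotocore session, e.g.
    #     async with session.create_client('s3', region_name='us-east-1') as c:
    #         await c.list_buckets()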
async def _create_client_class(self, service_name, service_model):
class_attributes = self._create_methods(service_model)
py_name_to_operation_name = self._create_name_mapping(service_model)
class_attributes['_PY_TO_OP_NAME'] = py_name_to_operation_name
bases = [AioBaseClient]
service_id = service_model.service_id.hyphenize()
await self._event_emitter.emit(
'creating-client-class.%s' % service_id,
class_attributes=class_attributes,
base_classes=bases)
class_name = get_service_module_name(service_model)
cls = type(str(class_name), tuple(bases), class_attributes)
return cls
def _register_s3_events(self, client, endpoint_bridge, endpoint_url,
client_config, scoped_config):
if client.meta.service_model.service_name != 's3':
return
AioS3RegionRedirector(endpoint_bridge, client).register()
S3ArnParamHandler().register(client.meta.events)
S3EndpointSetter(
endpoint_resolver=self._endpoint_resolver,
region=client.meta.region_name,
s3_config=client.meta.config.s3,
endpoint_url=endpoint_url,
partition=client.meta.partition
).register(client.meta.events)
self._set_s3_presign_signature_version(
client.meta, client_config, scoped_config)
def _get_client_args(self, service_model, region_name, is_secure,
endpoint_url, verify, credentials,
scoped_config, client_config, endpoint_bridge):
        # This is a near copy of ClientCreator._get_client_args; the only
        # change is ClientArgsCreator -> AioClientArgsCreator.
args_creator = AioClientArgsCreator(
self._event_emitter, self._user_agent,
self._response_parser_factory, self._loader,
self._exceptions_factory, config_store=self._config_store)
return args_creator.get_client_args(
service_model, region_name, is_secure, endpoint_url,
verify, credentials, scoped_config, client_config, endpoint_bridge)
class AioBaseClient(BaseClient):
async def _async_getattr(self, item):
event_name = 'getattr.%s.%s' % (
self._service_model.service_id.hyphenize(), item
)
handler, event_response = await self.meta.events.emit_until_response(
event_name, client=self)
return event_response
def __getattr__(self, item):
        # NOTE: we cannot reliably support this because if we were to make this a
        # deferred attrgetter (see #803), it would result in hasattr always returning
        # True. This ends up breaking ddtrace, for example, when it tries to set a pin.
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, item))
async def _make_api_call(self, operation_name, api_params):
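        # Event flow: provide-client-params and before-parameter-build fire
        # while building the request dict; before-call may short-circuit the
        # HTTP request entirely; after-call fires once a response is available.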
operation_model = self._service_model.operation_model(operation_name)
service_name = self._service_model.service_name
history_recorder.record('API_CALL', {
'service': service_name,
'operation': operation_name,
'params': api_params,
})
if operation_model.deprecated:
logger.debug('Warning: %s.%s() is deprecated',
service_name, operation_name)
request_context = {
'client_region': self.meta.region_name,
'client_config': self.meta.config,
'has_streaming_input': operation_model.has_streaming_input,
'auth_type': operation_model.auth_type,
}
request_dict = await self._convert_to_request_dict(
api_params, operation_model, context=request_context)
service_id = self._service_model.service_id.hyphenize()
handler, event_response = await self.meta.events.emit_until_response(
'before-call.{service_id}.{operation_name}'.format(
service_id=service_id,
operation_name=operation_name),
model=operation_model, params=request_dict,
request_signer=self._request_signer, context=request_context)
if event_response is not None:
http, parsed_response = event_response
else:
http, parsed_response = await self._make_request(
operation_model, request_dict, request_context)
await self.meta.events.emit(
'after-call.{service_id}.{operation_name}'.format(
service_id=service_id,
operation_name=operation_name),
http_response=http, parsed=parsed_response,
model=operation_model, context=request_context
)
if http.status_code >= 300:
error_code = parsed_response.get("Error", {}).get("Code")
error_class = self.exceptions.from_code(error_code)
raise error_class(parsed_response, operation_name)
else:
return parsed_response
async def _make_request(self, operation_model, request_dict, request_context):
try:
return await self._endpoint.make_request(operation_model, request_dict)
except Exception as e:
await self.meta.events.emit(
'after-call-error.{service_id}.{operation_name}'.format(
service_id=self._service_model.service_id.hyphenize(),
operation_name=operation_model.name),
exception=e, context=request_context
)
raise
async def _convert_to_request_dict(self, api_params, operation_model,
context=None):
api_params = await self._emit_api_params(
api_params, operation_model, context)
request_dict = self._serializer.serialize_to_request(
api_params, operation_model)
if not self._client_config.inject_host_prefix:
request_dict.pop('host_prefix', None)
prepare_request_dict(request_dict, endpoint_url=self._endpoint.host,
user_agent=self._client_config.user_agent,
context=context)
return request_dict
async def _emit_api_params(self, api_params, operation_model, context):
# Given the API params provided by the user and the operation_model
# we can serialize the request to a request_dict.
operation_name = operation_model.name
# Emit an event that allows users to modify the parameters at the
# beginning of the method. It allows handlers to modify existing
# parameters or return a new set of parameters to use.
service_id = self._service_model.service_id.hyphenize()
responses = await self.meta.events.emit(
'provide-client-params.{service_id}.{operation_name}'.format(
service_id=service_id,
operation_name=operation_name),
params=api_params, model=operation_model, context=context)
api_params = first_non_none_response(responses, default=api_params)
event_name = (
'before-parameter-build.{service_id}.{operation_name}')
await self.meta.events.emit(
event_name.format(
service_id=service_id,
operation_name=operation_name),
params=api_params, model=operation_model, context=context)
return api_params
def get_paginator(self, operation_name):
"""Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you'd normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator("create_foo")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
"""
if not self.can_paginate(operation_name):
raise OperationNotPageableError(operation_name=operation_name)
else:
actual_operation_name = self._PY_TO_OP_NAME[operation_name]
# Create a new paginate method that will serve as a proxy to
# the underlying Paginator.paginate method. This is needed to
# attach a docstring to the method.
def paginate(self, **kwargs):
return AioPaginator.paginate(self, **kwargs)
paginator_config = self._cache['page_config'][
actual_operation_name]
# Add the docstring for the paginate method.
paginate.__doc__ = PaginatorDocstring(
paginator_name=actual_operation_name,
event_emitter=self.meta.events,
service_model=self.meta.service_model,
paginator_config=paginator_config,
include_signature=False
)
# Rename the paginator class based on the type of paginator.
paginator_class_name = str('%s.Paginator.%s' % (
get_service_module_name(self.meta.service_model),
actual_operation_name))
# Create the new paginator class
documented_paginator_cls = type(
paginator_class_name, (AioPaginator,), {'paginate': paginate})
operation_model = self._service_model.operation_model(actual_operation_name)
paginator = documented_paginator_cls(
getattr(self, operation_name),
paginator_config,
operation_model)
return paginator
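    # Usage sketch (operation, bucket, and key names are illustrative):
    #
    #   paginator = client.get_paginator('list_objects')
    #   async for page in paginator.paginate(Bucket='example-bucket'):
    #       for obj in page.get('Contents', []):
    #           ...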
    # NOTE: this method does not differ from botocore; it must be kept here,
    # however, so that "waiter" resolves to our own asyncio waiter module
def get_waiter(self, waiter_name):
"""Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
config = self._get_waiter_config()
if not config:
raise ValueError("Waiter does not exist: %s" % waiter_name)
model = waiter.WaiterModel(config)
mapping = {}
for name in model.waiter_names:
mapping[xform_name(name)] = name
if waiter_name not in mapping:
raise ValueError("Waiter does not exist: %s" % waiter_name)
return waiter.create_waiter_with_client(
mapping[waiter_name], model, self)
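    # Usage sketch (waiter and bucket names are illustrative):
    #
    #   waiter = client.get_waiter('bucket_exists')
    #   await waiter.wait(Bucket='example-bucket')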
async def __aenter__(self):
await self._endpoint.http_session.__aenter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self._endpoint.http_session.__aexit__(exc_type, exc_val, exc_tb)
async def close(self):
"""Close all http connections."""
return await self._endpoint.http_session.close()
| 13,609 | Python | 44.366667 | 88 | 0.623117 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/session.py | from botocore.session import Session, EVENT_ALIASES, ServiceModel, UnknownServiceError
from botocore import UNSIGNED
from botocore import retryhandler, translate
from botocore.exceptions import PartialCredentialsError
from .client import AioClientCreator, AioBaseClient
from .hooks import AioHierarchicalEmitter
from .parsers import AioResponseParserFactory
from .signers import add_generate_presigned_url, add_generate_presigned_post, \
add_generate_db_auth_token
from .credentials import create_credential_resolver, AioCredentials
class ClientCreatorContext:
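    # Wraps the client-creation coroutine so that ``session.create_client(...)``
    # can be used directly as an ``async with`` context manager.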
def __init__(self, coro):
self._coro = coro
self._client = None
async def __aenter__(self) -> AioBaseClient:
self._client = await self._coro
return await self._client.__aenter__()
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self._client.__aexit__(exc_type, exc_val, exc_tb)
class AioSession(Session):
# noinspection PyMissingConstructor
def __init__(self, session_vars=None, event_hooks=None,
include_builtin_handlers=True, profile=None):
if event_hooks is None:
event_hooks = AioHierarchicalEmitter()
super().__init__(session_vars, event_hooks, include_builtin_handlers, profile)
# Register our own handlers. These normally happen via
# `botocore.handlers.BUILTIN_HANDLERS`
self.register('creating-client-class', add_generate_presigned_url)
self.register('creating-client-class.s3', add_generate_presigned_post)
        self.register('creating-client-class.rds', add_generate_db_auth_token)
def _register_response_parser_factory(self):
self._components.register_component('response_parser_factory',
AioResponseParserFactory())
def create_client(self, *args, **kwargs):
return ClientCreatorContext(self._create_client(*args, **kwargs))
async def _create_client(self, service_name, region_name=None,
api_version=None,
use_ssl=True, verify=None, endpoint_url=None,
aws_access_key_id=None, aws_secret_access_key=None,
aws_session_token=None, config=None):
default_client_config = self.get_default_client_config()
# If a config is provided and a default config is set, then
# use the config resulting from merging the two.
if config is not None and default_client_config is not None:
config = default_client_config.merge(config)
# If a config was not provided then use the default
# client config from the session
elif default_client_config is not None:
config = default_client_config
region_name = self._resolve_region_name(region_name, config)
# Figure out the verify value base on the various
# configuration options.
if verify is None:
verify = self.get_config_variable('ca_bundle')
if api_version is None:
api_version = self.get_config_variable('api_versions').get(
service_name, None)
loader = self.get_component('data_loader')
event_emitter = self.get_component('event_emitter')
response_parser_factory = self.get_component(
'response_parser_factory')
if config is not None and config.signature_version is UNSIGNED:
credentials = None
elif aws_access_key_id is not None and \
aws_secret_access_key is not None:
credentials = AioCredentials(
access_key=aws_access_key_id,
secret_key=aws_secret_access_key,
token=aws_session_token)
elif self._missing_cred_vars(aws_access_key_id,
aws_secret_access_key):
raise PartialCredentialsError(
provider='explicit',
cred_var=self._missing_cred_vars(aws_access_key_id,
aws_secret_access_key))
else:
credentials = await self.get_credentials()
endpoint_resolver = self._get_internal_component('endpoint_resolver')
exceptions_factory = self._get_internal_component('exceptions_factory')
config_store = self.get_component('config_store')
client_creator = AioClientCreator(
loader, endpoint_resolver, self.user_agent(), event_emitter,
retryhandler, translate, response_parser_factory,
exceptions_factory, config_store)
client = await client_creator.create_client(
service_name=service_name, region_name=region_name,
is_secure=use_ssl, endpoint_url=endpoint_url, verify=verify,
credentials=credentials, scoped_config=self.get_scoped_config(),
client_config=config, api_version=api_version)
monitor = self._get_internal_component('monitor')
if monitor is not None:
monitor.register(client.meta.events)
return client
def _create_credential_resolver(self):
return create_credential_resolver(
self, region_name=self._last_client_region_used)
async def get_credentials(self):
if self._credentials is None:
self._credentials = await (self._components.get_component(
'credential_provider').load_credentials())
return self._credentials
def set_credentials(self, access_key, secret_key, token=None):
self._credentials = AioCredentials(access_key, secret_key, token)
async def get_service_model(self, service_name, api_version=None):
service_description = await self.get_service_data(service_name, api_version)
return ServiceModel(service_description, service_name=service_name)
async def get_service_data(self, service_name, api_version=None):
"""
Retrieve the fully merged data associated with a service.
"""
data_path = service_name
service_data = self.get_component('data_loader').load_service_model(
data_path,
type_name='service-2',
api_version=api_version
)
service_id = EVENT_ALIASES.get(service_name, service_name)
await self._events.emit('service-data-loaded.%s' % service_id,
service_data=service_data,
service_name=service_name, session=self)
return service_data
async def get_available_regions(self, service_name, partition_name='aws',
allow_non_regional=False):
resolver = self._get_internal_component('endpoint_resolver')
results = []
try:
service_data = await self.get_service_data(service_name)
endpoint_prefix = service_data['metadata'].get(
'endpointPrefix', service_name)
results = resolver.get_available_endpoints(
endpoint_prefix, partition_name, allow_non_regional)
except UnknownServiceError:
pass
return results
def get_session(env_vars=None):
"""
Return a new session object.
"""
return AioSession(env_vars)
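# Usage sketch (region and bucket names are illustrative):
#
#   session = get_session()
#   async with session.create_client('s3', region_name='us-east-1') as client:
#       resp = await client.list_objects_v2(Bucket='example-bucket')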
| 7,266 | Python | 42.51497 | 86 | 0.630333 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/parsers.py | from botocore.parsers import ResponseParserFactory, RestXMLParser, \
RestJSONParser, JSONParser, QueryParser, EC2QueryParser
from .eventstream import AioEventStream
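# Each parser subclass below overrides _create_event_stream so that streaming
# responses are wrapped in AioEventStream (async iteration) instead of
# botocore's synchronous EventStream.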
class AioRestXMLParser(RestXMLParser):
def _create_event_stream(self, response, shape):
parser = self._event_stream_parser
name = response['context'].get('operation_name')
return AioEventStream(response['body'], shape, parser, name)
class AioEC2QueryParser(EC2QueryParser):
def _create_event_stream(self, response, shape):
parser = self._event_stream_parser
name = response['context'].get('operation_name')
return AioEventStream(response['body'], shape, parser, name)
class AioQueryParser(QueryParser):
def _create_event_stream(self, response, shape):
parser = self._event_stream_parser
name = response['context'].get('operation_name')
return AioEventStream(response['body'], shape, parser, name)
class AioJSONParser(JSONParser):
def _create_event_stream(self, response, shape):
parser = self._event_stream_parser
name = response['context'].get('operation_name')
return AioEventStream(response['body'], shape, parser, name)
class AioRestJSONParser(RestJSONParser):
def _create_event_stream(self, response, shape):
parser = self._event_stream_parser
name = response['context'].get('operation_name')
return AioEventStream(response['body'], shape, parser, name)
PROTOCOL_PARSERS = {
'ec2': AioEC2QueryParser,
'query': AioQueryParser,
'json': AioJSONParser,
'rest-json': AioRestJSONParser,
'rest-xml': AioRestXMLParser,
}
class AioResponseParserFactory(ResponseParserFactory):
def create_parser(self, protocol_name):
parser_cls = PROTOCOL_PARSERS[protocol_name]
return parser_cls(**self._defaults)
| 1,857 | Python | 33.407407 | 68 | 0.700054 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/certifi/__init__.py | from .core import contents, where
__all__ = ["contents", "where"]
__version__ = "2023.05.07"
| 94 | Python | 17.999996 | 33 | 0.617021 |