# === omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/poolmanager.py ===
from __future__ import absolute_import
import collections
import functools
import logging
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme
from .exceptions import (
LocationValueError,
MaxRetryError,
ProxySchemeUnknown,
ProxySchemeUnsupported,
URLSchemeUnknown,
)
from .packages import six
from .packages.six.moves.urllib.parse import urljoin
from .request import RequestMethods
from .util.proxy import connection_requires_http_tunnel
from .util.retry import Retry
from .util.url import parse_url
__all__ = ["PoolManager", "ProxyManager", "proxy_from_url"]
log = logging.getLogger(__name__)
SSL_KEYWORDS = (
"key_file",
"cert_file",
"cert_reqs",
"ca_certs",
"ssl_version",
"ca_cert_dir",
"ssl_context",
"key_password",
"server_hostname",
)
# All known keyword arguments that could be provided to the pool manager, its
# pools, or the underlying connections. This is used to construct a pool key.
_key_fields = (
"key_scheme", # str
"key_host", # str
"key_port", # int
"key_timeout", # int or float or Timeout
"key_retries", # int or Retry
"key_strict", # bool
"key_block", # bool
"key_source_address", # str
"key_key_file", # str
"key_key_password", # str
"key_cert_file", # str
"key_cert_reqs", # str
"key_ca_certs", # str
"key_ssl_version", # str
"key_ca_cert_dir", # str
"key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
"key_maxsize", # int
"key_headers", # dict
"key__proxy", # parsed proxy url
"key__proxy_headers", # dict
"key__proxy_config", # class
"key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples
"key__socks_options", # dict
"key_assert_hostname", # bool or string
"key_assert_fingerprint", # str
"key_server_hostname", # str
)
#: The namedtuple class used to construct keys for the connection pool.
#: All custom key schemes should include the fields in this key at a minimum.
PoolKey = collections.namedtuple("PoolKey", _key_fields)
_proxy_config_fields = ("ssl_context", "use_forwarding_for_https")
ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields)
def _default_key_normalizer(key_class, request_context):
"""
Create a pool key out of a request context dictionary.
According to RFC 3986, both the scheme and host are case-insensitive.
Therefore, this function normalizes both before constructing the pool
key for an HTTPS request. If you wish to change this behaviour, provide
alternate callables to ``key_fn_by_scheme``.
:param key_class:
The class to use when constructing the key. This should be a namedtuple
with the ``scheme`` and ``host`` keys at a minimum.
:type key_class: namedtuple
:param request_context:
A dictionary-like object that contains the context for a request.
:type request_context: dict
:return: A namedtuple that can be used as a connection pool key.
:rtype: PoolKey
"""
# Since we mutate the dictionary, make a copy first
context = request_context.copy()
context["scheme"] = context["scheme"].lower()
context["host"] = context["host"].lower()
# These are both dictionaries and need to be transformed into frozensets
for key in ("headers", "_proxy_headers", "_socks_options"):
if key in context and context[key] is not None:
context[key] = frozenset(context[key].items())
# The socket_options key may be a list and needs to be transformed into a
# tuple.
socket_opts = context.get("socket_options")
if socket_opts is not None:
context["socket_options"] = tuple(socket_opts)
# Map the kwargs to the names in the namedtuple - this is necessary since
# namedtuples can't have fields starting with '_'.
for key in list(context.keys()):
context["key_" + key] = context.pop(key)
# Default to ``None`` for keys missing from the context
for field in key_class._fields:
if field not in context:
context[field] = None
return key_class(**context)
#: A dictionary that maps a scheme to a callable that creates a pool key.
#: This can be used to alter the way pool keys are constructed, if desired.
#: Each PoolManager makes a copy of this dictionary so they can be configured
#: globally here, or individually on the instance.
key_fn_by_scheme = {
"http": functools.partial(_default_key_normalizer, PoolKey),
"https": functools.partial(_default_key_normalizer, PoolKey),
}
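
# Illustrative sketch (not part of upstream urllib3): how a caller might plug
# a custom normalizer into ``key_fn_by_scheme`` on a PoolManager instance.
# This hypothetical wrapper reuses the default normalizer but drops headers
# from the key, so pools are shared across requests that differ only in
# their default headers.
def _example_headerless_key_fn(request_context):
    context = request_context.copy()
    context.pop("headers", None)
    return _default_key_normalizer(PoolKey, context)

# A caller would then install it per-instance, e.g.:
#   manager = PoolManager()
#   manager.key_fn_by_scheme["https"] = _example_headerless_key_fn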
pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool}
class PoolManager(RequestMethods):
"""
Allows for arbitrary requests while transparently keeping track of
necessary connection pools for you.
:param num_pools:
Number of connection pools to cache before discarding the least
recently used pool.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param \\**connection_pool_kw:
Additional parameters are used to create fresh
:class:`urllib3.connectionpool.ConnectionPool` instances.
Example::
>>> manager = PoolManager(num_pools=2)
>>> r = manager.request('GET', 'http://google.com/')
>>> r = manager.request('GET', 'http://google.com/mail')
>>> r = manager.request('GET', 'http://yahoo.com/')
>>> len(manager.pools)
2
"""
proxy = None
proxy_config = None
def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
RequestMethods.__init__(self, headers)
self.connection_pool_kw = connection_pool_kw
self.pools = RecentlyUsedContainer(num_pools)
# Locally set the pool classes and keys so other PoolManagers can
# override them.
self.pool_classes_by_scheme = pool_classes_by_scheme
self.key_fn_by_scheme = key_fn_by_scheme.copy()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.clear()
# Return False to re-raise any potential exceptions
return False
def _new_pool(self, scheme, host, port, request_context=None):
"""
Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and
any additional pool keyword arguments.
If ``request_context`` is provided, it is provided as keyword arguments
to the pool class used. This method is used to actually create the
connection pools handed out by :meth:`connection_from_url` and
companion methods. It is intended to be overridden for customization.
"""
pool_cls = self.pool_classes_by_scheme[scheme]
if request_context is None:
request_context = self.connection_pool_kw.copy()
# Although the context has everything necessary to create the pool,
# this function has historically only used the scheme, host, and port
# in the positional args. When an API change is acceptable these can
# be removed.
for key in ("scheme", "host", "port"):
request_context.pop(key, None)
if scheme == "http":
for kw in SSL_KEYWORDS:
request_context.pop(kw, None)
return pool_cls(host, port, **request_context)
def clear(self):
"""
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
"""
self.pools.clear()
def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None):
"""
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
provided, it is merged with the instance's ``connection_pool_kw``
variable and used to create the new connection pool, if one is
needed.
"""
if not host:
raise LocationValueError("No host specified.")
request_context = self._merge_pool_kwargs(pool_kwargs)
request_context["scheme"] = scheme or "http"
if not port:
port = port_by_scheme.get(request_context["scheme"].lower(), 80)
request_context["port"] = port
request_context["host"] = host
return self.connection_from_context(request_context)
def connection_from_context(self, request_context):
"""
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context.
``request_context`` must at least contain the ``scheme`` key and its
value must be a key in the ``key_fn_by_scheme`` instance variable.
"""
scheme = request_context["scheme"].lower()
pool_key_constructor = self.key_fn_by_scheme.get(scheme)
if not pool_key_constructor:
raise URLSchemeUnknown(scheme)
pool_key = pool_key_constructor(request_context)
return self.connection_from_pool_key(pool_key, request_context=request_context)
def connection_from_pool_key(self, pool_key, request_context=None):
"""
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key.
``pool_key`` should be a namedtuple that only contains immutable
objects. At a minimum it must have the ``scheme``, ``host``, and
``port`` fields.
"""
with self.pools.lock:
# If the scheme, host, or port doesn't match existing open
# connections, open a new ConnectionPool.
pool = self.pools.get(pool_key)
if pool:
return pool
# Make a fresh ConnectionPool of the desired type
scheme = request_context["scheme"]
host = request_context["host"]
port = request_context["port"]
pool = self._new_pool(scheme, host, port, request_context=request_context)
self.pools[pool_key] = pool
return pool
def connection_from_url(self, url, pool_kwargs=None):
"""
Similar to :func:`urllib3.connectionpool.connection_from_url`.
If ``pool_kwargs`` is not provided and a new pool needs to be
constructed, ``self.connection_pool_kw`` is used to initialize
the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
is provided, it is used instead. Note that if a new pool does not
need to be created for the request, the provided ``pool_kwargs`` are
not used.
"""
u = parse_url(url)
return self.connection_from_host(
u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs
)
def _merge_pool_kwargs(self, override):
"""
Merge a dictionary of override values for self.connection_pool_kw.
This does not modify self.connection_pool_kw and returns a new dict.
Any keys in the override dictionary with a value of ``None`` are
removed from the merged dictionary.
"""
base_pool_kwargs = self.connection_pool_kw.copy()
if override:
for key, value in override.items():
if value is None:
try:
del base_pool_kwargs[key]
except KeyError:
pass
else:
base_pool_kwargs[key] = value
return base_pool_kwargs
def _proxy_requires_url_absolute_form(self, parsed_url):
"""
Indicates if the proxy requires the complete destination URL in the
request. Normally this is only needed when not using an HTTP CONNECT
tunnel.
"""
if self.proxy is None:
return False
return not connection_requires_http_tunnel(
self.proxy, self.proxy_config, parsed_url.scheme
)
def _validate_proxy_scheme_url_selection(self, url_scheme):
"""
Validates that we're not attempting to do TLS in TLS connections on
Python 2 or with unsupported SSL implementations.
"""
if self.proxy is None or url_scheme != "https":
return
if self.proxy.scheme != "https":
return
if six.PY2 and not self.proxy_config.use_forwarding_for_https:
raise ProxySchemeUnsupported(
"Contacting HTTPS destinations through HTTPS proxies "
"'via CONNECT tunnels' is not supported in Python 2"
)
def urlopen(self, method, url, redirect=True, **kw):
"""
Same as :meth:`urllib3.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
self._validate_proxy_scheme_url_selection(u.scheme)
conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
kw["assert_same_host"] = False
kw["redirect"] = False
if "headers" not in kw:
kw["headers"] = self.headers.copy()
if self._proxy_requires_url_absolute_form(u):
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
# Support relative URLs for redirecting.
redirect_location = urljoin(url, redirect_location)
# RFC 7231, Section 6.4.4
if response.status == 303:
method = "GET"
retries = kw.get("retries")
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect)
# Strip headers marked as unsafe to forward to the redirected location.
# Check remove_headers_on_redirect to avoid a potential network call within
# conn.is_same_host() which may use socket.gethostbyname() in the future.
if retries.remove_headers_on_redirect and not conn.is_same_host(
redirect_location
):
headers = list(six.iterkeys(kw["headers"]))
for header in headers:
if header.lower() in retries.remove_headers_on_redirect:
kw["headers"].pop(header, None)
try:
retries = retries.increment(method, url, response=response, _pool=conn)
except MaxRetryError:
if retries.raise_on_redirect:
response.drain_conn()
raise
return response
kw["retries"] = retries
kw["redirect"] = redirect
log.info("Redirecting %s -> %s", url, redirect_location)
response.drain_conn()
return self.urlopen(method, redirect_location, **kw)
class ProxyManager(PoolManager):
"""
Behaves just like :class:`PoolManager`, but sends all requests through
the defined proxy, using the CONNECT method for HTTPS URLs.
:param proxy_url:
The URL of the proxy to be used.
:param proxy_headers:
A dictionary containing headers that will be sent to the proxy. For
HTTP proxying they are sent with every request, while in the
HTTPS/CONNECT case they are sent only once. Could be used for proxy
authentication.
:param proxy_ssl_context:
The proxy SSL context is used to establish the TLS connection to the
proxy when using HTTPS proxies.
:param use_forwarding_for_https:
(Defaults to False) If set to True will forward requests to the HTTPS
proxy to be made on behalf of the client instead of creating a TLS
tunnel via the CONNECT method. **Enabling this flag means that request
and response headers and content will be visible from the HTTPS proxy**
whereas tunneling keeps request and response headers and content
private. IP address, target hostname, SNI, and port are always visible
to an HTTPS proxy even when this flag is disabled.
Example:
>>> proxy = urllib3.ProxyManager('http://localhost:3128/')
>>> r1 = proxy.request('GET', 'http://google.com/')
>>> r2 = proxy.request('GET', 'http://httpbin.org/')
>>> len(proxy.pools)
1
>>> r3 = proxy.request('GET', 'https://httpbin.org/')
>>> r4 = proxy.request('GET', 'https://twitter.com/')
>>> len(proxy.pools)
3
"""
def __init__(
self,
proxy_url,
num_pools=10,
headers=None,
proxy_headers=None,
proxy_ssl_context=None,
use_forwarding_for_https=False,
**connection_pool_kw
):
if isinstance(proxy_url, HTTPConnectionPool):
proxy_url = "%s://%s:%i" % (
proxy_url.scheme,
proxy_url.host,
proxy_url.port,
)
proxy = parse_url(proxy_url)
if proxy.scheme not in ("http", "https"):
raise ProxySchemeUnknown(proxy.scheme)
if not proxy.port:
port = port_by_scheme.get(proxy.scheme, 80)
proxy = proxy._replace(port=port)
self.proxy = proxy
self.proxy_headers = proxy_headers or {}
self.proxy_ssl_context = proxy_ssl_context
self.proxy_config = ProxyConfig(proxy_ssl_context, use_forwarding_for_https)
connection_pool_kw["_proxy"] = self.proxy
connection_pool_kw["_proxy_headers"] = self.proxy_headers
connection_pool_kw["_proxy_config"] = self.proxy_config
super(ProxyManager, self).__init__(num_pools, headers, **connection_pool_kw)
def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None):
if scheme == "https":
return super(ProxyManager, self).connection_from_host(
host, port, scheme, pool_kwargs=pool_kwargs
)
return super(ProxyManager, self).connection_from_host(
self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs
)
def _set_proxy_headers(self, url, headers=None):
"""
Sets headers needed by proxies: specifically, the Accept and Host
headers. Only sets headers not provided by the user.
"""
headers_ = {"Accept": "*/*"}
netloc = parse_url(url).netloc
if netloc:
headers_["Host"] = netloc
if headers:
headers_.update(headers)
return headers_
def urlopen(self, method, url, redirect=True, **kw):
"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
u = parse_url(url)
if not connection_requires_http_tunnel(self.proxy, self.proxy_config, u.scheme):
# For connections using HTTP CONNECT, httplib sets the necessary
# headers on the CONNECT to the proxy. If we're not using CONNECT,
# we'll definitely need to set 'Host' at the very least.
headers = kw.get("headers", self.headers)
kw["headers"] = self._set_proxy_headers(url, headers)
return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
def proxy_from_url(url, **kw):
return ProxyManager(proxy_url=url, **kw)
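
# Illustrative usage sketch (not part of upstream urllib3). It exercises the
# pool-caching behaviour documented on PoolManager without any network I/O:
# connection_from_url() only constructs (or looks up) a ConnectionPool.
def _example_pool_reuse():
    manager = PoolManager(num_pools=2)
    a = manager.connection_from_url("http://example.com/")
    b = manager.connection_from_url("http://example.com/other/path")
    c = manager.connection_from_url("https://example.com/")
    # Same scheme/host/port map to the same pooled object; HTTPS gets its own.
    assert a is b
    assert a is not c
    assert len(manager.pools) == 2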

# === omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/fields.py ===
from __future__ import absolute_import
import email.utils
import mimetypes
import re
from .packages import six
def guess_content_type(filename, default="application/octet-stream"):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
def format_header_param_rfc2231(name, value):
"""
Helper function to format and quote a single header parameter using the
strategy defined in RFC 2231.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows
`RFC 2388 Section 4.4 <https://tools.ietf.org/html/rfc2388#section-4.4>`_.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as ``bytes`` or ``str``.
:ret:
An RFC-2231-formatted unicode string.
"""
if isinstance(value, six.binary_type):
value = value.decode("utf-8")
if not any(ch in value for ch in '"\\\r\n'):
result = u'%s="%s"' % (name, value)
try:
result.encode("ascii")
except (UnicodeEncodeError, UnicodeDecodeError):
pass
else:
return result
if six.PY2: # Python 2:
value = value.encode("utf-8")
# encode_rfc2231 accepts an encoded string and returns an ascii-encoded
# string in Python 2 but accepts and returns unicode strings in Python 3
value = email.utils.encode_rfc2231(value, "utf-8")
value = "%s*=%s" % (name, value)
if six.PY2: # Python 2:
value = value.decode("utf-8")
return value
_HTML5_REPLACEMENTS = {
u"\u0022": u"%22",
# Replace "\" with "\\".
u"\u005C": u"\u005C\u005C",
}
# All control characters from 0x00 to 0x1F *except* 0x1B.
_HTML5_REPLACEMENTS.update(
{
six.unichr(cc): u"%{:02X}".format(cc)
for cc in range(0x00, 0x1F + 1)
if cc not in (0x1B,)
}
)
def _replace_multiple(value, needles_and_replacements):
def replacer(match):
return needles_and_replacements[match.group(0)]
pattern = re.compile(
r"|".join([re.escape(needle) for needle in needles_and_replacements.keys()])
)
result = pattern.sub(replacer, value)
return result
def format_header_param_html5(name, value):
"""
Helper function to format and quote a single header parameter using the
HTML5 strategy.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows the `HTML5 Working Draft
Section 4.10.22.7`_ and matches the behavior of curl and modern browsers.
.. _HTML5 Working Draft Section 4.10.22.7:
https://w3c.github.io/html/sec-forms.html#multipart-form-data
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as ``bytes`` or ``str``.
:ret:
A unicode string, stripped of troublesome characters.
"""
if isinstance(value, six.binary_type):
value = value.decode("utf-8")
value = _replace_multiple(value, _HTML5_REPLACEMENTS)
return u'%s="%s"' % (name, value)
# For backwards-compatibility.
format_header_param = format_header_param_html5
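
# Illustrative sketch (not part of upstream urllib3) contrasting the two
# strategies above on a non-ASCII filename: the HTML5 formatter keeps the
# value as-is inside quotes, while the RFC 2231 formatter produces an
# encoded ``filename*=`` parameter.
def _example_compare_formatters():
    value = u"na\u00efve file.txt"
    html5 = format_header_param_html5(u"filename", value)  # filename="naïve file.txt"
    rfc2231 = format_header_param_rfc2231(u"filename", value)  # filename*=utf-8''na%C3%AFve%20file.txt
    return html5, rfc2231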
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field. Must be unicode.
:param data:
The data/value body.
:param filename:
An optional filename of the request field. Must be unicode.
:param headers:
An optional dict-like object of headers to initially use for the field.
:param header_formatter:
An optional callable that is used to encode and format the headers. By
default, this is :func:`format_header_param_html5`.
"""
def __init__(
self,
name,
data,
filename=None,
headers=None,
header_formatter=format_header_param_html5,
):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
self.header_formatter = header_formatter
@classmethod
def from_tuples(cls, fieldname, value, header_formatter=format_header_param_html5):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
parameter of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(
fieldname, data, filename=filename, header_formatter=header_formatter
)
request_param.make_multipart(content_type=content_type)
return request_param
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter. By
default, this calls ``self.header_formatter``.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return self.header_formatter(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
as `k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value is not None:
parts.append(self._render_part(name, value))
return u"; ".join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ["Content-Disposition", "Content-Type", "Content-Location"]
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append(u"%s: %s" % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append(u"%s: %s" % (header_name, header_value))
lines.append(u"\r\n")
return u"\r\n".join(lines)
def make_multipart(
self, content_disposition=None, content_type=None, content_location=None
):
"""
Makes this request field into a multipart request field.
This method overwrites the "Content-Disposition", "Content-Type" and
"Content-Location" headers of the request parameter.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers["Content-Disposition"] = content_disposition or u"form-data"
self.headers["Content-Disposition"] += u"; ".join(
[
u"",
self._render_parts(
((u"name", self._name), (u"filename", self._filename))
),
]
)
self.headers["Content-Type"] = content_type
self.headers["Content-Location"] = content_location

# === omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/_collections.py ===
from __future__ import absolute_import
try:
from collections.abc import Mapping, MutableMapping
except ImportError:
from collections import Mapping, MutableMapping
try:
from threading import RLock
except ImportError: # Platform-specific: No threads available
class RLock:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
from collections import OrderedDict
from .exceptions import InvalidHeader
from .packages import six
from .packages.six import iterkeys, itervalues
__all__ = ["RecentlyUsedContainer", "HTTPHeaderDict"]
_Null = object()
class RecentlyUsedContainer(MutableMapping):
"""
Provides a thread-safe dict-like container which maintains up to
``maxsize`` keys while throwing away the least-recently-used keys beyond
``maxsize``.
:param maxsize:
Maximum number of recent elements to retain.
:param dispose_func:
Every time an item is evicted from the container,
``dispose_func(value)`` is called.
"""
ContainerCls = OrderedDict
def __init__(self, maxsize=10, dispose_func=None):
self._maxsize = maxsize
self.dispose_func = dispose_func
self._container = self.ContainerCls()
self.lock = RLock()
def __getitem__(self, key):
# Re-insert the item, moving it to the end of the eviction line.
with self.lock:
item = self._container.pop(key)
self._container[key] = item
return item
def __setitem__(self, key, value):
evicted_value = _Null
with self.lock:
# Possibly evict the existing value of 'key'
evicted_value = self._container.get(key, _Null)
self._container[key] = value
# If we didn't evict an existing value, we might have to evict the
# least recently used item from the beginning of the container.
if len(self._container) > self._maxsize:
_key, evicted_value = self._container.popitem(last=False)
if self.dispose_func and evicted_value is not _Null:
self.dispose_func(evicted_value)
def __delitem__(self, key):
with self.lock:
value = self._container.pop(key)
if self.dispose_func:
self.dispose_func(value)
def __len__(self):
with self.lock:
return len(self._container)
def __iter__(self):
raise NotImplementedError(
"Iteration over this class is unlikely to be threadsafe."
)
def clear(self):
with self.lock:
# Copy pointers to all values, then wipe the mapping
values = list(itervalues(self._container))
self._container.clear()
if self.dispose_func:
for value in values:
self.dispose_func(value)
def keys(self):
with self.lock:
return list(iterkeys(self._container))
class HTTPHeaderDict(MutableMapping):
"""
A ``dict`` like container for storing HTTP Headers.
:param headers:
An iterable of field-value pairs. Must not contain multiple field names
when compared case-insensitively.
:param kwargs:
Additional field-value pairs to pass in to ``dict.update``.
Field names are stored and compared case-insensitively in compliance with
RFC 7230. Iteration provides the first case-sensitive key seen for each
case-insensitive pair.
Using ``__setitem__`` syntax overwrites fields that compare equal
case-insensitively in order to maintain ``dict``'s api. For fields that
compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
in a loop.
If multiple fields that are equal case-insensitively are passed to the
constructor or ``.update``, the behavior is undefined and some will be
lost.
>>> headers = HTTPHeaderDict()
>>> headers.add('Set-Cookie', 'foo=bar')
>>> headers.add('set-cookie', 'baz=quxx')
>>> headers['content-length'] = '7'
>>> headers['SET-cookie']
'foo=bar, baz=quxx'
>>> headers['Content-Length']
'7'
"""
def __init__(self, headers=None, **kwargs):
super(HTTPHeaderDict, self).__init__()
self._container = OrderedDict()
if headers is not None:
if isinstance(headers, HTTPHeaderDict):
self._copy_from(headers)
else:
self.extend(headers)
if kwargs:
self.extend(kwargs)
def __setitem__(self, key, val):
self._container[key.lower()] = [key, val]
return self._container[key.lower()]
def __getitem__(self, key):
val = self._container[key.lower()]
return ", ".join(val[1:])
def __delitem__(self, key):
del self._container[key.lower()]
def __contains__(self, key):
return key.lower() in self._container
def __eq__(self, other):
if not isinstance(other, Mapping) and not hasattr(other, "keys"):
return False
if not isinstance(other, type(self)):
other = type(self)(other)
return dict((k.lower(), v) for k, v in self.itermerged()) == dict(
(k.lower(), v) for k, v in other.itermerged()
)
def __ne__(self, other):
return not self.__eq__(other)
if six.PY2: # Python 2
iterkeys = MutableMapping.iterkeys
itervalues = MutableMapping.itervalues
__marker = object()
def __len__(self):
return len(self._container)
def __iter__(self):
# Only provide the originally cased names
for vals in self._container.values():
yield vals[0]
def pop(self, key, default=__marker):
"""D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
"""
# Using the MutableMapping function directly fails due to the private marker.
# Using ordinary dict.pop would expose the internal structures.
# So let's reinvent the wheel.
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def discard(self, key):
try:
del self[key]
except KeyError:
pass
def add(self, key, val):
"""Adds a (name, value) pair, doesn't overwrite the value if it already
exists.
>>> headers = HTTPHeaderDict(foo='bar')
>>> headers.add('Foo', 'baz')
>>> headers['foo']
'bar, baz'
"""
key_lower = key.lower()
new_vals = [key, val]
# Keep the common case aka no item present as fast as possible
vals = self._container.setdefault(key_lower, new_vals)
if new_vals is not vals:
vals.append(val)
def extend(self, *args, **kwargs):
"""Generic import function for any type of header-like object.
Adapted version of MutableMapping.update in order to insert items
with self.add instead of self.__setitem__
"""
if len(args) > 1:
raise TypeError(
"extend() takes at most 1 positional "
"arguments ({0} given)".format(len(args))
)
other = args[0] if len(args) >= 1 else ()
if isinstance(other, HTTPHeaderDict):
for key, val in other.iteritems():
self.add(key, val)
elif isinstance(other, Mapping):
for key in other:
self.add(key, other[key])
elif hasattr(other, "keys"):
for key in other.keys():
self.add(key, other[key])
else:
for key, value in other:
self.add(key, value)
for key, value in kwargs.items():
self.add(key, value)
def getlist(self, key, default=__marker):
"""Returns a list of all the values for the named field. Returns an
empty list if the key doesn't exist."""
try:
vals = self._container[key.lower()]
except KeyError:
if default is self.__marker:
return []
return default
else:
return vals[1:]
# Backwards compatibility for httplib
getheaders = getlist
getallmatchingheaders = getlist
iget = getlist
# Backwards compatibility for http.cookiejar
get_all = getlist
def __repr__(self):
return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
def _copy_from(self, other):
for key in other:
val = other.getlist(key)
if isinstance(val, list):
# Don't need to convert tuples
val = list(val)
self._container[key.lower()] = [key] + val
def copy(self):
clone = type(self)()
clone._copy_from(self)
return clone
def iteritems(self):
"""Iterate over all header lines, including duplicate ones."""
for key in self:
vals = self._container[key.lower()]
for val in vals[1:]:
yield vals[0], val
def itermerged(self):
"""Iterate over all headers, merging duplicate ones together."""
for key in self:
val = self._container[key.lower()]
yield val[0], ", ".join(val[1:])
def items(self):
return list(self.iteritems())
@classmethod
def from_httplib(cls, message): # Python 2
"""Read headers from a Python 2 httplib message object."""
# python2.7 does not expose a proper API for exporting multiheaders
# efficiently. This function re-reads raw lines from the message
# object and extracts the multiheaders properly.
obs_fold_continued_leaders = (" ", "\t")
headers = []
for line in message.headers:
if line.startswith(obs_fold_continued_leaders):
if not headers:
# We received a header line that starts with OWS as described
# in RFC-7230 S3.2.4. This indicates a multiline header, but
# there exists no previous header to which we can attach it.
raise InvalidHeader(
"Header continuation with no previous header: %s" % line
)
else:
key, value = headers[-1]
headers[-1] = (key, value + " " + line.strip())
continue
key, value = line.split(":", 1)
headers.append((key, value.strip()))
return cls(headers)
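
# Illustrative usage sketch (not part of upstream urllib3), exercising both
# containers defined above without any network or threading involved.
def _example_collections():
    # HTTPHeaderDict keeps duplicate fields and joins them on lookup.
    headers = HTTPHeaderDict()
    headers.add("Set-Cookie", "foo=bar")
    headers.add("set-cookie", "baz=quxx")
    assert headers["SET-COOKIE"] == "foo=bar, baz=quxx"
    assert headers.getlist("set-cookie") == ["foo=bar", "baz=quxx"]
    # RecentlyUsedContainer evicts the least recently used entry and hands
    # the evicted value to dispose_func.
    evicted = []
    lru = RecentlyUsedContainer(maxsize=2, dispose_func=evicted.append)
    lru["a"] = 1
    lru["b"] = 2
    _ = lru["a"]  # touch "a" so that "b" becomes the eviction candidate
    lru["c"] = 3
    assert evicted == [2] and "b" not in lru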

# === omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/request.py ===
from __future__ import absolute_import
from .filepost import encode_multipart_formdata
from .packages.six.moves.urllib.parse import urlencode
__all__ = ["RequestMethods"]
class RequestMethods(object):
"""
Convenience mixin for classes that implement a :meth:`urlopen` method, such
as :class:`urllib3.HTTPConnectionPool` and
:class:`urllib3.PoolManager`.
Provides behavior for making common types of HTTP request methods and
decides which type of request field encoding to use.
Specifically,
:meth:`.request_encode_url` is for sending requests whose fields are
encoded in the URL (such as GET, HEAD, DELETE).
:meth:`.request_encode_body` is for sending requests whose fields are
encoded in the *body* of the request using multipart or www-form-urlencoded
(such as for POST, PUT, PATCH).
:meth:`.request` is for making any kind of request; it will look up
appropriate encoding format and use one of the above two methods to make
the request.
Initializer parameters:
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
"""
_encode_url_methods = {"DELETE", "GET", "HEAD", "OPTIONS"}
def __init__(self, headers=None):
self.headers = headers or {}
def urlopen(
self,
method,
url,
body=None,
headers=None,
encode_multipart=True,
multipart_boundary=None,
**kw
): # Abstract
raise NotImplementedError(
"Classes extending RequestMethods must implement "
"their own ``urlopen`` method."
)
def request(self, method, url, fields=None, headers=None, **urlopen_kw):
"""
Make a request using :meth:`urlopen` with the appropriate encoding of
``fields`` based on the ``method`` used.
This is a convenience method that requires the least amount of manual
effort. It can be used in most situations, while still having the
option to drop down to more specific methods when necessary, such as
:meth:`request_encode_url`, :meth:`request_encode_body`,
or even the lowest level :meth:`urlopen`.
"""
method = method.upper()
urlopen_kw["request_url"] = url
if method in self._encode_url_methods:
return self.request_encode_url(
method, url, fields=fields, headers=headers, **urlopen_kw
)
else:
return self.request_encode_body(
method, url, fields=fields, headers=headers, **urlopen_kw
)
def request_encode_url(self, method, url, fields=None, headers=None, **urlopen_kw):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the url. This is useful for request methods like GET, HEAD, DELETE, etc.
"""
if headers is None:
headers = self.headers
extra_kw = {"headers": headers}
extra_kw.update(urlopen_kw)
if fields:
url += "?" + urlencode(fields)
return self.urlopen(method, url, **extra_kw)
def request_encode_body(
self,
method,
url,
fields=None,
headers=None,
encode_multipart=True,
multipart_boundary=None,
**urlopen_kw
):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the body. This is useful for request methods like POST, PUT, PATCH, etc.
When ``encode_multipart=True`` (default), then
:func:`urllib3.encode_multipart_formdata` is used to encode
the payload with the appropriate content type. Otherwise
:func:`urllib.parse.urlencode` is used with the
'application/x-www-form-urlencoded' content type.
Multipart encoding must be used when posting files, and it's reasonably
safe to use it at other times too. However, it may break request
signing, such as with OAuth.
Supports an optional ``fields`` parameter of key/value strings AND
key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
the MIME type is optional. For example::
fields = {
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(),
'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
}
When uploading a file, providing a filename (the first parameter of the
tuple) is optional but recommended to best mimic behavior of browsers.
Note that if ``headers`` are supplied, the 'Content-Type' header will
be overwritten because it depends on the dynamic random boundary string
which is used to compose the body of the request. The random boundary
string can be explicitly set with the ``multipart_boundary`` parameter.
"""
if headers is None:
headers = self.headers
extra_kw = {"headers": {}}
if fields:
if "body" in urlopen_kw:
raise TypeError(
"request got values for both 'fields' and 'body', can only specify one."
)
if encode_multipart:
body, content_type = encode_multipart_formdata(
fields, boundary=multipart_boundary
)
else:
body, content_type = (
urlencode(fields),
"application/x-www-form-urlencoded",
)
extra_kw["body"] = body
extra_kw["headers"] = {"Content-Type": content_type}
extra_kw["headers"].update(headers)
extra_kw.update(urlopen_kw)
return self.urlopen(method, url, **extra_kw)
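
# Illustrative sketch (not part of upstream urllib3): a minimal subclass that
# records what request() hands to urlopen(), demonstrating the GET-vs-POST
# encoding split described above without performing any I/O.
class _RecordingRequestMethods(RequestMethods):
    def __init__(self, headers=None):
        RequestMethods.__init__(self, headers)
        self.calls = []

    def urlopen(self, method, url, body=None, headers=None, **kw):
        self.calls.append((method, url, body, headers))

def _example_request_dispatch():
    rm = _RecordingRequestMethods()
    rm.request("GET", "http://example.com/", fields={"q": "urllib3"})
    rm.request("POST", "http://example.com/", fields={"q": "urllib3"})
    get_call, post_call = rm.calls
    assert get_call[1].endswith("?q=urllib3")  # fields moved into the URL
    assert post_call[2] is not None  # fields encoded into a multipart body
    return rm.calls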

# === omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/__init__.py ===
"""
Python HTTP library with thread-safe connection pooling, file post support, a user-friendly API, and more
"""
from __future__ import absolute_import
# Set default logging handler to avoid "No handler found" warnings.
import logging
import warnings
from logging import NullHandler
from . import exceptions
from ._version import __version__
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util.request import make_headers
from .util.retry import Retry
from .util.timeout import Timeout
from .util.url import get_host
# === NOTE TO REPACKAGERS AND VENDORS ===
# Please delete this block, this logic is only
# for urllib3 being distributed via PyPI.
# See: https://github.com/urllib3/urllib3/issues/2680
try:
import urllib3_secure_extra # type: ignore # noqa: F401
except ImportError:
pass
else:
warnings.warn(
"'urllib3[secure]' extra is deprecated and will be removed "
"in a future release of urllib3 2.x. Read more in this issue: "
"https://github.com/urllib3/urllib3/issues/2680",
category=DeprecationWarning,
stacklevel=2,
)
__author__ = "Andrey Petrov ([email protected])"
__license__ = "MIT"
__version__ = __version__
__all__ = (
"HTTPConnectionPool",
"HTTPSConnectionPool",
"PoolManager",
"ProxyManager",
"HTTPResponse",
"Retry",
"Timeout",
"add_stderr_logger",
"connection_from_url",
"disable_warnings",
"encode_multipart_formdata",
"get_host",
"make_headers",
"proxy_from_url",
)
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
"""
Helper for quickly adding a StreamHandler to the logger. Useful for
debugging.
Returns the handler after adding it.
"""
# This method needs to be in this __init__.py to get the __name__ correct
# even if urllib3 is vendored within another package.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
logger.addHandler(handler)
logger.setLevel(level)
logger.debug("Added a stderr logging handler to logger: %s", __name__)
return handler
# ... Clean up.
del NullHandler
# All warning filters *must* be appended unless you're really certain that they
# shouldn't be: otherwise, it's very hard for users to use most Python
# mechanisms to silence them.
# SecurityWarnings always go off by default.
warnings.simplefilter("always", exceptions.SecurityWarning, append=True)
# SubjectAltNameWarnings should go off once per host
warnings.simplefilter("default", exceptions.SubjectAltNameWarning, append=True)
# InsecurePlatformWarnings don't vary between requests, so we keep the default.
warnings.simplefilter("default", exceptions.InsecurePlatformWarning, append=True)
# SNIMissingWarnings should go off only once.
warnings.simplefilter("default", exceptions.SNIMissingWarning, append=True)
def disable_warnings(category=exceptions.HTTPWarning):
"""
Helper for quickly disabling all urllib3 warnings.
"""
warnings.simplefilter("ignore", category)

# === omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/connectionpool.py ===
from __future__ import absolute_import
import errno
import logging
import re
import socket
import sys
import warnings
from socket import error as SocketError
from socket import timeout as SocketTimeout
from .connection import (
BaseSSLError,
BrokenPipeError,
DummyConnection,
HTTPConnection,
HTTPException,
HTTPSConnection,
VerifiedHTTPSConnection,
port_by_scheme,
)
from .exceptions import (
ClosedPoolError,
EmptyPoolError,
HeaderParsingError,
HostChangedError,
InsecureRequestWarning,
LocationValueError,
MaxRetryError,
NewConnectionError,
ProtocolError,
ProxyError,
ReadTimeoutError,
SSLError,
TimeoutError,
)
from .packages import six
from .packages.six.moves import queue
from .request import RequestMethods
from .response import HTTPResponse
from .util.connection import is_connection_dropped
from .util.proxy import connection_requires_http_tunnel
from .util.queue import LifoQueue
from .util.request import set_file_position
from .util.response import assert_header_parsing
from .util.retry import Retry
from .util.ssl_match_hostname import CertificateError
from .util.timeout import Timeout
from .util.url import Url, _encode_target
from .util.url import _normalize_host as normalize_host
from .util.url import get_host, parse_url
try: # Platform-specific: Python 3
import weakref
weakref_finalize = weakref.finalize
except AttributeError: # Platform-specific: Python 2
from .packages.backports.weakref_finalize import weakref_finalize
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
# Pool objects
class ConnectionPool(object):
"""
Base class for all connection pools, such as
:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
.. note::
ConnectionPool.urlopen() does not normalize or percent-encode target URIs,
which is useful if your target server doesn't support percent-encoded
target URIs.
"""
scheme = None
QueueCls = LifoQueue
def __init__(self, host, port=None):
if not host:
raise LocationValueError("No host specified.")
self.host = _normalize_host(host, scheme=self.scheme)
self._proxy_host = host.lower()
self.port = port
def __str__(self):
return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
# Return False to re-raise any potential exceptions
return False
def close(self):
"""
Close all pooled connections and disable the pool.
"""
pass
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`http.client.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`http.client.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`http.client.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. Once parsed by the
constructor, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to False, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param retries:
Retry configuration to use by default with requests in this pool.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.ProxyManager`
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.ProxyManager`
:param \\**conn_kw:
Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
:class:`urllib3.connection.HTTPSConnection` instances.
"""
scheme = "http"
ConnectionCls = HTTPConnection
ResponseCls = HTTPResponse
def __init__(
self,
host,
port=None,
strict=False,
timeout=Timeout.DEFAULT_TIMEOUT,
maxsize=1,
block=False,
headers=None,
retries=None,
_proxy=None,
_proxy_headers=None,
_proxy_config=None,
**conn_kw
):
ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers)
self.strict = strict
if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout)
if retries is None:
retries = Retry.DEFAULT
self.timeout = timeout
self.retries = retries
self.pool = self.QueueCls(maxsize)
self.block = block
self.proxy = _proxy
self.proxy_headers = _proxy_headers or {}
self.proxy_config = _proxy_config
# Fill the queue up so that doing get() on it will block properly
for _ in xrange(maxsize):
self.pool.put(None)
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
self.conn_kw = conn_kw
if self.proxy:
# Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
# We cannot know if the user has added default socket options, so we cannot replace the
# list.
self.conn_kw.setdefault("socket_options", [])
self.conn_kw["proxy"] = self.proxy
self.conn_kw["proxy_config"] = self.proxy_config
# Do not pass 'self' as callback to 'finalize'.
# Otherwise 'finalize' would keep a reference to self alive forever
# (a leak). Passing only a reference to the pool lets the garbage
# collector free self once nobody else holds a reference to it.
pool = self.pool
# Close all the HTTPConnections in the pool before the
# HTTPConnectionPool object is garbage collected.
weakref_finalize(self, _close_pool_connections, pool)
def _new_conn(self):
"""
Return a fresh :class:`HTTPConnection`.
"""
self.num_connections += 1
log.debug(
"Starting new HTTP connection (%d): %s:%s",
self.num_connections,
self.host,
self.port or "80",
)
conn = self.ConnectionCls(
host=self.host,
port=self.port,
timeout=self.timeout.connect_timeout,
strict=self.strict,
**self.conn_kw
)
return conn
def _get_conn(self, timeout=None):
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.")
except queue.Empty:
if self.block:
raise EmptyPoolError(
self,
"Pool reached maximum size and no more connections are allowed.",
)
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.debug("Resetting dropped connection: %s", self.host)
conn.close()
if getattr(conn, "auto_open", 1) == 0:
# This is a proxied connection that has been mutated by
# http.client._tunnel() and cannot be reused (since it would
# attempt to bypass the proxy)
conn = None
return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except queue.Full:
# This should never happen if self.block == True
log.warning(
"Connection pool is full, discarding connection: %s. Connection pool size: %s",
self.host,
self.pool.qsize(),
)
# Connection never got put back into the pool, close it.
if conn:
conn.close()
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
pass
def _prepare_proxy(self, conn):
# Nothing to do for HTTP connections.
pass
def _get_timeout(self, timeout):
"""Helper that always returns a :class:`urllib3.util.Timeout`"""
if timeout is _Default:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
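    # Illustrative note (not part of upstream urllib3): the three timeout
    # forms accepted by callers resolve through _get_timeout() as follows:
    #   urlopen("GET", "/")                      -> self.timeout.clone()
    #   urlopen("GET", "/", timeout=2.5)         -> Timeout.from_float(2.5)
    #   urlopen("GET", "/", timeout=Timeout(connect=2.0, read=7.0))
    #                                            -> a clone of that object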
def _raise_timeout(self, err, url, timeout_value):
"""Is the error actually a timeout? Will raise a ReadTimeout or pass"""
if isinstance(err, SocketTimeout):
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % timeout_value
)
# See the above comment about EAGAIN in Python 3. In Python 2 we have
# to specifically catch it and throw the timeout error
if hasattr(err, "errno") and err.errno in _blocking_errnos:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % timeout_value
)
# Catch possible read timeouts thrown as SSL errors. If not the
# case, rethrow the original. We need to do this because of:
# http://bugs.python.org/issue10272
if "timed out" in str(err) or "did not complete (read)" in str(
err
): # Python < 2.7.4
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % timeout_value
)
def _make_request(
self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw
):
"""
Perform a request on a given urllib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param timeout:
Socket timeout in seconds for the request. This can be a
float or integer, which will set the same timeout value for
the socket connect and the socket read, or an instance of
:class:`urllib3.util.Timeout`, which gives you more fine-grained
control over your timeouts.
"""
self.num_requests += 1
timeout_obj = self._get_timeout(timeout)
timeout_obj.start_connect()
conn.timeout = Timeout.resolve_default_timeout(timeout_obj.connect_timeout)
# Trigger any extra validation we need to do.
try:
self._validate_conn(conn)
except (SocketTimeout, BaseSSLError) as e:
# Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
raise
# conn.request() calls http.client.*.request, not the method in
# urllib3.request. It also calls makefile (recv) on the socket.
try:
if chunked:
conn.request_chunked(method, url, **httplib_request_kw)
else:
conn.request(method, url, **httplib_request_kw)
# We are swallowing BrokenPipeError (errno.EPIPE) since the server is
# legitimately able to close the connection after sending a valid response.
# With this behaviour, the received response is still readable.
except BrokenPipeError:
# Python 3
pass
except IOError as e:
# Python 2 and macOS/Linux
# EPIPE and ESHUTDOWN are BrokenPipeError on Python 2, and EPROTOTYPE is needed on macOS
# https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
if e.errno not in {
errno.EPIPE,
errno.ESHUTDOWN,
errno.EPROTOTYPE,
}:
raise
# Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout
# App Engine doesn't have a sock attr
if getattr(conn, "sock", None):
# In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching
# the exception and assuming all BadStatusLine exceptions are read
# timeouts, check for a zero timeout before making the request.
if read_timeout == 0:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % read_timeout
)
if read_timeout is Timeout.DEFAULT_TIMEOUT:
conn.sock.settimeout(socket.getdefaulttimeout())
else: # None or a value
conn.sock.settimeout(read_timeout)
# Receive the response from the server
try:
try:
# Python 2.7, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError:
# Python 3
try:
httplib_response = conn.getresponse()
except BaseException as e:
# Remove the TypeError from the exception chain in
# Python 3 (including for exceptions like SystemExit).
# Otherwise it looks like a bug in the code.
six.raise_from(e, None)
except (SocketTimeout, BaseSSLError, SocketError) as e:
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
raise
# AppEngine doesn't have a version attr.
http_version = getattr(conn, "_http_vsn_str", "HTTP/?")
log.debug(
'%s://%s:%s "%s %s %s" %s %s',
self.scheme,
self.host,
self.port,
method,
url,
http_version,
httplib_response.status,
httplib_response.length,
)
try:
assert_header_parsing(httplib_response.msg)
except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3
log.warning(
"Failed to parse headers (url=%s): %s",
self._absolute_url(url),
hpe,
exc_info=True,
)
return httplib_response
def _absolute_url(self, path):
return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
def close(self):
"""
Close all pooled connections and disable the pool.
"""
if self.pool is None:
return
# Disable access to the pool
old_pool, self.pool = self.pool, None
# Close all the HTTPConnections in the pool.
_close_pool_connections(old_pool)
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith("/"):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
if host is not None:
host = _normalize_host(host, scheme=scheme)
# Use explicit default port for comparison when none is given
if self.port and not port:
port = port_by_scheme.get(scheme)
elif not self.port and port == port_by_scheme.get(scheme):
port = None
return (scheme, host, port) == (self.scheme, self.host, self.port)
def urlopen(
self,
method,
url,
body=None,
headers=None,
retries=None,
redirect=True,
assert_same_host=True,
timeout=_Default,
pool_timeout=None,
release_conn=None,
chunked=False,
body_pos=None,
**response_kw
):
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param url:
The URL to perform the request on.
:param body:
Data to send in the request body, either :class:`str`, :class:`bytes`,
an iterable of :class:`str`/:class:`bytes`, or a file-like object.
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
        :param assert_same_host:
            If ``True``, will make sure that the host of the pool's requests is
            consistent, and will raise HostChangedError otherwise. When
            ``False``, you can use the pool on an HTTP proxy and request
            foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param chunked:
If True, urllib3 will send the body using chunked transfer
encoding. Otherwise, urllib3 will send the body using the standard
content-length form. Defaults to False.
:param int body_pos:
Position to seek to in file-like body in the event of a retry or
redirect. Typically this won't need to be set because urllib3 will
auto-populate the value when needed.
:param \\**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib`
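        A minimal usage sketch (the hostname and retry counts are
        illustrative)::
            >>> pool = HTTPConnectionPool('example.com', maxsize=2)
            >>> r = pool.urlopen('GET', '/', retries=Retry(3, redirect=2))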
"""
parsed_url = parse_url(url)
destination_scheme = parsed_url.scheme
if headers is None:
headers = self.headers
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
if release_conn is None:
release_conn = response_kw.get("preload_content", True)
# Check host
if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries)
# Ensure that the URL we're connecting to is properly encoded
if url.startswith("/"):
url = six.ensure_str(_encode_target(url))
else:
url = six.ensure_str(parsed_url.url)
conn = None
# Track whether `conn` needs to be released before
# returning/raising/recursing. Update this variable if necessary, and
# leave `release_conn` constant throughout the function. That way, if
# the function recurses, the original value of `release_conn` will be
# passed down into the recursive call, and its value will be respected.
#
# See issue #651 [1] for details.
#
# [1] <https://github.com/urllib3/urllib3/issues/651>
release_this_conn = release_conn
http_tunnel_required = connection_requires_http_tunnel(
self.proxy, self.proxy_config, destination_scheme
)
# Merge the proxy headers. Only done when not using HTTP CONNECT. We
# have to copy the headers dict so we can safely change it without those
# changes being reflected in anyone else's copy.
if not http_tunnel_required:
headers = headers.copy()
headers.update(self.proxy_headers)
# Must keep the exception bound to a separate variable or else Python 3
# complains about UnboundLocalError.
err = None
# Keep track of whether we cleanly exited the except block. This
# ensures we do proper cleanup in finally.
clean_exit = False
# Rewind body position, if needed. Record current position
# for future rewinds in the event of a redirect/retry.
body_pos = set_file_position(body, body_pos)
try:
# Request a connection from the queue.
timeout_obj = self._get_timeout(timeout)
conn = self._get_conn(timeout=pool_timeout)
conn.timeout = timeout_obj.connect_timeout
is_new_proxy_conn = self.proxy is not None and not getattr(
conn, "sock", None
)
if is_new_proxy_conn and http_tunnel_required:
self._prepare_proxy(conn)
# Make the request on the httplib connection object.
httplib_response = self._make_request(
conn,
method,
url,
timeout=timeout_obj,
body=body,
headers=headers,
chunked=chunked,
)
# If we're going to release the connection in ``finally:``, then
# the response doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = conn if not release_conn else None
# Pass method to Response for length checking
response_kw["request_method"] = method
# Import httplib's response into our own wrapper object
response = self.ResponseCls.from_httplib(
httplib_response,
pool=self,
connection=response_conn,
retries=retries,
**response_kw
)
# Everything went great!
clean_exit = True
except EmptyPoolError:
# Didn't get a connection from the pool, no need to clean up
clean_exit = True
release_this_conn = False
raise
except (
TimeoutError,
HTTPException,
SocketError,
ProtocolError,
BaseSSLError,
SSLError,
CertificateError,
) as e:
# Discard the connection for these exceptions. It will be
# replaced during the next _get_conn() call.
clean_exit = False
def _is_ssl_error_message_from_http_proxy(ssl_error):
# We're trying to detect the message 'WRONG_VERSION_NUMBER' but
# SSLErrors are kinda all over the place when it comes to the message,
# so we try to cover our bases here!
message = " ".join(re.split("[^a-z]", str(ssl_error).lower()))
return (
"wrong version number" in message or "unknown protocol" in message
)
# Try to detect a common user error with proxies which is to
# set an HTTP proxy to be HTTPS when it should be 'http://'
# (ie {'http': 'http://proxy', 'https': 'https://proxy'})
# Instead we add a nice error message and point to a URL.
if (
isinstance(e, BaseSSLError)
and self.proxy
and _is_ssl_error_message_from_http_proxy(e)
and conn.proxy
and conn.proxy.scheme == "https"
):
e = ProxyError(
"Your proxy appears to only use HTTP and not HTTPS, "
"try changing your proxy URL to be HTTP. See: "
"https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
"#https-proxy-error-http-proxy",
SSLError(e),
)
elif isinstance(e, (BaseSSLError, CertificateError)):
e = SSLError(e)
elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
e = ProxyError("Cannot connect to proxy.", e)
elif isinstance(e, (SocketError, HTTPException)):
e = ProtocolError("Connection aborted.", e)
retries = retries.increment(
method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
)
retries.sleep()
# Keep track of the error for the retry warning.
err = e
finally:
if not clean_exit:
# We hit some kind of exception, handled or otherwise. We need
# to throw the connection away unless explicitly told not to.
# Close the connection, set the variable to None, and make sure
# we put the None back in the pool to avoid leaking it.
conn = conn and conn.close()
release_this_conn = True
if release_this_conn:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if not conn:
# Try again
log.warning(
"Retrying (%r) after connection broken by '%r': %s", retries, err, url
)
return self.urlopen(
method,
url,
body,
headers,
retries,
redirect,
assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
**response_kw
)
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
if response.status == 303:
method = "GET"
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
response.drain_conn()
raise
return response
response.drain_conn()
retries.sleep_for_retry(response)
log.debug("Redirecting %s -> %s", url, redirect_location)
return self.urlopen(
method,
redirect_location,
body,
headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
**response_kw
)
# Check if we should retry the HTTP response.
has_retry_after = bool(response.headers.get("Retry-After"))
if retries.is_retry(method, response.status, has_retry_after):
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_status:
response.drain_conn()
raise
return response
response.drain_conn()
retries.sleep(response)
log.debug("Retry: %s", url)
return self.urlopen(
method,
url,
body,
headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
**response_kw
)
return response
class HTTPSConnectionPool(HTTPConnectionPool):
"""
Same as :class:`.HTTPConnectionPool`, but HTTPS.
:class:`.HTTPSConnection` uses one of ``assert_fingerprint``,
``assert_hostname`` and ``host`` in this order to verify connections.
If ``assert_hostname`` is False, no verification is done.
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`
is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
the connection socket into an SSL socket.
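    Example (a minimal sketch; the CA bundle path is illustrative)::
        >>> pool = HTTPSConnectionPool(
        ...     'example.com',
        ...     cert_reqs='CERT_REQUIRED',
        ...     ca_certs='/path/to/ca_bundle.pem',
        ... )
        >>> r = pool.request('GET', '/')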
"""
scheme = "https"
ConnectionCls = HTTPSConnection
def __init__(
self,
host,
port=None,
strict=False,
timeout=Timeout.DEFAULT_TIMEOUT,
maxsize=1,
block=False,
headers=None,
retries=None,
_proxy=None,
_proxy_headers=None,
key_file=None,
cert_file=None,
cert_reqs=None,
key_password=None,
ca_certs=None,
ssl_version=None,
assert_hostname=None,
assert_fingerprint=None,
ca_cert_dir=None,
**conn_kw
):
HTTPConnectionPool.__init__(
self,
host,
port,
strict,
timeout,
maxsize,
block,
headers,
retries,
_proxy,
_proxy_headers,
**conn_kw
)
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.key_password = key_password
self.ca_certs = ca_certs
self.ca_cert_dir = ca_cert_dir
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def _prepare_conn(self, conn):
"""
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used.
"""
if isinstance(conn, VerifiedHTTPSConnection):
conn.set_cert(
key_file=self.key_file,
key_password=self.key_password,
cert_file=self.cert_file,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs,
ca_cert_dir=self.ca_cert_dir,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint,
)
conn.ssl_version = self.ssl_version
return conn
def _prepare_proxy(self, conn):
"""
Establishes a tunnel connection through HTTP CONNECT.
        The tunnel is established early because otherwise httplib would
        improperly set the Host: header to the proxy's IP:port.
"""
conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers)
if self.proxy.scheme == "https":
conn.tls_in_tls_required = True
conn.connect()
def _new_conn(self):
"""
Return a fresh :class:`http.client.HTTPSConnection`.
"""
self.num_connections += 1
log.debug(
"Starting new HTTPS connection (%d): %s:%s",
self.num_connections,
self.host,
self.port or "443",
)
if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
raise SSLError(
"Can't connect to HTTPS URL because the SSL module is not available."
)
actual_host = self.host
actual_port = self.port
if self.proxy is not None:
actual_host = self.proxy.host
actual_port = self.proxy.port
conn = self.ConnectionCls(
host=actual_host,
port=actual_port,
timeout=self.timeout.connect_timeout,
strict=self.strict,
cert_file=self.cert_file,
key_file=self.key_file,
key_password=self.key_password,
**self.conn_kw
)
return self._prepare_conn(conn)
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
super(HTTPSConnectionPool, self)._validate_conn(conn)
# Force connect early to allow us to validate the connection.
if not getattr(conn, "sock", None): # AppEngine might not have `.sock`
conn.connect()
if not conn.is_verified:
warnings.warn(
(
"Unverified HTTPS request is being made to host '%s'. "
"Adding certificate verification is strongly advised. See: "
"https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
"#ssl-warnings" % conn.host
),
InsecureRequestWarning,
)
if getattr(conn, "proxy_is_verified", None) is False:
warnings.warn(
(
"Unverified HTTPS connection done to an HTTPS proxy. "
"Adding certificate verification is strongly advised. See: "
"https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
"#ssl-warnings"
),
InsecureRequestWarning,
)
def connection_from_url(url, **kw):
"""
    Given a url, return a :class:`.ConnectionPool` instance of its host.
    This is a shortcut for not having to parse out the scheme, host, and port
    of the url before creating a :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param \\**kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/')
"""
scheme, host, port = get_host(url)
port = port or port_by_scheme.get(scheme, 80)
if scheme == "https":
return HTTPSConnectionPool(host, port=port, **kw)
else:
return HTTPConnectionPool(host, port=port, **kw)
def _normalize_host(host, scheme):
"""
Normalize hosts for comparisons and use with sockets.
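    A doctest-style sketch of the bracket handling described below::
        >>> _normalize_host('[::1]', scheme='http')
        '::1'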
"""
host = normalize_host(host, scheme)
# httplib doesn't like it when we include brackets in IPv6 addresses
# Specifically, if we include brackets but also pass the port then
# httplib crazily doubles up the square brackets on the Host header.
# Instead, we need to make sure we never pass ``None`` as the port.
# However, for backward compatibility reasons we can't actually
# *assert* that. See http://bugs.python.org/issue28539
if host.startswith("[") and host.endswith("]"):
host = host[1:-1]
return host
def _close_pool_connections(pool):
"""Drains a queue of connections and closes each one."""
try:
while True:
conn = pool.get(block=False)
if conn:
conn.close()
except queue.Empty:
pass # Done.
| 39,990 | Python | 34.296558 | 106 | 0.58052 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/contrib/appengine.py | """
This module provides a pool manager that uses Google App Engine's
`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
Example usage::
from urllib3 import PoolManager
from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
if is_appengine_sandbox():
# AppEngineManager uses AppEngine's URLFetch API behind the scenes
http = AppEngineManager()
else:
# PoolManager uses a socket-level API behind the scenes
http = PoolManager()
r = http.request('GET', 'https://google.com/')
There are `limitations <https://cloud.google.com/appengine/docs/python/\
urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be
the best choice for your application. There are three options for using
urllib3 on Google App Engine:
1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
cost-effective in many circumstances as long as your usage is within the
limitations.
2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
Sockets also have `limitations and restrictions
<https://cloud.google.com/appengine/docs/python/sockets/\
#limitations-and-restrictions>`_ and have a lower free quota than URLFetch.
To use sockets, be sure to specify the following in your ``app.yaml``::
env_variables:
GAE_USE_SOCKETS_HTTPLIB : 'true'
3. If you are using `App Engine Flexible
<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard
:class:`PoolManager` without any configuration or special environment variables.
"""
from __future__ import absolute_import
import io
import logging
import warnings
from ..exceptions import (
HTTPError,
HTTPWarning,
MaxRetryError,
ProtocolError,
SSLError,
TimeoutError,
)
from ..packages.six.moves.urllib.parse import urljoin
from ..request import RequestMethods
from ..response import HTTPResponse
from ..util.retry import Retry
from ..util.timeout import Timeout
from . import _appengine_environ
try:
from google.appengine.api import urlfetch
except ImportError:
urlfetch = None
log = logging.getLogger(__name__)
class AppEnginePlatformWarning(HTTPWarning):
pass
class AppEnginePlatformError(HTTPError):
pass
class AppEngineManager(RequestMethods):
"""
Connection manager for Google App Engine sandbox applications.
This manager uses the URLFetch service directly instead of using the
emulated httplib, and is subject to URLFetch limitations as described in
the App Engine documentation `here
<https://cloud.google.com/appengine/docs/python/urlfetch>`_.
    Notably it will raise an :class:`AppEnginePlatformError` if:
    * URLFetch is not available.
    * You attempt to use it on App Engine Flexible, where full socket
      support is available.
    * A request size is more than 10 megabytes.
    * A response size is more than 32 megabytes.
    * You use an unsupported request method such as OPTIONS.
Beyond those cases, it will raise normal urllib3 errors.
"""
def __init__(
self,
headers=None,
retries=None,
validate_certificate=True,
urlfetch_retries=True,
):
if not urlfetch:
raise AppEnginePlatformError(
"URLFetch is not available in this environment."
)
warnings.warn(
"urllib3 is using URLFetch on Google App Engine sandbox instead "
"of sockets. To use sockets directly instead of URLFetch see "
"https://urllib3.readthedocs.io/en/1.26.x/reference/urllib3.contrib.html.",
AppEnginePlatformWarning,
)
RequestMethods.__init__(self, headers)
self.validate_certificate = validate_certificate
self.urlfetch_retries = urlfetch_retries
self.retries = retries or Retry.DEFAULT
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# Return False to re-raise any potential exceptions
return False
def urlopen(
self,
method,
url,
body=None,
headers=None,
retries=None,
redirect=True,
timeout=Timeout.DEFAULT_TIMEOUT,
**response_kw
):
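        """
        Make a request via the URLFetch service. This mirrors
        :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`, except
        that ``timeout`` is applied as a single total deadline because
        URLFetch has no separate connect/read timeouts (see
        :meth:`_get_absolute_timeout`).
        """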
retries = self._get_retries(retries, redirect)
try:
follow_redirects = redirect and retries.redirect != 0 and retries.total
response = urlfetch.fetch(
url,
payload=body,
method=method,
headers=headers or {},
allow_truncated=False,
follow_redirects=self.urlfetch_retries and follow_redirects,
deadline=self._get_absolute_timeout(timeout),
validate_certificate=self.validate_certificate,
)
except urlfetch.DeadlineExceededError as e:
raise TimeoutError(self, e)
except urlfetch.InvalidURLError as e:
if "too large" in str(e):
raise AppEnginePlatformError(
"URLFetch request too large, URLFetch only "
"supports requests up to 10mb in size.",
e,
)
raise ProtocolError(e)
except urlfetch.DownloadError as e:
if "Too many redirects" in str(e):
raise MaxRetryError(self, url, reason=e)
raise ProtocolError(e)
except urlfetch.ResponseTooLargeError as e:
raise AppEnginePlatformError(
"URLFetch response too large, URLFetch only supports"
"responses up to 32mb in size.",
e,
)
except urlfetch.SSLCertificateError as e:
raise SSLError(e)
except urlfetch.InvalidMethodError as e:
raise AppEnginePlatformError(
"URLFetch does not support method: %s" % method, e
)
http_response = self._urlfetch_response_to_http_response(
response, retries=retries, **response_kw
)
# Handle redirect?
redirect_location = redirect and http_response.get_redirect_location()
if redirect_location:
# Check for redirect response
if self.urlfetch_retries and retries.raise_on_redirect:
raise MaxRetryError(self, url, "too many redirects")
else:
if http_response.status == 303:
method = "GET"
try:
retries = retries.increment(
method, url, response=http_response, _pool=self
)
except MaxRetryError:
if retries.raise_on_redirect:
raise MaxRetryError(self, url, "too many redirects")
return http_response
retries.sleep_for_retry(http_response)
log.debug("Redirecting %s -> %s", url, redirect_location)
redirect_url = urljoin(url, redirect_location)
return self.urlopen(
method,
redirect_url,
body,
headers,
retries=retries,
redirect=redirect,
timeout=timeout,
**response_kw
)
# Check if we should retry the HTTP response.
has_retry_after = bool(http_response.headers.get("Retry-After"))
if retries.is_retry(method, http_response.status, has_retry_after):
retries = retries.increment(method, url, response=http_response, _pool=self)
log.debug("Retry: %s", url)
retries.sleep(http_response)
return self.urlopen(
method,
url,
body=body,
headers=headers,
retries=retries,
redirect=redirect,
timeout=timeout,
**response_kw
)
return http_response
def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
if is_prod_appengine():
# Production GAE handles deflate encoding automatically, but does
# not remove the encoding header.
content_encoding = urlfetch_resp.headers.get("content-encoding")
if content_encoding == "deflate":
del urlfetch_resp.headers["content-encoding"]
transfer_encoding = urlfetch_resp.headers.get("transfer-encoding")
# We have a full response's content,
# so let's make sure we don't report ourselves as chunked data.
if transfer_encoding == "chunked":
encodings = transfer_encoding.split(",")
encodings.remove("chunked")
urlfetch_resp.headers["transfer-encoding"] = ",".join(encodings)
original_response = HTTPResponse(
# In order for decoding to work, we must present the content as
# a file-like object.
body=io.BytesIO(urlfetch_resp.content),
msg=urlfetch_resp.header_msg,
headers=urlfetch_resp.headers,
status=urlfetch_resp.status_code,
**response_kw
)
return HTTPResponse(
body=io.BytesIO(urlfetch_resp.content),
headers=urlfetch_resp.headers,
status=urlfetch_resp.status_code,
original_response=original_response,
**response_kw
)
def _get_absolute_timeout(self, timeout):
if timeout is Timeout.DEFAULT_TIMEOUT:
return None # Defer to URLFetch's default.
if isinstance(timeout, Timeout):
if timeout._read is not None or timeout._connect is not None:
warnings.warn(
"URLFetch does not support granular timeout settings, "
"reverting to total or default URLFetch timeout.",
AppEnginePlatformWarning,
)
return timeout.total
return timeout
def _get_retries(self, retries, redirect):
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
if retries.connect or retries.read or retries.redirect:
warnings.warn(
"URLFetch only supports total retries and does not "
"recognize connect, read, or redirect retry parameters.",
AppEnginePlatformWarning,
)
return retries
# Alias methods from _appengine_environ to maintain public API interface.
is_appengine = _appengine_environ.is_appengine
is_appengine_sandbox = _appengine_environ.is_appengine_sandbox
is_local_appengine = _appengine_environ.is_local_appengine
is_prod_appengine = _appengine_environ.is_prod_appengine
is_prod_appengine_mvms = _appengine_environ.is_prod_appengine_mvms
| 11,012 | Python | 33.961905 | 88 | 0.612241 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/contrib/socks.py | # -*- coding: utf-8 -*-
"""
This module contains provisional support for SOCKS proxies from within
urllib3. This module supports SOCKS4, SOCKS4A (an extension of SOCKS4), and
SOCKS5. To enable its functionality, either install PySocks or install this
module with the ``socks`` extra.
The SOCKS implementation supports the full range of urllib3 features. It also
supports the following SOCKS features:
- SOCKS4A (``proxy_url='socks4a://...``)
- SOCKS4 (``proxy_url='socks4://...``)
- SOCKS5 with remote DNS (``proxy_url='socks5h://...``)
- SOCKS5 with local DNS (``proxy_url='socks5://...``)
- Usernames and passwords for the SOCKS proxy
.. note::
It is recommended to use ``socks5h://`` or ``socks4a://`` schemes in
your ``proxy_url`` to ensure that DNS resolution is done from the remote
server instead of client-side when connecting to a domain name.
SOCKS4 supports IPv4 and domain names with the SOCKS4A extension. SOCKS5
supports IPv4, IPv6, and domain names.
When connecting to a SOCKS4 proxy the ``username`` portion of the ``proxy_url``
will be sent as the ``userid`` section of the SOCKS request:
.. code-block:: python
proxy_url="socks4a://<userid>@proxy-host"
When connecting to a SOCKS5 proxy the ``username`` and ``password`` portion
of the ``proxy_url`` will be sent as the username/password to authenticate
with the proxy:
.. code-block:: python
proxy_url="socks5h://<username>:<password>@proxy-host"
"""
from __future__ import absolute_import
try:
import socks
except ImportError:
import warnings
from ..exceptions import DependencyWarning
warnings.warn(
(
"SOCKS support in urllib3 requires the installation of optional "
"dependencies: specifically, PySocks. For more information, see "
"https://urllib3.readthedocs.io/en/1.26.x/contrib.html#socks-proxies"
),
DependencyWarning,
)
raise
from socket import error as SocketError
from socket import timeout as SocketTimeout
from ..connection import HTTPConnection, HTTPSConnection
from ..connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from ..exceptions import ConnectTimeoutError, NewConnectionError
from ..poolmanager import PoolManager
from ..util.url import parse_url
try:
import ssl
except ImportError:
ssl = None
class SOCKSConnection(HTTPConnection):
"""
A plain-text HTTP connection that connects via a SOCKS proxy.
"""
def __init__(self, *args, **kwargs):
self._socks_options = kwargs.pop("_socks_options")
super(SOCKSConnection, self).__init__(*args, **kwargs)
def _new_conn(self):
"""
Establish a new connection via the SOCKS proxy.
"""
extra_kw = {}
if self.source_address:
extra_kw["source_address"] = self.source_address
if self.socket_options:
extra_kw["socket_options"] = self.socket_options
try:
conn = socks.create_connection(
(self.host, self.port),
proxy_type=self._socks_options["socks_version"],
proxy_addr=self._socks_options["proxy_host"],
proxy_port=self._socks_options["proxy_port"],
proxy_username=self._socks_options["username"],
proxy_password=self._socks_options["password"],
proxy_rdns=self._socks_options["rdns"],
timeout=self.timeout,
**extra_kw
)
except SocketTimeout:
raise ConnectTimeoutError(
self,
"Connection to %s timed out. (connect timeout=%s)"
% (self.host, self.timeout),
)
except socks.ProxyError as e:
# This is fragile as hell, but it seems to be the only way to raise
# useful errors here.
if e.socket_err:
error = e.socket_err
if isinstance(error, SocketTimeout):
raise ConnectTimeoutError(
self,
"Connection to %s timed out. (connect timeout=%s)"
% (self.host, self.timeout),
)
else:
raise NewConnectionError(
self, "Failed to establish a new connection: %s" % error
)
else:
raise NewConnectionError(
self, "Failed to establish a new connection: %s" % e
)
except SocketError as e: # Defensive: PySocks should catch all these.
raise NewConnectionError(
self, "Failed to establish a new connection: %s" % e
)
return conn
# We don't need to duplicate the Verified/Unverified distinction from
# urllib3/connection.py here because the HTTPSConnection will already have been
# correctly set to either the Verified or Unverified form by that module. This
# means the SOCKSHTTPSConnection will automatically be the correct type.
class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
pass
class SOCKSHTTPConnectionPool(HTTPConnectionPool):
ConnectionCls = SOCKSConnection
class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
ConnectionCls = SOCKSHTTPSConnection
class SOCKSProxyManager(PoolManager):
"""
A version of the urllib3 ProxyManager that routes connections via the
defined SOCKS proxy.
"""
pool_classes_by_scheme = {
"http": SOCKSHTTPConnectionPool,
"https": SOCKSHTTPSConnectionPool,
}
def __init__(
self,
proxy_url,
username=None,
password=None,
num_pools=10,
headers=None,
**connection_pool_kw
):
parsed = parse_url(proxy_url)
if username is None and password is None and parsed.auth is not None:
split = parsed.auth.split(":")
if len(split) == 2:
username, password = split
if parsed.scheme == "socks5":
socks_version = socks.PROXY_TYPE_SOCKS5
rdns = False
elif parsed.scheme == "socks5h":
socks_version = socks.PROXY_TYPE_SOCKS5
rdns = True
elif parsed.scheme == "socks4":
socks_version = socks.PROXY_TYPE_SOCKS4
rdns = False
elif parsed.scheme == "socks4a":
socks_version = socks.PROXY_TYPE_SOCKS4
rdns = True
else:
raise ValueError("Unable to determine SOCKS version from %s" % proxy_url)
self.proxy_url = proxy_url
socks_options = {
"socks_version": socks_version,
"proxy_host": parsed.host,
"proxy_port": parsed.port,
"username": username,
"password": password,
"rdns": rdns,
}
connection_pool_kw["_socks_options"] = socks_options
super(SOCKSProxyManager, self).__init__(
num_pools, headers, **connection_pool_kw
)
self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
| 7,097 | Python | 31.709677 | 85 | 0.61364 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/contrib/pyopenssl.py | """
TLS with SNI_-support for Python 2. Follow these instructions if you would
like to verify TLS certificates in Python 2. Note that the default libraries do
*not* do certificate checking; you need to do additional work to validate
certificates yourself.
This needs the following packages installed:
* `pyOpenSSL`_ (tested with 16.0.0)
* `cryptography`_ (minimum 1.3.4, from pyopenssl)
* `idna`_ (minimum 2.0, from cryptography)
However, pyopenssl depends on cryptography, which depends on idna, so while we
use all three directly here we end up having relatively few packages required.
You can install them with the following command:
.. code-block:: bash
$ python -m pip install pyopenssl cryptography idna
To activate certificate checking, call
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
before you begin making HTTP requests. This can be done in a ``sitecustomize``
module, or at any other time before your application begins using ``urllib3``,
like this:
.. code-block:: python
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
Activating this module also has the positive side effect of disabling SSL/TLS
compression in Python 2 (see `CRIME attack`_).
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
.. _pyopenssl: https://www.pyopenssl.org
.. _cryptography: https://cryptography.io
.. _idna: https://github.com/kjd/idna
"""
from __future__ import absolute_import
import OpenSSL.crypto
import OpenSSL.SSL
from cryptography import x509
from cryptography.hazmat.backends.openssl import backend as openssl_backend
try:
from cryptography.x509 import UnsupportedExtension
except ImportError:
# UnsupportedExtension is gone in cryptography >= 2.1.0
class UnsupportedExtension(Exception):
pass
from io import BytesIO
from socket import error as SocketError
from socket import timeout
try: # Platform-specific: Python 2
from socket import _fileobject
except ImportError: # Platform-specific: Python 3
_fileobject = None
from ..packages.backports.makefile import backport_makefile
import logging
import ssl
import sys
import warnings
from .. import util
from ..packages import six
from ..util.ssl_ import PROTOCOL_TLS_CLIENT
warnings.warn(
"'urllib3.contrib.pyopenssl' module is deprecated and will be removed "
"in a future release of urllib3 2.x. Read more in this issue: "
"https://github.com/urllib3/urllib3/issues/2680",
category=DeprecationWarning,
stacklevel=2,
)
__all__ = ["inject_into_urllib3", "extract_from_urllib3"]
# SNI always works.
HAS_SNI = True
# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
util.PROTOCOL_TLS: OpenSSL.SSL.SSLv23_METHOD,
PROTOCOL_TLS_CLIENT: OpenSSL.SSL.SSLv23_METHOD,
ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}
if hasattr(ssl, "PROTOCOL_SSLv3") and hasattr(OpenSSL.SSL, "SSLv3_METHOD"):
_openssl_versions[ssl.PROTOCOL_SSLv3] = OpenSSL.SSL.SSLv3_METHOD
if hasattr(ssl, "PROTOCOL_TLSv1_1") and hasattr(OpenSSL.SSL, "TLSv1_1_METHOD"):
_openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD
if hasattr(ssl, "PROTOCOL_TLSv1_2") and hasattr(OpenSSL.SSL, "TLSv1_2_METHOD"):
_openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD
_stdlib_to_openssl_verify = {
ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
+ OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
_openssl_to_stdlib_verify = dict((v, k) for k, v in _stdlib_to_openssl_verify.items())
# OpenSSL will only write 16K at a time
SSL_WRITE_BLOCKSIZE = 16384
orig_util_HAS_SNI = util.HAS_SNI
orig_util_SSLContext = util.ssl_.SSLContext
log = logging.getLogger(__name__)
def inject_into_urllib3():
"Monkey-patch urllib3 with PyOpenSSL-backed SSL-support."
_validate_dependencies_met()
util.SSLContext = PyOpenSSLContext
util.ssl_.SSLContext = PyOpenSSLContext
util.HAS_SNI = HAS_SNI
util.ssl_.HAS_SNI = HAS_SNI
util.IS_PYOPENSSL = True
util.ssl_.IS_PYOPENSSL = True
def extract_from_urllib3():
"Undo monkey-patching by :func:`inject_into_urllib3`."
util.SSLContext = orig_util_SSLContext
util.ssl_.SSLContext = orig_util_SSLContext
util.HAS_SNI = orig_util_HAS_SNI
util.ssl_.HAS_SNI = orig_util_HAS_SNI
util.IS_PYOPENSSL = False
util.ssl_.IS_PYOPENSSL = False
def _validate_dependencies_met():
"""
Verifies that PyOpenSSL's package-level dependencies have been met.
Throws `ImportError` if they are not met.
"""
# Method added in `cryptography==1.1`; not available in older versions
from cryptography.x509.extensions import Extensions
if getattr(Extensions, "get_extension_for_class", None) is None:
raise ImportError(
"'cryptography' module missing required functionality. "
"Try upgrading to v1.3.4 or newer."
)
# pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
# attribute is only present on those versions.
from OpenSSL.crypto import X509
x509 = X509()
if getattr(x509, "_x509", None) is None:
raise ImportError(
"'pyOpenSSL' module missing required functionality. "
"Try upgrading to v0.14 or newer."
)
def _dnsname_to_stdlib(name):
"""
Converts a dNSName SubjectAlternativeName field to the form used by the
standard library on the given Python version.
Cryptography produces a dNSName as a unicode string that was idna-decoded
from ASCII bytes. We need to idna-encode that string to get it back, and
then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
If the name cannot be idna-encoded then we return None signalling that
the name given should be skipped.
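    A doctest-style sketch of the wildcard handling (name illustrative;
    requires the ``idna`` package)::
        >>> _dnsname_to_stdlib(u"*.example.com")
        '*.example.com'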
"""
def idna_encode(name):
"""
Borrowed wholesale from the Python Cryptography Project. It turns out
that we can't just safely call `idna.encode`: it can explode for
wildcard names. This avoids that problem.
"""
import idna
try:
for prefix in [u"*.", u"."]:
if name.startswith(prefix):
name = name[len(prefix) :]
return prefix.encode("ascii") + idna.encode(name)
return idna.encode(name)
except idna.core.IDNAError:
return None
# Don't send IPv6 addresses through the IDNA encoder.
if ":" in name:
return name
name = idna_encode(name)
if name is None:
return None
elif sys.version_info >= (3, 0):
name = name.decode("utf-8")
return name
def get_subj_alt_name(peer_cert):
"""
    Given a PyOpenSSL certificate, provides all the subject alternative names.
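    A sketch of the return shape (values illustrative)::
        [('DNS', 'example.com'), ('IP Address', '93.184.216.34')]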
"""
# Pass the cert to cryptography, which has much better APIs for this.
if hasattr(peer_cert, "to_cryptography"):
cert = peer_cert.to_cryptography()
else:
der = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, peer_cert)
cert = x509.load_der_x509_certificate(der, openssl_backend)
# We want to find the SAN extension. Ask Cryptography to locate it (it's
# faster than looping in Python)
try:
ext = cert.extensions.get_extension_for_class(x509.SubjectAlternativeName).value
except x509.ExtensionNotFound:
# No such extension, return the empty list.
return []
except (
x509.DuplicateExtension,
UnsupportedExtension,
x509.UnsupportedGeneralNameType,
UnicodeError,
) as e:
# A problem has been found with the quality of the certificate. Assume
# no SAN field is present.
log.warning(
"A problem was encountered with the certificate that prevented "
"urllib3 from finding the SubjectAlternativeName field. This can "
"affect certificate validation. The error was %s",
e,
)
return []
# We want to return dNSName and iPAddress fields. We need to cast the IPs
# back to strings because the match_hostname function wants them as
# strings.
# Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
# decoded. This is pretty frustrating, but that's what the standard library
# does with certificates, and so we need to attempt to do the same.
# We also want to skip over names which cannot be idna encoded.
names = [
("DNS", name)
for name in map(_dnsname_to_stdlib, ext.get_values_for_type(x509.DNSName))
if name is not None
]
names.extend(
("IP Address", str(name)) for name in ext.get_values_for_type(x509.IPAddress)
)
return names
class WrappedSocket(object):
"""API-compatibility wrapper for Python OpenSSL's Connection-class.
Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
collector of pypy.
"""
def __init__(self, connection, socket, suppress_ragged_eofs=True):
self.connection = connection
self.socket = socket
self.suppress_ragged_eofs = suppress_ragged_eofs
self._makefile_refs = 0
self._closed = False
def fileno(self):
return self.socket.fileno()
# Copy-pasted from Python 3.5 source code
def _decref_socketios(self):
if self._makefile_refs > 0:
self._makefile_refs -= 1
if self._closed:
self.close()
def recv(self, *args, **kwargs):
try:
data = self.connection.recv(*args, **kwargs)
except OpenSSL.SSL.SysCallError as e:
if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"):
return b""
else:
raise SocketError(str(e))
except OpenSSL.SSL.ZeroReturnError:
if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
return b""
else:
raise
except OpenSSL.SSL.WantReadError:
if not util.wait_for_read(self.socket, self.socket.gettimeout()):
raise timeout("The read operation timed out")
else:
return self.recv(*args, **kwargs)
# TLS 1.3 post-handshake authentication
except OpenSSL.SSL.Error as e:
raise ssl.SSLError("read error: %r" % e)
else:
return data
def recv_into(self, *args, **kwargs):
try:
return self.connection.recv_into(*args, **kwargs)
except OpenSSL.SSL.SysCallError as e:
if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"):
return 0
else:
raise SocketError(str(e))
except OpenSSL.SSL.ZeroReturnError:
if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
return 0
else:
raise
except OpenSSL.SSL.WantReadError:
if not util.wait_for_read(self.socket, self.socket.gettimeout()):
raise timeout("The read operation timed out")
else:
return self.recv_into(*args, **kwargs)
# TLS 1.3 post-handshake authentication
except OpenSSL.SSL.Error as e:
raise ssl.SSLError("read error: %r" % e)
def settimeout(self, timeout):
return self.socket.settimeout(timeout)
def _send_until_done(self, data):
while True:
try:
return self.connection.send(data)
except OpenSSL.SSL.WantWriteError:
if not util.wait_for_write(self.socket, self.socket.gettimeout()):
raise timeout()
continue
except OpenSSL.SSL.SysCallError as e:
raise SocketError(str(e))
def sendall(self, data):
total_sent = 0
while total_sent < len(data):
sent = self._send_until_done(
data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE]
)
total_sent += sent
def shutdown(self):
# FIXME rethrow compatible exceptions should we ever use this
self.connection.shutdown()
def close(self):
if self._makefile_refs < 1:
try:
self._closed = True
return self.connection.close()
except OpenSSL.SSL.Error:
return
else:
self._makefile_refs -= 1
def getpeercert(self, binary_form=False):
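        """
        Mimic the stdlib ``SSLSocket.getpeercert``. A sketch of the
        non-binary return shape (values illustrative)::
            {
                "subject": ((("commonName", "example.com"),),),
                "subjectAltName": [("DNS", "example.com")],
            }
        """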
x509 = self.connection.get_peer_certificate()
if not x509:
return x509
if binary_form:
return OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, x509)
return {
"subject": ((("commonName", x509.get_subject().CN),),),
"subjectAltName": get_subj_alt_name(x509),
}
def version(self):
return self.connection.get_protocol_version_name()
def _reuse(self):
self._makefile_refs += 1
def _drop(self):
if self._makefile_refs < 1:
self.close()
else:
self._makefile_refs -= 1
if _fileobject: # Platform-specific: Python 2
def makefile(self, mode, bufsize=-1):
self._makefile_refs += 1
return _fileobject(self, mode, bufsize, close=True)
else: # Platform-specific: Python 3
makefile = backport_makefile
WrappedSocket.makefile = makefile
class PyOpenSSLContext(object):
"""
I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
for translating the interface of the standard library ``SSLContext`` object
to calls into PyOpenSSL.
"""
def __init__(self, protocol):
self.protocol = _openssl_versions[protocol]
self._ctx = OpenSSL.SSL.Context(self.protocol)
self._options = 0
self.check_hostname = False
@property
def options(self):
return self._options
@options.setter
def options(self, value):
self._options = value
self._ctx.set_options(value)
@property
def verify_mode(self):
return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]
@verify_mode.setter
def verify_mode(self, value):
self._ctx.set_verify(_stdlib_to_openssl_verify[value], _verify_callback)
def set_default_verify_paths(self):
self._ctx.set_default_verify_paths()
def set_ciphers(self, ciphers):
if isinstance(ciphers, six.text_type):
ciphers = ciphers.encode("utf-8")
self._ctx.set_cipher_list(ciphers)
def load_verify_locations(self, cafile=None, capath=None, cadata=None):
if cafile is not None:
cafile = cafile.encode("utf-8")
if capath is not None:
capath = capath.encode("utf-8")
try:
self._ctx.load_verify_locations(cafile, capath)
if cadata is not None:
self._ctx.load_verify_locations(BytesIO(cadata))
except OpenSSL.SSL.Error as e:
raise ssl.SSLError("unable to load trusted certificates: %r" % e)
def load_cert_chain(self, certfile, keyfile=None, password=None):
self._ctx.use_certificate_chain_file(certfile)
if password is not None:
if not isinstance(password, six.binary_type):
password = password.encode("utf-8")
self._ctx.set_passwd_cb(lambda *_: password)
self._ctx.use_privatekey_file(keyfile or certfile)
def set_alpn_protocols(self, protocols):
protocols = [six.ensure_binary(p) for p in protocols]
return self._ctx.set_alpn_protos(protocols)
def wrap_socket(
self,
sock,
server_side=False,
do_handshake_on_connect=True,
suppress_ragged_eofs=True,
server_hostname=None,
):
cnx = OpenSSL.SSL.Connection(self._ctx, sock)
if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3
server_hostname = server_hostname.encode("utf-8")
if server_hostname is not None:
cnx.set_tlsext_host_name(server_hostname)
cnx.set_connect_state()
while True:
try:
cnx.do_handshake()
except OpenSSL.SSL.WantReadError:
if not util.wait_for_read(sock, sock.gettimeout()):
raise timeout("select timed out")
continue
except OpenSSL.SSL.Error as e:
raise ssl.SSLError("bad handshake: %r" % e)
break
return WrappedSocket(cnx, sock)
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
return err_no == 0
| 17,055 | Python | 31.863198 | 88 | 0.640575 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/contrib/securetransport.py | """
SecureTransport support for urllib3 via ctypes.
This makes platform-native TLS available to urllib3 users on macOS without the
use of a compiler. This is an important feature because the Python Package
Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL
that ships with macOS is not capable of doing TLSv1.2. The only way to resolve
this is to give macOS users an alternative solution to the problem, and that
solution is to use SecureTransport.
We use ctypes here because this solution must not require a compiler. That's
because pip is not allowed to require a compiler either.
This is not intended to be a seriously long-term solution to this problem.
The hope is that PEP 543 will eventually solve this issue for us, at which
point we can retire this contrib module. But in the short term, we need to
solve the impending tire fire that is Python on Mac without this kind of
contrib module. So...here we are.
To use this module, simply import and inject it::
import urllib3.contrib.securetransport
urllib3.contrib.securetransport.inject_into_urllib3()
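    # After injection, urllib3 can be used as normal (a sketch; the URL is
    # illustrative):
    import urllib3
    http = urllib3.PoolManager()
    r = http.request("GET", "https://example.com/")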
Happy TLSing!
This code is a bastardised version of the code found in Will Bond's oscrypto
library. An enormous debt is owed to him for blazing this trail for us. For
that reason, this code should be considered to be covered both by urllib3's
license and by oscrypto's:
.. code-block::
Copyright (c) 2015-2016 Will Bond <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
import contextlib
import ctypes
import errno
import os.path
import shutil
import socket
import ssl
import struct
import threading
import weakref
import six
from .. import util
from ..util.ssl_ import PROTOCOL_TLS_CLIENT
from ._securetransport.bindings import CoreFoundation, Security, SecurityConst
from ._securetransport.low_level import (
_assert_no_error,
_build_tls_unknown_ca_alert,
_cert_array_from_pem,
_create_cfstring_array,
_load_client_cert_chain,
_temporary_keychain,
)
try: # Platform-specific: Python 2
from socket import _fileobject
except ImportError: # Platform-specific: Python 3
_fileobject = None
from ..packages.backports.makefile import backport_makefile
__all__ = ["inject_into_urllib3", "extract_from_urllib3"]
# SNI always works
HAS_SNI = True
orig_util_HAS_SNI = util.HAS_SNI
orig_util_SSLContext = util.ssl_.SSLContext
# This dictionary is used by the read callback to obtain a handle to the
# calling wrapped socket. This is a pretty silly approach, but for now it'll
# do. I feel like I should be able to smuggle a handle to the wrapped socket
# directly in the SSLConnectionRef, but for now this approach will work I
# guess.
#
# We need to lock around this structure for inserts, but we don't do it for
# reads/writes in the callbacks. The reasoning here goes as follows:
#
# 1. It is not possible to call into the callbacks before the dictionary is
# populated, so once in the callback the id must be in the dictionary.
# 2. The callbacks don't mutate the dictionary, they only read from it, and
# so cannot conflict with any of the insertions.
#
# This is good: if we had to lock in the callbacks we'd drastically slow down
# the performance of this code.
_connection_refs = weakref.WeakValueDictionary()
_connection_ref_lock = threading.Lock()
# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over
# for no better reason than we need *a* limit, and this one is right there.
SSL_WRITE_BLOCKSIZE = 16384
# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to
# individual cipher suites. We need to do this because this is how
# SecureTransport wants them.
CIPHER_SUITES = [
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
SecurityConst.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
SecurityConst.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,
SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
SecurityConst.TLS_AES_256_GCM_SHA384,
SecurityConst.TLS_AES_128_GCM_SHA256,
SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384,
SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256,
SecurityConst.TLS_AES_128_CCM_8_SHA256,
SecurityConst.TLS_AES_128_CCM_SHA256,
SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256,
SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256,
SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA,
SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA,
]
# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of
# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version.
# TLSv1 to 1.2 are supported on macOS 10.8+
_protocol_to_min_max = {
util.PROTOCOL_TLS: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12),
PROTOCOL_TLS_CLIENT: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12),
}
if hasattr(ssl, "PROTOCOL_SSLv2"):
_protocol_to_min_max[ssl.PROTOCOL_SSLv2] = (
SecurityConst.kSSLProtocol2,
SecurityConst.kSSLProtocol2,
)
if hasattr(ssl, "PROTOCOL_SSLv3"):
_protocol_to_min_max[ssl.PROTOCOL_SSLv3] = (
SecurityConst.kSSLProtocol3,
SecurityConst.kSSLProtocol3,
)
if hasattr(ssl, "PROTOCOL_TLSv1"):
_protocol_to_min_max[ssl.PROTOCOL_TLSv1] = (
SecurityConst.kTLSProtocol1,
SecurityConst.kTLSProtocol1,
)
if hasattr(ssl, "PROTOCOL_TLSv1_1"):
_protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = (
SecurityConst.kTLSProtocol11,
SecurityConst.kTLSProtocol11,
)
if hasattr(ssl, "PROTOCOL_TLSv1_2"):
_protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = (
SecurityConst.kTLSProtocol12,
SecurityConst.kTLSProtocol12,
)
def inject_into_urllib3():
"""
Monkey-patch urllib3 with SecureTransport-backed SSL-support.
"""
util.SSLContext = SecureTransportContext
util.ssl_.SSLContext = SecureTransportContext
util.HAS_SNI = HAS_SNI
util.ssl_.HAS_SNI = HAS_SNI
util.IS_SECURETRANSPORT = True
util.ssl_.IS_SECURETRANSPORT = True
def extract_from_urllib3():
"""
Undo monkey-patching by :func:`inject_into_urllib3`.
"""
util.SSLContext = orig_util_SSLContext
util.ssl_.SSLContext = orig_util_SSLContext
util.HAS_SNI = orig_util_HAS_SNI
util.ssl_.HAS_SNI = orig_util_HAS_SNI
util.IS_SECURETRANSPORT = False
util.ssl_.IS_SECURETRANSPORT = False
def _read_callback(connection_id, data_buffer, data_length_pointer):
"""
SecureTransport read callback. This is called by ST to request that data
be returned from the socket.
"""
wrapped_socket = None
try:
wrapped_socket = _connection_refs.get(connection_id)
if wrapped_socket is None:
return SecurityConst.errSSLInternal
base_socket = wrapped_socket.socket
requested_length = data_length_pointer[0]
timeout = wrapped_socket.gettimeout()
error = None
read_count = 0
try:
while read_count < requested_length:
if timeout is None or timeout >= 0:
if not util.wait_for_read(base_socket, timeout):
raise socket.error(errno.EAGAIN, "timed out")
remaining = requested_length - read_count
buffer = (ctypes.c_char * remaining).from_address(
data_buffer + read_count
)
chunk_size = base_socket.recv_into(buffer, remaining)
read_count += chunk_size
if not chunk_size:
if not read_count:
return SecurityConst.errSSLClosedGraceful
break
except (socket.error) as e:
error = e.errno
if error is not None and error != errno.EAGAIN:
data_length_pointer[0] = read_count
if error == errno.ECONNRESET or error == errno.EPIPE:
return SecurityConst.errSSLClosedAbort
raise
data_length_pointer[0] = read_count
if read_count != requested_length:
return SecurityConst.errSSLWouldBlock
return 0
except Exception as e:
if wrapped_socket is not None:
wrapped_socket._exception = e
return SecurityConst.errSSLInternal
def _write_callback(connection_id, data_buffer, data_length_pointer):
"""
SecureTransport write callback. This is called by ST to request that data
actually be sent on the network.
"""
wrapped_socket = None
try:
wrapped_socket = _connection_refs.get(connection_id)
if wrapped_socket is None:
return SecurityConst.errSSLInternal
base_socket = wrapped_socket.socket
bytes_to_write = data_length_pointer[0]
data = ctypes.string_at(data_buffer, bytes_to_write)
timeout = wrapped_socket.gettimeout()
error = None
sent = 0
try:
while sent < bytes_to_write:
if timeout is None or timeout >= 0:
if not util.wait_for_write(base_socket, timeout):
raise socket.error(errno.EAGAIN, "timed out")
chunk_sent = base_socket.send(data)
sent += chunk_sent
# This has some needless copying here, but I'm not sure there's
# much value in optimising this data path.
data = data[chunk_sent:]
except (socket.error) as e:
error = e.errno
if error is not None and error != errno.EAGAIN:
data_length_pointer[0] = sent
if error == errno.ECONNRESET or error == errno.EPIPE:
return SecurityConst.errSSLClosedAbort
raise
data_length_pointer[0] = sent
if sent != bytes_to_write:
return SecurityConst.errSSLWouldBlock
return 0
except Exception as e:
if wrapped_socket is not None:
wrapped_socket._exception = e
return SecurityConst.errSSLInternal
# We need to keep these two objects references alive: if they get GC'd while
# in use then SecureTransport could attempt to call a function that is in freed
# memory. That would be...uh...bad. Yeah, that's the word. Bad.
_read_callback_pointer = Security.SSLReadFunc(_read_callback)
_write_callback_pointer = Security.SSLWriteFunc(_write_callback)
class WrappedSocket(object):
"""
API-compatibility wrapper for Python's OpenSSL wrapped socket object.
Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage
collector of PyPy.
"""
def __init__(self, socket):
self.socket = socket
self.context = None
self._makefile_refs = 0
self._closed = False
self._exception = None
self._keychain = None
self._keychain_dir = None
self._client_cert_chain = None
# We save off the previously-configured timeout and then set it to
# zero. This is done because we use select and friends to handle the
# timeouts, but if we leave the timeout set on the lower socket then
# Python will "kindly" call select on that socket again for us. Avoid
# that by forcing the timeout to zero.
self._timeout = self.socket.gettimeout()
self.socket.settimeout(0)
@contextlib.contextmanager
def _raise_on_error(self):
"""
A context manager that can be used to wrap calls that do I/O from
SecureTransport. If any of the I/O callbacks hit an exception, this
context manager will correctly propagate the exception after the fact.
This avoids silently swallowing those exceptions.
It also correctly forces the socket closed.
"""
self._exception = None
# We explicitly don't catch around this yield because in the unlikely
# event that an exception was hit in the block we don't want to swallow
# it.
yield
if self._exception is not None:
exception, self._exception = self._exception, None
self.close()
raise exception
def _set_ciphers(self):
"""
Sets up the allowed ciphers. By default this matches the set in
        util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. The set is
        hard-coded and cannot be changed at this time, mostly because parsing
        OpenSSL cipher strings is going to be a freaking nightmare.
"""
ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES)
result = Security.SSLSetEnabledCiphers(
self.context, ciphers, len(CIPHER_SUITES)
)
_assert_no_error(result)
def _set_alpn_protocols(self, protocols):
"""
Sets up the ALPN protocols on the context.
"""
if not protocols:
return
protocols_arr = _create_cfstring_array(protocols)
try:
result = Security.SSLSetALPNProtocols(self.context, protocols_arr)
_assert_no_error(result)
finally:
CoreFoundation.CFRelease(protocols_arr)
def _custom_validate(self, verify, trust_bundle):
"""
Called when we have set custom validation. We do this in two cases:
first, when cert validation is entirely disabled; and second, when
using a custom trust DB.
Raises an SSLError if the connection is not trusted.
"""
# If we disabled cert validation, just say: cool.
if not verify:
return
successes = (
SecurityConst.kSecTrustResultUnspecified,
SecurityConst.kSecTrustResultProceed,
)
try:
trust_result = self._evaluate_trust(trust_bundle)
if trust_result in successes:
return
reason = "error code: %d" % (trust_result,)
except Exception as e:
# Do not trust on error
reason = "exception: %r" % (e,)
        # SecureTransport does not send an alert nor shut down the connection.
rec = _build_tls_unknown_ca_alert(self.version())
self.socket.sendall(rec)
# close the connection immediately
# l_onoff = 1, activate linger
        # l_linger = 0, linger for 0 seconds
opts = struct.pack("ii", 1, 0)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, opts)
self.close()
raise ssl.SSLError("certificate verify failed, %s" % reason)
def _evaluate_trust(self, trust_bundle):
# We want data in memory, so load it up.
if os.path.isfile(trust_bundle):
with open(trust_bundle, "rb") as f:
trust_bundle = f.read()
cert_array = None
trust = Security.SecTrustRef()
try:
# Get a CFArray that contains the certs we want.
cert_array = _cert_array_from_pem(trust_bundle)
# Ok, now the hard part. We want to get the SecTrustRef that ST has
# created for this connection, shove our CAs into it, tell ST to
# ignore everything else it knows, and then ask if it can build a
# chain. This is a buuuunch of code.
result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust))
_assert_no_error(result)
if not trust:
raise ssl.SSLError("Failed to copy trust reference")
result = Security.SecTrustSetAnchorCertificates(trust, cert_array)
_assert_no_error(result)
result = Security.SecTrustSetAnchorCertificatesOnly(trust, True)
_assert_no_error(result)
trust_result = Security.SecTrustResultType()
result = Security.SecTrustEvaluate(trust, ctypes.byref(trust_result))
_assert_no_error(result)
finally:
if trust:
CoreFoundation.CFRelease(trust)
if cert_array is not None:
CoreFoundation.CFRelease(cert_array)
return trust_result.value
def handshake(
self,
server_hostname,
verify,
trust_bundle,
min_version,
max_version,
client_cert,
client_key,
client_key_passphrase,
alpn_protocols,
):
"""
Actually performs the TLS handshake. This is run automatically by
wrapped socket, and shouldn't be needed in user code.
"""
# First, we do the initial bits of connection setup. We need to create
# a context, set its I/O funcs, and set the connection reference.
self.context = Security.SSLCreateContext(
None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType
)
result = Security.SSLSetIOFuncs(
self.context, _read_callback_pointer, _write_callback_pointer
)
_assert_no_error(result)
# Here we need to compute the handle to use. We do this by taking the
# id of self modulo 2**31 - 1. If this is already in the dictionary, we
# just keep incrementing by one until we find a free space.
with _connection_ref_lock:
handle = id(self) % 2147483647
while handle in _connection_refs:
handle = (handle + 1) % 2147483647
_connection_refs[handle] = self
result = Security.SSLSetConnection(self.context, handle)
_assert_no_error(result)
# If we have a server hostname, we should set that too.
if server_hostname:
if not isinstance(server_hostname, bytes):
server_hostname = server_hostname.encode("utf-8")
result = Security.SSLSetPeerDomainName(
self.context, server_hostname, len(server_hostname)
)
_assert_no_error(result)
# Setup the ciphers.
self._set_ciphers()
# Setup the ALPN protocols.
self._set_alpn_protocols(alpn_protocols)
# Set the minimum and maximum TLS versions.
result = Security.SSLSetProtocolVersionMin(self.context, min_version)
_assert_no_error(result)
result = Security.SSLSetProtocolVersionMax(self.context, max_version)
_assert_no_error(result)
# If there's a trust DB, we need to use it. We do that by telling
# SecureTransport to break on server auth. We also do that if we don't
# want to validate the certs at all: we just won't actually do any
# authing in that case.
if not verify or trust_bundle is not None:
result = Security.SSLSetSessionOption(
self.context, SecurityConst.kSSLSessionOptionBreakOnServerAuth, True
)
_assert_no_error(result)
# If there's a client cert, we need to use it.
if client_cert:
self._keychain, self._keychain_dir = _temporary_keychain()
self._client_cert_chain = _load_client_cert_chain(
self._keychain, client_cert, client_key
)
result = Security.SSLSetCertificate(self.context, self._client_cert_chain)
_assert_no_error(result)
while True:
with self._raise_on_error():
result = Security.SSLHandshake(self.context)
if result == SecurityConst.errSSLWouldBlock:
raise socket.timeout("handshake timed out")
elif result == SecurityConst.errSSLServerAuthCompleted:
self._custom_validate(verify, trust_bundle)
continue
else:
_assert_no_error(result)
break
def fileno(self):
return self.socket.fileno()
# Copy-pasted from Python 3.5 source code
def _decref_socketios(self):
if self._makefile_refs > 0:
self._makefile_refs -= 1
if self._closed:
self.close()
def recv(self, bufsiz):
buffer = ctypes.create_string_buffer(bufsiz)
bytes_read = self.recv_into(buffer, bufsiz)
data = buffer[:bytes_read]
return data
def recv_into(self, buffer, nbytes=None):
# Read short on EOF.
if self._closed:
return 0
if nbytes is None:
nbytes = len(buffer)
buffer = (ctypes.c_char * nbytes).from_buffer(buffer)
processed_bytes = ctypes.c_size_t(0)
with self._raise_on_error():
result = Security.SSLRead(
self.context, buffer, nbytes, ctypes.byref(processed_bytes)
)
# There are some result codes that we want to treat as "not always
# errors". Specifically, those are errSSLWouldBlock,
# errSSLClosedGraceful, and errSSLClosedNoNotify.
if result == SecurityConst.errSSLWouldBlock:
# If we didn't process any bytes, then this was just a time out.
# However, we can get errSSLWouldBlock in situations when we *did*
# read some data, and in those cases we should just read "short"
# and return.
if processed_bytes.value == 0:
# Timed out, no data read.
raise socket.timeout("recv timed out")
elif result in (
SecurityConst.errSSLClosedGraceful,
SecurityConst.errSSLClosedNoNotify,
):
# The remote peer has closed this connection. We should do so as
# well. Note that we don't actually return here because in
# principle this could actually be fired along with return data.
# It's unlikely though.
self.close()
else:
_assert_no_error(result)
# Ok, we read and probably succeeded. We should return whatever data
# was actually read.
return processed_bytes.value
def settimeout(self, timeout):
self._timeout = timeout
def gettimeout(self):
return self._timeout
def send(self, data):
processed_bytes = ctypes.c_size_t(0)
with self._raise_on_error():
result = Security.SSLWrite(
self.context, data, len(data), ctypes.byref(processed_bytes)
)
if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0:
# Timed out
raise socket.timeout("send timed out")
else:
_assert_no_error(result)
# We sent, and probably succeeded. Tell them how much we sent.
return processed_bytes.value
def sendall(self, data):
total_sent = 0
while total_sent < len(data):
sent = self.send(data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE])
total_sent += sent
def shutdown(self):
with self._raise_on_error():
Security.SSLClose(self.context)
def close(self):
# TODO: should I do clean shutdown here? Do I have to?
if self._makefile_refs < 1:
self._closed = True
if self.context:
CoreFoundation.CFRelease(self.context)
self.context = None
if self._client_cert_chain:
CoreFoundation.CFRelease(self._client_cert_chain)
self._client_cert_chain = None
if self._keychain:
Security.SecKeychainDelete(self._keychain)
CoreFoundation.CFRelease(self._keychain)
shutil.rmtree(self._keychain_dir)
self._keychain = self._keychain_dir = None
return self.socket.close()
else:
self._makefile_refs -= 1
def getpeercert(self, binary_form=False):
# Urgh, annoying.
#
# Here's how we do this:
#
# 1. Call SSLCopyPeerTrust to get hold of the trust object for this
# connection.
# 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf.
# 3. To get the CN, call SecCertificateCopyCommonName and process that
# string so that it's of the appropriate type.
# 4. To get the SAN, we need to do something a bit more complex:
# a. Call SecCertificateCopyValues to get the data, requesting
# kSecOIDSubjectAltName.
# b. Mess about with this dictionary to try to get the SANs out.
#
# This is gross. Really gross. It's going to be a few hundred LoC extra
# just to repeat something that SecureTransport can *already do*. So my
# operating assumption at this time is that what we want to do is
# instead to just flag to urllib3 that it shouldn't do its own hostname
# validation when using SecureTransport.
if not binary_form:
raise ValueError("SecureTransport only supports dumping binary certs")
trust = Security.SecTrustRef()
certdata = None
der_bytes = None
try:
# Grab the trust store.
result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust))
_assert_no_error(result)
if not trust:
# Probably we haven't done the handshake yet. No biggie.
return None
cert_count = Security.SecTrustGetCertificateCount(trust)
if not cert_count:
# Also a case that might happen if we haven't handshaked.
# Handshook? Handshaken?
return None
leaf = Security.SecTrustGetCertificateAtIndex(trust, 0)
assert leaf
# Ok, now we want the DER bytes.
certdata = Security.SecCertificateCopyData(leaf)
assert certdata
data_length = CoreFoundation.CFDataGetLength(certdata)
data_buffer = CoreFoundation.CFDataGetBytePtr(certdata)
der_bytes = ctypes.string_at(data_buffer, data_length)
finally:
if certdata:
CoreFoundation.CFRelease(certdata)
if trust:
CoreFoundation.CFRelease(trust)
return der_bytes
def version(self):
protocol = Security.SSLProtocol()
result = Security.SSLGetNegotiatedProtocolVersion(
self.context, ctypes.byref(protocol)
)
_assert_no_error(result)
if protocol.value == SecurityConst.kTLSProtocol13:
raise ssl.SSLError("SecureTransport does not support TLS 1.3")
elif protocol.value == SecurityConst.kTLSProtocol12:
return "TLSv1.2"
elif protocol.value == SecurityConst.kTLSProtocol11:
return "TLSv1.1"
elif protocol.value == SecurityConst.kTLSProtocol1:
return "TLSv1"
elif protocol.value == SecurityConst.kSSLProtocol3:
return "SSLv3"
elif protocol.value == SecurityConst.kSSLProtocol2:
return "SSLv2"
else:
raise ssl.SSLError("Unknown TLS version: %r" % protocol)
def _reuse(self):
self._makefile_refs += 1
def _drop(self):
if self._makefile_refs < 1:
self.close()
else:
self._makefile_refs -= 1
if _fileobject: # Platform-specific: Python 2
def makefile(self, mode, bufsize=-1):
self._makefile_refs += 1
return _fileobject(self, mode, bufsize, close=True)
else: # Platform-specific: Python 3
def makefile(self, mode="r", buffering=None, *args, **kwargs):
# We disable buffering with SecureTransport because it conflicts with
# the buffering that ST does internally (see issue #1153 for more).
buffering = 0
return backport_makefile(self, mode, buffering, *args, **kwargs)
WrappedSocket.makefile = makefile
class SecureTransportContext(object):
"""
I am a wrapper class for the SecureTransport library, to translate the
interface of the standard library ``SSLContext`` object to calls into
SecureTransport.
"""
def __init__(self, protocol):
self._min_version, self._max_version = _protocol_to_min_max[protocol]
self._options = 0
self._verify = False
self._trust_bundle = None
self._client_cert = None
self._client_key = None
self._client_key_passphrase = None
self._alpn_protocols = None
@property
def check_hostname(self):
"""
SecureTransport cannot have its hostname checking disabled. For more,
see the comment on getpeercert() in this file.
"""
return True
@check_hostname.setter
def check_hostname(self, value):
"""
SecureTransport cannot have its hostname checking disabled. For more,
see the comment on getpeercert() in this file.
"""
pass
@property
def options(self):
# TODO: Well, crap.
#
# So this is the bit of the code that is the most likely to cause us
# trouble. Essentially we need to enumerate all of the SSL options that
# users might want to use and try to see if we can sensibly translate
# them, or whether we should just ignore them.
return self._options
@options.setter
def options(self, value):
# TODO: Update in line with above.
self._options = value
@property
def verify_mode(self):
return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE
@verify_mode.setter
def verify_mode(self, value):
self._verify = True if value == ssl.CERT_REQUIRED else False
def set_default_verify_paths(self):
# So, this has to do something a bit weird. Specifically, what it does
# is nothing.
#
# This means that, if we had previously had load_verify_locations
# called, this does not undo that. We need to do that because it turns
# out that the rest of the urllib3 code will attempt to load the
# default verify paths if it hasn't been told about any paths, even if
        # the context itself was configured sometime earlier. We resolve that
        # by just ignoring it.
pass
def load_default_certs(self):
return self.set_default_verify_paths()
def set_ciphers(self, ciphers):
# For now, we just require the default cipher string.
if ciphers != util.ssl_.DEFAULT_CIPHERS:
raise ValueError("SecureTransport doesn't support custom cipher strings")
def load_verify_locations(self, cafile=None, capath=None, cadata=None):
# OK, we only really support cadata and cafile.
if capath is not None:
raise ValueError("SecureTransport does not support cert directories")
# Raise if cafile does not exist.
if cafile is not None:
with open(cafile):
pass
self._trust_bundle = cafile or cadata
def load_cert_chain(self, certfile, keyfile=None, password=None):
self._client_cert = certfile
self._client_key = keyfile
        self._client_key_passphrase = password
def set_alpn_protocols(self, protocols):
"""
Sets the ALPN protocols that will later be set on the context.
Raises a NotImplementedError if ALPN is not supported.
"""
if not hasattr(Security, "SSLSetALPNProtocols"):
raise NotImplementedError(
"SecureTransport supports ALPN only in macOS 10.12+"
)
self._alpn_protocols = [six.ensure_binary(p) for p in protocols]
def wrap_socket(
self,
sock,
server_side=False,
do_handshake_on_connect=True,
suppress_ragged_eofs=True,
server_hostname=None,
):
# So, what do we do here? Firstly, we assert some properties. This is a
# stripped down shim, so there is some functionality we don't support.
# See PEP 543 for the real deal.
assert not server_side
assert do_handshake_on_connect
assert suppress_ragged_eofs
# Ok, we're good to go. Now we want to create the wrapped socket object
# and store it in the appropriate place.
wrapped_socket = WrappedSocket(sock)
# Now we can handshake
wrapped_socket.handshake(
server_hostname,
self._verify,
self._trust_bundle,
self._min_version,
self._max_version,
self._client_cert,
self._client_key,
self._client_key_passphrase,
self._alpn_protocols,
)
return wrapped_socket
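# Illustrative usage sketch (not part of the original module). The host name
# and CA bundle path below are placeholders, and ``ssl.PROTOCOL_TLS`` is
# assumed to be one of the keys of ``_protocol_to_min_max`` defined earlier in
# this file.
if __name__ == "__main__":
    ctx = SecureTransportContext(ssl.PROTOCOL_TLS)
    ctx.verify_mode = ssl.CERT_REQUIRED
    ctx.load_verify_locations(cafile="/path/to/ca-bundle.pem")  # hypothetical
    raw = socket.create_connection(("example.com", 443))  # placeholder host
    tls = ctx.wrap_socket(raw, server_hostname="example.com")
    try:
        print(tls.version())  # e.g. "TLSv1.2"
    finally:
        tls.close()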
| 34,416 | Python | 36.328633 | 86 | 0.634821 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/contrib/_appengine_environ.py | """
This module provides means to detect the App Engine environment.
"""
import os
def is_appengine():
return is_local_appengine() or is_prod_appengine()
def is_appengine_sandbox():
"""Reports if the app is running in the first generation sandbox.
The second generation runtimes are technically still in a sandbox, but it
is much less restrictive, so generally you shouldn't need to check for it.
see https://cloud.google.com/appengine/docs/standard/runtimes
"""
return is_appengine() and os.environ["APPENGINE_RUNTIME"] == "python27"
def is_local_appengine():
return "APPENGINE_RUNTIME" in os.environ and os.environ.get(
"SERVER_SOFTWARE", ""
).startswith("Development/")
def is_prod_appengine():
return "APPENGINE_RUNTIME" in os.environ and os.environ.get(
"SERVER_SOFTWARE", ""
).startswith("Google App Engine/")
def is_prod_appengine_mvms():
"""Deprecated."""
return False
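# Illustrative usage sketch (not part of the original module): report which
# App Engine environment, if any, the current process is running in.
if __name__ == "__main__":
    for check in (
        is_appengine,
        is_appengine_sandbox,
        is_local_appengine,
        is_prod_appengine,
        is_prod_appengine_mvms,
    ):
        print("%s -> %r" % (check.__name__, check()))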
| 957 | Python | 24.891891 | 78 | 0.6907 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/contrib/ntlmpool.py | """
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
from __future__ import absolute_import
import warnings
from logging import getLogger
from ntlm import ntlm
from .. import HTTPSConnectionPool
from ..packages.six.moves.http_client import HTTPSConnection
warnings.warn(
"The 'urllib3.contrib.ntlmpool' module is deprecated and will be removed "
"in urllib3 v2.0 release, urllib3 is not able to support it properly due "
"to reasons listed in issue: https://github.com/urllib3/urllib3/issues/2282. "
"If you are a user of this module please comment in the mentioned issue.",
DeprecationWarning,
)
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = "https"
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split("\\", 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug(
"Starting NTLM HTTPS connection no. %d: https://%s%s",
self.num_connections,
self.host,
self.authurl,
)
headers = {"Connection": "Keep-Alive"}
req_header = "Authorization"
resp_header = "www-authenticate"
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = "NTLM %s" % ntlm.create_NTLM_NEGOTIATE_MESSAGE(
self.rawuser
)
log.debug("Request headers: %s", headers)
conn.request("GET", self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.headers)
log.debug("Response status: %s %s", res.status, res.reason)
log.debug("Response headers: %s", reshdr)
log.debug("Response data: %s [...]", res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(", ")
auth_header_value = None
for s in auth_header_values:
if s[:5] == "NTLM ":
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception(
"Unexpected %s response header: %s" % (resp_header, reshdr[resp_header])
)
# Send authentication message
ServerChallenge, NegotiateFlags = ntlm.parse_NTLM_CHALLENGE_MESSAGE(
auth_header_value
)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(
ServerChallenge, self.user, self.domain, self.pw, NegotiateFlags
)
headers[req_header] = "NTLM %s" % auth_msg
log.debug("Request headers: %s", headers)
conn.request("GET", self.authurl, None, headers)
res = conn.getresponse()
log.debug("Response status: %s %s", res.status, res.reason)
log.debug("Response headers: %s", dict(res.headers))
log.debug("Response data: %s [...]", res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception("Server rejected request: wrong username or password")
raise Exception("Wrong server response: %s %s" % (res.status, res.reason))
res.fp = None
log.debug("Connection established")
return conn
def urlopen(
self,
method,
url,
body=None,
headers=None,
retries=3,
redirect=True,
assert_same_host=True,
):
if headers is None:
headers = {}
headers["Connection"] = "Keep-Alive"
return super(NTLMConnectionPool, self).urlopen(
method, url, body, headers, retries, redirect, assert_same_host
)
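# Illustrative usage sketch (not part of the original module). The host,
# credentials, and protected path below are all placeholders; ``authurl`` must
# name a resource on the server that is protected by NTLM.
if __name__ == "__main__":
    pool = NTLMConnectionPool(
        user="EXAMPLEDOMAIN\\alice",  # hypothetical DOMAIN\\username
        pw="s3cret",  # hypothetical password
        authurl="/protected/",  # hypothetical NTLM-protected URL
        host="intranet.example.com",  # hypothetical host
    )
    response = pool.urlopen("GET", "/protected/resource")
    print(response.status)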
| 4,528 | Python | 33.572519 | 88 | 0.605345 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/contrib/_securetransport/low_level.py | """
Low-level helpers for the SecureTransport bindings.
These are Python functions that are not directly related to the high-level APIs
but are necessary to get them to work. They include a whole bunch of low-level
CoreFoundation messing about and memory management. The concerns in this module
are almost entirely about trying to avoid memory leaks and providing
appropriate and useful assistance to the higher-level code.
"""
import base64
import ctypes
import itertools
import os
import re
import ssl
import struct
import tempfile
from .bindings import CFConst, CoreFoundation, Security
# This regular expression is used to grab PEM data out of a PEM bundle.
_PEM_CERTS_RE = re.compile(
b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
)
def _cf_data_from_bytes(bytestring):
"""
Given a bytestring, create a CFData object from it. This CFData object must
be CFReleased by the caller.
"""
return CoreFoundation.CFDataCreate(
CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring)
)
def _cf_dictionary_from_tuples(tuples):
"""
Given a list of Python tuples, create an associated CFDictionary.
"""
dictionary_size = len(tuples)
# We need to get the dictionary keys and values out in the same order.
keys = (t[0] for t in tuples)
values = (t[1] for t in tuples)
cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys)
cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values)
return CoreFoundation.CFDictionaryCreate(
CoreFoundation.kCFAllocatorDefault,
cf_keys,
cf_values,
dictionary_size,
CoreFoundation.kCFTypeDictionaryKeyCallBacks,
CoreFoundation.kCFTypeDictionaryValueCallBacks,
)
def _cfstr(py_bstr):
"""
    Given a Python bytestring, create a CFString.
The string must be CFReleased by the caller.
"""
c_str = ctypes.c_char_p(py_bstr)
cf_str = CoreFoundation.CFStringCreateWithCString(
CoreFoundation.kCFAllocatorDefault,
c_str,
CFConst.kCFStringEncodingUTF8,
)
return cf_str
def _create_cfstring_array(lst):
"""
    Given a list of Python bytestrings, create an associated CFMutableArray.
The array must be CFReleased by the caller.
Raises an ssl.SSLError on failure.
"""
cf_arr = None
try:
cf_arr = CoreFoundation.CFArrayCreateMutable(
CoreFoundation.kCFAllocatorDefault,
0,
ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
)
if not cf_arr:
raise MemoryError("Unable to allocate memory!")
for item in lst:
cf_str = _cfstr(item)
if not cf_str:
raise MemoryError("Unable to allocate memory!")
try:
CoreFoundation.CFArrayAppendValue(cf_arr, cf_str)
finally:
CoreFoundation.CFRelease(cf_str)
except BaseException as e:
if cf_arr:
CoreFoundation.CFRelease(cf_arr)
raise ssl.SSLError("Unable to allocate array: %s" % (e,))
return cf_arr
def _cf_string_to_unicode(value):
"""
Creates a Unicode string from a CFString object. Used entirely for error
reporting.
Yes, it annoys me quite a lot that this function is this complex.
"""
value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
string = CoreFoundation.CFStringGetCStringPtr(
value_as_void_p, CFConst.kCFStringEncodingUTF8
)
if string is None:
buffer = ctypes.create_string_buffer(1024)
result = CoreFoundation.CFStringGetCString(
value_as_void_p, buffer, 1024, CFConst.kCFStringEncodingUTF8
)
if not result:
raise OSError("Error copying C string from CFStringRef")
string = buffer.value
if string is not None:
string = string.decode("utf-8")
return string
def _assert_no_error(error, exception_class=None):
"""
Checks the return code and throws an exception if there is an error to
report
"""
if error == 0:
return
cf_error_string = Security.SecCopyErrorMessageString(error, None)
output = _cf_string_to_unicode(cf_error_string)
CoreFoundation.CFRelease(cf_error_string)
if output is None or output == u"":
output = u"OSStatus %s" % error
if exception_class is None:
exception_class = ssl.SSLError
raise exception_class(output)
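# Illustrative sketch (not part of the original module). _assert_no_error is
# called after nearly every Security.framework call in this package: a zero
# status returns silently, anything else becomes an ssl.SSLError carrying the
# framework's message text (macOS only; exact wording may vary by release):
#
#     >>> _assert_no_error(0)              # returns None
#     >>> _assert_no_error(-25300)         # errSecItemNotFound
#     Traceback (most recent call last):
#       ...
#     ssl.SSLError: The specified item could not be found in the keychain.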
def _cert_array_from_pem(pem_bundle):
"""
Given a bundle of certs in PEM format, turns them into a CFArray of certs
that can be used to validate a cert chain.
"""
# Normalize the PEM bundle's line endings.
pem_bundle = pem_bundle.replace(b"\r\n", b"\n")
der_certs = [
base64.b64decode(match.group(1)) for match in _PEM_CERTS_RE.finditer(pem_bundle)
]
if not der_certs:
raise ssl.SSLError("No root certificates specified")
cert_array = CoreFoundation.CFArrayCreateMutable(
CoreFoundation.kCFAllocatorDefault,
0,
ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
)
if not cert_array:
raise ssl.SSLError("Unable to allocate memory!")
try:
for der_bytes in der_certs:
certdata = _cf_data_from_bytes(der_bytes)
if not certdata:
raise ssl.SSLError("Unable to allocate memory!")
cert = Security.SecCertificateCreateWithData(
CoreFoundation.kCFAllocatorDefault, certdata
)
CoreFoundation.CFRelease(certdata)
if not cert:
raise ssl.SSLError("Unable to build cert object!")
CoreFoundation.CFArrayAppendValue(cert_array, cert)
CoreFoundation.CFRelease(cert)
except Exception:
# We need to free the array before the exception bubbles further.
# We only want to do that if an error occurs: otherwise, the caller
# should free.
CoreFoundation.CFRelease(cert_array)
raise
return cert_array
def _is_cert(item):
"""
Returns True if a given CFTypeRef is a certificate.
"""
expected = Security.SecCertificateGetTypeID()
return CoreFoundation.CFGetTypeID(item) == expected
def _is_identity(item):
"""
Returns True if a given CFTypeRef is an identity.
"""
expected = Security.SecIdentityGetTypeID()
return CoreFoundation.CFGetTypeID(item) == expected
def _temporary_keychain():
"""
This function creates a temporary Mac keychain that we can use to work with
credentials. This keychain uses a one-time password and a temporary file to
store the data. We expect to have one keychain per socket. The returned
SecKeychainRef must be freed by the caller, including calling
SecKeychainDelete.
Returns a tuple of the SecKeychainRef and the path to the temporary
directory that contains it.
"""
# Unfortunately, SecKeychainCreate requires a path to a keychain. This
# means we cannot use mkstemp to use a generic temporary file. Instead,
# we're going to create a temporary directory and a filename to use there.
    # This filename will be 8 random bytes hex-encoded (base16). We also need
# some random bytes to password-protect the keychain we're creating, so we
# ask for 40 random bytes.
random_bytes = os.urandom(40)
filename = base64.b16encode(random_bytes[:8]).decode("utf-8")
password = base64.b16encode(random_bytes[8:]) # Must be valid UTF-8
tempdirectory = tempfile.mkdtemp()
keychain_path = os.path.join(tempdirectory, filename).encode("utf-8")
# We now want to create the keychain itself.
keychain = Security.SecKeychainRef()
status = Security.SecKeychainCreate(
keychain_path, len(password), password, False, None, ctypes.byref(keychain)
)
_assert_no_error(status)
# Having created the keychain, we want to pass it off to the caller.
return keychain, tempdirectory
def _load_items_from_file(keychain, path):
"""
Given a single file, loads all the trust objects from it into arrays and
the keychain.
Returns a tuple of lists: the first list is a list of identities, the
second a list of certs.
"""
certificates = []
identities = []
result_array = None
with open(path, "rb") as f:
raw_filedata = f.read()
try:
filedata = CoreFoundation.CFDataCreate(
CoreFoundation.kCFAllocatorDefault, raw_filedata, len(raw_filedata)
)
result_array = CoreFoundation.CFArrayRef()
result = Security.SecItemImport(
filedata, # cert data
None, # Filename, leaving it out for now
None, # What the type of the file is, we don't care
None, # what's in the file, we don't care
0, # import flags
None, # key params, can include passphrase in the future
keychain, # The keychain to insert into
ctypes.byref(result_array), # Results
)
_assert_no_error(result)
# A CFArray is not very useful to us as an intermediary
# representation, so we are going to extract the objects we want
# and then free the array. We don't need to keep hold of keys: the
# keychain already has them!
result_count = CoreFoundation.CFArrayGetCount(result_array)
for index in range(result_count):
item = CoreFoundation.CFArrayGetValueAtIndex(result_array, index)
item = ctypes.cast(item, CoreFoundation.CFTypeRef)
if _is_cert(item):
CoreFoundation.CFRetain(item)
certificates.append(item)
elif _is_identity(item):
CoreFoundation.CFRetain(item)
identities.append(item)
finally:
if result_array:
CoreFoundation.CFRelease(result_array)
CoreFoundation.CFRelease(filedata)
return (identities, certificates)
def _load_client_cert_chain(keychain, *paths):
"""
Load certificates and maybe keys from a number of files. Has the end goal
of returning a CFArray containing one SecIdentityRef, and then zero or more
SecCertificateRef objects, suitable for use as a client certificate trust
chain.
"""
# Ok, the strategy.
#
# This relies on knowing that macOS will not give you a SecIdentityRef
# unless you have imported a key into a keychain. This is a somewhat
# artificial limitation of macOS (for example, it doesn't necessarily
# affect iOS), but there is nothing inside Security.framework that lets you
# get a SecIdentityRef without having a key in a keychain.
#
# So the policy here is we take all the files and iterate them in order.
# Each one will use SecItemImport to have one or more objects loaded from
# it. We will also point at a keychain that macOS can use to work with the
# private key.
#
# Once we have all the objects, we'll check what we actually have. If we
# already have a SecIdentityRef in hand, fab: we'll use that. Otherwise,
# we'll take the first certificate (which we assume to be our leaf) and
# ask the keychain to give us a SecIdentityRef with that cert's associated
# key.
#
# We'll then return a CFArray containing the trust chain: one
# SecIdentityRef and then zero-or-more SecCertificateRef objects. The
# responsibility for freeing this CFArray will be with the caller. This
# CFArray must remain alive for the entire connection, so in practice it
# will be stored with a single SSLSocket, along with the reference to the
# keychain.
certificates = []
identities = []
# Filter out bad paths.
paths = (path for path in paths if path)
try:
for file_path in paths:
new_identities, new_certs = _load_items_from_file(keychain, file_path)
identities.extend(new_identities)
certificates.extend(new_certs)
# Ok, we have everything. The question is: do we have an identity? If
# not, we want to grab one from the first cert we have.
if not identities:
new_identity = Security.SecIdentityRef()
status = Security.SecIdentityCreateWithCertificate(
keychain, certificates[0], ctypes.byref(new_identity)
)
_assert_no_error(status)
identities.append(new_identity)
# We now want to release the original certificate, as we no longer
# need it.
CoreFoundation.CFRelease(certificates.pop(0))
# We now need to build a new CFArray that holds the trust chain.
trust_chain = CoreFoundation.CFArrayCreateMutable(
CoreFoundation.kCFAllocatorDefault,
0,
ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
)
for item in itertools.chain(identities, certificates):
# ArrayAppendValue does a CFRetain on the item. That's fine,
# because the finally block will release our other refs to them.
CoreFoundation.CFArrayAppendValue(trust_chain, item)
return trust_chain
finally:
for obj in itertools.chain(identities, certificates):
CoreFoundation.CFRelease(obj)
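# Illustrative sketch (not part of the original module, macOS only). It ties
# _temporary_keychain and _load_client_cert_chain together the way
# WrappedSocket.handshake/close do in securetransport.py; the certificate and
# key paths are placeholders, and ``shutil`` would need to be imported.
#
#     keychain, keychain_dir = _temporary_keychain()
#     try:
#         chain = _load_client_cert_chain(
#             keychain, "/path/to/client-cert.pem", "/path/to/client-key.pem"
#         )
#         ...  # hand ``chain`` to Security.SSLSetCertificate
#         CoreFoundation.CFRelease(chain)
#     finally:
#         Security.SecKeychainDelete(keychain)
#         CoreFoundation.CFRelease(keychain)
#         shutil.rmtree(keychain_dir)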
TLS_PROTOCOL_VERSIONS = {
"SSLv2": (0, 2),
"SSLv3": (3, 0),
"TLSv1": (3, 1),
"TLSv1.1": (3, 2),
"TLSv1.2": (3, 3),
}
def _build_tls_unknown_ca_alert(version):
"""
Builds a TLS alert record for an unknown CA.
"""
ver_maj, ver_min = TLS_PROTOCOL_VERSIONS[version]
severity_fatal = 0x02
description_unknown_ca = 0x30
msg = struct.pack(">BB", severity_fatal, description_unknown_ca)
msg_len = len(msg)
record_type_alert = 0x15
record = struct.pack(">BBBH", record_type_alert, ver_maj, ver_min, msg_len) + msg
return record
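if __name__ == "__main__":
    # Illustrative sketch (not part of the original module). For TLS 1.2 the
    # alert record is seven bytes: record type 0x15 (alert), version 3.3, a
    # two-byte big-endian length, then severity ``fatal`` (0x02) and
    # description ``unknown_ca`` (0x30).
    record = _build_tls_unknown_ca_alert("TLSv1.2")
    assert record == b"\x15\x03\x03\x00\x02\x02\x30"
    print(record)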
| 13,922 | Python | 33.982412 | 88 | 0.659316 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/contrib/_securetransport/bindings.py | """
This module uses ctypes to bind a whole bunch of functions and constants from
SecureTransport. The goal here is to provide the low-level API to
SecureTransport. These are essentially the C-level functions and constants, and
they're pretty gross to work with.
This code is a bastardised version of the code found in Will Bond's oscrypto
library. An enormous debt is owed to him for blazing this trail for us. For
that reason, this code should be considered to be covered both by urllib3's
license and by oscrypto's:
Copyright (c) 2015-2016 Will Bond <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
import platform
from ctypes import (
CDLL,
CFUNCTYPE,
POINTER,
c_bool,
c_byte,
c_char_p,
c_int32,
c_long,
c_size_t,
c_uint32,
c_ulong,
c_void_p,
)
from ctypes.util import find_library
from ...packages.six import raise_from
if platform.system() != "Darwin":
raise ImportError("Only macOS is supported")
version = platform.mac_ver()[0]
version_info = tuple(map(int, version.split(".")))
if version_info < (10, 8):
raise OSError(
"Only OS X 10.8 and newer are supported, not %s.%s"
% (version_info[0], version_info[1])
)
def load_cdll(name, macos10_16_path):
"""Loads a CDLL by name, falling back to known path on 10.16+"""
try:
# Big Sur is technically 11 but we use 10.16 due to the Big Sur
# beta being labeled as 10.16.
if version_info >= (10, 16):
path = macos10_16_path
else:
path = find_library(name)
if not path:
raise OSError # Caught and reraised as 'ImportError'
return CDLL(path, use_errno=True)
except OSError:
raise_from(ImportError("The library %s failed to load" % name), None)
Security = load_cdll(
"Security", "/System/Library/Frameworks/Security.framework/Security"
)
CoreFoundation = load_cdll(
"CoreFoundation",
"/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation",
)
Boolean = c_bool
CFIndex = c_long
CFStringEncoding = c_uint32
CFData = c_void_p
CFString = c_void_p
CFArray = c_void_p
CFMutableArray = c_void_p
CFDictionary = c_void_p
CFError = c_void_p
CFType = c_void_p
CFTypeID = c_ulong
CFTypeRef = POINTER(CFType)
CFAllocatorRef = c_void_p
OSStatus = c_int32
CFDataRef = POINTER(CFData)
CFStringRef = POINTER(CFString)
CFArrayRef = POINTER(CFArray)
CFMutableArrayRef = POINTER(CFMutableArray)
CFDictionaryRef = POINTER(CFDictionary)
CFArrayCallBacks = c_void_p
CFDictionaryKeyCallBacks = c_void_p
CFDictionaryValueCallBacks = c_void_p
SecCertificateRef = POINTER(c_void_p)
SecExternalFormat = c_uint32
SecExternalItemType = c_uint32
SecIdentityRef = POINTER(c_void_p)
SecItemImportExportFlags = c_uint32
SecItemImportExportKeyParameters = c_void_p
SecKeychainRef = POINTER(c_void_p)
SSLProtocol = c_uint32
SSLCipherSuite = c_uint32
SSLContextRef = POINTER(c_void_p)
SecTrustRef = POINTER(c_void_p)
SSLConnectionRef = c_uint32
SecTrustResultType = c_uint32
SecTrustOptionFlags = c_uint32
SSLProtocolSide = c_uint32
SSLConnectionType = c_uint32
SSLSessionOption = c_uint32
try:
Security.SecItemImport.argtypes = [
CFDataRef,
CFStringRef,
POINTER(SecExternalFormat),
POINTER(SecExternalItemType),
SecItemImportExportFlags,
POINTER(SecItemImportExportKeyParameters),
SecKeychainRef,
POINTER(CFArrayRef),
]
Security.SecItemImport.restype = OSStatus
Security.SecCertificateGetTypeID.argtypes = []
Security.SecCertificateGetTypeID.restype = CFTypeID
Security.SecIdentityGetTypeID.argtypes = []
Security.SecIdentityGetTypeID.restype = CFTypeID
Security.SecKeyGetTypeID.argtypes = []
Security.SecKeyGetTypeID.restype = CFTypeID
Security.SecCertificateCreateWithData.argtypes = [CFAllocatorRef, CFDataRef]
Security.SecCertificateCreateWithData.restype = SecCertificateRef
Security.SecCertificateCopyData.argtypes = [SecCertificateRef]
Security.SecCertificateCopyData.restype = CFDataRef
Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p]
Security.SecCopyErrorMessageString.restype = CFStringRef
Security.SecIdentityCreateWithCertificate.argtypes = [
CFTypeRef,
SecCertificateRef,
POINTER(SecIdentityRef),
]
Security.SecIdentityCreateWithCertificate.restype = OSStatus
Security.SecKeychainCreate.argtypes = [
c_char_p,
c_uint32,
c_void_p,
Boolean,
c_void_p,
POINTER(SecKeychainRef),
]
Security.SecKeychainCreate.restype = OSStatus
Security.SecKeychainDelete.argtypes = [SecKeychainRef]
Security.SecKeychainDelete.restype = OSStatus
Security.SecPKCS12Import.argtypes = [
CFDataRef,
CFDictionaryRef,
POINTER(CFArrayRef),
]
Security.SecPKCS12Import.restype = OSStatus
SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t))
SSLWriteFunc = CFUNCTYPE(
OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t)
)
Security.SSLSetIOFuncs.argtypes = [SSLContextRef, SSLReadFunc, SSLWriteFunc]
Security.SSLSetIOFuncs.restype = OSStatus
Security.SSLSetPeerID.argtypes = [SSLContextRef, c_char_p, c_size_t]
Security.SSLSetPeerID.restype = OSStatus
Security.SSLSetCertificate.argtypes = [SSLContextRef, CFArrayRef]
Security.SSLSetCertificate.restype = OSStatus
Security.SSLSetCertificateAuthorities.argtypes = [SSLContextRef, CFTypeRef, Boolean]
Security.SSLSetCertificateAuthorities.restype = OSStatus
Security.SSLSetConnection.argtypes = [SSLContextRef, SSLConnectionRef]
Security.SSLSetConnection.restype = OSStatus
Security.SSLSetPeerDomainName.argtypes = [SSLContextRef, c_char_p, c_size_t]
Security.SSLSetPeerDomainName.restype = OSStatus
Security.SSLHandshake.argtypes = [SSLContextRef]
Security.SSLHandshake.restype = OSStatus
Security.SSLRead.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)]
Security.SSLRead.restype = OSStatus
Security.SSLWrite.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)]
Security.SSLWrite.restype = OSStatus
Security.SSLClose.argtypes = [SSLContextRef]
Security.SSLClose.restype = OSStatus
Security.SSLGetNumberSupportedCiphers.argtypes = [SSLContextRef, POINTER(c_size_t)]
Security.SSLGetNumberSupportedCiphers.restype = OSStatus
Security.SSLGetSupportedCiphers.argtypes = [
SSLContextRef,
POINTER(SSLCipherSuite),
POINTER(c_size_t),
]
Security.SSLGetSupportedCiphers.restype = OSStatus
Security.SSLSetEnabledCiphers.argtypes = [
SSLContextRef,
POINTER(SSLCipherSuite),
c_size_t,
]
Security.SSLSetEnabledCiphers.restype = OSStatus
    Security.SSLGetNumberEnabledCiphers.argtypes = [SSLContextRef, POINTER(c_size_t)]
Security.SSLGetNumberEnabledCiphers.restype = OSStatus
Security.SSLGetEnabledCiphers.argtypes = [
SSLContextRef,
POINTER(SSLCipherSuite),
POINTER(c_size_t),
]
Security.SSLGetEnabledCiphers.restype = OSStatus
Security.SSLGetNegotiatedCipher.argtypes = [SSLContextRef, POINTER(SSLCipherSuite)]
Security.SSLGetNegotiatedCipher.restype = OSStatus
Security.SSLGetNegotiatedProtocolVersion.argtypes = [
SSLContextRef,
POINTER(SSLProtocol),
]
Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus
Security.SSLCopyPeerTrust.argtypes = [SSLContextRef, POINTER(SecTrustRef)]
Security.SSLCopyPeerTrust.restype = OSStatus
Security.SecTrustSetAnchorCertificates.argtypes = [SecTrustRef, CFArrayRef]
Security.SecTrustSetAnchorCertificates.restype = OSStatus
    Security.SecTrustSetAnchorCertificatesOnly.argtypes = [SecTrustRef, Boolean]
Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus
Security.SecTrustEvaluate.argtypes = [SecTrustRef, POINTER(SecTrustResultType)]
Security.SecTrustEvaluate.restype = OSStatus
Security.SecTrustGetCertificateCount.argtypes = [SecTrustRef]
Security.SecTrustGetCertificateCount.restype = CFIndex
Security.SecTrustGetCertificateAtIndex.argtypes = [SecTrustRef, CFIndex]
Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef
Security.SSLCreateContext.argtypes = [
CFAllocatorRef,
SSLProtocolSide,
SSLConnectionType,
]
Security.SSLCreateContext.restype = SSLContextRef
Security.SSLSetSessionOption.argtypes = [SSLContextRef, SSLSessionOption, Boolean]
Security.SSLSetSessionOption.restype = OSStatus
Security.SSLSetProtocolVersionMin.argtypes = [SSLContextRef, SSLProtocol]
Security.SSLSetProtocolVersionMin.restype = OSStatus
Security.SSLSetProtocolVersionMax.argtypes = [SSLContextRef, SSLProtocol]
Security.SSLSetProtocolVersionMax.restype = OSStatus
try:
Security.SSLSetALPNProtocols.argtypes = [SSLContextRef, CFArrayRef]
Security.SSLSetALPNProtocols.restype = OSStatus
except AttributeError:
# Supported only in 10.12+
pass
Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p]
Security.SecCopyErrorMessageString.restype = CFStringRef
Security.SSLReadFunc = SSLReadFunc
Security.SSLWriteFunc = SSLWriteFunc
Security.SSLContextRef = SSLContextRef
Security.SSLProtocol = SSLProtocol
Security.SSLCipherSuite = SSLCipherSuite
Security.SecIdentityRef = SecIdentityRef
Security.SecKeychainRef = SecKeychainRef
Security.SecTrustRef = SecTrustRef
Security.SecTrustResultType = SecTrustResultType
Security.SecExternalFormat = SecExternalFormat
Security.OSStatus = OSStatus
Security.kSecImportExportPassphrase = CFStringRef.in_dll(
Security, "kSecImportExportPassphrase"
)
Security.kSecImportItemIdentity = CFStringRef.in_dll(
Security, "kSecImportItemIdentity"
)
# CoreFoundation time!
CoreFoundation.CFRetain.argtypes = [CFTypeRef]
CoreFoundation.CFRetain.restype = CFTypeRef
CoreFoundation.CFRelease.argtypes = [CFTypeRef]
CoreFoundation.CFRelease.restype = None
CoreFoundation.CFGetTypeID.argtypes = [CFTypeRef]
CoreFoundation.CFGetTypeID.restype = CFTypeID
CoreFoundation.CFStringCreateWithCString.argtypes = [
CFAllocatorRef,
c_char_p,
CFStringEncoding,
]
CoreFoundation.CFStringCreateWithCString.restype = CFStringRef
CoreFoundation.CFStringGetCStringPtr.argtypes = [CFStringRef, CFStringEncoding]
CoreFoundation.CFStringGetCStringPtr.restype = c_char_p
CoreFoundation.CFStringGetCString.argtypes = [
CFStringRef,
c_char_p,
CFIndex,
CFStringEncoding,
]
CoreFoundation.CFStringGetCString.restype = c_bool
CoreFoundation.CFDataCreate.argtypes = [CFAllocatorRef, c_char_p, CFIndex]
CoreFoundation.CFDataCreate.restype = CFDataRef
CoreFoundation.CFDataGetLength.argtypes = [CFDataRef]
CoreFoundation.CFDataGetLength.restype = CFIndex
CoreFoundation.CFDataGetBytePtr.argtypes = [CFDataRef]
CoreFoundation.CFDataGetBytePtr.restype = c_void_p
CoreFoundation.CFDictionaryCreate.argtypes = [
CFAllocatorRef,
POINTER(CFTypeRef),
POINTER(CFTypeRef),
CFIndex,
CFDictionaryKeyCallBacks,
CFDictionaryValueCallBacks,
]
CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef
CoreFoundation.CFDictionaryGetValue.argtypes = [CFDictionaryRef, CFTypeRef]
CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef
CoreFoundation.CFArrayCreate.argtypes = [
CFAllocatorRef,
POINTER(CFTypeRef),
CFIndex,
CFArrayCallBacks,
]
CoreFoundation.CFArrayCreate.restype = CFArrayRef
CoreFoundation.CFArrayCreateMutable.argtypes = [
CFAllocatorRef,
CFIndex,
CFArrayCallBacks,
]
CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef
CoreFoundation.CFArrayAppendValue.argtypes = [CFMutableArrayRef, c_void_p]
CoreFoundation.CFArrayAppendValue.restype = None
CoreFoundation.CFArrayGetCount.argtypes = [CFArrayRef]
CoreFoundation.CFArrayGetCount.restype = CFIndex
CoreFoundation.CFArrayGetValueAtIndex.argtypes = [CFArrayRef, CFIndex]
CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p
CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll(
CoreFoundation, "kCFAllocatorDefault"
)
CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(
CoreFoundation, "kCFTypeArrayCallBacks"
)
CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll(
CoreFoundation, "kCFTypeDictionaryKeyCallBacks"
)
CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll(
CoreFoundation, "kCFTypeDictionaryValueCallBacks"
)
CoreFoundation.CFTypeRef = CFTypeRef
CoreFoundation.CFArrayRef = CFArrayRef
CoreFoundation.CFStringRef = CFStringRef
CoreFoundation.CFDictionaryRef = CFDictionaryRef
except AttributeError:
raise ImportError("Error initializing ctypes")
class CFConst(object):
"""
A class object that acts as essentially a namespace for CoreFoundation
constants.
"""
kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
class SecurityConst(object):
"""
A class object that acts as essentially a namespace for Security constants.
"""
kSSLSessionOptionBreakOnServerAuth = 0
kSSLProtocol2 = 1
kSSLProtocol3 = 2
kTLSProtocol1 = 4
kTLSProtocol11 = 7
kTLSProtocol12 = 8
# SecureTransport does not support TLS 1.3 even if there's a constant for it
kTLSProtocol13 = 10
kTLSProtocolMaxSupported = 999
kSSLClientSide = 1
kSSLStreamType = 0
kSecFormatPEMSequence = 10
kSecTrustResultInvalid = 0
kSecTrustResultProceed = 1
# This gap is present on purpose: this was kSecTrustResultConfirm, which
# is deprecated.
kSecTrustResultDeny = 3
kSecTrustResultUnspecified = 4
kSecTrustResultRecoverableTrustFailure = 5
kSecTrustResultFatalTrustFailure = 6
kSecTrustResultOtherError = 7
errSSLProtocol = -9800
errSSLWouldBlock = -9803
errSSLClosedGraceful = -9805
errSSLClosedNoNotify = -9816
errSSLClosedAbort = -9806
errSSLXCertChainInvalid = -9807
errSSLCrypto = -9809
errSSLInternal = -9810
errSSLCertExpired = -9814
errSSLCertNotYetValid = -9815
errSSLUnknownRootCert = -9812
errSSLNoRootCert = -9813
errSSLHostNameMismatch = -9843
errSSLPeerHandshakeFail = -9824
errSSLPeerUserCancelled = -9839
errSSLWeakPeerEphemeralDHKey = -9850
errSSLServerAuthCompleted = -9841
errSSLRecordOverflow = -9847
errSecVerifyFailed = -67808
errSecNoTrustSettings = -25263
errSecItemNotFound = -25300
errSecInvalidTrustSettings = -25262
# Cipher suites. We only pick the ones our default cipher string allows.
# Source: https://developer.apple.com/documentation/security/1550981-ssl_cipher_suite_values
TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C
TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030
TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B
TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F
TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA9
TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA8
TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F
TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E
TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028
TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014
TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B
TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039
TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027
TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013
TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067
TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033
TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D
TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C
TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D
TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C
TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
TLS_AES_128_GCM_SHA256 = 0x1301
TLS_AES_256_GCM_SHA384 = 0x1302
TLS_AES_128_CCM_8_SHA256 = 0x1305
TLS_AES_128_CCM_SHA256 = 0x1304
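if __name__ == "__main__":
    # Illustrative sketch (not part of the original module, macOS only):
    # translate an OSStatus such as errSSLProtocol (-9800) into readable text
    # using the bindings declared above. Real callers use the fuller
    # _cf_string_to_unicode helper in low_level.py, which also handles the
    # case where CFStringGetCStringPtr returns None.
    cf_str = Security.SecCopyErrorMessageString(SecurityConst.errSSLProtocol, None)
    try:
        print(CoreFoundation.CFStringGetCStringPtr(
            cf_str, CFConst.kCFStringEncodingUTF8
        ))
    finally:
        CoreFoundation.CFRelease(cf_str)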
| 17,632 | Python | 32.909615 | 96 | 0.735311 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/packages/backports/makefile.py | # -*- coding: utf-8 -*-
"""
backports.makefile
~~~~~~~~~~~~~~~~~~
Backports the Python 3 ``socket.makefile`` method for use with anything that
wants to create a "fake" socket object.
"""
import io
from socket import SocketIO
def backport_makefile(
self, mode="r", buffering=None, encoding=None, errors=None, newline=None
):
"""
Backport of ``socket.makefile`` from Python 3.5.
"""
if not set(mode) <= {"r", "w", "b"}:
raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
writing = "w" in mode
reading = "r" in mode or not writing
assert reading or writing
binary = "b" in mode
rawmode = ""
if reading:
rawmode += "r"
if writing:
rawmode += "w"
raw = SocketIO(self, rawmode)
self._makefile_refs += 1
if buffering is None:
buffering = -1
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
if not binary:
raise ValueError("unbuffered streams must be binary")
return raw
if reading and writing:
buffer = io.BufferedRWPair(raw, raw, buffering)
elif reading:
buffer = io.BufferedReader(raw, buffering)
else:
assert writing
buffer = io.BufferedWriter(raw, buffering)
if binary:
return buffer
text = io.TextIOWrapper(buffer, encoding, errors, newline)
text.mode = mode
return text
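if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): the backport is
    # meant to be grafted onto a socket-like class that tracks
    # ``_makefile_refs`` (as urllib3's SecureTransport WrappedSocket does).
    # ``_Wrapped`` below is a hypothetical minimal host for it.
    import socket

    class _Wrapped(object):
        def __init__(self, sock):
            self._sock = sock
            self._makefile_refs = 0

        # SocketIO reads and writes through these three methods.
        def recv_into(self, buf):
            return self._sock.recv_into(buf)

        def send(self, data):
            return self._sock.send(data)

        def fileno(self):
            return self._sock.fileno()

        # SocketIO.close() calls back into this.
        def _decref_socketios(self):
            self._makefile_refs -= 1

        makefile = backport_makefile

    a, b = socket.socketpair()
    b.sendall(b"hello\n")
    print(_Wrapped(a).makefile("rb").readline())  # b'hello\n'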
| 1,417 | Python | 26.26923 | 76 | 0.605505 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/urllib3/packages/backports/weakref_finalize.py | # -*- coding: utf-8 -*-
"""
backports.weakref_finalize
~~~~~~~~~~~~~~~~~~
Backports the Python 3 ``weakref.finalize`` method.
"""
from __future__ import absolute_import
import itertools
import sys
from weakref import ref
__all__ = ["weakref_finalize"]
class weakref_finalize(object):
"""Class for finalization of weakrefable objects
finalize(obj, func, *args, **kwargs) returns a callable finalizer
object which will be called when obj is garbage collected. The
first time the finalizer is called it evaluates func(*arg, **kwargs)
and returns the result. After this the finalizer is dead, and
calling it just returns None.
When the program exits any remaining finalizers for which the
atexit attribute is true will be run in reverse order of creation.
By default atexit is true.
"""
# Finalizer objects don't have any state of their own. They are
# just used as keys to lookup _Info objects in the registry. This
# ensures that they cannot be part of a ref-cycle.
__slots__ = ()
_registry = {}
_shutdown = False
_index_iter = itertools.count()
_dirty = False
_registered_with_atexit = False
class _Info(object):
__slots__ = ("weakref", "func", "args", "kwargs", "atexit", "index")
def __init__(self, obj, func, *args, **kwargs):
if not self._registered_with_atexit:
# We may register the exit function more than once because
# of a thread race, but that is harmless
import atexit
atexit.register(self._exitfunc)
weakref_finalize._registered_with_atexit = True
info = self._Info()
info.weakref = ref(obj, self)
info.func = func
info.args = args
info.kwargs = kwargs or None
info.atexit = True
info.index = next(self._index_iter)
self._registry[self] = info
weakref_finalize._dirty = True
def __call__(self, _=None):
"""If alive then mark as dead and return func(*args, **kwargs);
otherwise return None"""
info = self._registry.pop(self, None)
if info and not self._shutdown:
return info.func(*info.args, **(info.kwargs or {}))
def detach(self):
"""If alive then mark as dead and return (obj, func, args, kwargs);
otherwise return None"""
info = self._registry.get(self)
obj = info and info.weakref()
if obj is not None and self._registry.pop(self, None):
return (obj, info.func, info.args, info.kwargs or {})
def peek(self):
"""If alive then return (obj, func, args, kwargs);
otherwise return None"""
info = self._registry.get(self)
obj = info and info.weakref()
if obj is not None:
return (obj, info.func, info.args, info.kwargs or {})
@property
def alive(self):
"""Whether finalizer is alive"""
return self in self._registry
@property
def atexit(self):
"""Whether finalizer should be called at exit"""
info = self._registry.get(self)
return bool(info) and info.atexit
@atexit.setter
def atexit(self, value):
info = self._registry.get(self)
if info:
info.atexit = bool(value)
def __repr__(self):
info = self._registry.get(self)
obj = info and info.weakref()
if obj is None:
return "<%s object at %#x; dead>" % (type(self).__name__, id(self))
else:
return "<%s object at %#x; for %r at %#x>" % (
type(self).__name__,
id(self),
type(obj).__name__,
id(obj),
)
@classmethod
def _select_for_exit(cls):
# Return live finalizers marked for exit, oldest first
L = [(f, i) for (f, i) in cls._registry.items() if i.atexit]
L.sort(key=lambda item: item[1].index)
return [f for (f, i) in L]
@classmethod
def _exitfunc(cls):
# At shutdown invoke finalizers for which atexit is true.
# This is called once all other non-daemonic threads have been
# joined.
reenable_gc = False
try:
if cls._registry:
import gc
if gc.isenabled():
reenable_gc = True
gc.disable()
pending = None
while True:
if pending is None or weakref_finalize._dirty:
pending = cls._select_for_exit()
weakref_finalize._dirty = False
if not pending:
break
f = pending.pop()
try:
# gc is disabled, so (assuming no daemonic
# threads) the following is the only line in
# this function which might trigger creation
# of a new finalizer
f()
except Exception:
sys.excepthook(*sys.exc_info())
assert f not in cls._registry
finally:
# prevent any more finalizers from executing during shutdown
weakref_finalize._shutdown = True
if reenable_gc:
gc.enable()
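if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): attach a cleanup
    # callback to an object. It fires when the object is garbage collected
    # (immediately on ``del`` under CPython's refcounting) or, failing that,
    # at interpreter exit, since ``atexit`` defaults to True.
    class Resource(object):
        pass

    res = Resource()
    finalizer = weakref_finalize(res, print, "resource cleaned up")
    print(finalizer.alive)  # True
    del res                 # "resource cleaned up"
    print(finalizer.alive)  # False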
| 5,343 | Python | 33.25641 | 79 | 0.551563 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/frozenlist/__init__.py | import os
import sys
import types
from collections.abc import MutableSequence
from functools import total_ordering
from typing import Tuple, Type
__version__ = "1.3.3"
__all__ = ("FrozenList", "PyFrozenList") # type: Tuple[str, ...]
NO_EXTENSIONS = bool(os.environ.get("FROZENLIST_NO_EXTENSIONS")) # type: bool
@total_ordering
class FrozenList(MutableSequence):
__slots__ = ("_frozen", "_items")
if sys.version_info >= (3, 9):
__class_getitem__ = classmethod(types.GenericAlias)
else:
@classmethod
def __class_getitem__(cls: Type["FrozenList"]) -> Type["FrozenList"]:
return cls
def __init__(self, items=None):
self._frozen = False
if items is not None:
items = list(items)
else:
items = []
self._items = items
@property
def frozen(self):
return self._frozen
def freeze(self):
self._frozen = True
def __getitem__(self, index):
return self._items[index]
def __setitem__(self, index, value):
if self._frozen:
raise RuntimeError("Cannot modify frozen list.")
self._items[index] = value
def __delitem__(self, index):
if self._frozen:
raise RuntimeError("Cannot modify frozen list.")
del self._items[index]
def __len__(self):
return self._items.__len__()
def __iter__(self):
return self._items.__iter__()
def __reversed__(self):
return self._items.__reversed__()
def __eq__(self, other):
return list(self) == other
def __le__(self, other):
return list(self) <= other
def insert(self, pos, item):
if self._frozen:
raise RuntimeError("Cannot modify frozen list.")
self._items.insert(pos, item)
def __repr__(self):
return f"<FrozenList(frozen={self._frozen}, {self._items!r})>"
def __hash__(self):
if self._frozen:
return hash(tuple(self))
else:
raise RuntimeError("Cannot hash unfrozen list.")
PyFrozenList = FrozenList
try:
from ._frozenlist import FrozenList as CFrozenList # type: ignore
if not NO_EXTENSIONS: # pragma: no cover
FrozenList = CFrozenList # type: ignore
except ImportError: # pragma: no cover
pass
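if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): a FrozenList is
    # an ordinary mutable sequence until freeze() is called; afterwards every
    # mutation raises RuntimeError and the list becomes hashable.
    fl = FrozenList([1, 2])
    fl.append(3)
    fl.freeze()
    try:
        fl.append(4)
    except RuntimeError as exc:
        print(exc)  # Cannot modify frozen list.
    print(hash(fl) == hash((1, 2, 3)))  # True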
| 2,323 | Python | 22.958763 | 78 | 0.583297 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/httptools/_version.py | # This file MUST NOT contain anything but the __version__ assignment.
#
# When making a release, change the value of __version__
# to an appropriate value, and open a pull request against
# the correct branch (master if making a new feature release).
# The commit message MUST contain a properly formatted release
# log, and the commit must be signed.
#
# The release automation will: build and test the packages for the
# supported platforms, publish the packages on PyPI, merge the PR
# to the target branch, create a Git tag pointing to the commit.
__version__ = '0.4.0'
| 575 | Python | 40.142854 | 69 | 0.749565 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/httptools/__init__.py | from . import parser
from .parser import * # NOQA
from ._version import __version__ # NOQA
__all__ = parser.__all__ + ('__version__',) # NOQA
| 147 | Python | 20.142854 | 51 | 0.591837 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/httptools/parser/__init__.py | from .parser import * # NoQA
from .errors import * # NoQA
from .url_parser import * # NoQA
__all__ = parser.__all__ + errors.__all__ + url_parser.__all__ # NoQA
| 166 | Python | 26.833329 | 70 | 0.60241 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/httptools/parser/errors.py | __all__ = ('HttpParserError',
'HttpParserCallbackError',
'HttpParserInvalidStatusError',
'HttpParserInvalidMethodError',
'HttpParserInvalidURLError',
'HttpParserUpgrade')
class HttpParserError(Exception):
pass
class HttpParserCallbackError(HttpParserError):
pass
class HttpParserInvalidStatusError(HttpParserError):
pass
class HttpParserInvalidMethodError(HttpParserError):
pass
class HttpParserInvalidURLError(HttpParserError):
pass
class HttpParserUpgrade(Exception):
pass
| 566 | Python | 17.290322 | 52 | 0.719081 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/_build_tables.py | #-----------------------------------------------------------------
# pycparser: _build_tables.py
#
# A dummy for generating the lexing/parsing tables and
# compiling them into .pyc for faster execution in optimized mode.
# Also generates AST code from the configuration file.
# Should be called from the pycparser directory.
#
# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#-----------------------------------------------------------------
# Insert '.' and '..' as first entries to the search path for modules.
# Restricted environments like embeddable python do not include the
# current working directory on startup.
import sys
sys.path[0:0] = ['.', '..']
# Generate c_ast.py
from _ast_gen import ASTCodeGenerator
ast_gen = ASTCodeGenerator('_c_ast.cfg')
ast_gen.generate(open('c_ast.py', 'w'))
from pycparser import c_parser
# Generates the tables
#
c_parser.CParser(
lex_optimize=True,
yacc_debug=False,
yacc_optimize=True)
# Load to compile into .pyc
#
import lextab
import yacctab
import c_ast
| 1,039 | Python | 26.36842 | 70 | 0.639076 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/_ast_gen.py | #-----------------------------------------------------------------
# _ast_gen.py
#
# Generates the AST Node classes from a specification given in
# a configuration file
#
# The design of this module was inspired by astgen.py from the
# Python 2.5 code-base.
#
# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#-----------------------------------------------------------------
from string import Template
class ASTCodeGenerator(object):
def __init__(self, cfg_filename='_c_ast.cfg'):
""" Initialize the code generator from a configuration
file.
"""
self.cfg_filename = cfg_filename
self.node_cfg = [NodeCfg(name, contents)
for (name, contents) in self.parse_cfgfile(cfg_filename)]
def generate(self, file=None):
""" Generates the code into file, an open file buffer.
"""
src = Template(_PROLOGUE_COMMENT).substitute(
cfg_filename=self.cfg_filename)
src += _PROLOGUE_CODE
for node_cfg in self.node_cfg:
src += node_cfg.generate_source() + '\n\n'
file.write(src)
def parse_cfgfile(self, filename):
""" Parse the configuration file and yield pairs of
(name, contents) for each node.
"""
with open(filename, "r") as f:
for line in f:
line = line.strip()
if not line or line.startswith('#'):
continue
colon_i = line.find(':')
lbracket_i = line.find('[')
rbracket_i = line.find(']')
if colon_i < 1 or lbracket_i <= colon_i or rbracket_i <= lbracket_i:
raise RuntimeError("Invalid line in %s:\n%s\n" % (filename, line))
name = line[:colon_i]
val = line[lbracket_i + 1:rbracket_i]
vallist = [v.strip() for v in val.split(',')] if val else []
yield name, vallist
class NodeCfg(object):
""" Node configuration.
name: node name
contents: a list of contents - attributes and child nodes
See comment at the top of the configuration file for details.
"""
def __init__(self, name, contents):
self.name = name
self.all_entries = []
self.attr = []
self.child = []
self.seq_child = []
for entry in contents:
clean_entry = entry.rstrip('*')
self.all_entries.append(clean_entry)
if entry.endswith('**'):
self.seq_child.append(clean_entry)
elif entry.endswith('*'):
self.child.append(clean_entry)
else:
self.attr.append(entry)
def generate_source(self):
src = self._gen_init()
src += '\n' + self._gen_children()
src += '\n' + self._gen_iter()
src += '\n' + self._gen_attr_names()
return src
def _gen_init(self):
src = "class %s(Node):\n" % self.name
if self.all_entries:
args = ', '.join(self.all_entries)
slots = ', '.join("'{0}'".format(e) for e in self.all_entries)
slots += ", 'coord', '__weakref__'"
arglist = '(self, %s, coord=None)' % args
else:
slots = "'coord', '__weakref__'"
arglist = '(self, coord=None)'
src += " __slots__ = (%s)\n" % slots
src += " def __init__%s:\n" % arglist
for name in self.all_entries + ['coord']:
src += " self.%s = %s\n" % (name, name)
return src
def _gen_children(self):
src = ' def children(self):\n'
if self.all_entries:
src += ' nodelist = []\n'
for child in self.child:
src += (
' if self.%(child)s is not None:' +
' nodelist.append(("%(child)s", self.%(child)s))\n') % (
dict(child=child))
for seq_child in self.seq_child:
src += (
' for i, child in enumerate(self.%(child)s or []):\n'
' nodelist.append(("%(child)s[%%d]" %% i, child))\n') % (
dict(child=seq_child))
src += ' return tuple(nodelist)\n'
else:
src += ' return ()\n'
return src
def _gen_iter(self):
src = ' def __iter__(self):\n'
if self.all_entries:
for child in self.child:
src += (
' if self.%(child)s is not None:\n' +
' yield self.%(child)s\n') % (dict(child=child))
for seq_child in self.seq_child:
src += (
' for child in (self.%(child)s or []):\n'
' yield child\n') % (dict(child=seq_child))
if not (self.child or self.seq_child):
# Empty generator
src += (
' return\n' +
' yield\n')
else:
# Empty generator
src += (
' return\n' +
' yield\n')
return src
def _gen_attr_names(self):
src = " attr_names = (" + ''.join("%r, " % nm for nm in self.attr) + ')'
return src
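# For the BinaryOp entry shown above, generate_source() emits a class of the
# shape found in the generated c_ast.py (abridged sketch):
#
#   class BinaryOp(Node):
#       __slots__ = ('op', 'left', 'right', 'coord', '__weakref__')
#       def __init__(self, op, left, right, coord=None): ...
#       attr_names = ('op', )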
_PROLOGUE_COMMENT = \
r'''#-----------------------------------------------------------------
# ** ATTENTION **
# This code was automatically generated from the file:
# $cfg_filename
#
# Do not modify it directly. Modify the configuration file and
# run the generator again.
# ** ** *** ** **
#
# pycparser: c_ast.py
#
# AST Node classes.
#
# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#-----------------------------------------------------------------
'''
_PROLOGUE_CODE = r'''
import sys
def _repr(obj):
"""
Get the representation of an object, with dedicated pprint-like format for lists.
"""
if isinstance(obj, list):
return '[' + (',\n '.join((_repr(e).replace('\n', '\n ') for e in obj))) + '\n]'
else:
return repr(obj)
class Node(object):
__slots__ = ()
""" Abstract base class for AST nodes.
"""
def __repr__(self):
""" Generates a python representation of the current node
"""
result = self.__class__.__name__ + '('
indent = ''
separator = ''
for name in self.__slots__[:-2]:
result += separator
result += indent
result += name + '=' + (_repr(getattr(self, name)).replace('\n', '\n ' + (' ' * (len(name) + len(self.__class__.__name__)))))
separator = ','
indent = '\n ' + (' ' * len(self.__class__.__name__))
result += indent + ')'
return result
def children(self):
""" A sequence of all children that are Nodes
"""
pass
def show(self, buf=sys.stdout, offset=0, attrnames=False, nodenames=False, showcoord=False, _my_node_name=None):
""" Pretty print the Node and all its attributes and
children (recursively) to a buffer.
buf:
Open IO buffer into which the Node is printed.
offset:
Initial offset (amount of leading spaces)
attrnames:
True if you want to see the attribute names in
name=value pairs. False to only see the values.
nodenames:
True if you want to see the actual node names
within their parents.
            showcoord:
                True if you want the coordinates of each Node to be
                displayed.
"""
lead = ' ' * offset
if nodenames and _my_node_name is not None:
buf.write(lead + self.__class__.__name__+ ' <' + _my_node_name + '>: ')
else:
buf.write(lead + self.__class__.__name__+ ': ')
if self.attr_names:
if attrnames:
nvlist = [(n, getattr(self,n)) for n in self.attr_names]
attrstr = ', '.join('%s=%s' % nv for nv in nvlist)
else:
vlist = [getattr(self, n) for n in self.attr_names]
attrstr = ', '.join('%s' % v for v in vlist)
buf.write(attrstr)
if showcoord:
buf.write(' (at %s)' % self.coord)
buf.write('\n')
for (child_name, child) in self.children():
child.show(
buf,
offset=offset + 2,
attrnames=attrnames,
nodenames=nodenames,
showcoord=showcoord,
_my_node_name=child_name)
class NodeVisitor(object):
""" A base NodeVisitor class for visiting c_ast nodes.
Subclass it and define your own visit_XXX methods, where
XXX is the class name you want to visit with these
methods.
For example:
class ConstantVisitor(NodeVisitor):
def __init__(self):
self.values = []
def visit_Constant(self, node):
self.values.append(node.value)
Creates a list of values of all the constant nodes
encountered below the given node. To use it:
cv = ConstantVisitor()
cv.visit(node)
Notes:
* generic_visit() will be called for AST nodes for which
no visit_XXX method was defined.
* The children of nodes for which a visit_XXX was
defined will not be visited - if you need this, call
generic_visit() on the node.
You can use:
NodeVisitor.generic_visit(self, node)
* Modeled after Python's own AST visiting facilities
(the ast module of Python 3.0)
"""
_method_cache = None
def visit(self, node):
""" Visit a node.
"""
if self._method_cache is None:
self._method_cache = {}
visitor = self._method_cache.get(node.__class__.__name__, None)
if visitor is None:
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
self._method_cache[node.__class__.__name__] = visitor
return visitor(node)
def generic_visit(self, node):
""" Called if no explicit visitor function exists for a
node. Implements preorder visiting of the node.
"""
for c in node:
self.visit(c)
'''
| 10,555 | Python | 30.323442 | 138 | 0.484889 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/c_ast.py | #-----------------------------------------------------------------
# ** ATTENTION **
# This code was automatically generated from the file:
# _c_ast.cfg
#
# Do not modify it directly. Modify the configuration file and
# run the generator again.
# ** ** *** ** **
#
# pycparser: c_ast.py
#
# AST Node classes.
#
# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#-----------------------------------------------------------------
import sys
def _repr(obj):
"""
Get the representation of an object, with dedicated pprint-like format for lists.
"""
if isinstance(obj, list):
return '[' + (',\n '.join((_repr(e).replace('\n', '\n ') for e in obj))) + '\n]'
else:
return repr(obj)
class Node(object):
__slots__ = ()
""" Abstract base class for AST nodes.
"""
def __repr__(self):
""" Generates a python representation of the current node
"""
result = self.__class__.__name__ + '('
indent = ''
separator = ''
for name in self.__slots__[:-2]:
result += separator
result += indent
result += name + '=' + (_repr(getattr(self, name)).replace('\n', '\n ' + (' ' * (len(name) + len(self.__class__.__name__)))))
separator = ','
indent = '\n ' + (' ' * len(self.__class__.__name__))
result += indent + ')'
return result
def children(self):
""" A sequence of all children that are Nodes
"""
pass
def show(self, buf=sys.stdout, offset=0, attrnames=False, nodenames=False, showcoord=False, _my_node_name=None):
""" Pretty print the Node and all its attributes and
children (recursively) to a buffer.
buf:
Open IO buffer into which the Node is printed.
offset:
Initial offset (amount of leading spaces)
attrnames:
True if you want to see the attribute names in
name=value pairs. False to only see the values.
nodenames:
True if you want to see the actual node names
within their parents.
            showcoord:
                True if you want the coordinates of each Node to be
                displayed.
"""
lead = ' ' * offset
if nodenames and _my_node_name is not None:
buf.write(lead + self.__class__.__name__+ ' <' + _my_node_name + '>: ')
else:
buf.write(lead + self.__class__.__name__+ ': ')
if self.attr_names:
if attrnames:
nvlist = [(n, getattr(self,n)) for n in self.attr_names]
attrstr = ', '.join('%s=%s' % nv for nv in nvlist)
else:
vlist = [getattr(self, n) for n in self.attr_names]
attrstr = ', '.join('%s' % v for v in vlist)
buf.write(attrstr)
if showcoord:
buf.write(' (at %s)' % self.coord)
buf.write('\n')
for (child_name, child) in self.children():
child.show(
buf,
offset=offset + 2,
attrnames=attrnames,
nodenames=nodenames,
showcoord=showcoord,
_my_node_name=child_name)
class NodeVisitor(object):
""" A base NodeVisitor class for visiting c_ast nodes.
Subclass it and define your own visit_XXX methods, where
XXX is the class name you want to visit with these
methods.
For example:
class ConstantVisitor(NodeVisitor):
def __init__(self):
self.values = []
def visit_Constant(self, node):
self.values.append(node.value)
Creates a list of values of all the constant nodes
encountered below the given node. To use it:
cv = ConstantVisitor()
cv.visit(node)
Notes:
* generic_visit() will be called for AST nodes for which
no visit_XXX method was defined.
* The children of nodes for which a visit_XXX was
defined will not be visited - if you need this, call
generic_visit() on the node.
You can use:
NodeVisitor.generic_visit(self, node)
* Modeled after Python's own AST visiting facilities
(the ast module of Python 3.0)
"""
_method_cache = None
def visit(self, node):
""" Visit a node.
"""
if self._method_cache is None:
self._method_cache = {}
visitor = self._method_cache.get(node.__class__.__name__, None)
if visitor is None:
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
self._method_cache[node.__class__.__name__] = visitor
return visitor(node)
def generic_visit(self, node):
""" Called if no explicit visitor function exists for a
node. Implements preorder visiting of the node.
"""
for c in node:
self.visit(c)
class ArrayDecl(Node):
__slots__ = ('type', 'dim', 'dim_quals', 'coord', '__weakref__')
def __init__(self, type, dim, dim_quals, coord=None):
self.type = type
self.dim = dim
self.dim_quals = dim_quals
self.coord = coord
def children(self):
nodelist = []
if self.type is not None: nodelist.append(("type", self.type))
if self.dim is not None: nodelist.append(("dim", self.dim))
return tuple(nodelist)
def __iter__(self):
if self.type is not None:
yield self.type
if self.dim is not None:
yield self.dim
attr_names = ('dim_quals', )
class ArrayRef(Node):
__slots__ = ('name', 'subscript', 'coord', '__weakref__')
def __init__(self, name, subscript, coord=None):
self.name = name
self.subscript = subscript
self.coord = coord
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.subscript is not None: nodelist.append(("subscript", self.subscript))
return tuple(nodelist)
def __iter__(self):
if self.name is not None:
yield self.name
if self.subscript is not None:
yield self.subscript
attr_names = ()
class Assignment(Node):
__slots__ = ('op', 'lvalue', 'rvalue', 'coord', '__weakref__')
def __init__(self, op, lvalue, rvalue, coord=None):
self.op = op
self.lvalue = lvalue
self.rvalue = rvalue
self.coord = coord
def children(self):
nodelist = []
if self.lvalue is not None: nodelist.append(("lvalue", self.lvalue))
if self.rvalue is not None: nodelist.append(("rvalue", self.rvalue))
return tuple(nodelist)
def __iter__(self):
if self.lvalue is not None:
yield self.lvalue
if self.rvalue is not None:
yield self.rvalue
attr_names = ('op', )
class Alignas(Node):
__slots__ = ('alignment', 'coord', '__weakref__')
def __init__(self, alignment, coord=None):
self.alignment = alignment
self.coord = coord
def children(self):
nodelist = []
if self.alignment is not None: nodelist.append(("alignment", self.alignment))
return tuple(nodelist)
def __iter__(self):
if self.alignment is not None:
yield self.alignment
attr_names = ()
class BinaryOp(Node):
__slots__ = ('op', 'left', 'right', 'coord', '__weakref__')
def __init__(self, op, left, right, coord=None):
self.op = op
self.left = left
self.right = right
self.coord = coord
def children(self):
nodelist = []
if self.left is not None: nodelist.append(("left", self.left))
if self.right is not None: nodelist.append(("right", self.right))
return tuple(nodelist)
def __iter__(self):
if self.left is not None:
yield self.left
if self.right is not None:
yield self.right
attr_names = ('op', )
class Break(Node):
__slots__ = ('coord', '__weakref__')
def __init__(self, coord=None):
self.coord = coord
def children(self):
return ()
def __iter__(self):
return
yield
attr_names = ()
class Case(Node):
__slots__ = ('expr', 'stmts', 'coord', '__weakref__')
def __init__(self, expr, stmts, coord=None):
self.expr = expr
self.stmts = stmts
self.coord = coord
def children(self):
nodelist = []
if self.expr is not None: nodelist.append(("expr", self.expr))
for i, child in enumerate(self.stmts or []):
nodelist.append(("stmts[%d]" % i, child))
return tuple(nodelist)
def __iter__(self):
if self.expr is not None:
yield self.expr
for child in (self.stmts or []):
yield child
attr_names = ()
class Cast(Node):
__slots__ = ('to_type', 'expr', 'coord', '__weakref__')
def __init__(self, to_type, expr, coord=None):
self.to_type = to_type
self.expr = expr
self.coord = coord
def children(self):
nodelist = []
if self.to_type is not None: nodelist.append(("to_type", self.to_type))
if self.expr is not None: nodelist.append(("expr", self.expr))
return tuple(nodelist)
def __iter__(self):
if self.to_type is not None:
yield self.to_type
if self.expr is not None:
yield self.expr
attr_names = ()
class Compound(Node):
__slots__ = ('block_items', 'coord', '__weakref__')
def __init__(self, block_items, coord=None):
self.block_items = block_items
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.block_items or []):
nodelist.append(("block_items[%d]" % i, child))
return tuple(nodelist)
def __iter__(self):
for child in (self.block_items or []):
yield child
attr_names = ()
class CompoundLiteral(Node):
__slots__ = ('type', 'init', 'coord', '__weakref__')
def __init__(self, type, init, coord=None):
self.type = type
self.init = init
self.coord = coord
def children(self):
nodelist = []
if self.type is not None: nodelist.append(("type", self.type))
if self.init is not None: nodelist.append(("init", self.init))
return tuple(nodelist)
def __iter__(self):
if self.type is not None:
yield self.type
if self.init is not None:
yield self.init
attr_names = ()
class Constant(Node):
__slots__ = ('type', 'value', 'coord', '__weakref__')
def __init__(self, type, value, coord=None):
self.type = type
self.value = value
self.coord = coord
def children(self):
nodelist = []
return tuple(nodelist)
def __iter__(self):
return
yield
attr_names = ('type', 'value', )
class Continue(Node):
__slots__ = ('coord', '__weakref__')
def __init__(self, coord=None):
self.coord = coord
def children(self):
return ()
def __iter__(self):
return
yield
attr_names = ()
class Decl(Node):
__slots__ = ('name', 'quals', 'align', 'storage', 'funcspec', 'type', 'init', 'bitsize', 'coord', '__weakref__')
def __init__(self, name, quals, align, storage, funcspec, type, init, bitsize, coord=None):
self.name = name
self.quals = quals
self.align = align
self.storage = storage
self.funcspec = funcspec
self.type = type
self.init = init
self.bitsize = bitsize
self.coord = coord
def children(self):
nodelist = []
if self.type is not None: nodelist.append(("type", self.type))
if self.init is not None: nodelist.append(("init", self.init))
if self.bitsize is not None: nodelist.append(("bitsize", self.bitsize))
return tuple(nodelist)
def __iter__(self):
if self.type is not None:
yield self.type
if self.init is not None:
yield self.init
if self.bitsize is not None:
yield self.bitsize
attr_names = ('name', 'quals', 'align', 'storage', 'funcspec', )
class DeclList(Node):
__slots__ = ('decls', 'coord', '__weakref__')
def __init__(self, decls, coord=None):
self.decls = decls
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.decls or []):
nodelist.append(("decls[%d]" % i, child))
return tuple(nodelist)
def __iter__(self):
for child in (self.decls or []):
yield child
attr_names = ()
class Default(Node):
__slots__ = ('stmts', 'coord', '__weakref__')
def __init__(self, stmts, coord=None):
self.stmts = stmts
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.stmts or []):
nodelist.append(("stmts[%d]" % i, child))
return tuple(nodelist)
def __iter__(self):
for child in (self.stmts or []):
yield child
attr_names = ()
class DoWhile(Node):
__slots__ = ('cond', 'stmt', 'coord', '__weakref__')
def __init__(self, cond, stmt, coord=None):
self.cond = cond
self.stmt = stmt
self.coord = coord
def children(self):
nodelist = []
if self.cond is not None: nodelist.append(("cond", self.cond))
if self.stmt is not None: nodelist.append(("stmt", self.stmt))
return tuple(nodelist)
def __iter__(self):
if self.cond is not None:
yield self.cond
if self.stmt is not None:
yield self.stmt
attr_names = ()
class EllipsisParam(Node):
__slots__ = ('coord', '__weakref__')
def __init__(self, coord=None):
self.coord = coord
def children(self):
return ()
def __iter__(self):
return
yield
attr_names = ()
class EmptyStatement(Node):
__slots__ = ('coord', '__weakref__')
def __init__(self, coord=None):
self.coord = coord
def children(self):
return ()
def __iter__(self):
return
yield
attr_names = ()
class Enum(Node):
__slots__ = ('name', 'values', 'coord', '__weakref__')
def __init__(self, name, values, coord=None):
self.name = name
self.values = values
self.coord = coord
def children(self):
nodelist = []
if self.values is not None: nodelist.append(("values", self.values))
return tuple(nodelist)
def __iter__(self):
if self.values is not None:
yield self.values
attr_names = ('name', )
class Enumerator(Node):
__slots__ = ('name', 'value', 'coord', '__weakref__')
def __init__(self, name, value, coord=None):
self.name = name
self.value = value
self.coord = coord
def children(self):
nodelist = []
if self.value is not None: nodelist.append(("value", self.value))
return tuple(nodelist)
def __iter__(self):
if self.value is not None:
yield self.value
attr_names = ('name', )
class EnumeratorList(Node):
__slots__ = ('enumerators', 'coord', '__weakref__')
def __init__(self, enumerators, coord=None):
self.enumerators = enumerators
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.enumerators or []):
nodelist.append(("enumerators[%d]" % i, child))
return tuple(nodelist)
def __iter__(self):
for child in (self.enumerators or []):
yield child
attr_names = ()
class ExprList(Node):
__slots__ = ('exprs', 'coord', '__weakref__')
def __init__(self, exprs, coord=None):
self.exprs = exprs
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.exprs or []):
nodelist.append(("exprs[%d]" % i, child))
return tuple(nodelist)
def __iter__(self):
for child in (self.exprs or []):
yield child
attr_names = ()
class FileAST(Node):
__slots__ = ('ext', 'coord', '__weakref__')
def __init__(self, ext, coord=None):
self.ext = ext
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.ext or []):
nodelist.append(("ext[%d]" % i, child))
return tuple(nodelist)
def __iter__(self):
for child in (self.ext or []):
yield child
attr_names = ()
class For(Node):
__slots__ = ('init', 'cond', 'next', 'stmt', 'coord', '__weakref__')
def __init__(self, init, cond, next, stmt, coord=None):
self.init = init
self.cond = cond
self.next = next
self.stmt = stmt
self.coord = coord
def children(self):
nodelist = []
if self.init is not None: nodelist.append(("init", self.init))
if self.cond is not None: nodelist.append(("cond", self.cond))
if self.next is not None: nodelist.append(("next", self.next))
if self.stmt is not None: nodelist.append(("stmt", self.stmt))
return tuple(nodelist)
def __iter__(self):
if self.init is not None:
yield self.init
if self.cond is not None:
yield self.cond
if self.next is not None:
yield self.next
if self.stmt is not None:
yield self.stmt
attr_names = ()
class FuncCall(Node):
__slots__ = ('name', 'args', 'coord', '__weakref__')
def __init__(self, name, args, coord=None):
self.name = name
self.args = args
self.coord = coord
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.args is not None: nodelist.append(("args", self.args))
return tuple(nodelist)
def __iter__(self):
if self.name is not None:
yield self.name
if self.args is not None:
yield self.args
attr_names = ()
class FuncDecl(Node):
__slots__ = ('args', 'type', 'coord', '__weakref__')
def __init__(self, args, type, coord=None):
self.args = args
self.type = type
self.coord = coord
def children(self):
nodelist = []
if self.args is not None: nodelist.append(("args", self.args))
if self.type is not None: nodelist.append(("type", self.type))
return tuple(nodelist)
def __iter__(self):
if self.args is not None:
yield self.args
if self.type is not None:
yield self.type
attr_names = ()
class FuncDef(Node):
__slots__ = ('decl', 'param_decls', 'body', 'coord', '__weakref__')
def __init__(self, decl, param_decls, body, coord=None):
self.decl = decl
self.param_decls = param_decls
self.body = body
self.coord = coord
def children(self):
nodelist = []
if self.decl is not None: nodelist.append(("decl", self.decl))
if self.body is not None: nodelist.append(("body", self.body))
for i, child in enumerate(self.param_decls or []):
nodelist.append(("param_decls[%d]" % i, child))
return tuple(nodelist)
def __iter__(self):
if self.decl is not None:
yield self.decl
if self.body is not None:
yield self.body
for child in (self.param_decls or []):
yield child
attr_names = ()
class Goto(Node):
__slots__ = ('name', 'coord', '__weakref__')
def __init__(self, name, coord=None):
self.name = name
self.coord = coord
def children(self):
nodelist = []
return tuple(nodelist)
def __iter__(self):
return
yield
attr_names = ('name', )
class ID(Node):
__slots__ = ('name', 'coord', '__weakref__')
def __init__(self, name, coord=None):
self.name = name
self.coord = coord
def children(self):
nodelist = []
return tuple(nodelist)
def __iter__(self):
return
yield
attr_names = ('name', )
class IdentifierType(Node):
__slots__ = ('names', 'coord', '__weakref__')
def __init__(self, names, coord=None):
self.names = names
self.coord = coord
def children(self):
nodelist = []
return tuple(nodelist)
def __iter__(self):
return
yield
attr_names = ('names', )
class If(Node):
__slots__ = ('cond', 'iftrue', 'iffalse', 'coord', '__weakref__')
def __init__(self, cond, iftrue, iffalse, coord=None):
self.cond = cond
self.iftrue = iftrue
self.iffalse = iffalse
self.coord = coord
def children(self):
nodelist = []
if self.cond is not None: nodelist.append(("cond", self.cond))
if self.iftrue is not None: nodelist.append(("iftrue", self.iftrue))
if self.iffalse is not None: nodelist.append(("iffalse", self.iffalse))
return tuple(nodelist)
def __iter__(self):
if self.cond is not None:
yield self.cond
if self.iftrue is not None:
yield self.iftrue
if self.iffalse is not None:
yield self.iffalse
attr_names = ()
class InitList(Node):
__slots__ = ('exprs', 'coord', '__weakref__')
def __init__(self, exprs, coord=None):
self.exprs = exprs
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.exprs or []):
nodelist.append(("exprs[%d]" % i, child))
return tuple(nodelist)
def __iter__(self):
for child in (self.exprs or []):
yield child
attr_names = ()
class Label(Node):
__slots__ = ('name', 'stmt', 'coord', '__weakref__')
def __init__(self, name, stmt, coord=None):
self.name = name
self.stmt = stmt
self.coord = coord
def children(self):
nodelist = []
if self.stmt is not None: nodelist.append(("stmt", self.stmt))
return tuple(nodelist)
def __iter__(self):
if self.stmt is not None:
yield self.stmt
attr_names = ('name', )
class NamedInitializer(Node):
__slots__ = ('name', 'expr', 'coord', '__weakref__')
def __init__(self, name, expr, coord=None):
self.name = name
self.expr = expr
self.coord = coord
def children(self):
nodelist = []
if self.expr is not None: nodelist.append(("expr", self.expr))
for i, child in enumerate(self.name or []):
nodelist.append(("name[%d]" % i, child))
return tuple(nodelist)
def __iter__(self):
if self.expr is not None:
yield self.expr
for child in (self.name or []):
yield child
attr_names = ()
class ParamList(Node):
__slots__ = ('params', 'coord', '__weakref__')
def __init__(self, params, coord=None):
self.params = params
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.params or []):
nodelist.append(("params[%d]" % i, child))
return tuple(nodelist)
def __iter__(self):
for child in (self.params or []):
yield child
attr_names = ()
class PtrDecl(Node):
__slots__ = ('quals', 'type', 'coord', '__weakref__')
def __init__(self, quals, type, coord=None):
self.quals = quals
self.type = type
self.coord = coord
def children(self):
nodelist = []
if self.type is not None: nodelist.append(("type", self.type))
return tuple(nodelist)
def __iter__(self):
if self.type is not None:
yield self.type
attr_names = ('quals', )
class Return(Node):
__slots__ = ('expr', 'coord', '__weakref__')
def __init__(self, expr, coord=None):
self.expr = expr
self.coord = coord
def children(self):
nodelist = []
if self.expr is not None: nodelist.append(("expr", self.expr))
return tuple(nodelist)
def __iter__(self):
if self.expr is not None:
yield self.expr
attr_names = ()
class StaticAssert(Node):
__slots__ = ('cond', 'message', 'coord', '__weakref__')
def __init__(self, cond, message, coord=None):
self.cond = cond
self.message = message
self.coord = coord
def children(self):
nodelist = []
if self.cond is not None: nodelist.append(("cond", self.cond))
if self.message is not None: nodelist.append(("message", self.message))
return tuple(nodelist)
def __iter__(self):
if self.cond is not None:
yield self.cond
if self.message is not None:
yield self.message
attr_names = ()
class Struct(Node):
__slots__ = ('name', 'decls', 'coord', '__weakref__')
def __init__(self, name, decls, coord=None):
self.name = name
self.decls = decls
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.decls or []):
nodelist.append(("decls[%d]" % i, child))
return tuple(nodelist)
def __iter__(self):
for child in (self.decls or []):
yield child
attr_names = ('name', )
class StructRef(Node):
__slots__ = ('name', 'type', 'field', 'coord', '__weakref__')
def __init__(self, name, type, field, coord=None):
self.name = name
self.type = type
self.field = field
self.coord = coord
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.field is not None: nodelist.append(("field", self.field))
return tuple(nodelist)
def __iter__(self):
if self.name is not None:
yield self.name
if self.field is not None:
yield self.field
attr_names = ('type', )
class Switch(Node):
__slots__ = ('cond', 'stmt', 'coord', '__weakref__')
def __init__(self, cond, stmt, coord=None):
self.cond = cond
self.stmt = stmt
self.coord = coord
def children(self):
nodelist = []
if self.cond is not None: nodelist.append(("cond", self.cond))
if self.stmt is not None: nodelist.append(("stmt", self.stmt))
return tuple(nodelist)
def __iter__(self):
if self.cond is not None:
yield self.cond
if self.stmt is not None:
yield self.stmt
attr_names = ()
class TernaryOp(Node):
__slots__ = ('cond', 'iftrue', 'iffalse', 'coord', '__weakref__')
def __init__(self, cond, iftrue, iffalse, coord=None):
self.cond = cond
self.iftrue = iftrue
self.iffalse = iffalse
self.coord = coord
def children(self):
nodelist = []
if self.cond is not None: nodelist.append(("cond", self.cond))
if self.iftrue is not None: nodelist.append(("iftrue", self.iftrue))
if self.iffalse is not None: nodelist.append(("iffalse", self.iffalse))
return tuple(nodelist)
def __iter__(self):
if self.cond is not None:
yield self.cond
if self.iftrue is not None:
yield self.iftrue
if self.iffalse is not None:
yield self.iffalse
attr_names = ()
class TypeDecl(Node):
__slots__ = ('declname', 'quals', 'align', 'type', 'coord', '__weakref__')
def __init__(self, declname, quals, align, type, coord=None):
self.declname = declname
self.quals = quals
self.align = align
self.type = type
self.coord = coord
def children(self):
nodelist = []
if self.type is not None: nodelist.append(("type", self.type))
return tuple(nodelist)
def __iter__(self):
if self.type is not None:
yield self.type
attr_names = ('declname', 'quals', 'align', )
class Typedef(Node):
__slots__ = ('name', 'quals', 'storage', 'type', 'coord', '__weakref__')
def __init__(self, name, quals, storage, type, coord=None):
self.name = name
self.quals = quals
self.storage = storage
self.type = type
self.coord = coord
def children(self):
nodelist = []
if self.type is not None: nodelist.append(("type", self.type))
return tuple(nodelist)
def __iter__(self):
if self.type is not None:
yield self.type
attr_names = ('name', 'quals', 'storage', )
class Typename(Node):
__slots__ = ('name', 'quals', 'align', 'type', 'coord', '__weakref__')
def __init__(self, name, quals, align, type, coord=None):
self.name = name
self.quals = quals
self.align = align
self.type = type
self.coord = coord
def children(self):
nodelist = []
if self.type is not None: nodelist.append(("type", self.type))
return tuple(nodelist)
def __iter__(self):
if self.type is not None:
yield self.type
attr_names = ('name', 'quals', 'align', )
class UnaryOp(Node):
__slots__ = ('op', 'expr', 'coord', '__weakref__')
def __init__(self, op, expr, coord=None):
self.op = op
self.expr = expr
self.coord = coord
def children(self):
nodelist = []
if self.expr is not None: nodelist.append(("expr", self.expr))
return tuple(nodelist)
def __iter__(self):
if self.expr is not None:
yield self.expr
attr_names = ('op', )
class Union(Node):
__slots__ = ('name', 'decls', 'coord', '__weakref__')
def __init__(self, name, decls, coord=None):
self.name = name
self.decls = decls
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.decls or []):
nodelist.append(("decls[%d]" % i, child))
return tuple(nodelist)
def __iter__(self):
for child in (self.decls or []):
yield child
attr_names = ('name', )
class While(Node):
__slots__ = ('cond', 'stmt', 'coord', '__weakref__')
def __init__(self, cond, stmt, coord=None):
self.cond = cond
self.stmt = stmt
self.coord = coord
def children(self):
nodelist = []
if self.cond is not None: nodelist.append(("cond", self.cond))
if self.stmt is not None: nodelist.append(("stmt", self.stmt))
return tuple(nodelist)
def __iter__(self):
if self.cond is not None:
yield self.cond
if self.stmt is not None:
yield self.stmt
attr_names = ()
class Pragma(Node):
__slots__ = ('string', 'coord', '__weakref__')
def __init__(self, string, coord=None):
self.string = string
self.coord = coord
def children(self):
nodelist = []
return tuple(nodelist)
def __iter__(self):
return
yield
attr_names = ('string', )
| 31,445 | Python | 26.927176 | 138 | 0.536747 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/__init__.py | #-----------------------------------------------------------------
# pycparser: __init__.py
#
# This package file exports some convenience functions for
# interacting with pycparser
#
# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#-----------------------------------------------------------------
__all__ = ['c_lexer', 'c_parser', 'c_ast']
__version__ = '2.21'
import io
from subprocess import check_output
from .c_parser import CParser
def preprocess_file(filename, cpp_path='cpp', cpp_args=''):
""" Preprocess a file using cpp.
filename:
Name of the file you want to preprocess.
cpp_path:
cpp_args:
Refer to the documentation of parse_file for the meaning of these
arguments.
When successful, returns the preprocessed file's contents.
Errors from cpp will be printed out.
"""
path_list = [cpp_path]
if isinstance(cpp_args, list):
path_list += cpp_args
elif cpp_args != '':
path_list += [cpp_args]
path_list += [filename]
try:
# Note the use of universal_newlines to treat all newlines
# as \n for Python's purpose
text = check_output(path_list, universal_newlines=True)
except OSError as e:
raise RuntimeError("Unable to invoke 'cpp'. " +
'Make sure its path was passed correctly\n' +
('Original error: %s' % e))
return text
def parse_file(filename, use_cpp=False, cpp_path='cpp', cpp_args='',
parser=None):
""" Parse a C file using pycparser.
filename:
Name of the file you want to parse.
use_cpp:
Set to True if you want to execute the C pre-processor
on the file prior to parsing it.
cpp_path:
If use_cpp is True, this is the path to 'cpp' on your
system. If no path is provided, it attempts to just
execute 'cpp', so it must be in your PATH.
cpp_args:
If use_cpp is True, set this to the command line arguments strings
to cpp. Be careful with quotes - it's best to pass a raw string
(r'') here. For example:
r'-I../utils/fake_libc_include'
If several arguments are required, pass a list of strings.
parser:
Optional parser object to be used instead of the default CParser
When successful, an AST is returned. ParseError can be
thrown if the file doesn't parse successfully.
Errors from cpp will be printed out.
"""
if use_cpp:
text = preprocess_file(filename, cpp_path, cpp_args)
else:
with io.open(filename) as f:
text = f.read()
if parser is None:
parser = CParser()
return parser.parse(text, filename)
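# Usage sketch (illustrative; 'example.c' and the include path are
# placeholders -- substitute your own C source file and cpp arguments):
#
# from pycparser import parse_file
# ast = parse_file('example.c', use_cpp=True,
# cpp_args=r'-I../utils/fake_libc_include')
# ast.show()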
| 2,815 | Python | 29.945055 | 78 | 0.572647 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/c_lexer.py | #------------------------------------------------------------------------------
# pycparser: c_lexer.py
#
# CLexer class: lexer for the C language
#
# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#------------------------------------------------------------------------------
import re
from .ply import lex
from .ply.lex import TOKEN
class CLexer(object):
""" A lexer for the C language. After building it, set the
input text with input(), and call token() to get new
tokens.
The public attribute filename can be set to an initial
filename, but the lexer will update it upon #line
directives.
"""
def __init__(self, error_func, on_lbrace_func, on_rbrace_func,
type_lookup_func):
""" Create a new Lexer.
error_func:
An error function. Will be called with an error
message, line and column as arguments, in case of
an error during lexing.
on_lbrace_func, on_rbrace_func:
Called when an LBRACE or RBRACE is encountered
(likely to push/pop type_lookup_func's scope)
type_lookup_func:
A type lookup function. Given a string, it must
return True IFF this string is a name of a type
that was defined with a typedef earlier.
"""
self.error_func = error_func
self.on_lbrace_func = on_lbrace_func
self.on_rbrace_func = on_rbrace_func
self.type_lookup_func = type_lookup_func
self.filename = ''
# Keeps track of the last token returned from self.token()
self.last_token = None
# Allow either "# line" or "# <num>" to support GCC's
# cpp output
#
self.line_pattern = re.compile(r'([ \t]*line\W)|([ \t]*\d+)')
self.pragma_pattern = re.compile(r'[ \t]*pragma\W')
def build(self, **kwargs):
""" Builds the lexer from the specification. Must be
called after the lexer object is created.
This method exists separately, because the PLY
manual warns against calling lex.lex inside
__init__
"""
self.lexer = lex.lex(object=self, **kwargs)
def reset_lineno(self):
""" Resets the internal line number counter of the lexer.
"""
self.lexer.lineno = 1
def input(self, text):
self.lexer.input(text)
def token(self):
self.last_token = self.lexer.token()
return self.last_token
def find_tok_column(self, token):
""" Find the column of the token in its line.
"""
last_cr = self.lexer.lexdata.rfind('\n', 0, token.lexpos)
return token.lexpos - last_cr
######################-- PRIVATE --######################
##
## Internal auxiliary methods
##
def _error(self, msg, token):
location = self._make_tok_location(token)
self.error_func(msg, location[0], location[1])
self.lexer.skip(1)
def _make_tok_location(self, token):
return (token.lineno, self.find_tok_column(token))
##
## Reserved keywords
##
keywords = (
'AUTO', 'BREAK', 'CASE', 'CHAR', 'CONST',
'CONTINUE', 'DEFAULT', 'DO', 'DOUBLE', 'ELSE', 'ENUM', 'EXTERN',
'FLOAT', 'FOR', 'GOTO', 'IF', 'INLINE', 'INT', 'LONG',
'REGISTER', 'OFFSETOF',
'RESTRICT', 'RETURN', 'SHORT', 'SIGNED', 'SIZEOF', 'STATIC', 'STRUCT',
'SWITCH', 'TYPEDEF', 'UNION', 'UNSIGNED', 'VOID',
'VOLATILE', 'WHILE', '__INT128',
)
keywords_new = (
'_BOOL', '_COMPLEX',
'_NORETURN', '_THREAD_LOCAL', '_STATIC_ASSERT',
'_ATOMIC', '_ALIGNOF', '_ALIGNAS',
)
keyword_map = {}
for keyword in keywords:
keyword_map[keyword.lower()] = keyword
for keyword in keywords_new:
keyword_map[keyword[:2].upper() + keyword[2:].lower()] = keyword
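    # The map is keyed by each keyword's spelling in C source, e.g.
    # keyword_map['auto'] == 'AUTO' and keyword_map['_Bool'] == '_BOOL'.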
##
## All the tokens recognized by the lexer
##
tokens = keywords + keywords_new + (
# Identifiers
'ID',
# Type identifiers (identifiers previously defined as
# types with typedef)
'TYPEID',
# constants
'INT_CONST_DEC', 'INT_CONST_OCT', 'INT_CONST_HEX', 'INT_CONST_BIN', 'INT_CONST_CHAR',
'FLOAT_CONST', 'HEX_FLOAT_CONST',
'CHAR_CONST',
'WCHAR_CONST',
'U8CHAR_CONST',
'U16CHAR_CONST',
'U32CHAR_CONST',
# String literals
'STRING_LITERAL',
'WSTRING_LITERAL',
'U8STRING_LITERAL',
'U16STRING_LITERAL',
'U32STRING_LITERAL',
# Operators
'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD',
'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
'LOR', 'LAND', 'LNOT',
'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
# Assignment
'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL',
'PLUSEQUAL', 'MINUSEQUAL',
'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL',
'OREQUAL',
# Increment/decrement
'PLUSPLUS', 'MINUSMINUS',
# Structure dereference (->)
'ARROW',
# Conditional operator (?)
'CONDOP',
# Delimiters
'LPAREN', 'RPAREN', # ( )
'LBRACKET', 'RBRACKET', # [ ]
'LBRACE', 'RBRACE', # { }
        'COMMA', 'PERIOD',          # , .
'SEMI', 'COLON', # ; :
# Ellipsis (...)
'ELLIPSIS',
# pre-processor
'PPHASH', # '#'
'PPPRAGMA', # 'pragma'
'PPPRAGMASTR',
)
##
## Regexes for use in tokens
##
##
# valid C identifiers (K&R2: A.2.3), plus '$' (supported by some compilers)
identifier = r'[a-zA-Z_$][0-9a-zA-Z_$]*'
hex_prefix = '0[xX]'
hex_digits = '[0-9a-fA-F]+'
bin_prefix = '0[bB]'
bin_digits = '[01]+'
# integer constants (K&R2: A.2.5.1)
integer_suffix_opt = r'(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?'
decimal_constant = '(0'+integer_suffix_opt+')|([1-9][0-9]*'+integer_suffix_opt+')'
octal_constant = '0[0-7]*'+integer_suffix_opt
hex_constant = hex_prefix+hex_digits+integer_suffix_opt
bin_constant = bin_prefix+bin_digits+integer_suffix_opt
bad_octal_constant = '0[0-7]*[89]'
# character constants (K&R2: A.2.5.2)
# Note: a-zA-Z and '.-~^_!=&;,' are allowed as escape chars to support #line
# directives with Windows paths as filenames (..\..\dir\file)
# For the same reason, decimal_escape allows all digit sequences. We want to
    # parse all correct code, even if that means sometimes accepting incorrect
    # code as well.
#
# The original regexes were taken verbatim from the C syntax definition,
# and were later modified to avoid worst-case exponential running time.
#
# simple_escape = r"""([a-zA-Z._~!=&\^\-\\?'"])"""
# decimal_escape = r"""(\d+)"""
# hex_escape = r"""(x[0-9a-fA-F]+)"""
# bad_escape = r"""([\\][^a-zA-Z._~^!=&\^\-\\?'"x0-7])"""
#
# The following modifications were made to avoid the ambiguity that allowed backtracking:
# (https://github.com/eliben/pycparser/issues/61)
#
# - \x was removed from simple_escape, unless it was not followed by a hex digit, to avoid ambiguity with hex_escape.
# - hex_escape allows one or more hex characters, but requires that the next character(if any) is not hex
# - decimal_escape allows one or more decimal characters, but requires that the next character(if any) is not a decimal
# - bad_escape does not allow any decimals (8-9), to avoid conflicting with the permissive decimal_escape.
#
# Without this change, python's `re` module would recursively try parsing each ambiguous escape sequence in multiple ways.
# e.g. `\123` could be parsed as `\1`+`23`, `\12`+`3`, and `\123`.
simple_escape = r"""([a-wyzA-Z._~!=&\^\-\\?'"]|x(?![0-9a-fA-F]))"""
decimal_escape = r"""(\d+)(?!\d)"""
hex_escape = r"""(x[0-9a-fA-F]+)(?![0-9a-fA-F])"""
bad_escape = r"""([\\][^a-zA-Z._~^!=&\^\-\\?'"x0-9])"""
escape_sequence = r"""(\\("""+simple_escape+'|'+decimal_escape+'|'+hex_escape+'))'
    # This complicated regex with lookahead might be slow for strings, so, because all of the valid escapes (including \x) allow
    # zero or more non-escaped characters after the first character, simple_escape+decimal_escape+hex_escape was simplified to
escape_sequence_start_in_string = r"""(\\[0-9a-zA-Z._~!=&\^\-\\?'"])"""
cconst_char = r"""([^'\\\n]|"""+escape_sequence+')'
char_const = "'"+cconst_char+"'"
wchar_const = 'L'+char_const
u8char_const = 'u8'+char_const
u16char_const = 'u'+char_const
u32char_const = 'U'+char_const
multicharacter_constant = "'"+cconst_char+"{2,4}'"
unmatched_quote = "('"+cconst_char+"*\\n)|('"+cconst_char+"*$)"
bad_char_const = r"""('"""+cconst_char+"""[^'\n]+')|('')|('"""+bad_escape+r"""[^'\n]*')"""
# string literals (K&R2: A.2.6)
string_char = r"""([^"\\\n]|"""+escape_sequence_start_in_string+')'
string_literal = '"'+string_char+'*"'
wstring_literal = 'L'+string_literal
u8string_literal = 'u8'+string_literal
u16string_literal = 'u'+string_literal
u32string_literal = 'U'+string_literal
bad_string_literal = '"'+string_char+'*'+bad_escape+string_char+'*"'
# floating constants (K&R2: A.2.5.3)
exponent_part = r"""([eE][-+]?[0-9]+)"""
fractional_constant = r"""([0-9]*\.[0-9]+)|([0-9]+\.)"""
floating_constant = '(((('+fractional_constant+')'+exponent_part+'?)|([0-9]+'+exponent_part+'))[FfLl]?)'
binary_exponent_part = r'''([pP][+-]?[0-9]+)'''
hex_fractional_constant = '((('+hex_digits+r""")?\."""+hex_digits+')|('+hex_digits+r"""\.))"""
hex_floating_constant = '('+hex_prefix+'('+hex_digits+'|'+hex_fractional_constant+')'+binary_exponent_part+'[FfLl]?)'
##
## Lexer states: used for preprocessor \n-terminated directives
##
states = (
# ppline: preprocessor line directives
#
('ppline', 'exclusive'),
# pppragma: pragma
#
('pppragma', 'exclusive'),
)
def t_PPHASH(self, t):
r'[ \t]*\#'
if self.line_pattern.match(t.lexer.lexdata, pos=t.lexer.lexpos):
t.lexer.begin('ppline')
self.pp_line = self.pp_filename = None
elif self.pragma_pattern.match(t.lexer.lexdata, pos=t.lexer.lexpos):
t.lexer.begin('pppragma')
else:
t.type = 'PPHASH'
return t
##
## Rules for the ppline state
##
@TOKEN(string_literal)
def t_ppline_FILENAME(self, t):
if self.pp_line is None:
self._error('filename before line number in #line', t)
else:
self.pp_filename = t.value.lstrip('"').rstrip('"')
@TOKEN(decimal_constant)
def t_ppline_LINE_NUMBER(self, t):
if self.pp_line is None:
self.pp_line = t.value
else:
# Ignore: GCC's cpp sometimes inserts a numeric flag
# after the file name
pass
def t_ppline_NEWLINE(self, t):
r'\n'
if self.pp_line is None:
self._error('line number missing in #line', t)
else:
self.lexer.lineno = int(self.pp_line)
if self.pp_filename is not None:
self.filename = self.pp_filename
t.lexer.begin('INITIAL')
def t_ppline_PPLINE(self, t):
r'line'
pass
t_ppline_ignore = ' \t'
def t_ppline_error(self, t):
self._error('invalid #line directive', t)
##
## Rules for the pppragma state
##
def t_pppragma_NEWLINE(self, t):
r'\n'
t.lexer.lineno += 1
t.lexer.begin('INITIAL')
def t_pppragma_PPPRAGMA(self, t):
r'pragma'
return t
t_pppragma_ignore = ' \t'
def t_pppragma_STR(self, t):
'.+'
t.type = 'PPPRAGMASTR'
return t
def t_pppragma_error(self, t):
self._error('invalid #pragma directive', t)
##
## Rules for the normal state
##
t_ignore = ' \t'
# Newlines
def t_NEWLINE(self, t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MOD = r'%'
t_OR = r'\|'
t_AND = r'&'
t_NOT = r'~'
t_XOR = r'\^'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_LOR = r'\|\|'
t_LAND = r'&&'
t_LNOT = r'!'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='
# Assignment operators
t_EQUALS = r'='
t_TIMESEQUAL = r'\*='
t_DIVEQUAL = r'/='
t_MODEQUAL = r'%='
t_PLUSEQUAL = r'\+='
t_MINUSEQUAL = r'-='
t_LSHIFTEQUAL = r'<<='
t_RSHIFTEQUAL = r'>>='
t_ANDEQUAL = r'&='
t_OREQUAL = r'\|='
t_XOREQUAL = r'\^='
# Increment/decrement
t_PLUSPLUS = r'\+\+'
t_MINUSMINUS = r'--'
# ->
t_ARROW = r'->'
# ?
t_CONDOP = r'\?'
# Delimiters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_COMMA = r','
t_PERIOD = r'\.'
t_SEMI = r';'
t_COLON = r':'
t_ELLIPSIS = r'\.\.\.'
# Scope delimiters
# To see why on_lbrace_func is needed, consider:
# typedef char TT;
# void foo(int TT) { TT = 10; }
# TT x = 5;
# Outside the function, TT is a typedef, but inside (starting and ending
# with the braces) it's a parameter. The trouble begins with yacc's
# lookahead token. If we open a new scope in brace_open, then TT has
# already been read and incorrectly interpreted as TYPEID. So, we need
# to open and close scopes from within the lexer.
# Similar for the TT immediately outside the end of the function.
#
@TOKEN(r'\{')
def t_LBRACE(self, t):
self.on_lbrace_func()
return t
@TOKEN(r'\}')
def t_RBRACE(self, t):
self.on_rbrace_func()
return t
t_STRING_LITERAL = string_literal
# The following floating and integer constants are defined as
# functions to impose a strict order (otherwise, decimal
# is placed before the others because its regex is longer,
# and this is bad)
#
@TOKEN(floating_constant)
def t_FLOAT_CONST(self, t):
return t
@TOKEN(hex_floating_constant)
def t_HEX_FLOAT_CONST(self, t):
return t
@TOKEN(hex_constant)
def t_INT_CONST_HEX(self, t):
return t
@TOKEN(bin_constant)
def t_INT_CONST_BIN(self, t):
return t
@TOKEN(bad_octal_constant)
def t_BAD_CONST_OCT(self, t):
msg = "Invalid octal constant"
self._error(msg, t)
@TOKEN(octal_constant)
def t_INT_CONST_OCT(self, t):
return t
@TOKEN(decimal_constant)
def t_INT_CONST_DEC(self, t):
return t
# Must come before bad_char_const, to prevent it from
# catching valid char constants as invalid
#
@TOKEN(multicharacter_constant)
def t_INT_CONST_CHAR(self, t):
return t
@TOKEN(char_const)
def t_CHAR_CONST(self, t):
return t
@TOKEN(wchar_const)
def t_WCHAR_CONST(self, t):
return t
@TOKEN(u8char_const)
def t_U8CHAR_CONST(self, t):
return t
@TOKEN(u16char_const)
def t_U16CHAR_CONST(self, t):
return t
@TOKEN(u32char_const)
def t_U32CHAR_CONST(self, t):
return t
@TOKEN(unmatched_quote)
def t_UNMATCHED_QUOTE(self, t):
msg = "Unmatched '"
self._error(msg, t)
@TOKEN(bad_char_const)
def t_BAD_CHAR_CONST(self, t):
msg = "Invalid char constant %s" % t.value
self._error(msg, t)
@TOKEN(wstring_literal)
def t_WSTRING_LITERAL(self, t):
return t
@TOKEN(u8string_literal)
def t_U8STRING_LITERAL(self, t):
return t
@TOKEN(u16string_literal)
def t_U16STRING_LITERAL(self, t):
return t
@TOKEN(u32string_literal)
def t_U32STRING_LITERAL(self, t):
return t
# unmatched string literals are caught by the preprocessor
@TOKEN(bad_string_literal)
def t_BAD_STRING_LITERAL(self, t):
msg = "String contains invalid escape code"
self._error(msg, t)
@TOKEN(identifier)
def t_ID(self, t):
t.type = self.keyword_map.get(t.value, "ID")
if t.type == 'ID' and self.type_lookup_func(t.value):
t.type = "TYPEID"
return t
def t_error(self, t):
msg = 'Illegal character %s' % repr(t.value[0])
self._error(msg, t)
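# Standalone usage sketch (illustrative -- CParser wires these callbacks up
# for real; the no-op lambdas below are placeholders):
#
# clex = CLexer(error_func=lambda msg, line, col: None,
# on_lbrace_func=lambda: None,
# on_rbrace_func=lambda: None,
# type_lookup_func=lambda name: False)
# clex.build()
# clex.input('int x = 42;')
# tok = clex.token()
# while tok is not None:
# print(tok)
# tok = clex.token()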
| 17,167 | Python | 29.933333 | 129 | 0.528747 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/c_parser.py | #------------------------------------------------------------------------------
# pycparser: c_parser.py
#
# CParser class: Parser and AST builder for the C language
#
# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#------------------------------------------------------------------------------
from .ply import yacc
from . import c_ast
from .c_lexer import CLexer
from .plyparser import PLYParser, ParseError, parameterized, template
from .ast_transforms import fix_switch_cases, fix_atomic_specifiers
@template
class CParser(PLYParser):
def __init__(
self,
lex_optimize=True,
lexer=CLexer,
lextab='pycparser.lextab',
yacc_optimize=True,
yacctab='pycparser.yacctab',
yacc_debug=False,
taboutputdir=''):
""" Create a new CParser.
Some arguments for controlling the debug/optimization
level of the parser are provided. The defaults are
tuned for release/performance mode.
The simple rules for using them are:
*) When tweaking CParser/CLexer, set these to False
*) When releasing a stable parser, set to True
lex_optimize:
Set to False when you're modifying the lexer.
Otherwise, changes in the lexer won't be used, if
some lextab.py file exists.
When releasing with a stable lexer, set to True
to save the re-generation of the lexer table on
each run.
lexer:
Set this parameter to define the lexer to use if
you're not using the default CLexer.
lextab:
Points to the lex table that's used for optimized
mode. Only if you're modifying the lexer and want
some tests to avoid re-generating the table, make
this point to a local lex table file (that's been
earlier generated with lex_optimize=True)
yacc_optimize:
Set to False when you're modifying the parser.
Otherwise, changes in the parser won't be used, if
some parsetab.py file exists.
When releasing with a stable parser, set to True
to save the re-generation of the parser table on
each run.
yacctab:
Points to the yacc table that's used for optimized
mode. Only if you're modifying the parser, make
this point to a local yacc table file
yacc_debug:
Generate a parser.out file that explains how yacc
built the parsing table from the grammar.
taboutputdir:
Set this parameter to control the location of generated
lextab and yacctab files.
"""
self.clex = lexer(
error_func=self._lex_error_func,
on_lbrace_func=self._lex_on_lbrace_func,
on_rbrace_func=self._lex_on_rbrace_func,
type_lookup_func=self._lex_type_lookup_func)
self.clex.build(
optimize=lex_optimize,
lextab=lextab,
outputdir=taboutputdir)
self.tokens = self.clex.tokens
rules_with_opt = [
'abstract_declarator',
'assignment_expression',
'declaration_list',
'declaration_specifiers_no_type',
'designation',
'expression',
'identifier_list',
'init_declarator_list',
'id_init_declarator_list',
'initializer_list',
'parameter_type_list',
'block_item_list',
'type_qualifier_list',
'struct_declarator_list'
]
for rule in rules_with_opt:
self._create_opt_rule(rule)
self.cparser = yacc.yacc(
module=self,
start='translation_unit_or_empty',
debug=yacc_debug,
optimize=yacc_optimize,
tabmodule=yacctab,
outputdir=taboutputdir)
# Stack of scopes for keeping track of symbols. _scope_stack[-1] is
# the current (topmost) scope. Each scope is a dictionary that
# specifies whether a name is a type. If _scope_stack[n][name] is
# True, 'name' is currently a type in the scope. If it's False,
# 'name' is used in the scope but not as a type (for instance, if we
        # saw: int name;)
# If 'name' is not a key in _scope_stack[n] then 'name' was not defined
# in this scope at all.
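        # For example (a sketch): after "typedef int T;" the innermost scope
        # maps 'T' -> True; after a plain "int T;" in a nested scope, that
        # scope maps 'T' -> False, shadowing the typedef.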
self._scope_stack = [dict()]
# Keeps track of the last token given to yacc (the lookahead token)
self._last_yielded_token = None
def parse(self, text, filename='', debug=False):
""" Parses C code and returns an AST.
text:
A string containing the C source code
filename:
Name of the file being parsed (for meaningful
error messages)
debug:
Debug flag to YACC
"""
self.clex.filename = filename
self.clex.reset_lineno()
self._scope_stack = [dict()]
self._last_yielded_token = None
return self.cparser.parse(
input=text,
lexer=self.clex,
debug=debug)
######################-- PRIVATE --######################
def _push_scope(self):
self._scope_stack.append(dict())
def _pop_scope(self):
assert len(self._scope_stack) > 1
self._scope_stack.pop()
def _add_typedef_name(self, name, coord):
""" Add a new typedef name (ie a TYPEID) to the current scope
"""
if not self._scope_stack[-1].get(name, True):
self._parse_error(
"Typedef %r previously declared as non-typedef "
"in this scope" % name, coord)
self._scope_stack[-1][name] = True
def _add_identifier(self, name, coord):
""" Add a new object, function, or enum member name (ie an ID) to the
current scope
"""
if self._scope_stack[-1].get(name, False):
self._parse_error(
"Non-typedef %r previously declared as typedef "
"in this scope" % name, coord)
self._scope_stack[-1][name] = False
def _is_type_in_scope(self, name):
""" Is *name* a typedef-name in the current scope?
"""
for scope in reversed(self._scope_stack):
# If name is an identifier in this scope it shadows typedefs in
# higher scopes.
in_scope = scope.get(name)
if in_scope is not None: return in_scope
return False
def _lex_error_func(self, msg, line, column):
self._parse_error(msg, self._coord(line, column))
def _lex_on_lbrace_func(self):
self._push_scope()
def _lex_on_rbrace_func(self):
self._pop_scope()
def _lex_type_lookup_func(self, name):
""" Looks up types that were previously defined with
typedef.
Passed to the lexer for recognizing identifiers that
are types.
"""
is_type = self._is_type_in_scope(name)
return is_type
def _get_yacc_lookahead_token(self):
""" We need access to yacc's lookahead token in certain cases.
This is the last token yacc requested from the lexer, so we
ask the lexer.
"""
return self.clex.last_token
# To understand what's going on here, read sections A.8.5 and
# A.8.6 of K&R2 very carefully.
#
# A C type consists of a basic type declaration, with a list
# of modifiers. For example:
#
# int *c[5];
#
# The basic declaration here is 'int c', and the pointer and
# the array are the modifiers.
#
# Basic declarations are represented by TypeDecl (from module c_ast) and the
# modifiers are FuncDecl, PtrDecl and ArrayDecl.
#
# The standard states that whenever a new modifier is parsed, it should be
# added to the end of the list of modifiers. For example:
#
# K&R2 A.8.6.2: Array Declarators
#
# In a declaration T D where D has the form
# D1 [constant-expression-opt]
# and the type of the identifier in the declaration T D1 is
# "type-modifier T", the type of the
# identifier of D is "type-modifier array of T"
#
# This is what this method does. The declarator it receives
# can be a list of declarators ending with TypeDecl. It
# tacks the modifier to the end of this list, just before
# the TypeDecl.
#
# Additionally, the modifier may be a list itself. This is
    # useful for pointers, which can come as a chain from the rule
# p_pointer. In this case, the whole modifier list is spliced
# into the new location.
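    #
    # For example (illustrative): for
    #   int *c[5];
    # the declarator first yields TypeDecl('c'); the array modifier is
    # tacked on by the direct-declarator rule, and the pointer is then
    # spliced in just before the TypeDecl, producing
    #   ArrayDecl -> PtrDecl -> TypeDecl('c') -> IdentifierType(['int'])
    # i.e. "c is an array of 5 pointers to int".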
def _type_modify_decl(self, decl, modifier):
""" Tacks a type modifier on a declarator, and returns
the modified declarator.
Note: the declarator and modifier may be modified
"""
#~ print '****'
#~ decl.show(offset=3)
#~ modifier.show(offset=3)
#~ print '****'
modifier_head = modifier
modifier_tail = modifier
# The modifier may be a nested list. Reach its tail.
while modifier_tail.type:
modifier_tail = modifier_tail.type
# If the decl is a basic type, just tack the modifier onto it.
if isinstance(decl, c_ast.TypeDecl):
modifier_tail.type = decl
return modifier
else:
# Otherwise, the decl is a list of modifiers. Reach
# its tail and splice the modifier onto the tail,
# pointing to the underlying basic type.
decl_tail = decl
while not isinstance(decl_tail.type, c_ast.TypeDecl):
decl_tail = decl_tail.type
modifier_tail.type = decl_tail.type
decl_tail.type = modifier_head
return decl
# Due to the order in which declarators are constructed,
# they have to be fixed in order to look like a normal AST.
#
# When a declaration arrives from syntax construction, it has
# these problems:
# * The innermost TypeDecl has no type (because the basic
# type is only known at the uppermost declaration level)
# * The declaration has no variable name, since that is saved
# in the innermost TypeDecl
# * The typename of the declaration is a list of type
# specifiers, and not a node. Here, basic identifier types
# should be separated from more complex types like enums
# and structs.
#
# This method fixes these problems.
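    #
    # For example (illustrative): for "unsigned int x;" the declaration
    # arrives as a nameless Decl whose inner TypeDecl carries declname 'x'
    # and no type, with [IdentifierType(['unsigned']), IdentifierType(['int'])]
    # as the specifier list; this method moves 'x' up to the Decl and fuses
    # the specifiers into a single IdentifierType(['unsigned', 'int']).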
def _fix_decl_name_type(self, decl, typename):
""" Fixes a declaration. Modifies decl.
"""
# Reach the underlying basic type
#
type = decl
while not isinstance(type, c_ast.TypeDecl):
type = type.type
decl.name = type.declname
type.quals = decl.quals[:]
# The typename is a list of types. If any type in this
# list isn't an IdentifierType, it must be the only
        # type in the list (it's illegal to declare "int enum ...")
# If all the types are basic, they're collected in the
# IdentifierType holder.
for tn in typename:
if not isinstance(tn, c_ast.IdentifierType):
if len(typename) > 1:
self._parse_error(
"Invalid multiple types specified", tn.coord)
else:
type.type = tn
return decl
if not typename:
# Functions default to returning int
#
if not isinstance(decl.type, c_ast.FuncDecl):
self._parse_error(
"Missing type in declaration", decl.coord)
type.type = c_ast.IdentifierType(
['int'],
coord=decl.coord)
else:
# At this point, we know that typename is a list of IdentifierType
# nodes. Concatenate all the names into a single list.
#
type.type = c_ast.IdentifierType(
[name for id in typename for name in id.names],
coord=typename[0].coord)
return decl
def _add_declaration_specifier(self, declspec, newspec, kind, append=False):
""" Declaration specifiers are represented by a dictionary
with the entries:
* qual: a list of type qualifiers
* storage: a list of storage type qualifiers
* type: a list of type specifiers
* function: a list of function specifiers
* alignment: a list of alignment specifiers
This method is given a declaration specifier, and a
new specifier of a given kind.
If `append` is True, the new specifier is added to the end of
the specifiers list, otherwise it's added at the beginning.
Returns the declaration specifier, with the new
specifier incorporated.
"""
spec = declspec or dict(qual=[], storage=[], type=[], function=[], alignment=[])
if append:
spec[kind].append(newspec)
else:
spec[kind].insert(0, newspec)
return spec
def _build_declarations(self, spec, decls, typedef_namespace=False):
""" Builds a list of declarations all sharing the given specifiers.
If typedef_namespace is true, each declared name is added
to the "typedef namespace", which also includes objects,
functions, and enum constants.
"""
is_typedef = 'typedef' in spec['storage']
declarations = []
# Bit-fields are allowed to be unnamed.
if decls[0].get('bitsize') is not None:
pass
# When redeclaring typedef names as identifiers in inner scopes, a
# problem can occur where the identifier gets grouped into
# spec['type'], leaving decl as None. This can only occur for the
# first declarator.
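        # For example (an illustrative sketch): after "typedef char TT;", an
        # inner scope may declare "unsigned TT;", redeclaring TT as a plain
        # identifier; the TYPEID token TT then ends up in spec['type'] and is
        # recovered below as the declared name.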
elif decls[0]['decl'] is None:
if len(spec['type']) < 2 or len(spec['type'][-1].names) != 1 or \
not self._is_type_in_scope(spec['type'][-1].names[0]):
coord = '?'
for t in spec['type']:
if hasattr(t, 'coord'):
coord = t.coord
break
self._parse_error('Invalid declaration', coord)
# Make this look as if it came from "direct_declarator:ID"
decls[0]['decl'] = c_ast.TypeDecl(
declname=spec['type'][-1].names[0],
type=None,
quals=None,
align=spec['alignment'],
coord=spec['type'][-1].coord)
# Remove the "new" type's name from the end of spec['type']
del spec['type'][-1]
# A similar problem can occur where the declaration ends up looking
# like an abstract declarator. Give it a name if this is the case.
elif not isinstance(decls[0]['decl'], (
c_ast.Enum, c_ast.Struct, c_ast.Union, c_ast.IdentifierType)):
decls_0_tail = decls[0]['decl']
while not isinstance(decls_0_tail, c_ast.TypeDecl):
decls_0_tail = decls_0_tail.type
if decls_0_tail.declname is None:
decls_0_tail.declname = spec['type'][-1].names[0]
del spec['type'][-1]
for decl in decls:
assert decl['decl'] is not None
if is_typedef:
declaration = c_ast.Typedef(
name=None,
quals=spec['qual'],
storage=spec['storage'],
type=decl['decl'],
coord=decl['decl'].coord)
else:
declaration = c_ast.Decl(
name=None,
quals=spec['qual'],
align=spec['alignment'],
storage=spec['storage'],
funcspec=spec['function'],
type=decl['decl'],
init=decl.get('init'),
bitsize=decl.get('bitsize'),
coord=decl['decl'].coord)
if isinstance(declaration.type, (
c_ast.Enum, c_ast.Struct, c_ast.Union,
c_ast.IdentifierType)):
fixed_decl = declaration
else:
fixed_decl = self._fix_decl_name_type(declaration, spec['type'])
# Add the type name defined by typedef to a
# symbol table (for usage in the lexer)
if typedef_namespace:
if is_typedef:
self._add_typedef_name(fixed_decl.name, fixed_decl.coord)
else:
self._add_identifier(fixed_decl.name, fixed_decl.coord)
fixed_decl = fix_atomic_specifiers(fixed_decl)
declarations.append(fixed_decl)
return declarations
def _build_function_definition(self, spec, decl, param_decls, body):
""" Builds a function definition.
"""
if 'typedef' in spec['storage']:
self._parse_error("Invalid typedef", decl.coord)
declaration = self._build_declarations(
spec=spec,
decls=[dict(decl=decl, init=None)],
typedef_namespace=True)[0]
return c_ast.FuncDef(
decl=declaration,
param_decls=param_decls,
body=body,
coord=decl.coord)
def _select_struct_union_class(self, token):
""" Given a token (either STRUCT or UNION), selects the
appropriate AST class.
"""
if token == 'struct':
return c_ast.Struct
else:
return c_ast.Union
##
## Precedence and associativity of operators
##
# If this changes, c_generator.CGenerator.precedence_map needs to change as
# well
precedence = (
('left', 'LOR'),
('left', 'LAND'),
('left', 'OR'),
('left', 'XOR'),
('left', 'AND'),
('left', 'EQ', 'NE'),
('left', 'GT', 'GE', 'LT', 'LE'),
('left', 'RSHIFT', 'LSHIFT'),
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE', 'MOD')
)
##
## Grammar productions
## Implementation of the BNF defined in K&R2 A.13
##
# Wrapper around a translation unit, to allow for empty input.
# Not strictly part of the C99 Grammar, but useful in practice.
def p_translation_unit_or_empty(self, p):
""" translation_unit_or_empty : translation_unit
| empty
"""
if p[1] is None:
p[0] = c_ast.FileAST([])
else:
p[0] = c_ast.FileAST(p[1])
def p_translation_unit_1(self, p):
""" translation_unit : external_declaration
"""
# Note: external_declaration is already a list
p[0] = p[1]
def p_translation_unit_2(self, p):
""" translation_unit : translation_unit external_declaration
"""
p[1].extend(p[2])
p[0] = p[1]
# Declarations always come as lists (because they can be
# several in one line), so we wrap the function definition
# into a list as well, to make the return value of
# external_declaration homogeneous.
def p_external_declaration_1(self, p):
""" external_declaration : function_definition
"""
p[0] = [p[1]]
def p_external_declaration_2(self, p):
""" external_declaration : declaration
"""
p[0] = p[1]
def p_external_declaration_3(self, p):
""" external_declaration : pp_directive
| pppragma_directive
"""
p[0] = [p[1]]
def p_external_declaration_4(self, p):
""" external_declaration : SEMI
"""
p[0] = []
def p_external_declaration_5(self, p):
""" external_declaration : static_assert
"""
p[0] = p[1]
def p_static_assert_declaration(self, p):
""" static_assert : _STATIC_ASSERT LPAREN constant_expression COMMA unified_string_literal RPAREN
| _STATIC_ASSERT LPAREN constant_expression RPAREN
"""
if len(p) == 5:
p[0] = [c_ast.StaticAssert(p[3], None, self._token_coord(p, 1))]
else:
p[0] = [c_ast.StaticAssert(p[3], p[5], self._token_coord(p, 1))]
def p_pp_directive(self, p):
""" pp_directive : PPHASH
"""
self._parse_error('Directives not supported yet',
self._token_coord(p, 1))
def p_pppragma_directive(self, p):
""" pppragma_directive : PPPRAGMA
| PPPRAGMA PPPRAGMASTR
"""
if len(p) == 3:
p[0] = c_ast.Pragma(p[2], self._token_coord(p, 2))
else:
p[0] = c_ast.Pragma("", self._token_coord(p, 1))
# In function definitions, the declarator can be followed by
    # a declaration list, for old "K&R style" function definitions.
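    #
    # For example (illustrative, old K&R style):
    #   int add(a, b)
    #       int a, b;
    #   { return a + b; }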
def p_function_definition_1(self, p):
""" function_definition : id_declarator declaration_list_opt compound_statement
"""
# no declaration specifiers - 'int' becomes the default type
spec = dict(
qual=[],
alignment=[],
storage=[],
type=[c_ast.IdentifierType(['int'],
coord=self._token_coord(p, 1))],
function=[])
p[0] = self._build_function_definition(
spec=spec,
decl=p[1],
param_decls=p[2],
body=p[3])
def p_function_definition_2(self, p):
""" function_definition : declaration_specifiers id_declarator declaration_list_opt compound_statement
"""
spec = p[1]
p[0] = self._build_function_definition(
spec=spec,
decl=p[2],
param_decls=p[3],
body=p[4])
    # Note: according to C18 A.2.2 (6.7.10, static_assert-declaration),
    # _Static_assert is a declaration, not a statement. We additionally
    # recognize it as a statement to support parsing _Static_assert inside
    # function bodies.
#
def p_statement(self, p):
""" statement : labeled_statement
| expression_statement
| compound_statement
| selection_statement
| iteration_statement
| jump_statement
| pppragma_directive
| static_assert
"""
p[0] = p[1]
# A pragma is generally considered a decorator rather than an actual
# statement. Still, for the purposes of analyzing an abstract syntax tree of
    # C code, pragmas should not be ignored and were previously treated as a
# statement. This presents a problem for constructs that take a statement
# such as labeled_statements, selection_statements, and
# iteration_statements, causing a misleading structure in the AST. For
# example, consider the following C code.
#
# for (int i = 0; i < 3; i++)
# #pragma omp critical
# sum += 1;
#
# This code will compile and execute "sum += 1;" as the body of the for
    # loop. Previous implementations of pycparser would render the AST for this
# block of code as follows:
#
# For:
# DeclList:
# Decl: i, [], [], []
# TypeDecl: i, []
# IdentifierType: ['int']
# Constant: int, 0
# BinaryOp: <
# ID: i
# Constant: int, 3
# UnaryOp: p++
# ID: i
# Pragma: omp critical
# Assignment: +=
# ID: sum
# Constant: int, 1
#
# This AST misleadingly takes the Pragma as the body of the loop and the
# assignment then becomes a sibling of the loop.
#
# To solve edge cases like these, the pragmacomp_or_statement rule groups
# a pragma and its following statement (which would otherwise be orphaned)
# using a compound block, effectively turning the above code into:
#
# for (int i = 0; i < 3; i++) {
# #pragma omp critical
# sum += 1;
# }
def p_pragmacomp_or_statement(self, p):
""" pragmacomp_or_statement : pppragma_directive statement
| statement
"""
if isinstance(p[1], c_ast.Pragma) and len(p) == 3:
p[0] = c_ast.Compound(
block_items=[p[1], p[2]],
coord=self._token_coord(p, 1))
else:
p[0] = p[1]
# In C, declarations can come several in a line:
# int x, *px, romulo = 5;
#
# However, for the AST, we will split them to separate Decl
# nodes.
#
# This rule splits its declarations and always returns a list
# of Decl nodes, even if it's one element long.
#
def p_decl_body(self, p):
""" decl_body : declaration_specifiers init_declarator_list_opt
| declaration_specifiers_no_type id_init_declarator_list_opt
"""
spec = p[1]
# p[2] (init_declarator_list_opt) is either a list or None
#
if p[2] is None:
# By the standard, you must have at least one declarator unless
# declaring a structure tag, a union tag, or the members of an
# enumeration.
#
ty = spec['type']
s_u_or_e = (c_ast.Struct, c_ast.Union, c_ast.Enum)
if len(ty) == 1 and isinstance(ty[0], s_u_or_e):
decls = [c_ast.Decl(
name=None,
quals=spec['qual'],
align=spec['alignment'],
storage=spec['storage'],
funcspec=spec['function'],
type=ty[0],
init=None,
bitsize=None,
coord=ty[0].coord)]
# However, this case can also occur on redeclared identifiers in
# an inner scope. The trouble is that the redeclared type's name
# gets grouped into declaration_specifiers; _build_declarations
# compensates for this.
#
else:
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=None, init=None)],
typedef_namespace=True)
else:
decls = self._build_declarations(
spec=spec,
decls=p[2],
typedef_namespace=True)
p[0] = decls
# The declaration has been split to a decl_body sub-rule and
# SEMI, because having them in a single rule created a problem
# for defining typedefs.
#
# If a typedef line was directly followed by a line using the
# type defined with the typedef, the type would not be
# recognized. This is because to reduce the declaration rule,
# the parser's lookahead asked for the token after SEMI, which
# was the type from the next line, and the lexer had no chance
# to see the updated type symbol table.
#
# Splitting solves this problem, because after seeing SEMI,
# the parser reduces decl_body, which actually adds the new
# type into the table to be seen by the lexer before the next
# line is reached.
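    #
    # For example (illustrative):
    #   typedef int T;
    #   T x;
    # Without the split, the parser would have requested the lookahead
    # token (the second line's "T") before reducing the first declaration,
    # so the lexer would still have seen "T" as a plain ID, not a TYPEID.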
def p_declaration(self, p):
""" declaration : decl_body SEMI
"""
p[0] = p[1]
# Since each declaration is a list of declarations, this
# rule will combine all the declarations and return a single
# list
#
def p_declaration_list(self, p):
""" declaration_list : declaration
| declaration_list declaration
"""
p[0] = p[1] if len(p) == 2 else p[1] + p[2]
# To know when declaration-specifiers end and declarators begin,
# we require declaration-specifiers to have at least one
# type-specifier, and disallow typedef-names after we've seen any
# type-specifier. These are both required by the spec.
#
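    # For example (an illustrative sketch): given "typedef int T;", the
    # inner-scope declaration "T T;" parses the first T as a type-specifier
    # and the second T as the declared name, because typedef-names stop
    # being accepted once a type-specifier has been seen.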
def p_declaration_specifiers_no_type_1(self, p):
""" declaration_specifiers_no_type : type_qualifier declaration_specifiers_no_type_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'qual')
def p_declaration_specifiers_no_type_2(self, p):
""" declaration_specifiers_no_type : storage_class_specifier declaration_specifiers_no_type_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'storage')
def p_declaration_specifiers_no_type_3(self, p):
""" declaration_specifiers_no_type : function_specifier declaration_specifiers_no_type_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'function')
    # Without this, `typedef _Atomic(T) U` will parse incorrectly, because the
    # _Atomic qualifier will match instead of the specifier.
def p_declaration_specifiers_no_type_4(self, p):
""" declaration_specifiers_no_type : atomic_specifier declaration_specifiers_no_type_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'type')
def p_declaration_specifiers_no_type_5(self, p):
""" declaration_specifiers_no_type : alignment_specifier declaration_specifiers_no_type_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'alignment')
def p_declaration_specifiers_1(self, p):
""" declaration_specifiers : declaration_specifiers type_qualifier
"""
p[0] = self._add_declaration_specifier(p[1], p[2], 'qual', append=True)
def p_declaration_specifiers_2(self, p):
""" declaration_specifiers : declaration_specifiers storage_class_specifier
"""
p[0] = self._add_declaration_specifier(p[1], p[2], 'storage', append=True)
def p_declaration_specifiers_3(self, p):
""" declaration_specifiers : declaration_specifiers function_specifier
"""
p[0] = self._add_declaration_specifier(p[1], p[2], 'function', append=True)
def p_declaration_specifiers_4(self, p):
""" declaration_specifiers : declaration_specifiers type_specifier_no_typeid
"""
p[0] = self._add_declaration_specifier(p[1], p[2], 'type', append=True)
def p_declaration_specifiers_5(self, p):
""" declaration_specifiers : type_specifier
"""
p[0] = self._add_declaration_specifier(None, p[1], 'type')
def p_declaration_specifiers_6(self, p):
""" declaration_specifiers : declaration_specifiers_no_type type_specifier
"""
p[0] = self._add_declaration_specifier(p[1], p[2], 'type', append=True)
def p_declaration_specifiers_7(self, p):
""" declaration_specifiers : declaration_specifiers alignment_specifier
"""
p[0] = self._add_declaration_specifier(p[1], p[2], 'alignment', append=True)
def p_storage_class_specifier(self, p):
""" storage_class_specifier : AUTO
| REGISTER
| STATIC
| EXTERN
| TYPEDEF
| _THREAD_LOCAL
"""
p[0] = p[1]
def p_function_specifier(self, p):
""" function_specifier : INLINE
| _NORETURN
"""
p[0] = p[1]
def p_type_specifier_no_typeid(self, p):
""" type_specifier_no_typeid : VOID
| _BOOL
| CHAR
| SHORT
| INT
| LONG
| FLOAT
| DOUBLE
| _COMPLEX
| SIGNED
| UNSIGNED
| __INT128
"""
p[0] = c_ast.IdentifierType([p[1]], coord=self._token_coord(p, 1))
def p_type_specifier(self, p):
""" type_specifier : typedef_name
| enum_specifier
| struct_or_union_specifier
| type_specifier_no_typeid
| atomic_specifier
"""
p[0] = p[1]
# See section 6.7.2.4 of the C11 standard.
def p_atomic_specifier(self, p):
""" atomic_specifier : _ATOMIC LPAREN type_name RPAREN
"""
typ = p[3]
typ.quals.append('_Atomic')
p[0] = typ
def p_type_qualifier(self, p):
""" type_qualifier : CONST
| RESTRICT
| VOLATILE
| _ATOMIC
"""
p[0] = p[1]
def p_init_declarator_list(self, p):
""" init_declarator_list : init_declarator
| init_declarator_list COMMA init_declarator
"""
p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
# Returns a {decl=<declarator> : init=<initializer>} dictionary
# If there's no initializer, uses None
#
def p_init_declarator(self, p):
""" init_declarator : declarator
| declarator EQUALS initializer
"""
p[0] = dict(decl=p[1], init=(p[3] if len(p) > 2 else None))
def p_id_init_declarator_list(self, p):
""" id_init_declarator_list : id_init_declarator
| id_init_declarator_list COMMA init_declarator
"""
p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
def p_id_init_declarator(self, p):
""" id_init_declarator : id_declarator
| id_declarator EQUALS initializer
"""
p[0] = dict(decl=p[1], init=(p[3] if len(p) > 2 else None))
# Require at least one type specifier in a specifier-qualifier-list
#
def p_specifier_qualifier_list_1(self, p):
""" specifier_qualifier_list : specifier_qualifier_list type_specifier_no_typeid
"""
p[0] = self._add_declaration_specifier(p[1], p[2], 'type', append=True)
def p_specifier_qualifier_list_2(self, p):
""" specifier_qualifier_list : specifier_qualifier_list type_qualifier
"""
p[0] = self._add_declaration_specifier(p[1], p[2], 'qual', append=True)
def p_specifier_qualifier_list_3(self, p):
""" specifier_qualifier_list : type_specifier
"""
p[0] = self._add_declaration_specifier(None, p[1], 'type')
def p_specifier_qualifier_list_4(self, p):
""" specifier_qualifier_list : type_qualifier_list type_specifier
"""
p[0] = dict(qual=p[1], alignment=[], storage=[], type=[p[2]], function=[])
def p_specifier_qualifier_list_5(self, p):
""" specifier_qualifier_list : alignment_specifier
"""
p[0] = dict(qual=[], alignment=[p[1]], storage=[], type=[], function=[])
def p_specifier_qualifier_list_6(self, p):
""" specifier_qualifier_list : specifier_qualifier_list alignment_specifier
"""
p[0] = self._add_declaration_specifier(p[1], p[2], 'alignment')
# TYPEID is allowed here (and in other struct/enum related tag names), because
# struct/enum tags reside in their own namespace and can be named the same as types
#
def p_struct_or_union_specifier_1(self, p):
""" struct_or_union_specifier : struct_or_union ID
| struct_or_union TYPEID
"""
klass = self._select_struct_union_class(p[1])
# None means no list of members
p[0] = klass(
name=p[2],
decls=None,
coord=self._token_coord(p, 2))
def p_struct_or_union_specifier_2(self, p):
""" struct_or_union_specifier : struct_or_union brace_open struct_declaration_list brace_close
| struct_or_union brace_open brace_close
"""
klass = self._select_struct_union_class(p[1])
if len(p) == 4:
# Empty sequence means an empty list of members
p[0] = klass(
name=None,
decls=[],
coord=self._token_coord(p, 2))
else:
p[0] = klass(
name=None,
decls=p[3],
coord=self._token_coord(p, 2))
def p_struct_or_union_specifier_3(self, p):
""" struct_or_union_specifier : struct_or_union ID brace_open struct_declaration_list brace_close
| struct_or_union ID brace_open brace_close
| struct_or_union TYPEID brace_open struct_declaration_list brace_close
| struct_or_union TYPEID brace_open brace_close
"""
klass = self._select_struct_union_class(p[1])
if len(p) == 5:
# Empty sequence means an empty list of members
p[0] = klass(
name=p[2],
decls=[],
coord=self._token_coord(p, 2))
else:
p[0] = klass(
name=p[2],
decls=p[4],
coord=self._token_coord(p, 2))
def p_struct_or_union(self, p):
""" struct_or_union : STRUCT
| UNION
"""
p[0] = p[1]
# Combine all declarations into a single list
#
def p_struct_declaration_list(self, p):
""" struct_declaration_list : struct_declaration
| struct_declaration_list struct_declaration
"""
if len(p) == 2:
p[0] = p[1] or []
else:
p[0] = p[1] + (p[2] or [])
def p_struct_declaration_1(self, p):
""" struct_declaration : specifier_qualifier_list struct_declarator_list_opt SEMI
"""
spec = p[1]
assert 'typedef' not in spec['storage']
if p[2] is not None:
decls = self._build_declarations(
spec=spec,
decls=p[2])
elif len(spec['type']) == 1:
# Anonymous struct/union, gcc extension, C1x feature.
# Although the standard only allows structs/unions here, I see no
# reason to disallow other types since some compilers have typedefs
# here, and pycparser isn't about rejecting all invalid code.
#
node = spec['type'][0]
if isinstance(node, c_ast.Node):
decl_type = node
else:
decl_type = c_ast.IdentifierType(node)
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=decl_type)])
else:
# Structure/union members can have the same names as typedefs.
# The trouble is that the member's name gets grouped into
# specifier_qualifier_list; _build_declarations compensates.
#
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=None, init=None)])
p[0] = decls
def p_struct_declaration_2(self, p):
""" struct_declaration : SEMI
"""
p[0] = None
def p_struct_declaration_3(self, p):
""" struct_declaration : pppragma_directive
"""
p[0] = [p[1]]
def p_struct_declarator_list(self, p):
""" struct_declarator_list : struct_declarator
| struct_declarator_list COMMA struct_declarator
"""
p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
# struct_declarator passes up a dict with the keys: decl (for
# the underlying declarator) and bitsize (for the bitsize)
#
def p_struct_declarator_1(self, p):
""" struct_declarator : declarator
"""
p[0] = {'decl': p[1], 'bitsize': None}
def p_struct_declarator_2(self, p):
""" struct_declarator : declarator COLON constant_expression
| COLON constant_expression
"""
if len(p) > 3:
p[0] = {'decl': p[1], 'bitsize': p[3]}
else:
p[0] = {'decl': c_ast.TypeDecl(None, None, None, None), 'bitsize': p[2]}
def p_enum_specifier_1(self, p):
""" enum_specifier : ENUM ID
| ENUM TYPEID
"""
p[0] = c_ast.Enum(p[2], None, self._token_coord(p, 1))
def p_enum_specifier_2(self, p):
""" enum_specifier : ENUM brace_open enumerator_list brace_close
"""
p[0] = c_ast.Enum(None, p[3], self._token_coord(p, 1))
def p_enum_specifier_3(self, p):
""" enum_specifier : ENUM ID brace_open enumerator_list brace_close
| ENUM TYPEID brace_open enumerator_list brace_close
"""
p[0] = c_ast.Enum(p[2], p[4], self._token_coord(p, 1))
def p_enumerator_list(self, p):
""" enumerator_list : enumerator
| enumerator_list COMMA
| enumerator_list COMMA enumerator
"""
if len(p) == 2:
p[0] = c_ast.EnumeratorList([p[1]], p[1].coord)
elif len(p) == 3:
p[0] = p[1]
else:
p[1].enumerators.append(p[3])
p[0] = p[1]
def p_alignment_specifier(self, p):
""" alignment_specifier : _ALIGNAS LPAREN type_name RPAREN
| _ALIGNAS LPAREN constant_expression RPAREN
"""
p[0] = c_ast.Alignas(p[3], self._token_coord(p, 1))
def p_enumerator(self, p):
""" enumerator : ID
| ID EQUALS constant_expression
"""
if len(p) == 2:
enumerator = c_ast.Enumerator(
p[1], None,
self._token_coord(p, 1))
else:
enumerator = c_ast.Enumerator(
p[1], p[3],
self._token_coord(p, 1))
self._add_identifier(enumerator.name, enumerator.coord)
p[0] = enumerator
def p_declarator(self, p):
""" declarator : id_declarator
| typeid_declarator
"""
p[0] = p[1]
@parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
def p_xxx_declarator_1(self, p):
""" xxx_declarator : direct_xxx_declarator
"""
p[0] = p[1]
@parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
def p_xxx_declarator_2(self, p):
""" xxx_declarator : pointer direct_xxx_declarator
"""
p[0] = self._type_modify_decl(p[2], p[1])
@parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
def p_direct_xxx_declarator_1(self, p):
""" direct_xxx_declarator : yyy
"""
p[0] = c_ast.TypeDecl(
declname=p[1],
type=None,
quals=None,
align=None,
coord=self._token_coord(p, 1))
@parameterized(('id', 'ID'), ('typeid', 'TYPEID'))
def p_direct_xxx_declarator_2(self, p):
""" direct_xxx_declarator : LPAREN xxx_declarator RPAREN
"""
p[0] = p[2]
@parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
def p_direct_xxx_declarator_3(self, p):
""" direct_xxx_declarator : direct_xxx_declarator LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET
"""
quals = (p[3] if len(p) > 5 else []) or []
# Accept dimension qualifiers
# Per C99 6.7.5.3 p7
arr = c_ast.ArrayDecl(
type=None,
dim=p[4] if len(p) > 5 else p[3],
dim_quals=quals,
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
@parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
def p_direct_xxx_declarator_4(self, p):
""" direct_xxx_declarator : direct_xxx_declarator LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET
| direct_xxx_declarator LBRACKET type_qualifier_list STATIC assignment_expression RBRACKET
"""
# Using slice notation for PLY objects doesn't work in Python 3 for the
# version of PLY embedded with pycparser; see PLY Google Code issue 30.
# Work around that here by listing the two elements separately.
        listed_quals = [item if isinstance(item, list) else [item]
                        for item in [p[3], p[4]]]
dim_quals = [qual for sublist in listed_quals for qual in sublist
if qual is not None]
arr = c_ast.ArrayDecl(
type=None,
dim=p[5],
dim_quals=dim_quals,
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
# Special for VLAs
#
@parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
def p_direct_xxx_declarator_5(self, p):
""" direct_xxx_declarator : direct_xxx_declarator LBRACKET type_qualifier_list_opt TIMES RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=c_ast.ID(p[4], self._token_coord(p, 4)),
dim_quals=p[3] if p[3] is not None else [],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
@parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
def p_direct_xxx_declarator_6(self, p):
""" direct_xxx_declarator : direct_xxx_declarator LPAREN parameter_type_list RPAREN
| direct_xxx_declarator LPAREN identifier_list_opt RPAREN
"""
func = c_ast.FuncDecl(
args=p[3],
type=None,
coord=p[1].coord)
# To see why _get_yacc_lookahead_token is needed, consider:
# typedef char TT;
# void foo(int TT) { TT = 10; }
# Outside the function, TT is a typedef, but inside (starting and
# ending with the braces) it's a parameter. The trouble begins with
# yacc's lookahead token. We don't know if we're declaring or
# defining a function until we see LBRACE, but if we wait for yacc to
# trigger a rule on that token, then TT will have already been read
# and incorrectly interpreted as TYPEID. We need to add the
# parameters to the scope the moment the lexer sees LBRACE.
#
if self._get_yacc_lookahead_token().type == "LBRACE":
if func.args is not None:
for param in func.args.params:
if isinstance(param, c_ast.EllipsisParam): break
self._add_identifier(param.name, param.coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=func)
def p_pointer(self, p):
""" pointer : TIMES type_qualifier_list_opt
| TIMES type_qualifier_list_opt pointer
"""
coord = self._token_coord(p, 1)
# Pointer decls nest from inside out. This is important when different
# levels have different qualifiers. For example:
#
# char * const * p;
#
# Means "pointer to const pointer to char"
#
# While:
#
# char ** const p;
#
# Means "const pointer to pointer to char"
#
# So when we construct PtrDecl nestings, the leftmost pointer goes in
# as the most nested type.
nested_type = c_ast.PtrDecl(quals=p[2] or [], type=None, coord=coord)
if len(p) > 3:
tail_type = p[3]
while tail_type.type is not None:
tail_type = tail_type.type
tail_type.type = nested_type
p[0] = p[3]
else:
p[0] = nested_type
def p_type_qualifier_list(self, p):
""" type_qualifier_list : type_qualifier
| type_qualifier_list type_qualifier
"""
p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]]
def p_parameter_type_list(self, p):
""" parameter_type_list : parameter_list
| parameter_list COMMA ELLIPSIS
"""
if len(p) > 2:
p[1].params.append(c_ast.EllipsisParam(self._token_coord(p, 3)))
p[0] = p[1]
def p_parameter_list(self, p):
""" parameter_list : parameter_declaration
| parameter_list COMMA parameter_declaration
"""
if len(p) == 2: # single parameter
p[0] = c_ast.ParamList([p[1]], p[1].coord)
else:
p[1].params.append(p[3])
p[0] = p[1]
# From ISO/IEC 9899:TC2, 6.7.5.3.11:
# "If, in a parameter declaration, an identifier can be treated either
# as a typedef name or as a parameter name, it shall be taken as a
# typedef name."
#
# Inside a parameter declaration, once we've reduced declaration specifiers,
# if we shift in an LPAREN and see a TYPEID, it could be either an abstract
# declarator or a declarator nested inside parens. This rule tells us to
# always treat it as an abstract declarator. Therefore, we only accept
# `id_declarator`s and `typeid_noparen_declarator`s.
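    #
    # For example (an illustrative sketch): given "typedef int T;", in
    #   void f(int (T));
    # the parenthesized T is taken as a typedef-name, so the parameter is
    # an unnamed "function taking a T, returning int" rather than a
    # parameter named T of type int.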
def p_parameter_declaration_1(self, p):
""" parameter_declaration : declaration_specifiers id_declarator
| declaration_specifiers typeid_noparen_declarator
"""
spec = p[1]
if not spec['type']:
spec['type'] = [c_ast.IdentifierType(['int'],
coord=self._token_coord(p, 1))]
p[0] = self._build_declarations(
spec=spec,
decls=[dict(decl=p[2])])[0]
def p_parameter_declaration_2(self, p):
""" parameter_declaration : declaration_specifiers abstract_declarator_opt
"""
spec = p[1]
if not spec['type']:
spec['type'] = [c_ast.IdentifierType(['int'],
coord=self._token_coord(p, 1))]
# Parameters can have the same names as typedefs. The trouble is that
# the parameter's name gets grouped into declaration_specifiers, making
# it look like an old-style declaration; compensate.
#
if len(spec['type']) > 1 and len(spec['type'][-1].names) == 1 and \
self._is_type_in_scope(spec['type'][-1].names[0]):
decl = self._build_declarations(
spec=spec,
decls=[dict(decl=p[2], init=None)])[0]
# This truly is an old-style parameter declaration
#
else:
decl = c_ast.Typename(
name='',
quals=spec['qual'],
align=None,
type=p[2] or c_ast.TypeDecl(None, None, None, None),
coord=self._token_coord(p, 2))
typename = spec['type']
decl = self._fix_decl_name_type(decl, typename)
p[0] = decl
def p_identifier_list(self, p):
""" identifier_list : identifier
| identifier_list COMMA identifier
"""
        if len(p) == 2: # single identifier
p[0] = c_ast.ParamList([p[1]], p[1].coord)
else:
p[1].params.append(p[3])
p[0] = p[1]
def p_initializer_1(self, p):
""" initializer : assignment_expression
"""
p[0] = p[1]
def p_initializer_2(self, p):
""" initializer : brace_open initializer_list_opt brace_close
| brace_open initializer_list COMMA brace_close
"""
if p[2] is None:
p[0] = c_ast.InitList([], self._token_coord(p, 1))
else:
p[0] = p[2]
def p_initializer_list(self, p):
""" initializer_list : designation_opt initializer
| initializer_list COMMA designation_opt initializer
"""
if len(p) == 3: # single initializer
init = p[2] if p[1] is None else c_ast.NamedInitializer(p[1], p[2])
p[0] = c_ast.InitList([init], p[2].coord)
else:
init = p[4] if p[3] is None else c_ast.NamedInitializer(p[3], p[4])
p[1].exprs.append(init)
p[0] = p[1]
def p_designation(self, p):
""" designation : designator_list EQUALS
"""
p[0] = p[1]
# Designators are represented as a list of nodes, in the order in which
# they're written in the code.
#
def p_designator_list(self, p):
""" designator_list : designator
| designator_list designator
"""
p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]]
def p_designator(self, p):
""" designator : LBRACKET constant_expression RBRACKET
| PERIOD identifier
"""
p[0] = p[2]
def p_type_name(self, p):
""" type_name : specifier_qualifier_list abstract_declarator_opt
"""
typename = c_ast.Typename(
name='',
quals=p[1]['qual'][:],
align=None,
type=p[2] or c_ast.TypeDecl(None, None, None, None),
coord=self._token_coord(p, 2))
p[0] = self._fix_decl_name_type(typename, p[1]['type'])
def p_abstract_declarator_1(self, p):
""" abstract_declarator : pointer
"""
dummytype = c_ast.TypeDecl(None, None, None, None)
p[0] = self._type_modify_decl(
decl=dummytype,
modifier=p[1])
def p_abstract_declarator_2(self, p):
""" abstract_declarator : pointer direct_abstract_declarator
"""
p[0] = self._type_modify_decl(p[2], p[1])
def p_abstract_declarator_3(self, p):
""" abstract_declarator : direct_abstract_declarator
"""
p[0] = p[1]
# Creating and using direct_abstract_declarator_opt here
# instead of listing both direct_abstract_declarator and the
# lack of it in the beginning of _1 and _2 caused two
# shift/reduce errors.
#
def p_direct_abstract_declarator_1(self, p):
""" direct_abstract_declarator : LPAREN abstract_declarator RPAREN """
p[0] = p[2]
def p_direct_abstract_declarator_2(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LBRACKET assignment_expression_opt RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=p[3],
dim_quals=[],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_abstract_declarator_3(self, p):
""" direct_abstract_declarator : LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET
"""
quals = (p[2] if len(p) > 4 else []) or []
p[0] = c_ast.ArrayDecl(
type=c_ast.TypeDecl(None, None, None, None),
dim=p[3] if len(p) > 4 else p[2],
dim_quals=quals,
coord=self._token_coord(p, 1))
def p_direct_abstract_declarator_4(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LBRACKET TIMES RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=c_ast.ID(p[3], self._token_coord(p, 3)),
dim_quals=[],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_abstract_declarator_5(self, p):
""" direct_abstract_declarator : LBRACKET TIMES RBRACKET
"""
p[0] = c_ast.ArrayDecl(
type=c_ast.TypeDecl(None, None, None, None),
dim=c_ast.ID(p[3], self._token_coord(p, 3)),
dim_quals=[],
coord=self._token_coord(p, 1))
def p_direct_abstract_declarator_6(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LPAREN parameter_type_list_opt RPAREN
"""
func = c_ast.FuncDecl(
args=p[3],
type=None,
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=func)
def p_direct_abstract_declarator_7(self, p):
""" direct_abstract_declarator : LPAREN parameter_type_list_opt RPAREN
"""
p[0] = c_ast.FuncDecl(
args=p[2],
type=c_ast.TypeDecl(None, None, None, None),
coord=self._token_coord(p, 1))
# declaration is a list, statement isn't. To make it consistent, block_item
# will always be a list
#
def p_block_item(self, p):
""" block_item : declaration
| statement
"""
p[0] = p[1] if isinstance(p[1], list) else [p[1]]
# Since we made block_item a list, this just combines lists
#
def p_block_item_list(self, p):
""" block_item_list : block_item
| block_item_list block_item
"""
# Empty block items (plain ';') produce [None], so ignore them
p[0] = p[1] if (len(p) == 2 or p[2] == [None]) else p[1] + p[2]
def p_compound_statement_1(self, p):
""" compound_statement : brace_open block_item_list_opt brace_close """
p[0] = c_ast.Compound(
block_items=p[2],
coord=self._token_coord(p, 1))
def p_labeled_statement_1(self, p):
""" labeled_statement : ID COLON pragmacomp_or_statement """
p[0] = c_ast.Label(p[1], p[3], self._token_coord(p, 1))
def p_labeled_statement_2(self, p):
""" labeled_statement : CASE constant_expression COLON pragmacomp_or_statement """
p[0] = c_ast.Case(p[2], [p[4]], self._token_coord(p, 1))
def p_labeled_statement_3(self, p):
""" labeled_statement : DEFAULT COLON pragmacomp_or_statement """
p[0] = c_ast.Default([p[3]], self._token_coord(p, 1))
def p_selection_statement_1(self, p):
""" selection_statement : IF LPAREN expression RPAREN pragmacomp_or_statement """
p[0] = c_ast.If(p[3], p[5], None, self._token_coord(p, 1))
def p_selection_statement_2(self, p):
""" selection_statement : IF LPAREN expression RPAREN statement ELSE pragmacomp_or_statement """
p[0] = c_ast.If(p[3], p[5], p[7], self._token_coord(p, 1))
def p_selection_statement_3(self, p):
""" selection_statement : SWITCH LPAREN expression RPAREN pragmacomp_or_statement """
p[0] = fix_switch_cases(
c_ast.Switch(p[3], p[5], self._token_coord(p, 1)))
def p_iteration_statement_1(self, p):
""" iteration_statement : WHILE LPAREN expression RPAREN pragmacomp_or_statement """
p[0] = c_ast.While(p[3], p[5], self._token_coord(p, 1))
def p_iteration_statement_2(self, p):
""" iteration_statement : DO pragmacomp_or_statement WHILE LPAREN expression RPAREN SEMI """
p[0] = c_ast.DoWhile(p[5], p[2], self._token_coord(p, 1))
def p_iteration_statement_3(self, p):
""" iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN pragmacomp_or_statement """
p[0] = c_ast.For(p[3], p[5], p[7], p[9], self._token_coord(p, 1))
def p_iteration_statement_4(self, p):
""" iteration_statement : FOR LPAREN declaration expression_opt SEMI expression_opt RPAREN pragmacomp_or_statement """
p[0] = c_ast.For(c_ast.DeclList(p[3], self._token_coord(p, 1)),
p[4], p[6], p[8], self._token_coord(p, 1))
def p_jump_statement_1(self, p):
""" jump_statement : GOTO ID SEMI """
p[0] = c_ast.Goto(p[2], self._token_coord(p, 1))
def p_jump_statement_2(self, p):
""" jump_statement : BREAK SEMI """
p[0] = c_ast.Break(self._token_coord(p, 1))
def p_jump_statement_3(self, p):
""" jump_statement : CONTINUE SEMI """
p[0] = c_ast.Continue(self._token_coord(p, 1))
def p_jump_statement_4(self, p):
""" jump_statement : RETURN expression SEMI
| RETURN SEMI
"""
p[0] = c_ast.Return(p[2] if len(p) == 4 else None, self._token_coord(p, 1))
def p_expression_statement(self, p):
""" expression_statement : expression_opt SEMI """
if p[1] is None:
p[0] = c_ast.EmptyStatement(self._token_coord(p, 2))
else:
p[0] = p[1]
def p_expression(self, p):
""" expression : assignment_expression
| expression COMMA assignment_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
if not isinstance(p[1], c_ast.ExprList):
p[1] = c_ast.ExprList([p[1]], p[1].coord)
p[1].exprs.append(p[3])
p[0] = p[1]
def p_parenthesized_compound_expression(self, p):
""" assignment_expression : LPAREN compound_statement RPAREN """
p[0] = p[2]
def p_typedef_name(self, p):
""" typedef_name : TYPEID """
p[0] = c_ast.IdentifierType([p[1]], coord=self._token_coord(p, 1))
def p_assignment_expression(self, p):
""" assignment_expression : conditional_expression
| unary_expression assignment_operator assignment_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.Assignment(p[2], p[1], p[3], p[1].coord)
# K&R2 defines these as many separate rules, to encode
    # precedence and associativity. Why work hard? I'll just use
    # the built-in precedence/associativity specification feature
    # of PLY (see the precedence declaration above).
#
def p_assignment_operator(self, p):
""" assignment_operator : EQUALS
| XOREQUAL
| TIMESEQUAL
| DIVEQUAL
| MODEQUAL
| PLUSEQUAL
| MINUSEQUAL
| LSHIFTEQUAL
| RSHIFTEQUAL
| ANDEQUAL
| OREQUAL
"""
p[0] = p[1]
def p_constant_expression(self, p):
""" constant_expression : conditional_expression """
p[0] = p[1]
def p_conditional_expression(self, p):
""" conditional_expression : binary_expression
| binary_expression CONDOP expression COLON conditional_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.TernaryOp(p[1], p[3], p[5], p[1].coord)
def p_binary_expression(self, p):
""" binary_expression : cast_expression
| binary_expression TIMES binary_expression
| binary_expression DIVIDE binary_expression
| binary_expression MOD binary_expression
| binary_expression PLUS binary_expression
| binary_expression MINUS binary_expression
| binary_expression RSHIFT binary_expression
| binary_expression LSHIFT binary_expression
| binary_expression LT binary_expression
| binary_expression LE binary_expression
| binary_expression GE binary_expression
| binary_expression GT binary_expression
| binary_expression EQ binary_expression
| binary_expression NE binary_expression
| binary_expression AND binary_expression
| binary_expression OR binary_expression
| binary_expression XOR binary_expression
| binary_expression LAND binary_expression
| binary_expression LOR binary_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.BinaryOp(p[2], p[1], p[3], p[1].coord)
def p_cast_expression_1(self, p):
""" cast_expression : unary_expression """
p[0] = p[1]
def p_cast_expression_2(self, p):
""" cast_expression : LPAREN type_name RPAREN cast_expression """
p[0] = c_ast.Cast(p[2], p[4], self._token_coord(p, 1))
def p_unary_expression_1(self, p):
""" unary_expression : postfix_expression """
p[0] = p[1]
def p_unary_expression_2(self, p):
""" unary_expression : PLUSPLUS unary_expression
| MINUSMINUS unary_expression
| unary_operator cast_expression
"""
p[0] = c_ast.UnaryOp(p[1], p[2], p[2].coord)
def p_unary_expression_3(self, p):
""" unary_expression : SIZEOF unary_expression
| SIZEOF LPAREN type_name RPAREN
| _ALIGNOF LPAREN type_name RPAREN
"""
p[0] = c_ast.UnaryOp(
p[1],
p[2] if len(p) == 3 else p[3],
self._token_coord(p, 1))
def p_unary_operator(self, p):
""" unary_operator : AND
| TIMES
| PLUS
| MINUS
| NOT
| LNOT
"""
p[0] = p[1]
def p_postfix_expression_1(self, p):
""" postfix_expression : primary_expression """
p[0] = p[1]
def p_postfix_expression_2(self, p):
""" postfix_expression : postfix_expression LBRACKET expression RBRACKET """
p[0] = c_ast.ArrayRef(p[1], p[3], p[1].coord)
def p_postfix_expression_3(self, p):
""" postfix_expression : postfix_expression LPAREN argument_expression_list RPAREN
| postfix_expression LPAREN RPAREN
"""
p[0] = c_ast.FuncCall(p[1], p[3] if len(p) == 5 else None, p[1].coord)
def p_postfix_expression_4(self, p):
""" postfix_expression : postfix_expression PERIOD ID
| postfix_expression PERIOD TYPEID
| postfix_expression ARROW ID
| postfix_expression ARROW TYPEID
"""
field = c_ast.ID(p[3], self._token_coord(p, 3))
p[0] = c_ast.StructRef(p[1], p[2], field, p[1].coord)
def p_postfix_expression_5(self, p):
""" postfix_expression : postfix_expression PLUSPLUS
| postfix_expression MINUSMINUS
"""
p[0] = c_ast.UnaryOp('p' + p[2], p[1], p[1].coord)
def p_postfix_expression_6(self, p):
""" postfix_expression : LPAREN type_name RPAREN brace_open initializer_list brace_close
| LPAREN type_name RPAREN brace_open initializer_list COMMA brace_close
"""
p[0] = c_ast.CompoundLiteral(p[2], p[5])
def p_primary_expression_1(self, p):
""" primary_expression : identifier """
p[0] = p[1]
def p_primary_expression_2(self, p):
""" primary_expression : constant """
p[0] = p[1]
def p_primary_expression_3(self, p):
""" primary_expression : unified_string_literal
| unified_wstring_literal
"""
p[0] = p[1]
def p_primary_expression_4(self, p):
""" primary_expression : LPAREN expression RPAREN """
p[0] = p[2]
def p_primary_expression_5(self, p):
""" primary_expression : OFFSETOF LPAREN type_name COMMA offsetof_member_designator RPAREN
"""
coord = self._token_coord(p, 1)
p[0] = c_ast.FuncCall(c_ast.ID(p[1], coord),
c_ast.ExprList([p[3], p[5]], coord),
coord)
def p_offsetof_member_designator(self, p):
""" offsetof_member_designator : identifier
| offsetof_member_designator PERIOD identifier
| offsetof_member_designator LBRACKET expression RBRACKET
"""
if len(p) == 2:
p[0] = p[1]
elif len(p) == 4:
p[0] = c_ast.StructRef(p[1], p[2], p[3], p[1].coord)
elif len(p) == 5:
p[0] = c_ast.ArrayRef(p[1], p[3], p[1].coord)
else:
raise NotImplementedError("Unexpected parsing state. len(p): %u" % len(p))
def p_argument_expression_list(self, p):
""" argument_expression_list : assignment_expression
| argument_expression_list COMMA assignment_expression
"""
if len(p) == 2: # single expr
p[0] = c_ast.ExprList([p[1]], p[1].coord)
else:
p[1].exprs.append(p[3])
p[0] = p[1]
def p_identifier(self, p):
""" identifier : ID """
p[0] = c_ast.ID(p[1], self._token_coord(p, 1))
def p_constant_1(self, p):
""" constant : INT_CONST_DEC
| INT_CONST_OCT
| INT_CONST_HEX
| INT_CONST_BIN
| INT_CONST_CHAR
"""
uCount = 0
lCount = 0
for x in p[1][-3:]:
if x in ('l', 'L'):
lCount += 1
elif x in ('u', 'U'):
uCount += 1
        if uCount > 1:
            raise ValueError('Constant cannot have more than one u/U suffix.')
        elif lCount > 2:
            raise ValueError('Constant cannot have more than two l/L suffixes.')
prefix = 'unsigned ' * uCount + 'long ' * lCount
p[0] = c_ast.Constant(
prefix + 'int', p[1], self._token_coord(p, 1))
def p_constant_2(self, p):
""" constant : FLOAT_CONST
| HEX_FLOAT_CONST
"""
if 'x' in p[1].lower():
t = 'float'
else:
if p[1][-1] in ('f', 'F'):
t = 'float'
elif p[1][-1] in ('l', 'L'):
t = 'long double'
else:
t = 'double'
p[0] = c_ast.Constant(
t, p[1], self._token_coord(p, 1))
def p_constant_3(self, p):
""" constant : CHAR_CONST
| WCHAR_CONST
| U8CHAR_CONST
| U16CHAR_CONST
| U32CHAR_CONST
"""
p[0] = c_ast.Constant(
'char', p[1], self._token_coord(p, 1))
# The "unified" string and wstring literal rules are for supporting
# concatenation of adjacent string literals.
# I.e. "hello " "world" is seen by the C compiler as a single string literal
# with the value "hello world"
#
def p_unified_string_literal(self, p):
""" unified_string_literal : STRING_LITERAL
| unified_string_literal STRING_LITERAL
"""
if len(p) == 2: # single literal
p[0] = c_ast.Constant(
'string', p[1], self._token_coord(p, 1))
else:
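            # Drop the closing quote of the accumulated value and the
            # opening quote of the new literal, fusing them into a single
            # quoted string.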
p[1].value = p[1].value[:-1] + p[2][1:]
p[0] = p[1]
def p_unified_wstring_literal(self, p):
""" unified_wstring_literal : WSTRING_LITERAL
| U8STRING_LITERAL
| U16STRING_LITERAL
| U32STRING_LITERAL
| unified_wstring_literal WSTRING_LITERAL
| unified_wstring_literal U8STRING_LITERAL
| unified_wstring_literal U16STRING_LITERAL
| unified_wstring_literal U32STRING_LITERAL
"""
if len(p) == 2: # single literal
p[0] = c_ast.Constant(
'string', p[1], self._token_coord(p, 1))
else:
p[1].value = p[1].value.rstrip()[:-1] + p[2][2:]
p[0] = p[1]
def p_brace_open(self, p):
""" brace_open : LBRACE
"""
p[0] = p[1]
p.set_lineno(0, p.lineno(1))
def p_brace_close(self, p):
""" brace_close : RBRACE
"""
p[0] = p[1]
p.set_lineno(0, p.lineno(1))
def p_empty(self, p):
'empty : '
p[0] = None
def p_error(self, p):
# If error recovery is added here in the future, make sure
# _get_yacc_lookahead_token still works!
#
if p:
self._parse_error(
'before: %s' % p.value,
self._coord(lineno=p.lineno,
column=self.clex.find_tok_column(p)))
else:
self._parse_error('At end of input', self.clex.filename)
| 73,680 | Python | 37.03872 | 134 | 0.525841 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/c_generator.py | #------------------------------------------------------------------------------
# pycparser: c_generator.py
#
# C code generator from pycparser AST nodes.
#
# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#------------------------------------------------------------------------------
from . import c_ast
class CGenerator(object):
""" Uses the same visitor pattern as c_ast.NodeVisitor, but modified to
return a value from each visit method, using string accumulation in
generic_visit.
"""
def __init__(self, reduce_parentheses=False):
""" Constructs C-code generator
reduce_parentheses:
if True, eliminates needless parentheses on binary operators
"""
# Statements start with indentation of self.indent_level spaces, using
# the _make_indent method.
self.indent_level = 0
self.reduce_parentheses = reduce_parentheses
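    # Example usage (an illustrative sketch; assumes pycparser is importable):
    #
    #   from pycparser import c_parser
    #   ast = c_parser.CParser().parse('int x = a * b + c;')
    #   print(CGenerator().visit(ast))
    #   # -> int x = (a * b) + c;
    #   print(CGenerator(reduce_parentheses=True).visit(ast))
    #   # -> int x = a * b + c;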
def _make_indent(self):
return ' ' * self.indent_level
def visit(self, node):
method = 'visit_' + node.__class__.__name__
return getattr(self, method, self.generic_visit)(node)
def generic_visit(self, node):
if node is None:
return ''
else:
return ''.join(self.visit(c) for c_name, c in node.children())
def visit_Constant(self, n):
return n.value
def visit_ID(self, n):
return n.name
def visit_Pragma(self, n):
ret = '#pragma'
if n.string:
ret += ' ' + n.string
return ret
def visit_ArrayRef(self, n):
arrref = self._parenthesize_unless_simple(n.name)
return arrref + '[' + self.visit(n.subscript) + ']'
def visit_StructRef(self, n):
sref = self._parenthesize_unless_simple(n.name)
return sref + n.type + self.visit(n.field)
def visit_FuncCall(self, n):
fref = self._parenthesize_unless_simple(n.name)
return fref + '(' + self.visit(n.args) + ')'
def visit_UnaryOp(self, n):
if n.op == 'sizeof':
# Always parenthesize the argument of sizeof since it can be
# a name.
return 'sizeof(%s)' % self.visit(n.expr)
else:
operand = self._parenthesize_unless_simple(n.expr)
if n.op == 'p++':
return '%s++' % operand
elif n.op == 'p--':
return '%s--' % operand
else:
return '%s%s' % (n.op, operand)
# Precedence map of binary operators:
precedence_map = {
# Should be in sync with c_parser.CParser.precedence
# Higher numbers are stronger binding
'||': 0, # weakest binding
'&&': 1,
'|': 2,
'^': 3,
'&': 4,
'==': 5, '!=': 5,
'>': 6, '>=': 6, '<': 6, '<=': 6,
'>>': 7, '<<': 7,
'+': 8, '-': 8,
'*': 9, '/': 9, '%': 9 # strongest binding
}
def visit_BinaryOp(self, n):
# Note: all binary operators are left-to-right associative
#
# If `n.left.op` has a stronger or equally binding precedence in
        # comparison to `n.op`, no parentheses are needed for the left:
# e.g., `(a*b) + c` is equivalent to `a*b + c`, as well as
# `(a+b) - c` is equivalent to `a+b - c` (same precedence).
# If the left operator is weaker binding than the current, then
# parentheses are necessary:
# e.g., `(a+b) * c` is NOT equivalent to `a+b * c`.
lval_str = self._parenthesize_if(
n.left,
lambda d: not (self._is_simple_node(d) or
self.reduce_parentheses and isinstance(d, c_ast.BinaryOp) and
self.precedence_map[d.op] >= self.precedence_map[n.op]))
# If `n.right.op` has a stronger -but not equal- binding precedence,
        # parentheses can be omitted on the right:
# e.g., `a + (b*c)` is equivalent to `a + b*c`.
# If the right operator is weaker or equally binding, then parentheses
# are necessary:
# e.g., `a * (b+c)` is NOT equivalent to `a * b+c` and
# `a - (b+c)` is NOT equivalent to `a - b+c` (same precedence).
rval_str = self._parenthesize_if(
n.right,
lambda d: not (self._is_simple_node(d) or
self.reduce_parentheses and isinstance(d, c_ast.BinaryOp) and
self.precedence_map[d.op] > self.precedence_map[n.op]))
return '%s %s %s' % (lval_str, n.op, rval_str)
def visit_Assignment(self, n):
rval_str = self._parenthesize_if(
n.rvalue,
lambda n: isinstance(n, c_ast.Assignment))
return '%s %s %s' % (self.visit(n.lvalue), n.op, rval_str)
def visit_IdentifierType(self, n):
return ' '.join(n.names)
def _visit_expr(self, n):
if isinstance(n, c_ast.InitList):
return '{' + self.visit(n) + '}'
elif isinstance(n, c_ast.ExprList):
return '(' + self.visit(n) + ')'
else:
return self.visit(n)
def visit_Decl(self, n, no_type=False):
# no_type is used when a Decl is part of a DeclList, where the type is
# explicitly only for the first declaration in a list.
#
s = n.name if no_type else self._generate_decl(n)
if n.bitsize: s += ' : ' + self.visit(n.bitsize)
if n.init:
s += ' = ' + self._visit_expr(n.init)
return s
def visit_DeclList(self, n):
s = self.visit(n.decls[0])
if len(n.decls) > 1:
s += ', ' + ', '.join(self.visit_Decl(decl, no_type=True)
for decl in n.decls[1:])
return s
def visit_Typedef(self, n):
s = ''
if n.storage: s += ' '.join(n.storage) + ' '
s += self._generate_type(n.type)
return s
def visit_Cast(self, n):
s = '(' + self._generate_type(n.to_type, emit_declname=False) + ')'
return s + ' ' + self._parenthesize_unless_simple(n.expr)
def visit_ExprList(self, n):
visited_subexprs = []
for expr in n.exprs:
visited_subexprs.append(self._visit_expr(expr))
return ', '.join(visited_subexprs)
def visit_InitList(self, n):
visited_subexprs = []
for expr in n.exprs:
visited_subexprs.append(self._visit_expr(expr))
return ', '.join(visited_subexprs)
def visit_Enum(self, n):
return self._generate_struct_union_enum(n, name='enum')
def visit_Alignas(self, n):
return '_Alignas({})'.format(self.visit(n.alignment))
def visit_Enumerator(self, n):
if not n.value:
return '{indent}{name},\n'.format(
indent=self._make_indent(),
name=n.name,
)
else:
return '{indent}{name} = {value},\n'.format(
indent=self._make_indent(),
name=n.name,
value=self.visit(n.value),
)
def visit_FuncDef(self, n):
decl = self.visit(n.decl)
self.indent_level = 0
body = self.visit(n.body)
if n.param_decls:
knrdecls = ';\n'.join(self.visit(p) for p in n.param_decls)
return decl + '\n' + knrdecls + ';\n' + body + '\n'
else:
return decl + '\n' + body + '\n'
def visit_FileAST(self, n):
s = ''
for ext in n.ext:
if isinstance(ext, c_ast.FuncDef):
s += self.visit(ext)
elif isinstance(ext, c_ast.Pragma):
s += self.visit(ext) + '\n'
else:
s += self.visit(ext) + ';\n'
return s
def visit_Compound(self, n):
s = self._make_indent() + '{\n'
self.indent_level += 2
if n.block_items:
s += ''.join(self._generate_stmt(stmt) for stmt in n.block_items)
self.indent_level -= 2
s += self._make_indent() + '}\n'
return s
def visit_CompoundLiteral(self, n):
return '(' + self.visit(n.type) + '){' + self.visit(n.init) + '}'
def visit_EmptyStatement(self, n):
return ';'
def visit_ParamList(self, n):
return ', '.join(self.visit(param) for param in n.params)
def visit_Return(self, n):
s = 'return'
if n.expr: s += ' ' + self.visit(n.expr)
return s + ';'
def visit_Break(self, n):
return 'break;'
def visit_Continue(self, n):
return 'continue;'
def visit_TernaryOp(self, n):
s = '(' + self._visit_expr(n.cond) + ') ? '
s += '(' + self._visit_expr(n.iftrue) + ') : '
s += '(' + self._visit_expr(n.iffalse) + ')'
return s
def visit_If(self, n):
s = 'if ('
if n.cond: s += self.visit(n.cond)
s += ')\n'
s += self._generate_stmt(n.iftrue, add_indent=True)
if n.iffalse:
s += self._make_indent() + 'else\n'
s += self._generate_stmt(n.iffalse, add_indent=True)
return s
def visit_For(self, n):
s = 'for ('
if n.init: s += self.visit(n.init)
s += ';'
if n.cond: s += ' ' + self.visit(n.cond)
s += ';'
if n.next: s += ' ' + self.visit(n.next)
s += ')\n'
s += self._generate_stmt(n.stmt, add_indent=True)
return s
def visit_While(self, n):
s = 'while ('
if n.cond: s += self.visit(n.cond)
s += ')\n'
s += self._generate_stmt(n.stmt, add_indent=True)
return s
def visit_DoWhile(self, n):
s = 'do\n'
s += self._generate_stmt(n.stmt, add_indent=True)
s += self._make_indent() + 'while ('
if n.cond: s += self.visit(n.cond)
s += ');'
return s
def visit_StaticAssert(self, n):
s = '_Static_assert('
s += self.visit(n.cond)
if n.message:
s += ','
s += self.visit(n.message)
s += ')'
return s
def visit_Switch(self, n):
s = 'switch (' + self.visit(n.cond) + ')\n'
s += self._generate_stmt(n.stmt, add_indent=True)
return s
def visit_Case(self, n):
s = 'case ' + self.visit(n.expr) + ':\n'
for stmt in n.stmts:
s += self._generate_stmt(stmt, add_indent=True)
return s
def visit_Default(self, n):
s = 'default:\n'
for stmt in n.stmts:
s += self._generate_stmt(stmt, add_indent=True)
return s
def visit_Label(self, n):
return n.name + ':\n' + self._generate_stmt(n.stmt)
def visit_Goto(self, n):
return 'goto ' + n.name + ';'
def visit_EllipsisParam(self, n):
return '...'
def visit_Struct(self, n):
return self._generate_struct_union_enum(n, 'struct')
def visit_Typename(self, n):
return self._generate_type(n.type)
def visit_Union(self, n):
return self._generate_struct_union_enum(n, 'union')
def visit_NamedInitializer(self, n):
s = ''
for name in n.name:
if isinstance(name, c_ast.ID):
s += '.' + name.name
else:
s += '[' + self.visit(name) + ']'
s += ' = ' + self._visit_expr(n.expr)
return s
def visit_FuncDecl(self, n):
return self._generate_type(n)
def visit_ArrayDecl(self, n):
return self._generate_type(n, emit_declname=False)
def visit_TypeDecl(self, n):
return self._generate_type(n, emit_declname=False)
def visit_PtrDecl(self, n):
return self._generate_type(n, emit_declname=False)
def _generate_struct_union_enum(self, n, name):
""" Generates code for structs, unions, and enums. name should be
'struct', 'union', or 'enum'.
"""
if name in ('struct', 'union'):
members = n.decls
body_function = self._generate_struct_union_body
else:
assert name == 'enum'
members = None if n.values is None else n.values.enumerators
body_function = self._generate_enum_body
s = name + ' ' + (n.name or '')
if members is not None:
# None means no members
# Empty sequence means an empty list of members
s += '\n'
s += self._make_indent()
self.indent_level += 2
s += '{\n'
s += body_function(members)
self.indent_level -= 2
s += self._make_indent() + '}'
return s
def _generate_struct_union_body(self, members):
return ''.join(self._generate_stmt(decl) for decl in members)
def _generate_enum_body(self, members):
# `[:-2] + '\n'` removes the final `,` from the enumerator list
return ''.join(self.visit(value) for value in members)[:-2] + '\n'
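    # A worked example of the slicing above (an illustration, not part of the
    # original module): visit_Enumerator renders each member ending in ',\n',
    # e.g. '    RED,\n' + '    GREEN,\n', so the joined string always ends in
    # ',\n'. Slicing off the last two characters removes that trailing ',\n'
    # and appending '\n' restores the final newline, yielding
    # '    RED,\n    GREEN\n'.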
def _generate_stmt(self, n, add_indent=False):
""" Generation from a statement node. This method exists as a wrapper
for individual visit_* methods to handle different treatment of
some statements in this context.
"""
typ = type(n)
if add_indent: self.indent_level += 2
indent = self._make_indent()
if add_indent: self.indent_level -= 2
if typ in (
c_ast.Decl, c_ast.Assignment, c_ast.Cast, c_ast.UnaryOp,
c_ast.BinaryOp, c_ast.TernaryOp, c_ast.FuncCall, c_ast.ArrayRef,
c_ast.StructRef, c_ast.Constant, c_ast.ID, c_ast.Typedef,
c_ast.ExprList):
# These can also appear in an expression context so no semicolon
# is added to them automatically
#
return indent + self.visit(n) + ';\n'
elif typ in (c_ast.Compound,):
# No extra indentation required before the opening brace of a
# compound - because it consists of multiple lines it has to
# compute its own indentation.
#
return self.visit(n)
elif typ in (c_ast.If,):
return indent + self.visit(n)
else:
return indent + self.visit(n) + '\n'
def _generate_decl(self, n):
""" Generation from a Decl node.
"""
s = ''
if n.funcspec: s = ' '.join(n.funcspec) + ' '
if n.storage: s += ' '.join(n.storage) + ' '
if n.align: s += self.visit(n.align[0]) + ' '
s += self._generate_type(n.type)
return s
def _generate_type(self, n, modifiers=[], emit_declname = True):
""" Recursive generation from a type node. n is the type node.
modifiers collects the PtrDecl, ArrayDecl and FuncDecl modifiers
encountered on the way down to a TypeDecl, to allow proper
generation from it.
"""
typ = type(n)
#~ print(n, modifiers)
if typ == c_ast.TypeDecl:
s = ''
if n.quals: s += ' '.join(n.quals) + ' '
s += self.visit(n.type)
nstr = n.declname if n.declname and emit_declname else ''
# Resolve modifiers.
# Wrap in parens to distinguish pointer to array and pointer to
# function syntax.
#
for i, modifier in enumerate(modifiers):
if isinstance(modifier, c_ast.ArrayDecl):
if (i != 0 and
isinstance(modifiers[i - 1], c_ast.PtrDecl)):
nstr = '(' + nstr + ')'
nstr += '['
if modifier.dim_quals:
nstr += ' '.join(modifier.dim_quals) + ' '
nstr += self.visit(modifier.dim) + ']'
elif isinstance(modifier, c_ast.FuncDecl):
if (i != 0 and
isinstance(modifiers[i - 1], c_ast.PtrDecl)):
nstr = '(' + nstr + ')'
nstr += '(' + self.visit(modifier.args) + ')'
elif isinstance(modifier, c_ast.PtrDecl):
if modifier.quals:
nstr = '* %s%s' % (' '.join(modifier.quals),
' ' + nstr if nstr else '')
else:
nstr = '*' + nstr
if nstr: s += ' ' + nstr
return s
elif typ == c_ast.Decl:
return self._generate_decl(n.type)
elif typ == c_ast.Typename:
return self._generate_type(n.type, emit_declname = emit_declname)
elif typ == c_ast.IdentifierType:
return ' '.join(n.names) + ' '
elif typ in (c_ast.ArrayDecl, c_ast.PtrDecl, c_ast.FuncDecl):
return self._generate_type(n.type, modifiers + [n],
emit_declname = emit_declname)
else:
return self.visit(n)
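    # A worked example of the modifier handling above (illustration only,
    # assuming pycparser's usual AST shapes): for 'int *arr[4]' the type
    # chain is ArrayDecl -> PtrDecl -> TypeDecl, so modifiers arrive as
    # [ArrayDecl, PtrDecl] and nstr grows 'arr' -> 'arr[4]' -> '*arr[4]'.
    # For 'int (*p)[4]' the chain is PtrDecl -> ArrayDecl -> TypeDecl; when
    # the ArrayDecl is processed, the previous modifier is a PtrDecl, so
    # nstr is wrapped in parentheses: 'p' -> '*p' -> '(*p)[4]'.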
def _parenthesize_if(self, n, condition):
""" Visits 'n' and returns its string representation, parenthesized
if the condition function applied to the node returns True.
"""
s = self._visit_expr(n)
if condition(n):
return '(' + s + ')'
else:
return s
def _parenthesize_unless_simple(self, n):
""" Common use case for _parenthesize_if
"""
return self._parenthesize_if(n, lambda d: not self._is_simple_node(d))
def _is_simple_node(self, n):
""" Returns True for nodes that are "simple" - i.e. nodes that always
have higher precedence than operators.
"""
return isinstance(n, (c_ast.Constant, c_ast.ID, c_ast.ArrayRef,
c_ast.StructRef, c_ast.FuncCall))
| 17,772 | Python | 34.333996 | 83 | 0.507934 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/plyparser.py | #-----------------------------------------------------------------
# plyparser.py
#
# PLYParser class and other utilities for simplifying programming
# parsers with PLY
#
# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#-----------------------------------------------------------------
import warnings
class Coord(object):
""" Coordinates of a syntactic element. Consists of:
- File name
- Line number
- (optional) column number, for the Lexer
"""
__slots__ = ('file', 'line', 'column', '__weakref__')
def __init__(self, file, line, column=None):
self.file = file
self.line = line
self.column = column
def __str__(self):
str = "%s:%s" % (self.file, self.line)
if self.column: str += ":%s" % self.column
return str
class ParseError(Exception): pass
class PLYParser(object):
def _create_opt_rule(self, rulename):
""" Given a rule name, creates an optional ply.yacc rule
for it. The name of the optional rule is
<rulename>_opt
"""
optname = rulename + '_opt'
def optrule(self, p):
p[0] = p[1]
optrule.__doc__ = '%s : empty\n| %s' % (optname, rulename)
optrule.__name__ = 'p_%s' % optname
setattr(self.__class__, optrule.__name__, optrule)
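    # For example (illustration only), self._create_opt_rule('expression')
    # attaches a method p_expression_opt whose docstring is
    # 'expression_opt : empty\n| expression', i.e. the grammar rule
    #     expression_opt : empty
    #                    | expression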
def _coord(self, lineno, column=None):
return Coord(
file=self.clex.filename,
line=lineno,
column=column)
def _token_coord(self, p, token_idx):
""" Returns the coordinates for the YaccProduction object 'p' indexed
with 'token_idx'. The coordinate includes the 'lineno' and
'column'. Both follow the lex semantic, starting from 1.
"""
last_cr = p.lexer.lexer.lexdata.rfind('\n', 0, p.lexpos(token_idx))
if last_cr < 0:
last_cr = -1
column = (p.lexpos(token_idx) - (last_cr))
return self._coord(p.lineno(token_idx), column)
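    # A worked example of the column arithmetic above (illustration only):
    # for lexdata 'abc\ndef' and a token starting at lexpos 4 (the 'd'),
    # rfind gives last_cr == 3 and column == 4 - 3 == 1. On the first line,
    # last_cr is -1, so a token at lexpos 0 also gets column 1, matching
    # the 1-based lex convention mentioned above.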
def _parse_error(self, msg, coord):
raise ParseError("%s: %s" % (coord, msg))
def parameterized(*params):
""" Decorator to create parameterized rules.
Parameterized rule methods must be named starting with 'p_' and contain
'xxx', and their docstrings may contain 'xxx' and 'yyy'. These will be
replaced by the given parameter tuples. For example, ``p_xxx_rule()`` with
docstring 'xxx_rule : yyy' when decorated with
``@parameterized(('id', 'ID'))`` produces ``p_id_rule()`` with the docstring
'id_rule : ID'. Using multiple tuples produces multiple rules.
"""
def decorate(rule_func):
rule_func._params = params
return rule_func
return decorate
def template(cls):
""" Class decorator to generate rules from parameterized rule templates.
See `parameterized` for more information on parameterized rules.
"""
issued_nodoc_warning = False
for attr_name in dir(cls):
if attr_name.startswith('p_'):
method = getattr(cls, attr_name)
if hasattr(method, '_params'):
# Remove the template method
delattr(cls, attr_name)
# Create parameterized rules from this method; only run this if
# the method has a docstring. This is to address an issue when
# pycparser's users are installed in -OO mode which strips
# docstrings away.
# See: https://github.com/eliben/pycparser/pull/198/ and
# https://github.com/eliben/pycparser/issues/197
# for discussion.
if method.__doc__ is not None:
_create_param_rules(cls, method)
elif not issued_nodoc_warning:
warnings.warn(
'parsing methods must have __doc__ for pycparser to work properly',
RuntimeWarning,
stacklevel=2)
issued_nodoc_warning = True
return cls
def _create_param_rules(cls, func):
""" Create ply.yacc rules based on a parameterized rule function
Generates new methods (one per each pair of parameters) based on the
template rule function `func`, and attaches them to `cls`. The rule
function's parameters must be accessible via its `_params` attribute.
"""
for xxx, yyy in func._params:
# Use the template method's body for each new method
def param_rule(self, p):
func(self, p)
# Substitute in the params for the grammar rule and function name
param_rule.__doc__ = func.__doc__.replace('xxx', xxx).replace('yyy', yyy)
param_rule.__name__ = func.__name__.replace('xxx', xxx)
# Attach the new method to the class
setattr(cls, param_rule.__name__, param_rule)
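# A minimal usage sketch (hypothetical grammar, not part of this module):
#
#     @template
#     class MyParser(PLYParser):
#         @parameterized(('id', 'ID'), ('num', 'NUMBER'))
#         def p_xxx_atom(self, p):
#             """ xxx_atom : yyy """
#             p[0] = p[1]
#
# After class creation the template method is removed and two concrete rules
# exist instead: p_id_atom with docstring 'id_atom : ID' and p_num_atom with
# docstring 'num_atom : NUMBER'.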
| 4,875 | Python | 35.388059 | 91 | 0.568615 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/ast_transforms.py | #------------------------------------------------------------------------------
# pycparser: ast_transforms.py
#
# Some utilities used by the parser to create a friendlier AST.
#
# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#------------------------------------------------------------------------------
from . import c_ast
def fix_switch_cases(switch_node):
""" The 'case' statements in a 'switch' come out of parsing with one
child node, so subsequent statements are just tucked to the parent
Compound. Additionally, consecutive (fall-through) case statements
come out messy. This is a peculiarity of the C grammar. The following:
switch (myvar) {
case 10:
k = 10;
p = k + 1;
return 10;
case 20:
case 30:
return 20;
default:
break;
}
Creates this tree (pseudo-dump):
Switch
ID: myvar
Compound:
Case 10:
k = 10
p = k + 1
return 10
Case 20:
Case 30:
return 20
Default:
break
The goal of this transform is to fix this mess, turning it into the
following:
Switch
ID: myvar
Compound:
Case 10:
k = 10
p = k + 1
return 10
Case 20:
Case 30:
return 20
Default:
break
A fixed AST node is returned. The argument may be modified.
"""
assert isinstance(switch_node, c_ast.Switch)
if not isinstance(switch_node.stmt, c_ast.Compound):
return switch_node
# The new Compound child for the Switch, which will collect children in the
# correct order
new_compound = c_ast.Compound([], switch_node.stmt.coord)
# The last Case/Default node
last_case = None
# Goes over the children of the Compound below the Switch, adding them
# either directly below new_compound or below the last Case as appropriate
# (for `switch(cond) {}`, block_items would have been None)
for child in (switch_node.stmt.block_items or []):
if isinstance(child, (c_ast.Case, c_ast.Default)):
# If it's a Case/Default:
# 1. Add it to the Compound and mark as "last case"
# 2. If its immediate child is also a Case or Default, promote it
# to a sibling.
new_compound.block_items.append(child)
_extract_nested_case(child, new_compound.block_items)
last_case = new_compound.block_items[-1]
else:
# Other statements are added as children to the last case, if it
# exists.
if last_case is None:
new_compound.block_items.append(child)
else:
last_case.stmts.append(child)
switch_node.stmt = new_compound
return switch_node
def _extract_nested_case(case_node, stmts_list):
""" Recursively extract consecutive Case statements that are made nested
by the parser and add them to the stmts_list.
"""
if isinstance(case_node.stmts[0], (c_ast.Case, c_ast.Default)):
stmts_list.append(case_node.stmts.pop())
_extract_nested_case(stmts_list[-1], stmts_list)
def fix_atomic_specifiers(decl):
""" Atomic specifiers like _Atomic(type) are unusually structured,
conferring a qualifier upon the contained type.
This function fixes a decl with atomic specifiers to have a sane AST
structure, by removing spurious Typename->TypeDecl pairs and attaching
the _Atomic qualifier in the right place.
"""
# There can be multiple levels of _Atomic in a decl; fix them until a
# fixed point is reached.
while True:
decl, found = _fix_atomic_specifiers_once(decl)
if not found:
break
# Make sure to add an _Atomic qual on the topmost decl if needed. Also
# restore the declname on the innermost TypeDecl (it gets placed in the
# wrong place during construction).
typ = decl
while not isinstance(typ, c_ast.TypeDecl):
try:
typ = typ.type
except AttributeError:
return decl
if '_Atomic' in typ.quals and '_Atomic' not in decl.quals:
decl.quals.append('_Atomic')
if typ.declname is None:
typ.declname = decl.name
return decl
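# An illustration of the fix above (assuming pycparser's usual parse of
# '_Atomic(int) x;'): the raw AST wraps the base type in a spurious
# Typename/TypeDecl pair carrying the '_Atomic' qualifier. After fixing, the
# Decl holds a plain TypeDecl with '_Atomic' in its quals, the qualifier is
# mirrored onto the Decl itself, and declname is restored to 'x'.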
def _fix_atomic_specifiers_once(decl):
""" Performs one 'fix' round of atomic specifiers.
Returns (modified_decl, found) where found is True iff a fix was made.
"""
parent = decl
grandparent = None
node = decl.type
while node is not None:
if isinstance(node, c_ast.Typename) and '_Atomic' in node.quals:
break
try:
grandparent = parent
parent = node
node = node.type
except AttributeError:
# If we've reached a node without a `type` field, it means we won't
# find what we're looking for at this point; give up the search
# and return the original decl unmodified.
return decl, False
assert isinstance(parent, c_ast.TypeDecl)
grandparent.type = node.type
if '_Atomic' not in node.type.quals:
node.type.quals.append('_Atomic')
return decl, True
| 5,691 | Python | 33.496969 | 79 | 0.54718 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/ply/cpp.py | # -----------------------------------------------------------------------------
# cpp.py
#
# Author: David Beazley (http://www.dabeaz.com)
# Copyright (C) 2017
# All rights reserved
#
# This module implements an ANSI-C style lexical preprocessor for PLY.
# -----------------------------------------------------------------------------
import sys
# Some Python 3 compatibility shims
if sys.version_info.major < 3:
STRING_TYPES = (str, unicode)
else:
STRING_TYPES = str
xrange = range
# -----------------------------------------------------------------------------
# Default preprocessor lexer definitions. These tokens are enough to get
# a basic preprocessor working. Other modules may import these if they want
# -----------------------------------------------------------------------------
tokens = (
'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT1', 'CPP_COMMENT2', 'CPP_POUND','CPP_DPOUND'
)
literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\""
# Whitespace
def t_CPP_WS(t):
r'\s+'
t.lexer.lineno += t.value.count("\n")
return t
t_CPP_POUND = r'\#'
t_CPP_DPOUND = r'\#\#'
# Identifier
t_CPP_ID = r'[A-Za-z_][\w_]*'
# Integer literal
def CPP_INTEGER(t):
r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)'
return t
t_CPP_INTEGER = CPP_INTEGER
# Floating literal
t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
def t_CPP_STRING(t):
r'\"([^\\\n]|(\\(.|\n)))*?\"'
t.lexer.lineno += t.value.count("\n")
return t
# Character constant 'c' or L'c'
def t_CPP_CHAR(t):
r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
t.lexer.lineno += t.value.count("\n")
return t
# Comment
def t_CPP_COMMENT1(t):
r'(/\*(.|\n)*?\*/)'
ncr = t.value.count("\n")
t.lexer.lineno += ncr
# replace with one space or a number of '\n'
t.type = 'CPP_WS'; t.value = '\n' * ncr if ncr else ' '
return t
# Line comment
def t_CPP_COMMENT2(t):
r'(//.*?(\n|$))'
    # replace with '\n'
t.type = 'CPP_WS'; t.value = '\n'
return t
def t_error(t):
t.type = t.value[0]
t.value = t.value[0]
t.lexer.skip(1)
return t
import re
import copy
import time
import os.path
# -----------------------------------------------------------------------------
# trigraph()
#
# Given an input string, this function replaces all trigraph sequences.
# The following mapping is used:
#
# ??= #
# ??/ \
# ??' ^
# ??( [
# ??) ]
# ??! |
# ??< {
# ??> }
# ??- ~
# -----------------------------------------------------------------------------
_trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''')
_trigraph_rep = {
'=':'#',
'/':'\\',
"'":'^',
'(':'[',
')':']',
'!':'|',
'<':'{',
'>':'}',
'-':'~'
}
def trigraph(input):
return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input)
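# For example (illustration only):
#
#     trigraph("??=define ARR(x) x??(10??)") == "#define ARR(x) x[10]"
#
# since '??=' maps to '#', '??(' to '[' and '??)' to ']'.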
# ------------------------------------------------------------------
# Macro object
#
# This object holds information about preprocessor macros
#
# .name - Macro name (string)
# .value - Macro value (a list of tokens)
# .arglist - List of argument names
# .variadic - Boolean indicating whether or not variadic macro
# .vararg - Name of the variadic parameter
#
# When a macro is created, the macro replacement token sequence is
# pre-scanned and used to create patch lists that are later used
# during macro expansion
# ------------------------------------------------------------------
class Macro(object):
def __init__(self,name,value,arglist=None,variadic=False):
self.name = name
self.value = value
self.arglist = arglist
self.variadic = variadic
if variadic:
self.vararg = arglist[-1]
self.source = None
# ------------------------------------------------------------------
# Preprocessor object
#
# Object representing a preprocessor. Contains macro definitions,
# include directories, and other information
# ------------------------------------------------------------------
class Preprocessor(object):
def __init__(self,lexer=None):
if lexer is None:
lexer = lex.lexer
self.lexer = lexer
self.macros = { }
self.path = []
self.temp_path = []
# Probe the lexer for selected tokens
self.lexprobe()
tm = time.localtime()
self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
self.parser = None
# -----------------------------------------------------------------------------
# tokenize()
#
# Utility function. Given a string of text, tokenize into a list of tokens
# -----------------------------------------------------------------------------
def tokenize(self,text):
tokens = []
self.lexer.input(text)
while True:
tok = self.lexer.token()
if not tok: break
tokens.append(tok)
return tokens
# ---------------------------------------------------------------------
# error()
#
# Report a preprocessor error/warning of some kind
# ----------------------------------------------------------------------
def error(self,file,line,msg):
print("%s:%d %s" % (file,line,msg))
# ----------------------------------------------------------------------
# lexprobe()
#
# This method probes the preprocessor lexer object to discover
# the token types of symbols that are important to the preprocessor.
# If this works right, the preprocessor will simply "work"
# with any suitable lexer regardless of how tokens have been named.
# ----------------------------------------------------------------------
def lexprobe(self):
# Determine the token type for identifiers
self.lexer.input("identifier")
tok = self.lexer.token()
if not tok or tok.value != "identifier":
print("Couldn't determine identifier type")
else:
self.t_ID = tok.type
# Determine the token type for integers
self.lexer.input("12345")
tok = self.lexer.token()
if not tok or int(tok.value) != 12345:
print("Couldn't determine integer type")
else:
self.t_INTEGER = tok.type
self.t_INTEGER_TYPE = type(tok.value)
# Determine the token type for strings enclosed in double quotes
self.lexer.input("\"filename\"")
tok = self.lexer.token()
if not tok or tok.value != "\"filename\"":
print("Couldn't determine string type")
else:
self.t_STRING = tok.type
# Determine the token type for whitespace--if any
self.lexer.input(" ")
tok = self.lexer.token()
if not tok or tok.value != " ":
self.t_SPACE = None
else:
self.t_SPACE = tok.type
# Determine the token type for newlines
self.lexer.input("\n")
tok = self.lexer.token()
if not tok or tok.value != "\n":
self.t_NEWLINE = None
print("Couldn't determine token for newlines")
else:
self.t_NEWLINE = tok.type
self.t_WS = (self.t_SPACE, self.t_NEWLINE)
# Check for other characters used by the preprocessor
chars = [ '<','>','#','##','\\','(',')',',','.']
for c in chars:
self.lexer.input(c)
tok = self.lexer.token()
if not tok or tok.value != c:
print("Unable to lex '%s' required for preprocessor" % c)
# ----------------------------------------------------------------------
# add_path()
#
# Adds a search path to the preprocessor.
# ----------------------------------------------------------------------
def add_path(self,path):
self.path.append(path)
# ----------------------------------------------------------------------
# group_lines()
#
# Given an input string, this function splits it into lines. Trailing whitespace
# is removed. Any line ending with \ is grouped with the next line. This
    # function forms the lowest level of the preprocessor---grouping text into
# a line-by-line format.
# ----------------------------------------------------------------------
def group_lines(self,input):
lex = self.lexer.clone()
lines = [x.rstrip() for x in input.splitlines()]
for i in xrange(len(lines)):
j = i+1
while lines[i].endswith('\\') and (j < len(lines)):
lines[i] = lines[i][:-1]+lines[j]
lines[j] = ""
j += 1
input = "\n".join(lines)
lex.input(input)
lex.lineno = 1
current_line = []
while True:
tok = lex.token()
if not tok:
break
current_line.append(tok)
if tok.type in self.t_WS and '\n' in tok.value:
yield current_line
current_line = []
if current_line:
yield current_line
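    # A worked example (illustration only): the two physical lines
    #
    #     #define ADD(a, b) \
    #             ((a) + (b))
    #
    # are rejoined into the single logical line
    # '#define ADD(a, b)         ((a) + (b))' before tokenizing, since the
    # trailing backslash is stripped and the next physical line is appended.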
# ----------------------------------------------------------------------
# tokenstrip()
#
# Remove leading/trailing whitespace tokens from a token list
# ----------------------------------------------------------------------
def tokenstrip(self,tokens):
i = 0
while i < len(tokens) and tokens[i].type in self.t_WS:
i += 1
del tokens[:i]
i = len(tokens)-1
while i >= 0 and tokens[i].type in self.t_WS:
i -= 1
del tokens[i+1:]
return tokens
# ----------------------------------------------------------------------
# collect_args()
#
# Collects comma separated arguments from a list of tokens. The arguments
# must be enclosed in parenthesis. Returns a tuple (tokencount,args,positions)
# where tokencount is the number of tokens consumed, args is a list of arguments,
# and positions is a list of integers containing the starting index of each
# argument. Each argument is represented by a list of tokens.
#
# When collecting arguments, leading and trailing whitespace is removed
# from each argument.
#
# This function properly handles nested parenthesis and commas---these do not
# define new arguments.
# ----------------------------------------------------------------------
def collect_args(self,tokenlist):
args = []
positions = []
current_arg = []
nesting = 1
tokenlen = len(tokenlist)
# Search for the opening '('.
i = 0
while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
i += 1
if (i < tokenlen) and (tokenlist[i].value == '('):
positions.append(i+1)
else:
self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments")
return 0, [], []
i += 1
while i < tokenlen:
t = tokenlist[i]
if t.value == '(':
current_arg.append(t)
nesting += 1
elif t.value == ')':
nesting -= 1
if nesting == 0:
if current_arg:
args.append(self.tokenstrip(current_arg))
positions.append(i)
return i+1,args,positions
current_arg.append(t)
elif t.value == ',' and nesting == 1:
args.append(self.tokenstrip(current_arg))
positions.append(i+1)
current_arg = []
else:
current_arg.append(t)
i += 1
# Missing end argument
self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
return 0, [],[]
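    # A worked example (illustration only): given the tokens of
    # '(a, f(b, c), d)', the result has three arguments -- [a], [f(b, c)]
    # and [d] -- because the comma inside the nested 'f(...)' call is seen
    # at nesting level 2 and therefore does not split an argument.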
# ----------------------------------------------------------------------
# macro_prescan()
#
# Examine the macro value (token sequence) and identify patch points
# This is used to speed up macro expansion later on---we'll know
# right away where to apply patches to the value to form the expansion
# ----------------------------------------------------------------------
def macro_prescan(self,macro):
macro.patch = [] # Standard macro arguments
macro.str_patch = [] # String conversion expansion
macro.var_comma_patch = [] # Variadic macro comma patch
i = 0
while i < len(macro.value):
if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:
argnum = macro.arglist.index(macro.value[i].value)
# Conversion of argument to a string
if i > 0 and macro.value[i-1].value == '#':
macro.value[i] = copy.copy(macro.value[i])
macro.value[i].type = self.t_STRING
del macro.value[i-1]
macro.str_patch.append((argnum,i-1))
continue
# Concatenation
elif (i > 0 and macro.value[i-1].value == '##'):
macro.patch.append(('c',argnum,i-1))
del macro.value[i-1]
continue
elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'):
macro.patch.append(('c',argnum,i))
i += 1
continue
# Standard expansion
else:
macro.patch.append(('e',argnum,i))
elif macro.value[i].value == '##':
if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \
((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \
(macro.value[i+1].value == macro.vararg):
macro.var_comma_patch.append(i-1)
i += 1
macro.patch.sort(key=lambda x: x[2],reverse=True)
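    # Illustrations of the patch kinds above (not part of the original
    # module): for '#define STR(x) #x' the '#' token is deleted and a
    # str_patch entry records where the stringized argument goes; for
    # '#define CAT(a, b) a##b' both argument sites become 'c' (concatenate,
    # unexpanded) patches; a plain '#define TWICE(x) x+x' yields two 'e'
    # (macro-expand first) patches.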
# ----------------------------------------------------------------------
# macro_expand_args()
#
# Given a Macro and list of arguments (each a token list), this method
# returns an expanded version of a macro. The return value is a token sequence
# representing the replacement macro tokens
# ----------------------------------------------------------------------
def macro_expand_args(self,macro,args):
# Make a copy of the macro token sequence
rep = [copy.copy(_x) for _x in macro.value]
# Make string expansion patches. These do not alter the length of the replacement sequence
str_expansion = {}
for argnum, i in macro.str_patch:
if argnum not in str_expansion:
str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\")
rep[i] = copy.copy(rep[i])
rep[i].value = str_expansion[argnum]
        # Make the variadic macro comma patch. If the variadic macro argument is empty, we get rid of the preceding comma
comma_patch = False
if macro.variadic and not args[-1]:
for i in macro.var_comma_patch:
rep[i] = None
comma_patch = True
# Make all other patches. The order of these matters. It is assumed that the patch list
# has been sorted in reverse order of patch location since replacements will cause the
# size of the replacement sequence to expand from the patch point.
expanded = { }
for ptype, argnum, i in macro.patch:
# Concatenation. Argument is left unexpanded
if ptype == 'c':
rep[i:i+1] = args[argnum]
# Normal expansion. Argument is macro expanded first
elif ptype == 'e':
if argnum not in expanded:
expanded[argnum] = self.expand_macros(args[argnum])
rep[i:i+1] = expanded[argnum]
# Get rid of removed comma if necessary
if comma_patch:
rep = [_i for _i in rep if _i]
return rep
# ----------------------------------------------------------------------
# expand_macros()
#
# Given a list of tokens, this function performs macro expansion.
# The expanded argument is a dictionary that contains macros already
# expanded. This is used to prevent infinite recursion.
# ----------------------------------------------------------------------
def expand_macros(self,tokens,expanded=None):
if expanded is None:
expanded = {}
i = 0
while i < len(tokens):
t = tokens[i]
if t.type == self.t_ID:
if t.value in self.macros and t.value not in expanded:
# Yes, we found a macro match
expanded[t.value] = True
m = self.macros[t.value]
if not m.arglist:
# A simple macro
ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded)
for e in ex:
e.lineno = t.lineno
tokens[i:i+1] = ex
i += len(ex)
else:
# A macro with arguments
j = i + 1
while j < len(tokens) and tokens[j].type in self.t_WS:
j += 1
if tokens[j].value == '(':
tokcount,args,positions = self.collect_args(tokens[j:])
if not m.variadic and len(args) != len(m.arglist):
self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist)))
i = j + tokcount
elif m.variadic and len(args) < len(m.arglist)-1:
if len(m.arglist) > 2:
self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1))
else:
self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1))
i = j + tokcount
else:
if m.variadic:
if len(args) == len(m.arglist)-1:
args.append([])
else:
args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
del args[len(m.arglist):]
# Get macro replacement text
rep = self.macro_expand_args(m,args)
rep = self.expand_macros(rep,expanded)
for r in rep:
r.lineno = t.lineno
tokens[i:j+tokcount] = rep
i += len(rep)
del expanded[t.value]
continue
elif t.value == '__LINE__':
t.type = self.t_INTEGER
t.value = self.t_INTEGER_TYPE(t.lineno)
i += 1
return tokens
# ----------------------------------------------------------------------
# evalexpr()
#
    # Evaluate a token sequence as an integral constant expression, as is
    # needed for #if and #elif directives.
# ----------------------------------------------------------------------
def evalexpr(self,tokens):
# tokens = tokenize(line)
# Search for defined macros
i = 0
while i < len(tokens):
if tokens[i].type == self.t_ID and tokens[i].value == 'defined':
j = i + 1
needparen = False
result = "0L"
while j < len(tokens):
if tokens[j].type in self.t_WS:
j += 1
continue
elif tokens[j].type == self.t_ID:
if tokens[j].value in self.macros:
result = "1L"
else:
result = "0L"
if not needparen: break
elif tokens[j].value == '(':
needparen = True
elif tokens[j].value == ')':
break
else:
self.error(self.source,tokens[i].lineno,"Malformed defined()")
j += 1
tokens[i].type = self.t_INTEGER
tokens[i].value = self.t_INTEGER_TYPE(result)
del tokens[i+1:j+1]
i += 1
tokens = self.expand_macros(tokens)
for i,t in enumerate(tokens):
if t.type == self.t_ID:
tokens[i] = copy.copy(t)
tokens[i].type = self.t_INTEGER
tokens[i].value = self.t_INTEGER_TYPE("0L")
elif t.type == self.t_INTEGER:
tokens[i] = copy.copy(t)
# Strip off any trailing suffixes
tokens[i].value = str(tokens[i].value)
while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
tokens[i].value = tokens[i].value[:-1]
expr = "".join([str(x.value) for x in tokens])
expr = expr.replace("&&"," and ")
expr = expr.replace("||"," or ")
expr = expr.replace("!"," not ")
try:
result = eval(expr)
except Exception:
self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression")
result = 0
return result
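    # A worked example (illustration only): for '#if defined(FOO) && BAR'
    # with FOO defined and BAR not defined, defined(FOO) collapses to the
    # integer token '1L', the remaining identifier BAR is replaced by '0L',
    # the 'L' suffixes are stripped, and '&&' is rewritten, so eval() sees
    # the Python expression '1 and 0', which is false.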
# ----------------------------------------------------------------------
# parsegen()
#
    # Parse an input string.
# ----------------------------------------------------------------------
def parsegen(self,input,source=None):
# Replace trigraph sequences
t = trigraph(input)
lines = self.group_lines(t)
if not source:
source = ""
self.define("__FILE__ \"%s\"" % source)
self.source = source
chunk = []
enable = True
iftrigger = False
ifstack = []
for x in lines:
for i,tok in enumerate(x):
if tok.type not in self.t_WS: break
if tok.value == '#':
# Preprocessor directive
# insert necessary whitespace instead of eaten tokens
for tok in x:
if tok.type in self.t_WS and '\n' in tok.value:
chunk.append(tok)
dirtokens = self.tokenstrip(x[i+1:])
if dirtokens:
name = dirtokens[0].value
args = self.tokenstrip(dirtokens[1:])
else:
name = ""
args = []
if name == 'define':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
self.define(args)
elif name == 'include':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
oldfile = self.macros['__FILE__']
for tok in self.include(args):
yield tok
self.macros['__FILE__'] = oldfile
self.source = source
elif name == 'undef':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
self.undef(args)
elif name == 'ifdef':
ifstack.append((enable,iftrigger))
if enable:
if not args[0].value in self.macros:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'ifndef':
ifstack.append((enable,iftrigger))
if enable:
if args[0].value in self.macros:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'if':
ifstack.append((enable,iftrigger))
if enable:
result = self.evalexpr(args)
if not result:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'elif':
if ifstack:
if ifstack[-1][0]: # We only pay attention if outer "if" allows this
if enable: # If already true, we flip enable False
enable = False
elif not iftrigger: # If False, but not triggered yet, we'll check expression
result = self.evalexpr(args)
if result:
enable = True
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")
elif name == 'else':
if ifstack:
if ifstack[-1][0]:
if enable:
enable = False
elif not iftrigger:
enable = True
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #else")
elif name == 'endif':
if ifstack:
enable,iftrigger = ifstack.pop()
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #endif")
else:
# Unknown preprocessor directive
pass
else:
# Normal text
if enable:
chunk.extend(x)
for tok in self.expand_macros(chunk):
yield tok
chunk = []
# ----------------------------------------------------------------------
# include()
#
# Implementation of file-inclusion
# ----------------------------------------------------------------------
def include(self,tokens):
# Try to extract the filename and then process an include file
if not tokens:
return
if tokens:
if tokens[0].value != '<' and tokens[0].type != self.t_STRING:
tokens = self.expand_macros(tokens)
if tokens[0].value == '<':
# Include <...>
i = 1
while i < len(tokens):
if tokens[i].value == '>':
break
i += 1
else:
print("Malformed #include <...>")
return
filename = "".join([x.value for x in tokens[1:i]])
path = self.path + [""] + self.temp_path
elif tokens[0].type == self.t_STRING:
filename = tokens[0].value[1:-1]
path = self.temp_path + [""] + self.path
else:
print("Malformed #include statement")
return
for p in path:
iname = os.path.join(p,filename)
try:
data = open(iname,"r").read()
dname = os.path.dirname(iname)
if dname:
self.temp_path.insert(0,dname)
for tok in self.parsegen(data,filename):
yield tok
if dname:
del self.temp_path[0]
break
except IOError:
pass
else:
print("Couldn't find '%s'" % filename)
# ----------------------------------------------------------------------
# define()
#
# Define a new macro
# ----------------------------------------------------------------------
def define(self,tokens):
if isinstance(tokens,STRING_TYPES):
tokens = self.tokenize(tokens)
linetok = tokens
try:
name = linetok[0]
if len(linetok) > 1:
mtype = linetok[1]
else:
mtype = None
if not mtype:
m = Macro(name.value,[])
self.macros[name.value] = m
elif mtype.type in self.t_WS:
# A normal macro
m = Macro(name.value,self.tokenstrip(linetok[2:]))
self.macros[name.value] = m
elif mtype.value == '(':
# A macro with arguments
tokcount, args, positions = self.collect_args(linetok[1:])
variadic = False
for a in args:
if variadic:
print("No more arguments may follow a variadic argument")
break
astr = "".join([str(_i.value) for _i in a])
if astr == "...":
variadic = True
a[0].type = self.t_ID
a[0].value = '__VA_ARGS__'
del a[1:]
continue
elif astr[-3:] == "..." and a[0].type == self.t_ID:
variadic = True
del a[1:]
# If, for some reason, "." is part of the identifier, strip off the name for the purposes
# of macro expansion
if a[0].value[-3:] == '...':
a[0].value = a[0].value[:-3]
continue
if len(a) > 1 or a[0].type != self.t_ID:
print("Invalid macro argument")
break
else:
mvalue = self.tokenstrip(linetok[1+tokcount:])
i = 0
while i < len(mvalue):
if i+1 < len(mvalue):
if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##':
del mvalue[i]
continue
elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS:
del mvalue[i+1]
i += 1
m = Macro(name.value,mvalue,[x[0].value for x in args],variadic)
self.macro_prescan(m)
self.macros[name.value] = m
else:
print("Bad macro definition")
except LookupError:
print("Bad macro definition")
# ----------------------------------------------------------------------
# undef()
#
# Undefine a macro
# ----------------------------------------------------------------------
def undef(self,tokens):
id = tokens[0].value
try:
del self.macros[id]
except LookupError:
pass
# ----------------------------------------------------------------------
# parse()
#
# Parse input text.
# ----------------------------------------------------------------------
def parse(self,input,source=None,ignore={}):
self.ignore = ignore
self.parser = self.parsegen(input,source)
# ----------------------------------------------------------------------
# token()
#
# Method to return individual tokens
# ----------------------------------------------------------------------
def token(self):
try:
while True:
tok = next(self.parser)
if tok.type not in self.ignore: return tok
except StopIteration:
self.parser = None
return None
if __name__ == '__main__':
import ply.lex as lex
lexer = lex.lex()
# Run a preprocessor
import sys
f = open(sys.argv[1])
input = f.read()
p = Preprocessor(lexer)
p.parse(input,sys.argv[1])
while True:
tok = p.token()
if not tok: break
print(p.source, tok)
| 33,282 | Python | 35.736203 | 141 | 0.420588 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/ply/yacc.py | # -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2017
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammar is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as a cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup ([email protected]),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expense of readability and what one might
# consider to be good Python "coding style." Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
import re
import types
import sys
import os.path
import inspect
import base64
import warnings
__version__ = '3.10'
__tabversion__ = '3.10'
#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------
yaccdebug = True # Debugging mode. If set, yacc generates a
                             # 'parser.out' file in the current directory
debug_file = 'parser.out' # Default name of the debugging file
tab_module = 'parsetab' # Default name of the table module
default_lr = 'LALR' # Default LR table generation method
error_count = 3 # Number of symbols that must be shifted to leave recovery mode
yaccdevel = False # Set to True if developing yacc. This turns off optimized
# implementations of certain functions.
resultlimit = 40 # Size limit of results when running in debug mode.
pickle_protocol = 0 # Protocol to use when writing pickle files
# String type-checking compatibility
if sys.version_info[0] < 3:
string_types = basestring
else:
string_types = str
MAXINT = sys.maxsize
# This object is a stand-in for a logging object created by the
# logging module. PLY will use this by default to create things
# such as the parser.out file. If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.
class PlyLogger(object):
def __init__(self, f):
self.f = f
def debug(self, msg, *args, **kwargs):
self.f.write((msg % args) + '\n')
info = debug
def warning(self, msg, *args, **kwargs):
self.f.write('WARNING: ' + (msg % args) + '\n')
def error(self, msg, *args, **kwargs):
self.f.write('ERROR: ' + (msg % args) + '\n')
critical = debug
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self, name):
return self
def __call__(self, *args, **kwargs):
return self
# Exception raised for yacc-related errors
class YaccError(Exception):
pass
# Format the result message that the parser produces when running in debug mode.
def format_result(r):
repr_str = repr(r)
if '\n' in repr_str:
repr_str = repr(repr_str)
if len(repr_str) > resultlimit:
repr_str = repr_str[:resultlimit] + ' ...'
result = '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), repr_str)
return result
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
repr_str = repr(r)
if '\n' in repr_str:
repr_str = repr(repr_str)
if len(repr_str) < 16:
return repr_str
else:
return '<%s @ 0x%x>' % (type(r).__name__, id(r))
# Panic mode error recovery support. This feature is being reworked--much of the
# code here is to offer a deprecation/backwards compatible transition
_errok = None
_token = None
_restart = None
_warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error().
Instead, invoke the methods on the associated parser instance:
def p_error(p):
...
# Use parser.errok(), parser.token(), parser.restart()
...
parser = yacc.yacc()
'''
def errok():
warnings.warn(_warnmsg)
return _errok()
def restart():
warnings.warn(_warnmsg)
return _restart()
def token():
warnings.warn(_warnmsg)
return _token()
# Utility function to call the p_error() function with some deprecation hacks
def call_errorfunc(errorfunc, token, parser):
global _errok, _token, _restart
_errok = parser.errok
_token = parser.token
_restart = parser.restart
r = errorfunc(token)
try:
del _errok, _token, _restart
except NameError:
pass
return r
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
def __str__(self):
return self.type
def __repr__(self):
return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
class YaccProduction:
def __init__(self, s, stack=None):
self.slice = s
self.stack = stack
self.lexer = None
self.parser = None
def __getitem__(self, n):
if isinstance(n, slice):
return [s.value for s in self.slice[n]]
elif n >= 0:
return self.slice[n].value
else:
return self.stack[n].value
def __setitem__(self, n, v):
self.slice[n].value = v
def __getslice__(self, i, j):
return [s.value for s in self.slice[i:j]]
def __len__(self):
return len(self.slice)
def lineno(self, n):
return getattr(self.slice[n], 'lineno', 0)
def set_lineno(self, n, lineno):
self.slice[n].lineno = lineno
def linespan(self, n):
startline = getattr(self.slice[n], 'lineno', 0)
endline = getattr(self.slice[n], 'endlineno', startline)
return startline, endline
def lexpos(self, n):
return getattr(self.slice[n], 'lexpos', 0)
def lexspan(self, n):
startpos = getattr(self.slice[n], 'lexpos', 0)
endpos = getattr(self.slice[n], 'endlexpos', startpos)
return startpos, endpos
def error(self):
raise SyntaxError
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
def __init__(self, lrtab, errorf):
self.productions = lrtab.lr_productions
self.action = lrtab.lr_action
self.goto = lrtab.lr_goto
self.errorfunc = errorf
self.set_defaulted_states()
self.errorok = True
def errok(self):
self.errorok = True
def restart(self):
del self.statestack[:]
del self.symstack[:]
sym = YaccSymbol()
sym.type = '$end'
self.symstack.append(sym)
self.statestack.append(0)
# Defaulted state support.
# This method identifies parser states where there is only one possible reduction action.
    # For such states, the parser can choose to make a rule reduction without consuming
# the next look-ahead token. This delayed invocation of the tokenizer can be useful in
# certain kinds of advanced parsing situations where the lexer and parser interact with
# each other or change states (i.e., manipulation of scope, lexer states, etc.).
#
# See: https://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions
def set_defaulted_states(self):
self.defaulted_states = {}
for state, actions in self.action.items():
rules = list(actions.values())
if len(rules) == 1 and rules[0] < 0:
self.defaulted_states[state] = rules[0]
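    # An illustration (hypothetical table, not part of this module): if
    # self.action were {7: {'SEMI': -12}, 8: {'SEMI': -3, 'RBRACE': 9}},
    # only state 7 is defaulted (a single entry, and negative, meaning a
    # reduction), so defaulted_states would become {7: -12}; state 8 is
    # skipped because it still needs the lookahead to pick an action.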
def disable_defaulted_states(self):
self.defaulted_states = {}
def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
if debug or yaccdevel:
if isinstance(debug, int):
debug = PlyLogger(sys.stderr)
return self.parsedebug(input, lexer, debug, tracking, tokenfunc)
elif tracking:
return self.parseopt(input, lexer, debug, tracking, tokenfunc)
else:
return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parsedebug().
#
# This is the debugging enabled version of parse(). All changes made to the
# parsing engine should be made here. Optimized versions of this function
# are automatically created by the ply/ygen.py script. This script cuts out
# sections enclosed in markers such as this:
#
# #--! DEBUG
# statements
# #--! DEBUG
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parsedebug-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
#--! DEBUG
debug.info('PLY: PARSE DEBUG START')
#--! DEBUG
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
#--! DEBUG
debug.debug('')
debug.debug('State : %s', state)
#--! DEBUG
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
#--! DEBUG
debug.debug('Defaulted state %s: Reduce using %d', state, -t)
#--! DEBUG
#--! DEBUG
debug.debug('Stack : %s',
('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
#--! DEBUG
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
#--! DEBUG
debug.debug('Action : Shift and goto state %s', t)
#--! DEBUG
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
#--! DEBUG
if plen:
debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str,
'['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']',
goto[statestack[-1-plen]][pname])
else:
debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [],
goto[statestack[-1]][pname])
#--! DEBUG
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
#--! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
#--! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
self.state = state
p.callable(pslice)
del statestack[-plen:]
#--! DEBUG
debug.info('Result : %s', format_result(pslice[0]))
#--! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
symstack.extend(targ[1:-1]) # Put the production slice back on the stack
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
#--! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
#--! TRACKING
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
self.state = state
p.callable(pslice)
#--! DEBUG
debug.info('Result : %s', format_result(pslice[0]))
#--! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
#--! DEBUG
debug.info('Done : Returning %s', format_result(result))
debug.info('PLY: PARSE DEBUG END')
#--! DEBUG
return result
if t is None:
#--! DEBUG
debug.error('Error : %s',
('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
#--! DEBUG
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
                # In addition to pushing the error token, we call
                # the user-defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
self.state = state
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
                                sys.stderr.write('yacc: Syntax error, token=%s\n' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
#--! TRACKING
if tracking:
sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
#--! TRACKING
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
#--! TRACKING
if tracking:
lookahead.lineno = sym.lineno
lookahead.lexpos = sym.lexpos
#--! TRACKING
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parsedebug-end
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt().
#
# Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY!
# This code is automatically generated by the ply/ygen.py script. Make
# changes to the parsedebug() method instead.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parseopt-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
#--! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
#--! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
self.state = state
p.callable(pslice)
del statestack[-plen:]
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
symstack.extend(targ[1:-1]) # Put the production slice back on the stack
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
#--! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
#--! TRACKING
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
self.state = state
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
return result
if t is None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user-defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
self.state = state
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s\n' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
#--! TRACKING
if tracking:
sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
#--! TRACKING
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
#--! TRACKING
if tracking:
lookahead.lineno = sym.lineno
lookahead.lexpos = sym.lexpos
#--! TRACKING
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parseopt-end
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt_notrack().
#
# Optimized version of parseopt() with line number tracking removed.
# DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated
# by the ply/ygen.py script. Make changes to the parsedebug() method instead.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parseopt-notrack-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
self.state = state
p.callable(pslice)
del statestack[-plen:]
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
symstack.extend(targ[1:-1]) # Put the production slice back on the stack
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
self.state = state
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
return result
if t is None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user-defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
self.state = state
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s\n' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parseopt-notrack-end
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
# regex matching identifiers
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
# expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','PLUS','term']
# prec - Production precedence level
# number - Production number.
# func - Function that executes on reduce
# file - File where production function is defined
# lineno - Line number where production function is defined
#
# The following attributes are defined or optional.
#
# len - Length of the production (number of symbols on right hand side)
# usyms - Set of unique symbols found in the production
# -----------------------------------------------------------------------------
class Production(object):
reduced = 0
def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
self.name = name
self.prod = tuple(prod)
self.number = number
self.func = func
self.callable = None
self.file = file
self.line = line
self.prec = precedence
# Internal settings used during table construction
self.len = len(self.prod) # Length of the production
# Create a list of unique production symbols used in the production
self.usyms = []
for s in self.prod:
if s not in self.usyms:
self.usyms.append(s)
# List of all LR items for the production
self.lr_items = []
self.lr_next = None
# Create a string representation
if self.prod:
self.str = '%s -> %s' % (self.name, ' '.join(self.prod))
else:
self.str = '%s -> <empty>' % self.name
def __str__(self):
return self.str
def __repr__(self):
return 'Production(' + str(self) + ')'
def __len__(self):
return len(self.prod)
def __nonzero__(self):
return 1
def __getitem__(self, index):
return self.prod[index]
# Return the nth lr_item from the production (or None if at the end)
def lr_item(self, n):
if n > len(self.prod):
return None
p = LRItem(self, n)
# Precompute the list of productions immediately following.
try:
p.lr_after = Prodnames[p.prod[n+1]]
except (IndexError, KeyError):
p.lr_after = []
try:
p.lr_before = p.prod[n-1]
except IndexError:
p.lr_before = None
return p
# Bind the production function name to a callable
def bind(self, pdict):
if self.func:
self.callable = pdict[self.func]
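# Illustrative sketch of a Production instance (hypothetical rule;
# Grammar.add_production() normally constructs these):
#
#     p = Production(1, 'expr', ['expr', 'PLUS', 'term'])
#     len(p)       # -> 3
#     p.usyms      # -> ['expr', 'PLUS', 'term']
#     str(p)       # -> 'expr -> expr PLUS term'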
# This class serves as a minimal stand-in for Production objects when
# reading table data from files. It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
def __init__(self, str, name, len, func, file, line):
self.name = name
self.len = len
self.func = func
self.callable = None
self.file = file
self.line = line
self.str = str
def __str__(self):
return self.str
def __repr__(self):
return 'MiniProduction(%s)' % self.str
# Bind the production function name to a callable
def bind(self, pdict):
if self.func:
self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule. For
# example:
#
# expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse. Here
# are its basic attributes:
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
# number - Production number.
#
# lr_next - Next LR item. For example, if we are 'expr -> expr . PLUS term'
# then lr_next refers to 'expr -> expr PLUS . term'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# lr_after - List of all productions that immediately follow
# lr_before - Grammar symbol immediately before
# -----------------------------------------------------------------------------
class LRItem(object):
def __init__(self, p, n):
self.name = p.name
self.prod = list(p.prod)
self.number = p.number
self.lr_index = n
self.lookaheads = {}
self.prod.insert(n, '.')
self.prod = tuple(self.prod)
self.len = len(self.prod)
self.usyms = p.usyms
def __str__(self):
if self.prod:
s = '%s -> %s' % (self.name, ' '.join(self.prod))
else:
s = '%s -> <empty>' % self.name
return s
def __repr__(self):
return 'LRItem(' + str(self) + ')'
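# Illustrative sketch (continuing the hypothetical production above):
#
#     p = Production(1, 'expr', ['expr', 'PLUS', 'term'])
#     str(LRItem(p, 0))    # -> 'expr -> . expr PLUS term'
#     str(LRItem(p, 1))    # -> 'expr -> expr . PLUS term'
#     str(LRItem(p, 3))    # -> 'expr -> expr PLUS term .'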
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols. Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
i = len(symbols) - 1
while i >= 0:
if symbols[i] in terminals:
return symbols[i]
i -= 1
return None
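# For example (hypothetical symbols), with terminals {'PLUS', 'NUMBER'}:
#
#     rightmost_terminal(['expr', 'PLUS', 'term'], {'PLUS', 'NUMBER'})
#     # -> 'PLUS'   ('term' is not a terminal, so the scan keeps moving left)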
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------
class GrammarError(YaccError):
pass
class Grammar(object):
def __init__(self, terminals):
self.Productions = [None] # A list of all of the productions. The first
# entry is always reserved for the purpose of
# building an augmented grammar
self.Prodnames = {} # A dictionary mapping the names of nonterminals to a list of all
# productions of that nonterminal.
self.Prodmap = {} # A dictionary that is only used to detect duplicate
# productions.
self.Terminals = {} # A dictionary mapping the names of terminal symbols to a
# list of the rules where they are used.
for term in terminals:
self.Terminals[term] = []
self.Terminals['error'] = []
self.Nonterminals = {} # A dictionary mapping names of nonterminals to a list
# of rule numbers where they are used.
self.First = {} # A dictionary of precomputed FIRST(x) symbols
self.Follow = {} # A dictionary of precomputed FOLLOW(x) symbols
self.Precedence = {} # Precedence rules for each terminal. Contains tuples of the
# form ('right',level) or ('nonassoc', level) or ('left',level)
self.UsedPrecedence = set() # Precedence rules that were actually used by the grammar.
# This is only used to provide error checking and to generate
# a warning about unused precedence rules.
self.Start = None # Starting symbol for the grammar
def __len__(self):
return len(self.Productions)
def __getitem__(self, index):
return self.Productions[index]
# -----------------------------------------------------------------------------
# set_precedence()
#
# Sets the precedence for a given terminal. assoc is the associativity such as
# 'left','right', or 'nonassoc'. level is a numeric level.
#
# -----------------------------------------------------------------------------
def set_precedence(self, term, assoc, level):
assert self.Productions == [None], 'Must call set_precedence() before add_production()'
if term in self.Precedence:
raise GrammarError('Precedence already specified for terminal %r' % term)
if assoc not in ['left', 'right', 'nonassoc']:
raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
self.Precedence[term] = (assoc, level)
# -----------------------------------------------------------------------------
# add_production()
#
# Given an action function, this function assembles a production rule and
# computes its precedence level.
#
# The production rule is supplied as a list of symbols. For example,
# a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
# symbols ['expr','PLUS','term'].
#
# Precedence is determined by the precedence of the rightmost terminal symbol
# in the production or by the precedence of a terminal specified by %prec.
#
# A variety of error checks are performed to make sure production symbols
# are valid and that %prec is used correctly.
# -----------------------------------------------------------------------------
def add_production(self, prodname, syms, func=None, file='', line=0):
if prodname in self.Terminals:
raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname))
if prodname == 'error':
raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname))
if not _is_identifier.match(prodname):
raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname))
# Look for literal tokens
for n, s in enumerate(syms):
if s[0] in "'\"":
try:
c = eval(s)
if (len(c) > 1):
raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' %
(file, line, s, prodname))
if c not in self.Terminals:
self.Terminals[c] = []
syms[n] = c
continue
except SyntaxError:
pass
if not _is_identifier.match(s) and s != '%prec':
raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname))
# Determine the precedence level
if '%prec' in syms:
if syms[-1] == '%prec':
raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line))
if syms[-2] != '%prec':
raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' %
(file, line))
precname = syms[-1]
prodprec = self.Precedence.get(precname)
if not prodprec:
raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname))
else:
self.UsedPrecedence.add(precname)
del syms[-2:] # Drop %prec from the rule
else:
# If no %prec, precedence is determined by the rightmost terminal symbol
precname = rightmost_terminal(syms, self.Terminals)
prodprec = self.Precedence.get(precname, ('right', 0))
# See if the rule is already in the rulemap
map = '%s -> %s' % (prodname, syms)
if map in self.Prodmap:
m = self.Prodmap[map]
raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) +
'Previous definition at %s:%d' % (m.file, m.line))
# From this point on, everything is valid. Create a new Production instance
pnumber = len(self.Productions)
if prodname not in self.Nonterminals:
self.Nonterminals[prodname] = []
# Add the production number to Terminals and Nonterminals
for t in syms:
if t in self.Terminals:
self.Terminals[t].append(pnumber)
else:
if t not in self.Nonterminals:
self.Nonterminals[t] = []
self.Nonterminals[t].append(pnumber)
# Create a production and add it to the list of productions
p = Production(pnumber, prodname, syms, prodprec, func, file, line)
self.Productions.append(p)
self.Prodmap[map] = p
# Add to the global productions list
try:
self.Prodnames[prodname].append(p)
except KeyError:
self.Prodnames[prodname] = [p]
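# Illustrative sketch of building a small grammar with this API
# (hypothetical token names):
#
#     g = Grammar(['PLUS', 'TIMES', 'NUMBER'])
#     g.set_precedence('PLUS', 'left', 1)
#     g.set_precedence('TIMES', 'left', 2)
#     g.add_production('expr', ['expr', 'PLUS', 'expr'])
#     g.add_production('expr', ['expr', 'TIMES', 'expr'])
#     g.add_production('expr', ['NUMBER'])
#     g.set_start('expr')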
# -----------------------------------------------------------------------------
# set_start()
#
# Sets the starting symbol and creates the augmented grammar. Production
# rule 0 is S' -> start where start is the start symbol.
# -----------------------------------------------------------------------------
def set_start(self, start=None):
if not start:
start = self.Productions[1].name
if start not in self.Nonterminals:
raise GrammarError('start symbol %s undefined' % start)
self.Productions[0] = Production(0, "S'", [start])
self.Nonterminals[start].append(0)
self.Start = start
# -----------------------------------------------------------------------------
# find_unreachable()
#
# Find all of the nonterminal symbols that can't be reached from the starting
# symbol. Returns a list of nonterminals that can't be reached.
# -----------------------------------------------------------------------------
def find_unreachable(self):
# Mark all symbols that are reachable from a symbol s
def mark_reachable_from(s):
if s in reachable:
return
reachable.add(s)
for p in self.Prodnames.get(s, []):
for r in p.prod:
mark_reachable_from(r)
reachable = set()
mark_reachable_from(self.Productions[0].prod[0])
return [s for s in self.Nonterminals if s not in reachable]
# -----------------------------------------------------------------------------
# infinite_cycles()
#
# This function looks at the various parsing rules and tries to detect
# infinite recursion cycles (grammar rules where there is no possible way
# to derive a string of only terminals).
# -----------------------------------------------------------------------------
def infinite_cycles(self):
terminates = {}
# Terminals:
for t in self.Terminals:
terminates[t] = True
terminates['$end'] = True
# Nonterminals:
# Initialize to false:
for n in self.Nonterminals:
terminates[n] = False
# Then propagate termination until no change:
while True:
some_change = False
for (n, pl) in self.Prodnames.items():
# Nonterminal n terminates iff any of its productions terminates.
for p in pl:
# Production p terminates iff all of its rhs symbols terminate.
for s in p.prod:
if not terminates[s]:
# The symbol s does not terminate,
# so production p does not terminate.
p_terminates = False
break
else:
# didn't break from the loop,
# so every symbol s terminates
# so production p terminates.
p_terminates = True
if p_terminates:
# symbol n terminates!
if not terminates[n]:
terminates[n] = True
some_change = True
# Don't need to consider any more productions for this n.
break
if not some_change:
break
infinite = []
for (s, term) in terminates.items():
if not term:
if s not in self.Prodnames and s not in self.Terminals and s != 'error':
# s is used-but-not-defined, and we've already warned of that,
# so it would be overkill to say that it's also non-terminating.
pass
else:
infinite.append(s)
return infinite
# -----------------------------------------------------------------------------
# undefined_symbols()
#
# Find all symbols that were used in the grammar, but not defined as tokens or
# grammar rules. Returns a list of tuples (sym, prod) where sym is the symbol
# and prod is the production where the symbol was used.
# -----------------------------------------------------------------------------
def undefined_symbols(self):
result = []
for p in self.Productions:
if not p:
continue
for s in p.prod:
if s not in self.Prodnames and s not in self.Terminals and s != 'error':
result.append((s, p))
return result
# -----------------------------------------------------------------------------
# unused_terminals()
#
# Find all terminals that were defined, but not used by the grammar. Returns
# a list of the unused terminal symbols.
# -----------------------------------------------------------------------------
def unused_terminals(self):
unused_tok = []
for s, v in self.Terminals.items():
if s != 'error' and not v:
unused_tok.append(s)
return unused_tok
# ------------------------------------------------------------------------------
# unused_rules()
#
# Find all grammar rules that were defined, but not used (maybe not reachable)
# Returns a list of productions.
# ------------------------------------------------------------------------------
def unused_rules(self):
unused_prod = []
for s, v in self.Nonterminals.items():
if not v:
p = self.Prodnames[s][0]
unused_prod.append(p)
return unused_prod
# -----------------------------------------------------------------------------
# unused_precedence()
#
# Returns a list of tuples (term,precedence) corresponding to precedence
# rules that were never used by the grammar. term is the name of the terminal
# on which precedence was applied and precedence is a string such as 'left' or
# 'right' corresponding to the type of precedence.
# -----------------------------------------------------------------------------
def unused_precedence(self):
unused = []
for termname in self.Precedence:
if not (termname in self.Terminals or termname in self.UsedPrecedence):
unused.append((termname, self.Precedence[termname][0]))
return unused
# -------------------------------------------------------------------------
# _first()
#
# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
#
# During execution of compute_first(), the result may be incomplete.
# Afterward (e.g., when called from compute_follow()), it will be complete.
# -------------------------------------------------------------------------
def _first(self, beta):
# We are computing First(x1,x2,x3,...,xn)
result = []
for x in beta:
x_produces_empty = False
# Add all the non-<empty> symbols of First[x] to the result.
for f in self.First[x]:
if f == '<empty>':
x_produces_empty = True
else:
if f not in result:
result.append(f)
if x_produces_empty:
# We have to consider the next x in beta,
# i.e. stay in the loop.
pass
else:
# We don't have to consider any further symbols in beta.
break
else:
# There was no 'break' from the loop,
# so x_produces_empty was true for all x in beta,
# so beta produces empty as well.
result.append('<empty>')
return result
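# For example (toy grammar, hypothetical): given the rules
#
#     a : 'x' b
#     b : 'y'
#     b : <empty>
#
# FIRST(b) is ['y', '<empty>'], so _first(('b', 'x')) yields ['y', 'x']:
# b can derive empty, so the scan continues into the following symbol.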
# -------------------------------------------------------------------------
# compute_first()
#
# Compute the value of FIRST1(X) for all symbols
# -------------------------------------------------------------------------
def compute_first(self):
if self.First:
return self.First
# Terminals:
for t in self.Terminals:
self.First[t] = [t]
self.First['$end'] = ['$end']
# Nonterminals:
# Initialize to the empty set:
for n in self.Nonterminals:
self.First[n] = []
# Then propagate symbols until no change:
while True:
some_change = False
for n in self.Nonterminals:
for p in self.Prodnames[n]:
for f in self._first(p.prod):
if f not in self.First[n]:
self.First[n].append(f)
some_change = True
if not some_change:
break
return self.First
# ---------------------------------------------------------------------
# compute_follow()
#
# Computes all of the follow sets for every non-terminal symbol. The
# follow set is the set of all symbols that might follow a given
# non-terminal. See the Dragon book, 2nd Ed. p. 189.
# ---------------------------------------------------------------------
def compute_follow(self, start=None):
# If already computed, return the result
if self.Follow:
return self.Follow
# If first sets not computed yet, do that first.
if not self.First:
self.compute_first()
# Add '$end' to the follow list of the start symbol
for k in self.Nonterminals:
self.Follow[k] = []
if not start:
start = self.Productions[1].name
self.Follow[start] = ['$end']
while True:
didadd = False
for p in self.Productions[1:]:
# Here is the production set
for i, B in enumerate(p.prod):
if B in self.Nonterminals:
# Okay. We got a non-terminal in a production
fst = self._first(p.prod[i+1:])
hasempty = False
for f in fst:
if f != '<empty>' and f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = True
if f == '<empty>':
hasempty = True
if hasempty or i == (len(p.prod)-1):
# Add elements of follow(a) to follow(b)
for f in self.Follow[p.name]:
if f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = True
if not didadd:
break
return self.Follow
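# For example (toy grammar, hypothetical): with start symbol s and
#
#     s : '(' s ')'
#     s : 'x'
#
# FOLLOW(s) works out to ['$end', ')']: '$end' is seeded for the start
# symbol, and ')' follows the inner s in the first rule.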
# -----------------------------------------------------------------------------
# build_lritems()
#
# This function walks the list of productions and builds a complete set of the
# LR items. The LR items are stored in two ways: First, they are uniquely
# numbered and placed in the list _lritems. Second, a linked list of LR items
# is built for each production. For example:
#
# E -> E PLUS E
#
# Creates the list
#
# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
# -----------------------------------------------------------------------------
def build_lritems(self):
for p in self.Productions:
lastlri = p
i = 0
lr_items = []
while True:
if i > len(p):
lri = None
else:
lri = LRItem(p, i)
# Precompute the list of productions immediately following
try:
lri.lr_after = self.Prodnames[lri.prod[i+1]]
except (IndexError, KeyError):
lri.lr_after = []
try:
lri.lr_before = lri.prod[i-1]
except IndexError:
lri.lr_before = None
lastlri.lr_next = lri
if not lri:
break
lr_items.append(lri)
lastlri = lri
i += 1
p.lr_items = lr_items
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here. They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
class VersionError(YaccError):
pass
class LRTable(object):
def __init__(self):
self.lr_action = None
self.lr_goto = None
self.lr_productions = None
self.lr_method = None
def read_table(self, module):
if isinstance(module, types.ModuleType):
parsetab = module
else:
exec('import %s' % module)
parsetab = sys.modules[module]
if parsetab._tabversion != __tabversion__:
raise VersionError('yacc table file version is out of date')
self.lr_action = parsetab._lr_action
self.lr_goto = parsetab._lr_goto
self.lr_productions = []
for p in parsetab._lr_productions:
self.lr_productions.append(MiniProduction(*p))
self.lr_method = parsetab._lr_method
return parsetab._lr_signature
def read_pickle(self, filename):
try:
import cPickle as pickle
except ImportError:
import pickle
if not os.path.exists(filename):
raise ImportError
in_f = open(filename, 'rb')
tabversion = pickle.load(in_f)
if tabversion != __tabversion__:
raise VersionError('yacc table file version is out of date')
self.lr_method = pickle.load(in_f)
signature = pickle.load(in_f)
self.lr_action = pickle.load(in_f)
self.lr_goto = pickle.load(in_f)
productions = pickle.load(in_f)
self.lr_productions = []
for p in productions:
self.lr_productions.append(MiniProduction(*p))
in_f.close()
return signature
# Bind all production function names to callable objects in pdict
def bind_callables(self, pdict):
for p in self.lr_productions:
p.bind(pdict)
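# Illustrative usage sketch (hypothetical module and file names):
#
#     lr = LRTable()
#     sig = lr.read_table('parsetab')        # import a parsetab module, or
#     sig = lr.read_pickle('parser.out')     # load a pickled table file
#     lr.bind_callables(globals())           # attach the p_* rule functions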
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
def digraph(X, R, FP):
N = {}
for x in X:
N[x] = 0
stack = []
F = {}
for x in X:
if N[x] == 0:
traverse(x, N, stack, F, X, R, FP)
return F
def traverse(x, N, stack, F, X, R, FP):
stack.append(x)
d = len(stack)
N[x] = d
F[x] = FP(x) # F(X) <- F'(x)
rel = R(x) # Get y's related to x
for y in rel:
if N[y] == 0:
traverse(y, N, stack, F, X, R, FP)
N[x] = min(N[x], N[y])
for a in F.get(y, []):
if a not in F[x]:
F[x].append(a)
if N[x] == d:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
while element != x:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
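# Illustrative sketch (hypothetical relation): with
#
#     X = ['a', 'b']
#     R = lambda x: {'a': ['b'], 'b': []}[x]
#     FP = lambda x: [x]
#
# digraph(X, R, FP) returns {'a': ['a', 'b'], 'b': ['b']} -- each F(x)
# is F'(x) unioned with F(y) for every y reachable from x through R.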
class LALRError(YaccError):
pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
def __init__(self, grammar, method='LALR', log=None):
if method not in ['SLR', 'LALR']:
raise LALRError('Unsupported method %s' % method)
self.grammar = grammar
self.lr_method = method
# Set up the logger
if not log:
log = NullLogger()
self.log = log
# Internal attributes
self.lr_action = {} # Action table
self.lr_goto = {} # Goto table
self.lr_productions = grammar.Productions # Copy of grammar Production array
self.lr_goto_cache = {} # Cache of computed gotos
self.lr0_cidhash = {} # Cache of closures
self._add_count = 0 # Internal counter used to detect cycles
# Diagnostic information filled in by the table generator
self.sr_conflict = 0
self.rr_conflict = 0
self.conflicts = [] # List of conflicts
self.sr_conflicts = []
self.rr_conflicts = []
# Build the tables
self.grammar.build_lritems()
self.grammar.compute_first()
self.grammar.compute_follow()
self.lr_parse_table()
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
def lr0_closure(self, I):
self._add_count += 1
# Add everything in I to J
J = I[:]
didadd = True
while didadd:
didadd = False
for j in J:
for x in j.lr_after:
if getattr(x, 'lr0_added', 0) == self._add_count:
continue
# Add B --> .G to J
J.append(x.lr_next)
x.lr0_added = self._add_count
didadd = True
return J
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
def lr0_goto(self, I, x):
# First we look for a previously cached entry
g = self.lr_goto_cache.get((id(I), x))
if g:
return g
# Now we generate the goto set in a way that guarantees uniqueness
# of the result
s = self.lr_goto_cache.get(x)
if not s:
s = {}
self.lr_goto_cache[x] = s
gs = []
for p in I:
n = p.lr_next
if n and n.lr_before == x:
s1 = s.get(id(n))
if not s1:
s1 = {}
s[id(n)] = s1
gs.append(n)
s = s1
g = s.get('$end')
if not g:
if gs:
g = self.lr0_closure(gs)
s['$end'] = g
else:
s['$end'] = gs
self.lr_goto_cache[(id(I), x)] = g
return g
# Compute the LR(0) sets of item function
def lr0_items(self):
C = [self.lr0_closure([self.grammar.Productions[0].lr_next])]
i = 0
for I in C:
self.lr0_cidhash[id(I)] = i
i += 1
# Loop over the items in C and each grammar symbols
i = 0
while i < len(C):
I = C[i]
i += 1
# Collect all of the symbols that could possibly be in the goto(I,X) sets
asyms = {}
for ii in I:
for s in ii.usyms:
asyms[s] = None
for x in asyms:
g = self.lr0_goto(I, x)
if not g or id(g) in self.lr0_cidhash:
continue
self.lr0_cidhash[id(g)] = len(C)
C.append(g)
return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
# The method used here is due to DeRemer and Pennelo (1982).
#
# DeRemer, F. L., and T. J. Pennelo: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
nullable = set()
num_nullable = 0
while True:
for p in self.grammar.Productions[1:]:
if p.len == 0:
nullable.add(p.name)
continue
for t in p.prod:
if t not in nullable:
break
else:
nullable.add(p.name)
if len(nullable) == num_nullable:
break
num_nullable = len(nullable)
return nullable
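# For example (toy grammar, hypothetical): with the rules
#
#     a : b b
#     b : 'y'
#     b : <empty>
#
# the fixed-point loop first marks b nullable (it has an empty
# production) and then a, since every symbol on a's right-hand
# side is nullable.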
# -----------------------------------------------------------------------------
# find_nonterminal_transitions(C)
#
# Given a set of LR(0) items, this function finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self, C):
trans = []
for stateno, state in enumerate(C):
for p in state:
if p.lr_index < p.len - 1:
t = (stateno, p.prod[p.lr_index+1])
if t[1] in self.grammar.Nonterminals:
if t not in trans:
trans.append(t)
return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(self, C, trans, nullable):
dr_set = {}
state, N = trans
terms = []
g = self.lr0_goto(C[state], N)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index+1]
if a in self.grammar.Terminals:
if a not in terms:
terms.append(a)
# This extra bit is to handle the start state
if state == 0 and N == self.grammar.Productions[0].prod[0]:
terms.append('$end')
return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self, C, trans, empty):
# Look for empty transitions
rel = []
state, N = trans
g = self.lr0_goto(C[state], N)
j = self.lr0_cidhash.get(id(g), -1)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index + 1]
if a in empty:
rel.append((j, a))
return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
def compute_lookback_includes(self, C, trans, nullable):
lookdict = {} # Dictionary of lookback relations
includedict = {} # Dictionary of include relations
# Make a dictionary of non-terminal transitions
dtrans = {}
for t in trans:
dtrans[t] = 1
# Loop over all transitions and compute lookbacks and includes
for state, N in trans:
lookb = []
includes = []
for p in C[state]:
if p.name != N:
continue
# Okay, we have a name match. We now follow the production all the way
# through the state machine until we get the . on the right hand side
lr_index = p.lr_index
j = state
while lr_index < p.len - 1:
lr_index = lr_index + 1
t = p.prod[lr_index]
# Check to see if this symbol and state are a non-terminal transition
if (j, t) in dtrans:
# Yes. Okay, there is some chance that this is an includes relation
# the only way to know for certain is whether the rest of the
# production derives empty
li = lr_index + 1
while li < p.len:
if p.prod[li] in self.grammar.Terminals:
break # No, forget it
if p.prod[li] not in nullable:
break
li = li + 1
else:
# Appears to be a relation between (j,t) and (state,N)
includes.append((j, t))
g = self.lr0_goto(C[j], t) # Go to next set
j = self.lr0_cidhash.get(id(g), -1) # Go to next state
# When we get here, j is the final state, now we have to locate the production
for r in C[j]:
if r.name != p.name:
continue
if r.len != p.len:
continue
i = 0
# This loop is comparing a production ". A B C" with "A B C ."
while i < r.lr_index:
if r.prod[i] != p.prod[i+1]:
break
i = i + 1
else:
lookb.append((j, r))
for i in includes:
if i not in includedict:
includedict[i] = []
includedict[i].append((state, N))
lookdict[(state, N)] = lookb
return lookdict, includedict
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(self, C, ntrans, nullable):
FP = lambda x: self.dr_relation(C, x, nullable)
R = lambda x: self.reads_relation(C, x, nullable)
F = digraph(ntrans, R, FP)
return F
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(self, ntrans, readsets, inclsets):
FP = lambda x: readsets[x]
R = lambda x: inclsets.get(x, [])
F = digraph(ntrans, R, FP)
return F
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self, lookbacks, followset):
for trans, lb in lookbacks.items():
# Loop over productions in lookback
for state, p in lb:
if state not in p.lookaheads:
p.lookaheads[state] = []
f = followset.get(trans, [])
for a in f:
if a not in p.lookaheads[state]:
p.lookaheads[state].append(a)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(self, C):
# Determine all of the nullable nonterminals
nullable = self.compute_nullable_nonterminals()
# Find all non-terminal transitions
trans = self.find_nonterminal_transitions(C)
# Compute read sets
readsets = self.compute_read_sets(C, trans, nullable)
# Compute lookback/includes relations
lookd, included = self.compute_lookback_includes(C, trans, nullable)
# Compute LALR FOLLOW sets
followsets = self.compute_follow_sets(trans, readsets, included)
# Add all of the lookaheads
self.add_lookaheads(lookd, followsets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(self):
Productions = self.grammar.Productions
Precedence = self.grammar.Precedence
goto = self.lr_goto # Goto array
action = self.lr_action # Action array
log = self.log # Logger for output
actionp = {} # Action production array (temporary)
log.info('Parsing method: %s', self.lr_method)
# Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
# This determines the number of states
C = self.lr0_items()
if self.lr_method == 'LALR':
self.add_lalr_lookaheads(C)
# Build the parser table, state by state
st = 0
for I in C:
# Loop over each production in I
actlist = [] # List of actions
st_action = {}
st_actionp = {}
st_goto = {}
log.info('')
log.info('state %d', st)
log.info('')
for p in I:
log.info(' (%d) %s', p.number, p)
log.info('')
for p in I:
if p.len == p.lr_index + 1:
if p.name == "S'":
# Start symbol. Accept!
st_action['$end'] = 0
st_actionp['$end'] = p
else:
# We are at the end of a production. Reduce!
if self.lr_method == 'LALR':
laheads = p.lookaheads[st]
else:
laheads = self.grammar.Follow[p.name]
for a in laheads:
actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p)))
r = st_action.get(a)
if r is not None:
# Whoa. Have a shift/reduce or reduce/reduce conflict
if r > 0:
# Need to decide on shift or reduce here.
# By default we favor shifting, but the
# precedence rules below may select a reduce.
# Shift precedence comes from the token
sprec, slevel = Precedence.get(a, ('right', 0))
# Reduce precedence comes from rule being reduced (p)
rprec, rlevel = Productions[p.number].prec
if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
# We really need to reduce here.
st_action[a] = -p.number
st_actionp[a] = p
if not slevel and not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
self.sr_conflicts.append((st, a, 'reduce'))
Productions[p.number].reduced += 1
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the shift
if not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as shift', a)
self.sr_conflicts.append((st, a, 'shift'))
elif r < 0:
# Reduce/reduce conflict. In this case, we favor the rule
# that was defined first in the grammar file
oldp = Productions[-r]
pp = Productions[p.number]
if oldp.line > pp.line:
st_action[a] = -p.number
st_actionp[a] = p
chosenp, rejectp = pp, oldp
Productions[p.number].reduced += 1
Productions[oldp.number].reduced -= 1
else:
chosenp, rejectp = oldp, pp
self.rr_conflicts.append((st, chosenp, rejectp))
log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)',
a, st_actionp[a].number, st_actionp[a])
else:
raise LALRError('Unknown conflict in state %d' % st)
else:
st_action[a] = -p.number
st_actionp[a] = p
Productions[p.number].reduced += 1
else:
i = p.lr_index
a = p.prod[i+1] # Get symbol right after the "."
if a in self.grammar.Terminals:
g = self.lr0_goto(I, a)
j = self.lr0_cidhash.get(id(g), -1)
if j >= 0:
# We are in a shift state
actlist.append((a, p, 'shift and go to state %d' % j))
r = st_action.get(a)
if r is not None:
# Whoa have a shift/reduce or shift/shift conflict
if r > 0:
if r != j:
raise LALRError('Shift/shift conflict in state %d' % st)
elif r < 0:
# Do a precedence check.
# - if precedence of reduce rule is higher, we reduce.
# - if precedence of reduce is same and left assoc, we reduce.
# - otherwise we shift
# Shift precedence comes from the token
sprec, slevel = Precedence.get(a, ('right', 0))
# Reduce precedence comes from the rule that could have been reduced
rprec, rlevel = Productions[st_actionp[a].number].prec
if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
# We decide to shift here... highest precedence to shift
Productions[st_actionp[a].number].reduced -= 1
st_action[a] = j
st_actionp[a] = p
if not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as shift', a)
self.sr_conflicts.append((st, a, 'shift'))
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the reduce
if not slevel and not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
self.sr_conflicts.append((st, a, 'reduce'))
else:
raise LALRError('Unknown conflict in state %d' % st)
else:
st_action[a] = j
st_actionp[a] = p
# Print the actions associated with each terminal
_actprint = {}
for a, p, m in actlist:
if a in st_action:
if p is st_actionp[a]:
log.info(' %-15s %s', a, m)
_actprint[(a, m)] = 1
log.info('')
# Print the actions that were not used. (debugging)
not_used = 0
for a, p, m in actlist:
if a in st_action:
if p is not st_actionp[a]:
if (a, m) not in _actprint:
log.debug(' ! %-15s [ %s ]', a, m)
not_used = 1
_actprint[(a, m)] = 1
if not_used:
log.debug('')
# Construct the goto table for this state
nkeys = {}
for ii in I:
for s in ii.usyms:
if s in self.grammar.Nonterminals:
nkeys[s] = None
for n in nkeys:
g = self.lr0_goto(I, n)
j = self.lr0_cidhash.get(id(g), -1)
if j >= 0:
st_goto[n] = j
log.info(' %-30s shift and go to state %d', n, j)
action[st] = st_action
actionp[st] = st_actionp
goto[st] = st_goto
st += 1
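# Worked example of the resolution rules above (hypothetical grammar, not
# taken from this file): given precedence = (('left','PLUS'), ('left','TIMES')),
# the item "expr -> expr PLUS expr ." with lookahead TIMES is resolved as a
# shift (token level 2 beats rule level 1), while lookahead PLUS is resolved
# as a reduce (equal levels, left-associative).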
# -----------------------------------------------------------------------------
# write()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def write_table(self, tabmodule, outputdir='', signature=''):
if isinstance(tabmodule, types.ModuleType):
raise IOError("Won't overwrite existing tabmodule")
basemodulename = tabmodule.split('.')[-1]
filename = os.path.join(outputdir, basemodulename) + '.py'
with open(filename, 'w') as f:
f.write('''
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r
_lr_method = %r
_lr_signature = %r
''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature))
# Change smaller to 0 to go back to original tables
smaller = 1
# Factor out names to try to make the tables smaller
if smaller:
items = {}
for s, nd in self.lr_action.items():
for name, v in nd.items():
i = items.get(name)
if not i:
i = ([], [])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write('\n_lr_action_items = {')
for k, v in items.items():
f.write('%r:([' % k)
for i in v[0]:
f.write('%r,' % i)
f.write('],[')
for i in v[1]:
f.write('%r,' % i)
f.write(']),')
f.write('}\n')
f.write('''
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if _x not in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
''')
else:
f.write('\n_lr_action = { ')
for k, v in self.lr_action.items():
f.write('(%r,%r):%r,' % (k[0], k[1], v))
f.write('}\n')
if smaller:
# Factor out names to try to make the tables smaller
items = {}
for s, nd in self.lr_goto.items():
for name, v in nd.items():
i = items.get(name)
if not i:
i = ([], [])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write('\n_lr_goto_items = {')
for k, v in items.items():
f.write('%r:([' % k)
for i in v[0]:
f.write('%r,' % i)
f.write('],[')
for i in v[1]:
f.write('%r,' % i)
f.write(']),')
f.write('}\n')
f.write('''
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if _x not in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
''')
else:
f.write('\n_lr_goto = { ')
for k, v in self.lr_goto.items():
f.write('(%r,%r):%r,' % (k[0], k[1], v))
f.write('}\n')
# Write production table
f.write('_lr_productions = [\n')
for p in self.lr_productions:
if p.func:
f.write(' (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len,
p.func, os.path.basename(p.file), p.line))
else:
f.write(' (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len))
f.write(']\n')
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self, filename, signature=''):
try:
import cPickle as pickle
except ImportError:
import pickle
with open(filename, 'wb') as outf:
pickle.dump(__tabversion__, outf, pickle_protocol)
pickle.dump(self.lr_method, outf, pickle_protocol)
pickle.dump(signature, outf, pickle_protocol)
pickle.dump(self.lr_action, outf, pickle_protocol)
pickle.dump(self.lr_goto, outf, pickle_protocol)
outp = []
for p in self.lr_productions:
if p.func:
outp.append((p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line))
else:
outp.append((str(p), p.name, p.len, None, None, None))
pickle.dump(outp, outf, pickle_protocol)
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
f = sys._getframe(levels)
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
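# For example, yacc() below calls get_caller_module_dict(2): frame 0 is this
# function itself, frame 1 is yacc(), and frame 2 is the module that invoked
# yacc(), whose globals and locals supply the grammar definitions.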
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc, file, line):
grammar = []
# Split the doc string into lines
pstrings = doc.splitlines()
lastp = None
dline = line
for ps in pstrings:
dline += 1
p = ps.split()
if not p:
continue
try:
if p[0] == '|':
# This is a continuation of a previous rule
if not lastp:
raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline))
prodname = lastp
syms = p[1:]
else:
prodname = p[0]
lastp = prodname
syms = p[2:]
assign = p[1]
if assign != ':' and assign != '::=':
raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline))
grammar.append((file, dline, prodname, syms))
except SyntaxError:
raise
except Exception:
raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, ps.strip()))
return grammar
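# A small illustration (hypothetical input, shown doctest-style):
#
#     >>> parse_grammar("expression : expression PLUS term\n"
#     ...               "           | term", 'calc.py', 10)
#     [('calc.py', 11, 'expression', ['expression', 'PLUS', 'term']),
#      ('calc.py', 12, 'expression', ['term'])]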
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
def __init__(self, pdict, log=None):
self.pdict = pdict
self.start = None
self.error_func = None
self.tokens = None
self.modules = set()
self.grammar = []
self.error = False
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_start()
self.get_error_func()
self.get_tokens()
self.get_precedence()
self.get_pfunctions()
# Validate all of the information
def validate_all(self):
self.validate_start()
self.validate_error_func()
self.validate_tokens()
self.validate_precedence()
self.validate_pfunctions()
self.validate_modules()
return self.error
# Compute a signature over the grammar
def signature(self):
parts = []
try:
if self.start:
parts.append(self.start)
if self.prec:
parts.append(''.join([''.join(p) for p in self.prec]))
if self.tokens:
parts.append(' '.join(self.tokens))
for f in self.pfuncs:
if f[3]:
parts.append(f[3])
except (TypeError, ValueError):
pass
return ''.join(parts)
# -----------------------------------------------------------------------------
# validate_modules()
#
# This method checks to see if there are duplicated p_rulename() functions
# in the parser module file. Without this function, it is really easy for
# users to make mistakes by cutting and pasting code fragments (and it's a real
# bugger to try and figure out why the resulting parser doesn't work). Therefore,
# we just do a little regular expression pattern matching of def statements
# to try and detect duplicates.
# -----------------------------------------------------------------------------
def validate_modules(self):
# Match def p_funcname(
fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
for module in self.modules:
try:
lines, linen = inspect.getsourcelines(module)
except IOError:
continue
counthash = {}
for linen, line in enumerate(lines):
linen += 1
m = fre.match(line)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
filename = inspect.getsourcefile(module)
self.log.warning('%s:%d: Function %s redefined. Previously defined on line %d',
filename, linen, name, prev)
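# The cut-and-paste mistake this guards against looks like this (hypothetical
# module code):
#
#     def p_expr(p):
#         'expr : expr PLUS term'
#
#     def p_expr(p):          # silently replaces the rule above
#         'expr : term'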
# Get the start symbol
def get_start(self):
self.start = self.pdict.get('start')
# Validate the start symbol
def validate_start(self):
if self.start is not None:
if not isinstance(self.start, string_types):
self.log.error("'start' must be a string")
# Look for error handler
def get_error_func(self):
self.error_func = self.pdict.get('p_error')
# Validate the error function
def validate_error_func(self):
if self.error_func:
if isinstance(self.error_func, types.FunctionType):
ismethod = 0
elif isinstance(self.error_func, types.MethodType):
ismethod = 1
else:
self.log.error("'p_error' defined, but is not a function or method")
self.error = True
return
eline = self.error_func.__code__.co_firstlineno
efile = self.error_func.__code__.co_filename
module = inspect.getmodule(self.error_func)
self.modules.add(module)
argcount = self.error_func.__code__.co_argcount - ismethod
if argcount != 1:
self.log.error('%s:%d: p_error() requires 1 argument', efile, eline)
self.error = True
# Get the tokens map
def get_tokens(self):
tokens = self.pdict.get('tokens')
if not tokens:
self.log.error('No token list is defined')
self.error = True
return
if not isinstance(tokens, (list, tuple)):
self.log.error('tokens must be a list or tuple')
self.error = True
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
# Validate the tokens.
if 'error' in self.tokens:
self.log.error("Illegal token name 'error'. Is a reserved word")
self.error = True
return
terminals = set()
for n in self.tokens:
if n in terminals:
self.log.warning('Token %r multiply defined', n)
terminals.add(n)
# Get the precedence map (if any)
def get_precedence(self):
self.prec = self.pdict.get('precedence')
# Validate and parse the precedence map
def validate_precedence(self):
preclist = []
if self.prec:
if not isinstance(self.prec, (list, tuple)):
self.log.error('precedence must be a list or tuple')
self.error = True
return
for level, p in enumerate(self.prec):
if not isinstance(p, (list, tuple)):
self.log.error('Bad precedence table')
self.error = True
return
if len(p) < 2:
self.log.error('Malformed precedence entry %s. Must be (assoc, term, ..., term)', p)
self.error = True
return
assoc = p[0]
if not isinstance(assoc, string_types):
self.log.error('precedence associativity must be a string')
self.error = True
return
for term in p[1:]:
if not isinstance(term, string_types):
self.log.error('precedence items must be strings')
self.error = True
return
preclist.append((term, assoc, level+1))
self.preclist = preclist
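# For reference, a well-formed table that passes these checks (illustrative):
#
#     precedence = (
#         ('left',  'PLUS', 'MINUS'),
#         ('left',  'TIMES', 'DIVIDE'),
#         ('right', 'UMINUS'),
#     )
#
# which this method flattens to entries such as ('PLUS', 'left', 1) and
# ('UMINUS', 'right', 3).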
# Get all p_functions from the grammar
def get_pfunctions(self):
p_functions = []
for name, item in self.pdict.items():
if not name.startswith('p_') or name == 'p_error':
continue
if isinstance(item, (types.FunctionType, types.MethodType)):
line = getattr(item, 'co_firstlineno', item.__code__.co_firstlineno)
module = inspect.getmodule(item)
p_functions.append((line, module, name, item.__doc__))
# Sort all of the actions by line number; make sure to stringify
# modules to make them sortable, since `line` may not uniquely sort all
# p functions
p_functions.sort(key=lambda p_function: (
p_function[0],
str(p_function[1]),
p_function[2],
p_function[3]))
self.pfuncs = p_functions
# Validate all of the p_functions
def validate_pfunctions(self):
grammar = []
# Check for non-empty symbols
if len(self.pfuncs) == 0:
self.log.error('no rules of the form p_rulename are defined')
self.error = True
return
for line, module, name, doc in self.pfuncs:
file = inspect.getsourcefile(module)
func = self.pdict[name]
if isinstance(func, types.MethodType):
reqargs = 2
else:
reqargs = 1
if func.__code__.co_argcount > reqargs:
self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__)
self.error = True
elif func.__code__.co_argcount < reqargs:
self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__)
self.error = True
elif not func.__doc__:
self.log.warning('%s:%d: No documentation string specified in function %r (ignored)',
file, line, func.__name__)
else:
try:
parsed_g = parse_grammar(doc, file, line)
for g in parsed_g:
grammar.append((name, g))
except SyntaxError as e:
self.log.error(str(e))
self.error = True
# Looks like a valid grammar rule
# Mark the file in which defined.
self.modules.add(module)
# Secondary validation step that looks for p_ definitions that are not functions
# or functions that look like they might be grammar rules.
for n, v in self.pdict.items():
if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)):
continue
if n.startswith('t_'):
continue
if n.startswith('p_') and n != 'p_error':
self.log.warning('%r not defined as a function', n)
if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or
(isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)):
if v.__doc__:
try:
doc = v.__doc__.split(' ')
if doc[1] == ':':
self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix',
v.__code__.co_filename, v.__code__.co_firstlineno, n)
except IndexError:
pass
self.grammar = grammar
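# A rule function that passes all of the checks above (illustrative sketch):
#
#     def p_expression_plus(p):
#         'expression : expression PLUS term'
#         p[0] = p[1] + p[3]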
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file,
outputdir=None, debuglog=None, errorlog=None, picklefile=None):
if tabmodule is None:
tabmodule = tab_module
# Reference to the parsing method of the last built parser
global parse
# If pickling is enabled, table files are not created
if picklefile:
write_tables = False
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
# Get the module dictionary used for the parser
if module:
_items = [(k, getattr(module, k)) for k in dir(module)]
pdict = dict(_items)
# If no __file__ attribute is available, try to obtain it from the __module__ instead
if '__file__' not in pdict:
pdict['__file__'] = sys.modules[pdict['__module__']].__file__
else:
pdict = get_caller_module_dict(2)
if outputdir is None:
# If no output directory is set, the location of the output files
# is determined according to the following rules:
# - If tabmodule specifies a package, files go into that package directory
# - Otherwise, files go in the same directory as the specifying module
if isinstance(tabmodule, types.ModuleType):
srcfile = tabmodule.__file__
else:
if '.' not in tabmodule:
srcfile = pdict['__file__']
else:
parts = tabmodule.split('.')
pkgname = '.'.join(parts[:-1])
exec('import %s' % pkgname)
srcfile = getattr(sys.modules[pkgname], '__file__', '')
outputdir = os.path.dirname(srcfile)
# Determine if the module is part of a package.
# If so, fix the tabmodule setting so that tables load correctly
pkg = pdict.get('__package__')
if pkg and isinstance(tabmodule, str):
if '.' not in tabmodule:
tabmodule = pkg + '.' + tabmodule
# Set start symbol if it's specified directly using an argument
if start is not None:
pdict['start'] = start
# Collect parser information from the dictionary
pinfo = ParserReflect(pdict, log=errorlog)
pinfo.get_all()
if pinfo.error:
raise YaccError('Unable to build parser')
# Check signature against table files (if any)
signature = pinfo.signature()
# Read the tables
try:
lr = LRTable()
if picklefile:
read_signature = lr.read_pickle(picklefile)
else:
read_signature = lr.read_table(tabmodule)
if optimize or (read_signature == signature):
try:
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr, pinfo.error_func)
parse = parser.parse
return parser
except Exception as e:
errorlog.warning('There was a problem loading the table file: %r', e)
except VersionError as e:
errorlog.warning(str(e))
except ImportError:
pass
if debuglog is None:
if debug:
try:
debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w'))
except IOError as e:
errorlog.warning("Couldn't open %r. %s" % (debugfile, e))
debuglog = NullLogger()
else:
debuglog = NullLogger()
debuglog.info('Created by PLY version %s (http://www.dabeaz.com/ply)', __version__)
errors = False
# Validate the parser information
if pinfo.validate_all():
raise YaccError('Unable to build parser')
if not pinfo.error_func:
errorlog.warning('no p_error() function is defined')
# Create a grammar object
grammar = Grammar(pinfo.tokens)
# Set precedence level for terminals
for term, assoc, level in pinfo.preclist:
try:
grammar.set_precedence(term, assoc, level)
except GrammarError as e:
errorlog.warning('%s', e)
# Add productions to the grammar
for funcname, gram in pinfo.grammar:
file, line, prodname, syms = gram
try:
grammar.add_production(prodname, syms, funcname, file, line)
except GrammarError as e:
errorlog.error('%s', e)
errors = True
# Set the grammar start symbols
try:
if start is None:
grammar.set_start(pinfo.start)
else:
grammar.set_start(start)
except GrammarError as e:
errorlog.error(str(e))
errors = True
if errors:
raise YaccError('Unable to build parser')
# Verify the grammar structure
undefined_symbols = grammar.undefined_symbols()
for sym, prod in undefined_symbols:
errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym)
errors = True
unused_terminals = grammar.unused_terminals()
if unused_terminals:
debuglog.info('')
debuglog.info('Unused terminals:')
debuglog.info('')
for term in unused_terminals:
errorlog.warning('Token %r defined, but not used', term)
debuglog.info(' %s', term)
# Print out all productions to the debug log
if debug:
debuglog.info('')
debuglog.info('Grammar')
debuglog.info('')
for n, p in enumerate(grammar.Productions):
debuglog.info('Rule %-5d %s', n, p)
# Find unused non-terminals
unused_rules = grammar.unused_rules()
for prod in unused_rules:
errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name)
if len(unused_terminals) == 1:
errorlog.warning('There is 1 unused token')
if len(unused_terminals) > 1:
errorlog.warning('There are %d unused tokens', len(unused_terminals))
if len(unused_rules) == 1:
errorlog.warning('There is 1 unused rule')
if len(unused_rules) > 1:
errorlog.warning('There are %d unused rules', len(unused_rules))
if debug:
debuglog.info('')
debuglog.info('Terminals, with rules where they appear')
debuglog.info('')
terms = list(grammar.Terminals)
terms.sort()
for term in terms:
debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]]))
debuglog.info('')
debuglog.info('Nonterminals, with rules where they appear')
debuglog.info('')
nonterms = list(grammar.Nonterminals)
nonterms.sort()
for nonterm in nonterms:
debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]]))
debuglog.info('')
if check_recursion:
unreachable = grammar.find_unreachable()
for u in unreachable:
errorlog.warning('Symbol %r is unreachable', u)
infinite = grammar.infinite_cycles()
for inf in infinite:
errorlog.error('Infinite recursion detected for symbol %r', inf)
errors = True
unused_prec = grammar.unused_precedence()
for term, assoc in unused_prec:
errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term)
errors = True
if errors:
raise YaccError('Unable to build parser')
# Run the LRGeneratedTable on the grammar
if debug:
errorlog.debug('Generating %s tables', method)
lr = LRGeneratedTable(grammar, method, debuglog)
if debug:
num_sr = len(lr.sr_conflicts)
# Report shift/reduce and reduce/reduce conflicts
if num_sr == 1:
errorlog.warning('1 shift/reduce conflict')
elif num_sr > 1:
errorlog.warning('%d shift/reduce conflicts', num_sr)
num_rr = len(lr.rr_conflicts)
if num_rr == 1:
errorlog.warning('1 reduce/reduce conflict')
elif num_rr > 1:
errorlog.warning('%d reduce/reduce conflicts', num_rr)
# Write out conflicts to the output file
if debug and (lr.sr_conflicts or lr.rr_conflicts):
debuglog.warning('')
debuglog.warning('Conflicts:')
debuglog.warning('')
for state, tok, resolution in lr.sr_conflicts:
debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution)
already_reported = set()
for state, rule, rejected in lr.rr_conflicts:
if (state, id(rule), id(rejected)) in already_reported:
continue
debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
debuglog.warning('rejected rule (%s) in state %d', rejected, state)
errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
errorlog.warning('rejected rule (%s) in state %d', rejected, state)
already_reported.add((state, id(rule), id(rejected)))
warned_never = []
for state, rule, rejected in lr.rr_conflicts:
if not rejected.reduced and (rejected not in warned_never):
debuglog.warning('Rule (%s) is never reduced', rejected)
errorlog.warning('Rule (%s) is never reduced', rejected)
warned_never.append(rejected)
# Write the table file if requested
if write_tables:
try:
lr.write_table(tabmodule, outputdir, signature)
except IOError as e:
errorlog.warning("Couldn't create %r. %s" % (tabmodule, e))
# Write a pickled version of the tables
if picklefile:
try:
lr.pickle_table(picklefile, signature)
except IOError as e:
errorlog.warning("Couldn't create %r. %s" % (picklefile, e))
# Build the parser
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr, pinfo.error_func)
parse = parser.parse
return parser
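# Typical entry point (sketch; assumes the calling module defines tokens,
# p_* rules, and has already built a matching lexer with ply.lex):
#
#     import ply.yacc as yacc
#     parser = yacc.yacc()                # builds/loads tables from the caller's rules
#     result = parser.parse('1 + 2 * 3')  # uses the last lexer built by lex()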
| 137,323 | Python | 38.291559 | 119 | 0.467358 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/ply/__init__.py | # PLY package
# Author: David Beazley ([email protected])
__version__ = '3.9'
__all__ = ['lex','yacc']
| 102 | Python | 16.166664 | 41 | 0.588235 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/ply/ctokens.py | # ----------------------------------------------------------------------
# ctokens.py
#
# Token specifications for symbols in ANSI C and C++. This file is
# meant to be used as a library in other tokenizers.
# ----------------------------------------------------------------------
# Reserved words
tokens = [
# Literals (identifier, integer constant, float constant, string constant, char const)
'ID', 'TYPEID', 'INTEGER', 'FLOAT', 'STRING', 'CHARACTER',
# Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULO',
'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
'LOR', 'LAND', 'LNOT',
'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
# Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',
# Increment/decrement (++,--)
'INCREMENT', 'DECREMENT',
# Structure dereference (->)
'ARROW',
# Ternary operator (?)
'TERNARY',
# Delimiters ( ) [ ] { } , . ; :
'LPAREN', 'RPAREN',
'LBRACKET', 'RBRACKET',
'LBRACE', 'RBRACE',
'COMMA', 'PERIOD', 'SEMI', 'COLON',
# Ellipsis (...)
'ELLIPSIS',
]
# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MODULO = r'%'
t_OR = r'\|'
t_AND = r'&'
t_NOT = r'~'
t_XOR = r'\^'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_LOR = r'\|\|'
t_LAND = r'&&'
t_LNOT = r'!'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='
# Assignment operators
t_EQUALS = r'='
t_TIMESEQUAL = r'\*='
t_DIVEQUAL = r'/='
t_MODEQUAL = r'%='
t_PLUSEQUAL = r'\+='
t_MINUSEQUAL = r'-='
t_LSHIFTEQUAL = r'<<='
t_RSHIFTEQUAL = r'>>='
t_ANDEQUAL = r'&='
t_OREQUAL = r'\|='
t_XOREQUAL = r'\^='
# Increment/decrement
t_INCREMENT = r'\+\+'
t_DECREMENT = r'--'
# ->
t_ARROW = r'->'
# ?
t_TERNARY = r'\?'
# Delimiters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_COMMA = r','
t_PERIOD = r'\.'
t_SEMI = r';'
t_COLON = r':'
t_ELLIPSIS = r'\.\.\.'
# Identifiers
t_ID = r'[A-Za-z_][A-Za-z0-9_]*'
# Integer literal
t_INTEGER = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'
# Floating literal
t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
t_STRING = r'\"([^\\\n]|(\\.))*?\"'
# Character constant 'c' or L'c'
t_CHARACTER = r'(L)?\'([^\\\n]|(\\.))*?\''
# Comment (C-Style)
def t_COMMENT(t):
r'/\*(.|\n)*?\*/'
t.lexer.lineno += t.value.count('\n')
return t
# Comment (C++-Style)
def t_CPPCOMMENT(t):
r'//.*\n'
t.lexer.lineno += 1
return t
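# Illustrative use as a library (sketch; the host module supplies anything
# extra it needs, such as t_ignore and t_error):
#
#     import ply.lex as lex
#     from ply.ctokens import *   # pulls in `tokens` and the t_* rules above
#
#     t_ignore = ' \t'
#
#     lexer = lex.lex()
#     lexer.input('x = a + b;')
#     for tok in lexer:
#         print(tok)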
| 3,177 | Python | 22.716418 | 90 | 0.393768 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/ply/ygen.py | # ply: ygen.py
#
# This is a support program that auto-generates different versions of the YACC parsing
# function with different features removed for the purposes of performance.
#
# Users should edit the method LRParser.parsedebug() in yacc.py. The source code
# for that method is then used to create the other methods. See the comments in
# yacc.py for further details.
import os.path
import shutil
def get_source_range(lines, tag):
srclines = enumerate(lines)
start_tag = '#--! %s-start' % tag
end_tag = '#--! %s-end' % tag
for start_index, line in srclines:
if line.strip().startswith(start_tag):
break
for end_index, line in srclines:
if line.strip().startswith(end_tag):
break
return (start_index + 1, end_index)
def filter_section(lines, tag):
filtered_lines = []
include = True
tag_text = '#--! %s' % tag
for line in lines:
if line.strip().startswith(tag_text):
include = not include
elif include:
filtered_lines.append(line)
return filtered_lines
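# Quick illustration of the toggle behaviour (hypothetical input):
#
#     filter_section(['a\n', '#--! DEBUG\n', 'b\n', '#--! DEBUG\n', 'c\n'], 'DEBUG')
#
# returns ['a\n', 'c\n'] -- the paired markers flip `include`, and both the
# markers and the lines between them are dropped.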
def main():
dirname = os.path.dirname(__file__)
shutil.copy2(os.path.join(dirname, 'yacc.py'), os.path.join(dirname, 'yacc.py.bak'))
with open(os.path.join(dirname, 'yacc.py'), 'r') as f:
lines = f.readlines()
parse_start, parse_end = get_source_range(lines, 'parsedebug')
parseopt_start, parseopt_end = get_source_range(lines, 'parseopt')
parseopt_notrack_start, parseopt_notrack_end = get_source_range(lines, 'parseopt-notrack')
# Get the original source
orig_lines = lines[parse_start:parse_end]
# Filter the DEBUG sections out
parseopt_lines = filter_section(orig_lines, 'DEBUG')
# Filter the TRACKING sections out
parseopt_notrack_lines = filter_section(parseopt_lines, 'TRACKING')
# Replace the parser source sections with the updated versions. The later
# parseopt-notrack range is rewritten first so that the earlier parseopt
# line indices remain valid.
lines[parseopt_notrack_start:parseopt_notrack_end] = parseopt_notrack_lines
lines[parseopt_start:parseopt_end] = parseopt_lines
lines = [line.rstrip()+'\n' for line in lines]
with open(os.path.join(dirname, 'yacc.py'), 'w') as f:
f.writelines(lines)
print('Updated yacc.py')
if __name__ == '__main__':
main()
| 2,251 | Python | 29.026666 | 94 | 0.657486 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycparser/ply/lex.py | # -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2017
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
__version__ = '3.10'
__tabversion__ = '3.10'
import re
import sys
import types
import copy
import os
import inspect
# This tuple contains known string types
try:
# Python 2.6
StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
# Python 3.0
StringTypes = (str, bytes)
# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
def __init__(self, message, s):
self.args = (message,)
self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
def __str__(self):
return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos)
def __repr__(self):
return str(self)
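# For instance, a NUMBER token matched at the very start of the input prints
# as LexToken(NUMBER,'42',1,0) -- type, value, line number, character offset
# (illustrative values).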
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
def __init__(self, f):
self.f = f
def critical(self, msg, *args, **kwargs):
self.f.write((msg % args) + '\n')
def warning(self, msg, *args, **kwargs):
self.f.write('WARNING: ' + (msg % args) + '\n')
def error(self, msg, *args, **kwargs):
self.f.write('ERROR: ' + (msg % args) + '\n')
info = critical
debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self, name):
return self
def __call__(self, *args, **kwargs):
return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
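# A minimal sketch of that interface (assumes a lexer already built by the
# lex() function defined later in this file):
#
#     lexer.input('3 + 4')
#     while True:
#         tok = lexer.token()
#         if not tok:
#             break
#         print(tok.type, tok.value, tok.lineno, tok.lexpos)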
class Lexer:
def __init__(self):
self.lexre = None # Master regular expression. This is a list of
# tuples (re, findex) where re is a compiled
# regular expression and findex is a list
# mapping regex group numbers to rules
self.lexretext = None # Current regular expression strings
self.lexstatere = {} # Dictionary mapping lexer states to master regexs
self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
self.lexstate = 'INITIAL' # Current lexer state
self.lexstatestack = [] # Stack of lexer states
self.lexstateinfo = None # State information
self.lexstateignore = {} # Dictionary of ignored characters for each state
self.lexstateerrorf = {} # Dictionary of error functions for each state
self.lexstateeoff = {} # Dictionary of eof functions for each state
self.lexreflags = 0 # Optional re compile flags
self.lexdata = None # Actual input data (as a string)
self.lexpos = 0 # Current position in input text
self.lexlen = 0 # Length of the input text
self.lexerrorf = None # Error rule (if any)
self.lexeoff = None # EOF rule (if any)
self.lextokens = None # List of valid tokens
self.lexignore = '' # Ignored characters
self.lexliterals = '' # Literal characters that can be passed through
self.lexmodule = None # Module
self.lineno = 1 # Current line number
self.lexoptimize = False # Optimized mode
def clone(self, object=None):
c = copy.copy(self)
# If the object parameter has been supplied, it means we are attaching the
# lexer to a new object. In this case, we have to rebind all methods in
# the lexstatere and lexstateerrorf tables.
if object:
newtab = {}
for key, ritem in self.lexstatere.items():
newre = []
for cre, findex in ritem:
newfindex = []
for f in findex:
if not f or not f[0]:
newfindex.append(f)
continue
newfindex.append((getattr(object, f[0].__name__), f[1]))
newre.append((cre, newfindex))
newtab[key] = newre
c.lexstatere = newtab
c.lexstateerrorf = {}
for key, ef in self.lexstateerrorf.items():
c.lexstateerrorf[key] = getattr(object, ef.__name__)
c.lexmodule = object
return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
def writetab(self, lextab, outputdir=''):
if isinstance(lextab, types.ModuleType):
raise IOError("Won't overwrite existing lextab module")
basetabmodule = lextab.split('.')[-1]
filename = os.path.join(outputdir, basetabmodule) + '.py'
with open(filename, 'w') as tf:
tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
tf.write('_tabversion = %s\n' % repr(__tabversion__))
tf.write('_lextokens = set(%s)\n' % repr(tuple(self.lextokens)))
tf.write('_lexreflags = %s\n' % repr(self.lexreflags))
tf.write('_lexliterals = %s\n' % repr(self.lexliterals))
tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))
# Rewrite the lexstatere table, replacing function objects with function names
tabre = {}
for statename, lre in self.lexstatere.items():
titem = []
for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]):
titem.append((retext, _funcs_to_names(func, renames)))
tabre[statename] = titem
tf.write('_lexstatere = %s\n' % repr(tabre))
tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore))
taberr = {}
for statename, ef in self.lexstateerrorf.items():
taberr[statename] = ef.__name__ if ef else None
tf.write('_lexstateerrorf = %s\n' % repr(taberr))
tabeof = {}
for statename, ef in self.lexstateeoff.items():
tabeof[statename] = ef.__name__ if ef else None
tf.write('_lexstateeoff = %s\n' % repr(tabeof))
# ------------------------------------------------------------
# readtab() - Read lexer information from a tab file
# ------------------------------------------------------------
def readtab(self, tabfile, fdict):
if isinstance(tabfile, types.ModuleType):
lextab = tabfile
else:
exec('import %s' % tabfile)
lextab = sys.modules[tabfile]
if getattr(lextab, '_tabversion', '0.0') != __tabversion__:
raise ImportError('Inconsistent PLY version')
self.lextokens = lextab._lextokens
self.lexreflags = lextab._lexreflags
self.lexliterals = lextab._lexliterals
self.lextokens_all = self.lextokens | set(self.lexliterals)
self.lexstateinfo = lextab._lexstateinfo
self.lexstateignore = lextab._lexstateignore
self.lexstatere = {}
self.lexstateretext = {}
for statename, lre in lextab._lexstatere.items():
titem = []
txtitem = []
for pat, func_name in lre:
titem.append((re.compile(pat, lextab._lexreflags), _names_to_funcs(func_name, fdict)))
txtitem.append(pat)    # retain the pattern text so lexstateretext is populated
self.lexstatere[statename] = titem
self.lexstateretext[statename] = txtitem
self.lexstateerrorf = {}
for statename, ef in lextab._lexstateerrorf.items():
self.lexstateerrorf[statename] = fdict[ef]
self.lexstateeoff = {}
for statename, ef in lextab._lexstateeoff.items():
self.lexstateeoff[statename] = fdict[ef]
self.begin('INITIAL')
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self, s):
# Pull off the first character to see if s looks like a string
c = s[:1]
if not isinstance(c, StringTypes):
raise ValueError('Expected a string')
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
# ------------------------------------------------------------
# begin() - Changes the lexing state
# ------------------------------------------------------------
def begin(self, state):
if state not in self.lexstatere:
raise ValueError('Undefined state')
self.lexre = self.lexstatere[state]
self.lexretext = self.lexstateretext[state]
self.lexignore = self.lexstateignore.get(state, '')
self.lexerrorf = self.lexstateerrorf.get(state, None)
self.lexeoff = self.lexstateeoff.get(state, None)
self.lexstate = state
# ------------------------------------------------------------
# push_state() - Changes the lexing state and saves old on stack
# ------------------------------------------------------------
def push_state(self, state):
self.lexstatestack.append(self.lexstate)
self.begin(state)
# ------------------------------------------------------------
# pop_state() - Restores the previous state
# ------------------------------------------------------------
def pop_state(self):
self.begin(self.lexstatestack.pop())
# ------------------------------------------------------------
# current_state() - Returns the current lexing state
# ------------------------------------------------------------
def current_state(self):
return self.lexstate
# ------------------------------------------------------------
# skip() - Skip ahead n characters
# ------------------------------------------------------------
def skip(self, n):
self.lexpos += n
# ------------------------------------------------------------
# opttoken() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
def token(self):
# Make local copies of frequently referenced attributes
lexpos = self.lexpos
lexlen = self.lexlen
lexignore = self.lexignore
lexdata = self.lexdata
while lexpos < lexlen:
# This code provides some short-circuit code for whitespace, tabs, and other ignored characters
if lexdata[lexpos] in lexignore:
lexpos += 1
continue
# Look for a regular expression match
for lexre, lexindexfunc in self.lexre:
m = lexre.match(lexdata, lexpos)
if not m:
continue
# Create a token for return
tok = LexToken()
tok.value = m.group()
tok.lineno = self.lineno
tok.lexpos = lexpos
i = m.lastindex
func, tok.type = lexindexfunc[i]
if not func:
# If no token type was set, it's an ignored token
if tok.type:
self.lexpos = m.end()
return tok
else:
lexpos = m.end()
break
lexpos = m.end()
# If token is processed by a function, call it
tok.lexer = self # Set additional attributes useful in token rules
self.lexmatch = m
self.lexpos = lexpos
newtok = func(tok)
# Every function must return a token. If it returns nothing, we move on to the next token
if not newtok:
lexpos = self.lexpos # This is here in case user has updated lexpos.
lexignore = self.lexignore # This is here in case there was a state change
break
# Verify type of the token. If not in the token map, raise an error
if not self.lexoptimize:
if newtok.type not in self.lextokens_all:
raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
func.__code__.co_filename, func.__code__.co_firstlineno,
func.__name__, newtok.type), lexdata[lexpos:])
return newtok
else:
# No match, see if in literals
if lexdata[lexpos] in self.lexliterals:
tok = LexToken()
tok.value = lexdata[lexpos]
tok.lineno = self.lineno
tok.type = tok.value
tok.lexpos = lexpos
self.lexpos = lexpos + 1
return tok
# No match. Call t_error() if defined.
if self.lexerrorf:
tok = LexToken()
tok.value = self.lexdata[lexpos:]
tok.lineno = self.lineno
tok.type = 'error'
tok.lexer = self
tok.lexpos = lexpos
self.lexpos = lexpos
newtok = self.lexerrorf(tok)
if lexpos == self.lexpos:
# Error method didn't change text position at all. This is an error.
raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
lexpos = self.lexpos
if not newtok:
continue
return newtok
self.lexpos = lexpos
raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])
if self.lexeoff:
tok = LexToken()
tok.type = 'eof'
tok.value = ''
tok.lineno = self.lineno
tok.lexpos = lexpos
tok.lexer = self
self.lexpos = lexpos
newtok = self.lexeoff(tok)
return newtok
self.lexpos = lexpos + 1
if self.lexdata is None:
raise RuntimeError('No input string given with input()')
return None
# Iterator interface
def __iter__(self):
return self
def next(self):
t = self.token()
if t is None:
raise StopIteration
return t
__next__ = next
# -----------------------------------------------------------------------------
# ==== Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# _get_regex(func)
#
# Returns the regular expression assigned to a function either as a doc string
# or as a .regex attribute attached by the @TOKEN decorator.
# -----------------------------------------------------------------------------
def _get_regex(func):
return getattr(func, 'regex', func.__doc__)
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
f = sys._getframe(levels)
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist, namelist):
result = []
for f, name in zip(funclist, namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist, fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]], n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist, reflags, ldict, toknames):
if not relist:
return []
regex = '|'.join(relist)
try:
lexre = re.compile(regex, reflags)
# Build the index to function map for the matching engine
lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
lexindexnames = lexindexfunc[:]
for f, i in lexre.groupindex.items():
handle = ldict.get(f, None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle, toknames[f])
lexindexnames[i] = f
elif handle is not None:
lexindexnames[i] = f
if f.find('ignore_') > 0:
lexindexfunc[i] = (None, None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre, lexindexfunc)], [regex], [lexindexnames]
except Exception:
m = int(len(relist)/2)
if m == 0:
m = 1
llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
return (llist+rlist), (lre+rre), (lnames+rnames)
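# Note: the recursive split above matters because Python's re module has
# historically capped the number of named groups in a single pattern (100 in
# older releases), so a lexer with many rules may not fit in one master regex.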
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s, names):
parts = s.split('_')
for i, part in enumerate(parts[1:], 1):
if part not in names and part != 'ANY':
break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = '_'.join(parts[i:])
return (states, tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
def __init__(self, ldict, log=None, reflags=0):
self.ldict = ldict
self.error_func = None
self.tokens = []
self.reflags = reflags
self.stateinfo = {'INITIAL': 'inclusive'}
self.modules = set()
self.error = False
self.log = PlyLogger(sys.stderr) if log is None else log
# Get all of the basic information
def get_all(self):
self.get_tokens()
self.get_literals()
self.get_states()
self.get_rules()
# Validate all of the information
def validate_all(self):
self.validate_tokens()
self.validate_literals()
self.validate_rules()
return self.error
# Get the tokens map
def get_tokens(self):
tokens = self.ldict.get('tokens', None)
if not tokens:
self.log.error('No token list is defined')
self.error = True
return
if not isinstance(tokens, (list, tuple)):
self.log.error('tokens must be a list or tuple')
self.error = True
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
terminals = {}
for n in self.tokens:
if not _is_identifier.match(n):
self.log.error("Bad token name '%s'", n)
self.error = True
if n in terminals:
self.log.warning("Token '%s' multiply defined", n)
terminals[n] = 1
# Get the literals specifier
def get_literals(self):
self.literals = self.ldict.get('literals', '')
if not self.literals:
self.literals = ''
# Validate literals
def validate_literals(self):
try:
for c in self.literals:
if not isinstance(c, StringTypes) or len(c) > 1:
self.log.error('Invalid literal %s. Must be a single character', repr(c))
self.error = True
except TypeError:
self.log.error('Invalid literals specification. literals must be a sequence of characters')
self.error = True
def get_states(self):
self.states = self.ldict.get('states', None)
# Build statemap
if self.states:
if not isinstance(self.states, (tuple, list)):
self.log.error('states must be defined as a tuple or list')
self.error = True
else:
for s in self.states:
if not isinstance(s, tuple) or len(s) != 2:
self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s))
self.error = True
continue
name, statetype = s
if not isinstance(name, StringTypes):
self.log.error('State name %s must be a string', repr(name))
self.error = True
continue
if not (statetype == 'inclusive' or statetype == 'exclusive'):
self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name)
self.error = True
continue
if name in self.stateinfo:
self.log.error("State '%s' already defined", name)
self.error = True
continue
self.stateinfo[name] = statetype
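# A typical declaration that passes these checks (illustrative):
#
#     states = (
#         ('ccomment', 'exclusive'),   # uses only its own rule set
#         ('preproc',  'inclusive'),   # also inherits the INITIAL rules
#     )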
# Get all of the symbols with a t_ prefix and sort them into various
# categories (functions, strings, error functions, and ignore characters)
def get_rules(self):
tsymbols = [f for f in self.ldict if f[:2] == 't_']
# Now build up a list of functions and a list of strings
self.toknames = {} # Mapping of symbols to token names
self.funcsym = {} # Symbols defined as functions
self.strsym = {} # Symbols defined as strings
self.ignore = {} # Ignore strings by state
self.errorf = {} # Error functions by state
self.eoff = {} # EOF functions by state
for s in self.stateinfo:
self.funcsym[s] = []
self.strsym[s] = []
if len(tsymbols) == 0:
self.log.error('No rules of the form t_rulename are defined')
self.error = True
return
for f in tsymbols:
t = self.ldict[f]
states, tokname = _statetoken(f, self.stateinfo)
self.toknames[f] = tokname
if hasattr(t, '__call__'):
if tokname == 'error':
for s in states:
self.errorf[s] = t
elif tokname == 'eof':
for s in states:
self.eoff[s] = t
elif tokname == 'ignore':
line = t.__code__.co_firstlineno
file = t.__code__.co_filename
self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
self.error = True
else:
for s in states:
self.funcsym[s].append((f, t))
elif isinstance(t, StringTypes):
if tokname == 'ignore':
for s in states:
self.ignore[s] = t
if '\\' in t:
self.log.warning("%s contains a literal backslash '\\'", f)
elif tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", f)
self.error = True
else:
for s in states:
self.strsym[s].append((f, t))
else:
self.log.error('%s not defined as a function or string', f)
self.error = True
# Sort the functions by line number
for f in self.funcsym.values():
f.sort(key=lambda x: x[1].__code__.co_firstlineno)
# Sort the strings by regular expression length
for s in self.strsym.values():
s.sort(key=lambda x: len(x[1]), reverse=True)
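# The ordering matters when rules overlap. Function rules keep their source
# order, e.g. (sketch):
#
#     def t_NUMBER(t):
#         r'\d+'
#         t.value = int(t.value)
#         return t
#
# while string rules such as t_EQEQ = r'==' and t_EQUALS = r'=' are sorted
# longest-regex-first so that '==' is tried before '=' regardless of where
# they were defined.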
# Validate all of the t_rules collected
def validate_rules(self):
for state in self.stateinfo:
# Validate all rules defined by functions
for fname, f in self.funcsym[state]:
line = f.__code__.co_firstlineno
file = f.__code__.co_filename
module = inspect.getmodule(f)
self.modules.add(module)
tokname = self.toknames[fname]
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = f.__code__.co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
self.error = True
continue
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
self.error = True
continue
if not _get_regex(f):
self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__)
self.error = True
continue
try:
c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), self.reflags)
if c.match(''):
self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__)
self.error = True
except re.error as e:
self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
if '#' in _get_regex(f):
self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__)
self.error = True
# Validate all rules defined by strings
for name, r in self.strsym[state]:
tokname = self.toknames[name]
if tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", name)
self.error = True
continue
if tokname not in self.tokens and tokname.find('ignore_') < 0:
self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname)
self.error = True
continue
try:
c = re.compile('(?P<%s>%s)' % (name, r), self.reflags)
if c.match(''):
self.log.error("Regular expression for rule '%s' matches empty string", name)
self.error = True
except re.error as e:
self.log.error("Invalid regular expression for rule '%s'. %s", name, e)
if '#' in r:
self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name)
self.error = True
if not self.funcsym[state] and not self.strsym[state]:
self.log.error("No rules defined for state '%s'", state)
self.error = True
# Validate the error function
efunc = self.errorf.get(state, None)
if efunc:
f = efunc
line = f.__code__.co_firstlineno
file = f.__code__.co_filename
module = inspect.getmodule(f)
self.modules.add(module)
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = f.__code__.co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
self.error = True
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
self.error = True
for module in self.modules:
self.validate_module(module)
# -----------------------------------------------------------------------------
# validate_module()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the parser input file. This is done using a simple regular expression
# match on each line in the source code of the given module.
# -----------------------------------------------------------------------------
def validate_module(self, module):
try:
lines, linen = inspect.getsourcelines(module)
except IOError:
return
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
counthash = {}
linen += 1
for line in lines:
m = fre.match(line)
if not m:
m = sre.match(line)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
filename = inspect.getsourcefile(module)
self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
self.error = True
linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab',
reflags=int(re.VERBOSE), nowarn=False, outputdir=None, debuglog=None, errorlog=None):
if lextab is None:
lextab = 'lextab'
global lexer
ldict = None
stateinfo = {'INITIAL': 'inclusive'}
lexobj = Lexer()
lexobj.lexoptimize = optimize
global token, input
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
if debug:
if debuglog is None:
debuglog = PlyLogger(sys.stderr)
# Get the module dictionary used for the lexer
if object:
module = object
# Get the module dictionary used for the parser
if module:
_items = [(k, getattr(module, k)) for k in dir(module)]
ldict = dict(_items)
# If no __file__ attribute is available, try to obtain it from the __module__ instead
if '__file__' not in ldict:
ldict['__file__'] = sys.modules[ldict['__module__']].__file__
else:
ldict = get_caller_module_dict(2)
# Determine if the module is part of a package.
# If so, fix the tabmodule setting so that tables load correctly
pkg = ldict.get('__package__')
if pkg and isinstance(lextab, str):
if '.' not in lextab:
lextab = pkg + '.' + lextab
# Collect parser information from the dictionary
linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
linfo.get_all()
if not optimize:
if linfo.validate_all():
raise SyntaxError("Can't build lexer")
if optimize and lextab:
try:
lexobj.readtab(lextab, ldict)
token = lexobj.token
input = lexobj.input
lexer = lexobj
return lexobj
except ImportError:
pass
# Dump some basic debugging information
if debug:
debuglog.info('lex: tokens = %r', linfo.tokens)
debuglog.info('lex: literals = %r', linfo.literals)
debuglog.info('lex: states = %r', linfo.stateinfo)
# Build a dictionary of valid token names
lexobj.lextokens = set()
for n in linfo.tokens:
lexobj.lextokens.add(n)
# Get literals specification
if isinstance(linfo.literals, (list, tuple)):
lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
else:
lexobj.lexliterals = linfo.literals
lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)
# Get the stateinfo dictionary
stateinfo = linfo.stateinfo
regexs = {}
# Build the master regular expressions
for state in stateinfo:
regex_list = []
# Add rules defined by functions first
for fname, f in linfo.funcsym[state]:
line = f.__code__.co_firstlineno
file = f.__code__.co_filename
regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)
# Now add all of the simple rules
for name, r in linfo.strsym[state]:
regex_list.append('(?P<%s>%s)' % (name, r))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)
regexs[state] = regex_list
# Build the master regular expressions
if debug:
debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')
for state in regexs:
lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
lexobj.lexstatere[state] = lexre
lexobj.lexstateretext[state] = re_text
lexobj.lexstaterenames[state] = re_names
if debug:
for i, text in enumerate(re_text):
debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)
# For inclusive states, we need to add the regular expressions from the INITIAL state
for state, stype in stateinfo.items():
if state != 'INITIAL' and stype == 'inclusive':
lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
lexobj.lexstateinfo = stateinfo
lexobj.lexre = lexobj.lexstatere['INITIAL']
lexobj.lexretext = lexobj.lexstateretext['INITIAL']
lexobj.lexreflags = reflags
# Set up ignore variables
lexobj.lexstateignore = linfo.ignore
lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')
# Set up error functions
lexobj.lexstateerrorf = linfo.errorf
lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
if not lexobj.lexerrorf:
errorlog.warning('No t_error rule is defined')
# Set up eof functions
lexobj.lexstateeoff = linfo.eoff
lexobj.lexeoff = linfo.eoff.get('INITIAL', None)
# Check state information for ignore and error rules
for s, stype in stateinfo.items():
if stype == 'exclusive':
if s not in linfo.errorf:
errorlog.warning("No error rule is defined for exclusive state '%s'", s)
if s not in linfo.ignore and lexobj.lexignore:
errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
elif stype == 'inclusive':
if s not in linfo.errorf:
linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
if s not in linfo.ignore:
linfo.ignore[s] = linfo.ignore.get('INITIAL', '')
# Create global versions of the token() and input() functions
token = lexobj.token
input = lexobj.input
lexer = lexobj
# If in optimize mode, we write the lextab
if lextab and optimize:
if outputdir is None:
# If no output directory is set, the location of the output files
# is determined according to the following rules:
# - If lextab specifies a package, files go into that package directory
# - Otherwise, files go in the same directory as the specifying module
if isinstance(lextab, types.ModuleType):
srcfile = lextab.__file__
else:
if '.' not in lextab:
srcfile = ldict['__file__']
else:
parts = lextab.split('.')
pkgname = '.'.join(parts[:-1])
exec('import %s' % pkgname)
srcfile = getattr(sys.modules[pkgname], '__file__', '')
outputdir = os.path.dirname(srcfile)
try:
lexobj.writetab(lextab, outputdir)
except IOError as e:
errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e))
return lexobj
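# A hedged usage sketch (illustrative, not part of this module). Assuming the
# module is importable as ply.lex, a minimal lexer specification and build
# looks like this; the token names and patterns below are made up:
#
#   import ply.lex as lex
#
#   tokens = ('NUMBER', 'PLUS')
#
#   t_PLUS   = r'\+'
#   t_ignore = ' \t'
#
#   def t_NUMBER(t):
#       r'\d+'
#       t.value = int(t.value)
#       return t
#
#   def t_error(t):
#       print("Illegal character %r" % t.value[0])
#       t.lexer.skip(1)
#
#   lexer = lex.lex()        # collects the rules above and builds the master regexes
#   lexer.input('1 + 2')
#   for tok in lexer:
#       print(tok.type, tok.value)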
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None, data=None):
if not data:
try:
filename = sys.argv[1]
f = open(filename)
data = f.read()
f.close()
except IndexError:
sys.stdout.write('Reading from standard input (type EOF to end):\n')
data = sys.stdin.read()
if lexer:
_input = lexer.input
else:
_input = input
_input(data)
if lexer:
_token = lexer.token
else:
_token = token
while True:
tok = _token()
if not tok:
break
sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to attach a regular expression to a token
# function when its docstring cannot hold the pattern directly (for example,
# when the pattern is built programmatically)
# -----------------------------------------------------------------------------
def TOKEN(r):
def set_regex(f):
if hasattr(r, '__call__'):
f.regex = _get_regex(r)
else:
f.regex = r
return f
return set_regex
# Alternative spelling of the TOKEN decorator
Token = TOKEN
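# A hedged example of the decorator above (illustrative, not part of this
# module): @TOKEN attaches a programmatically built pattern, so the rule's
# docstring does not have to hold the regex; 'identifier' is a made-up name:
#
#   identifier = r'[a-zA-Z_][a-zA-Z0-9_]*'
#
#   @TOKEN(identifier)
#   def t_ID(t):
#       return t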
| 42,918 | Python | 38.017273 | 131 | 0.507386 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sniffio/_version.py | # This file is imported from __init__.py and exec'd from setup.py
__version__ = "1.3.0"
| 89 | Python | 21.499995 | 65 | 0.640449 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sniffio/_impl.py | from contextvars import ContextVar
from typing import Optional
import sys
import threading
current_async_library_cvar = ContextVar(
"current_async_library_cvar", default=None
) # type: ContextVar[Optional[str]]
class _ThreadLocal(threading.local):
    # Since threading.local provides no explicit mechanism for setting
    # a default value, a custom class with a class attribute is used
    # instead.
name = None # type: Optional[str]
thread_local = _ThreadLocal()
class AsyncLibraryNotFoundError(RuntimeError):
pass
def current_async_library() -> str:
"""Detect which async library is currently running.
The following libraries are currently supported:
    ================ =========== ============================
    Library          Requires    Magic string
    ================ =========== ============================
    **Trio**         Trio v0.6+  ``"trio"``
    **Curio**        -           ``"curio"``
    **asyncio**                  ``"asyncio"``
    **Trio-asyncio** v0.8.2+     ``"trio"`` or ``"asyncio"``,
                                 depending on current mode
    ================ =========== ============================
Returns:
A string like ``"trio"``.
Raises:
AsyncLibraryNotFoundError: if called from synchronous context,
or if the current async library was not recognized.
Examples:
.. code-block:: python3
from sniffio import current_async_library
async def generic_sleep(seconds):
library = current_async_library()
if library == "trio":
import trio
await trio.sleep(seconds)
elif library == "asyncio":
import asyncio
await asyncio.sleep(seconds)
# ... and so on ...
else:
raise RuntimeError(f"Unsupported library {library!r}")
"""
value = thread_local.name
if value is not None:
return value
value = current_async_library_cvar.get()
if value is not None:
return value
# Need to sniff for asyncio
if "asyncio" in sys.modules:
import asyncio
try:
current_task = asyncio.current_task # type: ignore[attr-defined]
except AttributeError:
current_task = asyncio.Task.current_task # type: ignore[attr-defined]
try:
if current_task() is not None:
return "asyncio"
except RuntimeError:
pass
# Sniff for curio (for now)
if 'curio' in sys.modules:
from curio.meta import curio_running
if curio_running():
return 'curio'
raise AsyncLibraryNotFoundError(
"unknown async library, or not in async context"
)
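# A hedged sketch (illustrative, not part of this module) of how an event
# loop can advertise itself; "my-event-loop" is a made-up library name.
# Setting thread_local.name takes precedence over both the context variable
# and the asyncio/curio sniffing above:
#
#   from sniffio import thread_local, current_async_library
#
#   old_name, thread_local.name = thread_local.name, "my-event-loop"
#   try:
#       assert current_async_library() == "my-event-loop"
#   finally:
#       thread_local.name = old_name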
| 2,843 | Python | 28.625 | 82 | 0.539923 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sniffio/__init__.py | """Top-level package for sniffio."""
__all__ = [
"current_async_library", "AsyncLibraryNotFoundError",
"current_async_library_cvar"
]
from ._version import __version__
from ._impl import (
current_async_library,
AsyncLibraryNotFoundError,
current_async_library_cvar,
thread_local,
)
| 310 | Python | 18.437499 | 57 | 0.680645 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sniffio/_tests/test_sniffio.py | import os
import sys
import pytest
from .. import (
current_async_library, AsyncLibraryNotFoundError,
current_async_library_cvar, thread_local
)
def test_basics_cvar():
with pytest.raises(AsyncLibraryNotFoundError):
current_async_library()
token = current_async_library_cvar.set("generic-lib")
try:
assert current_async_library() == "generic-lib"
finally:
current_async_library_cvar.reset(token)
with pytest.raises(AsyncLibraryNotFoundError):
current_async_library()
def test_basics_tlocal():
with pytest.raises(AsyncLibraryNotFoundError):
current_async_library()
old_name, thread_local.name = thread_local.name, "generic-lib"
try:
assert current_async_library() == "generic-lib"
finally:
thread_local.name = old_name
with pytest.raises(AsyncLibraryNotFoundError):
current_async_library()
def test_asyncio():
import asyncio
with pytest.raises(AsyncLibraryNotFoundError):
current_async_library()
ran = []
async def this_is_asyncio():
assert current_async_library() == "asyncio"
# Call it a second time to exercise the caching logic
assert current_async_library() == "asyncio"
ran.append(True)
asyncio.run(this_is_asyncio())
assert ran == [True]
with pytest.raises(AsyncLibraryNotFoundError):
current_async_library()
# https://github.com/dabeaz/curio/pull/354
@pytest.mark.skipif(
os.name == "nt" and sys.version_info >= (3, 9),
reason="Curio breaks on Python 3.9+ on Windows. Fix was not released yet",
)
def test_curio():
import curio
with pytest.raises(AsyncLibraryNotFoundError):
current_async_library()
ran = []
async def this_is_curio():
assert current_async_library() == "curio"
# Call it a second time to exercise the caching logic
assert current_async_library() == "curio"
ran.append(True)
curio.run(this_is_curio)
assert ran == [True]
with pytest.raises(AsyncLibraryNotFoundError):
current_async_library()
| 2,110 | Python | 23.835294 | 78 | 0.661137 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pyrsistent/_pclass.py | from pyrsistent._checked_types import (InvariantException, CheckedType, _restore_pickle, store_invariants)
from pyrsistent._field_common import (
set_fields, check_type, is_field_ignore_extra_complaint, PFIELD_NO_INITIAL, serialize, check_global_invariants
)
from pyrsistent._transformations import transform
def _is_pclass(bases):
return len(bases) == 1 and bases[0] == CheckedType
class PClassMeta(type):
def __new__(mcs, name, bases, dct):
set_fields(dct, bases, name='_pclass_fields')
store_invariants(dct, bases, '_pclass_invariants', '__invariant__')
dct['__slots__'] = ('_pclass_frozen',) + tuple(key for key in dct['_pclass_fields'])
        # There must only be one __weakref__ entry in the inheritance hierarchy,
        # so let's put it on the top-level class.
if _is_pclass(bases):
dct['__slots__'] += ('__weakref__',)
return super(PClassMeta, mcs).__new__(mcs, name, bases, dct)
_MISSING_VALUE = object()
def _check_and_set_attr(cls, field, name, value, result, invariant_errors):
check_type(cls, field, name, value)
is_ok, error_code = field.invariant(value)
if not is_ok:
invariant_errors.append(error_code)
else:
setattr(result, name, value)
class PClass(CheckedType, metaclass=PClassMeta):
"""
    A PClass is a Python class with a fixed set of specified fields. PClasses are declared as Python classes inheriting
    from PClass. A PClass is defined the same way as a PRecord and behaves like a PRecord in all aspects except that it
    is not a PMap, and hence not a collection, but rather a plain Python object.
More documentation and examples of PClass usage is available at https://github.com/tobgu/pyrsistent
"""
def __new__(cls, **kwargs): # Support *args?
result = super(PClass, cls).__new__(cls)
factory_fields = kwargs.pop('_factory_fields', None)
ignore_extra = kwargs.pop('ignore_extra', None)
missing_fields = []
invariant_errors = []
for name, field in cls._pclass_fields.items():
if name in kwargs:
if factory_fields is None or name in factory_fields:
if is_field_ignore_extra_complaint(PClass, field, ignore_extra):
value = field.factory(kwargs[name], ignore_extra=ignore_extra)
else:
value = field.factory(kwargs[name])
else:
value = kwargs[name]
_check_and_set_attr(cls, field, name, value, result, invariant_errors)
del kwargs[name]
elif field.initial is not PFIELD_NO_INITIAL:
initial = field.initial() if callable(field.initial) else field.initial
_check_and_set_attr(
cls, field, name, initial, result, invariant_errors)
elif field.mandatory:
missing_fields.append('{0}.{1}'.format(cls.__name__, name))
if invariant_errors or missing_fields:
raise InvariantException(tuple(invariant_errors), tuple(missing_fields), 'Field invariant failed')
if kwargs:
raise AttributeError("'{0}' are not among the specified fields for {1}".format(
', '.join(kwargs), cls.__name__))
check_global_invariants(result, cls._pclass_invariants)
result._pclass_frozen = True
return result
def set(self, *args, **kwargs):
"""
        Set a field in the instance. Returns a new instance with the updated value. The original instance remains
        unmodified. Accepts keyword arguments, or a single string naming the field followed by its new value.
>>> from pyrsistent import PClass, field
>>> class AClass(PClass):
... x = field()
...
>>> a = AClass(x=1)
>>> a2 = a.set(x=2)
>>> a3 = a.set('x', 3)
>>> a
AClass(x=1)
>>> a2
AClass(x=2)
>>> a3
AClass(x=3)
"""
if args:
kwargs[args[0]] = args[1]
factory_fields = set(kwargs)
for key in self._pclass_fields:
if key not in kwargs:
value = getattr(self, key, _MISSING_VALUE)
if value is not _MISSING_VALUE:
kwargs[key] = value
return self.__class__(_factory_fields=factory_fields, **kwargs)
@classmethod
def create(cls, kwargs, _factory_fields=None, ignore_extra=False):
"""
Factory method. Will create a new PClass of the current type and assign the values
specified in kwargs.
:param ignore_extra: A boolean which when set to True will ignore any keys which appear in kwargs that are not
in the set of fields on the PClass.
"""
if isinstance(kwargs, cls):
return kwargs
if ignore_extra:
kwargs = {k: kwargs[k] for k in cls._pclass_fields if k in kwargs}
return cls(_factory_fields=_factory_fields, ignore_extra=ignore_extra, **kwargs)
def serialize(self, format=None):
"""
Serialize the current PClass using custom serializer functions for fields where
such have been supplied.
"""
result = {}
for name in self._pclass_fields:
value = getattr(self, name, _MISSING_VALUE)
if value is not _MISSING_VALUE:
result[name] = serialize(self._pclass_fields[name].serializer, format, value)
return result
def transform(self, *transformations):
"""
        Apply transformations to the current PClass. For more details on transformations see
the documentation for PMap. Transformations on PClasses do not support key matching
since the PClass is not a collection. Apart from that the transformations available
for other persistent types work as expected.
"""
return transform(self, transformations)
def __eq__(self, other):
if isinstance(other, self.__class__):
for name in self._pclass_fields:
if getattr(self, name, _MISSING_VALUE) != getattr(other, name, _MISSING_VALUE):
return False
return True
return NotImplemented
def __ne__(self, other):
return not self == other
def __hash__(self):
# May want to optimize this by caching the hash somehow
return hash(tuple((key, getattr(self, key, _MISSING_VALUE)) for key in self._pclass_fields))
def __setattr__(self, key, value):
if getattr(self, '_pclass_frozen', False):
raise AttributeError("Can't set attribute, key={0}, value={1}".format(key, value))
super(PClass, self).__setattr__(key, value)
def __delattr__(self, key):
raise AttributeError("Can't delete attribute, key={0}, use remove()".format(key))
def _to_dict(self):
result = {}
for key in self._pclass_fields:
value = getattr(self, key, _MISSING_VALUE)
if value is not _MISSING_VALUE:
result[key] = value
return result
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__,
', '.join('{0}={1}'.format(k, repr(v)) for k, v in self._to_dict().items()))
def __reduce__(self):
# Pickling support
data = dict((key, getattr(self, key)) for key in self._pclass_fields if hasattr(self, key))
return _restore_pickle, (self.__class__, data,)
def evolver(self):
"""
Returns an evolver for this object.
"""
return _PClassEvolver(self, self._to_dict())
def remove(self, name):
"""
Remove attribute given by name from the current instance. Raises AttributeError if the
attribute doesn't exist.
"""
evolver = self.evolver()
del evolver[name]
return evolver.persistent()
class _PClassEvolver(object):
__slots__ = ('_pclass_evolver_original', '_pclass_evolver_data', '_pclass_evolver_data_is_dirty', '_factory_fields')
def __init__(self, original, initial_dict):
self._pclass_evolver_original = original
self._pclass_evolver_data = initial_dict
self._pclass_evolver_data_is_dirty = False
self._factory_fields = set()
def __getitem__(self, item):
return self._pclass_evolver_data[item]
def set(self, key, value):
if self._pclass_evolver_data.get(key, _MISSING_VALUE) is not value:
self._pclass_evolver_data[key] = value
self._factory_fields.add(key)
self._pclass_evolver_data_is_dirty = True
return self
def __setitem__(self, key, value):
self.set(key, value)
def remove(self, item):
if item in self._pclass_evolver_data:
del self._pclass_evolver_data[item]
self._factory_fields.discard(item)
self._pclass_evolver_data_is_dirty = True
return self
raise AttributeError(item)
def __delitem__(self, item):
self.remove(item)
def persistent(self):
if self._pclass_evolver_data_is_dirty:
return self._pclass_evolver_original.__class__(_factory_fields=self._factory_fields,
**self._pclass_evolver_data)
return self._pclass_evolver_original
def __setattr__(self, key, value):
if key not in self.__slots__:
self.set(key, value)
else:
super(_PClassEvolver, self).__setattr__(key, value)
def __getattr__(self, item):
return self[item]
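# A hedged usage sketch (illustrative, not part of this module): declaring a
# PClass with pyrsistent's field() helper and evolving an instance. The Point
# class and its fields are made up for the example:
#
#   from pyrsistent import PClass, field
#
#   class Point(PClass):
#       x = field(type=int, mandatory=True)
#       y = field(type=int, initial=0)
#
#   p = Point(x=1)          # Point(x=1, y=0)
#   p2 = p.set(y=5)         # p itself is unchanged
#   e = p2.evolver()
#   e.x = 10
#   p3 = e.persistent()     # Point(x=10, y=5)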
| 9,702 | Python | 35.893536 | 120 | 0.591012 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pyrsistent/_immutable.py | import sys
def immutable(members='', name='Immutable', verbose=False):
"""
    Produces a class that can be used either standalone or as a base class for persistent classes.
This is a thin wrapper around a named tuple.
Constructing a type and using it to instantiate objects:
>>> Point = immutable('x, y', name='Point')
>>> p = Point(1, 2)
>>> p2 = p.set(x=3)
>>> p
Point(x=1, y=2)
>>> p2
Point(x=3, y=2)
Inheriting from a constructed type. In this case no type name needs to be supplied:
>>> class PositivePoint(immutable('x, y')):
... __slots__ = tuple()
... def __new__(cls, x, y):
... if x > 0 and y > 0:
... return super(PositivePoint, cls).__new__(cls, x, y)
... raise Exception('Coordinates must be positive!')
...
>>> p = PositivePoint(1, 2)
>>> p.set(x=3)
PositivePoint(x=3, y=2)
>>> p.set(y=-3)
Traceback (most recent call last):
Exception: Coordinates must be positive!
The persistent class also supports the notion of frozen members. The value of a frozen member
cannot be updated. For example it could be used to implement an ID that should remain the same
over time. A frozen member is denoted by a trailing underscore.
>>> Point = immutable('x, y, id_', name='Point')
>>> p = Point(1, 2, id_=17)
>>> p.set(x=3)
Point(x=3, y=2, id_=17)
>>> p.set(id_=18)
Traceback (most recent call last):
AttributeError: Cannot set frozen members id_
"""
if isinstance(members, str):
members = members.replace(',', ' ').split()
def frozen_member_test():
frozen_members = ["'%s'" % f for f in members if f.endswith('_')]
if frozen_members:
return """
frozen_fields = fields_to_modify & set([{frozen_members}])
if frozen_fields:
raise AttributeError('Cannot set frozen members %s' % ', '.join(frozen_fields))
""".format(frozen_members=', '.join(frozen_members))
return ''
quoted_members = ', '.join("'%s'" % m for m in members)
template = """
class {class_name}(namedtuple('ImmutableBase', [{quoted_members}])):
__slots__ = tuple()
def __repr__(self):
return super({class_name}, self).__repr__().replace('ImmutableBase', self.__class__.__name__)
def set(self, **kwargs):
if not kwargs:
return self
fields_to_modify = set(kwargs.keys())
if not fields_to_modify <= {member_set}:
raise AttributeError("'%s' is not a member" % ', '.join(fields_to_modify - {member_set}))
{frozen_member_test}
return self.__class__.__new__(self.__class__, *map(kwargs.pop, [{quoted_members}], self))
""".format(quoted_members=quoted_members,
member_set="set([%s])" % quoted_members if quoted_members else 'set()',
frozen_member_test=frozen_member_test(),
class_name=name)
if verbose:
print(template)
from collections import namedtuple
namespace = dict(namedtuple=namedtuple, __name__='pyrsistent_immutable')
try:
exec(template, namespace)
except SyntaxError as e:
raise SyntaxError(str(e) + ':\n' + template) from e
return namespace[name]
| 3,287 | Python | 32.55102 | 101 | 0.585336 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pyrsistent/_pmap.py | from collections.abc import Mapping, Hashable
from itertools import chain
from pyrsistent._pvector import pvector
from pyrsistent._transformations import transform
class PMapView:
"""View type for the persistent map/dict type `PMap`.
Provides an equivalent of Python's built-in `dict_values` and `dict_items`
    types that result from expressions such as `{}.values()` and
`{}.items()`. The equivalent for `{}.keys()` is absent because the keys are
instead represented by a `PSet` object, which can be created in `O(1)` time.
    The `PMapView` class is specialized by the `PMapValues` and `PMapItems`
    subclasses, which handle the specific cases of values and items, respectively.
Parameters
----------
m : mapping
The mapping/dict-like object of which a view is to be created. This
should generally be a `PMap` object.
"""
    # Public methods common to the value and item views.
def __init__(self, m):
        # Make sure this is a persistent map
if not isinstance(m, PMap):
# We can convert mapping objects into pmap objects, I guess (but why?)
if isinstance(m, Mapping):
m = pmap(m)
else:
raise TypeError("PViewMap requires a Mapping object")
object.__setattr__(self, '_map', m)
def __len__(self):
return len(self._map)
def __setattr__(self, k, v):
raise TypeError("%s is immutable" % (type(self),))
def __reversed__(self):
raise TypeError("Persistent maps are not reversible")
class PMapValues(PMapView):
"""View type for the values of the persistent map/dict type `PMap`.
    Provides an equivalent of Python's built-in `dict_values` type that results
    from expressions such as `{}.values()`. See also `PMapView`.
Parameters
----------
m : mapping
The mapping/dict-like object of which a view is to be created. This
should generally be a `PMap` object.
"""
def __iter__(self):
return self._map.itervalues()
def __contains__(self, arg):
return arg in self._map.itervalues()
# The str and repr methods imitate the dict_view style currently.
def __str__(self):
return f"pmap_values({list(iter(self))})"
def __repr__(self):
return f"pmap_values({list(iter(self))})"
def __eq__(self, x):
# For whatever reason, dict_values always seem to return False for ==
# (probably it's not implemented), so we mimic that.
if x is self: return True
else: return False
class PMapItems(PMapView):
"""View type for the items of the persistent map/dict type `PMap`.
    Provides an equivalent of Python's built-in `dict_items` type that results
    from expressions such as `{}.items()`. See also `PMapView`.
Parameters
----------
m : mapping
The mapping/dict-like object of which a view is to be created. This
should generally be a `PMap` object.
"""
def __iter__(self):
return self._map.iteritems()
def __contains__(self, arg):
try: (k,v) = arg
except Exception: return False
return k in self._map and self._map[k] == v
    # The str and repr methods imitate the dict_view style currently.
def __str__(self):
return f"pmap_items({list(iter(self))})"
def __repr__(self):
return f"pmap_items({list(iter(self))})"
def __eq__(self, x):
if x is self: return True
elif not isinstance(x, type(self)): return False
else: return self._map == x._map
class PMap(object):
"""
    Persistent map/dict. Tries to follow the same naming conventions as the built-in dict where feasible.
Do not instantiate directly, instead use the factory functions :py:func:`m` or :py:func:`pmap` to
create an instance.
    Was originally written as a very close copy of the Clojure equivalent but was later rewritten to more closely
    resemble the Python dict. This means that a sparse vector (a PVector) of buckets is used. The keys are
    hashed and the elements inserted at position hash % len(bucket_vector). Whenever the map size exceeds 2/3 of
    the containing vector's size the map is reallocated to a vector of double the size. This is done to avoid
    excessive hash collisions.
    This structure corresponds most closely to the built-in dict type and is intended as a replacement. Where the
    semantics are (more or less) the same, the same function names have been used, but in some cases that is not
    possible, for example for assignment and deletion of values.
    PMap implements the Mapping protocol and is Hashable. It also supports dot-notation for
    element access.
    Random access and insert are O(log32(n)) where n is the size of the map.
The following are examples of some common operations on persistent maps
>>> m1 = m(a=1, b=3)
>>> m2 = m1.set('c', 3)
>>> m3 = m2.remove('a')
>>> m1 == {'a': 1, 'b': 3}
True
>>> m2 == {'a': 1, 'b': 3, 'c': 3}
True
>>> m3 == {'b': 3, 'c': 3}
True
>>> m3['c']
3
>>> m3.c
3
"""
__slots__ = ('_size', '_buckets', '__weakref__', '_cached_hash')
def __new__(cls, size, buckets):
self = super(PMap, cls).__new__(cls)
self._size = size
self._buckets = buckets
return self
@staticmethod
def _get_bucket(buckets, key):
index = hash(key) % len(buckets)
bucket = buckets[index]
return index, bucket
@staticmethod
def _getitem(buckets, key):
_, bucket = PMap._get_bucket(buckets, key)
if bucket:
for k, v in bucket:
if k == key:
return v
raise KeyError(key)
def __getitem__(self, key):
return PMap._getitem(self._buckets, key)
@staticmethod
def _contains(buckets, key):
_, bucket = PMap._get_bucket(buckets, key)
if bucket:
for k, _ in bucket:
if k == key:
return True
return False
return False
def __contains__(self, key):
return self._contains(self._buckets, key)
get = Mapping.get
def __iter__(self):
return self.iterkeys()
# If this method is not defined, then reversed(pmap) will attempt to reverse
# the map using len() and getitem, usually resulting in a mysterious
# KeyError.
def __reversed__(self):
raise TypeError("Persistent maps are not reversible")
def __getattr__(self, key):
try:
return self[key]
except KeyError as e:
raise AttributeError(
"{0} has no attribute '{1}'".format(type(self).__name__, key)
) from e
def iterkeys(self):
for k, _ in self.iteritems():
yield k
    # These are more efficient implementations compared to the original
    # methods, which were based on the keys iterator and then called the
    # accessor functions to look up the value for each corresponding key
def itervalues(self):
for _, v in self.iteritems():
yield v
def iteritems(self):
for bucket in self._buckets:
if bucket:
for k, v in bucket:
yield k, v
def values(self):
return PMapValues(self)
def keys(self):
from ._pset import PSet
return PSet(self)
def items(self):
return PMapItems(self)
def __len__(self):
return self._size
def __repr__(self):
return 'pmap({0})'.format(str(dict(self)))
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, Mapping):
return NotImplemented
if len(self) != len(other):
return False
if isinstance(other, PMap):
if (hasattr(self, '_cached_hash') and hasattr(other, '_cached_hash')
and self._cached_hash != other._cached_hash):
return False
if self._buckets == other._buckets:
return True
return dict(self.iteritems()) == dict(other.iteritems())
elif isinstance(other, dict):
return dict(self.iteritems()) == other
return dict(self.iteritems()) == dict(other.items())
__ne__ = Mapping.__ne__
def __lt__(self, other):
raise TypeError('PMaps are not orderable')
__le__ = __lt__
__gt__ = __lt__
__ge__ = __lt__
def __str__(self):
return self.__repr__()
def __hash__(self):
if not hasattr(self, '_cached_hash'):
self._cached_hash = hash(frozenset(self.iteritems()))
return self._cached_hash
def set(self, key, val):
"""
Return a new PMap with key and val inserted.
>>> m1 = m(a=1, b=2)
>>> m2 = m1.set('a', 3)
>>> m3 = m1.set('c' ,4)
>>> m1 == {'a': 1, 'b': 2}
True
>>> m2 == {'a': 3, 'b': 2}
True
>>> m3 == {'a': 1, 'b': 2, 'c': 4}
True
"""
return self.evolver().set(key, val).persistent()
def remove(self, key):
"""
Return a new PMap without the element specified by key. Raises KeyError if the element
is not present.
>>> m1 = m(a=1, b=2)
>>> m1.remove('a')
pmap({'b': 2})
"""
return self.evolver().remove(key).persistent()
def discard(self, key):
"""
Return a new PMap without the element specified by key. Returns reference to itself
if element is not present.
>>> m1 = m(a=1, b=2)
>>> m1.discard('a')
pmap({'b': 2})
>>> m1 is m1.discard('c')
True
"""
try:
return self.remove(key)
except KeyError:
return self
def update(self, *maps):
"""
        Return a new PMap with the items from the given Mappings inserted. If the same key is present in multiple
        maps the rightmost (last) value is inserted.
>>> m1 = m(a=1, b=2)
>>> m1.update(m(a=2, c=3), {'a': 17, 'd': 35}) == {'a': 17, 'b': 2, 'c': 3, 'd': 35}
True
"""
return self.update_with(lambda l, r: r, *maps)
def update_with(self, update_fn, *maps):
"""
        Return a new PMap with the items from the given Mappings inserted. If the same key is present in multiple
        maps the values will be merged using update_fn going from left to right.
>>> from operator import add
>>> m1 = m(a=1, b=2)
>>> m1.update_with(add, m(a=2)) == {'a': 3, 'b': 2}
True
The reverse behaviour of the regular merge. Keep the leftmost element instead of the rightmost.
>>> m1 = m(a=1)
>>> m1.update_with(lambda l, r: l, m(a=2), {'a':3})
pmap({'a': 1})
"""
evolver = self.evolver()
for map in maps:
for key, value in map.items():
evolver.set(key, update_fn(evolver[key], value) if key in evolver else value)
return evolver.persistent()
def __add__(self, other):
return self.update(other)
__or__ = __add__
def __reduce__(self):
# Pickling support
return pmap, (dict(self),)
def transform(self, *transformations):
"""
Transform arbitrarily complex combinations of PVectors and PMaps. A transformation
consists of two parts. One match expression that specifies which elements to transform
and one transformation function that performs the actual transformation.
>>> from pyrsistent import freeze, ny
>>> news_paper = freeze({'articles': [{'author': 'Sara', 'content': 'A short article'},
... {'author': 'Steve', 'content': 'A slightly longer article'}],
... 'weather': {'temperature': '11C', 'wind': '5m/s'}})
>>> short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:25] + '...' if len(c) > 25 else c)
>>> very_short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:15] + '...' if len(c) > 15 else c)
>>> very_short_news.articles[0].content
'A short article'
>>> very_short_news.articles[1].content
'A slightly long...'
When nothing has been transformed the original data structure is kept
>>> short_news is news_paper
True
>>> very_short_news is news_paper
False
>>> very_short_news.articles[0] is news_paper.articles[0]
True
"""
return transform(self, transformations)
def copy(self):
return self
class _Evolver(object):
__slots__ = ('_buckets_evolver', '_size', '_original_pmap')
def __init__(self, original_pmap):
self._original_pmap = original_pmap
self._buckets_evolver = original_pmap._buckets.evolver()
self._size = original_pmap._size
def __getitem__(self, key):
return PMap._getitem(self._buckets_evolver, key)
def __setitem__(self, key, val):
self.set(key, val)
def set(self, key, val):
kv = (key, val)
index, bucket = PMap._get_bucket(self._buckets_evolver, key)
reallocation_required = len(self._buckets_evolver) < 0.67 * self._size
if bucket:
for k, v in bucket:
if k == key:
if v is not val:
new_bucket = [(k2, v2) if k2 != k else (k2, val) for k2, v2 in bucket]
self._buckets_evolver[index] = new_bucket
return self
# Only check and perform reallocation if not replacing an existing value.
# This is a performance tweak, see #247.
if reallocation_required:
self._reallocate()
return self.set(key, val)
new_bucket = [kv]
new_bucket.extend(bucket)
self._buckets_evolver[index] = new_bucket
self._size += 1
else:
if reallocation_required:
self._reallocate()
return self.set(key, val)
self._buckets_evolver[index] = [kv]
self._size += 1
return self
def _reallocate(self):
new_size = 2 * len(self._buckets_evolver)
new_list = new_size * [None]
buckets = self._buckets_evolver.persistent()
for k, v in chain.from_iterable(x for x in buckets if x):
index = hash(k) % new_size
if new_list[index]:
new_list[index].append((k, v))
else:
new_list[index] = [(k, v)]
# A reallocation should always result in a dirty buckets evolver to avoid
# possible loss of elements when doing the reallocation.
self._buckets_evolver = pvector().evolver()
self._buckets_evolver.extend(new_list)
def is_dirty(self):
return self._buckets_evolver.is_dirty()
def persistent(self):
if self.is_dirty():
self._original_pmap = PMap(self._size, self._buckets_evolver.persistent())
return self._original_pmap
def __len__(self):
return self._size
def __contains__(self, key):
return PMap._contains(self._buckets_evolver, key)
def __delitem__(self, key):
self.remove(key)
def remove(self, key):
index, bucket = PMap._get_bucket(self._buckets_evolver, key)
if bucket:
new_bucket = [(k, v) for (k, v) in bucket if k != key]
if len(bucket) > len(new_bucket):
self._buckets_evolver[index] = new_bucket if new_bucket else None
self._size -= 1
return self
raise KeyError('{0}'.format(key))
def evolver(self):
"""
Create a new evolver for this pmap. For a discussion on evolvers in general see the
documentation for the pvector evolver.
Create the evolver and perform various mutating updates to it:
>>> m1 = m(a=1, b=2)
>>> e = m1.evolver()
>>> e['c'] = 3
>>> len(e)
3
>>> del e['a']
The underlying pmap remains the same:
>>> m1 == {'a': 1, 'b': 2}
True
The changes are kept in the evolver. An updated pmap can be created using the
persistent() function on the evolver.
>>> m2 = e.persistent()
>>> m2 == {'b': 2, 'c': 3}
True
The new pmap will share data with the original pmap in the same way that would have
been done if only using operations on the pmap.
"""
return self._Evolver(self)
Mapping.register(PMap)
Hashable.register(PMap)
def _turbo_mapping(initial, pre_size):
if pre_size:
size = pre_size
else:
try:
size = 2 * len(initial) or 8
except Exception:
# Guess we can't figure out the length. Give up on length hinting,
# we can always reallocate later.
size = 8
buckets = size * [None]
if not isinstance(initial, Mapping):
        # Make a dictionary of the initial data if it isn't already,
        # which will save us some work further down since we can assume no
        # key collisions
initial = dict(initial)
for k, v in initial.items():
h = hash(k)
index = h % size
bucket = buckets[index]
if bucket:
bucket.append((k, v))
else:
buckets[index] = [(k, v)]
return PMap(len(initial), pvector().extend(buckets))
_EMPTY_PMAP = _turbo_mapping({}, 0)
def pmap(initial={}, pre_size=0):
"""
Create new persistent map, inserts all elements in initial into the newly created map.
The optional argument pre_size may be used to specify an initial size of the underlying bucket vector. This
may have a positive performance impact in the cases where you know beforehand that a large number of elements
will be inserted into the map eventually since it will reduce the number of reallocations required.
>>> pmap({'a': 13, 'b': 14}) == {'a': 13, 'b': 14}
True
"""
if not initial and pre_size == 0:
return _EMPTY_PMAP
return _turbo_mapping(initial, pre_size)
def m(**kwargs):
"""
Creates a new persistent map. Inserts all key value arguments into the newly created map.
>>> m(a=13, b=14) == {'a': 13, 'b': 14}
True
"""
return pmap(kwargs)
| 18,781 | Python | 31.551126 | 127 | 0.559235 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pyrsistent/_pbag.py | from collections.abc import Container, Iterable, Sized, Hashable
from functools import reduce
from pyrsistent._pmap import pmap
def _add_to_counters(counters, element):
return counters.set(element, counters.get(element, 0) + 1)
class PBag(object):
"""
A persistent bag/multiset type.
Requires elements to be hashable, and allows duplicates, but has no
ordering. Bags are hashable.
Do not instantiate directly, instead use the factory functions :py:func:`b`
or :py:func:`pbag` to create an instance.
Some examples:
>>> s = pbag([1, 2, 3, 1])
>>> s2 = s.add(4)
>>> s3 = s2.remove(1)
>>> s
pbag([1, 1, 2, 3])
>>> s2
pbag([1, 1, 2, 3, 4])
>>> s3
pbag([1, 2, 3, 4])
"""
__slots__ = ('_counts', '__weakref__')
def __init__(self, counts):
self._counts = counts
def add(self, element):
"""
Add an element to the bag.
>>> s = pbag([1])
>>> s2 = s.add(1)
>>> s3 = s.add(2)
>>> s2
pbag([1, 1])
>>> s3
pbag([1, 2])
"""
return PBag(_add_to_counters(self._counts, element))
def update(self, iterable):
"""
Update bag with all elements in iterable.
>>> s = pbag([1])
>>> s.update([1, 2])
pbag([1, 1, 2])
"""
if iterable:
return PBag(reduce(_add_to_counters, iterable, self._counts))
return self
def remove(self, element):
"""
Remove an element from the bag.
>>> s = pbag([1, 1, 2])
>>> s2 = s.remove(1)
>>> s3 = s.remove(2)
>>> s2
pbag([1, 2])
>>> s3
pbag([1, 1])
"""
if element not in self._counts:
raise KeyError(element)
elif self._counts[element] == 1:
newc = self._counts.remove(element)
else:
newc = self._counts.set(element, self._counts[element] - 1)
return PBag(newc)
def count(self, element):
"""
Return the number of times an element appears.
>>> pbag([]).count('non-existent')
0
>>> pbag([1, 1, 2]).count(1)
2
"""
return self._counts.get(element, 0)
def __len__(self):
"""
Return the length including duplicates.
>>> len(pbag([1, 1, 2]))
3
"""
return sum(self._counts.itervalues())
def __iter__(self):
"""
Return an iterator of all elements, including duplicates.
>>> list(pbag([1, 1, 2]))
[1, 1, 2]
>>> list(pbag([1, 2]))
[1, 2]
"""
for elt, count in self._counts.iteritems():
for i in range(count):
yield elt
def __contains__(self, elt):
"""
Check if an element is in the bag.
>>> 1 in pbag([1, 1, 2])
True
>>> 0 in pbag([1, 2])
False
"""
return elt in self._counts
def __repr__(self):
return "pbag({0})".format(list(self))
def __eq__(self, other):
"""
Check if two bags are equivalent, honoring the number of duplicates,
and ignoring insertion order.
>>> pbag([1, 1, 2]) == pbag([1, 2])
False
>>> pbag([2, 1, 0]) == pbag([0, 1, 2])
True
"""
if type(other) is not PBag:
raise TypeError("Can only compare PBag with PBags")
return self._counts == other._counts
def __lt__(self, other):
raise TypeError('PBags are not orderable')
__le__ = __lt__
__gt__ = __lt__
__ge__ = __lt__
# Multiset-style operations similar to collections.Counter
def __add__(self, other):
"""
Combine elements from two PBags.
>>> pbag([1, 2, 2]) + pbag([2, 3, 3])
pbag([1, 2, 2, 2, 3, 3])
"""
if not isinstance(other, PBag):
return NotImplemented
result = self._counts.evolver()
for elem, other_count in other._counts.iteritems():
result[elem] = self.count(elem) + other_count
return PBag(result.persistent())
def __sub__(self, other):
"""
Remove elements from one PBag that are present in another.
>>> pbag([1, 2, 2, 2, 3]) - pbag([2, 3, 3, 4])
pbag([1, 2, 2])
"""
if not isinstance(other, PBag):
return NotImplemented
result = self._counts.evolver()
for elem, other_count in other._counts.iteritems():
newcount = self.count(elem) - other_count
if newcount > 0:
result[elem] = newcount
elif elem in self:
result.remove(elem)
return PBag(result.persistent())
def __or__(self, other):
"""
Union: Keep elements that are present in either of two PBags.
>>> pbag([1, 2, 2, 2]) | pbag([2, 3, 3])
pbag([1, 2, 2, 2, 3, 3])
"""
if not isinstance(other, PBag):
return NotImplemented
result = self._counts.evolver()
for elem, other_count in other._counts.iteritems():
count = self.count(elem)
newcount = max(count, other_count)
result[elem] = newcount
return PBag(result.persistent())
def __and__(self, other):
"""
Intersection: Only keep elements that are present in both PBags.
>>> pbag([1, 2, 2, 2]) & pbag([2, 3, 3])
pbag([2])
"""
if not isinstance(other, PBag):
return NotImplemented
result = pmap().evolver()
for elem, count in self._counts.iteritems():
newcount = min(count, other.count(elem))
if newcount > 0:
result[elem] = newcount
return PBag(result.persistent())
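    # A hedged comparison (illustrative, not part of this module): the
    # operators above mirror collections.Counter semantics, where + adds
    # counts, - subtracts and drops non-positive counts, | keeps the max
    # count per element and & the min:
    #
    #   from collections import Counter
    #   Counter([1, 2, 2]) + Counter([2, 3])   # Counter({2: 3, 1: 1, 3: 1})
    #   pbag([1, 2, 2]) + pbag([2, 3])         # counts {1: 1, 2: 3, 3: 1}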
def __hash__(self):
"""
Hash based on value of elements.
>>> m = pmap({pbag([1, 2]): "it's here!"})
>>> m[pbag([2, 1])]
"it's here!"
>>> pbag([1, 1, 2]) in m
False
"""
return hash(self._counts)
Container.register(PBag)
Iterable.register(PBag)
Sized.register(PBag)
Hashable.register(PBag)
def b(*elements):
"""
Construct a persistent bag.
Takes an arbitrary number of arguments to insert into the new persistent
bag.
>>> b(1, 2, 3, 2)
pbag([1, 2, 2, 3])
"""
return pbag(elements)
def pbag(elements):
"""
Convert an iterable to a persistent bag.
Takes an iterable with elements to insert.
>>> pbag([1, 2, 3, 2])
pbag([1, 2, 2, 3])
"""
if not elements:
return _EMPTY_PBAG
return PBag(reduce(_add_to_counters, elements, pmap()))
_EMPTY_PBAG = PBag(pmap())
| 6,730 | Python | 24.115672 | 79 | 0.504903 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pyrsistent/_checked_types.py | from enum import Enum
from abc import abstractmethod, ABCMeta
from collections.abc import Iterable
from pyrsistent._pmap import PMap, pmap
from pyrsistent._pset import PSet, pset
from pyrsistent._pvector import PythonPVector, python_pvector
class CheckedType(object):
"""
Marker class to enable creation and serialization of checked object graphs.
"""
__slots__ = ()
@classmethod
@abstractmethod
def create(cls, source_data, _factory_fields=None):
raise NotImplementedError()
@abstractmethod
def serialize(self, format=None):
raise NotImplementedError()
def _restore_pickle(cls, data):
return cls.create(data, _factory_fields=set())
class InvariantException(Exception):
"""
Exception raised from a :py:class:`CheckedType` when invariant tests fail or when a mandatory
field is missing.
Contains two fields of interest:
invariant_errors, a tuple of error data for the failing invariants
missing_fields, a tuple of strings specifying the missing names
"""
def __init__(self, error_codes=(), missing_fields=(), *args, **kwargs):
self.invariant_errors = tuple(e() if callable(e) else e for e in error_codes)
self.missing_fields = missing_fields
super(InvariantException, self).__init__(*args, **kwargs)
def __str__(self):
return super(InvariantException, self).__str__() + \
", invariant_errors=[{invariant_errors}], missing_fields=[{missing_fields}]".format(
invariant_errors=', '.join(str(e) for e in self.invariant_errors),
missing_fields=', '.join(self.missing_fields))
_preserved_iterable_types = (
Enum,
)
"""Some types are themselves iterable, but we want to use the type itself and
not its members for the type specification. This defines a set of such types
that we explicitly preserve.
Note that strings are not such types because the string inputs we pass in are
values, not types.
"""
def maybe_parse_user_type(t):
"""Try to coerce a user-supplied type directive into a list of types.
This function should be used in all places where a user specifies a type,
for consistency.
The policy for what defines valid user input should be clear from the implementation.
"""
is_type = isinstance(t, type)
is_preserved = isinstance(t, type) and issubclass(t, _preserved_iterable_types)
is_string = isinstance(t, str)
is_iterable = isinstance(t, Iterable)
if is_preserved:
return [t]
elif is_string:
return [t]
elif is_type and not is_iterable:
return [t]
elif is_iterable:
# Recur to validate contained types as well.
ts = t
return tuple(e for t in ts for e in maybe_parse_user_type(t))
else:
# If this raises because `t` cannot be formatted, so be it.
raise TypeError(
'Type specifications must be types or strings. Input: {}'.format(t)
)
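# A hedged illustration of the coercion policy above (outputs shown as
# comments; the dotted type name is made up). Note that Enum subclasses are
# preserved as types rather than iterated over:
#
#   maybe_parse_user_type(int)                  # [int]
#   maybe_parse_user_type('mypkg.MyClass')      # ['mypkg.MyClass']
#   maybe_parse_user_type((int, float))         # (int, float)
#   maybe_parse_user_type([int, (float, str)])  # (int, float, str)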
def maybe_parse_many_user_types(ts):
# Just a different name to communicate that you're parsing multiple user
# inputs. `maybe_parse_user_type` handles the iterable case anyway.
return maybe_parse_user_type(ts)
def _store_types(dct, bases, destination_name, source_name):
maybe_types = maybe_parse_many_user_types([
d[source_name]
for d in ([dct] + [b.__dict__ for b in bases]) if source_name in d
])
dct[destination_name] = maybe_types
def _merge_invariant_results(result):
verdict = True
data = []
for verd, dat in result:
if not verd:
verdict = False
data.append(dat)
return verdict, tuple(data)
def wrap_invariant(invariant):
# Invariant functions may return the outcome of several tests
# In those cases the results have to be merged before being passed
# back to the client.
def f(*args, **kwargs):
result = invariant(*args, **kwargs)
if isinstance(result[0], bool):
return result
return _merge_invariant_results(result)
return f
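# A hedged sketch of an invariant whose result is merged by wrap_invariant
# above (illustrative): returning several (bool, data) pairs makes the
# wrapper combine the verdicts and collect the data of every failing test:
#
#   def range_invariant(x):
#       return ((x >= 0, 'negative'),
#               (x < 100, 'too large'))
#
#   wrapped = wrap_invariant(range_invariant)
#   wrapped(-5)    # (False, ('negative',))
#   wrapped(50)    # (True, ())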
def _all_dicts(bases, seen=None):
"""
Yield each class in ``bases`` and each of their base classes.
"""
if seen is None:
seen = set()
for cls in bases:
if cls in seen:
continue
seen.add(cls)
yield cls.__dict__
for b in _all_dicts(cls.__bases__, seen):
yield b
def store_invariants(dct, bases, destination_name, source_name):
# Invariants are inherited
invariants = []
for ns in [dct] + list(_all_dicts(bases)):
try:
invariant = ns[source_name]
except KeyError:
continue
invariants.append(invariant)
if not all(callable(invariant) for invariant in invariants):
raise TypeError('Invariants must be callable')
dct[destination_name] = tuple(wrap_invariant(inv) for inv in invariants)
class _CheckedTypeMeta(ABCMeta):
def __new__(mcs, name, bases, dct):
_store_types(dct, bases, '_checked_types', '__type__')
store_invariants(dct, bases, '_checked_invariants', '__invariant__')
def default_serializer(self, _, value):
if isinstance(value, CheckedType):
return value.serialize()
return value
dct.setdefault('__serializer__', default_serializer)
dct['__slots__'] = ()
return super(_CheckedTypeMeta, mcs).__new__(mcs, name, bases, dct)
class CheckedTypeError(TypeError):
def __init__(self, source_class, expected_types, actual_type, actual_value, *args, **kwargs):
super(CheckedTypeError, self).__init__(*args, **kwargs)
self.source_class = source_class
self.expected_types = expected_types
self.actual_type = actual_type
self.actual_value = actual_value
class CheckedKeyTypeError(CheckedTypeError):
"""
Raised when trying to set a value using a key with a type that doesn't match the declared type.
Attributes:
source_class -- The class of the collection
expected_types -- Allowed types
actual_type -- The non matching type
actual_value -- Value of the variable with the non matching type
"""
pass
class CheckedValueTypeError(CheckedTypeError):
"""
    Raised when trying to set a value with a type that doesn't match the declared type.
Attributes:
source_class -- The class of the collection
expected_types -- Allowed types
actual_type -- The non matching type
actual_value -- Value of the variable with the non matching type
"""
pass
def _get_class(type_name):
module_name, class_name = type_name.rsplit('.', 1)
module = __import__(module_name, fromlist=[class_name])
return getattr(module, class_name)
def get_type(typ):
if isinstance(typ, type):
return typ
return _get_class(typ)
def get_types(typs):
return [get_type(typ) for typ in typs]
def _check_types(it, expected_types, source_class, exception_type=CheckedValueTypeError):
if expected_types:
for e in it:
if not any(isinstance(e, get_type(t)) for t in expected_types):
actual_type = type(e)
msg = "Type {source_class} can only be used with {expected_types}, not {actual_type}".format(
source_class=source_class.__name__,
expected_types=tuple(get_type(et).__name__ for et in expected_types),
actual_type=actual_type.__name__)
raise exception_type(source_class, expected_types, actual_type, e, msg)
def _invariant_errors(elem, invariants):
return [data for valid, data in (invariant(elem) for invariant in invariants) if not valid]
def _invariant_errors_iterable(it, invariants):
return sum([_invariant_errors(elem, invariants) for elem in it], [])
def optional(*typs):
""" Convenience function to specify that a value may be of any of the types in type 'typs' or None """
return tuple(typs) + (type(None),)
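# A hedged usage sketch (illustrative): optional() is typically passed as the
# type specification of a pyrsistent field, allowing None besides the given
# types; the Rec class below is made up:
#
#   from pyrsistent import PRecord, field
#
#   class Rec(PRecord):
#       x = field(type=optional(int), initial=None)
#
#   Rec(x=None)   # accepted; Rec(x='no') would raise a type error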
def _checked_type_create(cls, source_data, _factory_fields=None, ignore_extra=False):
if isinstance(source_data, cls):
return source_data
    # Recursively apply create methods of checked types if the type of the supplied data
    # does not match any of the valid types.
types = get_types(cls._checked_types)
checked_type = next((t for t in types if issubclass(t, CheckedType)), None)
if checked_type:
return cls([checked_type.create(data, ignore_extra=ignore_extra)
if not any(isinstance(data, t) for t in types) else data
for data in source_data])
return cls(source_data)
class CheckedPVector(PythonPVector, CheckedType, metaclass=_CheckedTypeMeta):
"""
A CheckedPVector is a PVector which allows specifying type and invariant checks.
>>> class Positives(CheckedPVector):
... __type__ = (int, float)
... __invariant__ = lambda n: (n >= 0, 'Negative')
...
>>> Positives([1, 2, 3])
Positives([1, 2, 3])
"""
__slots__ = ()
def __new__(cls, initial=()):
if type(initial) == PythonPVector:
return super(CheckedPVector, cls).__new__(cls, initial._count, initial._shift, initial._root, initial._tail)
return CheckedPVector.Evolver(cls, python_pvector()).extend(initial).persistent()
def set(self, key, value):
return self.evolver().set(key, value).persistent()
def append(self, val):
return self.evolver().append(val).persistent()
def extend(self, it):
return self.evolver().extend(it).persistent()
create = classmethod(_checked_type_create)
def serialize(self, format=None):
serializer = self.__serializer__
return list(serializer(format, v) for v in self)
def __reduce__(self):
# Pickling support
return _restore_pickle, (self.__class__, list(self),)
class Evolver(PythonPVector.Evolver):
__slots__ = ('_destination_class', '_invariant_errors')
def __init__(self, destination_class, vector):
super(CheckedPVector.Evolver, self).__init__(vector)
self._destination_class = destination_class
self._invariant_errors = []
def _check(self, it):
_check_types(it, self._destination_class._checked_types, self._destination_class)
error_data = _invariant_errors_iterable(it, self._destination_class._checked_invariants)
self._invariant_errors.extend(error_data)
def __setitem__(self, key, value):
self._check([value])
return super(CheckedPVector.Evolver, self).__setitem__(key, value)
def append(self, elem):
self._check([elem])
return super(CheckedPVector.Evolver, self).append(elem)
def extend(self, it):
it = list(it)
self._check(it)
return super(CheckedPVector.Evolver, self).extend(it)
def persistent(self):
if self._invariant_errors:
raise InvariantException(error_codes=self._invariant_errors)
result = self._orig_pvector
if self.is_dirty() or (self._destination_class != type(self._orig_pvector)):
pv = super(CheckedPVector.Evolver, self).persistent().extend(self._extra_tail)
result = self._destination_class(pv)
self._reset(result)
return result
def __repr__(self):
return self.__class__.__name__ + "({0})".format(self.tolist())
__str__ = __repr__
def evolver(self):
return CheckedPVector.Evolver(self.__class__, self)
class CheckedPSet(PSet, CheckedType, metaclass=_CheckedTypeMeta):
"""
A CheckedPSet is a PSet which allows specifying type and invariant checks.
>>> class Positives(CheckedPSet):
... __type__ = (int, float)
... __invariant__ = lambda n: (n >= 0, 'Negative')
...
>>> Positives([1, 2, 3])
Positives([1, 2, 3])
"""
__slots__ = ()
def __new__(cls, initial=()):
if type(initial) is PMap:
return super(CheckedPSet, cls).__new__(cls, initial)
evolver = CheckedPSet.Evolver(cls, pset())
for e in initial:
evolver.add(e)
return evolver.persistent()
def __repr__(self):
return self.__class__.__name__ + super(CheckedPSet, self).__repr__()[4:]
def __str__(self):
return self.__repr__()
def serialize(self, format=None):
serializer = self.__serializer__
return set(serializer(format, v) for v in self)
create = classmethod(_checked_type_create)
def __reduce__(self):
# Pickling support
return _restore_pickle, (self.__class__, list(self),)
def evolver(self):
return CheckedPSet.Evolver(self.__class__, self)
class Evolver(PSet._Evolver):
__slots__ = ('_destination_class', '_invariant_errors')
def __init__(self, destination_class, original_set):
super(CheckedPSet.Evolver, self).__init__(original_set)
self._destination_class = destination_class
self._invariant_errors = []
def _check(self, it):
_check_types(it, self._destination_class._checked_types, self._destination_class)
error_data = _invariant_errors_iterable(it, self._destination_class._checked_invariants)
self._invariant_errors.extend(error_data)
def add(self, element):
self._check([element])
self._pmap_evolver[element] = True
return self
def persistent(self):
if self._invariant_errors:
raise InvariantException(error_codes=self._invariant_errors)
if self.is_dirty() or self._destination_class != type(self._original_pset):
return self._destination_class(self._pmap_evolver.persistent())
return self._original_pset
class _CheckedMapTypeMeta(type):
def __new__(mcs, name, bases, dct):
_store_types(dct, bases, '_checked_key_types', '__key_type__')
_store_types(dct, bases, '_checked_value_types', '__value_type__')
store_invariants(dct, bases, '_checked_invariants', '__invariant__')
def default_serializer(self, _, key, value):
sk = key
if isinstance(key, CheckedType):
sk = key.serialize()
sv = value
if isinstance(value, CheckedType):
sv = value.serialize()
return sk, sv
dct.setdefault('__serializer__', default_serializer)
dct['__slots__'] = ()
return super(_CheckedMapTypeMeta, mcs).__new__(mcs, name, bases, dct)
# Marker object
_UNDEFINED_CHECKED_PMAP_SIZE = object()
class CheckedPMap(PMap, CheckedType, metaclass=_CheckedMapTypeMeta):
"""
A CheckedPMap is a PMap which allows specifying type and invariant checks.
>>> class IntToFloatMap(CheckedPMap):
... __key_type__ = int
... __value_type__ = float
... __invariant__ = lambda k, v: (int(v) == k, 'Invalid mapping')
...
>>> IntToFloatMap({1: 1.5, 2: 2.25})
IntToFloatMap({1: 1.5, 2: 2.25})
"""
__slots__ = ()
def __new__(cls, initial={}, size=_UNDEFINED_CHECKED_PMAP_SIZE):
if size is not _UNDEFINED_CHECKED_PMAP_SIZE:
return super(CheckedPMap, cls).__new__(cls, size, initial)
evolver = CheckedPMap.Evolver(cls, pmap())
for k, v in initial.items():
evolver.set(k, v)
return evolver.persistent()
def evolver(self):
return CheckedPMap.Evolver(self.__class__, self)
def __repr__(self):
return self.__class__.__name__ + "({0})".format(str(dict(self)))
__str__ = __repr__
def serialize(self, format=None):
serializer = self.__serializer__
return dict(serializer(format, k, v) for k, v in self.items())
@classmethod
def create(cls, source_data, _factory_fields=None):
if isinstance(source_data, cls):
return source_data
        # Recursively apply create methods of checked types if the type of the supplied data
        # does not match any of the valid types.
key_types = get_types(cls._checked_key_types)
checked_key_type = next((t for t in key_types if issubclass(t, CheckedType)), None)
value_types = get_types(cls._checked_value_types)
checked_value_type = next((t for t in value_types if issubclass(t, CheckedType)), None)
if checked_key_type or checked_value_type:
return cls(dict((checked_key_type.create(key) if checked_key_type and not any(isinstance(key, t) for t in key_types) else key,
checked_value_type.create(value) if checked_value_type and not any(isinstance(value, t) for t in value_types) else value)
for key, value in source_data.items()))
return cls(source_data)
def __reduce__(self):
# Pickling support
return _restore_pickle, (self.__class__, dict(self),)
class Evolver(PMap._Evolver):
__slots__ = ('_destination_class', '_invariant_errors')
def __init__(self, destination_class, original_map):
super(CheckedPMap.Evolver, self).__init__(original_map)
self._destination_class = destination_class
self._invariant_errors = []
def set(self, key, value):
_check_types([key], self._destination_class._checked_key_types, self._destination_class, CheckedKeyTypeError)
_check_types([value], self._destination_class._checked_value_types, self._destination_class)
self._invariant_errors.extend(data for valid, data in (invariant(key, value)
for invariant in self._destination_class._checked_invariants)
if not valid)
return super(CheckedPMap.Evolver, self).set(key, value)
def persistent(self):
if self._invariant_errors:
raise InvariantException(error_codes=self._invariant_errors)
if self.is_dirty() or type(self._original_pmap) != self._destination_class:
return self._destination_class(self._buckets_evolver.persistent(), self._size)
return self._original_pmap
| 18,372 | Python | 32.836096 | 150 | 0.617897 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pyrsistent/_plist.py | from collections.abc import Sequence, Hashable
from numbers import Integral
from functools import reduce
class _PListBuilder(object):
"""
Helper class to allow construction of a list without
having to reverse it in the end.
"""
__slots__ = ('_head', '_tail')
def __init__(self):
self._head = _EMPTY_PLIST
self._tail = _EMPTY_PLIST
def _append(self, elem, constructor):
if not self._tail:
self._head = constructor(elem)
self._tail = self._head
else:
self._tail.rest = constructor(elem)
self._tail = self._tail.rest
return self._head
def append_elem(self, elem):
return self._append(elem, lambda e: PList(e, _EMPTY_PLIST))
def append_plist(self, pl):
return self._append(pl, lambda l: l)
def build(self):
return self._head
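# A hedged illustration (not part of this module) of why the builder exists:
# cons() prepends, so building front-to-back with cons alone would need a
# final reverse(); the builder keeps a tail pointer instead:
#
#   lb = _PListBuilder()
#   for x in (1, 2, 3):
#       lb.append_elem(x)
#   lb.build()    # plist([1, 2, 3])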
class _PListBase(object):
__slots__ = ('__weakref__',)
    # Selected implementations can be taken straight from the Sequence
    # class; others are less suitable, especially those that work with
    # index lookups.
count = Sequence.count
index = Sequence.index
def __reduce__(self):
# Pickling support
return plist, (list(self),)
def __len__(self):
"""
Return the length of the list, computed by traversing it.
This is obviously O(n) but with the current implementation
where a list is also a node the overhead of storing the length
in every node would be quite significant.
"""
return sum(1 for _ in self)
def __repr__(self):
return "plist({0})".format(list(self))
__str__ = __repr__
def cons(self, elem):
"""
Return a new list with elem inserted as new head.
>>> plist([1, 2]).cons(3)
plist([3, 1, 2])
"""
return PList(elem, self)
def mcons(self, iterable):
"""
Return a new list with all elements of iterable repeatedly cons:ed to the current list.
NB! The elements will be inserted in the reverse order of the iterable.
Runs in O(len(iterable)).
>>> plist([1, 2]).mcons([3, 4])
plist([4, 3, 1, 2])
"""
head = self
for elem in iterable:
head = head.cons(elem)
return head
def reverse(self):
"""
Return a reversed version of list. Runs in O(n) where n is the length of the list.
>>> plist([1, 2, 3]).reverse()
plist([3, 2, 1])
Also supports the standard reversed function.
>>> reversed(plist([1, 2, 3]))
plist([3, 2, 1])
"""
result = plist()
head = self
while head:
result = result.cons(head.first)
head = head.rest
return result
__reversed__ = reverse
def split(self, index):
"""
Split the list at the position specified by index. Returns a tuple containing the
list up until index and the list after the index. Runs in O(index).
>>> plist([1, 2, 3, 4]).split(2)
(plist([1, 2]), plist([3, 4]))
"""
lb = _PListBuilder()
right_list = self
i = 0
while right_list and i < index:
lb.append_elem(right_list.first)
right_list = right_list.rest
i += 1
if not right_list:
# Just a small optimization in the cases where no split occurred
return self, _EMPTY_PLIST
return lb.build(), right_list
def __iter__(self):
li = self
while li:
yield li.first
li = li.rest
def __lt__(self, other):
if not isinstance(other, _PListBase):
return NotImplemented
return tuple(self) < tuple(other)
def __eq__(self, other):
"""
Traverses the lists, checking equality of elements.
This is an O(n) operation, but preserves the standard semantics of list equality.
"""
if not isinstance(other, _PListBase):
return NotImplemented
self_head = self
other_head = other
while self_head and other_head:
if not self_head.first == other_head.first:
return False
self_head = self_head.rest
other_head = other_head.rest
return not self_head and not other_head
def __getitem__(self, index):
# Don't use this data structure if you plan to do a lot of indexing, it is
# very inefficient! Use a PVector instead!
if isinstance(index, slice):
if index.start is not None and index.stop is None and (index.step is None or index.step == 1):
return self._drop(index.start)
# Take the easy way out for all other slicing cases, not much structural reuse possible anyway
return plist(tuple(self)[index])
if not isinstance(index, Integral):
raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)
if index < 0:
# NB: O(n)!
index += len(self)
try:
return self._drop(index).first
except AttributeError as e:
raise IndexError("PList index out of range") from e
def _drop(self, count):
if count < 0:
raise IndexError("PList index out of range")
head = self
while count > 0:
head = head.rest
count -= 1
return head
def __hash__(self):
return hash(tuple(self))
def remove(self, elem):
"""
Return new list with first element equal to elem removed. O(k) where k is the position
of the element that is removed.
Raises ValueError if no matching element is found.
>>> plist([1, 2, 1]).remove(1)
plist([2, 1])
"""
builder = _PListBuilder()
head = self
while head:
if head.first == elem:
return builder.append_plist(head.rest)
builder.append_elem(head.first)
head = head.rest
raise ValueError('{0} not found in PList'.format(elem))
class PList(_PListBase):
"""
Classical Lisp style singly linked list. Adding elements to the head using cons is O(1).
Element access is O(k) where k is the position of the element in the list. Taking the
length of the list is O(n).
Fully supports the Sequence and Hashable protocols, including indexing and slicing, but
if you need fast random access go for the PVector instead.
Do not instantiate directly, instead use the factory functions :py:func:`l` or :py:func:`plist` to
create an instance.
Some examples:
>>> x = plist([1, 2])
>>> y = x.cons(3)
>>> x
plist([1, 2])
>>> y
plist([3, 1, 2])
>>> y.first
3
>>> y.rest == x
True
>>> y[:2]
plist([3, 1])
"""
__slots__ = ('first', 'rest')
def __new__(cls, first, rest):
instance = super(PList, cls).__new__(cls)
instance.first = first
instance.rest = rest
return instance
def __bool__(self):
return True
__nonzero__ = __bool__
Sequence.register(PList)
Hashable.register(PList)
class _EmptyPList(_PListBase):
__slots__ = ()
def __bool__(self):
return False
__nonzero__ = __bool__
@property
def first(self):
raise AttributeError("Empty PList has no first")
@property
def rest(self):
return self
Sequence.register(_EmptyPList)
Hashable.register(_EmptyPList)
_EMPTY_PLIST = _EmptyPList()
def plist(iterable=(), reverse=False):
"""
Creates a new persistent list containing all elements of iterable.
Optional parameter reverse specifies if the elements should be inserted in
reverse order or not.
>>> plist([1, 2, 3])
plist([1, 2, 3])
>>> plist([1, 2, 3], reverse=True)
plist([3, 2, 1])
"""
if not reverse:
iterable = list(iterable)
iterable.reverse()
return reduce(lambda pl, elem: pl.cons(elem), iterable, _EMPTY_PLIST)
def l(*elements):
"""
Creates a new persistent list containing all arguments.
>>> l(1, 2, 3)
plist([1, 2, 3])
"""
return plist(elements)
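# Hedged sketch, not part of the original source: demonstrates that cons shares
# the tail, so prepending never copies existing cells.
def _example_plist_sharing():
    base = plist([2, 3])
    extended = base.cons(1)
    return extended.rest is base  # True: the tail is the original list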
| 8,293 | Python | 25.414013 | 106 | 0.566381 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pyrsistent/_helpers.py | from functools import wraps
from pyrsistent._pmap import PMap, pmap
from pyrsistent._pset import PSet, pset
from pyrsistent._pvector import PVector, pvector
def freeze(o, strict=True):
"""
Recursively convert simple Python containers into pyrsistent versions
of those containers.
- list is converted to pvector, recursively
- dict is converted to pmap, recursively on values (but not keys)
- set is converted to pset, but not recursively
- tuple is converted to tuple, recursively.
If strict == True (default):
- freeze is called on elements of pvectors
- freeze is called on values of pmaps
Sets and dict keys are not recursively frozen because they do not contain
mutable data by convention. The main exception to this rule is that
dict keys and set elements are often instances of mutable objects that
support hash-by-id, which this function can't convert anyway.
>>> freeze(set([1, 2]))
pset([1, 2])
>>> freeze([1, {'a': 3}])
pvector([1, pmap({'a': 3})])
>>> freeze((1, []))
(1, pvector([]))
"""
typ = type(o)
if typ is dict or (strict and isinstance(o, PMap)):
return pmap({k: freeze(v, strict) for k, v in o.items()})
if typ is list or (strict and isinstance(o, PVector)):
curried_freeze = lambda x: freeze(x, strict)
return pvector(map(curried_freeze, o))
if typ is tuple:
curried_freeze = lambda x: freeze(x, strict)
return tuple(map(curried_freeze, o))
if typ is set:
# impossible to have anything that needs freezing inside a set or pset
return pset(o)
return o
def thaw(o, strict=True):
"""
Recursively convert pyrsistent containers into simple Python containers.
- pvector is converted to list, recursively
- pmap is converted to dict, recursively on values (but not keys)
- pset is converted to set, but not recursively
- tuple is converted to tuple, recursively.
If strict == True (the default):
- thaw is called on elements of lists
- thaw is called on values in dicts
>>> from pyrsistent import s, m, v
>>> thaw(s(1, 2))
{1, 2}
>>> thaw(v(1, m(a=3)))
[1, {'a': 3}]
>>> thaw((1, v()))
(1, [])
"""
typ = type(o)
if isinstance(o, PVector) or (strict and typ is list):
curried_thaw = lambda x: thaw(x, strict)
return list(map(curried_thaw, o))
if isinstance(o, PMap) or (strict and typ is dict):
return {k: thaw(v, strict) for k, v in o.items()}
if typ is tuple:
curried_thaw = lambda x: thaw(x, strict)
return tuple(map(curried_thaw, o))
if isinstance(o, PSet):
# impossible to thaw inside psets or sets
return set(o)
return o
def mutant(fn):
"""
Convenience decorator to isolate mutation to within the decorated function (with respect
to the input arguments).
All arguments to the decorated function will be frozen so that they are guaranteed not to change.
The return value is also frozen.
"""
@wraps(fn)
def inner_f(*args, **kwargs):
return freeze(fn(*[freeze(e) for e in args], **dict(freeze(item) for item in kwargs.items())))
return inner_f
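# Hedged usage sketch, not part of the original source: _example_add_total is a
# hypothetical function showing the mutant round trip. Its argument arrives
# frozen (a pmap holding a pvector), and its return value is frozen on the way out.
@mutant
def _example_add_total(order):
    return order.set('total', sum(order['items']))
# _example_add_total({'items': [1, 2, 3]}) -> a pmap with 'total': 6 added.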
| 3,232 | Python | 31.989796 | 102 | 0.641708 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pyrsistent/__init__.py | # -*- coding: utf-8 -*-
from pyrsistent._pmap import pmap, m, PMap
from pyrsistent._pvector import pvector, v, PVector
from pyrsistent._pset import pset, s, PSet
from pyrsistent._pbag import pbag, b, PBag
from pyrsistent._plist import plist, l, PList
from pyrsistent._pdeque import pdeque, dq, PDeque
from pyrsistent._checked_types import (
CheckedPMap, CheckedPVector, CheckedPSet, InvariantException, CheckedKeyTypeError,
CheckedValueTypeError, CheckedType, optional)
from pyrsistent._field_common import (
field, PTypeError, pset_field, pmap_field, pvector_field)
from pyrsistent._precord import PRecord
from pyrsistent._pclass import PClass, PClassMeta
from pyrsistent._immutable import immutable
from pyrsistent._helpers import freeze, thaw, mutant
from pyrsistent._transformations import inc, discard, rex, ny
from pyrsistent._toolz import get_in
__all__ = ('pmap', 'm', 'PMap',
'pvector', 'v', 'PVector',
'pset', 's', 'PSet',
'pbag', 'b', 'PBag',
'plist', 'l', 'PList',
'pdeque', 'dq', 'PDeque',
'CheckedPMap', 'CheckedPVector', 'CheckedPSet', 'InvariantException', 'CheckedKeyTypeError', 'CheckedValueTypeError', 'CheckedType', 'optional',
'PRecord', 'field', 'pset_field', 'pmap_field', 'pvector_field',
'PClass', 'PClassMeta',
'immutable',
'freeze', 'thaw', 'mutant',
'get_in',
'inc', 'discard', 'rex', 'ny')
| 1,479 | Python | 29.833333 | 155 | 0.656525 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pyrsistent/_toolz.py | """
Functionality copied from the toolz package to avoid having
to add toolz as a dependency.
See https://github.com/pytoolz/toolz/.
toolz is released under BSD licence. Below is the licence text
from toolz as it appeared when copying the code.
--------------------------------------------------------------
Copyright (c) 2013 Matthew Rocklin
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
a. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
b. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
c. Neither the name of toolz nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
"""
import operator
from functools import reduce
def get_in(keys, coll, default=None, no_default=False):
"""
NB: This is a straight copy of the get_in implementation found in
the toolz library (https://github.com/pytoolz/toolz/). It works
with persistent data structures as well as the corresponding
data structures from the stdlib.
Returns coll[i0][i1]...[iX] where [i0, i1, ..., iX]==keys.
If coll[i0][i1]...[iX] cannot be found, returns ``default``, unless
``no_default`` is specified, then it raises KeyError or IndexError.
``get_in`` is a generalization of ``operator.getitem`` for nested data
structures such as dictionaries and lists.
>>> from pyrsistent import freeze
>>> transaction = freeze({'name': 'Alice',
... 'purchase': {'items': ['Apple', 'Orange'],
... 'costs': [0.50, 1.25]},
... 'credit card': '5555-1234-1234-1234'})
>>> get_in(['purchase', 'items', 0], transaction)
'Apple'
>>> get_in(['name'], transaction)
'Alice'
>>> get_in(['purchase', 'total'], transaction)
>>> get_in(['purchase', 'items', 'apple'], transaction)
>>> get_in(['purchase', 'items', 10], transaction)
>>> get_in(['purchase', 'total'], transaction, 0)
0
>>> get_in(['y'], {}, no_default=True)
Traceback (most recent call last):
...
KeyError: 'y'
"""
try:
return reduce(operator.getitem, keys, coll)
except (KeyError, IndexError, TypeError):
if no_default:
raise
return default
| 3,425 | Python | 39.785714 | 75 | 0.676204 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pyrsistent/_pvector.py | from abc import abstractmethod, ABCMeta
from collections.abc import Sequence, Hashable
from numbers import Integral
import operator
from pyrsistent._transformations import transform
def _bitcount(val):
return bin(val).count("1")
BRANCH_FACTOR = 32
BIT_MASK = BRANCH_FACTOR - 1
SHIFT = _bitcount(BIT_MASK)
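# Hedged sketch, not part of the original source: shows how a flat index is
# split into per-level trie slots. With BRANCH_FACTOR == 32 each level consumes
# SHIFT == 5 bits of the index, root level first.
def _example_trie_path(index, shift):
    return [(index >> level) & BIT_MASK for level in range(shift, -1, -SHIFT)]
# _example_trie_path(70, SHIFT) == [2, 6]: child 2 at the root, slot 6 in the leaf.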
def compare_pvector(v, other, operator):
return operator(v.tolist(), other.tolist() if isinstance(other, PVector) else other)
def _index_or_slice(index, stop):
if stop is None:
return index
return slice(index, stop)
class PythonPVector(object):
"""
Support structure for PVector that implements structural sharing for vectors using a trie.
"""
__slots__ = ('_count', '_shift', '_root', '_tail', '_tail_offset', '__weakref__')
def __new__(cls, count, shift, root, tail):
self = super(PythonPVector, cls).__new__(cls)
self._count = count
self._shift = shift
self._root = root
self._tail = tail
# Derived attribute stored for performance
self._tail_offset = self._count - len(self._tail)
return self
def __len__(self):
return self._count
def __getitem__(self, index):
if isinstance(index, slice):
# There are more conditions than the below where it would be OK to
# return ourselves, implement those...
if index.start is None and index.stop is None and index.step is None:
return self
# This is a bit nasty realizing the whole structure as a list before
# slicing it but it is the fastest way I've found to date, and it's easy :-)
return _EMPTY_PVECTOR.extend(self.tolist()[index])
if index < 0:
index += self._count
return PythonPVector._node_for(self, index)[index & BIT_MASK]
def __add__(self, other):
return self.extend(other)
def __repr__(self):
return 'pvector({0})'.format(str(self.tolist()))
def __str__(self):
return self.__repr__()
def __iter__(self):
# This is kind of lazy and will produce some memory overhead but it is the fastest method
# by far of those tried since it uses the speed of the built in python list directly.
return iter(self.tolist())
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
return self is other or (hasattr(other, '__len__') and self._count == len(other)) and compare_pvector(self, other, operator.eq)
def __gt__(self, other):
return compare_pvector(self, other, operator.gt)
def __lt__(self, other):
return compare_pvector(self, other, operator.lt)
def __ge__(self, other):
return compare_pvector(self, other, operator.ge)
def __le__(self, other):
return compare_pvector(self, other, operator.le)
def __mul__(self, times):
if times <= 0 or self is _EMPTY_PVECTOR:
return _EMPTY_PVECTOR
if times == 1:
return self
return _EMPTY_PVECTOR.extend(times * self.tolist())
__rmul__ = __mul__
def _fill_list(self, node, shift, the_list):
if shift:
shift -= SHIFT
for n in node:
self._fill_list(n, shift, the_list)
else:
the_list.extend(node)
def tolist(self):
"""
The fastest way to convert the vector into a python list.
"""
the_list = []
self._fill_list(self._root, self._shift, the_list)
the_list.extend(self._tail)
return the_list
def _totuple(self):
"""
Returns the content as a python tuple.
"""
return tuple(self.tolist())
def __hash__(self):
# Taking the easy way out again...
return hash(self._totuple())
def transform(self, *transformations):
return transform(self, transformations)
def __reduce__(self):
# Pickling support
return pvector, (self.tolist(),)
def mset(self, *args):
if len(args) % 2:
raise TypeError("mset expected an even number of arguments")
evolver = self.evolver()
for i in range(0, len(args), 2):
evolver[args[i]] = args[i+1]
return evolver.persistent()
class Evolver(object):
__slots__ = ('_count', '_shift', '_root', '_tail', '_tail_offset', '_dirty_nodes',
'_extra_tail', '_cached_leafs', '_orig_pvector')
def __init__(self, v):
self._reset(v)
def __getitem__(self, index):
if not isinstance(index, Integral):
raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)
if index < 0:
index += self._count + len(self._extra_tail)
if self._count <= index < self._count + len(self._extra_tail):
return self._extra_tail[index - self._count]
return PythonPVector._node_for(self, index)[index & BIT_MASK]
def _reset(self, v):
self._count = v._count
self._shift = v._shift
self._root = v._root
self._tail = v._tail
self._tail_offset = v._tail_offset
self._dirty_nodes = {}
self._cached_leafs = {}
self._extra_tail = []
self._orig_pvector = v
def append(self, element):
self._extra_tail.append(element)
return self
def extend(self, iterable):
self._extra_tail.extend(iterable)
return self
def set(self, index, val):
self[index] = val
return self
def __setitem__(self, index, val):
if not isinstance(index, Integral):
raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)
if index < 0:
index += self._count + len(self._extra_tail)
if 0 <= index < self._count:
node = self._cached_leafs.get(index >> SHIFT)
if node:
node[index & BIT_MASK] = val
elif index >= self._tail_offset:
if id(self._tail) not in self._dirty_nodes:
self._tail = list(self._tail)
self._dirty_nodes[id(self._tail)] = True
self._cached_leafs[index >> SHIFT] = self._tail
self._tail[index & BIT_MASK] = val
else:
self._root = self._do_set(self._shift, self._root, index, val)
elif self._count <= index < self._count + len(self._extra_tail):
self._extra_tail[index - self._count] = val
elif index == self._count + len(self._extra_tail):
self._extra_tail.append(val)
else:
raise IndexError("Index out of range: %s" % (index,))
def _do_set(self, level, node, i, val):
if id(node) in self._dirty_nodes:
ret = node
else:
ret = list(node)
self._dirty_nodes[id(ret)] = True
if level == 0:
ret[i & BIT_MASK] = val
self._cached_leafs[i >> SHIFT] = ret
else:
sub_index = (i >> level) & BIT_MASK # >>>
ret[sub_index] = self._do_set(level - SHIFT, node[sub_index], i, val)
return ret
def delete(self, index):
del self[index]
return self
def __delitem__(self, key):
if self._orig_pvector:
# All structural sharing bets are off, base evolver on _extra_tail only
l = PythonPVector(self._count, self._shift, self._root, self._tail).tolist()
l.extend(self._extra_tail)
self._reset(_EMPTY_PVECTOR)
self._extra_tail = l
del self._extra_tail[key]
def persistent(self):
result = self._orig_pvector
if self.is_dirty():
result = PythonPVector(self._count, self._shift, self._root, self._tail).extend(self._extra_tail)
self._reset(result)
return result
def __len__(self):
return self._count + len(self._extra_tail)
def is_dirty(self):
return bool(self._dirty_nodes or self._extra_tail)
def evolver(self):
return PythonPVector.Evolver(self)
def set(self, i, val):
# This method could be implemented by a call to mset() but doing so would cause
# a ~5 X performance penalty on PyPy (considered the primary platform for this implementation
# of PVector) so we're keeping this implementation for now.
if not isinstance(i, Integral):
raise TypeError("'%s' object cannot be interpreted as an index" % type(i).__name__)
if i < 0:
i += self._count
if 0 <= i < self._count:
if i >= self._tail_offset:
new_tail = list(self._tail)
new_tail[i & BIT_MASK] = val
return PythonPVector(self._count, self._shift, self._root, new_tail)
return PythonPVector(self._count, self._shift, self._do_set(self._shift, self._root, i, val), self._tail)
if i == self._count:
return self.append(val)
raise IndexError("Index out of range: %s" % (i,))
def _do_set(self, level, node, i, val):
ret = list(node)
if level == 0:
ret[i & BIT_MASK] = val
else:
sub_index = (i >> level) & BIT_MASK # >>>
ret[sub_index] = self._do_set(level - SHIFT, node[sub_index], i, val)
return ret
@staticmethod
def _node_for(pvector_like, i):
if 0 <= i < pvector_like._count:
if i >= pvector_like._tail_offset:
return pvector_like._tail
node = pvector_like._root
for level in range(pvector_like._shift, 0, -SHIFT):
node = node[(i >> level) & BIT_MASK] # >>>
return node
raise IndexError("Index out of range: %s" % (i,))
def _create_new_root(self):
new_shift = self._shift
# Overflow root?
if (self._count >> SHIFT) > (1 << self._shift): # >>>
new_root = [self._root, self._new_path(self._shift, self._tail)]
new_shift += SHIFT
else:
new_root = self._push_tail(self._shift, self._root, self._tail)
return new_root, new_shift
def append(self, val):
if len(self._tail) < BRANCH_FACTOR:
new_tail = list(self._tail)
new_tail.append(val)
return PythonPVector(self._count + 1, self._shift, self._root, new_tail)
# Full tail, push into tree
new_root, new_shift = self._create_new_root()
return PythonPVector(self._count + 1, new_shift, new_root, [val])
def _new_path(self, level, node):
if level == 0:
return node
return [self._new_path(level - SHIFT, node)]
def _mutating_insert_tail(self):
self._root, self._shift = self._create_new_root()
self._tail = []
def _mutating_fill_tail(self, offset, sequence):
max_delta_len = BRANCH_FACTOR - len(self._tail)
delta = sequence[offset:offset + max_delta_len]
self._tail.extend(delta)
delta_len = len(delta)
self._count += delta_len
return offset + delta_len
def _mutating_extend(self, sequence):
offset = 0
sequence_len = len(sequence)
while offset < sequence_len:
offset = self._mutating_fill_tail(offset, sequence)
if len(self._tail) == BRANCH_FACTOR:
self._mutating_insert_tail()
self._tail_offset = self._count - len(self._tail)
def extend(self, obj):
# Mutates the new vector directly for efficiency but that's only an
# implementation detail; once it is returned it should be considered immutable
l = obj.tolist() if isinstance(obj, PythonPVector) else list(obj)
if l:
new_vector = self.append(l[0])
new_vector._mutating_extend(l[1:])
return new_vector
return self
def _push_tail(self, level, parent, tail_node):
"""
if parent is leaf, insert node,
else does it map to an existing child? ->
node_to_insert = push node one more level
else alloc new path
return node_to_insert placed in copy of parent
"""
ret = list(parent)
if level == SHIFT:
ret.append(tail_node)
return ret
sub_index = ((self._count - 1) >> level) & BIT_MASK # >>>
if len(parent) > sub_index:
ret[sub_index] = self._push_tail(level - SHIFT, parent[sub_index], tail_node)
return ret
ret.append(self._new_path(level - SHIFT, tail_node))
return ret
def index(self, value, *args, **kwargs):
return self.tolist().index(value, *args, **kwargs)
def count(self, value):
return self.tolist().count(value)
def delete(self, index, stop=None):
l = self.tolist()
del l[_index_or_slice(index, stop)]
return _EMPTY_PVECTOR.extend(l)
def remove(self, value):
l = self.tolist()
l.remove(value)
return _EMPTY_PVECTOR.extend(l)
class PVector(metaclass=ABCMeta):
"""
Persistent vector implementation. Meant as a replacement for the cases where you would normally
use a Python list.
Do not instantiate directly, instead use the factory functions :py:func:`v` and :py:func:`pvector` to
create an instance.
Heavily influenced by the persistent vector available in Clojure. Initially this was more or
less just a port of the Java code for the Clojure vector. It has since been modified and to
some extent optimized for usage in Python.
The vector is organized as a trie; any mutating method will return a new vector that contains the changes. No
updates are done to the original vector. Structural sharing between vectors is applied where possible to save
space and to avoid making complete copies.
This structure corresponds most closely to the built-in list type and is intended as a replacement. Where the
semantics are (more or less) the same, the same function names have been used; in some cases, such as
assignment, that is not possible.
The PVector implements the Sequence protocol and is Hashable.
Inserts are amortized O(1). Random access is log32(n) where n is the size of the vector.
The following are examples of some common operations on persistent vectors:
>>> p = v(1, 2, 3)
>>> p2 = p.append(4)
>>> p3 = p2.extend([5, 6, 7])
>>> p
pvector([1, 2, 3])
>>> p2
pvector([1, 2, 3, 4])
>>> p3
pvector([1, 2, 3, 4, 5, 6, 7])
>>> p3[5]
6
>>> p.set(1, 99)
pvector([1, 99, 3])
>>>
"""
@abstractmethod
def __len__(self):
"""
>>> len(v(1, 2, 3))
3
"""
@abstractmethod
def __getitem__(self, index):
"""
Get value at index. Full slicing support.
>>> v1 = v(5, 6, 7, 8)
>>> v1[2]
7
>>> v1[1:3]
pvector([6, 7])
"""
@abstractmethod
def __add__(self, other):
"""
>>> v1 = v(1, 2)
>>> v2 = v(3, 4)
>>> v1 + v2
pvector([1, 2, 3, 4])
"""
@abstractmethod
def __mul__(self, times):
"""
>>> v1 = v(1, 2)
>>> 3 * v1
pvector([1, 2, 1, 2, 1, 2])
"""
@abstractmethod
def __hash__(self):
"""
>>> v1 = v(1, 2, 3)
>>> v2 = v(1, 2, 3)
>>> hash(v1) == hash(v2)
True
"""
@abstractmethod
def evolver(self):
"""
Create a new evolver for this pvector. The evolver acts as a mutable view of the vector
with "transaction like" semantics. No part of the underlying vector i updated, it is still
fully immutable. Furthermore multiple evolvers created from the same pvector do not
interfere with each other.
You may want to use an evolver instead of working directly with the pvector in the
following cases:
* Multiple updates are done to the same vector and the intermediate results are of no
interest. In this case an evolver may be more efficient and easier to work with.
* You need to pass a vector into a legacy function or a function that you have no control
over which performs in place mutations of lists. In this case pass an evolver instance
instead and then create a new pvector from the evolver once the function returns.
The following example illustrates a typical workflow when working with evolvers. It also
displays most of the API (which is kept small by design; you should not be tempted to
use evolvers in excess ;-)).
Create the evolver and perform various mutating updates to it:
>>> v1 = v(1, 2, 3, 4, 5)
>>> e = v1.evolver()
>>> e[1] = 22
>>> _ = e.append(6)
>>> _ = e.extend([7, 8, 9])
>>> e[8] += 1
>>> len(e)
9
The underlying pvector remains the same:
>>> v1
pvector([1, 2, 3, 4, 5])
The changes are kept in the evolver. An updated pvector can be created using the
persistent() function on the evolver.
>>> v2 = e.persistent()
>>> v2
pvector([1, 22, 3, 4, 5, 6, 7, 8, 10])
The new pvector will share data with the original pvector in the same way that would have
been done if only using operations on the pvector.
"""
@abstractmethod
def mset(self, *args):
"""
Return a new vector with elements in specified positions replaced by values (multi set).
Elements on even positions in the argument list are interpreted as indexes while
elements on odd positions are considered values.
>>> v1 = v(1, 2, 3)
>>> v1.mset(0, 11, 2, 33)
pvector([11, 2, 33])
"""
@abstractmethod
def set(self, i, val):
"""
Return a new vector with element at position i replaced with val. The original vector remains unchanged.
Setting a value one step beyond the end of the vector is equal to appending. Setting beyond that will
result in an IndexError.
>>> v1 = v(1, 2, 3)
>>> v1.set(1, 4)
pvector([1, 4, 3])
>>> v1.set(3, 4)
pvector([1, 2, 3, 4])
>>> v1.set(-1, 4)
pvector([1, 2, 4])
"""
@abstractmethod
def append(self, val):
"""
Return a new vector with val appended.
>>> v1 = v(1, 2)
>>> v1.append(3)
pvector([1, 2, 3])
"""
@abstractmethod
def extend(self, obj):
"""
Return a new vector with all values in obj appended to it. Obj may be another
PVector or any other Iterable.
>>> v1 = v(1, 2, 3)
>>> v1.extend([4, 5])
pvector([1, 2, 3, 4, 5])
"""
@abstractmethod
def index(self, value, *args, **kwargs):
"""
Return first index of value. Additional indexes may be supplied to limit the search to a
sub range of the vector.
>>> v1 = v(1, 2, 3, 4, 3)
>>> v1.index(3)
2
>>> v1.index(3, 3, 5)
4
"""
@abstractmethod
def count(self, value):
"""
Return the number of times that value appears in the vector.
>>> v1 = v(1, 4, 3, 4)
>>> v1.count(4)
2
"""
@abstractmethod
def transform(self, *transformations):
"""
Transform arbitrarily complex combinations of PVectors and PMaps. A transformation
consists of two parts. One match expression that specifies which elements to transform
and one transformation function that performs the actual transformation.
>>> from pyrsistent import freeze, ny
>>> news_paper = freeze({'articles': [{'author': 'Sara', 'content': 'A short article'},
... {'author': 'Steve', 'content': 'A slightly longer article'}],
... 'weather': {'temperature': '11C', 'wind': '5m/s'}})
>>> short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:25] + '...' if len(c) > 25 else c)
>>> very_short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:15] + '...' if len(c) > 15 else c)
>>> very_short_news.articles[0].content
'A short article'
>>> very_short_news.articles[1].content
'A slightly long...'
When nothing has been transformed the original data structure is kept
>>> short_news is news_paper
True
>>> very_short_news is news_paper
False
>>> very_short_news.articles[0] is news_paper.articles[0]
True
"""
@abstractmethod
def delete(self, index, stop=None):
"""
Delete a portion of the vector by index or range.
>>> v1 = v(1, 2, 3, 4, 5)
>>> v1.delete(1)
pvector([1, 3, 4, 5])
>>> v1.delete(1, 3)
pvector([1, 4, 5])
"""
@abstractmethod
def remove(self, value):
"""
Remove the first occurrence of a value from the vector.
>>> v1 = v(1, 2, 3, 2, 1)
>>> v2 = v1.remove(1)
>>> v2
pvector([2, 3, 2, 1])
>>> v2.remove(1)
pvector([2, 3, 2])
"""
_EMPTY_PVECTOR = PythonPVector(0, SHIFT, [], [])
PVector.register(PythonPVector)
Sequence.register(PVector)
Hashable.register(PVector)
def python_pvector(iterable=()):
"""
Create a new persistent vector containing the elements in iterable.
>>> v1 = pvector([1, 2, 3])
>>> v1
pvector([1, 2, 3])
"""
return _EMPTY_PVECTOR.extend(iterable)
try:
# Use the C extension as underlying trie implementation if it is available
import os
if os.environ.get('PYRSISTENT_NO_C_EXTENSION'):
pvector = python_pvector
else:
from pvectorc import pvector
PVector.register(type(pvector()))
except ImportError:
pvector = python_pvector
def v(*elements):
"""
Create a new persistent vector containing all parameters to this function.
>>> v1 = v(1, 2, 3)
>>> v1
pvector([1, 2, 3])
"""
return pvector(elements)
| 22,694 | Python | 30.875 | 135 | 0.553935 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pyrsistent/_pset.py | from collections.abc import Set, Hashable
import sys
from pyrsistent._pmap import pmap
class PSet(object):
"""
Persistent set implementation. Built on top of the persistent map. The set supports all operations
in the Set protocol and is Hashable.
Do not instantiate directly, instead use the factory functions :py:func:`s` or :py:func:`pset`
to create an instance.
Random access and insert are log32(n) where n is the size of the set.
Some examples:
>>> s = pset([1, 2, 3, 1])
>>> s2 = s.add(4)
>>> s3 = s2.remove(2)
>>> s
pset([1, 2, 3])
>>> s2
pset([1, 2, 3, 4])
>>> s3
pset([1, 3, 4])
"""
__slots__ = ('_map', '__weakref__')
def __new__(cls, m):
self = super(PSet, cls).__new__(cls)
self._map = m
return self
def __contains__(self, element):
return element in self._map
def __iter__(self):
return iter(self._map)
def __len__(self):
return len(self._map)
def __repr__(self):
if not self:
return 'p' + str(set(self))
return 'pset([{0}])'.format(str(set(self))[1:-1])
def __str__(self):
return self.__repr__()
def __hash__(self):
return hash(self._map)
def __reduce__(self):
# Pickling support
return pset, (list(self),)
@classmethod
def _from_iterable(cls, it, pre_size=8):
return PSet(pmap(dict((k, True) for k in it), pre_size=pre_size))
def add(self, element):
"""
Return a new PSet with element added
>>> s1 = s(1, 2)
>>> s1.add(3)
pset([1, 2, 3])
"""
return self.evolver().add(element).persistent()
def update(self, iterable):
"""
Return a new PSet with elements in iterable added
>>> s1 = s(1, 2)
>>> s1.update([3, 4, 4])
pset([1, 2, 3, 4])
"""
e = self.evolver()
for element in iterable:
e.add(element)
return e.persistent()
def remove(self, element):
"""
Return a new PSet with element removed. Raises KeyError if element is not present.
>>> s1 = s(1, 2)
>>> s1.remove(2)
pset([1])
"""
if element in self._map:
return self.evolver().remove(element).persistent()
raise KeyError("Element '%s' not present in PSet" % repr(element))
def discard(self, element):
"""
Return a new PSet with element removed. Returns itself if element is not present.
"""
if element in self._map:
return self.evolver().remove(element).persistent()
return self
class _Evolver(object):
__slots__ = ('_original_pset', '_pmap_evolver')
def __init__(self, original_pset):
self._original_pset = original_pset
self._pmap_evolver = original_pset._map.evolver()
def add(self, element):
self._pmap_evolver[element] = True
return self
def remove(self, element):
del self._pmap_evolver[element]
return self
def is_dirty(self):
return self._pmap_evolver.is_dirty()
def persistent(self):
if not self.is_dirty():
return self._original_pset
return PSet(self._pmap_evolver.persistent())
def __len__(self):
return len(self._pmap_evolver)
def copy(self):
return self
def evolver(self):
"""
Create a new evolver for this pset. For a discussion on evolvers in general see the
documentation for the pvector evolver.
Create the evolver and perform various mutating updates to it:
>>> s1 = s(1, 2, 3)
>>> e = s1.evolver()
>>> _ = e.add(4)
>>> len(e)
4
>>> _ = e.remove(1)
The underlying pset remains the same:
>>> s1
pset([1, 2, 3])
The changes are kept in the evolver. An updated pset can be created using the
persistent() function on the evolver.
>>> s2 = e.persistent()
>>> s2
pset([2, 3, 4])
The new pset will share data with the original pset in the same way that would have
been done if only using operations on the pset.
"""
return PSet._Evolver(self)
# All the operations and comparisons you would expect on a set.
#
# This is not very beautiful. If we avoid inheriting from PSet we can use the
# __slots__ concepts (which requires a new style class) and hopefully save some memory.
__le__ = Set.__le__
__lt__ = Set.__lt__
__gt__ = Set.__gt__
__ge__ = Set.__ge__
__eq__ = Set.__eq__
__ne__ = Set.__ne__
__and__ = Set.__and__
__or__ = Set.__or__
__sub__ = Set.__sub__
__xor__ = Set.__xor__
issubset = __le__
issuperset = __ge__
union = __or__
intersection = __and__
difference = __sub__
symmetric_difference = __xor__
isdisjoint = Set.isdisjoint
Set.register(PSet)
Hashable.register(PSet)
_EMPTY_PSET = PSet(pmap())
def pset(iterable=(), pre_size=8):
"""
Creates a persistent set from iterable. Optionally takes a sizing parameter equivalent to that
used for :py:func:`pmap`.
>>> s1 = pset([1, 2, 3, 2])
>>> s1
pset([1, 2, 3])
"""
if not iterable:
return _EMPTY_PSET
return PSet._from_iterable(iterable, pre_size=pre_size)
def s(*elements):
"""
Create a persistent set.
Takes an arbitrary number of arguments to insert into the new set.
>>> s1 = s(1, 2, 3, 2)
>>> s1
pset([1, 2, 3])
"""
return pset(elements)
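# Hedged sketch, not part of the original source: because PSet is registered
# with the Set ABC and borrows its operators, the standard set operators work
# and return new persistent sets.
def _example_pset_operators():
    a, b = s(1, 2, 3), s(3, 4)
    return a | b, a & b, a - b  # union, intersection, difference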
| 5,693 | Python | 23.973684 | 102 | 0.540664 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pyrsistent/_transformations.py | import re
try:
from inspect import Parameter, signature
except ImportError:
signature = None
from inspect import getfullargspec
_EMPTY_SENTINEL = object()
def inc(x):
""" Add one to the current value """
return x + 1
def dec(x):
""" Subtract one from the current value """
return x - 1
def discard(evolver, key):
""" Discard the element and returns a structure without the discarded elements """
try:
del evolver[key]
except KeyError:
pass
# Matchers
def rex(expr):
""" Regular expression matcher to use together with transform functions """
r = re.compile(expr)
return lambda key: isinstance(key, str) and r.match(key)
def ny(_):
""" Matcher that matches any value """
return True
# Support functions
def _chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
def transform(structure, transformations):
r = structure
for path, command in _chunks(transformations, 2):
r = _do_to_path(r, path, command)
return r
def _do_to_path(structure, path, command):
if not path:
return command(structure) if callable(command) else command
kvs = _get_keys_and_values(structure, path[0])
return _update_structure(structure, kvs, path[1:], command)
def _items(structure):
try:
return structure.items()
except AttributeError:
# Support wider range of structures by adding a transform_items() or similar?
return list(enumerate(structure))
def _get(structure, key, default):
try:
if hasattr(structure, '__getitem__'):
return structure[key]
return getattr(structure, key)
except (IndexError, KeyError):
return default
def _get_keys_and_values(structure, key_spec):
if callable(key_spec):
# Support predicates as callable objects in the path
arity = _get_arity(key_spec)
if arity == 1:
# Unary predicates are called with the "key" of the path
# - eg a key in a mapping, an index in a sequence.
return [(k, v) for k, v in _items(structure) if key_spec(k)]
elif arity == 2:
# Binary predicates are called with the key and the corresponding
# value.
return [(k, v) for k, v in _items(structure) if key_spec(k, v)]
else:
# Other arities are an error.
raise ValueError(
"callable in transform path must take 1 or 2 arguments"
)
# Non-callables are used as-is as a key.
return [(key_spec, _get(structure, key_spec, _EMPTY_SENTINEL))]
if signature is None:
def _get_arity(f):
argspec = getfullargspec(f)
return len(argspec.args) - len(argspec.defaults or ())
else:
def _get_arity(f):
return sum(
1
for p
in signature(f).parameters.values()
if p.default is Parameter.empty
and p.kind in (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD)
)
def _update_structure(structure, kvs, path, command):
from pyrsistent._pmap import pmap
e = structure.evolver()
if not path and command is discard:
# Do this in reverse to avoid index problems with vectors. See #92.
for k, v in reversed(kvs):
discard(e, k)
else:
for k, v in kvs:
is_empty = False
if v is _EMPTY_SENTINEL:
# Allow expansion of structure but make sure to cover the case
# when an empty pmap is added as leaf node. See #154.
is_empty = True
v = pmap()
result = _do_to_path(v, path, command)
if result is not v or is_empty:
e[k] = result
return e.persistent()
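# Hedged usage sketch, not part of the original source: shows how transform
# paths pair matchers with commands. The freeze import is done lazily,
# mirroring the trick in _update_structure, to avoid a circular import at
# module load time.
def _example_transform_usage():
    from pyrsistent import freeze
    scores = freeze({'alice': {'points': 1}, 'bob': {'points': 2}})
    bumped = scores.transform([ny, 'points'], inc)  # every player's points + 1
    trimmed = scores.transform(['bob'], discard)    # bob removed entirely
    return bumped, trimmed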
| 3,800 | Python | 26.15 | 86 | 0.598421 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pyrsistent/typing.py | """Helpers for use with type annotation.
Use the empty classes in this module when annotating the types of Pyrsistent
objects, instead of using the actual collection class.
For example,
from pyrsistent import pvector
from pyrsistent.typing import PVector
myvector: PVector[str] = pvector(['a', 'b', 'c'])
"""
from __future__ import absolute_import
try:
from typing import Container
from typing import Hashable
from typing import Generic
from typing import Iterable
from typing import Mapping
from typing import Sequence
from typing import Sized
from typing import TypeVar
__all__ = [
'CheckedPMap',
'CheckedPSet',
'CheckedPVector',
'PBag',
'PDeque',
'PList',
'PMap',
'PSet',
'PVector',
]
T = TypeVar('T')
KT = TypeVar('KT')
VT = TypeVar('VT')
class CheckedPMap(Mapping[KT, VT], Hashable):
pass
# PSet.add and PSet.discard have different type signatures than that of Set.
class CheckedPSet(Generic[T], Hashable):
pass
class CheckedPVector(Sequence[T], Hashable):
pass
class PBag(Container[T], Iterable[T], Sized, Hashable):
pass
class PDeque(Sequence[T], Hashable):
pass
class PList(Sequence[T], Hashable):
pass
class PMap(Mapping[KT, VT], Hashable):
pass
# PSet.add and PSet.discard have different type signatures than that of Set.
class PSet(Generic[T], Hashable):
pass
class PVector(Sequence[T], Hashable):
pass
class PVectorEvolver(Generic[T]):
pass
class PMapEvolver(Generic[KT, VT]):
pass
class PSetEvolver(Generic[T]):
pass
except ImportError:
pass
| 1,767 | Python | 20.82716 | 80 | 0.627051 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pyrsistent/_precord.py | from pyrsistent._checked_types import CheckedType, _restore_pickle, InvariantException, store_invariants
from pyrsistent._field_common import (
set_fields, check_type, is_field_ignore_extra_complaint, PFIELD_NO_INITIAL, serialize, check_global_invariants
)
from pyrsistent._pmap import PMap, pmap
class _PRecordMeta(type):
def __new__(mcs, name, bases, dct):
set_fields(dct, bases, name='_precord_fields')
store_invariants(dct, bases, '_precord_invariants', '__invariant__')
dct['_precord_mandatory_fields'] = \
set(name for name, field in dct['_precord_fields'].items() if field.mandatory)
dct['_precord_initial_values'] = \
dict((k, field.initial) for k, field in dct['_precord_fields'].items() if field.initial is not PFIELD_NO_INITIAL)
dct['__slots__'] = ()
return super(_PRecordMeta, mcs).__new__(mcs, name, bases, dct)
class PRecord(PMap, CheckedType, metaclass=_PRecordMeta):
"""
A PRecord is a PMap with a fixed set of specified fields. Records are declared as python classes inheriting
from PRecord. Because it is a PMap it has full support for all Mapping methods such as iteration and element
access using subscript notation.
More documentation and examples of PRecord usage is available at https://github.com/tobgu/pyrsistent
"""
def __new__(cls, **kwargs):
# Hack total! If these two special attributes exist that means we can create
# ourselves. Otherwise we need to go through the Evolver to create the structures
# for us.
if '_precord_size' in kwargs and '_precord_buckets' in kwargs:
return super(PRecord, cls).__new__(cls, kwargs['_precord_size'], kwargs['_precord_buckets'])
factory_fields = kwargs.pop('_factory_fields', None)
ignore_extra = kwargs.pop('_ignore_extra', False)
initial_values = kwargs
if cls._precord_initial_values:
initial_values = dict((k, v() if callable(v) else v)
for k, v in cls._precord_initial_values.items())
initial_values.update(kwargs)
e = _PRecordEvolver(cls, pmap(pre_size=len(cls._precord_fields)), _factory_fields=factory_fields, _ignore_extra=ignore_extra)
for k, v in initial_values.items():
e[k] = v
return e.persistent()
def set(self, *args, **kwargs):
"""
Set a field in the record. This set function differs slightly from that in the PMap
class. First of all it accepts key-value pairs. Second it accepts multiple key-value
pairs to perform one atomic update of multiple fields.
"""
# The PRecord set() can accept kwargs since all fields that have been declared are
# valid python identifiers. Also allow multiple fields to be set in one operation.
if args:
return super(PRecord, self).set(args[0], args[1])
return self.update(kwargs)
def evolver(self):
"""
Returns an evolver of this object.
"""
return _PRecordEvolver(self.__class__, self)
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__,
', '.join('{0}={1}'.format(k, repr(v)) for k, v in self.items()))
@classmethod
def create(cls, kwargs, _factory_fields=None, ignore_extra=False):
"""
Factory method. Will create a new PRecord of the current type and assign the values
specified in kwargs.
:param ignore_extra: A boolean which when set to True will ignore any keys which appear in kwargs that are not
in the set of fields on the PRecord.
"""
if isinstance(kwargs, cls):
return kwargs
if ignore_extra:
kwargs = {k: kwargs[k] for k in cls._precord_fields if k in kwargs}
return cls(_factory_fields=_factory_fields, _ignore_extra=ignore_extra, **kwargs)
def __reduce__(self):
# Pickling support
return _restore_pickle, (self.__class__, dict(self),)
def serialize(self, format=None):
"""
Serialize the current PRecord using custom serializer functions for fields where
such have been supplied.
"""
return dict((k, serialize(self._precord_fields[k].serializer, format, v)) for k, v in self.items())
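# Hedged usage sketch, not part of the original source: Point is a hypothetical
# record. field() is imported locally so the original imports above stay untouched.
def _example_precord_usage():
    from pyrsistent._field_common import field
    class Point(PRecord):
        x = field(type=int, mandatory=True)
        y = field(type=int, initial=0)
    p = Point(x=1)     # y falls back to its declared initial value 0
    return p.set(y=2)  # returns a new record; p itself is unchanged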
class _PRecordEvolver(PMap._Evolver):
__slots__ = ('_destination_cls', '_invariant_error_codes', '_missing_fields', '_factory_fields', '_ignore_extra')
def __init__(self, cls, original_pmap, _factory_fields=None, _ignore_extra=False):
super(_PRecordEvolver, self).__init__(original_pmap)
self._destination_cls = cls
self._invariant_error_codes = []
self._missing_fields = []
self._factory_fields = _factory_fields
self._ignore_extra = _ignore_extra
def __setitem__(self, key, original_value):
self.set(key, original_value)
def set(self, key, original_value):
field = self._destination_cls._precord_fields.get(key)
if field:
if self._factory_fields is None or field in self._factory_fields:
try:
if is_field_ignore_extra_complaint(PRecord, field, self._ignore_extra):
value = field.factory(original_value, ignore_extra=self._ignore_extra)
else:
value = field.factory(original_value)
except InvariantException as e:
self._invariant_error_codes += e.invariant_errors
self._missing_fields += e.missing_fields
return self
else:
value = original_value
check_type(self._destination_cls, field, key, value)
is_ok, error_code = field.invariant(value)
if not is_ok:
self._invariant_error_codes.append(error_code)
return super(_PRecordEvolver, self).set(key, value)
else:
raise AttributeError("'{0}' is not among the specified fields for {1}".format(key, self._destination_cls.__name__))
def persistent(self):
cls = self._destination_cls
is_dirty = self.is_dirty()
pm = super(_PRecordEvolver, self).persistent()
if is_dirty or not isinstance(pm, cls):
result = cls(_precord_buckets=pm._buckets, _precord_size=pm._size)
else:
result = pm
if cls._precord_mandatory_fields:
self._missing_fields += tuple('{0}.{1}'.format(cls.__name__, f) for f
in (cls._precord_mandatory_fields - set(result.keys())))
if self._invariant_error_codes or self._missing_fields:
raise InvariantException(tuple(self._invariant_error_codes), tuple(self._missing_fields),
'Field invariant failed')
check_global_invariants(result, cls._precord_invariants)
return result
| 7,032 | Python | 40.863095 | 133 | 0.607651 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pyrsistent/_pdeque.py | from collections.abc import Sequence, Hashable
from itertools import islice, chain
from numbers import Integral
from pyrsistent._plist import plist
class PDeque(object):
"""
Persistent double-ended queue (deque). Allows quick appends and pops at both ends. Implemented
using two persistent lists.
A maximum length can be specified to create a bounded queue.
Fully supports the Sequence and Hashable protocols, including indexing and slicing, but
if you need fast random access go for the PVector instead.
Do not instantiate directly, instead use the factory functions :py:func:`dq` or :py:func:`pdeque` to
create an instance.
Some examples:
>>> x = pdeque([1, 2, 3])
>>> x.left
1
>>> x.right
3
>>> x[0] == x.left
True
>>> x[-1] == x.right
True
>>> x.pop()
pdeque([1, 2])
>>> x.pop() == x[:-1]
True
>>> x.popleft()
pdeque([2, 3])
>>> x.append(4)
pdeque([1, 2, 3, 4])
>>> x.appendleft(4)
pdeque([4, 1, 2, 3])
>>> y = pdeque([1, 2, 3], maxlen=3)
>>> y.append(4)
pdeque([2, 3, 4], maxlen=3)
>>> y.appendleft(4)
pdeque([4, 1, 2], maxlen=3)
"""
__slots__ = ('_left_list', '_right_list', '_length', '_maxlen', '__weakref__')
def __new__(cls, left_list, right_list, length, maxlen=None):
instance = super(PDeque, cls).__new__(cls)
instance._left_list = left_list
instance._right_list = right_list
instance._length = length
if maxlen is not None:
if not isinstance(maxlen, Integral):
raise TypeError('An integer is required as maxlen')
if maxlen < 0:
raise ValueError("maxlen must be non-negative")
instance._maxlen = maxlen
return instance
@property
def right(self):
"""
Rightmost element in the deque.
"""
return PDeque._tip_from_lists(self._right_list, self._left_list)
@property
def left(self):
"""
Leftmost element in the deque.
"""
return PDeque._tip_from_lists(self._left_list, self._right_list)
@staticmethod
def _tip_from_lists(primary_list, secondary_list):
if primary_list:
return primary_list.first
if secondary_list:
return secondary_list[-1]
raise IndexError('No elements in empty deque')
def __iter__(self):
return chain(self._left_list, self._right_list.reverse())
def __repr__(self):
return "pdeque({0}{1})".format(list(self),
', maxlen={0}'.format(self._maxlen) if self._maxlen is not None else '')
__str__ = __repr__
@property
def maxlen(self):
"""
Maximum length of the queue.
"""
return self._maxlen
def pop(self, count=1):
"""
Return new deque with rightmost element removed. Popping the empty queue
will return the empty queue. An optional count can be given to indicate the
number of elements to pop. Popping with a negative count is the same as
popleft. Executes in amortized O(k) where k is the number of elements to pop.
>>> pdeque([1, 2]).pop()
pdeque([1])
>>> pdeque([1, 2]).pop(2)
pdeque([])
>>> pdeque([1, 2]).pop(-1)
pdeque([2])
"""
if count < 0:
return self.popleft(-count)
new_right_list, new_left_list = PDeque._pop_lists(self._right_list, self._left_list, count)
return PDeque(new_left_list, new_right_list, max(self._length - count, 0), self._maxlen)
def popleft(self, count=1):
"""
Return new deque with leftmost element removed. Otherwise functionally
equivalent to pop().
>>> pdeque([1, 2]).popleft()
pdeque([2])
"""
if count < 0:
return self.pop(-count)
new_left_list, new_right_list = PDeque._pop_lists(self._left_list, self._right_list, count)
return PDeque(new_left_list, new_right_list, max(self._length - count, 0), self._maxlen)
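# Note (added comment): _pop_lists below removes `count` elements from the head
# of primary_list; when the primary side runs dry, the reversed secondary_list
# takes its place so popping can continue across the seam between the two lists.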
@staticmethod
def _pop_lists(primary_list, secondary_list, count):
new_primary_list = primary_list
new_secondary_list = secondary_list
while count > 0 and (new_primary_list or new_secondary_list):
count -= 1
if new_primary_list.rest:
new_primary_list = new_primary_list.rest
elif new_primary_list:
new_primary_list = new_secondary_list.reverse()
new_secondary_list = plist()
else:
new_primary_list = new_secondary_list.reverse().rest
new_secondary_list = plist()
return new_primary_list, new_secondary_list
def _is_empty(self):
return not self._left_list and not self._right_list
def __lt__(self, other):
if not isinstance(other, PDeque):
return NotImplemented
return tuple(self) < tuple(other)
def __eq__(self, other):
if not isinstance(other, PDeque):
return NotImplemented
if tuple(self) == tuple(other):
# Sanity check of the length value since it is redundant (there for performance)
assert len(self) == len(other)
return True
return False
def __hash__(self):
return hash(tuple(self))
def __len__(self):
return self._length
def append(self, elem):
"""
Return new deque with elem as the rightmost element.
>>> pdeque([1, 2]).append(3)
pdeque([1, 2, 3])
"""
new_left_list, new_right_list, new_length = self._append(self._left_list, self._right_list, elem)
return PDeque(new_left_list, new_right_list, new_length, self._maxlen)
def appendleft(self, elem):
"""
Return new deque with elem as the leftmost element.
>>> pdeque([1, 2]).appendleft(3)
pdeque([3, 1, 2])
"""
new_right_list, new_left_list, new_length = self._append(self._right_list, self._left_list, elem)
return PDeque(new_left_list, new_right_list, new_length, self._maxlen)
def _append(self, primary_list, secondary_list, elem):
if self._maxlen is not None and self._length == self._maxlen:
if self._maxlen == 0:
return primary_list, secondary_list, 0
new_primary_list, new_secondary_list = PDeque._pop_lists(primary_list, secondary_list, 1)
return new_primary_list, new_secondary_list.cons(elem), self._length
return primary_list, secondary_list.cons(elem), self._length + 1
@staticmethod
def _extend_list(the_list, iterable):
count = 0
for elem in iterable:
the_list = the_list.cons(elem)
count += 1
return the_list, count
def _extend(self, primary_list, secondary_list, iterable):
new_primary_list, extend_count = PDeque._extend_list(primary_list, iterable)
new_secondary_list = secondary_list
current_len = self._length + extend_count
if self._maxlen is not None and current_len > self._maxlen:
pop_len = current_len - self._maxlen
new_secondary_list, new_primary_list = PDeque._pop_lists(new_secondary_list, new_primary_list, pop_len)
extend_count -= pop_len
return new_primary_list, new_secondary_list, extend_count
def extend(self, iterable):
"""
Return new deque with all elements of iterable appended to the right.
>>> pdeque([1, 2]).extend([3, 4])
pdeque([1, 2, 3, 4])
"""
new_right_list, new_left_list, extend_count = self._extend(self._right_list, self._left_list, iterable)
return PDeque(new_left_list, new_right_list, self._length + extend_count, self._maxlen)
def extendleft(self, iterable):
"""
Return new deque with all elements of iterable appended to the left.
NB! The elements will be inserted in reverse order compared to the order in the iterable.
>>> pdeque([1, 2]).extendleft([3, 4])
pdeque([4, 3, 1, 2])
"""
new_left_list, new_right_list, extend_count = self._extend(self._left_list, self._right_list, iterable)
return PDeque(new_left_list, new_right_list, self._length + extend_count, self._maxlen)
def count(self, elem):
"""
Return the number of elements equal to elem present in the queue
>>> pdeque([1, 2, 1]).count(1)
2
"""
return self._left_list.count(elem) + self._right_list.count(elem)
def remove(self, elem):
"""
Return new deque with first element from left equal to elem removed. If no such element is found
a ValueError is raised.
>>> pdeque([2, 1, 2]).remove(2)
pdeque([1, 2])
"""
try:
return PDeque(self._left_list.remove(elem), self._right_list, self._length - 1)
except ValueError:
# Value not found in left list, try the right list
try:
# This is severely inefficient with a double reverse, should perhaps implement a remove_last()?
return PDeque(self._left_list,
self._right_list.reverse().remove(elem).reverse(), self._length - 1)
except ValueError as e:
raise ValueError('{0} not found in PDeque'.format(elem)) from e
def reverse(self):
"""
Return reversed deque.
>>> pdeque([1, 2, 3]).reverse()
pdeque([3, 2, 1])
Also supports the standard python reverse function.
>>> reversed(pdeque([1, 2, 3]))
pdeque([3, 2, 1])
"""
return PDeque(self._right_list, self._left_list, self._length)
__reversed__ = reverse
def rotate(self, steps):
"""
Return deque with elements rotated steps steps.
>>> x = pdeque([1, 2, 3])
>>> x.rotate(1)
pdeque([3, 1, 2])
>>> x.rotate(-2)
pdeque([3, 1, 2])
"""
popped_deque = self.pop(steps)
if steps >= 0:
return popped_deque.extendleft(islice(self.reverse(), steps))
return popped_deque.extend(islice(self, -steps))
def __reduce__(self):
# Pickling support
return pdeque, (list(self), self._maxlen)
def __getitem__(self, index):
if isinstance(index, slice):
if index.step is not None and index.step != 1:
# Too difficult, no structural sharing possible
return pdeque(tuple(self)[index], maxlen=self._maxlen)
result = self
if index.start is not None:
result = result.popleft(index.start % self._length)
if index.stop is not None:
result = result.pop(self._length - (index.stop % self._length))
return result
if not isinstance(index, Integral):
raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)
if index >= 0:
return self.popleft(index).left
shifted = len(self) + index
if shifted < 0:
raise IndexError(
"pdeque index {0} out of range {1}".format(index, len(self)),
)
return self.popleft(shifted).left
index = Sequence.index
Sequence.register(PDeque)
Hashable.register(PDeque)
def pdeque(iterable=(), maxlen=None):
"""
Return deque containing the elements of iterable. If maxlen is specified then
len(iterable) - maxlen elements are discarded from the left if len(iterable) > maxlen.
>>> pdeque([1, 2, 3])
pdeque([1, 2, 3])
>>> pdeque([1, 2, 3, 4], maxlen=2)
pdeque([3, 4], maxlen=2)
"""
t = tuple(iterable)
if maxlen is not None:
t = t[-maxlen:]
length = len(t)
pivot = int(length / 2)
left = plist(t[:pivot])
right = plist(t[pivot:], reverse=True)
return PDeque(left, right, length, maxlen)
def dq(*elements):
"""
Return deque containing all arguments.
>>> dq(1, 2, 3)
pdeque([1, 2, 3])
"""
return pdeque(elements)
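# Hedged sketch, not part of the original source: a maxlen-bounded deque acts
# as a persistent sliding window; appending past maxlen drops from the left.
def _example_bounded_window():
    window = pdeque(maxlen=3)
    for sample in (1, 2, 3, 4, 5):
        window = window.append(sample)
    return window  # pdeque([3, 4, 5], maxlen=3)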
| 12,203 | Python | 31.371353 | 115 | 0.574695 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pyrsistent/_field_common.py | from pyrsistent._checked_types import (
CheckedPMap,
CheckedPSet,
CheckedPVector,
CheckedType,
InvariantException,
_restore_pickle,
get_type,
maybe_parse_user_type,
maybe_parse_many_user_types,
)
from pyrsistent._checked_types import optional as optional_type
from pyrsistent._checked_types import wrap_invariant
import inspect
def set_fields(dct, bases, name):
dct[name] = dict(sum([list(b.__dict__.get(name, {}).items()) for b in bases], []))
for k, v in list(dct.items()):
if isinstance(v, _PField):
dct[name][k] = v
del dct[k]
def check_global_invariants(subject, invariants):
error_codes = tuple(error_code for is_ok, error_code in
(invariant(subject) for invariant in invariants) if not is_ok)
if error_codes:
raise InvariantException(error_codes, (), 'Global invariant failed')
def serialize(serializer, format, value):
if isinstance(value, CheckedType) and serializer is PFIELD_NO_SERIALIZER:
return value.serialize(format)
return serializer(format, value)
def check_type(destination_cls, field, name, value):
if field.type and not any(isinstance(value, get_type(t)) for t in field.type):
actual_type = type(value)
message = "Invalid type for field {0}.{1}, was {2}".format(destination_cls.__name__, name, actual_type.__name__)
raise PTypeError(destination_cls, name, field.type, actual_type, message)
def is_type_cls(type_cls, field_type):
if type(field_type) is set:
return True
types = tuple(field_type)
if len(types) == 0:
return False
return issubclass(get_type(types[0]), type_cls)
def is_field_ignore_extra_complaint(type_cls, field, ignore_extra):
    # The ignore_extra param has a default value of False; for speed there is no need to propagate False
if not ignore_extra:
return False
if not is_type_cls(type_cls, field.type):
return False
return 'ignore_extra' in inspect.signature(field.factory).parameters
class _PField(object):
__slots__ = ('type', 'invariant', 'initial', 'mandatory', '_factory', 'serializer')
def __init__(self, type, invariant, initial, mandatory, factory, serializer):
self.type = type
self.invariant = invariant
self.initial = initial
self.mandatory = mandatory
self._factory = factory
self.serializer = serializer
@property
def factory(self):
# If no factory is specified and the type is another CheckedType use the factory method of that CheckedType
if self._factory is PFIELD_NO_FACTORY and len(self.type) == 1:
typ = get_type(tuple(self.type)[0])
if issubclass(typ, CheckedType):
return typ.create
return self._factory
PFIELD_NO_TYPE = ()
PFIELD_NO_INVARIANT = lambda _: (True, None)
PFIELD_NO_FACTORY = lambda x: x
PFIELD_NO_INITIAL = object()
PFIELD_NO_SERIALIZER = lambda _, value: value
def field(type=PFIELD_NO_TYPE, invariant=PFIELD_NO_INVARIANT, initial=PFIELD_NO_INITIAL,
mandatory=False, factory=PFIELD_NO_FACTORY, serializer=PFIELD_NO_SERIALIZER):
"""
Field specification factory for :py:class:`PRecord`.
:param type: a type or iterable with types that are allowed for this field
:param invariant: a function specifying an invariant that must hold for the field
:param initial: value of field if not specified when instantiating the record
:param mandatory: boolean specifying if the field is mandatory or not
:param factory: function called when field is set.
:param serializer: function that returns a serialized version of the field
"""
# NB: We have to check this predicate separately from the predicates in
# `maybe_parse_user_type` et al. because this one is related to supporting
# the argspec for `field`, while those are related to supporting the valid
# ways to specify types.
# Multiple types must be passed in one of the following containers. Note
# that a type that is a subclass of one of these containers, like a
# `collections.namedtuple`, will work as expected, since we check
# `isinstance` and not `issubclass`.
if isinstance(type, (list, set, tuple)):
types = set(maybe_parse_many_user_types(type))
else:
types = set(maybe_parse_user_type(type))
invariant_function = wrap_invariant(invariant) if invariant != PFIELD_NO_INVARIANT and callable(invariant) else invariant
field = _PField(type=types, invariant=invariant_function, initial=initial,
mandatory=mandatory, factory=factory, serializer=serializer)
_check_field_parameters(field)
return field
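# A minimal usage sketch (illustrative; the record and field names below are
# hypothetical, using the public names exported by the pyrsistent package):
#
#     from pyrsistent import PRecord, field
#
#     class Point(PRecord):
#         x = field(type=int, mandatory=True)
#         y = field(type=int, initial=0)
#
#     assert Point(x=1).y == 0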
def _check_field_parameters(field):
for t in field.type:
if not isinstance(t, type) and not isinstance(t, str):
raise TypeError('Type parameter expected, not {0}'.format(type(t)))
if field.initial is not PFIELD_NO_INITIAL and \
not callable(field.initial) and \
field.type and not any(isinstance(field.initial, t) for t in field.type):
raise TypeError('Initial has invalid type {0}'.format(type(field.initial)))
if not callable(field.invariant):
raise TypeError('Invariant must be callable')
if not callable(field.factory):
raise TypeError('Factory must be callable')
if not callable(field.serializer):
raise TypeError('Serializer must be callable')
class PTypeError(TypeError):
"""
Raised when trying to assign a value with a type that doesn't match the declared type.
Attributes:
source_class -- The class of the record
field -- Field name
expected_types -- Types allowed for the field
    actual_type -- The non-matching type
"""
def __init__(self, source_class, field, expected_types, actual_type, *args, **kwargs):
super(PTypeError, self).__init__(*args, **kwargs)
self.source_class = source_class
self.field = field
self.expected_types = expected_types
self.actual_type = actual_type
SEQ_FIELD_TYPE_SUFFIXES = {
CheckedPVector: "PVector",
CheckedPSet: "PSet",
}
# Global dictionary to hold auto-generated field types: used for unpickling
_seq_field_types = {}
def _restore_seq_field_pickle(checked_class, item_type, data):
"""Unpickling function for auto-generated PVec/PSet field types."""
type_ = _seq_field_types[checked_class, item_type]
return _restore_pickle(type_, data)
def _types_to_names(types):
"""Convert a tuple of types to a human-readable string."""
return "".join(get_type(typ).__name__.capitalize() for typ in types)
def _make_seq_field_type(checked_class, item_type, item_invariant):
"""Create a subclass of the given checked class with the given item type."""
type_ = _seq_field_types.get((checked_class, item_type))
if type_ is not None:
return type_
class TheType(checked_class):
__type__ = item_type
__invariant__ = item_invariant
def __reduce__(self):
return (_restore_seq_field_pickle,
(checked_class, item_type, list(self)))
suffix = SEQ_FIELD_TYPE_SUFFIXES[checked_class]
TheType.__name__ = _types_to_names(TheType._checked_types) + suffix
_seq_field_types[checked_class, item_type] = TheType
return TheType
def _sequence_field(checked_class, item_type, optional, initial,
invariant=PFIELD_NO_INVARIANT,
item_invariant=PFIELD_NO_INVARIANT):
"""
Create checked field for either ``PSet`` or ``PVector``.
:param checked_class: ``CheckedPSet`` or ``CheckedPVector``.
:param item_type: The required type for the items in the set.
:param optional: If true, ``None`` can be used as a value for
this field.
:param initial: Initial value to pass to factory.
:return: A ``field`` containing a checked class.
"""
TheType = _make_seq_field_type(checked_class, item_type, item_invariant)
if optional:
def factory(argument, _factory_fields=None, ignore_extra=False):
if argument is None:
return None
else:
return TheType.create(argument, _factory_fields=_factory_fields, ignore_extra=ignore_extra)
else:
factory = TheType.create
return field(type=optional_type(TheType) if optional else TheType,
factory=factory, mandatory=True,
invariant=invariant,
initial=factory(initial))
def pset_field(item_type, optional=False, initial=(),
invariant=PFIELD_NO_INVARIANT,
item_invariant=PFIELD_NO_INVARIANT):
"""
Create checked ``PSet`` field.
:param item_type: The required type for the items in the set.
:param optional: If true, ``None`` can be used as a value for
this field.
:param initial: Initial value to pass to factory if no value is given
for the field.
:return: A ``field`` containing a ``CheckedPSet`` of the given type.
"""
return _sequence_field(CheckedPSet, item_type, optional, initial,
invariant=invariant,
item_invariant=item_invariant)
def pvector_field(item_type, optional=False, initial=(),
invariant=PFIELD_NO_INVARIANT,
item_invariant=PFIELD_NO_INVARIANT):
"""
Create checked ``PVector`` field.
:param item_type: The required type for the items in the vector.
:param optional: If true, ``None`` can be used as a value for
this field.
:param initial: Initial value to pass to factory if no value is given
for the field.
:return: A ``field`` containing a ``CheckedPVector`` of the given type.
"""
return _sequence_field(CheckedPVector, item_type, optional, initial,
invariant=invariant,
item_invariant=item_invariant)
_valid = lambda item: (True, "")
# Global dictionary to hold auto-generated field types: used for unpickling
_pmap_field_types = {}
def _restore_pmap_field_pickle(key_type, value_type, data):
"""Unpickling function for auto-generated PMap field types."""
type_ = _pmap_field_types[key_type, value_type]
return _restore_pickle(type_, data)
def _make_pmap_field_type(key_type, value_type):
"""Create a subclass of CheckedPMap with the given key and value types."""
type_ = _pmap_field_types.get((key_type, value_type))
if type_ is not None:
return type_
class TheMap(CheckedPMap):
__key_type__ = key_type
__value_type__ = value_type
def __reduce__(self):
return (_restore_pmap_field_pickle,
(self.__key_type__, self.__value_type__, dict(self)))
TheMap.__name__ = "{0}To{1}PMap".format(
_types_to_names(TheMap._checked_key_types),
_types_to_names(TheMap._checked_value_types))
_pmap_field_types[key_type, value_type] = TheMap
return TheMap
def pmap_field(key_type, value_type, optional=False, invariant=PFIELD_NO_INVARIANT):
"""
Create a checked ``PMap`` field.
    :param key_type: The required type for the keys of the map.
    :param value_type: The required type for the values of the map.
:param optional: If true, ``None`` can be used as a value for
this field.
:param invariant: Pass-through to ``field``.
:return: A ``field`` containing a ``CheckedPMap``.
"""
TheMap = _make_pmap_field_type(key_type, value_type)
if optional:
def factory(argument):
if argument is None:
return None
else:
return TheMap.create(argument)
else:
factory = TheMap.create
return field(mandatory=True, initial=TheMap(),
type=optional_type(TheMap) if optional else TheMap,
factory=factory, invariant=invariant)
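# A minimal usage sketch (illustrative; the record and field names are hypothetical,
# and the public names are imported from the installed pyrsistent package):
if __name__ == "__main__":
    from pyrsistent import PRecord, pmap_field, pvector_field
    class Inventory(PRecord):
        tags = pvector_field(str)
        counts = pmap_field(str, int)
    inv = Inventory(tags=["a", "b"], counts={"a": 1})
    assert list(inv.tags) == ["a", "b"]
    assert inv.counts["a"] == 1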
| 11,963 | Python | 34.927928 | 125 | 0.65084 |
omniverse-code/kit/exts/omni.kit.property.usd_clipboard_test/omni/kit/property/usd_clipboard_test/tests/__init__.py | from .test_property_context_menu import *
| 42 | Python | 20.49999 | 41 | 0.785714 |
omniverse-code/kit/exts/omni.kit.property.usd_clipboard_test/omni/kit/property/usd_clipboard_test/tests/test_property_context_menu.py | ## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
import omni.kit.test
import os
import sys
import unittest
import omni.kit.app
import omni.kit.window.property.managed_frame
from omni.kit.test.async_unittest import AsyncTestCase
import omni.usd
from omni.kit import ui_test
from pxr import Gf
from omni.kit.test_suite.helpers import (
open_stage,
get_test_data_path,
select_prims,
wait_stage_loading,
arrange_windows
)
class PropertyContextMenu(AsyncTestCase):
# Before running each test
async def setUp(self):
await arrange_windows("Stage", 64)
await open_stage(get_test_data_path(__name__, "usd/bound_shapes.usda"))
omni.kit.window.property.managed_frame.reset_collapsed_state()
omni.kit.window.property.managed_frame.set_collapsed_state("Property/Raw USD Properties", False)
# After running each test
async def tearDown(self):
await wait_stage_loading()
omni.kit.window.property.managed_frame.reset_collapsed_state()
# @unittest.skipIf(sys.platform.startswith("linux"), "Pyperclip fails on some TeamCity agents")
async def test_property_context_menu(self):
await ui_test.find("Content").focus()
stage_window = ui_test.find("Stage")
await stage_window.focus()
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
await wait_stage_loading()
# get prim attributes
cube_attr = stage.GetPrimAtPath("/World/Cube").GetAttribute('xformOp:translate')
cone_attr = stage.GetPrimAtPath("/World/Cone").GetAttribute('xformOp:translate')
# verify transforms different
        self.assertEqual(cube_attr.Get(), Gf.Vec3d(119.899608, -1.138346, -118.761261))
        self.assertEqual(cone_attr.Get(), Gf.Vec3d(0.0, 0.0, 0.0))
# select cube
await select_prims(["/World/Cube"])
await ui_test.human_delay()
# scroll window to xformOp:translate
ui_test.find("Property//Frame/**/Label[*].text=='xformOp:translate'").widget.scroll_here_y(0.5)
await ui_test.human_delay()
# right click on xformOp:translate
await ui_test.find("Property//Frame/**/Label[*].text=='xformOp:translate'").click(right_click=True)
await ui_test.human_delay()
# context menu copy
await ui_test.select_context_menu("Copy", offset=ui_test.Vec2(10, 10))
# select cone
await select_prims(["/World/Cone"])
await ui_test.human_delay()
# scroll window to xformOp:translate
ui_test.find("Property//Frame/**/Label[*].text=='xformOp:translate'").widget.scroll_here_y(0.5)
await ui_test.human_delay()
# right click on xformOp:translate
await ui_test.find("Property//Frame/**/Label[*].text=='xformOp:translate'").click(right_click=True)
await ui_test.human_delay()
# context menu paste
await ui_test.select_context_menu("Paste", offset=ui_test.Vec2(10, 10))
# verify transforms same
self.assertEqual(cube_attr.Get(), Gf.Vec3d(119.899608, -1.138346, -118.761261))
self.assertEqual(cone_attr.Get(), Gf.Vec3d(119.899608, -1.138346, -118.761261))
async def test_property_context_menu_paste(self):
await ui_test.find("Content").focus()
stage_window = ui_test.find("Stage")
await stage_window.focus()
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
await wait_stage_loading()
# select cube
await select_prims(["/World/Cube"])
await ui_test.human_delay(10)
# scroll window to xformOp:translate
ui_test.find("Property//Frame/**/Label[*].text=='xformOp:translate'").widget.scroll_here_y(0.5)
await ui_test.human_delay()
        # verify code on clipboard is NOT getting executed
omni.kit.clipboard.copy("omni.kit.stage_templates.new_stage()")
# right click on xformOp:translate
await ui_test.find("Property//Frame/**/Label[*].text=='xformOp:translate'").click(right_click=True)
await ui_test.human_delay()
await ui_test.find("Property//Frame/**/Label[*].text=='xformOp:translate'").click()
# if code was executed a new stage will have been created, so verify prims
await ui_test.human_delay(250)
prims = [prim.GetPath().pathString for prim in stage.TraverseAll() if not omni.usd.is_hidden_type(prim)]
prims.sort()
self.assertEqual(prims, ['/World', '/World/Cone', '/World/Cube', '/World/Cylinder', '/World/Looks', '/World/Looks/OmniGlass', '/World/Looks/OmniGlass/Shader', '/World/Looks/OmniPBR', '/World/Looks/OmniPBR/Shader', '/World/Looks/OmniSurface_Plastic', '/World/Looks/OmniSurface_Plastic/Shader', '/World/Sphere', '/World/defaultLight'])
| 5,196 | Python | 41.950413 | 341 | 0.666282 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.settings/omni/kit/viewport/menubar/settings/setting_menu_container.py | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["SettingMenuContainer"]
from omni.kit.viewport.menubar.core import (
IconMenuDelegate,
SliderMenuDelegate,
CheckboxMenuDelegate,
SettingModel,
SettingModelWithDefaultValue,
ViewportMenuContainer,
FloatArraySettingColorMenuItem,
menu_is_tearable,
)
from .menu_item.settings_renderer_menu_item import SettingsRendererMenuItem
from .menu_item.settings_transform_manipulator import SettingsTransformManipulator
from .style import UI_STYLE
import carb
import carb.settings
import omni.ui as ui
from omni.ui import color as cl
from typing import Any, Dict, List, Union
from functools import partial
class ViewportSetting:
def __init__(self, key: str, default: Any, set_default: bool = True, read_incoming: bool = False):
settings = carb.settings.get_settings()
if read_incoming:
incoming_default = settings.get(key)
if incoming_default is not None:
default = incoming_default
self.key = key
self.default = default
if set_default:
settings.set_default(self.key, self.default)
def reset(self, settings):
settings.set(self.key, self.default)
class SelectionColorSetting(ViewportSetting):
OUTLINE = "/persistent/app/viewport/outline/color"
INTERSECTION = "/persistent/app/viewport/outline/intersection/color"
def __init__(self, default: Any):
super().__init__(self.OUTLINE, default, False)
self.index = 1020
def reset(self, settings):
float_array = settings.get(self.key)
float_array = float_array[0 : self.index] + self.default + float_array[self.index + len(self.default) :]
carb.settings.get_settings().set(self.OUTLINE, float_array)
carb.settings.get_settings().set(self.INTERSECTION, self.default)
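    # Illustrative example of the splice above: with index=1020 and a 4-float default,
    # reset() rewrites float_array[1020:1024] in place and leaves the rest of the
    # persisted outline color array untouched.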
class VIEWPORT_SETTINGS:
NAVIGATION_SPEED = ViewportSetting("/persistent/app/viewport/camMoveVelocity", 5.0)
NAVIGATION_SPEED_MULTAMOUNT = ViewportSetting("/persistent/app/viewport/camVelocityScalerMultAmount", 1.1)
SHOW_SPEED_ON_START = ViewportSetting("/persistent/app/viewport/camShowSpeedOnStart", True)
ADAPTIVE_SPEED = ViewportSetting("/persistent/app/viewport/camVelocityCOINormalization", 0.0)
GAMEPAD_CONTROL = ViewportSetting("/persistent/app/omniverse/gamepadCameraControl", True)
CAMERA_STOP_ON_UP = ViewportSetting("/persistent/app/viewport/camStopOnMouseUp", True)
CAM_UPDATE_CLAMPING = ViewportSetting("/ext/omni.kit.manipulator.camera/clampUpdates", 0.15, read_incoming=True)
INERTIA_ENABLED = ViewportSetting("/persistent/app/viewport/camInertiaEnabled", False)
INERTIA_ANOUNT = ViewportSetting("/persistent/app/viewport/camInertiaAmount", 0.55)
ROTATION_SMOOTH_ENABLED = ViewportSetting("/persistent/app/viewport/camRotSmoothEnabled", True)
ROTATION_SMOOTH_SCALE = ViewportSetting("/persistent/app/viewport/camRotSmoothScale", 20.0)
ROTATION_SMOOTH_ALWAYS = ViewportSetting("/persistent/app/viewport/camRotSmoothAlways", False)
GESTURE_ENABLED = ViewportSetting("/persistent/app/viewport/camGestureEnabled", False)
GESTURE_TIME = ViewportSetting("/persistent/app/viewport/camGestureTime", 0.12)
GESTURE_RADIUS = ViewportSetting("/persistent/app/viewport/camGestureRadius", 20)
SELECTION_LINE_WIDTH = ViewportSetting("/persistent/app/viewport/outline/width", 2)
GRID_LINE_WIDTH = ViewportSetting("/persistent/app/viewport/grid/lineWidth", 1)
GRID_SCALE = ViewportSetting("/persistent/app/viewport/grid/scale", 100.0)
GRID_FADE = ViewportSetting("/persistent/app/viewport/grid/lineFadeOutStartDistance", 10.0)
GIZMO_LINE_WIDTH = ViewportSetting("/persistent/app/viewport/gizmo/lineWidth", 1.0)
GIZMO_SCALE_ENABLED = ViewportSetting("/persistent/app/viewport/gizmo/constantScaleEnabled", True)
GIZMO_SCALE = ViewportSetting("/persistent/app/viewport/gizmo/constantScale", 10.0)
GIZMO_GLOBAL_SCALE = ViewportSetting("/persistent/app/viewport/gizmo/scale", 1.0)
GIZMO_MIN_FADEOUT = ViewportSetting("/persistent/app/viewport/gizmo/minFadeOut", 1.0)
GIZMO_MAX_FADEOUT = ViewportSetting("/persistent/app/viewport/gizmo/maxFadeOut", 50)
UI_BACKGROUND_OPACITY = ViewportSetting("/persistent/app/viewport/ui/background/opacity", 1.0)
UI_BRIGHTNESS = ViewportSetting("/persistent/app/viewport/ui/brightness", 0.84)
OBJECT_CENTRIC = ViewportSetting("/persistent/app/viewport/objectCentricNavigation", 0)
DOUBLE_CLICK_COI = ViewportSetting("/persistent/app/viewport/coiDoubleClick", False)
BBOX_LINE_COLOR = ViewportSetting("/persistent/app/viewport/boundingBoxes/lineColor", [0.886, 0.447, 0.447])
GRID_LINE_COLOR = ViewportSetting("/persistent/app/viewport/grid/lineColor", [0.3, 0.3, 0.3])
OUTLINE_COLOR = SelectionColorSetting([1.0, 0.6, 0.0, 1.0])
LOOK_SPEED_HORIZ = ViewportSetting("/persistent/exts/omni.kit.manipulator.camera/lookSpeed/0", 180.0)
LOOK_SPEED_VERT = ViewportSetting("/persistent/exts/omni.kit.manipulator.camera/lookSpeed/1", 90.0)
TUMBLE_SPEED = ViewportSetting("/persistent/exts/omni.kit.manipulator.camera/tumbleSpeed", 360.0)
ZOOM_SPEED = ViewportSetting("/persistent/exts/omni.kit.manipulator.camera/moveSpeed/2", 1.0)
FLY_IGNORE_VIEW_DIRECTION = ViewportSetting("/persistent/exts/omni.kit.manipulator.camera/flyViewLock", False)
class ViewportSettingModel(SettingModelWithDefaultValue):
def __init__(self, viewport_setting: ViewportSetting, draggable: bool = False):
super().__init__(viewport_setting.key, viewport_setting.default, draggable=draggable)
CAM_VELOCITY_MIN = "/persistent/app/viewport/camVelocityMin"
CAM_VELOCITY_MAX = "/persistent/app/viewport/camVelocityMax"
CAM_VELOCITY_SCALER_MIN = "/persistent/app/viewport/camVelocityScalerMin"
CAM_VELOCITY_SCALER_MAX = "/persistent/app/viewport/camVelocityScalerMax"
SETTING_UI_BRIGHTNESS_MIN = "/app/viewport/ui/minBrightness"
SETTING_UI_BRIGHTNESS_MAX = "/app/viewport/ui/maxBrightness"
BRIGHTNESS_VALUE_RANGE_MIN = 0.25
BRIGHTNESS_VALUE_RANGE_MAX = 1.0
OUTLINE_COLOR_INDEX = 1020
class SelectionColorMenuItem(FloatArraySettingColorMenuItem):
def __init__(self):
setting = VIEWPORT_SETTINGS.OUTLINE_COLOR
super().__init__(
setting.key, setting.default, name="Selection Color", start_index=setting.index, has_reset=True
)
def on_color_changed(self, colors: List[float]) -> None:
# Set the default exterior color
super().on_color_changed(colors)
# Set the interior intersection color too
carb.settings.get_settings().set(VIEWPORT_SETTINGS.OUTLINE_COLOR.INTERSECTION, colors)
class BoundingColorMenuItem(FloatArraySettingColorMenuItem):
def __init__(self):
setting = VIEWPORT_SETTINGS.BBOX_LINE_COLOR
super().__init__(setting.key, setting.default, name="Bounding Box Color", has_reset=True)
class GridColorMenuItem(FloatArraySettingColorMenuItem):
def __init__(self):
setting = VIEWPORT_SETTINGS.GRID_LINE_COLOR
super().__init__(setting.key, setting.default, name="Grid Color", has_reset=True)
class MenuContext:
def __init__(self):
self.__renderer_menu_item: Union[SettingsRendererMenuItem, None] = None
self.__settings = carb.settings.get_settings()
self.__carb_subscriptions = []
@property
def settings(self):
return self.__settings
@property
def renderer_menu_item(self) -> Union[SettingsRendererMenuItem, None]:
return self.__renderer_menu_item
@renderer_menu_item.setter
def renderer_menu_item(self, render_menu_item: Union[SettingsRendererMenuItem, None]) -> None:
if self.__renderer_menu_item:
self.__renderer_menu_item.destroy()
self.__renderer_menu_item = render_menu_item
def add_carb_subscription(self, carb_sub: carb.settings.SubscriptionId):
self.__carb_subscriptions.append(carb_sub)
def destroy(self):
self.renderer_menu_item = None
for sub in self.__carb_subscriptions:
sub.unsubscribe()
self.__carb_subscriptions = []
class SettingMenuContainer(ViewportMenuContainer):
"""The menu with the viewport settings"""
def __init__(self):
super().__init__(
name="Settings",
delegate=IconMenuDelegate("Settings"),
visible_setting_path="/exts/omni.kit.viewport.menubar.settings/visible",
order_setting_path="/exts/omni.kit.viewport.menubar.settings/order",
style=UI_STYLE,
)
self.__menu_context: Dict[str, MenuContext] = {}
settings = carb.settings.get_settings()
settings.set_default(CAM_VELOCITY_MIN, 0.01)
settings.set_default(CAM_VELOCITY_MAX, 50)
settings.set_default(CAM_VELOCITY_SCALER_MIN, 1)
settings.set_default(CAM_VELOCITY_SCALER_MAX, 10)
def destroy(self):
for menu_ctx in self.__menu_context.values():
menu_ctx.destroy()
self.__menu_context = {}
super().destroy()
def build_fn(self, factory: Dict):
ui.Menu(self.name, delegate=self._delegate, on_build_fn=partial(self._build_menu, factory), style=self._style)
def _build_menu(self, factory: Dict) -> None:
viewport_api = factory.get("viewport_api")
if not viewport_api:
return
viewport_api_id = viewport_api.id
menu_ctx = self.__menu_context.get(viewport_api_id)
if menu_ctx:
menu_ctx.destroy()
menu_ctx = MenuContext()
self.__menu_context[viewport_api_id] = menu_ctx
ui.Menu(
"Navigation",
on_build_fn=lambda: self.__build_navigation_menu_items(menu_ctx),
tearable=menu_is_tearable("omni.kit.viewport.menubar.settings.Navigation"),
)
ui.Menu(
"Selection",
on_build_fn=lambda: self.__build_selection_menu_items(menu_ctx),
tearable=menu_is_tearable("omni.kit.viewport.menubar.settings.Selection"),
)
ui.Menu(
"Grid",
on_build_fn=lambda: self.__build_grid_menu_items(menu_ctx),
tearable=menu_is_tearable("omni.kit.viewport.menubar.settings.Grid"),
)
ui.Menu(
"Gizmos",
on_build_fn=lambda: self.__build_gizmo_menu_items(menu_ctx),
tearable=menu_is_tearable("omni.kit.viewport.menubar.settings.Gizmos"),
)
menu_ctx.renderer_menu_item = SettingsRendererMenuItem(
"Viewport", factory=factory, tearable=menu_is_tearable("omni.kit.viewport.menubar.settings.Viewport")
)
ui.Menu(
"Viewport UI",
on_build_fn=lambda: self.__build_ui_menu_items(menu_ctx),
tearable=menu_is_tearable("omni.kit.viewport.menubar.settings.ViewportUI"),
)
SettingsTransformManipulator(
"Manipulator Transform",
factory=factory,
tearable=menu_is_tearable("omni.kit.viewport.menubar.settings.ManipulatorTransform"),
)
ui.Separator()
ui.MenuItem(
"Reset To Defaults",
hide_on_click=False,
triggered_fn=lambda vid=viewport_api_id: self.__reset_settings(vid),
)
ui.Separator()
ui.MenuItem("Preferences", hide_on_click=False, triggered_fn=self._show_viewport_preference)
def __build_navigation_menu_items(self, menu_ctx: MenuContext) -> None:
settings = carb.settings.get_settings()
ui.MenuItem(
"Navigation Speed",
hide_on_click=False,
delegate=SliderMenuDelegate(
model=ViewportSettingModel(VIEWPORT_SETTINGS.NAVIGATION_SPEED, draggable=True),
min=settings.get(CAM_VELOCITY_MIN),
max=settings.get(CAM_VELOCITY_MAX),
tooltip="Set the Fly Mode navigation speed",
has_reset=True,
),
)
ui.MenuItem(
"Navigation Speed Scalar",
hide_on_click=False,
delegate=SliderMenuDelegate(
model=ViewportSettingModel(VIEWPORT_SETTINGS.NAVIGATION_SPEED_MULTAMOUNT, draggable=True),
min=settings.get(CAM_VELOCITY_SCALER_MIN),
max=settings.get(CAM_VELOCITY_SCALER_MAX),
tooltip="Change the Fly Mode navigation speed by this amount",
has_reset=True,
),
)
ui.MenuItem(
"Lock Navigation Height",
hide_on_click=False,
delegate=CheckboxMenuDelegate(
model=ViewportSettingModel(VIEWPORT_SETTINGS.FLY_IGNORE_VIEW_DIRECTION),
tooltip="Whether forward/backward and up/down movements ignore camera-view direction (similar to left/right strafe)",
has_reset=True,
)
)
ui.MenuItem(
"Gamepad Camera Control",
hide_on_click=False,
delegate=CheckboxMenuDelegate(
model=ViewportSettingModel(VIEWPORT_SETTINGS.GAMEPAD_CONTROL),
tooltip="Enable gamepad navigation for this Viewport",
has_reset=True,
),
)
ui.Separator()
ui.MenuItem(
"Object Centric Navigation",
hide_on_click=False,
delegate=CheckboxMenuDelegate(
model=ViewportSettingModel(VIEWPORT_SETTINGS.OBJECT_CENTRIC),
tooltip="Set camera's center of interest to center of object under mouse when camera manipulation begins",
has_reset=True,
),
)
ui.MenuItem(
"Double Click Sets Interest",
hide_on_click=False,
delegate=CheckboxMenuDelegate(
model=ViewportSettingModel(VIEWPORT_SETTINGS.DOUBLE_CLICK_COI),
tooltip="Double click will set the center of interest to the object under mouse." +
"\nEnabling this may make click-to-select less responsive.",
has_reset=True,
)
)
ui.Separator()
self.__build_advanced_navigation_items(menu_ctx)
ui.Separator()
self.__build_navigation_speed_items(menu_ctx)
self.__build_debug_settings(menu_ctx)
def __build_navigation_speed_items(self, menu_ctx: MenuContext):
ui.MenuItem(
"Look Speed Horizontal",
hide_on_click=False,
delegate=SliderMenuDelegate(
model=ViewportSettingModel(VIEWPORT_SETTINGS.LOOK_SPEED_HORIZ, draggable=True),
min=0,
max=360,
step=1,
tooltip="Set the Look Mode navigation speed as degrees rotated over a drag across the Viepwort horizonatally.",
has_reset=True,
),
)
ui.MenuItem(
"Look Speed Vertical",
hide_on_click=False,
delegate=SliderMenuDelegate(
model=ViewportSettingModel(VIEWPORT_SETTINGS.LOOK_SPEED_VERT, draggable=True),
min=0,
max=180,
step=1,
tooltip="Set the Look Mode navigation speed as degrees rotated over a drag across the Viepwort vertically.",
has_reset=True,
),
)
ui.MenuItem(
"Tumble Speed",
hide_on_click=False,
delegate=SliderMenuDelegate(
model=ViewportSettingModel(VIEWPORT_SETTINGS.TUMBLE_SPEED, draggable=True),
min=0,
max=720,
step=1,
tooltip="Set the Tumble Mode navigation speed as degrees rotated over a drag across the Viepwort.",
has_reset=True,
),
)
ui.MenuItem(
"Zoom Speed",
hide_on_click=False,
delegate=SliderMenuDelegate(
model=ViewportSettingModel(VIEWPORT_SETTINGS.ZOOM_SPEED, draggable=True),
min=0,
max=2,
tooltip="Set the Zoom Mode navigation speed",
has_reset=True,
),
)
def __build_advanced_navigation_items(self, menu_ctx: MenuContext):
settings = menu_ctx.settings
inertia_enable_model = ViewportSettingModel(VIEWPORT_SETTINGS.INERTIA_ENABLED)
ui.MenuItem(
"Inertia Mode",
hide_on_click=False,
delegate=CheckboxMenuDelegate(
model=inertia_enable_model,
tooltip="Enable advanced settings to control camera inertia and gestures for mouse manipulation",
has_reset=True,
),
)
inertia_menu_item = ui.MenuItem(
"Camera Inertia",
hide_on_click=False,
delegate=SliderMenuDelegate(
                model=ViewportSettingModel(VIEWPORT_SETTINGS.INERTIA_AMOUNT, draggable=True),
tooltip="Seconds the inertia is active for",
min=0.0,
max=4.0,
has_reset=True,
),
)
# Show an entry for enabling disabling inertia on all modes if this value is set
inertia_modes = settings.get("/exts/omni.kit.manipulator.camera/inertiaModesEnabled")
inertia_modes_menu_item = None
if inertia_modes:
            # Odd setting to control inertia always, but it's what View was using, so preserve it as it is persistent
            legacy_all_inertia_model = ViewportSettingModel(VIEWPORT_SETTINGS.ROTATION_SMOOTH_ALWAYS)
inertia_modes_menu_item = ui.MenuItem(
"Inertia For Other Movements",
hide_on_click=False,
delegate=CheckboxMenuDelegate(
                    model=legacy_all_inertia_model,
tooltip="Apply inertia to other camera movements or only WASD navigation",
has_reset=True,
),
)
def _toggle_inertia_always(model: ui.AbstractValueModel):
if model.as_bool:
                    # Allow a user-specified preference to enable certain modes only, otherwise default to all
inertia_modes = settings.get("/app/viewport/inertiaModesEnabled")
inertia_modes = inertia_modes or [1, 1, 1, 1]
else:
inertia_modes = [1, 0, 0, 0]
settings.set("/exts/omni.kit.manipulator.camera/inertiaModesEnabled", inertia_modes)
            _toggle_inertia_always(legacy_all_inertia_model)
            menu_ctx.add_carb_subscription(
                legacy_all_inertia_model.subscribe_value_changed_fn(_toggle_inertia_always)
            )
def __on_inertial_changed(model: ui.AbstractValueModel):
inertia_enabled = model.as_bool
inertia_menu_item.visible = inertia_enabled
if inertia_modes_menu_item:
inertia_modes_menu_item.visible = inertia_enabled
# Sync the state now
__on_inertial_changed(inertia_enable_model)
menu_ctx.add_carb_subscription(
inertia_enable_model.subscribe_value_changed_fn(__on_inertial_changed)
)
def __build_debug_settings(self, menu_ctx: MenuContext):
settings = menu_ctx.settings
_added_initial_separator = False
def add_initial_separator():
nonlocal _added_initial_separator
if not _added_initial_separator:
_added_initial_separator = True
ui.Separator()
if settings.get("/exts/omni.kit.viewport.menubar.settings/show/camera/clamping"):
add_initial_separator()
ui.MenuItem(
"Animation clamp",
hide_on_click=False,
delegate=SliderMenuDelegate(
model=ViewportSettingModel(VIEWPORT_SETTINGS.CAM_UPDATE_CLAMPING),
tooltip="Clamp animation to this maximum number of seconds",
min=0.0001,
max=1.0,
has_reset=True,
),
)
def __build_selection_menu_items(self, menu_ctx: MenuContext):
SelectionColorMenuItem()
ui.MenuItem(
"Selection Line Width",
hide_on_click=False,
delegate=SliderMenuDelegate(
model=ViewportSettingModel(VIEWPORT_SETTINGS.SELECTION_LINE_WIDTH, draggable=True),
min=1,
max=15,
slider_class=ui.IntSlider,
has_reset=True,
),
)
BoundingColorMenuItem()
def __build_grid_menu_items(self, menu_ctx: MenuContext):
GridColorMenuItem()
ui.MenuItem(
"Grid Line Width",
hide_on_click=False,
delegate=SliderMenuDelegate(
model=ViewportSettingModel(VIEWPORT_SETTINGS.GRID_LINE_WIDTH, draggable=True),
min=1,
max=10,
slider_class=ui.IntSlider,
has_reset=True,
),
)
ui.MenuItem(
"Grid Size",
hide_on_click=False,
delegate=SliderMenuDelegate(
model=ViewportSettingModel(VIEWPORT_SETTINGS.GRID_SCALE, draggable=True),
min=1.0,
max=1000.0,
has_reset=True,
),
)
fadeout_model = ViewportSettingModel(VIEWPORT_SETTINGS.GRID_FADE, draggable=True)
def __on_fadeout_changed(model: ui.AbstractValueModel):
carb.settings.get_settings().set("/persistent/app/viewport/grid/lineFadeOutEndDistance", model.as_float * 4)
ui.MenuItem(
"Grid Fade",
hide_on_click=False,
delegate=SliderMenuDelegate(model=fadeout_model, min=0.5, max=50.0, has_reset=True),
)
menu_ctx.add_carb_subscription(
fadeout_model.subscribe_value_changed_fn(__on_fadeout_changed)
)
def __build_ui_menu_items(self, menu_ctx: MenuContext):
def __ui_background_opacity_changed(model: ui.AbstractValueModel) -> None:
alpha = int(model.as_float * 255)
name = "viewport_menubar_background"
color = cl._find(name)
color = (color & 0x00FFFFFF) + (alpha << 24)
cl._store(name, color)
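        # Illustrative note on the helper above: only the alpha byte of the stored color
        # is replaced, e.g. an opacity of 0.5 maps to alpha 127 while the RGB bytes are
        # preserved.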
ui_background_opacity_model = ViewportSettingModel(VIEWPORT_SETTINGS.UI_BACKGROUND_OPACITY, draggable=True)
ui.MenuItem(
"UI Background Opacity",
hide_on_click=False,
delegate=SliderMenuDelegate(model=ui_background_opacity_model, min=0.0, max=1.0, has_reset=True),
)
__ui_background_opacity_changed(ui_background_opacity_model)
settings = carb.settings.get_settings()
min_brightness = settings.get(SETTING_UI_BRIGHTNESS_MIN)
max_brightness = settings.get(SETTING_UI_BRIGHTNESS_MAX)
def __ui_brightness_changed(model: ui.AbstractValueModel) -> None:
def __gray_to_color(gray: int):
return 0xFF000000 + (gray << 16) + (gray << 8) + gray
value = (model.as_float - BRIGHTNESS_VALUE_RANGE_MIN) / (
BRIGHTNESS_VALUE_RANGE_MAX - BRIGHTNESS_VALUE_RANGE_MIN
)
light_gray = int(value * 255)
color = __gray_to_color(light_gray)
cl._store("viewport_menubar_light", color)
medium_gray = int(light_gray * 0.539)
color = __gray_to_color(medium_gray)
cl._store("viewport_menubar_medium", color)
ui_brightness_model = ViewportSettingModel(VIEWPORT_SETTINGS.UI_BRIGHTNESS, draggable=True)
ui.MenuItem(
"UI Control Brightness",
hide_on_click=False,
delegate=SliderMenuDelegate(model=ui_brightness_model, min=min_brightness, max=max_brightness, has_reset=True),
)
__ui_brightness_changed(ui_brightness_model)
menu_ctx.add_carb_subscription(
ui_background_opacity_model.subscribe_value_changed_fn(__ui_background_opacity_changed)
)
menu_ctx.add_carb_subscription(
ui_brightness_model.subscribe_value_changed_fn(__ui_brightness_changed)
)
def __build_gizmo_menu_items(self, menu_ctx: MenuContext):
ui.MenuItem(
"Gizmo Line Width",
hide_on_click=False,
delegate=SliderMenuDelegate(
model=ViewportSettingModel(VIEWPORT_SETTINGS.GIZMO_LINE_WIDTH, draggable=True),
min=1.0,
max=25.0,
has_reset=True,
),
)
scale_enabled_model = ViewportSettingModel(VIEWPORT_SETTINGS.GIZMO_SCALE_ENABLED)
ui.MenuItem(
"Gizmo Constant Scale Enabled",
hide_on_click=False,
delegate=CheckboxMenuDelegate(model=scale_enabled_model, has_reset=True),
)
constant_scale_menu_item = ui.MenuItem(
"Gizmo Constant Scale",
hide_on_click=False,
delegate=SliderMenuDelegate(
model=ViewportSettingModel(VIEWPORT_SETTINGS.GIZMO_SCALE, draggable=True),
min=0.5,
max=100.0,
has_reset=True,
),
)
global_scale_menu_item = ui.MenuItem(
"Gizmo Camera Scale" if scale_enabled_model.as_bool else "Gizmo Global Scale",
hide_on_click=False,
delegate=SliderMenuDelegate(
model=ViewportSettingModel(VIEWPORT_SETTINGS.GIZMO_GLOBAL_SCALE, draggable=True),
min=0.01,
max=4.0,
has_reset=True,
),
)
def __on_gizmo_enabled_changed(model: SettingModel):
is_constant_scale = model.as_bool
constant_scale_menu_item.visible = is_constant_scale
global_scale_menu_item.text = "Gizmo Camera Scale" if is_constant_scale else "Gizmo Global Scale"
__on_gizmo_enabled_changed(scale_enabled_model)
menu_ctx.add_carb_subscription(
scale_enabled_model.subscribe_value_changed_fn(__on_gizmo_enabled_changed)
)
ui.MenuItem(
"Gizmo Min FadeOut",
hide_on_click=False,
delegate=SliderMenuDelegate(
model=ViewportSettingModel(VIEWPORT_SETTINGS.GIZMO_MIN_FADEOUT, draggable=True),
min=1.0,
max=1000.0,
has_reset=True,
),
)
ui.MenuItem(
"Gizmo Max FadeOut",
hide_on_click=False,
delegate=SliderMenuDelegate(
model=ViewportSettingModel(VIEWPORT_SETTINGS.GIZMO_MAX_FADEOUT, draggable=True),
min=1.0,
max=1000.0,
has_reset=True,
),
)
def __reset_settings(self, viewport_api_id: str):
settings = carb.settings.get_settings()
for value in VIEWPORT_SETTINGS.__dict__.values():
if isinstance(value, ViewportSetting):
value.reset(settings)
# Only reset renderer settings of current viewport
menu_ctx = self.__menu_context.get(viewport_api_id)
renderer_menu_item = menu_ctx.renderer_menu_item if menu_ctx else None
if renderer_menu_item:
renderer_menu_item.reset()
def _show_viewport_preference(self) -> None:
try:
import omni.kit.window.preferences as preferences
import asyncio
async def focus_async():
pref_window = ui.Workspace.get_window("Preferences")
if pref_window:
pref_window.focus()
PAGE_TITLE = "Viewport"
inst = preferences.get_instance()
if not inst:
carb.log_error("Preferences extension is not loaded yet")
return
pages = preferences.get_page_list()
for page in pages:
if page.get_title() == PAGE_TITLE:
inst.select_page(page)
# Show the Window
inst.show_preferences_window()
# Force the tab to be the active/focused tab (this currently needs to be done in async)
asyncio.ensure_future(focus_async())
return page
else:
carb.log_error("Viewport Preferences page not found!")
except ImportError:
carb.log_error("omni.kit.window.preferences not enabled!")
| 28,966 | Python | 40.263533 | 133 | 0.617483 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.settings/omni/kit/viewport/menubar/settings/style.py | from omni.ui import color as cl
from omni.ui import constant as fl
from pathlib import Path
CURRENT_PATH = Path(__file__).parent
ICON_PATH = CURRENT_PATH.parent.parent.parent.parent.parent.joinpath("data").joinpath("icons")
UI_STYLE = {
"Menu.Item.Icon::Settings": {"image_url": f"{ICON_PATH}/viewport_settings.svg"},
"ResolutionLink": {"background_color": 0, "margin": 0, "padding": 2},
"ResolutionLink.Image": {"image_url": f"{ICON_PATH}/link_dark.svg", "margin": 0},
"ResolutionLink.Image:checked": {"image_url": f"{ICON_PATH}/link.svg"},
"ComboBox::ratio": {"background_color": 0x0, "padding": 4, "margin": 0},
"Menu.Item.Button::save": {"padding": 0, "margin": 0, "background_color": 0},
"Menu.Item.Button.Image::save": {"image_url": f"{ICON_PATH}/save.svg", "color": cl.viewport_menubar_light},
"Menu.Item.Button.Image::save:checked": {"color": cl.shade(cl("#0697cd"))},
"Ratio.Background": {"background_color": 0xFF444444, "border_color": 0xFFA1701B, "border_width": 1},
"Resolution.Text": {"color": cl.input_hint},
"Resolution.Name": {"color": cl.viewport_menubar_light},
"Resolution.Del": {"image_url": f"{ICON_PATH}/delete.svg"},
}
cl.save_background = cl.shade(cl("#1F2123"))
cl.input_hint = cl.shade(cl('#5A5A5A'))
SAVE_WINDOW_STYLE = {
"Window": {"secondary_background_color": 0x0},
"Titlebar.Background": {"background_color": cl.save_background},
"Input.Hint": {"color": cl.input_hint},
"Image::close": {"image_url": f"{ICON_PATH}/close.svg"},
"Button": {"background_color": cl.save_background},
}
| 1,585 | Python | 45.647057 | 111 | 0.65489 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.settings/omni/kit/viewport/menubar/settings/extension.py | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["ViewportSettingsMenuBarExtension"]
from .setting_menu_container import SettingMenuContainer
import omni.ext
class ViewportSettingsMenuBarExtension(omni.ext.IExt):
"""The Entry Point for the Viewport Settings in Viewport Menu Bar"""
def on_startup(self, ext_id):
self._settings_menu = SettingMenuContainer()
def on_shutdown(self):
self._settings_menu.destroy()
self._settings_menu = None
| 873 | Python | 35.416665 | 76 | 0.761741 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.settings/omni/kit/viewport/menubar/settings/menu_item/settings_transform_manipulator.py | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["SettingsTransformManipulator"]
from omni.kit.viewport.menubar.core import (
CheckboxMenuDelegate,
ComboBoxMenuDelegate,
SliderMenuDelegate,
SettingComboBoxModel,
ComboBoxItem,
SettingModelWithDefaultValue,
ResetHelper,
)
import omni.ui as ui
import carb.settings
from typing import Any, Dict, Tuple, List, Optional, Union
from functools import partial
SETTING_SCALE = "/persistent/exts/omni.kit.manipulator.transform/manipulator/scaleMultiplier"
SETTING_FREE_ROTATION_ENABLED = "/persistent/exts/omni.kit.manipulator.transform/manipulator/freeRotationEnabled"
SETTING_FREE_ROTATION_TYPE = "/persistent/exts/omni.kit.manipulator.transform/manipulator/freeRotationType"
SETTING_INTERSECTION_THICKNESS = "/persistent/exts/omni.kit.manipulator.transform/manipulator/intersectionThickness"
FREE_ROTATION_TYPE_CLAMPED = "Clamped"
FREE_ROTATION_TYPE_CONTINUOUS = "Continuous"
MENU_WIDTH = 350
class _ManipulatorRotationTypeModel(SettingComboBoxModel, ResetHelper):
def __init__(self):
types = [FREE_ROTATION_TYPE_CLAMPED, FREE_ROTATION_TYPE_CONTINUOUS]
super().__init__(SETTING_FREE_ROTATION_TYPE, types)
def _on_current_item_changed(self, item: ComboBoxItem) -> None:
super()._on_current_item_changed(item)
self._update_reset_button()
def get_default(self):
return FREE_ROTATION_TYPE_CLAMPED
def get_value(self):
settings = carb.settings.get_settings()
return settings.get(SETTING_FREE_ROTATION_TYPE)
def restore_default(self) -> None:
current_index = self.current_index
if current_index:
current = current_index.as_int
items = self.get_item_children(None)
# Early exit if the model is already correct
if items[current].value == FREE_ROTATION_TYPE_CLAMPED:
return
# Iterate all items, and select the first match to the real value
for index, item in enumerate(items):
if item.value == FREE_ROTATION_TYPE_CLAMPED:
current_index.set_value(index)
return
class SettingsTransformManipulator(ui.Menu):
"""The menu with the transform manipulator settings"""
def __init__(self, text: str = "", factory: Dict = {}, **kwargs):
settings = carb.settings.get_settings()
settings.set_default_float(SETTING_SCALE, 1.4)
settings.set_default_bool(SETTING_FREE_ROTATION_ENABLED, True)
settings.set_default_string(SETTING_FREE_ROTATION_TYPE, FREE_ROTATION_TYPE_CLAMPED)
settings.set_default_float(SETTING_INTERSECTION_THICKNESS, 10.0)
super().__init__(text, on_build_fn=partial(self.build_fn, factory), **kwargs)
def build_fn(self, factory: Dict):
model = SettingModelWithDefaultValue(SETTING_SCALE, 1.4, draggable=True)
ui.MenuItem(
"Transform Manipulator Scale",
hide_on_click=False,
delegate=SliderMenuDelegate(
model=model,
width=MENU_WIDTH,
min=0.0,
max=25.0,
has_reset=True,
),
)
model = SettingModelWithDefaultValue(SETTING_FREE_ROTATION_ENABLED, True, draggable=True)
ui.MenuItem(
"Enable Free Rotation",
hide_on_click=False,
delegate=CheckboxMenuDelegate(
model=model,
width=MENU_WIDTH,
has_reset=True,
),
)
model = _ManipulatorRotationTypeModel()
ui.MenuItem(
"Free Rotation Type",
hide_on_click=False,
delegate=ComboBoxMenuDelegate(
model=model,
width=MENU_WIDTH,
has_reset=True,
),
)
model = SettingModelWithDefaultValue(SETTING_INTERSECTION_THICKNESS, 10.0, True)
ui.MenuItem(
"Manipulator Intersection Thickness",
hide_on_click=False,
delegate=SliderMenuDelegate(
model=model,
width=MENU_WIDTH,
min=1.0,
max=50.0,
has_reset=True,
),
)
| 4,657 | Python | 34.830769 | 116 | 0.640326 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.settings/omni/kit/viewport/menubar/settings/menu_item/settings_renderer_menu_item.py | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["SettingsRendererMenuItem"]
from .custom_resolution.custom_resolution_menu_item import CustomResolutionMenuItem
from .resolution_collection.model import ComboBoxResolutionModel
from .resolution_collection.menu import ResolutionCollectionMenu
from omni.kit.viewport.menubar.core import (
ViewportMenuItem,
CheckboxMenuDelegate,
ComboBoxMenuDelegate,
ComboBoxModel,
SettingComboBoxModel,
ComboBoxItem,
ResetHelper,
RadioMenuCollection,
)
import omni.ui as ui
import omni.kit.app
import carb
from pxr import Sdf
from typing import Any, Dict, Tuple, List, Optional, Union
from functools import partial
SETTING_APERTURE = "/app/hydra/aperture/conform"
SETTING_RENDER_SCALE_LIST = "/app/renderer/resolution/multiplierList"
def _resolve_viewport_setting(viewport_id: str, setting_name: str, isettings: carb.settings.ISettings,
legacy_key: Union[str, None] = None):
# Resolve a default Viewport setting from the most specific to the most general
# /app/viewport/Viewport/Viewport0/setting => Startup value for this specific Viewport
    # /app/viewport/defaults/setting => Startup value targeting all Viewports
    # Next check if a non-persistent viewport-specific default exists via toml / start-up settings
dflt_setting_key = f"/app/viewport/{viewport_id}/{setting_name}"
pers_setting_key = "/persistent" + dflt_setting_key
    # 1. Get the persistent per-viewport value that is saved (may be non-existent)
cur_value = isettings.get(pers_setting_key)
# 2. Get the per-viewport default that the setting should restore to
    dflt_value = isettings.get(dflt_setting_key)
# 3. If there is no per-viewport default, try to restore to a value for all Viewports
if dflt_value is None:
dflt_value = isettings.get(f"/app/viewport/defaults/{setting_name}")
    # 4. If still no value to restore to, check for a legacy setting that represents this
if dflt_value is None:
if legacy_key:
dflt_value = isettings.get(legacy_key)
elif setting_name == "resolution":
width = isettings.get("/app/renderer/resolution/width")
height = isettings.get("/app/renderer/resolution/height")
if (width is not None) and (height is not None):
# When either width or height is 0 or less, Viewport will be set to use UI size
if (width > 0) and (height > 0):
dflt_value = (width, height)
if dflt_value is None:
dflt_value = (0, 0)
if cur_value is None:
cur_value = dflt_value
return (
(pers_setting_key, cur_value),
(dflt_setting_key, dflt_value)
)
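# Illustrative example of the fallback order above (paths shown for a hypothetical
# viewport_id="Viewport0" and setting_name="fillViewport"):
# 1. /persistent/app/viewport/Viewport0/fillViewport (saved per-viewport value)
# 2. /app/viewport/Viewport0/fillViewport (per-viewport startup default)
# 3. /app/viewport/defaults/fillViewport (startup default for all Viewports)
# 4. legacy_key, when one is provided (or the legacy width/height resolution keys)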
class _ViewportResolutionSetter:
"""Simple class that forwards resolution menu item changes to the proper underlying object"""
def __init__(self, factory_dict: dict, fill_viewport: bool):
self.__factory_dict = factory_dict
# Set the Viewport's fill_frame to False as we are controlling it fully
viewport_api = self.viewport_api
if viewport_api and viewport_api.fill_frame:
viewport_api.fill_frame = False
viewport_widget = self.viewport_widget
if viewport_widget:
viewport_widget.expand_viewport = fill_viewport
@property
def viewport_api(self):
return self.__factory_dict.get("viewport_api")
@property
def viewport_widget(self):
return self.__factory_dict.get("layer_provider").viewport_widget
@property
def fill_frame(self) -> bool:
return self.viewport_widget.fill_frame
@property
def fill_viewport(self) -> bool:
return self.viewport_widget.expand_viewport
@fill_viewport.setter
def fill_viewport(self, value: bool):
self.viewport_widget.expand_viewport = value
def set_resolution(self, resolution) -> None:
self.viewport_widget.set_resolution(resolution)
@property
def full_resolution(self) -> Tuple[float, float]:
return self.viewport_widget.full_resolution
class _ComboBoxResolutionScaleModel(SettingComboBoxModel, ResetHelper):
"""The resolution scale model has all the resolution scales and sets the viewport resolution scale"""
def __init__(self, viewport_api, resolution_scale_setting, settings):
self.__viewport_api = viewport_api
# Get the list of available multipliers or a default
values = settings.get(SETTING_RENDER_SCALE_LIST) or [2.0, 1.0, 0.666666666666, 0.5, 0.333333333333, 0.25]
# Check if the legacy per-app multiplier is set and use that if it is
default = resolution_scale_setting[1][1]
self.__default = default if default and default > 0 else 1.0
current_value = resolution_scale_setting[0][1]
# Push current_value into resolution_scale if not set to it already
if (current_value is not None) and (current_value > 0) and (current_value != self.__viewport_api.resolution_scale):
self.__viewport_api.resolution_scale = current_value
SettingComboBoxModel.__init__(
self,
# Set the key to set to to the persistent per-viewport key
setting_path=resolution_scale_setting[0][0],
texts=[str(int(value * 100)) + "%" for value in values],
values=values,
# This is passed to avoid defaulting the per-viewport persistent key to a value so that changes to the
# setting when not adjusted/saved will pick up the new default
current_value=self.__viewport_api.resolution_scale,
)
ResetHelper.__init__(self)
def _on_current_item_changed(self, item: ComboBoxItem) -> None:
super()._on_current_item_changed(item)
self.__viewport_api.resolution_scale = item.value
self._update_reset_button()
# for ResetHelper
def get_default(self):
return self.__default
def restore_default(self) -> None:
if self.__default is not None:
current_index = self.current_index
if current_index:
current = current_index.as_int
items = self.get_item_children(None)
# Early exit if the model is already correct
if items[current].value == self.__default:
return
# Iterate all items, and select the first match to the real value
for index, item in enumerate(items):
if item.value == self.__default:
current_index.set_value(index)
return
def get_value(self):
return self.__viewport_api.resolution_scale
class _ComboBoxApertureFitModel(ComboBoxModel):
"""The aperture model"""
def __init__(self, viewport_api, settings):
self.__viewport_api = viewport_api
values = [0, 1, 2, 3, 4]
texts = ["Match Vertical", "Match Horizontal", "Fit", "Crop", "Stretch"]
current_value = settings.get(SETTING_APERTURE) or 1
super().__init__(texts, values=values, current_value=current_value)
def _on_current_item_changed(self, item: ComboBoxItem) -> None:
# TODO: Add to Python bindings for UsdContext or HydraTexture
# self.__viewport_api.set_aperture_conform_policy(item.value)
pass
class _FillViewportModel(ui.AbstractValueModel, ResetHelper):
def __init__(self, resolution_setter, fill_viewport_settings, isettings: carb.settings.ISettings):
self.__resolution_setter = resolution_setter
# Get the default value that this item should reset/restore to
self.__default = bool(fill_viewport_settings[1][1])
self.__saved_value = self.__default
# This is the per-viewport persistent path this item will save to
self.__setting_path = fill_viewport_settings[0][0]
ui.AbstractValueModel.__init__(self)
ResetHelper.__init__(self)
self.__sub_model = self.subscribe_value_changed_fn(self.__on_value_changed)
self.__sub_setting = isettings.subscribe_to_node_change_events(self.__setting_path, self.__on_setting_changed)
def destroy(self):
self.__sub_model = None
if self.__sub_setting:
carb.settings.get_settings().unsubscribe_to_change_events(self.__sub_setting)
self.__sub_setting = None
def get_value_as_bool(self) -> bool:
return self.__resolution_setter.fill_viewport
def set_value(self, value: bool, save_restore: bool = False):
value = bool(value)
if save_restore:
if value:
value = self.__saved_value
else:
self.__saved_value = self.get_value_as_bool()
if value != self.get_value_as_bool():
self.__resolution_setter.fill_viewport = value
self._value_changed()
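    # Illustrative note on save_restore above: set_value(False, save_restore=True)
    # stashes the current state, and a later set_value(True, save_restore=True)
    # restores it.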
# for ResetHelper
def get_default(self):
return self.__default
def restore_default(self) -> None:
self.set_value(self.__default)
def get_value(self):
return self.get_value_as_bool()
def __on_setting_changed(self, *args, **kwargs):
if self.__sub_model:
self.set_value(carb.settings.get_settings().get(self.__setting_path))
def __on_value_changed(self, model: ui.AbstractValueModel):
# Use self.__sub_setting as a signal to process change in carb subscription
settings = carb.settings.get_settings()
model_sub, self.__sub_model = self.__sub_model, None
try:
value = model.as_bool
if bool(settings.get(self.__setting_path)) != value:
settings.set(self.__setting_path, value)
self._update_reset_button()
finally:
# Make sure to put the subscription back
self.__sub_model = model_sub
class SettingsRendererMenuItem(ui.Menu):
"""The menu with the viewport settings"""
def __init__(self, text: str = "", factory: Dict = {}, **kwargs):
self.__resolution_model: Union[ComboBoxResolutionModel, None] = None
self.__render_scale_model: Union[_ComboBoxResolutionScaleModel, None] = None
self.__fill_viewport_model: Union[_FillViewportModel, None] = None
self.__custom_menu_item: Union[CustomResolutionMenuItem, None] = None
self.__viewport_api_id: Union[str, None] = None
super().__init__(text, on_build_fn=partial(self.build_fn, factory), **kwargs)
def build_fn(self, factory: Dict):
# Create the model and the delegate here, not in __init__ to make the
# objects unique per viewport.
viewport_api = factory["viewport_api"]
viewport_api_id = viewport_api.id
isettings = carb.settings.get_settings()
self.__viewport_api_id = viewport_api_id
resolution_settings = _resolve_viewport_setting(viewport_api_id, "resolution", isettings)
fill_viewport_settings = _resolve_viewport_setting(viewport_api_id, "fillViewport", isettings)
resolution_scale_settings = _resolve_viewport_setting(viewport_api_id, "resolutionScale", isettings)
resolution_delegate = _ViewportResolutionSetter(factory, fill_viewport_settings[0][1])
self.__resolution_model = ComboBoxResolutionModel(resolution_delegate, resolution_settings, isettings)
ResolutionCollectionMenu("Render Resolution", self.__resolution_model)
self.__custom_menu_item = CustomResolutionMenuItem(self.__resolution_model, resolution_delegate)
self.__custom_menu_item.resolution = resolution_delegate.full_resolution
self.__render_scale_model = _ComboBoxResolutionScaleModel(viewport_api, resolution_scale_settings, isettings)
ui.MenuItem(
"Render Scale",
delegate=ComboBoxMenuDelegate(model=self.__render_scale_model, has_reset=True),
hide_on_click=False
)
# Requires Python bindings to set this through to the renderer
# ui.MenuItem(
# "Aperture Policy",
# delegate=ComboBoxMenuDelegate(model=_ComboBoxApertureFitModel(viewport_api, settings)),
# hide_on_click=False,
# )
self.__fill_viewport_model = _FillViewportModel(resolution_delegate, fill_viewport_settings, isettings)
self.__fill_viewport_item = ui.MenuItem(
"Fill Viewport",
delegate=CheckboxMenuDelegate(model=self.__fill_viewport_model, width=310, has_reset=True),
hide_on_click=False,
)
# Watch for an index change to disable / enable 'Fill Viewport' checkbox
self.__sub_resolution_index = self.__resolution_model.current_index.subscribe_value_changed_fn(
self.__on_resolution_index_changed
)
# Viewport can be changed externally, watch for any resolution changes to sync back into our models
self.__sub_render_settings = viewport_api.subscribe_to_render_settings_change(
self.__on_render_settings_changed
)
def __del__(self):
self.destroy()
def destroy(self):
self.__sub_render_settings = None
self.__sub_resolution_index = None
if self.__resolution_model:
self.__resolution_model.destroy()
self.__resolution_model = None
if self.__render_scale_model:
self.__render_scale_model.destroy()
self.__render_scale_model = None
if self.__fill_viewport_model:
self.__fill_viewport_model.destroy()
self.__fill_viewport_model = None
super().destroy()
def reset(self) -> None:
        # When _default_resolution is None, fill-frame defaults to on, otherwise off
if self.__fill_viewport_model:
self.__fill_viewport_model.restore_default()
# Restore resolution scale based on setting
if self.__render_scale_model:
self.__render_scale_model.restore_default()
        # Restore resolution based on setting
if self.__resolution_model:
self.__resolution_model.restore_default()
def __sync_model(self, combo_model: ComboBoxModel, value: Any, select_first: bool = False):
current_index = combo_model.current_index
# Special case for forcing "Viewport" selection to be checked
if select_first:
if current_index.as_int != 0:
current_index.set_value(0)
return
items = combo_model.get_item_children(None)
if items and items[current_index.as_int].value != value:
# Iterate all items, and select the first match to the real value
index_custom = -1
for index, item in enumerate(items):
if item.value == value:
current_index.set_value(index)
return
if item.model.as_string == "Custom":
index_custom = index
if index_custom != -1:
current_index.set_value(index_custom)
def __on_resolution_index_changed(self, index_model: ui.SimpleIntModel) -> None:
# Enable or disable the 'Fill Viewport' option based on whether using Widget size for render resolution
# XXX: Changing visibility causes the menu to resize, which isn't great
index = index_model.as_int
        fill_enabled = index != 0
if fill_enabled != self.__fill_viewport_item.delegate.enabled:
self.__fill_viewport_model.set_value(fill_enabled, True)
self.__fill_viewport_item.delegate.enabled = fill_enabled
# When fillViewport is turned off, try to restore to last resolution
if not fill_enabled:
resolution = carb.settings.get_settings().get(f"/persistent/app/viewport/{self.__viewport_api_id}/resolution")
if resolution:
self.__sync_model(self.__resolution_model, tuple(resolution))
items = self.__resolution_model.get_item_children(None)
        if 0 <= index < len(items):
item = items[index]
self.__custom_menu_item.resolution = item.value
def __on_render_settings_changed(self, camera_path: Sdf.Path, resolution: Tuple[int, int], viewport_api):
full_resolution = viewport_api.full_resolution
if self.__custom_menu_item.resolution != full_resolution:
# Update the custom_menu_item resolution entry boxes.
self.__custom_menu_item.resolution = full_resolution
# Sync the resolution to any existing settings (accounting for "Viewport" special case)
self.__sync_model(self.__resolution_model, full_resolution, self.__resolution_model.fill_frame)
# Sync the resolution scale menu item
self.__sync_model(self.__render_scale_model, viewport_api.resolution_scale)
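# For reference (a sketch; "Viewport" here is a hypothetical viewport id): the
# per-viewport resolution override read above lives at a persistent settings key:
#   import carb.settings
#   res = carb.settings.get_settings().get("/persistent/app/viewport/Viewport/resolution")
#   # e.g. [1920, 1080], or None when the user has not overridden the default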
| 17,346 | Python | 42.47619 | 126 | 0.644414 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.settings/omni/kit/viewport/menubar/settings/menu_item/resolution_collection/model.py | import asyncio
import carb.settings
import omni.kit.app
from omni.kit.viewport.menubar.core import ComboBoxItem, SettingComboBoxModel, ResetHelper
from typing import Tuple, List, Optional
SETTING_RESOLUTION_LIST = "/app/renderer/resolution/list"
SETTING_CUSTOM_RESOLUTION_LIST = "/persistent/app/renderer/resolution/custom/list"
NAME_RESOLUTIONS = {
"Icon": (512, 512),
"Square": (1024, 1024),
"SD": (1280, 960),
"HD720P": (1280, 720),
"HD1080P": (1920, 1080),
"2K": (2048, 1080),
"1440P": (2560, 1440),
"UHD": (3840, 2160),
"Ultra Wide": (3440, 1440),
"Super Ultra Wide": (3840, 1440),
"5K Wide": (5120, 2880),
}
class ResolutionComboBoxItem(ComboBoxItem):
    def __init__(self, resolution: Tuple[int, int], name: Optional[str] = None, custom: bool = False) -> None:
self.resolution = resolution
self.name = name if name else self.get_name_from_resolution(resolution)
text = f"{resolution[0]}x{resolution[1]}" if self.is_valid_resolution() else self.name
self.custom = custom
super().__init__(text, resolution if resolution else "")
def get_name_from_resolution(self, resolution: Tuple[int, int]) -> str:
for name in NAME_RESOLUTIONS:
if NAME_RESOLUTIONS[name] == resolution:
return name
return ""
def is_valid_resolution(self):
return self.resolution and self.resolution[0] > 0 and self.resolution[1] > 0
class ComboBoxResolutionModel(SettingComboBoxModel, ResetHelper):
"""The resolution model has all the resolutions and sets the viewport resolution"""
def __init__(self, resolution_setter, resolution_setting, settings):
# Parse the incoming resolution list via settings
self.__resolution_setter = resolution_setter
# Set the default restore to value based on the resolved default pref-key
self.__default = resolution_setting[1][1]
self.__default = tuple(self.__default) if self.__default else (0, 0)
self.__custom_items: List[ResolutionComboBoxItem] = []
# XXX: For test-suite which passes None!
full_resolution = resolution_setter.full_resolution if resolution_setter else (0, 0)
values = None
try:
sttg_values = settings.get(SETTING_RESOLUTION_LIST)
if sttg_values is not None:
num_values = len(sttg_values)
if num_values > 0 and num_values % 2 == 0:
                    values = [(sttg_values[i * 2], sttg_values[i * 2 + 1]) for i in range(num_values // 2)]
else:
raise RuntimeError(f"Resolution list has invalid length of {num_values}")
except Exception as e:
import traceback
carb.log_error(f"{e}")
carb.log_error(f"{traceback.format_exc()}")
if values is None:
values = [(3840, 2160), (2560, 1440), (2048, 1080), (1920, 1080), (1280, 720), (1024, 1024), (512, 512)]
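        # For reference (a sketch of the expected layout): the setting is a flat list of
        # width/height pairs, e.g. /app/renderer/resolution/list = [3840, 2160, 1920, 1080],
        # which the loop above parses into [(3840, 2160), (1920, 1080)].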
SettingComboBoxModel.__init__(
self,
# Set the key to set to to the persistent per-viewport key
setting_path=resolution_setting[0][0],
# Filled in below
texts=[],
values=[],
            # Set the current value to the resolved persistent per-viewport value.
            # It is passed in to avoid writing a default into the per-viewport persistent key,
            # so that changes to the setting while unadjusted/unsaved will pick up the new default.
current_value=full_resolution,
)
ResetHelper.__init__(self)
self._items.append(ResolutionComboBoxItem((0, 0), name="Viewport"))
for value in values:
self._items.append(ResolutionComboBoxItem(value))
# Separator
self._items.append(ResolutionComboBoxItem(None))
self._items.append(ResolutionComboBoxItem((-1, -1), "Custom"))
# Custom is the last one
self.__index_custom = len(self._items) - 1
current = self._get_current_index_by_value(full_resolution)
self.current_index.set_value(current)
self.__update_setting = omni.kit.app.SettingChangeSubscription(SETTING_CUSTOM_RESOLUTION_LIST, self.__on_custom_change)
self.__on_custom_change(None, carb.settings.ChangeEventType.CHANGED)
def destroy(self):
self.__update_setting = None
self.__resolution_setter = None
self.__custom_items = []
def _on_current_item_changed(self, item: ResolutionComboBoxItem) -> None:
value = item.value
if value[0] >= 0 and value[1] >= 0:
super()._on_current_item_changed(item)
if self.__resolution_setter:
self.__resolution_setter.set_resolution(value)
self._update_reset_button()
def get_item_children(self, item) -> List[ResolutionComboBoxItem]:
if item is None:
items = []
items.extend(self._items)
items.extend(self.__custom_items)
return items
else:
return []
# for ResetHelper
def get_default(self):
return self.__default
def restore_default(self) -> None:
if self.__default is None:
return
current_index = self.current_index
if current_index:
current = current_index.as_int
items = self.get_item_children(None)
# Early exit if the model is already correct
if items[current].value == self.__default:
return
# Iterate all items, and select the first match to the real value
for index, item in enumerate(items):
if item.value == self.__default:
current_index.set_value(index)
return
            # Default resolution not found in the list; fall back to a known entry.
            current_index.set_value(3)
def get_value(self) -> Optional[Tuple[int, int]]:
if self.__resolution_setter:
return self.__resolution_setter.full_resolution
return None
def is_custom(self, resolution: Tuple[int, int]) -> bool:
for custom in self.__custom_items:
if custom.value == resolution:
return True
return False
@property
def fill_frame(self) -> bool:
return self.__resolution_setter.fill_frame if self.__resolution_setter else False
def __on_custom_change(self, value, event_type) -> None:
async def __refresh_custom():
            # This callback can fire before all fields of the setting are updated.
            # Wait a frame to make sure the full information is filled in.
await omni.kit.app.get_app().next_update_async()
self.__custom_items = []
custom_list = carb.settings.get_settings().get(SETTING_CUSTOM_RESOLUTION_LIST) or []
if custom_list:
# Separator
self.__custom_items.append(ResolutionComboBoxItem(None))
for custom in custom_list:
name = custom.pop("name", "")
width = custom.pop("width", -1)
height = custom.pop("height", -1)
if name and width > 0 and height > 0:
self.__custom_items.append(ResolutionComboBoxItem((width, height), name=name, custom=True))
self._item_changed(None)
if self.__resolution_setter:
current = self._get_current_index_by_value(self.__resolution_setter.full_resolution, default=self.__index_custom)
self.current_index.set_value(current)
asyncio.ensure_future(__refresh_custom())
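# For reference (a sketch mirroring the test-suite usage): custom entries are stored
# as an array of dicts under the persistent key watched above, e.g.
#   carb.settings.get_settings().set(
#       SETTING_CUSTOM_RESOLUTION_LIST,
#       [{"name": "Banner", "width": 1600, "height": 400}],
#   )
# Each valid entry becomes a ResolutionComboBoxItem with custom=True.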
| 7,629 | Python | 39.157895 | 129 | 0.602307 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.settings/omni/kit/viewport/menubar/settings/menu_item/resolution_collection/menu.py | from .model import ResolutionComboBoxItem, ComboBoxResolutionModel
import carb.settings
import omni.ui as ui
from omni.kit.viewport.menubar.core import RadioMenuCollection, ViewportMenuDelegate, AbstractWidgetMenuDelegate
import math
from typing import List, Union
import weakref
SETTING_CUSTOM_RESOLUTION_LIST = "/persistent/app/renderer/resolution/custom/list"
DEFAULT_RATIOS = {
"16:9": float(16)/9,
"1:1": 1,
"32:9": float(32)/9,
"4:3": float(4)/3,
"21:9": float(21)/9,
}
class ResolutionCollectionDelegate(AbstractWidgetMenuDelegate):
def __init__(self, model: ComboBoxResolutionModel):
# don't use content_clipping as submenu hovering becomes inconsistent
super().__init__(model=model, has_reset=True, content_clipping=False)
self.__resolution_label: Union[ui.Label, None] = None
index_model = model.get_item_value_model(None, 0)
self.__sub_index_change = index_model.subscribe_value_changed_fn(
lambda m, this=weakref.proxy(self): this.__on_index_changed(m)
)
def destroy(self):
self.__sub_index_change = None
def build_widget(self, item: ui.MenuHelper):
ui.Spacer(width=4)
ui.Label(item.text, width=0)
ui.Spacer()
self.__resolution_label = ui.Label(self.__get_current_resolution(), width=70)
def __get_current_resolution(self):
index = self._model.get_item_value_model(None, 0).as_int
items: List[ResolutionComboBoxItem] = self._model.get_item_children(None)
        if 0 <= index < len(items):
return items[index].name
else:
return "Unknown"
def __on_index_changed(self, model: ui.SimpleIntModel) -> None:
if self.__resolution_label:
self.__resolution_label.text = self.__get_current_resolution()
class ResolutionCollectionMenu(RadioMenuCollection):
ITEM_HEIGHT = 20
def __init__(self, text: str, model: ComboBoxResolutionModel):
super().__init__(
text,
model,
delegate = ResolutionCollectionDelegate(model),
)
self.__custom_menu_items = {}
def build_menu_item(self, item: ResolutionComboBoxItem) -> ui.MenuItem:
if item.resolution is None:
return ui.Separator(
delegate=ui.MenuDelegate(
on_build_item=lambda _: ui.Line(
height=0, alignment=ui.Alignment.V_CENTER, style_type_name_override="Menu.Separator"
)
)
)
else:
menu_item = ui.MenuItem(
item.name,
delegate = ViewportMenuDelegate(build_custom_widgets=lambda d, m, i=item: self.__build_resolution_menuitem_widgets(i))
)
if item.custom:
self.__custom_menu_items[item.name] = menu_item
return menu_item
def __build_resolution_menuitem_widgets(self, item: ResolutionComboBoxItem):
if item.is_valid_resolution():
ui.Spacer()
ui.Spacer(width=20)
ui.Label(item.model.as_string, width=80, style_type_name_override="Resolution.Text")
with ui.HStack(width=60):
ratio = float(item.resolution[0]) / item.resolution[1]
width = self.ITEM_HEIGHT * ratio
with ui.ZStack(width=width):
ui.Rectangle(style_type_name_override="Ratio.Background")
ui.Label(self.get_ratio_text(ratio), alignment=ui.Alignment.CENTER, style_type_name_override="Ratio.Text")
ui.Spacer()
if item.custom:
with ui.VStack(content_clipping=1, width=0):
ui.Image(width=20, style_type_name_override="Resolution.Del", mouse_pressed_fn=lambda x, y, b, f, i=item: self.__delete_resolution(i))
else:
ui.Spacer(width=20)
def get_ratio_text(self, ratio: float):
found = [key for (key, value) in DEFAULT_RATIOS.items() if math.isclose(value, ratio, rel_tol=1e-2)]
if found:
return found[0]
else:
return f"{ratio: .2f}:1"
def __delete_resolution(self, item: ResolutionComboBoxItem):
settings = carb.settings.get_settings()
        custom_list = settings.get(SETTING_CUSTOM_RESOLUTION_LIST) or []
        # Rebuild the list rather than removing entries while iterating over it
        custom_list = [custom for custom in custom_list if custom["name"] != item.name]
        settings.set(SETTING_CUSTOM_RESOLUTION_LIST, custom_list)
if item.name in self.__custom_menu_items:
self.__custom_menu_items[item.name].visible = False
| 4,698 | Python | 38.487395 | 154 | 0.603874 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.settings/omni/kit/viewport/menubar/settings/menu_item/custom_resolution/save_window.py | import omni.ui as ui
from typing import Tuple, Callable
from ...style import SAVE_WINDOW_STYLE
class SaveWindow(ui.Window):
"""
Window to save custom resolution.
"""
PADDING = 8
def __init__(self, resolution: Tuple[int, int], on_save_fn: Callable[[str, Tuple[int, int]], bool]):
self.name_model = ui.SimpleStringModel()
self.__resolution = resolution
self.__on_save_fn = on_save_fn
flags = ui.WINDOW_FLAGS_NO_TITLE_BAR | ui.WINDOW_FLAGS_NO_RESIZE | ui.WINDOW_FLAGS_NO_MOVE | ui.WINDOW_FLAGS_MODAL
super().__init__(f"###Resoluiton Save", width=400, height=180, flags=flags, auto_resize=False, padding_x=0, padding_y=0)
self.frame.set_style(SAVE_WINDOW_STYLE)
self.frame.set_build_fn(self.__build_ui)
def __del__(self):
self.__sub_begin_edit = None
self.destroy()
def __build_ui(self):
with self.frame:
with ui.VStack(height=0):
self._build_titlebar()
ui.Spacer(height=30)
self._build_input()
ui.Spacer(height=30)
self._build_buttons()
ui.Spacer(height=15)
def _build_titlebar(self):
with ui.ZStack(height=0):
            ui.Rectangle(style_type_name_override="Titlebar.Background")
with ui.VStack():
ui.Spacer(height=self.PADDING)
with ui.HStack():
ui.Spacer(width=self.PADDING)
ui.Label("Save Custom Viewport Resolution", width=0, style_type_name_override="Titlebar.Title")
ui.Spacer()
ui.Image(width=20, height=20, mouse_released_fn=lambda x, y, b, f: self.__on_cancel(), name="close")
ui.Spacer(width=self.PADDING)
ui.Spacer(height=self.PADDING)
def _build_input(self):
with ui.HStack():
ui.Spacer()
with ui.ZStack(width=160):
name_input = ui.StringField(self.name_model)
hint_label = ui.Label("Type Name", style_type_name_override="Input.Hint")
ui.Spacer(width=20)
ui.Label(f"{self.__resolution[0]} x {self.__resolution[1]}")
ui.Spacer()
name_input.focus_keyboard()
def __hide_hint():
hint_label.visible = False
self.__sub_begin_edit = self.name_model.subscribe_begin_edit_fn(lambda m: __hide_hint())
def _build_buttons(self):
with ui.HStack():
ui.Spacer()
ui.Button("Save", width=80, clicked_fn=self.__on_save)
ui.Spacer(width=20)
ui.Button("Cancel", width=80, clicked_fn=self.__on_cancel)
ui.Spacer()
def __on_save(self) -> None:
if self.__on_save_fn(self.name_model.as_string, self.__resolution):
self.visible = False
def __on_cancel(self) -> None:
self.visible = False
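# Example usage (a minimal sketch; `on_save` returning True closes the window):
#   def on_save(name: str, resolution: Tuple[int, int]) -> bool:
#       print(f"saving {name} = {resolution[0]}x{resolution[1]}")
#       return True
#   window = SaveWindow((1920, 1080), on_save)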
| 2,934 | Python | 35.6875 | 128 | 0.557941 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.settings/omni/kit/viewport/menubar/settings/menu_item/custom_resolution/custom_resolution_menu_item.py | import omni.ui as ui
from typing import Tuple
from .custom_resolution_delegate import CustomResolutionDelegate
class CustomResolutionMenuItem(ui.MenuItem):
"""
Menu item to edit/save custom resolution.
"""
def __init__(self, res_model, res_setter):
self.__delegate = CustomResolutionDelegate(res_model, res_setter)
ui.MenuItem(
"Custom Resolution",
delegate=self.__delegate,
hide_on_click=False,
)
super().__init__("Custom Resolution")
def destroy(self):
self.__delegate.destroy()
@property
def resolution(self) -> Tuple[int, int]:
return self.__delegate.resolution
@resolution.setter
def resolution(self, res: Tuple[int, int]) -> None:
self.__delegate.resolution = res
| 803 | Python | 26.724137 | 73 | 0.633873 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.settings/omni/kit/viewport/menubar/settings/menu_item/custom_resolution/custom_resolution_delegate.py | from omni.kit.viewport.menubar.core import AbstractWidgetMenuDelegate
import omni.ui as ui
import omni.kit.app
from .save_window import SaveWindow
import carb.settings
from typing import List, Optional, Callable, Tuple
import weakref
import math
import asyncio
SETTING_CUSTOM_RESOLUTION_LIST = "/persistent/app/renderer/resolution/custom/list"
SETTING_MIN_RESOLUTION = "/exts/omni.kit.viewport.menubar.settings/min_resolution"
class RatioItem(ui.AbstractItem):
def __init__(self, text: str, value: float) -> None:
super().__init__()
self.model = ui.SimpleStringModel(text)
self.value = value
class RatioModel(ui.AbstractItemModel):
"""
The model used for ratio combobox
"""
def __init__(self):
super().__init__()
# List items
self.__default_items = [
RatioItem("16:9", 16.0/9),
RatioItem("4:3", 4.0/3),
RatioItem("1:1", 1.0)
]
self.__custom_item: Optional[RatioItem] = None
# Current value
self.current_index = ui.SimpleIntModel(-1)
self._sub = self.current_index.subscribe_value_changed_fn(
lambda _, this=weakref.proxy(self): this.__on_index_changed()
)
def destroy(self):
self._sub = None
self.current_index = None
@property
def ratio(self) -> float:
items = self.get_item_children(None)
return items[self.current_index.as_int].value
@ratio.setter
def ratio(self, value: float) -> None:
found = [index for (index, item) in enumerate(self.__default_items) if math.isclose(item.value, value, rel_tol=1e-2)]
if found:
self.__custom_item = None
self.current_index.set_value(found[0])
self._item_changed(None)
else:
ratio_text = f"{value: .2f}:1"
self.__custom_item = RatioItem(ratio_text, value)
self.current_index.set_value(0)
self._item_changed(None)
def subscribe_ratio_changed_fn(self, on_ratio_changed_fn: Callable[[float], None]):
def __on_sub_index_changed(this, callback):
current_index = this.current_index.as_int
items = this.get_item_children(None)
callback(items[current_index].value)
return self.current_index.subscribe_value_changed_fn(
lambda _, this=weakref.proxy(self), callback=on_ratio_changed_fn: __on_sub_index_changed(this, callback)
)
def get_item_children(self, item) -> List[RatioItem]:
items = []
if self.__custom_item:
items.append(self.__custom_item)
items.extend(self.__default_items)
return items
def get_item_value_model(self, item, column_id):
if item is None:
return self.current_index
return item.model
def __on_index_changed(self):
self._item_changed(None)
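# Example usage (a minimal sketch): listen for ratio changes coming from the combobox.
#   model = RatioModel()
#   sub = model.subscribe_ratio_changed_fn(lambda r: print(f"ratio is now {r:.2f}"))
#   model.ratio = 16.0 / 9  # snaps to the matching default item
# Keep `sub` referenced for as long as the callback should stay alive; the callback
# only fires when the selected index actually changes.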
class CustomResolutionDelegate(AbstractWidgetMenuDelegate):
"""
    Delegate to edit/save custom resolution.
"""
def __init__(self, resolution_model, resolution_setter):
super().__init__(width=310, has_reset=False)
self.__resolution_model = resolution_model
self.__resolution_setter = resolution_setter
self.__link_button: Optional[ui.Button] = None
self.__save_button: Optional[ui.Button] = None
self.__save_window: Optional[SaveWindow] = None
self.__settings = carb.settings.get_settings()
(self.__resolution_min_width, self.__resolution_min_height) = self.__settings.get(SETTING_MIN_RESOLUTION) or [64, 64]
self.width_model = ui.SimpleIntModel(1920)
self.__sub_width_begin_edit = self.width_model.subscribe_begin_edit_fn(lambda _: self.__on_begin_edit())
self.__sub_width_end_edit = self.width_model.subscribe_end_edit_fn(lambda _: self.__on_width_end_edit())
self.height_model = ui.SimpleIntModel(1080)
self.__sub_height_begin_edit = self.height_model.subscribe_begin_edit_fn(lambda _: self.__on_begin_edit())
self.__sub_height_end_edit = self.height_model.subscribe_end_edit_fn(lambda _: self.__on_height_end_edit())
self.ratio_model = RatioModel()
self.__sub_ratio_change = None
self.__subscribe_ratio_change()
def destroy(self):
self.__sub_ratio_change = None
self.__sub_width_begin_edit = None
self.__sub_width_end_edit = None
self.__sub_height_begin_edit = None
self.__sub_height_end_edit = None
@property
def resolution(self) -> Tuple[int, int]:
return (self.width_model.as_int, self.height_model.as_int)
@resolution.setter
def resolution(self, res: Tuple[int, int]) -> None:
if res[0] == -1 and res[1] == -1:
# "Custom" selected
self.__update_save_image_state()
elif res[0] > 0 and res[1] > 0:
if self.width_model.as_int == res[0] and self.height_model.as_int == res[1]:
return
            was_ratio_subscribed = self.__subscribe_ratio_change(enable=False)
            self.ratio_model.ratio = res[0] / res[1]
            self.width_model.set_value(res[0])
            self.height_model.set_value(res[1])
            self.__subscribe_ratio_change(enable=was_ratio_subscribed)
self.__update_save_image_state()
def build_widget(self, item: ui.MenuHelper):
with ui.VStack(spacing=0):
ui.Spacer(height=0, spacing=4)
with ui.HStack():
ui.Spacer(width=8)
ui.IntField(self.width_model, width=60, height=20)
ui.Spacer(width=10)
self.__link_button = ui.Button(
width=35,
image_height=20,
image_width=24,
checked=True,
clicked_fn=self.__on_link_clicked,
style_type_name_override="ResolutionLink",
)
ui.Spacer(width=10)
ui.IntField(self.height_model, width=60, height=20)
ui.Spacer(width=10)
ui.ComboBox(self.ratio_model, name="ratio")
ui.Spacer(width=10)
with ui.VStack(width=0, content_clipping=True):
self.__save_button = ui.Button(
style_type_name_override="Menu.Item.Button",
name="save",
width=20,
height=20,
image_width=20,
image_height=20,
clicked_fn=self.__save
)
ui.Spacer(width=4)
with ui.HStack():
ui.Spacer(width=8)
ui.Label("Width", alignment=ui.Alignment.LEFT, width=60)
ui.Spacer(width=54)
ui.Label("Height", alignment=ui.Alignment.LEFT, width=60)
ui.Spacer()
def __on_width_changed(self, model):
width = model.as_int
if width < self.__resolution_min_width:
self.__post_resolution_warning()
model.set_value(self.__resolution_min_width)
width = model.as_int
if self.__link_button:
if self.__link_button.checked:
height = int(width/self.ratio_model.ratio)
if height < self.__resolution_min_height:
# Height is too small, change width to match the min height
self.__post_resolution_warning()
height = self.__resolution_min_height
width = int(height * self.ratio_model.ratio)
model.set_value(width)
if height != self.height_model.as_int:
self.height_model.set_value(height)
else:
self.ratio_model.ratio = float(width) / self.height_model.as_int
self.__set_render_resolution(self.resolution)
self.__update_save_image_state()
def __on_height_changed(self, model):
height = model.as_int
if height < self.__resolution_min_height:
self.__post_resolution_warning()
model.set_value(self.__resolution_min_height)
height = model.as_int
if self.__link_button:
if self.__link_button.checked:
width = int(height * self.ratio_model.ratio)
if width < self.__resolution_min_width:
# Width is too small, change height to match min width
self.__post_resolution_warning()
width = self.__resolution_min_width
height = int(width / self.ratio_model.ratio)
model.set_value(height)
if width != self.width_model.as_int:
self.width_model.set_value(width)
else:
self.ratio_model.ratio = float(self.width_model.as_int) / height
self.__set_render_resolution(self.resolution)
self.__update_save_image_state()
def __on_ratio_changed(self, ratio: float):
        height = int(self.width_model.as_int / ratio)
if height != self.height_model.as_int:
self.height_model.set_value(height)
self.__set_render_resolution(self.resolution)
self.__update_save_image_state()
def __on_link_clicked(self):
self.__link_button.checked = not self.__link_button.checked
def __subscribe_ratio_change(self, enable: bool = True) -> bool:
was_subscribed = self.__sub_ratio_change is not None
if enable:
if not was_subscribed:
self.__sub_ratio_change = self.ratio_model.subscribe_ratio_changed_fn(self.__on_ratio_changed)
elif was_subscribed:
self.__sub_ratio_change = None
return was_subscribed
def __save(self):
if self.__save_button.checked:
if self.__save_window:
self.__save_window = None
self.__save_window = SaveWindow(self.resolution, self.__on_save_resolution)
def __update_save_image_state(self):
if not self.__save_button:
return
for item in self.__resolution_model.get_item_children(None):
if self.resolution == item.value:
self.__save_button.checked = False
break
else:
self.__save_button.checked = True
def __on_save_resolution(self, new_name: str, resolution: Tuple[int, int]) -> bool:
custom_list = self.__settings.get(SETTING_CUSTOM_RESOLUTION_LIST) or []
for custom in custom_list:
name = custom["name"]
if name == new_name:
carb.log_warn("f{new_name} already exists!")
return False
custom_list.append(
{
"name": new_name,
"width": resolution[0],
"height": resolution[1]
}
)
self.__settings.set(SETTING_CUSTOM_RESOLUTION_LIST, custom_list)
self.__save_button.checked = False
return True
def __set_render_resolution(self, resolution: Tuple[int, int]):
async def __delay_async(res: Tuple[int, int]):
# Delay a frame to make sure current changes from UI are saved
await omni.kit.app.get_app().next_update_async()
self.__resolution_setter.set_resolution(res)
asyncio.ensure_future(__delay_async(resolution))
def __on_begin_edit(self):
self.__saved_width = self.width_model.as_int
self.__saved_height = self.height_model.as_int
def __on_width_end_edit(self):
if self.width_model.as_int <= 0:
self.width_model.set_value(self.__saved_width)
self.__on_width_changed(self.width_model)
def __on_height_end_edit(self):
if self.height_model.as_int <= 0:
self.height_model.set_value(self.__saved_height)
self.__on_height_changed(self.height_model)
def __post_resolution_warning(self):
try:
import omni.kit.notification_manager as nm
nm.post_notification(f"Resolution cannot be lower than {self.__resolution_min_width}x{self.__resolution_min_height}", status=nm.NotificationStatus.WARNING)
except ImportError:
carb.log_warn(f"Resolution cannot be lower than {self.__resolution_min_width}x{self.__resolution_min_height}") | 12,445 | Python | 38.891026 | 167 | 0.57268 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.settings/omni/kit/viewport/menubar/settings/tests/test_custom_resolution.py | import carb.settings
import omni.kit.test
from ..menu_item.resolution_collection.model import ComboBoxResolutionModel, ResolutionComboBoxItem
SETTING_CUSTOM_RESOLUTION_LIST = "/persistent/app/renderer/resolution/custom/list"
class TestCustomResolution(omni.kit.test.AsyncTestCase):
async def setUp(self):
self.__settings = carb.settings.get_settings()
        # Useless fake data that needs to go to ComboBoxResolutionModel
        resolution_setter = None
        resolution_settings = (("setting", (0, 0)), ("setting", (0, 0)))
        self.__model = ComboBoxResolutionModel(resolution_setter, resolution_settings, self.__settings)
        await super().setUp()
async def tearDown(self):
self.__settings.set(SETTING_CUSTOM_RESOLUTION_LIST, [])
        await super().tearDown()
async def test_custom_resolutions(self):
items = self.__model.get_item_children(None)
num_items = len(items)
self.__settings.set(SETTING_CUSTOM_RESOLUTION_LIST, [{"name": "test", "width": 100, "height": 200}])
for _ in range(2):
await omni.kit.app.get_app().next_update_async()
items = self.__model.get_item_children(None)
self.assertEqual(num_items + 2, len(items))
new_item: ResolutionComboBoxItem = items[-1]
self.assertEqual(new_item.name, "test")
self.assertEqual(new_item.resolution, (100, 200))
self.assertTrue(new_item.custom)
self.__settings.set(SETTING_CUSTOM_RESOLUTION_LIST, [])
for _ in range(2):
await omni.kit.app.get_app().next_update_async()
items = self.__model.get_item_children(None)
self.assertEqual(num_items, len(items))
| 1,666 | Python | 40.674999 | 108 | 0.657263 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.settings/omni/kit/viewport/menubar/settings/tests/__init__.py | from .test_ui import *
from .test_custom_resolution import *
| 61 | Python | 19.66666 | 37 | 0.754098 |
omniverse-code/kit/exts/omni.kit.viewport.menubar.settings/omni/kit/viewport/menubar/settings/tests/test_ui.py | import omni.kit.test
from omni.ui.tests.test_base import OmniUiTest
import omni.kit.ui_test as ui_test
from omni.kit.ui_test import Vec2
import omni.usd
import omni.kit.app
from omni.kit.test.teamcity import is_running_in_teamcity
from pathlib import Path
import carb.input
import asyncio
import unittest
import sys
CURRENT_PATH = Path(__file__).parent
TEST_DATA_PATH = CURRENT_PATH.parent.parent.parent.parent.parent.parent.joinpath("data").joinpath("tests")
TEST_WIDTH, TEST_HEIGHT = 600, 600
class TestSettingMenuWindow(OmniUiTest):
async def setUp(self):
self._golden_img_dir = TEST_DATA_PATH.absolute().joinpath("golden_img").absolute()
await self.create_test_area(width=TEST_WIDTH, height=TEST_HEIGHT)
await omni.kit.app.get_app().next_update_async()
async def test_navigation(self):
await self.__show_subitem("menubar_setting_navigation.png", 86)
async def test_selection(self):
await self.__show_subitem("menubar_setting_selection.png", 106)
async def test_grid(self):
await self.__show_subitem("menubar_setting_grid.png", 126)
async def test_gizmo(self):
await self.__show_subitem("menubar_setting_gizmo.png", 146)
@unittest.skipIf(
(sys.platform == "linux" and is_running_in_teamcity()),
"OM-64377: Delegate for RadioMenuCollection does not work in Linux",
)
async def test_viewport(self):
await self.__show_subitem("menubar_setting_viewport.png", 166)
async def test_viewport_ui(self):
await self.__show_subitem("menubar_setting_viewport_ui.png", 186)
async def test_viewport_manipulate(self):
await self.__show_subitem("menubar_setting_viewport_manipulator.png", 206)
async def test_reset_item(self):
settings = carb.settings.get_settings()
cam_vel = settings.get("/persistent/app/viewport/camMoveVelocity")
in_enabled = settings.get("/persistent/app/viewport/camInertiaEnabled")
settings.set("/persistent/app/viewport/camMoveVelocity", cam_vel * 2)
settings.set("/persistent/app/viewport/camInertiaEnabled", not in_enabled)
try:
await self.__do_ui_test(ui_test.emulate_mouse_click, 225)
self.assertEqual(settings.get("/persistent/app/viewport/camMoveVelocity"), cam_vel)
self.assertEqual(settings.get("/persistent/app/viewport/camInertiaEnabled"), in_enabled)
finally:
settings.set("/persistent/app/viewport/camMoveVelocity", cam_vel)
settings.set("/persistent/app/viewport/camInertiaEnabled", in_enabled)
async def __show_subitem(self, golden_img_name: str, y: int) -> None:
        async def golden_compare():
            await self.finalize_test(golden_img_dir=self._golden_img_dir, golden_img_name=golden_img_name)
        await self.__do_ui_test(golden_compare, y)
async def __do_ui_test(self, test_operation, y: int, frame_wait: int = 3) -> None:
# Enable mouse input
app = omni.kit.app.get_app()
app_window = omni.appwindow.get_default_app_window()
for device in [carb.input.DeviceType.MOUSE]:
app_window.set_input_blocking_state(device, None)
try:
await ui_test.emulate_mouse_move(Vec2(20, 46), human_delay_speed=4)
await ui_test.emulate_mouse_click()
await ui_test.emulate_mouse_move(Vec2(20, y))
for _ in range(frame_wait):
await app.next_update_async()
await test_operation()
finally:
for _ in range(frame_wait):
await app.next_update_async()
await ui_test.emulate_mouse_move(Vec2(300, 26))
await ui_test.emulate_mouse_click()
for _ in range(frame_wait):
await app.next_update_async()
| 3,830 | Python | 36.930693 | 106 | 0.661358 |
omniverse-code/kit/exts/omni.kit.window.status_bar/omni/kit/window/status_bar/tests/test_status_bar.py | ## Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
import omni.kit.test
from omni.ui.tests.test_base import OmniUiTest
import carb
import asyncio
class TestStatusBar(OmniUiTest):
# Before running each test
async def setUp(self):
self.name_progress = carb.events.type_from_string("omni.kit.window.status_bar@progress")
self.name_activity = carb.events.type_from_string("omni.kit.window.status_bar@activity")
self.message_bus = omni.kit.app.get_app().get_message_bus_event_stream()
self.message_bus.push(self.name_activity, payload={"text": ""})
self.message_bus.push(self.name_progress, payload={"progress": "-1"})
# After running each test
async def tearDown(self):
pass
async def test_general(self):
await self.create_test_area(256, 64)
async def log():
# Delayed log because self.finalize_test logs things
carb.log_warn("StatusBar test")
asyncio.ensure_future(log())
await self.finalize_test()
async def test_activity(self):
await self.create_test_area(512, 64)
async def log():
# Delayed log because self.finalize_test logs things
carb.log_warn("StatusBar test")
# Test activity name with spaces URL-encoded
self.message_bus.push(self.name_activity, payload={"text": "MFC%20For%20NvidiaAnimated.usd"})
self.message_bus.push(self.name_progress, payload={"progress": "0.2"})
asyncio.ensure_future(log())
await self.finalize_test()
| 1,941 | Python | 36.346153 | 101 | 0.686244 |
omniverse-code/kit/exts/omni.kit.primitive.mesh/omni/kit/primitive/mesh/extension.py | __all__ = ["PrimitiveMeshExtension"]
import omni
from .mesh_actions import register_actions, deregister_actions
from pxr import Usd
class PrimitiveMeshExtension(omni.ext.IExt):
def __init__(self):
super().__init__()
def on_startup(self, ext_id):
self._ext_name = omni.ext.get_extension_name(ext_id)
self._mesh_generator = None
try:
from .generator import MeshGenerator
self._mesh_generator = MeshGenerator()
self._mesh_generator.register_menu()
except ImportError:
pass
register_actions(self._ext_name, PrimitiveMeshExtension, lambda: self._mesh_generator)
def on_shutdown(self):
deregister_actions(self._ext_name)
if self._mesh_generator:
self._mesh_generator.destroy()
| 814 | Python | 27.103447 | 94 | 0.635135 |
omniverse-code/kit/exts/omni.kit.primitive.mesh/omni/kit/primitive/mesh/__init__.py | from .evaluators import get_geometry_mesh_prim_list, AbstractShapeEvaluator
from .command import CreateMeshPrimCommand, CreateMeshPrimWithDefaultXformCommand
from .extension import PrimitiveMeshExtension
| 204 | Python | 50.249987 | 81 | 0.887255 |
omniverse-code/kit/exts/omni.kit.primitive.mesh/omni/kit/primitive/mesh/mesh_actions.py | import omni.usd
import omni.kit.commands
import omni.kit.actions.core
from .evaluators import get_geometry_mesh_prim_list
def register_actions(extension_id, cls, get_self_fn):
def create_mesh_prim(prim_type):
usd_context = omni.usd.get_context()
with omni.kit.usd.layers.active_authoring_layer_context(usd_context):
omni.kit.commands.execute("CreateMeshPrimWithDefaultXform", prim_type=prim_type, above_ground=True)
# actions
for prim in get_geometry_mesh_prim_list():
omni.kit.actions.core.get_action_registry().register_action(
extension_id,
f"create_mesh_prim_{prim.lower()}",
lambda p=prim: create_mesh_prim(p),
display_name=f"Create Mesh Prim {prim}",
description=f"Create Mesh Prim {prim}",
tag="Create Mesh Prim",
)
if get_self_fn() is not None:
omni.kit.actions.core.get_action_registry().register_action(
extension_id,
"show_setting_window",
get_self_fn().show_setting_window,
display_name="Show Settings Window",
description="Show Settings Window",
tag="Show Settings Window",
)
def deregister_actions(extension_id):
action_registry = omni.kit.actions.core.get_action_registry()
action_registry.deregister_all_actions_for_extension(extension_id)
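# Example (a sketch): trigger one of the registered actions programmatically.
#   registry = omni.kit.actions.core.get_action_registry()
#   action = registry.get_action("omni.kit.primitive.mesh", "create_mesh_prim_cube")
#   if action:
#       action.execute()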
| 1,391 | Python | 34.692307 | 111 | 0.646298 |
omniverse-code/kit/exts/omni.kit.primitive.mesh/omni/kit/primitive/mesh/generator.py | import carb.settings
import omni
import omni.kit
from omni import ui
from .evaluators import _get_all_evaluators, get_geometry_mesh_prim_list
from omni.kit.menu.utils import MenuItemDescription, remove_menu_items, add_menu_items
class MeshGenerator:
def __init__(self):
self._settings = carb.settings.get_settings()
self._window = None
self._mesh_setting_ui = {}
self._current_setting_index = 0
self._mesh_menu_list = []
def destroy(self):
self._window = None
remove_menu_items(self._mesh_menu_list, "Create")
def register_menu(self):
sub_menu = []
for prim in get_geometry_mesh_prim_list():
sub_menu.append(MenuItemDescription(name=prim, onclick_action=("omni.kit.primitive.mesh", f"create_mesh_prim_{prim.lower()}")))
sub_menu.append(MenuItemDescription())
sub_menu.append(MenuItemDescription(name="Settings", onclick_action=("omni.kit.primitive.mesh", "show_setting_window")))
self._mesh_menu_list = [
MenuItemDescription(name="Mesh", glyph="menu_prim.svg", sub_menu=sub_menu)
]
add_menu_items(self._mesh_menu_list, "Create")
def on_primitive_type_selected(self, model, item):
names = get_geometry_mesh_prim_list()
old_mesh_name = names[self._current_setting_index]
if old_mesh_name in self._mesh_setting_ui:
self._mesh_setting_ui[old_mesh_name].visible = False
idx = model.get_item_value_model().as_int
mesh_name = names[idx]
if mesh_name in self._mesh_setting_ui:
            self._mesh_setting_ui[mesh_name].visible = True
self._current_setting_index = idx
def show_setting_window(self):
flags = ui.WINDOW_FLAGS_NO_COLLAPSE | ui.WINDOW_FLAGS_NO_SCROLLBAR
if not self._window:
self._window = ui.Window(
"Mesh Generation Settings",
ui.DockPreference.DISABLED,
width=400,
height=260,
flags=flags,
padding_x=0,
padding_y=0,
)
with self._window.frame:
with ui.VStack(height=0):
ui.Spacer(width=0, height=20)
with ui.HStack(height=0):
ui.Spacer(width=20, height=0)
ui.Label("Primitive Type", name="text", height=0)
model = ui.ComboBox(0, *get_geometry_mesh_prim_list(), name="primitive_type").model
model.add_item_changed_fn(self.on_primitive_type_selected)
ui.Spacer(width=20, height=0)
ui.Spacer(width=0, height=10)
ui.Separator(height=0, name="text")
ui.Spacer(width=0, height=10)
with ui.ZStack(height=0):
mesh_names = get_geometry_mesh_prim_list()
for i in range(len(mesh_names)):
mesh_name = mesh_names[i]
stack = ui.VStack(spacing=0)
self._mesh_setting_ui[mesh_name] = stack
with stack:
ui.Spacer(height=20)
evaluator_class = _get_all_evaluators()[mesh_name]
evaluator_class.build_setting_ui()
ui.Spacer(height=5)
if i != 0:
stack.visible = False
ui.Spacer(width=0, height=20)
with ui.HStack(height=0):
ui.Spacer()
ui.Button(
"Create",
alignment=ui.Alignment.H_CENTER,
name="create",
width=120,
height=0,
mouse_pressed_fn=lambda *args: self._create_shape(),
)
ui.Button(
"Reset Settings",
alignment=ui.Alignment.H_CENTER,
name="reset",
width=120,
height=0,
mouse_pressed_fn=lambda *args: self._reset_settings(),
)
ui.Spacer()
self._current_setting_index = 0
self._window.visible = True
def _create_shape(self):
names = get_geometry_mesh_prim_list()
mesh_type = names[self._current_setting_index]
usd_context = omni.usd.get_context()
with omni.kit.usd.layers.active_authoring_layer_context(usd_context):
omni.kit.commands.execute("CreateMeshPrimWithDefaultXform", prim_type=mesh_type, above_ground=True)
def _reset_settings(self):
names = get_geometry_mesh_prim_list()
mesh_type = names[self._current_setting_index]
evaluator_class = _get_all_evaluators()[mesh_type]
evaluator_class.reset_setting()
| 5,205 | Python | 39.992126 | 139 | 0.510086 |
omniverse-code/kit/exts/omni.kit.primitive.mesh/omni/kit/primitive/mesh/command.py | __all__ = ["CreateMeshPrimWithDefaultXformCommand", "CreateMeshPrimCommand"]
import omni
import carb.settings
from pxr import UsdGeom, Usd, Vt, Kind, Sdf, Gf
from .evaluators import _get_all_evaluators
PERSISTENT_SETTINGS_PREFIX = "/persistent"
class CreateMeshPrimWithDefaultXformCommand(omni.kit.commands.Command):
def __init__(self, prim_type: str, **kwargs):
"""
        Creates a primitive mesh of the given type.
Args:
prim_type (str): It supports Plane/Sphere/Cone/Cylinder/Disk/Torus/Cube.
kwargs:
object_origin (Gf.Vec3f): Position of mesh center in stage units.
u_patches (int): The number of patches to tessellate U direction.
v_patches (int): The number of patches to tessellate V direction.
w_patches (int): The number of patches to tessellate W direction.
It only works for Cone/Cylinder/Cube.
half_scale (float): Half size of mesh in centimeters. Default is None, which means it's controlled by settings.
u_verts_scale (int): Tessellation Level of U. It's a multiplier of u_patches.
v_verts_scale (int): Tessellation Level of V. It's a multiplier of v_patches.
w_verts_scale (int): Tessellation Level of W. It's a multiplier of w_patches.
It only works for Cone/Cylinder/Cube.
For Cone/Cylinder, it's to tessellate the caps.
For Cube, it's to tessellate along z-axis.
                above_ground (bool): If True, the center of the mesh is offset to sit above
                    the ground plane; defaults to False. This param only takes effect when
                    object_origin is not given; otherwise it is ignored.
"""
self._prim_type = prim_type[0:1].upper() + prim_type[1:].lower()
self._usd_context = omni.usd.get_context(kwargs.get("context_name", ""))
self._selection = self._usd_context.get_selection()
self._stage = self._usd_context.get_stage()
self._settings = carb.settings.get_settings()
self._default_path = kwargs.get("prim_path", None)
self._select_new_prim = kwargs.get("select_new_prim", True)
self._prepend_default_prim = kwargs.get("prepend_default_prim", True)
self._above_round = kwargs.get("above_ground", False)
self._attributes = {**kwargs}
# Supported mesh types should have an associated evaluator class
self._evaluator_class = _get_all_evaluators()[prim_type]
assert isinstance(self._evaluator_class, type)
def do(self):
self._prim_path = None
if self._default_path:
path = omni.usd.get_stage_next_free_path(self._stage, self._default_path, self._prepend_default_prim)
else:
path = omni.usd.get_stage_next_free_path(self._stage, "/" + self._prim_type, self._prepend_default_prim)
mesh = UsdGeom.Mesh.Define(self._stage, path)
prim = mesh.GetPrim()
defaultXformOpType = self._settings.get(PERSISTENT_SETTINGS_PREFIX + "/app/primCreation/DefaultXformOpType")
defaultRotationOrder = self._settings.get(
PERSISTENT_SETTINGS_PREFIX + "/app/primCreation/DefaultRotationOrder"
)
defaultXformOpOrder = self._settings.get(
PERSISTENT_SETTINGS_PREFIX + "/app/primCreation/DefaultXformOpOrder"
)
defaultXformPrecision = self._settings.get(
PERSISTENT_SETTINGS_PREFIX + "/app/primCreation/DefaultXformOpPrecision"
)
vec3_type = Sdf.ValueTypeNames.Double3 if defaultXformPrecision == "Double" else Sdf.ValueTypeNames.Float3
quat_type = Sdf.ValueTypeNames.Quatd if defaultXformPrecision == "Double" else Sdf.ValueTypeNames.Quatf
up_axis = UsdGeom.GetStageUpAxis(self._stage)
self._attributes["up_axis"] = up_axis
half_scale = self._attributes.get("half_scale", None)
if half_scale is None or half_scale <= 0.0:
half_scale = self._evaluator_class.get_default_half_scale()
object_origin = self._attributes.get("object_origin", None)
        if object_origin is None and self._above_ground:
# To move the mesh above the ground.
if self._prim_type != "Disk" and self._prim_type != "Plane":
if self._prim_type != "Torus":
offset = half_scale
else:
# The tube of torus is half of the half_scale.
offset = half_scale / 2.0
# Scale it to make sure it matches stage units.
units = UsdGeom.GetStageMetersPerUnit(mesh.GetPrim().GetStage())
if Gf.IsClose(units, 0.0, 1e-6):
units = 0.01
scale = 0.01 / units
offset = offset * scale
if up_axis == "Y":
object_origin = Gf.Vec3f(0.0, offset, 0.0)
else:
object_origin = Gf.Vec3f(0.0, 0.0, offset)
else:
object_origin = Gf.Vec3f(0.0)
        elif isinstance(object_origin, (list, tuple)):
            object_origin = Gf.Vec3f(*object_origin)
        elif not isinstance(object_origin, Gf.Vec3f):
            # Fall back to the origin for anything unrecognized; keep a Gf.Vec3f as-is.
            object_origin = Gf.Vec3f(0.0)
default_translate = Gf.Vec3d(object_origin) if defaultXformPrecision == "Double" else object_origin
default_euler = Gf.Vec3d(0.0, 0.0, 0.0) if defaultXformPrecision == "Double" else Gf.Vec3f(0.0, 0.0, 0.0)
default_scale = Gf.Vec3d(1.0, 1.0, 1.0) if defaultXformPrecision == "Double" else Gf.Vec3f(1.0, 1.0, 1.0)
default_orient = (
Gf.Quatd(1.0, Gf.Vec3d(0.0, 0.0, 0.0))
if defaultXformPrecision == "Double"
else Gf.Quatf(1.0, Gf.Vec3f(0.0, 0.0, 0.0))
)
mat4_type = Sdf.ValueTypeNames.Matrix4d # there is no Matrix4f in SdfValueTypeNames
if defaultXformOpType == "Scale, Rotate, Translate":
attr_translate = prim.CreateAttribute("xformOp:translate", vec3_type, False)
attr_translate.Set(default_translate)
attr_rotate_name = "xformOp:rotate" + defaultRotationOrder
attr_rotate = prim.CreateAttribute(attr_rotate_name, vec3_type, False)
attr_rotate.Set(default_euler)
attr_scale = prim.CreateAttribute("xformOp:scale", vec3_type, False)
attr_scale.Set(default_scale)
attr_order = prim.CreateAttribute("xformOpOrder", Sdf.ValueTypeNames.TokenArray, False)
attr_order.Set(["xformOp:translate", attr_rotate_name, "xformOp:scale"])
if defaultXformOpType == "Scale, Orient, Translate":
attr_translate = prim.CreateAttribute("xformOp:translate", vec3_type, False)
attr_translate.Set(default_translate)
attr_rotate = prim.CreateAttribute("xformOp:orient", quat_type, False)
attr_rotate.Set(default_orient)
attr_scale = prim.CreateAttribute("xformOp:scale", vec3_type, False)
attr_scale.Set(default_scale)
attr_order = prim.CreateAttribute("xformOpOrder", Sdf.ValueTypeNames.TokenArray, False)
attr_order.Set(["xformOp:translate", "xformOp:orient", "xformOp:scale"])
if defaultXformOpType == "Transform":
attr_matrix = prim.CreateAttribute("xformOp:transform", mat4_type, False)
attr_matrix.Set(Gf.Matrix4d(1.0))
attr_order = prim.CreateAttribute("xformOpOrder", Sdf.ValueTypeNames.TokenArray, False)
attr_order.Set(["xformOp:transform"])
self._prim_path = path
if self._select_new_prim:
self._selection.set_prim_path_selected(path, True, False, True, True)
self._define_mesh(mesh)
return self._prim_path
def undo(self):
if self._prim_path:
self._stage.RemovePrim(self._prim_path)
def _define_mesh(self, mesh):
evaluator = self._evaluator_class(self._attributes)
        points, normals, sts, point_indices, face_vertex_counts = evaluator.eval(**self._attributes)
units = UsdGeom.GetStageMetersPerUnit(mesh.GetPrim().GetStage())
if Gf.IsClose(units, 0.0, 1e-6):
units = 0.01
        # Convert points from centimeters into the stage's linear units
scale = 0.01 / units
points = [point * scale for point in points]
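        # e.g. with metersPerUnit == 1.0 (stage in meters): scale = 0.01, so a point
        # authored at (50, 0, 0) centimeters lands at (0.5, 0, 0) in stage units.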
mesh.GetPointsAttr().Set(Vt.Vec3fArray(points))
mesh.GetNormalsAttr().Set(Vt.Vec3fArray(normals))
mesh.GetFaceVertexIndicesAttr().Set(point_indices)
mesh.GetFaceVertexCountsAttr().Set(face_vertex_counts)
mesh.SetNormalsInterpolation("faceVarying")
prim = mesh.GetPrim()
# https://github.com/PixarAnimationStudios/USD/commit/592b4d39edf5daf0534d467e970c95462a65d44b
# UsdGeom.Imageable.CreatePrimvar deprecated in v19.03 and removed in v22.08
sts_primvar = UsdGeom.PrimvarsAPI(prim).CreatePrimvar("st", Sdf.ValueTypeNames.TexCoord2fArray)
sts_primvar.SetInterpolation("faceVarying")
sts_primvar.Set(Vt.Vec2fArray(sts))
mesh.CreateSubdivisionSchemeAttr("none")
attr = prim.GetAttribute(UsdGeom.Tokens.extent)
if attr:
bounds = UsdGeom.Boundable.ComputeExtentFromPlugins(UsdGeom.Boundable(prim), Usd.TimeCode.Default())
if bounds:
attr.Set(bounds)
# set the new prim as the active selection
if self._select_new_prim:
self._selection.set_selected_prim_paths([prim.GetPath().pathString], False)
# For backward compatibility.
class CreateMeshPrimCommand(CreateMeshPrimWithDefaultXformCommand):
def __init__(self, prim_type: str, **kwargs):
super().__init__(prim_type, **kwargs)
omni.kit.commands.register(CreateMeshPrimCommand)
omni.kit.commands.register(CreateMeshPrimWithDefaultXformCommand)
| 9,980 | Python | 44.995391 | 123 | 0.62505 |
omniverse-code/kit/exts/omni.kit.primitive.mesh/omni/kit/primitive/mesh/evaluators/cone.py | import math
from .utils import (
get_int_setting, build_int_slider, modify_winding_order,
transform_point, inverse_u, inverse_v, generate_disk
)
from .abstract_shape_evaluator import AbstractShapeEvaluator
from pxr import Gf
from typing import List, Tuple
class ConeEvaluator(AbstractShapeEvaluator):
SETTING_OBJECT_HALF_SCALE = "/persistent/app/mesh_generator/shapes/cone/object_half_scale"
SETTING_U_SCALE = "/persistent/app/mesh_generator/shapes/cone/u_scale"
SETTING_V_SCALE = "/persistent/app/mesh_generator/shapes/cone/v_scale"
SETTING_W_SCALE = "/persistent/app/mesh_generator/shapes/cone/w_scale"
def __init__(self, attributes: dict):
super().__init__(attributes)
self.radius = 1.0
self.height = 2.0
    # The point sequence in the u direction must match generate_circle_points
    # so that the side points can be shared with the caps.
def _eval(self, up_axis, u, v) -> Tuple[Gf.Vec3f, Gf.Vec3f]:
theta = u * 2.0 * math.pi
x = (1 - v) * math.cos(theta)
h = v * self.height - 1
if up_axis == "Y":
z = (1 - v) * math.sin(theta)
point = Gf.Vec3f(x, h, z)
dpdu = Gf.Vec3f(-2.0 * math.pi * z, 0.0, 2.0 * math.pi * x)
dpdv = Gf.Vec3f(-x / (1 - v), self.height, -z / (1 - v))
normal = dpdv ^ dpdu
normal = normal.GetNormalized()
else:
y = (1 - v) * math.sin(theta)
point = Gf.Vec3f(x, y, h)
dpdu = Gf.Vec3f(-2.0 * math.pi * y, 2.0 * math.pi * x, 0)
dpdv = Gf.Vec3f(-x / (1 - v), -y / (1 - v), self.height)
normal = dpdu ^ dpdv
normal = normal.GetNormalized()
return point, normal
def eval(self, **kwargs):
half_scale = kwargs.get("half_scale", None)
if half_scale is None or half_scale <= 0:
half_scale = self.get_default_half_scale()
num_u_verts_scale = kwargs.get("u_verts_scale", None)
if num_u_verts_scale is None or num_u_verts_scale <= 0:
num_u_verts_scale = get_int_setting(ConeEvaluator.SETTING_U_SCALE, 1)
num_v_verts_scale = kwargs.get("v_verts_scale", None)
if num_v_verts_scale is None or num_v_verts_scale <= 0:
num_v_verts_scale = get_int_setting(ConeEvaluator.SETTING_V_SCALE, 3)
num_w_verts_scale = kwargs.get("w_verts_scale", None)
if num_w_verts_scale is None or num_w_verts_scale <= 0:
num_w_verts_scale = get_int_setting(ConeEvaluator.SETTING_W_SCALE, 1)
num_u_verts_scale = max(num_u_verts_scale, 1)
num_v_verts_scale = max(num_v_verts_scale, 1)
num_w_verts_scale = max(num_w_verts_scale, 1)
up_axis = kwargs.get("up_axis", "Y")
origin = Gf.Vec3f(0.0)
u_patches = kwargs.get("u_patches", 64)
v_patches = kwargs.get("v_patches", 1)
w_patches = kwargs.get("w_patches", 1)
u_patches = u_patches * num_u_verts_scale
v_patches = v_patches * num_v_verts_scale
w_patches = w_patches * num_w_verts_scale
u_patches = max(int(u_patches), 3)
v_patches = max(int(v_patches), 1)
w_patches = max(int(w_patches), 1)
accuracy = 0.00001
u_delta = 1.0 / u_patches
v_delta = (1.0 - accuracy) / v_patches
num_u_verts = u_patches
num_v_verts = v_patches + 1
points: List[Gf.Vec3f] = []
point_normals: List[Gf.Vec3f] = []
normals: List[Gf.Vec3f] = []
sts: List[Gf.Vec2f] = []
face_indices: List[int] = []
face_vertex_counts: List[int] = []
for j in range(num_v_verts):
for i in range(num_u_verts):
u = i * u_delta
v = j * v_delta
point, normal = self._eval(up_axis, u, v)
point = transform_point(point, origin, half_scale)
points.append(point)
point_normals.append(normal)
def calc_index(i, j):
i = i if i < num_u_verts else 0
base_index = j * num_u_verts
point_index = base_index + i
return point_index
def get_uv(i, j):
u = 1 - i * u_delta if i < num_u_verts else 0.0
v = j * v_delta if j != num_v_verts - 1 else 1.0
return Gf.Vec2f(u, v)
for j in range(v_patches):
for i in range(u_patches):
vindex00 = calc_index(i, j)
vindex10 = calc_index(i + 1, j)
vindex11 = calc_index(i + 1, j + 1)
vindex01 = calc_index(i, j + 1)
uv00 = get_uv(i, j)
uv10 = get_uv(i + 1, j)
uv11 = get_uv(i + 1, j + 1)
uv01 = get_uv(i, j + 1)
# Right-hand order
if up_axis == "Y":
sts.extend([uv00, uv01, uv11, uv10])
face_indices.extend((vindex00, vindex01, vindex11, vindex10))
normals.extend(
[
point_normals[vindex00],
point_normals[vindex01],
point_normals[vindex11],
point_normals[vindex10],
]
)
else:
sts.extend([inverse_u(uv00), inverse_u(uv10), inverse_u(uv11), inverse_u(uv01)])
face_indices.extend((vindex00, vindex10, vindex11, vindex01))
normals.extend(
[
point_normals[vindex00],
point_normals[vindex10],
point_normals[vindex11],
point_normals[vindex01],
]
)
face_vertex_counts.append(4)
# Add hat
if up_axis == "Y":
bottom_center_point = Gf.Vec3f(0, -1, 0)
top_center_point = Gf.Vec3f(0, 1 - accuracy, 0)
else:
bottom_center_point = Gf.Vec3f(0, 0, -1)
top_center_point = Gf.Vec3f(0, 0, 1 - accuracy)
def add_hat(center_point, rim_points_start_index, w_patches, invert_wind_order=False):
bt_points, _, bt_sts, bt_face_indices, bt_face_vertex_counts = generate_disk(
center_point, u_patches, w_patches, origin, half_scale, up_axis
)
# Total points before adding hat
total_points = len(points)
# Skips shared points
points.extend(bt_points[num_u_verts:])
if invert_wind_order:
modify_winding_order(bt_face_vertex_counts, bt_sts)
for st in bt_sts:
sts.append(inverse_v(st))
else:
sts.extend(bt_sts)
face_vertex_counts.extend(bt_face_vertex_counts)
normals.extend([center_point] * len(bt_face_indices))
# Remapping cap points
for i, index in enumerate(bt_face_indices):
if index >= num_u_verts:
bt_face_indices[i] += total_points - num_u_verts
else:
bt_face_indices[i] += rim_points_start_index
if invert_wind_order:
modify_winding_order(bt_face_vertex_counts, bt_face_indices)
face_indices.extend(bt_face_indices)
# Add top hat to close shape
top_hat_start_index = len(points) - num_u_verts
add_hat(top_center_point, top_hat_start_index, 1)
# Add bottom hat to close shape
add_hat(bottom_center_point, 0, w_patches, True)
return points, normals, sts, face_indices, face_vertex_counts
@staticmethod
def build_setting_ui():
from omni import ui
ConeEvaluator._half_scale_slider = build_int_slider(
"Object Half Scale", ConeEvaluator.SETTING_OBJECT_HALF_SCALE, 50, 10, 1000
)
ui.Spacer(height=5)
ConeEvaluator._u_scale_slider = build_int_slider(
"U Verts Scale", ConeEvaluator.SETTING_U_SCALE, 1, 1, 10,
"Tessellation Level in Horizontal Direction"
)
ui.Spacer(height=5)
ConeEvaluator._v_scale_slider = build_int_slider(
"V Verts Scale", ConeEvaluator.SETTING_V_SCALE, 1, 1, 10, "Tessellation Level in Vertical Direction"
)
ui.Spacer(height=5)
ConeEvaluator._w_scale_slider = build_int_slider(
"W Verts Scale", ConeEvaluator.SETTING_W_SCALE, 1, 1, 10, "Tessellation Level of Bottom Cap"
)
@staticmethod
def reset_setting():
ConeEvaluator._half_scale_slider.set_value(ConeEvaluator.get_default_half_scale())
ConeEvaluator._u_scale_slider.set_value(1)
ConeEvaluator._v_scale_slider.set_value(1)
ConeEvaluator._w_scale_slider.set_value(1)
@staticmethod
def get_default_half_scale():
half_scale = get_int_setting(ConeEvaluator.SETTING_OBJECT_HALF_SCALE, 50)
return half_scale
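# Example (a sketch): evaluate a cone mesh directly, outside the command flow.
#   evaluator = ConeEvaluator({})
#   points, normals, sts, indices, counts = evaluator.eval(up_axis="Y", u_patches=32)
#   assert sum(counts) == len(indices)  # one index per face corner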
| 9,127 | Python | 38.008547 | 112 | 0.5315 |
omniverse-code/kit/exts/omni.kit.primitive.mesh/omni/kit/primitive/mesh/evaluators/__init__.py | __all__ = ["get_geometry_mesh_prim_list", "AbstractShapeEvaluator"]
import re
from .abstract_shape_evaluator import AbstractShapeEvaluator
from .cone import ConeEvaluator
from .disk import DiskEvaluator
from .cube import CubeEvaluator
from .cylinder import CylinderEvaluator
from .sphere import SphereEvaluator
from .torus import TorusEvaluator
from .plane import PlaneEvaluator
_all_evaluators = {}
def _get_all_evaluators():
global _all_evaluators
if not _all_evaluators:
evaluator_classes = list(filter(lambda x: re.search(r".+Evaluator$", x), globals().keys()))
evaluator_classes.remove(AbstractShapeEvaluator.__name__)
for evaluator in evaluator_classes:
name = re.sub(r"(.*)Evaluator$", r"\1", evaluator)
_all_evaluators[name] = globals()[f"{name}Evaluator"]
return _all_evaluators
def get_geometry_mesh_prim_list():
names = list(_get_all_evaluators().keys())
names.sort()
return names
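# For reference (a sketch): the discovered evaluator names, sorted alphabetically.
#   get_geometry_mesh_prim_list()
#   # -> ['Cone', 'Cube', 'Cylinder', 'Disk', 'Plane', 'Sphere', 'Torus']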
| 973 | Python | 27.647058 | 99 | 0.706064 |
omniverse-code/kit/exts/omni.kit.primitive.mesh/omni/kit/primitive/mesh/evaluators/abstract_shape_evaluator.py | from typing import List, Tuple
from pxr import Gf
class AbstractShapeEvaluator: # pragma: no cover
def __init__(self, attributes: dict):
self._attributes = attributes
def eval(self, **kwargs) -> Tuple[
List[Gf.Vec3f], List[Gf.Vec3f],
List[Gf.Vec2f], List[int], List[int]
]:
"""It must be implemented to return tuple
[points, normals, uvs, face_indices, face_vertex_counts], where:
* points and normals are array of Gf.Vec3f.
* uvs are array of Gf.Vec2f that represents uv coordinates.
        * face_indices are array of int that represents face indices.
* face_vertex_counts are array of int that represents vertex count per face.
* Normals and uvs must be face varying.
"""
raise NotImplementedError("Eval must be implemented for this shape.")
@staticmethod
def build_setting_ui():
pass
@staticmethod
def reset_setting():
pass
@staticmethod
def get_default_half_scale():
return 50
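# A minimal sketch (illustrative only, not part of the extension): the smallest
# conforming evaluator, returning a single triangle with face-varying attributes.
class _ExampleTriangleShape(AbstractShapeEvaluator):
    def eval(self, **kwargs):
        points = [Gf.Vec3f(0, 0, 0), Gf.Vec3f(1, 0, 0), Gf.Vec3f(0, 1, 0)]
        normals = [Gf.Vec3f(0, 0, 1)] * 3  # one normal per face corner (face varying)
        uvs = [Gf.Vec2f(0, 0), Gf.Vec2f(1, 0), Gf.Vec2f(0, 1)]
        face_indices = [0, 1, 2]
        face_vertex_counts = [3]  # a single triangle
        return points, normals, uvs, face_indices, face_vertex_counts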
| 1,039 | Python | 28.714285 | 84 | 0.636189 |
omniverse-code/kit/exts/omni.kit.primitive.mesh/omni/kit/primitive/mesh/evaluators/utils.py | import math
import carb.settings
from pxr import Gf
from typing import List, Tuple
from numbers import Number
def _save_settings(model, setting):
value = model.get_value_as_int()
carb.settings.get_settings().set(setting, value)
def build_int_slider(name, setting, default_value, min_value, max_value, tooltip=None):
from omni import ui
layout = ui.HStack(height=0)
with layout:
ui.Spacer(width=20, height=0)
ui.Label(name, height=0, name="text")
model = ui.IntSlider(name="text", min=min_value, max=max_value, height=0, aligment=ui.Alignment.LEFT).model
value = get_int_setting(setting, default_value)
model.set_value(value)
ui.Spacer(width=20, height=0)
model.add_value_changed_fn(lambda m: _save_settings(m, setting))
if tooltip:
layout.set_tooltip(tooltip)
return model
def inverse_u(uv) -> Gf.Vec2f:
return Gf.Vec2f(1 - uv[0], uv[1])
def inverse_v(uv) -> Gf.Vec2f:
return Gf.Vec2f(uv[0], 1 - uv[1])
def inverse_uv(uv) -> Gf.Vec2f:
return Gf.Vec2f(1 - uv[0], 1 - uv[1])
def transform_point(point: Gf.Vec3f, origin: Gf.Vec3f, half_scale: float) -> Gf.Vec3f:
return half_scale * point + origin
def generate_circle_points(
up_axis, num_points, delta, center_point=Gf.Vec3f(0.0)
) -> Tuple[List[Gf.Vec3f], List[Gf.Vec2f]]:
points: List[Gf.Vec3f] = []
point_sts: List[Gf.Vec2f] = []
for i in range(num_points):
theta = i * delta * math.pi * 2
if up_axis == "Y":
point = Gf.Vec3f(math.cos(theta), 0.0, math.sin(theta))
st = Gf.Vec2f(1.0 - point[0] / 2.0, (1.0 + point[2]) / 2.0)
else:
point = Gf.Vec3f(math.cos(theta), math.sin(theta), 0.0)
st = Gf.Vec2f((1.0 - point[0]) / 2.0, (1.0 + point[1]) / 2.0)
point_sts.append(st)
points.append(point + center_point)
return points, point_sts
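# Sanity sketch (hypothetical helper, illustration only): four evenly spaced points
# around the unit circle in the XZ plane for a Y-up stage.
def _demo_circle_points():
    pts, sts = generate_circle_points("Y", 4, 0.25)  # theta = 0, pi/2, pi, 3*pi/2
    assert len(pts) == len(sts) == 4
    assert Gf.IsClose(pts[0], Gf.Vec3f(1, 0, 0), 1e-6)
    assert Gf.IsClose(pts[1], Gf.Vec3f(0, 0, 1), 1e-6)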
def get_int_setting(key, default_value):
settings = carb.settings.get_settings()
settings.set_default(key, default_value)
value = settings.get_as_int(key)
return value
def generate_disk(
center_point: Gf.Vec3f, u_patches: int, v_patches: int,
origin: Gf.Vec3f, half_scale: float, up_axis="Y"
) -> Tuple[List[Gf.Vec3f], List[Gf.Vec3f], List[Gf.Vec2f], List[int], List[int]]:
u_delta = 1.0 / u_patches
v_delta = 1.0 / v_patches
num_u_verts = u_patches
num_v_verts = v_patches + 1
points: List[Gf.Vec3f] = []
normals: List[Gf.Vec3f] = []
sts: List[Gf.Vec2f] = []
face_indices: List[int] = []
face_vertex_counts: List[int] = []
center_point = transform_point(center_point, origin, half_scale)
circle_points, _ = generate_circle_points(up_axis, u_patches, 1.0 / u_patches)
for i in range(num_v_verts - 1):
v = v_delta * i
for j in range(num_u_verts):
point = transform_point(circle_points[j], (0, 0, 0), half_scale * (1 - v))
points.append(point + center_point)
# Center point
points.append(center_point)
def calc_index(i, j):
ii = i if i < num_u_verts else 0
base_index = j * num_u_verts
if j == num_v_verts - 1:
return base_index
else:
return base_index + ii
def get_uv(i, j):
vindex = calc_index(i, j)
        # Normalize the point so every axis lies in [-1, 1]
point = (points[vindex] - origin) / half_scale
if up_axis == "Y":
st = (Gf.Vec2f(-point[0], -point[2]) + Gf.Vec2f(1, 1)) / 2
else:
st = (Gf.Vec2f(point[0], point[1]) + Gf.Vec2f(1)) / 2
return st
    # Generate quads, collapsing to triangles on the ring that touches the center
for j in range(v_patches):
for i in range(u_patches):
vindex00 = calc_index(i, j)
vindex10 = calc_index(i + 1, j)
vindex11 = calc_index(i + 1, j + 1)
vindex01 = calc_index(i, j + 1)
uv00 = get_uv(i, j)
uv10 = get_uv(i + 1, j)
uv11 = get_uv(i + 1, j + 1)
uv01 = get_uv(i, j + 1)
# Right-hand order
if up_axis == "Y":
if vindex11 == vindex01:
sts.extend([inverse_u(uv00), inverse_u(uv01), inverse_u(uv10)])
face_indices.extend((vindex00, vindex01, vindex10))
else:
sts.extend([inverse_u(uv00), inverse_u(uv01), inverse_u(uv11), inverse_u(uv10)])
face_indices.extend((vindex00, vindex01, vindex11, vindex10))
normal = Gf.Vec3f(0.0, 1.0, 0.0)
else:
if vindex11 == vindex01:
sts.extend([uv00, uv10, uv01])
face_indices.extend((vindex00, vindex10, vindex01))
else:
sts.extend([uv00, uv10, uv11, uv01])
face_indices.extend((vindex00, vindex10, vindex11, vindex01))
normal = Gf.Vec3f(0.0, 0.0, 1.0)
if vindex11 == vindex01:
face_vertex_counts.append(3)
normals.extend([normal] * 3)
else:
face_vertex_counts.append(4)
normals.extend([normal] * 4)
return points, normals, sts, face_indices, face_vertex_counts
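# Count sanity sketch (hypothetical helper, illustration only): every ring contributes
# u_patches verts and a single shared center vert is appended, so a disk always has
# u_patches * v_patches + 1 points, and the face-varying arrays stay in lockstep.
def _demo_disk_counts():
    pts, normals, sts, indices, counts = generate_disk(Gf.Vec3f(0), 8, 2, Gf.Vec3f(0), 50.0, "Y")
    assert len(pts) == 8 * 2 + 1
    assert len(counts) == 8 * 2  # one quad (or center triangle) per patch
    assert sum(counts) == len(indices) == len(normals) == len(sts)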
def generate_plane(origin, half_scale, u_patches, v_patches, up_axis):
if isinstance(half_scale, Number):
[w, h, d] = half_scale, half_scale, half_scale
else:
[w, h, d] = half_scale
[x, y, z] = origin[0], origin[1], origin[2]
num_u_verts = u_patches + 1
num_v_verts = v_patches + 1
points = []
normals = []
sts = []
face_indices = []
face_vertex_counts = []
u_delta = 1.0 / u_patches
v_delta = 1.0 / v_patches
if up_axis == "Y":
w_delta = 2.0 * w * u_delta
h_delta = 2.0 * d * v_delta
bottom_left = Gf.Vec3f(x - w, y, z - d)
for i in range(num_v_verts):
for j in range(num_u_verts):
point = bottom_left + Gf.Vec3f(j * w_delta, 0.0, i * h_delta)
points.append(point)
elif up_axis == "Z":
w_delta = 2.0 * w / u_patches
h_delta = 2.0 * h / v_patches
bottom_left = Gf.Vec3f(x - w, y - h, z)
for i in range(num_v_verts):
for j in range(num_u_verts):
point = bottom_left + Gf.Vec3f(j * w_delta, i * h_delta, 0.0)
points.append(point)
else: # X up
w_delta = 2.0 * h / u_patches
h_delta = 2.0 * d / v_patches
bottom_left = Gf.Vec3f(x, y - h, z - d)
for i in range(num_v_verts):
for j in range(num_u_verts):
point = bottom_left + Gf.Vec3f(0, j * w_delta, i * h_delta)
points.append(point)
def calc_index(i, j):
ii = i if i < num_u_verts else 0
jj = j if j < num_v_verts else 0
return jj * num_u_verts + ii
def get_uv(i, j):
u = i * u_delta if i < num_u_verts else 1.0
if up_axis == "Y":
v = 1 - j * v_delta if j < num_v_verts else 0.0
else:
v = j * v_delta if j < num_v_verts else 1.0
return Gf.Vec2f(u, v)
# Generating quads
for j in range(v_patches):
for i in range(u_patches):
vindex00 = calc_index(i, j)
vindex10 = calc_index(i + 1, j)
vindex11 = calc_index(i + 1, j + 1)
vindex01 = calc_index(i, j + 1)
uv00 = get_uv(i, j)
uv10 = get_uv(i + 1, j)
uv11 = get_uv(i + 1, j + 1)
uv01 = get_uv(i, j + 1)
# Right-hand order
if up_axis == "Y":
sts.extend([uv00, uv01, uv11, uv10])
face_indices.extend((vindex00, vindex01, vindex11, vindex10))
normal = Gf.Vec3f(0.0, 1.0, 0.0)
elif up_axis == "Z":
sts.extend([uv00, uv10, uv11, uv01])
face_indices.extend((vindex00, vindex10, vindex11, vindex01))
normal = Gf.Vec3f(0.0, 0.0, 1.0)
            else:  # X up
                sts.extend([uv00, uv01, uv11, uv10])
                face_indices.extend((vindex00, vindex01, vindex11, vindex10))
                # This winding runs +Z then +Y, so the face normal points along -X
                normal = Gf.Vec3f(-1.0, 0.0, 0.0)
face_vertex_counts.append(4)
normals.extend([normal] * 4)
return points, normals, sts, face_indices, face_vertex_counts
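# Grid sanity sketch (hypothetical helper, illustration only): a plane is a plain
# (u_patches + 1) x (v_patches + 1) grid of verts with one quad per patch.
def _demo_plane_counts():
    pts, normals, sts, indices, counts = generate_plane(Gf.Vec3f(0), 50.0, 2, 3, "Y")
    assert len(pts) == (2 + 1) * (3 + 1)
    assert counts == [4] * (2 * 3)
    assert len(indices) == len(sts) == len(normals) == 4 * 2 * 3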
def modify_winding_order(face_counts, face_indices):
total = 0
for count in face_counts:
if count >= 3:
start = total + 1
end = total + count
face_indices[start:end] = face_indices[start:end][::-1]
total += count
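# Worked example (hypothetical helper, illustration only): the first vertex of each
# face stays in place and only the remainder is reversed, so a quad [0, 1, 2, 3]
# becomes [0, 3, 2, 1]. Because index 0 is kept fixed, parallel face-varying arrays
# (sts, normals) can be flipped with the same call and stay aligned with the indices.
def _demo_winding():
    indices = [0, 1, 2, 3, 4, 5, 6]
    modify_winding_order([4, 3], indices)
    assert indices == [0, 3, 2, 1, 4, 6, 5]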
| 8,670 | Python | 32.608527 | 115 | 0.529873 |
omniverse-code/kit/exts/omni.kit.primitive.mesh/omni/kit/primitive/mesh/evaluators/plane.py | from .utils import get_int_setting, build_int_slider, inverse_u, generate_plane
from .abstract_shape_evaluator import AbstractShapeEvaluator
from pxr import Gf
class PlaneEvaluator(AbstractShapeEvaluator):
SETTING_OBJECT_HALF_SCALE = "/persistent/app/mesh_generator/shapes/plane/object_half_scale"
SETTING_U_SCALE = "/persistent/app/mesh_generator/shapes/plane/u_scale"
SETTING_V_SCALE = "/persistent/app/mesh_generator/shapes/plane/v_scale"
def __init__(self, attributes: dict):
super().__init__(attributes)
def eval(self, **kwargs):
half_scale = kwargs.get("half_scale", None)
if half_scale is None or half_scale <= 0:
half_scale = self.get_default_half_scale()
num_u_verts_scale = kwargs.get("u_verts_scale", None)
if num_u_verts_scale is None or num_u_verts_scale <= 0:
num_u_verts_scale = get_int_setting(PlaneEvaluator.SETTING_U_SCALE, 1)
num_v_verts_scale = kwargs.get("v_verts_scale", None)
if num_v_verts_scale is None or num_v_verts_scale <= 0:
num_v_verts_scale = get_int_setting(PlaneEvaluator.SETTING_V_SCALE, 1)
up_axis = kwargs.get("up_axis", "Y")
origin = Gf.Vec3f(0.0)
half_scale = [half_scale, half_scale, half_scale]
u_patches = kwargs.get("u_patches", 1)
v_patches = kwargs.get("v_patches", 1)
u_patches = u_patches * num_u_verts_scale
v_patches = v_patches * num_v_verts_scale
u_patches = max(int(u_patches), 1)
v_patches = max(int(v_patches), 1)
return generate_plane(origin, half_scale, u_patches, v_patches, up_axis)
@staticmethod
def build_setting_ui():
from omni import ui
PlaneEvaluator._half_scale_slider = build_int_slider(
"Object Half Scale", PlaneEvaluator.SETTING_OBJECT_HALF_SCALE, 50, 10, 1000
)
ui.Spacer(height=5)
PlaneEvaluator._u_scale_slider = build_int_slider("U Verts Scale", PlaneEvaluator.SETTING_U_SCALE, 1, 1, 10)
ui.Spacer(height=5)
PlaneEvaluator._v_scale_slider = build_int_slider("V Verts Scale", PlaneEvaluator.SETTING_V_SCALE, 1, 1, 10)
@staticmethod
def reset_setting():
PlaneEvaluator._half_scale_slider.set_value(PlaneEvaluator.get_default_half_scale())
PlaneEvaluator._u_scale_slider.set_value(1)
PlaneEvaluator._v_scale_slider.set_value(1)
@staticmethod
def get_default_half_scale():
half_scale = get_int_setting(PlaneEvaluator.SETTING_OBJECT_HALF_SCALE, 50)
return half_scale
| 2,585 | Python | 39.406249 | 116 | 0.649903 |
omniverse-code/kit/exts/omni.kit.primitive.mesh/omni/kit/primitive/mesh/evaluators/cylinder.py | from .utils import (
get_int_setting, build_int_slider, modify_winding_order,
generate_circle_points, transform_point, inverse_u, inverse_v, generate_disk
)
from .abstract_shape_evaluator import AbstractShapeEvaluator
from pxr import Gf
from typing import List
class CylinderEvaluator(AbstractShapeEvaluator):
SETTING_OBJECT_HALF_SCALE = "/persistent/app/mesh_generator/shapes/cylinder/object_half_scale"
SETTING_U_SCALE = "/persistent/app/mesh_generator/shapes/cylinder/u_scale"
SETTING_V_SCALE = "/persistent/app/mesh_generator/shapes/cylinder/v_scale"
SETTING_W_SCALE = "/persistent/app/mesh_generator/shapes/cylinder/w_scale"
def __init__(self, attributes: dict):
super().__init__(attributes)
def eval(self, **kwargs):
half_scale = kwargs.get("half_scale", None)
if half_scale is None or half_scale <= 0:
half_scale = self.get_default_half_scale()
num_u_verts_scale = kwargs.get("u_verts_scale", None)
if num_u_verts_scale is None or num_u_verts_scale <= 0:
num_u_verts_scale = get_int_setting(CylinderEvaluator.SETTING_U_SCALE, 1)
num_v_verts_scale = kwargs.get("v_verts_scale", None)
if num_v_verts_scale is None or num_v_verts_scale <= 0:
num_v_verts_scale = get_int_setting(CylinderEvaluator.SETTING_V_SCALE, 1)
num_w_verts_scale = kwargs.get("w_verts_scale", None)
if num_w_verts_scale is None or num_w_verts_scale <= 0:
num_w_verts_scale = get_int_setting(CylinderEvaluator.SETTING_W_SCALE, 1)
up_axis = kwargs.get("up_axis", "Y")
origin = Gf.Vec3f(0.0)
u_patches = kwargs.get("u_patches", 32)
v_patches = kwargs.get("v_patches", 1)
w_patches = kwargs.get("w_patches", 1)
u_patches = u_patches * num_u_verts_scale
v_patches = v_patches * num_v_verts_scale
w_patches = w_patches * num_w_verts_scale
u_patches = max(int(u_patches), 3)
v_patches = max(int(v_patches), 1)
w_patches = max(int(w_patches), 1)
u_delta = 1.0 / (u_patches if u_patches != 0 else 1)
v_delta = 1.0 / (v_patches if v_patches != 0 else 1)
# open meshes need an extra vert on the end to create the last patch
# closed meshes reuse the vert at index 0 to close their final patch
num_u_verts = u_patches
num_v_verts = v_patches + 1
points: List[Gf.Vec3f] = []
normals: List[Gf.Vec3f] = []
sts: List[Gf.Vec2f] = []
face_indices: List[int] = []
face_vertex_counts: List[int] = []
# generate circle points
circle_points, _ = generate_circle_points(up_axis, num_u_verts, u_delta)
for j in range(num_v_verts):
for i in range(num_u_verts):
v = j * v_delta
                # circle_points[i] is a shared Gf.Vec3f that gets mutated below; this is
                # safe because the up-axis component is overwritten on every iteration.
                point = circle_points[i]
if up_axis == "Y":
point[1] = 2.0 * (v - 0.5)
else:
point[2] = 2.0 * (v - 0.5)
point = transform_point(point, origin, half_scale)
points.append(point)
def calc_index(i, j):
ii = i if i < num_u_verts else 0
jj = j if j < num_v_verts else 0
return jj * num_u_verts + ii
def get_uv(i, j):
u = 1 - i * u_delta if i < num_u_verts else 0.0
v = j * v_delta if j < num_v_verts else 1.0
return Gf.Vec2f(u, v)
for j in range(v_patches):
for i in range(u_patches):
vindex00 = calc_index(i, j)
vindex10 = calc_index(i + 1, j)
vindex11 = calc_index(i + 1, j + 1)
vindex01 = calc_index(i, j + 1)
uv00 = get_uv(i, j)
uv10 = get_uv(i + 1, j)
uv11 = get_uv(i + 1, j + 1)
uv01 = get_uv(i, j + 1)
p00 = points[vindex00]
p10 = points[vindex10]
p11 = points[vindex11]
p01 = points[vindex01]
# Right-hand order
if up_axis == "Y":
sts.extend([uv00, uv01, uv11, uv10])
face_indices.extend((vindex00, vindex01, vindex11, vindex10))
normals.append(Gf.Vec3f(p00[0], 0, p00[2]))
normals.append(Gf.Vec3f(p01[0], 0, p01[2]))
normals.append(Gf.Vec3f(p11[0], 0, p11[2]))
normals.append(Gf.Vec3f(p10[0], 0, p10[2]))
else:
sts.extend([inverse_u(uv00), inverse_u(uv10), inverse_u(uv11), inverse_u(uv01)])
face_indices.extend((vindex00, vindex10, vindex11, vindex01))
normals.append(Gf.Vec3f(p00[0], p00[1], 0))
normals.append(Gf.Vec3f(p10[0], p10[1], 0))
normals.append(Gf.Vec3f(p11[0], p11[1], 0))
normals.append(Gf.Vec3f(p01[0], p01[1], 0))
face_vertex_counts.append(4)
# Add hat
if up_axis == "Y":
bottom_center_point = Gf.Vec3f(0, -1, 0)
top_center_point = Gf.Vec3f(0, 1, 0)
else:
bottom_center_point = Gf.Vec3f(0, 0, -1)
top_center_point = Gf.Vec3f(0, 0, 1)
def add_hat(center_point, rim_points_start_index, w_patches, invert_wind_order=False):
bt_points, _, bt_sts, bt_face_indices, bt_face_vertex_counts = generate_disk(
center_point, u_patches, w_patches, origin, half_scale, up_axis
)
total_points = len(points)
            # Skip the first ring of points; they are shared with the cylinder wall
points.extend(bt_points[num_u_verts:])
if invert_wind_order:
modify_winding_order(bt_face_vertex_counts, bt_sts)
for st in bt_sts:
sts.append(inverse_v(st))
else:
sts.extend(bt_sts)
face_vertex_counts.extend(bt_face_vertex_counts)
normals.extend([center_point] * len(bt_face_indices))
            # Remap the cap's indices onto the combined point array
for i, index in enumerate(bt_face_indices):
if index >= num_u_verts:
bt_face_indices[i] += total_points - num_u_verts
else:
bt_face_indices[i] += rim_points_start_index
if invert_wind_order:
modify_winding_order(bt_face_vertex_counts, bt_face_indices)
face_indices.extend(bt_face_indices)
top_hat_start_index = len(points) - num_u_verts
# Add bottom hat to close shape
add_hat(bottom_center_point, 0, w_patches, True)
# Add top hat to close shape
add_hat(top_center_point, top_hat_start_index, w_patches)
return points, normals, sts, face_indices, face_vertex_counts
@staticmethod
def build_setting_ui():
from omni import ui
CylinderEvaluator._half_scale_slider = build_int_slider(
"Object Half Scale", CylinderEvaluator.SETTING_OBJECT_HALF_SCALE, 50, 10, 1000
)
ui.Spacer(height=5)
CylinderEvaluator._u_scale_slider = build_int_slider(
"U Verts Scale", CylinderEvaluator.SETTING_U_SCALE, 1, 1, 10,
"Tessellation Level in Horizontal Direction"
)
ui.Spacer(height=5)
CylinderEvaluator._v_scale_slider = build_int_slider(
"V Verts Scale", CylinderEvaluator.SETTING_V_SCALE, 1, 1, 10,
"Tessellation Level in Vertical Direction"
)
ui.Spacer(height=5)
CylinderEvaluator._w_scale_slider = build_int_slider(
"W Verts Scale", CylinderEvaluator.SETTING_W_SCALE, 1, 1, 10,
"Tessellation Level of Bottom and Top Caps"
)
@staticmethod
def reset_setting():
CylinderEvaluator._half_scale_slider.set_value(CylinderEvaluator.get_default_half_scale())
CylinderEvaluator._u_scale_slider.set_value(1)
CylinderEvaluator._v_scale_slider.set_value(1)
CylinderEvaluator._w_scale_slider.set_value(1)
@staticmethod
def get_default_half_scale():
half_scale = get_int_setting(CylinderEvaluator.SETTING_OBJECT_HALF_SCALE, 50)
return half_scale
| 8,285 | Python | 40.019802 | 100 | 0.555462 |
omniverse-code/kit/exts/omni.kit.primitive.mesh/omni/kit/primitive/mesh/evaluators/sphere.py | import math
from .utils import get_int_setting, build_int_slider
from .utils import transform_point
from .abstract_shape_evaluator import AbstractShapeEvaluator
from pxr import Gf
class SphereEvaluator(AbstractShapeEvaluator):
SETTING_OBJECT_HALF_SCALE = "/persistent/app/mesh_generator/shapes/shpere/object_half_scale"
SETTING_U_SCALE = "/persistent/app/mesh_generator/shapes/sphere/u_scale"
SETTING_V_SCALE = "/persistent/app/mesh_generator/shapes/sphere/v_scale"
def __init__(self, attributes: dict):
super().__init__(attributes)
def _eval(self, u, v, up_axis):
theta = u * 2.0 * math.pi
phi = (v - 0.5) * math.pi
cos_phi = math.cos(phi)
if up_axis == "Y":
x = cos_phi * math.cos(theta)
y = math.sin(phi)
z = cos_phi * math.sin(theta)
else:
x = cos_phi * math.cos(theta)
y = cos_phi * math.sin(theta)
z = math.sin(phi)
return Gf.Vec3f(x, y, z)
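    # The parameterization above is a standard latitude/longitude mapping:
    #   theta = 2*pi*u          (longitude, a full turn around the up axis)
    #   phi   = pi*(v - 0.5)    (latitude, -pi/2 at the bottom pole, +pi/2 at the top)
    # For a Y-up stage this yields (cos(phi)*cos(theta), sin(phi), cos(phi)*sin(theta)).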
def eval(self, **kwargs):
half_scale = kwargs.get("half_scale", None)
if half_scale is None or half_scale <= 0:
half_scale = self.get_default_half_scale()
num_u_verts_scale = kwargs.get("u_verts_scale", None)
if num_u_verts_scale is None or num_u_verts_scale <= 0:
num_u_verts_scale = get_int_setting(SphereEvaluator.SETTING_U_SCALE, 1)
num_v_verts_scale = kwargs.get("v_verts_scale", None)
if num_v_verts_scale is None or num_v_verts_scale <= 0:
num_v_verts_scale = get_int_setting(SphereEvaluator.SETTING_V_SCALE, 1)
up_axis = kwargs.get("up_axis", "Y")
origin = Gf.Vec3f(0.0)
u_patches = kwargs.get("u_patches", 32)
v_patches = kwargs.get("v_patches", 16)
num_u_verts_scale = max(num_u_verts_scale, 1)
num_v_verts_scale = max(num_v_verts_scale, 1)
u_patches = u_patches * num_u_verts_scale
v_patches = v_patches * num_v_verts_scale
u_patches = max(int(u_patches), 3)
v_patches = max(int(v_patches), 2)
u_delta = 1.0 / u_patches
v_delta = 1.0 / v_patches
num_u_verts = u_patches
num_v_verts = v_patches + 1
points = []
normals = []
sts = []
face_indices = []
face_vertex_counts = []
if up_axis == "Y":
bottom_point = Gf.Vec3f(0.0, -1.0, 0.0)
else:
bottom_point = Gf.Vec3f(0.0, 0.0, -1.0)
point = transform_point(bottom_point, origin, half_scale)
points.append(point)
for j in range(1, num_v_verts - 1):
v = j * v_delta
for i in range(num_u_verts):
u = i * u_delta
point = self._eval(u, v, up_axis)
point = transform_point(point, origin, half_scale)
points.append(Gf.Vec3f(point))
if up_axis == "Y":
top_point = Gf.Vec3f(0.0, 1.0, 0.0)
else:
top_point = Gf.Vec3f(0.0, 0.0, 1.0)
point = transform_point(top_point, origin, half_scale)
points.append(point)
def calc_index(i, j):
if j == 0:
return 0
elif j == num_v_verts - 1:
return len(points) - 1
else:
i = i if i < num_u_verts else 0
return (j - 1) * num_u_verts + i + 1
def get_uv(i, j):
if up_axis == "Y":
u = 1 - i * u_delta
v = j * v_delta
else:
u = i * u_delta
v = j * v_delta
return Gf.Vec2f(u, v)
# Generate body
for j in range(v_patches):
for i in range(u_patches):
# Index 0 is the bottom hat point
vindex00 = calc_index(i, j)
vindex10 = calc_index(i + 1, j)
vindex11 = calc_index(i + 1, j + 1)
vindex01 = calc_index(i, j + 1)
st00 = get_uv(i, j)
st10 = get_uv(i + 1, j)
st11 = get_uv(i + 1, j + 1)
st01 = get_uv(i, j + 1)
p0 = points[vindex00]
p1 = points[vindex10]
p2 = points[vindex11]
p3 = points[vindex01]
# Use face varying uv
if up_axis == "Y":
if vindex11 == vindex01:
sts.extend([st00, st01, st10])
face_indices.extend((vindex00, vindex01, vindex10))
face_vertex_counts.append(3)
normals.extend([p0, p3, p1])
elif vindex00 == vindex10:
sts.extend([st00, st01, st11])
face_indices.extend((vindex00, vindex01, vindex11))
face_vertex_counts.append(3)
normals.extend([p0, p3, p2])
else:
sts.extend([st00, st01, st11, st10])
face_indices.extend((vindex00, vindex01, vindex11, vindex10))
face_vertex_counts.append(4)
normals.extend([p0, p3, p2, p1])
else:
if vindex11 == vindex01:
sts.extend([st00, st10, st01])
face_indices.extend((vindex00, vindex10, vindex01))
face_vertex_counts.append(3)
normals.extend([p0, p1, p3])
elif vindex00 == vindex10:
sts.extend([st00, st11, st01])
face_indices.extend((vindex00, vindex11, vindex01))
face_vertex_counts.append(3)
normals.extend([p0, p2, p3])
else:
sts.extend([st00, st10, st11, st01])
face_indices.extend((vindex00, vindex10, vindex11, vindex01))
face_vertex_counts.append(4)
normals.extend([p0, p1, p2, p3])
return points, normals, sts, face_indices, face_vertex_counts
@staticmethod
def build_setting_ui():
from omni import ui
SphereEvaluator._half_scale_slider = build_int_slider(
"Object Half Scale", SphereEvaluator.SETTING_OBJECT_HALF_SCALE, 50, 10, 1000
)
ui.Spacer(height=5)
SphereEvaluator._u_scale_slider = build_int_slider(
"U Verts Scale", SphereEvaluator.SETTING_U_SCALE, 1, 1, 10
)
ui.Spacer(height=5)
SphereEvaluator._v_scale_slider = build_int_slider(
"V Verts Scale", SphereEvaluator.SETTING_V_SCALE, 1, 1, 10
)
@staticmethod
def reset_setting():
SphereEvaluator._half_scale_slider.set_value(SphereEvaluator.get_default_half_scale())
SphereEvaluator._u_scale_slider.set_value(1)
SphereEvaluator._v_scale_slider.set_value(1)
@staticmethod
def get_default_half_scale():
half_scale = get_int_setting(SphereEvaluator.SETTING_OBJECT_HALF_SCALE, 50)
return half_scale
| 7,142 | Python | 36.397906 | 96 | 0.506301 |
omniverse-code/kit/exts/omni.kit.primitive.mesh/omni/kit/primitive/mesh/evaluators/cube.py | from .utils import get_int_setting, build_int_slider, generate_plane, modify_winding_order
from .abstract_shape_evaluator import AbstractShapeEvaluator
from pxr import Gf
class CubeEvaluator(AbstractShapeEvaluator):
SETTING_OBJECT_HALF_SCALE = "/persistent/app/mesh_generator/shapes/cube/object_half_scale"
SETTING_U_SCALE = "/persistent/app/mesh_generator/shapes/cube/u_scale"
SETTING_V_SCALE = "/persistent/app/mesh_generator/shapes/cube/v_scale"
SETTING_W_SCALE = "/persistent/app/mesh_generator/shapes/cube/w_scale"
def __init__(self, attributes: dict):
super().__init__(attributes)
def eval(self, **kwargs):
half_scale = kwargs.get("half_scale", None)
if half_scale is None or half_scale <= 0:
half_scale = self.get_default_half_scale()
num_u_verts_scale = kwargs.get("u_verts_scale", None)
if num_u_verts_scale is None or num_u_verts_scale <= 0:
num_u_verts_scale = get_int_setting(CubeEvaluator.SETTING_U_SCALE, 1)
num_v_verts_scale = kwargs.get("v_verts_scale", None)
if num_v_verts_scale is None or num_v_verts_scale <= 0:
num_v_verts_scale = get_int_setting(CubeEvaluator.SETTING_V_SCALE, 1)
num_w_verts_scale = kwargs.get("w_verts_scale", None)
if num_w_verts_scale is None or num_w_verts_scale <= 0:
num_w_verts_scale = get_int_setting(CubeEvaluator.SETTING_W_SCALE, 1)
up_axis = kwargs.get("up_axis", "Y")
origin = Gf.Vec3f(0.0)
u_patches = kwargs.get("u_patches", 1)
v_patches = kwargs.get("v_patches", 1)
w_patches = kwargs.get("w_patches", 1)
u_patches = u_patches * num_u_verts_scale
v_patches = v_patches * num_v_verts_scale
w_patches = w_patches * num_w_verts_scale
u_patches = max(int(u_patches), 1)
v_patches = max(int(v_patches), 1)
w_patches = max(int(w_patches), 1)
[x, y, z] = origin
(
xy_plane_points, xy_plane_normals, xy_plane_sts,
xy_plane_face_indices, xy_plane_face_vertex_counts
) = generate_plane(Gf.Vec3f(x, y, z + half_scale), half_scale, u_patches, v_patches, "Z")
(
xz_plane_points, xz_plane_normals, xz_plane_sts,
xz_plane_face_indices, xz_plane_face_vertex_counts
) = generate_plane(Gf.Vec3f(x, y - half_scale, z), half_scale, u_patches, w_patches, "Y")
(
yz_plane_points, yz_plane_normals, yz_plane_sts,
yz_plane_face_indices, yz_plane_face_vertex_counts
) = generate_plane(Gf.Vec3f(x - half_scale, y, z), half_scale, v_patches, w_patches, "X")
points = []
normals = []
sts = []
face_indices = []
face_vertex_counts = []
# XY planes
points.extend(xy_plane_points)
normals.extend([Gf.Vec3f(0, 0, 1)] * len(xy_plane_normals))
sts.extend(xy_plane_sts)
face_indices.extend(xy_plane_face_indices)
face_vertex_counts.extend(xy_plane_face_vertex_counts)
total_indices = len(points)
plane_points = [point + Gf.Vec3f(0, 0, -2.0 * half_scale) for point in xy_plane_points]
points.extend(plane_points)
normals.extend([Gf.Vec3f(0, 0, -1)] * len(xy_plane_normals))
modify_winding_order(xy_plane_face_vertex_counts, xy_plane_sts)
plane_sts = [Gf.Vec2f(1 - st[0], st[1]) for st in xy_plane_sts]
sts.extend(plane_sts)
plane_face_indices = [index + total_indices for index in xy_plane_face_indices]
modify_winding_order(xy_plane_face_vertex_counts, plane_face_indices)
face_indices.extend(plane_face_indices)
face_vertex_counts.extend(xy_plane_face_vertex_counts)
# xz planes
total_indices = len(points)
plane_points = [point + Gf.Vec3f(0, 2.0 * half_scale, 0) for point in xz_plane_points]
points.extend(plane_points)
normals.extend([Gf.Vec3f(0, 1, 0)] * len(xz_plane_normals))
sts.extend(xz_plane_sts)
plane_face_indices = [index + total_indices for index in xz_plane_face_indices]
face_indices.extend(plane_face_indices)
face_vertex_counts.extend(xz_plane_face_vertex_counts)
total_indices = len(points)
points.extend(xz_plane_points)
normals.extend([Gf.Vec3f(0, -1, 0)] * len(xz_plane_normals))
modify_winding_order(xz_plane_face_vertex_counts, xz_plane_sts)
plane_sts = [Gf.Vec2f(st[0], 1 - st[1]) for st in xz_plane_sts]
sts.extend(plane_sts)
plane_face_indices = [index + total_indices for index in xz_plane_face_indices]
modify_winding_order(xz_plane_face_vertex_counts, plane_face_indices)
face_indices.extend(plane_face_indices)
face_vertex_counts.extend(xz_plane_face_vertex_counts)
# yz planes
total_indices = len(points)
points.extend(yz_plane_points)
normals.extend([Gf.Vec3f(-1, 0, 0)] * len(yz_plane_normals))
plane_sts = [Gf.Vec2f(st[1], st[0]) for st in yz_plane_sts]
sts.extend(plane_sts)
plane_face_indices = [index + total_indices for index in yz_plane_face_indices]
face_indices.extend(plane_face_indices)
face_vertex_counts.extend(yz_plane_face_vertex_counts)
total_indices = len(points)
plane_points = [point + Gf.Vec3f(2.0 * half_scale, 0, 0) for point in yz_plane_points]
points.extend(plane_points)
normals.extend([Gf.Vec3f(1, 0, 0)] * len(yz_plane_normals))
modify_winding_order(yz_plane_face_vertex_counts, yz_plane_sts)
plane_sts = [Gf.Vec2f(1 - st[1], st[0]) for st in yz_plane_sts]
sts.extend(plane_sts)
plane_face_indices = [index + total_indices for index in yz_plane_face_indices]
modify_winding_order(yz_plane_face_vertex_counts, plane_face_indices)
face_indices.extend(plane_face_indices)
face_vertex_counts.extend(yz_plane_face_vertex_counts)
# Welds the edges of cube
keep = [True] * len(points)
index_remap = [-1] * len(points)
keep_points = []
for i in range(0, len(points)):
if not keep[i]:
continue
keep_points.append(points[i])
index_remap[i] = len(keep_points) - 1
for j in range(i + 1, len(points)):
if Gf.IsClose(points[j], points[i], 1e-6):
keep[j] = False
index_remap[j] = len(keep_points) - 1
for i in range(len(face_indices)):
face_indices[i] = index_remap[face_indices[i]]
return keep_points, normals, sts, face_indices, face_vertex_counts
@staticmethod
def build_setting_ui():
from omni import ui
CubeEvaluator._half_scale_slider = build_int_slider(
"Object Half Scale", CubeEvaluator.SETTING_OBJECT_HALF_SCALE, 50, 10, 1000
)
ui.Spacer(height=5)
CubeEvaluator._u_scale_slider = build_int_slider(
"U Verts Scale", CubeEvaluator.SETTING_U_SCALE, 1, 1, 10,
"Tessellation Level along X Axis"
)
ui.Spacer(height=5)
CubeEvaluator._v_scale_slider = build_int_slider(
"V Verts Scale", CubeEvaluator.SETTING_V_SCALE, 1, 1, 10,
"Tessellation Level along Y Axis"
)
ui.Spacer(height=5)
CubeEvaluator._w_scale_slider = build_int_slider(
"W Verts Scale", CubeEvaluator.SETTING_W_SCALE, 1, 1, 10,
"Tessellation Level along Z Axis"
)
@staticmethod
def reset_setting():
CubeEvaluator._half_scale_slider.set_value(CubeEvaluator.get_default_half_scale())
CubeEvaluator._u_scale_slider.set_value(1)
CubeEvaluator._v_scale_slider.set_value(1)
CubeEvaluator._w_scale_slider.set_value(1)
@staticmethod
def get_default_half_scale():
half_scale = get_int_setting(CubeEvaluator.SETTING_OBJECT_HALF_SCALE, 50)
return half_scale
| 7,997 | Python | 41.770053 | 97 | 0.614731 |
omniverse-code/kit/exts/omni.kit.primitive.mesh/omni/kit/primitive/mesh/evaluators/disk.py | from .utils import get_int_setting, build_int_slider
from .utils import generate_disk
from .abstract_shape_evaluator import AbstractShapeEvaluator
from pxr import Gf
class DiskEvaluator(AbstractShapeEvaluator):
SETTING_OBJECT_HALF_SCALE = "/persistent/app/mesh_generator/shapes/disk/object_half_scale"
SETTING_U_SCALE = "/persistent/app/mesh_generator/shapes/disk/u_scale"
SETTING_V_SCALE = "/persistent/app/mesh_generator/shapes/disk/v_scale"
def __init__(self, attributes: dict):
super().__init__(attributes)
def eval(self, **kwargs):
half_scale = kwargs.get("half_scale", None)
if half_scale is None or half_scale <= 0:
half_scale = self.get_default_half_scale()
num_u_verts_scale = kwargs.get("u_verts_scale", None)
if num_u_verts_scale is None or num_u_verts_scale <= 0:
num_u_verts_scale = get_int_setting(DiskEvaluator.SETTING_U_SCALE, 1)
num_v_verts_scale = kwargs.get("v_verts_scale", None)
if num_v_verts_scale is None or num_v_verts_scale <= 0:
num_v_verts_scale = get_int_setting(DiskEvaluator.SETTING_V_SCALE, 1)
up_axis = kwargs.get("up_axis", "Y")
origin = Gf.Vec3f(0.0)
        # The disk is approximated by rings of quads running from the outer
        # circle in toward the center. `u_patches` is the number of segments
        # around the circle; `v_patches` is the number of concentric rings in
        # the radial direction.
u_patches = kwargs.get("u_patches", 32)
v_patches = kwargs.get("v_patches", 1)
num_u_verts_scale = max(num_u_verts_scale, 1)
num_v_verts_scale = max(num_v_verts_scale, 1)
u_patches = u_patches * num_u_verts_scale
v_patches = v_patches * num_v_verts_scale
u_patches = max(int(u_patches), 3)
v_patches = max(int(v_patches), 1)
center_point = Gf.Vec3f(0.0)
return generate_disk(center_point, u_patches, v_patches, origin, half_scale, up_axis)
@staticmethod
def build_setting_ui():
from omni import ui
DiskEvaluator._half_scale_slider = build_int_slider(
"Object Half Scale", DiskEvaluator.SETTING_OBJECT_HALF_SCALE, 50, 10, 1000
)
ui.Spacer(height=5)
DiskEvaluator._u_scale_slider = build_int_slider("U Verts Scale", DiskEvaluator.SETTING_U_SCALE, 1, 1, 10)
ui.Spacer(height=5)
DiskEvaluator._v_scale_slider = build_int_slider("V Verts Scale", DiskEvaluator.SETTING_V_SCALE, 1, 1, 10)
@staticmethod
def reset_setting():
DiskEvaluator._half_scale_slider.set_value(DiskEvaluator.get_default_half_scale())
DiskEvaluator._u_scale_slider.set_value(1)
DiskEvaluator._v_scale_slider.set_value(1)
@staticmethod
def get_default_half_scale():
half_scale = get_int_setting(DiskEvaluator.SETTING_OBJECT_HALF_SCALE, 50)
return half_scale
| 2,917 | Python | 39.527777 | 114 | 0.649983 |
omniverse-code/kit/exts/omni.kit.primitive.mesh/omni/kit/primitive/mesh/evaluators/torus.py | import math
from .utils import get_int_setting, build_int_slider
from .utils import transform_point
from .abstract_shape_evaluator import AbstractShapeEvaluator
from pxr import Gf
class TorusEvaluator(AbstractShapeEvaluator):
SETTING_OBJECT_HALF_SCALE = "/persistent/app/mesh_generator/shapes/torus/object_half_scale"
SETTING_U_SCALE = "/persistent/app/mesh_generator/shapes/torus/u_scale"
SETTING_V_SCALE = "/persistent/app/mesh_generator/shapes/torus/v_scale"
def __init__(self, attributes: dict):
super().__init__(attributes)
self.hole_radius = 1.0
self.tube_radius = 0.5
def _eval(self, up_axis, u, v):
theta = u * 2.0 * math.pi
phi = v * 2.0 * math.pi - 0.5 * math.pi
rad_cos_phi = self.tube_radius * math.cos(phi)
cos_theta = math.cos(theta)
sin_phi = math.sin(phi)
sin_theta = math.sin(theta)
x = (self.hole_radius + rad_cos_phi) * cos_theta
nx = self.hole_radius * cos_theta
if up_axis == "Y":
y = self.tube_radius * sin_phi
z = (self.hole_radius + rad_cos_phi) * sin_theta
ny = 0
nz = self.hole_radius * sin_theta
else:
y = (self.hole_radius + rad_cos_phi) * sin_theta
z = self.tube_radius * sin_phi
ny = self.hole_radius * sin_theta
nz = 0
point = Gf.Vec3f(x, y, z)
# construct the normal by creating a vector from the center point of the tube to the surface
normal = Gf.Vec3f(x - nx, y - ny, z - nz)
normal = normal.GetNormalized()
return point, normal
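    # Equivalent closed form (Y-up), with R = hole_radius and r = tube_radius:
    #   point  = ((R + r*cos(phi)) * cos(theta), r*sin(phi), (R + r*cos(phi)) * sin(theta))
    #   normal = normalize(point - (R*cos(theta), 0, R*sin(theta)))
    # i.e. the direction from the tube's center circle out to the surface point.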
def eval(self, **kwargs):
half_scale = kwargs.get("half_scale", None)
if half_scale is None or half_scale <= 0:
half_scale = self.get_default_half_scale()
num_u_verts_scale = kwargs.get("u_verts_scale", None)
if num_u_verts_scale is None or num_u_verts_scale <= 0:
num_u_verts_scale = get_int_setting(TorusEvaluator.SETTING_U_SCALE, 1)
num_v_verts_scale = kwargs.get("v_verts_scale", None)
if num_v_verts_scale is None or num_v_verts_scale <= 0:
num_v_verts_scale = get_int_setting(TorusEvaluator.SETTING_V_SCALE, 1)
up_axis = kwargs.get("up_axis", "Y")
origin = Gf.Vec3f(0.0)
u_patches = kwargs.get("u_patches", 32)
v_patches = kwargs.get("v_patches", 32)
num_u_verts_scale = max(num_u_verts_scale, 1)
num_v_verts_scale = max(num_v_verts_scale, 1)
u_patches = u_patches * num_u_verts_scale
v_patches = v_patches * num_v_verts_scale
u_patches = max(int(u_patches), 3)
v_patches = max(int(v_patches), 3)
u_delta = 1.0 / u_patches
v_delta = 1.0 / v_patches
num_u_verts = u_patches
num_v_verts = v_patches
points = []
point_normals = []
sts = []
face_indices = []
face_vertex_counts = []
for j in range(num_v_verts):
v = j * v_delta
for i in range(num_u_verts):
u = i * u_delta
point, point_normal = self._eval(up_axis, u, v)
point = transform_point(point, origin, half_scale)
points.append(point)
point_normals.append(point_normal)
def calc_index(i, j):
ii = i if i < num_u_verts else 0
jj = j if j < num_v_verts else 0
return jj * num_u_verts + ii
def get_uv(i, j):
if up_axis == "Y":
u = 1 - i * u_delta if i < num_u_verts else 0.0
else:
u = i * u_delta if i < num_u_verts else 1.0
v = j * v_delta if j < num_v_verts else 1.0
return Gf.Vec2f(u, v)
# Last patch from last vert to first vert to close shape
normals = []
for j in range(v_patches):
for i in range(u_patches):
vindex00 = calc_index(i, j)
vindex10 = calc_index(i + 1, j)
vindex11 = calc_index(i + 1, j + 1)
vindex01 = calc_index(i, j + 1)
# Use face varying uv
face_vertex_counts.append(4)
if up_axis == "Y":
sts.append(get_uv(i, j))
sts.append(get_uv(i, j + 1))
sts.append(get_uv(i + 1, j + 1))
sts.append(get_uv(i + 1, j))
face_indices.extend((vindex00, vindex01, vindex11, vindex10))
normals.extend(
[
point_normals[vindex00],
point_normals[vindex01],
point_normals[vindex11],
point_normals[vindex10],
]
)
else:
sts.append(get_uv(i, j))
sts.append(get_uv(i + 1, j))
sts.append(get_uv(i + 1, j + 1))
sts.append(get_uv(i, j + 1))
face_indices.extend((vindex00, vindex10, vindex11, vindex01))
normals.extend(
[
point_normals[vindex00],
point_normals[vindex10],
point_normals[vindex11],
point_normals[vindex01],
]
)
return points, normals, sts, face_indices, face_vertex_counts
@staticmethod
def build_setting_ui():
from omni import ui
TorusEvaluator._half_scale_slider = build_int_slider(
"Object Half Scale", TorusEvaluator.SETTING_OBJECT_HALF_SCALE, 50, 10, 1000
)
ui.Spacer(height=5)
TorusEvaluator._u_scale_slider = build_int_slider("U Verts Scale", TorusEvaluator.SETTING_U_SCALE, 1, 1, 10)
ui.Spacer(height=5)
TorusEvaluator._v_scale_slider = build_int_slider("V Verts Scale", TorusEvaluator.SETTING_V_SCALE, 1, 1, 10)
@staticmethod
def reset_setting():
TorusEvaluator._half_scale_slider.set_value(TorusEvaluator.get_default_half_scale())
TorusEvaluator._u_scale_slider.set_value(1)
TorusEvaluator._v_scale_slider.set_value(1)
@staticmethod
def get_default_half_scale():
half_scale = get_int_setting(TorusEvaluator.SETTING_OBJECT_HALF_SCALE, 50)
return half_scale
| 6,485 | Python | 36.275862 | 116 | 0.523516 |
omniverse-code/kit/exts/omni.kit.primitive.mesh/omni/kit/primitive/mesh/tests/__init__.py | from .test_mesh_prims import *
| 31 | Python | 14.999993 | 30 | 0.741935 |
omniverse-code/kit/exts/omni.kit.primitive.mesh/omni/kit/primitive/mesh/tests/test_mesh_prims.py | import omni.kit.test
import omni.usd
import omni.kit.app
import omni.kit.primitive.mesh
import omni.kit.commands
import omni.kit.actions.core
from pathlib import Path
from pxr import Gf, Kind, Sdf, Usd, UsdGeom, UsdShade
EXTENSION_FOLDER_PATH = Path(omni.kit.app.get_app().get_extension_manager().get_extension_path_by_module(__name__))
TEST_DATA_PATH = EXTENSION_FOLDER_PATH.joinpath("data/tests")
# NOTE: these tests belong to the omni.kit.primitive.mesh extension.
class TestMeshPrims(omni.kit.test.AsyncTestCase):
async def test_tessellation_params(self):
test_data = {
"Cube": [
{
"params": {"half_scale": 100, "u_verts_scale": 2, "v_verts_scale": 1, "w_verts_scale": 1},
},
{
"params": {"half_scale": 200, "u_verts_scale": 2, "v_verts_scale": 2, "w_verts_scale": 1},
},
{
"params": {"half_scale": 400, "u_verts_scale": 2, "v_verts_scale": 2, "w_verts_scale": 2},
},
{
"params": {
"half_scale": 100, "u_verts_scale": 1, "v_verts_scale": 1, "w_verts_scale": 1,
"u_patches": 2, "v_patches": 2, "w_patches": 2
},
},
],
"Cone": [
{
"params": {"half_scale": 100, "u_verts_scale": 2, "v_verts_scale": 1, "w_verts_scale": 1},
},
{
"params": {"half_scale": 200, "u_verts_scale": 2, "v_verts_scale": 2, "w_verts_scale": 1},
},
{
"params": {"half_scale": 400, "u_verts_scale": 2, "v_verts_scale": 2, "w_verts_scale": 2},
},
{
"params": {
"half_scale": 100, "u_verts_scale": 1, "v_verts_scale": 1, "w_verts_scale": 1,
"u_patches": 2, "v_patches": 2, "w_patches": 2
},
},
],
"Cylinder": [
{
"params": {"half_scale": 100, "u_verts_scale": 2, "v_verts_scale": 1, "w_verts_scale": 1},
},
{
"params": {"half_scale": 200, "u_verts_scale": 2, "v_verts_scale": 2, "w_verts_scale": 1},
},
{
"params": {"half_scale": 400, "u_verts_scale": 2, "v_verts_scale": 2, "w_verts_scale": 2},
},
{
"params": {
"half_scale": 100, "u_verts_scale": 1, "v_verts_scale": 1, "w_verts_scale": 1,
"u_patches": 2, "v_patches": 2, "w_patches": 2
},
},
],
"Disk": [
{
"params": {"half_scale": 100, "u_verts_scale": 2, "v_verts_scale": 1},
},
{
"params": {"half_scale": 200, "u_verts_scale": 2, "v_verts_scale": 2},
},
{
"params": {
"half_scale": 100, "u_verts_scale": 1, "v_verts_scale": 1,
"u_patches": 2, "v_patches": 2
},
},
],
"Plane": [
{
"params": {"half_scale": 100, "u_verts_scale": 2, "v_verts_scale": 1},
},
{
"params": {"half_scale": 200, "u_verts_scale": 2, "v_verts_scale": 2},
},
{
"params": {
"half_scale": 100, "u_verts_scale": 1, "v_verts_scale": 1,
"u_patches": 2, "v_patches": 2
},
},
],
"Sphere": [
{
"params": {"half_scale": 100, "u_verts_scale": 2, "v_verts_scale": 1},
},
{
"params": {"half_scale": 200, "u_verts_scale": 2, "v_verts_scale": 2},
},
{
"params": {
"half_scale": 100, "u_verts_scale": 1, "v_verts_scale": 1,
"u_patches": 2, "v_patches": 2
},
},
],
"Torus": [
{
"params": {"half_scale": 100, "u_verts_scale": 2, "v_verts_scale": 1},
},
{
"params": {"half_scale": 200, "u_verts_scale": 2, "v_verts_scale": 2},
},
{
"params": {
"half_scale": 100, "u_verts_scale": 1, "v_verts_scale": 1,
"u_patches": 2, "v_patches": 2
},
},
],
}
golden_file = TEST_DATA_PATH.joinpath("golden.usd")
golden_stage = Usd.Stage.Open(str(golden_file))
self.assertTrue(golden_stage)
await omni.usd.get_context().new_stage_async()
stage = omni.usd.get_context().get_stage()
for prim_type, test_cases in test_data.items():
for test_case in test_cases:
params = test_case["params"]
result, path = omni.kit.commands.execute(
"CreateMeshPrim", prim_type=prim_type, above_ground=True, **params
)
self.assertTrue(result)
mesh_prim = stage.GetPrimAtPath(path)
self.assertTrue(mesh_prim)
golden_prim = golden_stage.GetPrimAtPath(path)
self.assertTrue(golden_prim)
property_names = mesh_prim.GetPropertyNames()
golden_property_names = golden_prim.GetPropertyNames()
self.assertEqual(property_names, golden_property_names)
path = Sdf.Path(path)
for property_name in property_names:
property_path = path.AppendProperty(property_name)
prop = mesh_prim.GetPropertyAtPath(property_path)
golden_prop = golden_prim.GetPropertyAtPath(property_path)
                    # Skip relationships, which have no type name or value
                    if hasattr(prop, "GetTypeName"):
                        self.assertEqual(prop.GetTypeName(), golden_prop.GetTypeName())
                        self.assertEqual(prop.Get(), golden_prop.Get())
async def test_mesh_prims(self):
"""Test all mesh generator prims."""
for y_axis in [True, False]:
await omni.usd.get_context().new_stage_async()
stage = omni.usd.get_context().get_stage()
axis = UsdGeom.Tokens.y if y_axis else UsdGeom.Tokens.z
UsdGeom.SetStageUpAxis(stage, axis)
for prim_type in omni.kit.primitive.mesh.get_geometry_mesh_prim_list():
result, path = omni.kit.commands.execute("CreateMeshPrim", prim_type=prim_type, above_ground=True)
self.assertTrue(result)
def check_exist():
prim = stage.GetPrimAtPath(path)
attr = prim.GetAttribute(UsdGeom.Tokens.extent)
self.assertTrue(attr and attr.Get())
self.assertTrue(prim)
self.assertTrue(prim.IsA(UsdGeom.Mesh))
self.assertTrue(prim.IsA(UsdGeom.Xformable))
mesh_prim = UsdGeom.Mesh(prim)
points = mesh_prim.GetPointsAttr().Get()
face_indices = mesh_prim.GetFaceVertexIndicesAttr().Get()
normals = mesh_prim.GetNormalsAttr().Get()
face_counts = mesh_prim.GetFaceVertexCountsAttr().Get()
total = 0
for face_count in face_counts:
total += face_count
unique_indices = set(face_indices)
self.assertTrue(len(points) == len(unique_indices))
self.assertTrue(total == len(normals))
self.assertTrue(total == len(face_indices))
def check_does_not_exist():
self.assertFalse(stage.GetPrimAtPath(path))
check_exist()
omni.kit.undo.undo()
check_does_not_exist()
omni.kit.undo.redo()
check_exist()
omni.kit.undo.undo()
check_does_not_exist()
async def test_meshes_creation_from_menu(self):
import omni.kit.ui_test as ui_test
await omni.usd.get_context().new_stage_async()
stage = omni.usd.get_context().get_stage()
for prim_type in omni.kit.primitive.mesh.get_geometry_mesh_prim_list():
await ui_test.menu_click(f"Create/Mesh/{prim_type.capitalize()}")
path = f"/{prim_type}"
def check_exist():
prim = stage.GetPrimAtPath(path)
self.assertTrue(prim)
def check_does_not_exist():
self.assertFalse(stage.GetPrimAtPath(path))
check_exist()
omni.kit.undo.undo()
check_does_not_exist()
omni.kit.undo.redo()
check_exist()
omni.kit.undo.undo()
check_does_not_exist()
async def test_mesh_settings(self):
import omni.kit.ui_test as ui_test
await omni.usd.get_context().new_stage_async()
stage = omni.usd.get_context().get_stage()
await ui_test.menu_click("Create/Mesh/Settings")
window = ui_test.find("Mesh Generation Settings")
self.assertTrue(window)
await window.focus()
primitive_type_combobox = window.find("**/ComboBox[*].name=='primitive_type'")
self.assertTrue(primitive_type_combobox)
create_button = window.find("**/Button[*].name=='create'")
self.assertTrue(create_button)
model = primitive_type_combobox.model
value_model = model.get_item_value_model()
for i, prim_type in enumerate(omni.kit.primitive.mesh.get_geometry_mesh_prim_list()):
value_model.set_value(i)
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
await create_button.click()
path = f"/{prim_type}"
self.assertTrue(stage.GetPrimAtPath(path))
async def test_actions(self):
await omni.usd.get_context().new_stage_async()
stage = omni.usd.get_context().get_stage()
for prim_type in omni.kit.primitive.mesh.get_geometry_mesh_prim_list():
omni.kit.actions.core.execute_action(
"omni.kit.primitive.mesh",
f"create_mesh_prim_{prim_type.lower()}"
)
path = f"/{prim_type}"
def check_exist():
prim = stage.GetPrimAtPath(path)
self.assertTrue(prim)
def check_does_not_exist():
self.assertFalse(stage.GetPrimAtPath(path))
check_exist()
omni.kit.undo.undo()
check_does_not_exist()
omni.kit.undo.redo()
check_exist()
omni.kit.undo.undo()
check_does_not_exist()
| 11,354 | Python | 38.702797 | 115 | 0.468822 |
omniverse-code/kit/exts/omni.kit.manipulator.camera/omni/kit/manipulator/camera/animation.py | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ['AnimationEventStream']
import carb
import omni.kit.app
import traceback
from typing import Any, Callable
class AnimationEventStream:
__g_instance = None
@staticmethod
def get_instance():
if AnimationEventStream.__g_instance is None:
AnimationEventStream.__g_instance = [AnimationEventStream(), 1]
else:
AnimationEventStream.__g_instance[1] = AnimationEventStream.__g_instance[1] + 1
return AnimationEventStream.__g_instance[0]
def __init__(self):
self.__event_sub = None
self.__callbacks = {}
def __del__(self):
self.destroy()
def destroy(self):
if AnimationEventStream.__g_instance and AnimationEventStream.__g_instance[0] == self:
AnimationEventStream.__g_instance[1] = AnimationEventStream.__g_instance[1] - 1
if AnimationEventStream.__g_instance[1] > 0:
return
AnimationEventStream.__g_instance = None
self.__event_sub = None
self.__callbacks = {}
def __on_event(self, e: carb.events.IEvent):
dt = e.payload['dt']
for _, callbacks in self.__callbacks.items():
for cb_fn in callbacks:
try:
cb_fn(dt)
except Exception:
carb.log_error(traceback.format_exc())
def __init(self):
if self.__event_sub:
return
self.__event_sub = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(
self.__on_event,
name="omni.kit.manipulator.camera.AnimationEventStream",
order=omni.kit.app.UPDATE_ORDER_PYTHON_ASYNC_FUTURE_END_UPDATE
)
def add_animation(self, animation_fn: Callable, key: Any, remove_others: bool = True):
if remove_others:
self.__callbacks[key] = [animation_fn]
else:
prev_fns = self.__callbacks.get(key) or []
if prev_fns:
prev_fns.append(animation_fn)
else:
self.__callbacks[key] = [animation_fn]
self.__init()
def remove_animation(self, key: Any, animation_fn: Callable = None):
if animation_fn:
prev_fns = self.__callbacks.get(key)
if prev_fns:
try:
prev_fns.remove(animation_fn)
except ValueError:
pass
else:
prev_fns = None
if not prev_fns:
try:
del self.__callbacks[key]
except KeyError:
pass
if not self.__callbacks:
self.__event_sub = None
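# A minimal (hypothetical) usage sketch; the stream is reference counted, so each
# get_instance() call should eventually be balanced by a destroy().
def _example_usage(owner):  # illustration only, not part of the public API
    stream = AnimationEventStream.get_instance()
    stream.add_animation(lambda dt: carb.log_info(f"tick {dt}"), key=owner)
    # ... later, when 'owner' no longer needs per-frame callbacks:
    stream.remove_animation(owner)
    stream.destroy()  # balances the get_instance() above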
| 3,114 | Python | 31.447916 | 103 | 0.582531 |
omniverse-code/kit/exts/omni.kit.manipulator.camera/omni/kit/manipulator/camera/viewport_camera_manipulator.py | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from .model import CameraManipulatorModel, _flatten_matrix, _optional_bool, _optional_int
from .usd_camera_manipulator import (
UsdCameraManipulator,
KIT_COI_ATTRIBUTE,
KIT_LOOKTHROUGH_ATTRIBUTE,
KIT_CAMERA_LOCK_ATTRIBUTE,
_compute_local_transform
)
from omni.ui import scene as sc
from pxr import Usd, UsdGeom, Sdf, Gf
import carb
import math
__all__ = ['ViewportCameraManipulator']
# A more advanced implementation for a Viewport that can use picked objects and
# "look through" arbitrary scene items.
#
def _check_for_camera_forwarding(imageable: UsdGeom.Imageable):
# Look for the relationship setup via LookAtCommand
prim = imageable.GetPrim()
look_through = prim.GetRelationship(KIT_LOOKTHROUGH_ATTRIBUTE).GetForwardedTargets()
if look_through:
stage = prim.GetStage()
# Loop over all targets (should really be only one) and see if we can get a valid UsdGeom.Imageable
for target in look_through:
target_prim = stage.GetPrimAtPath(target)
if not target_prim:
continue
target_imageable = UsdGeom.Imageable(target_prim)
if target_imageable:
return target_imageable
    carb.log_warn(f'{prim.GetPath()} was set up for look-through, but no valid prim was found for targets: {look_through}')
return imageable
def _setup_center_of_interest(model: sc.AbstractManipulatorModel, prim: Usd.Prim, time: Usd.TimeCode,
object_centric: int = 0, viewport_api=None, mouse=None):
def get_center_of_interest():
coi_attr = prim.GetAttribute(KIT_COI_ATTRIBUTE)
if not coi_attr or not coi_attr.IsAuthored():
            # Use UsdGeom.Camera's focusDistance if present
distance = 0
fcs_dist = prim.GetAttribute('focusDistance')
if fcs_dist and fcs_dist.IsAuthored():
distance = fcs_dist.Get(time)
            # A distance of 0 is invalid, so create the attribute based on the length from the origin
if not fcs_dist or distance == 0:
origin = Gf.Matrix4d(*model.get_as_floats('initial_transform')).Transform((0, 0, 0))
distance = origin.GetLength()
coi_attr = prim.CreateAttribute(KIT_COI_ATTRIBUTE, Sdf.ValueTypeNames.Vector3d, True, Sdf.VariabilityUniform)
coi_attr.Set(Gf.Vec3d(0, 0, -distance))
# Make sure COI isn't ridiculously low
coi_val = coi_attr.Get()
length = coi_val.GetLength()
if length < 0.000001 or not math.isfinite(length):
coi_val = Gf.Vec3d(0, 0, -100)
return coi_val
def query_completed(path, pos, *args):
        # Reset the center-of-interest if there's an object and a world-space position
if path and pos:
# Convert carb value to Gf.Vec3d
pos = Gf.Vec3d(pos.x, pos.y, pos.z)
# Object centric 1 will use the object-center, so replace pos with the UsdGeom.Imageable's (0, 0, 0) coord
if object_centric == 1:
picked_prim = prim.GetStage().GetPrimAtPath(path)
imageable = UsdGeom.Imageable(picked_prim) if picked_prim else None
if imageable:
pos = imageable.ComputeLocalToWorldTransform(time).Transform(Gf.Vec3d(0, 0, 0))
if math.isfinite(pos[0]) and math.isfinite(pos[1]) and math.isfinite(pos[2]):
inv_xform = Gf.Matrix4d(*model.get_as_floats('transform')).GetInverse()
coi = inv_xform.Transform(pos)
model.set_floats('center_of_interest_picked', [pos[0], pos[1], pos[2]])
# Also need to trigger a recomputation of ndc_speed based on our new center of interest
coi_item = model.get_item('center_of_interest')
model.set_floats(coi_item, [coi[0], coi[1], coi[2]])
model._item_changed(coi_item)
        # Re-enable all movement that we previously disabled
model.set_ints('disable_pan', [disable_pan])
model.set_ints('disable_tumble', [disable_tumble])
model.set_ints('disable_look', [disable_look])
model.set_ints('disable_zoom', [disable_zoom])
coi = get_center_of_interest()
model.set_floats('center_of_interest', [coi[0], coi[1], coi[2]])
if object_centric != 0:
# Map the NDC co-ordinates to a viewport's texture-space
mouse, viewport_api = viewport_api.map_ndc_to_texture_pixel(mouse)
if (mouse is None) or (viewport_api is None):
object_centric = 0
if object_centric == 0:
model.set_floats('center_of_interest_picked', [])
return
# Block all movement until the query completes
disable_pan = _optional_bool(model, 'disable_pan')
disable_tumble = _optional_bool(model, 'disable_tumble')
disable_look = _optional_bool(model, 'disable_look')
disable_zoom = _optional_bool(model, 'disable_zoom')
model.set_ints('disable_pan', [1])
model.set_ints('disable_tumble', [1])
model.set_ints('disable_look', [1])
model.set_ints('disable_zoom', [1])
# Start the query
viewport_api.request_query(mouse, query_completed)
class ViewportCameraManipulator(UsdCameraManipulator):
def __init__(self, viewport_api, bindings: dict = None, *args, **kwargs):
super().__init__(bindings, viewport_api.usd_context_name)
self.__viewport_api = viewport_api
# def view_changed(*args):
# return
# from .gesturebase import set_frame_delivered
# set_frame_delivered(True)
# self.__vc_change = viewport_api.subscribe_to_frame_change(view_changed)
def _on_began(self, model: CameraManipulatorModel, mouse):
# We need a viewport and a stage to start. If either are missing disable any further processing.
viewport_api = self.__viewport_api
stage = viewport_api.stage if viewport_api else None
settings = carb.settings.get_settings()
# Store the viewport_id in the model for use later if necessary
model.set_ints('viewport_id', [viewport_api.id if viewport_api else 0])
if not stage:
# TODO: Could we forward this to adjust the viewport_api->omni.scene.ui ?
model.set_ints('disable_tumble', [1])
model.set_ints('disable_look', [1])
model.set_ints('disable_pan', [1])
model.set_ints('disable_zoom', [1])
model.set_ints('disable_fly', [1])
return
cam_path = viewport_api.camera_path
if hasattr(model, '_set_animation_key'):
model._set_animation_key(cam_path)
time = viewport_api.time
cam_prim = stage.GetPrimAtPath(cam_path)
cam_imageable = UsdGeom.Imageable(cam_prim)
camera = UsdGeom.Camera(cam_prim) if cam_imageable else None
if not cam_imageable or not cam_imageable.GetPrim().IsValid():
raise RuntimeError('ViewportCameraManipulator with an invalid UsdGeom.Imageable or Usd.Prim')
# Push the viewport's projection into the model
projection = _flatten_matrix(viewport_api.projection)
model.set_floats('projection', projection)
        # Check if we should actually keep the camera at identity and forward our movements to another object
target_imageable = _check_for_camera_forwarding(cam_imageable)
local_xform, parent_xform = _compute_local_transform(target_imageable, time)
model.set_floats('initial_transform', _flatten_matrix(local_xform))
model.set_floats('transform', _flatten_matrix(local_xform))
# Setup the model if the camera is orthographic (where for Usd we must edit apertures)
# We do this before center-of-interest query to get disabled-state pushed into the model
if camera:
orthographic = int(camera.GetProjectionAttr().Get(time) == 'orthographic')
if orthographic:
model.set_floats('initial_aperture', [camera.GetHorizontalApertureAttr().Get(time),
camera.GetVerticalApertureAttr().Get(time)])
else:
orthographic = int(projection[15] == 1 if projection else False)
model.set_floats('initial_aperture', [])
up_axis = UsdGeom.GetStageUpAxis(stage)
if up_axis == UsdGeom.Tokens.x:
up_axis = Gf.Vec3d(1, 0, 0)
elif up_axis == UsdGeom.Tokens.y:
up_axis = Gf.Vec3d(0, 1, 0)
elif up_axis == UsdGeom.Tokens.z:
up_axis = Gf.Vec3d(0, 0, 1)
if not bool(settings.get("exts/omni.kit.manipulator.camera/forceStageUp")):
up_axis = parent_xform.TransformDir(up_axis).GetNormalized()
model.set_floats('up_axis', [up_axis[0], up_axis[1], up_axis[2]])
        # Disable undo for implicit cameras. This might be better handled with custom metadata / attributes long term
disable_undo = cam_path.pathString in ['/OmniverseKit_Persp', '/OmniverseKit_Front', '/OmniverseKit_Right', '/OmniverseKit_Top']
model.set_ints('disable_undo', [int(disable_undo)])
# Test whether this camera is locked
cam_lock = cam_prim.GetAttribute(KIT_CAMERA_LOCK_ATTRIBUTE)
if cam_lock and cam_lock.Get():
model.set_ints('disable_tumble', [1])
model.set_ints('disable_look', [1])
model.set_ints('disable_pan', [1])
model.set_ints('disable_zoom', [1])
model.set_ints('disable_fly', [1])
else:
model.set_ints('orthographic', [orthographic])
model.set_ints('disable_tumble', [orthographic])
model.set_ints('disable_look', [orthographic])
model.set_ints('disable_pan', [0])
model.set_ints('disable_zoom', [0])
model.set_ints('disable_fly', [0])
# Extract the camera's center of interest, from a property or world-space query
# model.set_ints('object_centric_movement', [1])
object_centric = settings.get('/exts/omni.kit.manipulator.camera/objectCentric/type') or 0
object_centric = _optional_int(self.model, 'object_centric_movement', object_centric)
_setup_center_of_interest(model, target_imageable.GetPrim(), time, object_centric, viewport_api, mouse)
# Setup the model for command execution on key-framed data
had_transform_at_key = False
if not time.IsDefault():
xformable = UsdGeom.Xformable(target_imageable)
if xformable:
for xformOp in xformable.GetOrderedXformOps():
had_transform_at_key = time in xformOp.GetTimeSamples()
if had_transform_at_key:
break
model.set_ints('had_transform_at_key', [had_transform_at_key])
# Set the pan/zoom speed equivalent to the world space travel of the mouse
model.set_floats('world_speed', [1, 1, 1])
        # Make a full drag across the viewport equal to a 180-degree tumble
uv_space = viewport_api.map_ndc_to_texture((1, 1))[0]
model.set_floats('rotation_speed', [((v * 2.0) - 1.0) for v in uv_space] + [1])
# Tell the USD manipulator the context and prim to operate on
self._set_context(viewport_api.usd_context_name, target_imageable.GetPath())
def destroy(self):
self.__vc_change = None
self.__viewport_api = None
super().destroy()
import omni.kit.app
import time
class ZoomEvents:
__instances = set()
@staticmethod
def get_instance(viewport_api):
instance = None
for inst in ZoomEvents.__instances:
if inst.__viewport_api == viewport_api:
instance = inst
break
if instance is None:
instance = ZoomEvents(viewport_api)
ZoomEvents.__instances.add(instance)
else:
instance.__mark_time()
return instance
def __init__(self, viewport_api):
self.__viewport_api = viewport_api
self.__mouse = [0, 0]
self.__manipulator = ViewportCameraManipulator(viewport_api, bindings={'ZoomGesture': 'LeftButton'})
self.__manipulator.on_build()
self.__zoom_gesture = self.__manipulator._screen.gestures[0]
self.__zoom_gesture._disable_flight()
self.__zoom_gesture.on_began(self.__mouse)
        # Newer Kit SDKs expose a dedicated end-of-update order; fall back otherwise
if hasattr(omni.kit.app, 'UPDATE_ORDER_PYTHON_ASYNC_FUTURE_END_UPDATE'):
update_order = omni.kit.app.UPDATE_ORDER_PYTHON_ASYNC_FUTURE_END_UPDATE
else:
update_order = 50
self.__event_sub = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(
self.__on_event, name="omni.kit.manipulator.camera.ZoomEvents", order=update_order
)
def update(self, x, y):
self.__mark_time()
coi = Gf.Vec3d(*self.__manipulator.model.get_as_floats('center_of_interest'))
scale = math.log10(max(10, coi.GetLength())) / 40
self.__mouse = (self.__mouse[0] + x * scale, self.__mouse[1] + y * scale)
self.__zoom_gesture.on_changed(self.__mouse)
self.__mark_time()
def __mark_time(self):
self.__last_time = time.time()
def __time_since_last(self):
return time.time() - self.__last_time
def __on_event(self, e: carb.events.IEvent):
delta = self.__time_since_last()
if delta > 0.1:
self.destroy()
def destroy(self):
self.__event_sub = None
self.__zoom_gesture.on_ended()
self.__manipulator.destroy()
try:
ZoomEvents.__instances.remove(self)
except KeyError:
pass
# Helper function to do a single zoom-operation, from a scroll-wheel for example
def _zoom_operation(x, y, viewport_api):
if not viewport_api:
return None
instance = ZoomEvents.get_instance(viewport_api)
instance.update(x, y)
return True
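# --- Illustrative usage sketch: driving _zoom_operation from a scroll-wheel.
# The callback shape and the 0.05 scale factor are assumptions, not part of
# this module's API; any per-event vertical delta would work.
def _example_scroll_zoom(scroll_delta_y: float, viewport_api):
    # Each call feeds an incremental zoom; ZoomEvents.get_instance reuses the
    # live instance and tears it down ~0.1 seconds after the last update.
    return _zoom_operation(0, scroll_delta_y * 0.05, viewport_api)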
| 14,470 | Python | 43.118902 | 136 | 0.622391 |
omniverse-code/kit/exts/omni.kit.manipulator.camera/omni/kit/manipulator/camera/__init__.py | # Expose these for easier import via from omni.kit.manipulator.camera import XXX
from .manipulator import SceneViewCameraManipulator, CameraManipulatorBase, adjust_center_of_interest
from .usd_camera_manipulator import UsdCameraManipulator
from .viewport_camera_manipulator import ViewportCameraManipulator
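# Illustrative usage (assumes the extension is enabled and a viewport_api is at hand):
#   from omni.kit.manipulator.camera import ViewportCameraManipulator
#   manipulator = ViewportCameraManipulator(viewport_api)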
| 308 | Python | 50.499992 | 101 | 0.863636 |
omniverse-code/kit/exts/omni.kit.manipulator.camera/omni/kit/manipulator/camera/flight_mode.py | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ['FlightModeKeyboard', 'get_keyboard_input']
from .model import CameraManipulatorModel, _accumulate_values, _optional_floats
from omni.ui import scene as sc
import omni.appwindow
from pxr import Gf
import carb
import carb.input
class FlightModeValues:
def __init__(self):
self.__xyz_values = (
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
)
def update(self, i0, i1, value) -> bool:
self.__xyz_values[i0][i1] = value
total = 0
for values in self.__xyz_values:
values[2] = values[1] - values[0]
total += values[2] != 0
return total != 0
@property
def value(self):
return (
self.__xyz_values[0][2],
self.__xyz_values[1][2],
self.__xyz_values[2][2]
)
class FlightModeKeyboard:
__g_char_map = None
@staticmethod
def get_char_map():
if not FlightModeKeyboard.__g_char_map:
key_char_map = {
'w': (2, 0),
's': (2, 1),
'a': (0, 0),
'd': (0, 1),
'q': (1, 0),
'e': (1, 1),
}
            carb_key_map = {getattr(carb.input.KeyboardInput, ascii_val.upper()): index for ascii_val, index in key_char_map.items()}
FlightModeKeyboard.__g_char_map = carb_key_map
for k, v in FlightModeKeyboard.__g_char_map.items():
yield k, v
def __init__(self):
self.__input = None
self.__model = None
self.__stop_events = False
self.__keyboard_sub = None
self.__initial_speed = None
self.__current_adjusted_speed = 1
def init(self, model, iinput, mouse, mouse_button, app_window) -> None:
self.__model = model
if self.__input is None:
self.__input = iinput
self.__keyboard = app_window.get_keyboard()
self.__keyboard_sub = iinput.subscribe_to_keyboard_events(self.__keyboard, self.__on_key)
self.__mouse = mouse
# XXX: This isn't working
# self.__mouse_sub = iinput.subscribe_to_mouse_events(mouse, self.__on_mouse)
# So just query the state on key-down
self.__mouse_button = mouse_button
self.__key_index = {k: v for k, v in FlightModeKeyboard.get_char_map()}
self.__values = FlightModeValues()
# Setup for modifier keys adjusting speed
self.__settings = carb.settings.get_settings()
# Shift or Control can modify flight speed, get the current state
self.__setup_speed_modifiers()
# Need to update all input key states on start
for key, index in self.__key_index.items():
            # Read the key and update the value. The update has to occur whether the key is down or not, as a
            # numeric field might have text focus, causing Carbonite not to deliver __on_key messages
key_val = self.__input.get_keyboard_value(self.__keyboard, key)
self.__values.update(*index, 1 if key_val else 0)
# Record whether a previous invocation had started external events
prev_stop = self.__stop_events
# Test if any interesting key-pair result in a value
key_down = any(self.__values.value)
        # If a key is no longer down, it may not have reached the __on_key subscription if a numeric entry is focused.
        # In that case there is no key down anymore, so kill any external trigger
if prev_stop and not key_down:
prev_stop = False
self.__model._stop_external_events()
self.__stop_events = key_down or prev_stop
self.__model.set_floats('fly', self.__values.value)
if self.__stop_events:
self.__model._start_external_events(True)
def _cancel(self) -> bool:
return self.__input.get_mouse_value(self.__mouse, self.__mouse_button) == 0 if self.__input else True
@property
def active(self) -> bool:
"""Returns if Flight mode is active or not"""
return bool(self.__stop_events)
def __adjust_speed_modifiers(self, cur_speed_mod: float, prev_speed_mod: float):
        # Get the current (unmodified) speed from the settings
initial_speed = self.__settings.get('/persistent/app/viewport/camMoveVelocity') or 1
        # Undo any previous speed modification based on key state
if prev_speed_mod and prev_speed_mod != 1:
initial_speed /= prev_speed_mod
# Store the unadjusted values for restoration later (camMoveVelocity may change underneath modifiers)
self.__initial_speed = initial_speed
# Set the new speed if it is different
cur_speed = initial_speed * cur_speed_mod
self.__settings.set('/persistent/app/viewport/camMoveVelocity', cur_speed)
def __setup_speed_modifiers(self):
# Default to legacy value of modifying speed by doubling / halving
self.__speed_modifier_amount = self.__settings.get('/exts/omni.kit.manipulator.camera/flightMode/keyModifierAmount')
if not self.__speed_modifier_amount:
return
        # Capture the current adjusted speed as the previous modifier
prev_speed_mod = self.__current_adjusted_speed
cur_speed_mod = prev_speed_mod
# Scan the input keys that modify speed and adjust current_adjusted_speed
if self.__input.get_keyboard_value(self.__keyboard, carb.input.KeyboardInput.LEFT_SHIFT):
cur_speed_mod *= self.__speed_modifier_amount
if self.__input.get_keyboard_value(self.__keyboard, carb.input.KeyboardInput.LEFT_CONTROL):
if self.__speed_modifier_amount != 0:
cur_speed_mod /= self.__speed_modifier_amount
# Store new speed into proper place
if prev_speed_mod != cur_speed_mod:
self.__current_adjusted_speed = cur_speed_mod
self.__adjust_speed_modifiers(cur_speed_mod, prev_speed_mod)
def __process_speed_modifier(self, key: carb.input.KeyboardEventType, is_down: bool):
if not self.__speed_modifier_amount:
return
def speed_adjustment(increase: bool):
return self.__speed_modifier_amount if increase else (1 / self.__speed_modifier_amount)
prev_speed_mod = self.__current_adjusted_speed
cur_speed_mod = prev_speed_mod
if key == carb.input.KeyboardInput.LEFT_SHIFT:
cur_speed_mod *= speed_adjustment(is_down)
if key == carb.input.KeyboardInput.LEFT_CONTROL:
cur_speed_mod *= speed_adjustment(not is_down)
if prev_speed_mod != cur_speed_mod:
self.__current_adjusted_speed = cur_speed_mod
self.__adjust_speed_modifiers(cur_speed_mod, prev_speed_mod)
return True
return False
def __on_key(self, e) -> bool:
index, value, speed_changed = None, None, False
event_type = e.type
KeyboardEventType = carb.input.KeyboardEventType
if event_type == KeyboardEventType.KEY_PRESS or event_type == KeyboardEventType.KEY_REPEAT:
index, value = self.__key_index.get(e.input), 1
if event_type == KeyboardEventType.KEY_PRESS:
speed_changed = self.__process_speed_modifier(e.input, True)
elif event_type == KeyboardEventType.KEY_RELEASE:
index, value = self.__key_index.get(e.input), 0
speed_changed = self.__process_speed_modifier(e.input, False)
        # If not a navigation key, pass it on to another handler (unless it was a speed-modification key).
if not index:
return not speed_changed
canceled = self._cancel()
if canceled:
value = 0
has_data = self.__values.update(*index, value)
if hasattr(self.__model, '_start_external_events'):
if has_data:
self.__stop_events = True
self.__model._start_external_events(True)
elif self.__stop_events:
self.__stop_events = False
self.__model._stop_external_events(True)
self.__model.set_floats('fly', self.__values.value)
# self.__model._item_changed(None)
if canceled:
self.destroy()
return False
def end(self):
self.destroy()
return None
def __del__(self):
self.destroy()
def destroy(self) -> None:
if self.__initial_speed is not None:
self.__settings.set('/persistent/app/viewport/camMoveVelocity', self.__initial_speed)
self.__initial_speed = None
self.__current_adjusted_speed = 1
if self.__model:
self.__model.set_floats('fly', None)
if self.__stop_events:
self.__model._stop_external_events()
if self.__keyboard_sub:
self.__input.unsubscribe_to_keyboard_events(self.__keyboard, self.__keyboard_sub)
self.__keyboard_sub = None
self.__keyboard = None
# if self.__mouse_sub:
# self.__input.unsubscribe_to_mouse_events(self.__mouse, self.__mouse_sub)
# self.__mouse_sub = None
self.__mouse = None
self.__input = None
self.__values = None
self.__key_index = None
def get_keyboard_input(model, walk_through: FlightModeKeyboard = None, end_with_mouse_ended: bool = False, mouse_button=carb.input.MouseInput.RIGHT_BUTTON):
iinput = carb.input.acquire_input_interface()
app_window = omni.appwindow.get_default_app_window()
mouse = app_window.get_mouse()
mouse_value = iinput.get_mouse_value(mouse, mouse_button)
if mouse_value:
if walk_through is None:
walk_through = FlightModeKeyboard()
walk_through.init(model, iinput, mouse, mouse_button, app_window)
elif walk_through and end_with_mouse_ended:
walk_through.destroy()
walk_through = None
return walk_through
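# --- Illustrative usage sketch: polling flight mode once per update tick.
# The per-frame caller is an assumption; `model` is any CameraManipulatorModel.
def _example_poll_flight_mode(model, walk_through: FlightModeKeyboard = None):
    # While the bound mouse button is held this creates/refreshes the keyboard
    # handler; end_with_mouse_ended=True destroys it as soon as the button is
    # released. Feed the return value back in on the next tick so key state persists.
    return get_keyboard_input(model, walk_through, end_with_mouse_ended=True)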
| 10,350 | Python | 39.913043 | 156 | 0.604444 |
omniverse-code/kit/exts/omni.kit.manipulator.camera/omni/kit/manipulator/camera/math.py | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ['TransformAccumulator']
from pxr import Gf
class TransformAccumulator:
def __init__(self, initial_xform: Gf.Matrix4d):
self.__inverse_xform = initial_xform.GetInverse() if initial_xform else None
def get_rotation_axis(self, up_axis: Gf.Vec3d):
if up_axis:
return self.__inverse_xform.TransformDir(up_axis)
else:
return self.__inverse_xform.TransformDir(Gf.Vec3d(0, 1, 0))
def get_translation(self, amount: Gf.Vec3d):
return Gf.Matrix4d().SetTranslate(amount)
def get_tumble(self, degrees: Gf.Vec3d, center_of_interest: Gf.Vec3d, up_axis: Gf.Vec3d):
# Rotate around proper scene axis
rotate_axis = self.get_rotation_axis(up_axis)
# Move to center_of_interest, rotate and move back
# No need for identity, all SetXXX methods will do that for us
translate = Gf.Matrix4d().SetTranslate(-center_of_interest)
# X-Y in ui/mouse are swapped so x-move is rotate around Y, and Y-move is rotate around X
rotate_x = Gf.Matrix4d().SetRotate(Gf.Rotation(Gf.Vec3d(1, 0, 0), degrees[1]))
rotate_y = Gf.Matrix4d().SetRotate(Gf.Rotation(rotate_axis, degrees[0]))
return translate * rotate_x * rotate_y * translate.GetInverse()
def get_look(self, degrees: Gf.Vec3d, up_axis: Gf.Vec3d):
# Rotate around proper scene axis
rotate_axis = self.get_rotation_axis(up_axis)
# X-Y in ui/mouse are swapped so x-move is rotate around Y, and Y-move is rotate around X
rotate_x = Gf.Matrix4d().SetRotate(Gf.Rotation(Gf.Vec3d(1, 0, 0), degrees[1]))
rotate_y = Gf.Matrix4d().SetRotate(Gf.Rotation(rotate_axis, degrees[0]))
return rotate_x * rotate_y
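# --- Illustrative usage sketch: composing a tumble delta. The 15-degree angle,
# Y-up axis and 100-unit center of interest are arbitrary example values.
def _example_tumble(camera_xform: Gf.Matrix4d) -> Gf.Matrix4d:
    xforms = TransformAccumulator(camera_xform)
    # Horizontal mouse movement maps to rotation about the up axis, pivoting
    # around the camera-space center of interest.
    delta = xforms.get_tumble(Gf.Vec3d(15, 0, 0), Gf.Vec3d(0, 0, -100), Gf.Vec3d(0, 1, 0))
    return delta * camera_xform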
| 2,166 | Python | 44.145832 | 97 | 0.686057 |
omniverse-code/kit/exts/omni.kit.manipulator.camera/omni/kit/manipulator/camera/usd_camera_manipulator.py | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from .manipulator import CameraManipulatorBase, adjust_center_of_interest
from .model import _optional_bool, _flatten_matrix
from omni.kit import commands, undo
import omni.usd
from pxr import Usd, UsdGeom, Sdf, Tf, Gf
import carb.profiler
import carb.settings
import math
from typing import List
__all__ = ['UsdCameraManipulator']
KIT_COI_ATTRIBUTE = 'omni:kit:centerOfInterest'
KIT_LOOKTHROUGH_ATTRIBUTE = 'omni:kit:viewport:lookThrough:target'
KIT_CAMERA_LOCK_ATTRIBUTE = 'omni:kit:cameraLock'
def _get_context_stage(usd_context_name: str):
return omni.usd.get_context(usd_context_name).get_stage()
def _compute_local_transform(imageable: UsdGeom.Imageable, time: Usd.TimeCode):
# xformable = UsdGeom.Xformable(imageable)
# if xformable:
# return xformable.GetLocalTransformation(time)
world_xform = imageable.ComputeLocalToWorldTransform(time)
parent_xform = imageable.ComputeParentToWorldTransform(time)
parent_ixform = parent_xform.GetInverse()
return (world_xform * parent_ixform), parent_ixform
class SRTDecomposer:
def __init__(self, prim: Usd.Prim, time: Usd.TimeCode = None):
if time is None:
time = Usd.TimeCode.Default()
xform_srt = omni.usd.get_local_transform_SRT(prim, time)
xform_srt = (Gf.Vec3d(xform_srt[0]), Gf.Vec3d(xform_srt[1]), Gf.Vec3i(xform_srt[2]), Gf.Vec3d(xform_srt[3]))
self.__start_scale, self.__start_rotation_euler, self.__start_rotation_order, self.__start_translation = xform_srt
self.__current_scale, self.__current_rotation_euler, self.__current_rotation_order, self.__current_translation = xform_srt
@staticmethod
def __repeat(t: float, length: float) -> float:
return t - (math.floor(t / length) * length)
@staticmethod
def __generate_compatible_euler_angles(euler: Gf.Vec3d, rotation_order: Gf.Vec3i) -> List[Gf.Vec3d]:
equal_eulers = [euler]
mid_order = rotation_order[1]
equal = Gf.Vec3d()
for i in range(3):
if i == mid_order:
equal[i] = 180 - euler[i]
else:
equal[i] = euler[i] + 180
equal_eulers.append(equal)
for i in range(3):
equal[i] -= 360
equal_eulers.append(equal)
return equal_eulers
@staticmethod
def __find_best_euler_angles(old_rot_vec: Gf.Vec3d, new_rot_vec: Gf.Vec3d, rotation_order: Gf.Vec3i) -> Gf.Vec3d:
equal_eulers = SRTDecomposer.__generate_compatible_euler_angles(new_rot_vec, rotation_order)
nearest_euler = None
for euler in equal_eulers:
for i in range(3):
euler[i] = SRTDecomposer.__repeat(euler[i] - old_rot_vec[i] + 180.0, 360.0) + old_rot_vec[i] - 180.0
if nearest_euler is None:
nearest_euler = euler
else:
distance_1 = (nearest_euler - old_rot_vec).GetLength()
distance_2 = (euler - old_rot_vec).GetLength()
if distance_2 < distance_1:
nearest_euler = euler
return nearest_euler
def update(self, xform: Gf.Matrix4d):
# Extract new translation
self.__current_translation = xform.ExtractTranslation()
# Extract new euler rotation
ro = self.__start_rotation_order
old_s_mtx = Gf.Matrix4d().SetScale(self.__start_scale)
old_t_mtx = Gf.Matrix4d().SetTranslate(self.__start_translation)
rot_new = (old_s_mtx.GetInverse() * xform * old_t_mtx.GetInverse()).ExtractRotation()
axes = [Gf.Vec3d.XAxis(), Gf.Vec3d.YAxis(), Gf.Vec3d.ZAxis()]
decomp_rot = rot_new.Decompose(axes[ro[2]], axes[ro[1]], axes[ro[0]])
index_order = Gf.Vec3i()
for i in range(3):
index_order[ro[i]] = 2 - i
new_rot_vec = Gf.Vec3d(decomp_rot[index_order[0]], decomp_rot[index_order[1]], decomp_rot[index_order[2]])
new_rot_vec = self.__find_best_euler_angles(self.__start_rotation_euler, new_rot_vec, self.__start_rotation_order)
self.__current_rotation_euler = new_rot_vec
# Because this is a camera manipulation, we purposefully ignore scale and rotation order changes
# They remain constant across the interaction.
return self
@property
def translation(self):
return self.__current_translation
@property
def rotation(self):
return self.__current_rotation_euler
@property
def start_translation(self):
return self.__start_translation
@property
def start_rotation(self):
        return self.__start_rotation_euler
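# --- Illustrative usage sketch: folding a new camera matrix back into SRT
# components; the prim and matrix are assumptions supplied by the caller.
def _example_srt_roundtrip(prim: Usd.Prim, new_xform: Gf.Matrix4d):
    decomposer = SRTDecomposer(prim)
    decomposer.update(new_xform)
    # Scale and rotation order are intentionally left untouched by the class.
    return decomposer.translation, decomposer.rotation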
class ExternalUsdCameraChange():
def __init__(self, time: Usd.TimeCode):
self.__tf_listener = None
self.__usd_context_name, self.__prim_path = None, None
self.__updates_paused = False
self.__kill_external_animation = None
self.__time = time
def __del__(self):
self.destroy()
def update(self, model, usd_context_name: str, prim_path: Sdf.Path):
self.__kill_external_animation = getattr(model, '_kill_external_animation', None)
if self.__kill_external_animation is None:
return
self.__prim_path = prim_path
if usd_context_name != self.__usd_context_name:
self.__usd_context_name = usd_context_name
if self.__tf_listener:
self.__tf_listener.Revoke()
self.__tf_listener = None
if not self.__tf_listener:
try:
stage = _get_context_stage(self.__usd_context_name)
if stage:
self.__tf_listener = Tf.Notice.Register(Usd.Notice.ObjectsChanged, self.__object_changed, stage)
except ImportError:
pass
def destroy(self):
if self.__tf_listener:
self.__tf_listener.Revoke()
self.__tf_listener = None
self.__usd_context_name, self.__prim_path = None, None
self.__kill_external_animation = None
@carb.profiler.profile
def __object_changed(self, notice, sender):
if self.__updates_paused:
return
if not sender or sender != _get_context_stage(self.__usd_context_name):
return
for p in notice.GetChangedInfoOnlyPaths():
if (p.IsPropertyPath()
and p.GetPrimPath() == self.__prim_path
and UsdGeom.Xformable.IsTransformationAffectedByAttrNamed(p.name)):
xformable = UsdGeom.Xformable(sender.GetPrimAtPath(self.__prim_path))
xform = _flatten_matrix(xformable.GetLocalTransformation(self.__time)) if xformable else None
self.__kill_external_animation(True, xform)
break
def pause_tracking(self):
self.__updates_paused = True
def start_tracking(self):
self.__updates_paused = False
# Base Usd implementation that will set model back to Usd data via kit-commands
class UsdCameraManipulator(CameraManipulatorBase):
def __init__(self, bindings: dict = None, usd_context_name: str = '', prim_path: Sdf.Path = None, *args, **kwargs):
self.__usd_context_name, self.__prim_path = None, None
self.__external_change_tracker = None
super().__init__(bindings, *args, **kwargs)
self._set_context(usd_context_name, prim_path)
def _set_context(self, usd_context_name: str, prim_path: Sdf.Path):
self.__usd_context_name = usd_context_name
self.__prim_path = prim_path
self.__srt_decompose = None
if prim_path and carb.settings.get_settings().get('/persistent/app/camera/controllerUseSRT'):
stage = _get_context_stage(self.__usd_context_name)
if stage:
prim = stage.GetPrimAtPath(prim_path)
if prim:
model = self.model
time = model.get_as_floats('time') if model else None
time = Usd.TimeCode(time[0]) if time else Usd.TimeCode.Default()
self.__srt_decompose = SRTDecomposer(prim)
def _on_began(self, model, *args, **kwargs):
super()._on_began(model, *args, **kwargs)
stage = _get_context_stage(self.__usd_context_name)
if not stage:
# TODO: Could we forward this to adjust the viewport_api->omni.scene.ui ?
model.set_ints('disable_tumble', [1])
model.set_ints('disable_look', [1])
model.set_ints('disable_pan', [1])
model.set_ints('disable_zoom', [1])
model.set_ints('disable_fly', [1])
return
cam_prim = stage.GetPrimAtPath(self.__prim_path)
cam_imageable = UsdGeom.Imageable(cam_prim) if bool(cam_prim) else None
if not cam_imageable or not cam_imageable.GetPrim().IsValid():
raise RuntimeError('ViewportCameraManipulator with an invalid UsdGeom.Imageable or Usd.Prim')
        # Check if we should actually keep camera at identity and forward our movements to another object
local_xform, parent_xform = _compute_local_transform(cam_imageable, Usd.TimeCode.Default())
model.set_floats('initial_transform', _flatten_matrix(local_xform))
model.set_floats('transform', _flatten_matrix(local_xform))
up_axis = UsdGeom.GetStageUpAxis(stage)
if up_axis == UsdGeom.Tokens.x:
up_axis = Gf.Vec3d(1, 0, 0)
elif up_axis == UsdGeom.Tokens.y:
up_axis = Gf.Vec3d(0, 1, 0)
elif up_axis == UsdGeom.Tokens.z:
up_axis = Gf.Vec3d(0, 0, 1)
if not bool(carb.settings.get_settings().get("exts/omni.kit.manipulator.camera/forceStageUp")):
up_axis = parent_xform.TransformDir(up_axis).GetNormalized()
model.set_floats('up_axis', [up_axis[0], up_axis[1], up_axis[2]])
@carb.profiler.profile
def __vp1_cooperation(self, prim_path, time, usd_context_name: str, center_of_interest_end):
try:
from omni.kit import viewport_legacy
vp1_iface = viewport_legacy.get_viewport_interface()
final_transform, coi_world, pos_world, cam_path = None, None, None, None
for vp1_handle in vp1_iface.get_instance_list():
vp1_window = vp1_iface.get_viewport_window(vp1_handle)
if not vp1_window or (vp1_window.get_usd_context_name() != usd_context_name):
continue
if not final_transform:
                # Save the path's string representation
cam_path = prim_path.pathString
                # We need to calculate world-space transform for VP-1, important for nested cameras
# TODO: UsdBBoxCache.ComputeWorldBound in compute_path_world_transform doesn't seem to work for non-geometry:
# final_transform = omni.usd.get_context(usd_context_name).compute_path_world_transform(cam_path)
# final_transform = Gf.Matrix4d(*final_transform)
final_transform = UsdGeom.Imageable(prim_path).ComputeLocalToWorldTransform(time)
# center_of_interest_end is adjusted and returned for VP-2
center_of_interest_end = Gf.Vec3d(0, 0, -center_of_interest_end.GetLength())
# Pass world center-of-interest to VP-1 set_camera_target
coi_world = final_transform.Transform(center_of_interest_end)
# Pass world position to VP-1 set_camera_position
pos_world = final_transform.Transform(Gf.Vec3d(0, 0, 0))
# False for first call to set target only, True for second to trigger radius re-calculation
                # This isn't particularly efficient; but 'has to be' for now due to some Viewport-1 internals
vp1_window.set_camera_target(cam_path, coi_world[0], coi_world[1], coi_world[2], False)
vp1_window.set_camera_position(cam_path, pos_world[0], pos_world[1], pos_world[2], True)
except Exception:
pass
return center_of_interest_end
@carb.profiler.profile
def on_model_updated(self, item):
        # Handle case of inertia being applied through a new stage-open
usd_context_name = self.__usd_context_name
if usd_context_name is None or _get_context_stage(usd_context_name) is None:
return
model = self.model
prim_path = self.__prim_path
time = model.get_as_floats('time')
time = Usd.TimeCode(time[0]) if time else Usd.TimeCode.Default()
undoable = False
def run_command(cmd_name, **kwargs):
carb.profiler.begin(1, cmd_name)
if undoable:
commands.execute(cmd_name, **kwargs)
else:
commands.create(cmd_name, **kwargs).do()
carb.profiler.end(1)
try:
if item == model.get_item('transform'):
if self.__external_change_tracker:
self.__external_change_tracker.update(model, usd_context_name, prim_path)
self.__external_change_tracker.pause_tracking()
# We are undoable on the final event if undo hasn't been disabled on the model
undoable = _optional_bool(self.model, 'interaction_ended') and not _optional_bool(self.model, 'disable_undo')
if undoable:
undo.begin_group()
final_transform = Gf.Matrix4d(*model.get_as_floats('transform'))
initial_transform = model.get_as_floats('initial_transform')
initial_transform = Gf.Matrix4d(*initial_transform) if initial_transform else initial_transform
had_transform_at_key = _optional_bool(self.model, 'had_transform_at_key')
if self.__srt_decompose:
                    srt_decompose = self.__srt_decompose.update(final_transform)
                    run_command(
                        'TransformPrimSRTCommand',
                        path=prim_path,
                        new_translation=srt_decompose.translation,
                        new_rotation_euler=srt_decompose.rotation,
                        # new_scale=srt_decompose.scale,
                        # new_rotation_order=srt_decompose.rotation_order,
                        old_translation=srt_decompose.start_translation,
                        old_rotation_euler=srt_decompose.start_rotation,
                        # old_rotation_order=srt_decompose.start_rotation_order,
                        # old_scale=srt_decompose.start_scale,
time_code=time,
had_transform_at_key=had_transform_at_key,
usd_context_name=usd_context_name
)
else:
run_command(
'TransformPrimCommand',
path=prim_path,
new_transform_matrix=final_transform,
old_transform_matrix=initial_transform,
time_code=time,
had_transform_at_key=had_transform_at_key,
usd_context_name=usd_context_name
)
center_of_interest_start, center_of_interest_end = adjust_center_of_interest(model, initial_transform, final_transform)
if center_of_interest_start and center_of_interest_end:
                    # See if we need to adjust center-of-interest to cooperate with Viewport-1, which can only do a one-dimensional version
center_of_interest_end = self.__vp1_cooperation(prim_path, time, usd_context_name, center_of_interest_end)
run_command(
'ChangePropertyCommand',
prop_path=prim_path.AppendProperty(KIT_COI_ATTRIBUTE),
value=center_of_interest_end,
prev=center_of_interest_start,
usd_context_name=usd_context_name
)
elif item == model.get_item('current_aperture'):
# We are undoable on the final event if undo hasn't been disabled on the model
undoable = _optional_bool(self.model, 'interaction_ended') and not _optional_bool(self.model, 'disable_undo')
if undoable:
undo.begin_group()
initial_aperture = model.get_as_floats('initial_aperture')
current_aperture = model.get_as_floats('current_aperture')
prop_names = ('horizontalAperture', 'verticalAperture')
for initial_value, current_value, prop_name in zip(initial_aperture, current_aperture, prop_names):
run_command(
'ChangePropertyCommand',
prop_path=prim_path.AppendProperty(prop_name),
value=current_value,
prev=initial_value,
timecode=time,
usd_context_name=usd_context_name
)
elif item == model.get_item('interaction_animating'):
interaction_animating = model.get_as_ints(item)
if interaction_animating and interaction_animating[0]:
if not self.__external_change_tracker:
self.__external_change_tracker = ExternalUsdCameraChange(time)
self.__external_change_tracker.update(model, usd_context_name, prim_path)
self.__external_change_tracker.pause_tracking()
elif self.__external_change_tracker:
self.__external_change_tracker.destroy()
self.__external_change_tracker = None
finally:
if undoable:
undo.end_group()
if self.__external_change_tracker:
self.__external_change_tracker.start_tracking()
| 18,397 | Python | 44.539604 | 137 | 0.594717 |
omniverse-code/kit/exts/omni.kit.manipulator.camera/omni/kit/manipulator/camera/model.py | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ['CameraManipulatorModel']
from omni.ui import scene as sc
from pxr import Gf
from typing import Any, Callable, List, Sequence, Union
from .math import TransformAccumulator
from .animation import AnimationEventStream
import time
import carb.profiler
import carb.settings
ALMOST_ZERO = 1.e-4
def _flatten_matrix(matrix: Gf.Matrix4d):
return [matrix[0][0], matrix[0][1], matrix[0][2], matrix[0][3],
matrix[1][0], matrix[1][1], matrix[1][2], matrix[1][3],
matrix[2][0], matrix[2][1], matrix[2][2], matrix[2][3],
matrix[3][0], matrix[3][1], matrix[3][2], matrix[3][3]]
def _optional_floats(model: sc.AbstractManipulatorModel, item: str, default_value: Sequence[float] = None):
item = model.get_item(item)
if item:
values = model.get_as_floats(item)
if values:
return values
return default_value
def _optional_float(model: sc.AbstractManipulatorModel, item: str, default_value: float = 0):
item = model.get_item(item)
if item:
values = model.get_as_floats(item)
if values:
return values[0]
return default_value
def _optional_int(model: sc.AbstractManipulatorModel, item: str, default_value: int = 0):
item = model.get_item(item)
if item:
values = model.get_as_ints(item)
if values:
return values[0]
return default_value
def _optional_bool(model: sc.AbstractManipulatorModel, item: str, default_value: bool = False):
return _optional_int(model, item, default_value)
def _accumulate_values(model: sc.AbstractManipulatorModel, name: str, x: float, y: float, z: float):
item = model.get_item(name)
if item:
values = model.get_as_floats(item)
model.set_floats(item, [values[0] + x, values[1] + y, values[2] + z] if values else [x, y, z])
return item
def _scalar_or_vector(value: Sequence[float]):
acceleration_len = len(value)
if acceleration_len == 1:
return Gf.Vec3d(value[0], value[0], value[0])
if acceleration_len == 2:
return Gf.Vec3d(value[0], value[1], 1)
return Gf.Vec3d(value[0], value[1], value[2])
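# Illustrative expansions of _scalar_or_vector:
#   _scalar_or_vector([2])       -> Gf.Vec3d(2, 2, 2)  (scalar is broadcast)
#   _scalar_or_vector([2, 4])    -> Gf.Vec3d(2, 4, 1)  (z defaults to 1)
#   _scalar_or_vector([2, 4, 8]) -> Gf.Vec3d(2, 4, 8)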
class ModelState:
def __reduce_value(self, vec: Gf.Vec3d):
if vec and (vec[0] == 0 and vec[1] == 0 and vec[2] == 0):
return None
return vec
def __expand_value(self, vec: Gf.Vec3d, alpha: float):
if vec:
vec = tuple(v * alpha for v in vec)
if vec[0] != 0 or vec[1] != 0 or vec[2] != 0:
return vec
return None
def __init__(self, tumble: Gf.Vec3d = None, look: Gf.Vec3d = None, move: Gf.Vec3d = None, fly: Gf.Vec3d = None):
self.__tumble = self.__reduce_value(tumble)
self.__look = self.__reduce_value(look)
self.__move = self.__reduce_value(move)
self.__fly = self.__reduce_value(fly)
def any_values(self):
return self.__tumble or self.__look or self.__move or self.__fly
def apply_alpha(self, alpha: float):
return (self.__expand_value(self.__tumble, alpha),
self.__expand_value(self.__look, alpha),
self.__expand_value(self.__move, alpha),
self.__expand_value(self.__fly, alpha))
@property
def tumble(self):
return self.__tumble
@property
def look(self):
return self.__look
@property
def move(self):
return self.__move
@property
def fly(self):
return self.__fly
class Velocity:
def __init__(self, acceleration: Sequence[float], dampening: Sequence[float] = (10,), clamp_dt: float = 0.15):
self.__velocity = Gf.Vec3d(0, 0, 0)
self.__acceleration_rate = _scalar_or_vector(acceleration)
self.__dampening = _scalar_or_vector(dampening)
self.__clamp_dt = clamp_dt
def apply(self, value: Gf.Vec3d, dt: float, alpha: float = 1):
        ### XXX: We're not locked to anything and events can come in spuriously,
        ### so clamp the max delta-time to a value (if this is too high, it can introduce lag)
if (dt > 0) and (dt > self.__clamp_dt):
dt = self.__clamp_dt
if value:
acceleration = Gf.CompMult(value, self.__acceleration_rate) * alpha
self.__velocity += acceleration * dt
damp_factor = tuple(max(min(v * dt, 0.75), 0) for v in self.__dampening)
self.__velocity += Gf.CompMult(-self.__velocity, Gf.Vec3d(*damp_factor))
if Gf.Dot(self.__velocity, self.__velocity) < ALMOST_ZERO:
self.__velocity = Gf.Vec3d(0, 0, 0)
return self.__velocity * dt
@staticmethod
def create(model: sc.AbstractManipulatorModel, mode: str, clamp_dt: float = 0.15):
acceleration = _optional_floats(model, f'{mode}_acceleration')
if acceleration is None:
return None
dampening = _optional_floats(model, f'{mode}_dampening')
return Velocity(acceleration, dampening or (10, 10, 10), clamp_dt)
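# --- Illustrative usage sketch: integrating a Velocity over several frames.
# The 'fly' mode name matches the model items above; the 60 Hz delta-time and
# frame count are arbitrary example values.
def _example_velocity_loop(model, raw_fly: Gf.Vec3d, frames: int = 10, dt: float = 1.0 / 60.0):
    # Create once (reads 'fly_acceleration' / 'fly_dampening' from the model),
    # then apply per tick so the internal velocity persists across frames.
    velocity = Velocity.create(model, 'fly')
    deltas = []
    for _ in range(frames):
        # apply() accelerates toward the input, then dampens; the return value
        # is the positional delta to fold into the camera transform this frame.
        deltas.append(velocity.apply(raw_fly, dt) if velocity else raw_fly * dt)
    return deltas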
class Decay:
def __init__(self):
pass
def apply(self, value: Gf.Vec3d, dt: float, alpha: float = 1):
return value * alpha if value else None
class CameraManipulatorModel(sc.AbstractManipulatorModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__settings = carb.settings.get_settings()
self.__items = {
# 'view': (sc.AbstractManipulatorItem(), 16),
'projection': (sc.AbstractManipulatorItem(), 16),
'transform': (sc.AbstractManipulatorItem(), 16),
'orthographic': (sc.AbstractManipulatorItem(), 1),
'center_of_interest': (sc.AbstractManipulatorItem(), 3),
# Accumulated movement
'move': (sc.AbstractManipulatorItem(), 3),
'tumble': (sc.AbstractManipulatorItem(), 3),
'look': (sc.AbstractManipulatorItem(), 3),
'fly': (sc.AbstractManipulatorItem(), 3),
            # Optional speed for world (pan, truck) and rotation (tumble, look) operations
# Can be set individually for x, y, z or as a scalar
'world_speed': (sc.AbstractManipulatorItem(), (3, 1)),
'move_speed': (sc.AbstractManipulatorItem(), (3, 1)),
'rotation_speed': (sc.AbstractManipulatorItem(), (3, 1)),
'tumble_speed': (sc.AbstractManipulatorItem(), (3, 1)),
'look_speed': (sc.AbstractManipulatorItem(), (3, 1)),
'fly_speed': (sc.AbstractManipulatorItem(), (3, 1)),
            # Inertia enabled, and the number of seconds to apply it for
'inertia_enabled': (sc.AbstractManipulatorItem(), 1),
'inertia_seconds': (sc.AbstractManipulatorItem(), 1),
            # Power of inertia decay (for an ease-out); 0 and 1 are linear
'inertia_decay': (sc.AbstractManipulatorItem(), 1),
# Acceleration and dampening values
'tumble_acceleration': (sc.AbstractManipulatorItem(), (3, 1)),
'look_acceleration': (sc.AbstractManipulatorItem(), (3, 1)),
'move_acceleration': (sc.AbstractManipulatorItem(), (3, 1)),
'fly_acceleration': (sc.AbstractManipulatorItem(), (3, 1)),
'tumble_dampening': (sc.AbstractManipulatorItem(), (3, 1)),
'look_dampening': (sc.AbstractManipulatorItem(), (3, 1)),
'move_dampening': (sc.AbstractManipulatorItem(), (3, 1)),
'fly_dampening': (sc.AbstractManipulatorItem(), (3, 1)),
'fly_mode_lock_view': (sc.AbstractManipulatorItem(), 1),
# Decimal precision of rotation operations
'rotation_precision': (sc.AbstractManipulatorItem(), 1),
# Mapping of units from input to world
'ndc_scale': (sc.AbstractManipulatorItem(), 3),
# Optional int-as-bool items
'disable_pan': (sc.AbstractManipulatorItem(), 1),
'disable_tumble': (sc.AbstractManipulatorItem(), 1),
'disable_look': (sc.AbstractManipulatorItem(), 1),
'disable_zoom': (sc.AbstractManipulatorItem(), 1),
'disable_fly': (sc.AbstractManipulatorItem(), 1),
'disable_undo': (sc.AbstractManipulatorItem(), 1),
'object_centric_movement': (sc.AbstractManipulatorItem(), 1),
'viewport_id': (sc.AbstractManipulatorItem(), 1),
# USD specific concepts
'up_axis': (sc.AbstractManipulatorItem(), 3),
'current_aperture': (sc.AbstractManipulatorItem(), 2),
'initial_aperture': (sc.AbstractManipulatorItem(), 2),
'had_transform_at_key': (sc.AbstractManipulatorItem(), 1),
'time': (sc.AbstractManipulatorItem(), 1),
# Internal signal for final application of the changes, use disable_undo for user-control
'interaction_ended': (sc.AbstractManipulatorItem(), 1), # Signal that undo should be applied
            'interaction_active': (sc.AbstractManipulatorItem(), 1), # Signal that a gesture is manipulating camera
'interaction_animating': (sc.AbstractManipulatorItem(), 1), # Signal that an animation is manipulating camera
'center_of_interest_start': (sc.AbstractManipulatorItem(), 3),
'center_of_interest_picked': (sc.AbstractManipulatorItem(), 3),
'adjust_center_of_interest': (sc.AbstractManipulatorItem(), 1),
'initial_transform': (sc.AbstractManipulatorItem(), 16),
}
self.__values = {item: [] for item, _ in self.__items.values()}
self.__values[self.__items.get('look_speed')[0]] = [1, 0.5]
self.__values[self.__items.get('fly_speed')[0]] = [1]
self.__values[self.__items.get('inertia_seconds')[0]] = [0.5]
self.__values[self.__items.get('inertia_enabled')[0]] = [0]
# self.__values[self.__items.get('interaction_active')[0]] = [0]
# self.__values[self.__items.get('interaction_animating')[0]] = [0]
self.__settings_changed_subs = []
def read_inertia_setting(mode: str, setting_scale: float):
global_speed_key = f'/persistent/exts/omni.kit.manipulator.camera/{mode}Speed'
subscribe = self.__settings.subscribe_to_tree_change_events
self.__settings_changed_subs.append(
subscribe(global_speed_key,
lambda *args, **kwargs: self.__speed_setting_changed(*args, **kwargs,
mode=mode, setting_scale=setting_scale)),
)
self.__speed_setting_changed(None, None, carb.settings.ChangeEventType.CHANGED, mode, setting_scale)
accel = self.__settings.get(f'/exts/omni.kit.manipulator.camera/{mode}Acceleration')
damp = self.__settings.get(f'/exts/omni.kit.manipulator.camera/{mode}Dampening')
            if accel is None or damp is None:
                return
self.__values[self.__items.get(f'{mode}_acceleration')[0]] = [accel]
self.__values[self.__items.get(f'{mode}_dampening')[0]] = [damp]
read_inertia_setting('fly', 1)
read_inertia_setting('look', 180)
read_inertia_setting('move', 1)
read_inertia_setting('tumble', 360)
self.__settings_changed_subs.append(
self.__settings.subscribe_to_node_change_events('/persistent/exts/omni.kit.manipulator.camera/flyViewLock',
self.__fly_mode_lock_view_changed)
)
self.__fly_mode_lock_view_changed(None, carb.settings.ChangeEventType.CHANGED)
self.__animation_key = id(self)
self.__flight_inertia_active = False
self.__last_applied = None
# Faster access for key-values looked up during animation
self.__move = self.__items.get('move')[0]
self.__tumble = self.__items.get('tumble')[0]
self.__look = self.__items.get('look')[0]
self.__fly = self.__items.get('fly')[0]
self.__transform = self.__items.get('transform')[0]
self.__projection = self.__items.get('projection')[0]
self.__center_of_interest = self.__items.get('center_of_interest')[0]
self.__adjust_center_of_interest = self.__items.get('adjust_center_of_interest')[0]
self.__inertia_enabled = self.__items.get('inertia_enabled')[0]
self.__inertia_seconds = self.__items.get('inertia_seconds')[0]
self.__tumble_velocity = None
self.__look_velocity = None
self.__move_velocity = None
self.__fly_velocity = None
self.__intertia_state = None
self.__anim_stream = None
self.__anim_stopped = 0
self.__mode = None
def __speed_setting_changed(self, tree_item: carb.dictionary.Item, changed_item: carb.dictionary.Item,
event_type: carb.settings.ChangeEventType, mode: str, setting_scale: float = 1):
if tree_item is None:
speed = self.__settings.get(f'/persistent/exts/omni.kit.manipulator.camera/{mode}Speed')
else:
speed = tree_item.get_dict()
if speed:
if (not isinstance(speed, tuple)) and (not isinstance(speed, list)):
speed = [speed]
self.__values[self.__items.get(f'{mode}_speed')[0]] = [float(x) / setting_scale for x in speed]
def __fly_mode_lock_view_changed(self, changed_item: carb.dictionary.Item, event_type: carb.settings.ChangeEventType):
model_key = self.__items.get('fly_mode_lock_view')[0]
setting_key = '/persistent/exts/omni.kit.manipulator.camera/flyViewLock'
self.__values[model_key] = [self.__settings.get(setting_key)]
def __del__(self):
self.destroy()
def destroy(self):
self.__destroy_animation()
if self.__settings and self.__settings_changed_subs:
for subscription in self.__settings_changed_subs:
self.__settings.unsubscribe_to_change_events(subscription)
self.__settings_changed_subs = None
self.__settings = None
def __destroy_animation(self):
if self.__anim_stream:
self.__anim_stream.destroy()
self.__anim_stream = None
self.__mark_animating(0)
def __validate_arguments(self, name: Union[str, sc.AbstractManipulatorItem],
values: Sequence[Union[int, float]] = None) -> sc.AbstractManipulatorItem:
if isinstance(name, sc.AbstractManipulatorItem):
return name
item, expected_len = self.__items.get(name, (None, None))
if item is None:
raise KeyError(f"CameraManipulatorModel doesn't understand values of {name}")
if values and (len(values) != expected_len):
if (not isinstance(expected_len, tuple)) or (not len(values) in expected_len):
raise ValueError(f"CameraManipulatorModel {name} takes {expected_len} values, got {len(values)}")
return item
def get_item(self, name: str) -> sc.AbstractManipulatorItem():
return self.__items.get(name, (None, None))[0]
def set_ints(self, item: Union[str, sc.AbstractManipulatorItem], values: Sequence[int]):
item = self.__validate_arguments(item, values)
self.__values[item] = values
def set_floats(self, item: Union[str, sc.AbstractManipulatorItem], values: Sequence[int]):
item = self.__validate_arguments(item, values)
self.__values[item] = values
def get_as_ints(self, item: Union[str, sc.AbstractManipulatorItem]) -> List[int]:
item = self.__validate_arguments(item)
return self.__values[item]
def get_as_floats(self, item: Union[str, sc.AbstractManipulatorItem]) -> List[float]:
item = self.__validate_arguments(item)
return self.__values[item]
@carb.profiler.profile
def _item_changed(self, item: Union[str, sc.AbstractManipulatorItem], delta_time: float = None, alpha: float = None):
# item == None is the signal to push all model values into a final matrix at 'transform'
if item is not None:
if not isinstance(item, sc.AbstractManipulatorItem):
item = self.__items.get(item)
item = item[0] if item else None
# Either of these adjust the pixel-to-world mapping
if item == self.__center_of_interest or item == self.__projection:
self.calculate_pixel_to_world(Gf.Vec3d(self.get_as_floats(self.__center_of_interest)))
super()._item_changed(item)
return
if self.__anim_stream and delta_time is None:
# If this is the end of an interaction (mouse up), return and let animation/inertia continue as is.
if _optional_int(self, 'interaction_ended', 0) or (self.__intertia_state is None):
return
            # If inertia is active, look values should be passed through, so the look-rotation is still applied
            # while the camera drifts. If there is no look applied, then inertia is killed for any other movement.
look = self.get_as_floats(self.__look) if self.__flight_inertia_active else None
if look:
                # Destroy the look-velocity correction; otherwise look will lag as the camera drifts through inertia
self.__look_velocity = None
else:
self._kill_external_animation(False)
return
tumble, look, move, fly = None, None, None, None
if item is None or item == self.__tumble:
tumble = self.get_as_floats(self.__tumble)
if tumble:
tumble = Gf.Vec3d(*tumble)
self.set_floats(self.__tumble, None)
if item is None or item == self.__look:
look = self.get_as_floats(self.__look)
if look:
look = Gf.Vec3d(*look)
self.set_floats(self.__look, None)
if item is None or item == self.__move:
move = self.get_as_floats(self.__move)
if move:
move = Gf.Vec3d(*move)
self.set_floats(self.__move, None)
if item is None or item == self.__fly:
fly = self.get_as_floats(self.__fly)
if fly:
fly = Gf.Vec3d(*fly)
fly_speed = _optional_floats(self, 'fly_speed')
if fly_speed:
if len(fly_speed) == 1:
fly_speed = Gf.Vec3d(fly_speed[0], fly_speed[0], fly_speed[0])
else:
fly_speed = Gf.Vec3d(*fly_speed)
                    # Flight speed is multiplied by 5 for VP-1 compatibility
fly = Gf.CompMult(fly, fly_speed * 5)
self.__last_applied = ModelState(tumble, look, move, fly)
if (delta_time is not None) or self.__last_applied.any_values():
self._apply_state(self.__last_applied, delta_time, alpha)
else:
super()._item_changed(item)
def calculate_pixel_to_world(self, pos):
projection = Gf.Matrix4d(*self.get_as_floats(self.__projection))
top_left, bot_right = self._calculate_pixel_to_world(pos, projection, projection.GetInverse())
x = top_left[0] - bot_right[0]
y = top_left[1] - bot_right[1]
        # For NDC-z we don't want to use the clip range, which could be huge,
        # so average the X-Y scales instead
self.set_floats('ndc_scale', [x, y, (x + y) * 0.5])
def _calculate_pixel_to_world(self, pos, projection, inv_projection):
ndc = projection.Transform(pos)
top_left = inv_projection.Transform(Gf.Vec3d(-1, -1, ndc[2]))
bot_right = inv_projection.Transform(Gf.Vec3d(1, 1, ndc[2]))
return (top_left, bot_right)
def _set_animation_key(self, key: str):
self.__animation_key = key
def _start_external_events(self, flight_mode: bool = False):
# If flight mode is already doing inertia, do nothing.
        # This is for the case where a right-click for WASD navigation ends with a mouse up and global inertia is enabled.
if self.__flight_inertia_active and not flight_mode:
return False
# Quick check that inertia is enabled for any mode other than flight
if not flight_mode:
inertia_modes = self.__settings.get('/exts/omni.kit.manipulator.camera/inertiaModesEnabled')
len_inertia_enabled = len(inertia_modes) if inertia_modes else 0
if len_inertia_enabled == 0:
return
if len_inertia_enabled == 1:
self.__inertia_modes = [inertia_modes[0], 0, 0, 0]
elif len_inertia_enabled == 2:
self.__inertia_modes = [inertia_modes[0], inertia_modes[1], 0, 0]
elif len_inertia_enabled == 3:
self.__inertia_modes = [inertia_modes[0], inertia_modes[1], inertia_modes[2], 0]
else:
self.__inertia_modes = inertia_modes
else:
self.__inertia_modes = [1, 0, 1, 0]
# Setup the animation state
self.__anim_stopped = 0
self.__intertia_state = None
self.__flight_inertia_active = flight_mode
        # Pull more info from the inertia settings for what is to be created
create_tumble = self.__inertia_modes[1]
create_look = flight_mode or self.__inertia_modes[2]
create_move = self.__inertia_modes[3]
create_fly = flight_mode
if self.__anim_stream:
# Handle case where key was down, then lifted, then pushed again by recreating look_velocity / flight correction.
create_tumble = create_tumble and not self.__tumble_velocity
create_look = create_look and not self.__look_velocity
create_move = create_move and not self.__move_velocity
create_fly = False
clamp_dt = self.__settings.get('/ext/omni.kit.manipulator.camera/clampUpdates') or 0.15
if create_look:
self.__look_velocity = Velocity.create(self, 'look', clamp_dt)
if create_tumble:
self.__tumble_velocity = Velocity.create(self, 'tumble', clamp_dt)
if create_move:
self.__move_velocity = Velocity.create(self, 'move', clamp_dt)
if create_fly:
self.__fly_velocity = Velocity.create(self, 'fly', clamp_dt)
        # If any velocities are valid, then set up an animation to apply them.
if self.__tumble_velocity or self.__look_velocity or self.__move_velocity or self.__fly_velocity:
# Only set up the animation in flight-mode, let _stop_external_events set it up otherwise
if flight_mode and not self.__anim_stream:
self.__anim_stream = AnimationEventStream.get_instance()
self.__anim_stream.add_animation(self._apply_state_tick, self.__animation_key)
return True
if self.__anim_stream:
anim_stream, self.__anim_stream = self.__anim_stream, None
anim_stream.destroy()
return False
def _stop_external_events(self, flight_mode: bool = False):
# Setup animation for inertia in non-flight mode
if not flight_mode and not self.__anim_stream:
tumble, look, move = None, None, None
if self.__last_applied and (self.__tumble_velocity or self.__look_velocity or self.__move_velocity or self.__fly_velocity):
if self.__tumble_velocity and self.__inertia_modes[1]:
tumble = self.__last_applied.tumble
if self.__look_velocity and self.__inertia_modes[2]:
look = self.__last_applied.look
if self.__move_velocity and self.__inertia_modes[3]:
move = self.__last_applied.move
if tumble or look or move:
self.__last_applied = ModelState(tumble, look, move, self.__last_applied.fly)
self.__anim_stream = AnimationEventStream.get_instance()
self.__anim_stream.add_animation(self._apply_state_tick, self.__animation_key)
else:
self.__tumble_velocity = None
self.__look_velocity = None
self.__move_velocity = None
self.__fly_velocity = None
self.__intertia_state = None
return
self.__anim_stopped = time.time()
self.__intertia_state = self.__last_applied
self.__mark_animating(1)
def __mark_animating(self, interaction_animating: int):
item, _ = self.__items.get('interaction_animating', (None, None))
self.set_ints(item, [interaction_animating])
super()._item_changed(item)
def _apply_state_time(self, dt: float, apply_fn: Callable):
alpha = 1
if self.__anim_stopped:
now = time.time()
inertia_enabled = _optional_int(self, 'inertia_enabled', 0)
inertia_seconds = _optional_float(self, 'inertia_seconds', 0)
if inertia_enabled and inertia_seconds > 0:
alpha = 1.0 - ((now - self.__anim_stopped) / inertia_seconds)
if alpha > ALMOST_ZERO:
decay = self.__settings.get('/exts/omni.kit.manipulator.camera/inertiaDecay')
decay = _optional_int(self, 'inertia_decay', decay)
alpha = pow(alpha, decay) if decay else 1
else:
alpha = 0
else:
alpha = 0
if alpha == 0:
if self.__anim_stream:
anim_stream, self.__anim_stream = self.__anim_stream, None
anim_stream.destroy()
self.set_ints('interaction_ended', [1])
apply_fn(dt * alpha, 1)
if alpha == 0:
self.set_ints('interaction_ended', [0])
self.__mark_animating(0)
self.__tumble_velocity = None
self.__look_velocity = None
self.__move_velocity = None
self.__fly_velocity = None
self.__intertia_state = None
self.__flight_inertia_active = False
return False
return True
def _apply_state_tick(self, dt: float = None):
keep_anim = True
istate = self.__intertia_state
if istate:
if self.__flight_inertia_active:
# See _item_changed, but during an inertia move, look should still be applied (but without any velocity)
look = self.get_as_floats(self.__look)
if look:
self.set_floats(self.__look, None)
state = ModelState(None, look, None, istate.fly)
else:
tumble = (self.get_as_floats(self.__tumble) or istate.tumble) if self.__inertia_modes[1] else None
look = (self.get_as_floats(self.__look) or istate.look) if self.__inertia_modes[2] else None
move = (self.get_as_floats(self.__move) or istate.move) if self.__inertia_modes[3] else None
state = ModelState(tumble, look, move)
keep_anim = self._apply_state_time(dt, lambda dt, alpha: self._apply_state(state, dt, alpha))
else:
keep_anim = self._apply_state_time(dt, lambda dt, alpha: self._item_changed(None, dt, alpha))
if not keep_anim and self.__anim_stream:
self.__destroy_animation()
def _kill_external_animation(self, kill_stream: bool = True, initial_transform = None):
if kill_stream:
self.__destroy_animation()
# self._stop_external_events()
self.__tumble_velocity = None
self.__look_velocity = None
self.__move_velocity = None
self.__fly_velocity = None
self.__intertia_state = None
self.__flight_inertia_active = False
# Reset internal transform if provided
if initial_transform:
self.set_floats('transform', initial_transform)
self.set_floats('initial_transform', initial_transform)
@carb.profiler.profile
def _apply_state(self, state: ModelState, dt: float = None, alpha: float = None):
up_axis = _optional_floats(self, 'up_axis')
rotation_precision = _optional_int(self, 'rotation_precision', 5)
last_transform = Gf.Matrix4d(*self.get_as_floats(self.__transform))
xforms = TransformAccumulator(last_transform)
center_of_interest = None
tumble = state.tumble
if self.__tumble_velocity:
tumble = self.__tumble_velocity.apply(tumble, dt, alpha)
if tumble:
center_of_interest = Gf.Vec3d(*self.get_as_floats(self.__center_of_interest))
tumble = Gf.Vec3d(round(tumble[0], rotation_precision), round(tumble[1], rotation_precision), round(tumble[2], rotation_precision))
final_xf = xforms.get_tumble(tumble, center_of_interest, up_axis)
else:
final_xf = Gf.Matrix4d(1)
look = state.look
if self.__look_velocity:
look = self.__look_velocity.apply(look, dt, alpha)
if look:
look = Gf.Vec3d(round(look[0], rotation_precision), round(look[1], rotation_precision), round(look[2], rotation_precision))
final_xf = final_xf * xforms.get_look(look, up_axis)
move = state.move
if self.__move_velocity:
move = self.__move_velocity.apply(move, dt, alpha)
if move:
final_xf = xforms.get_translation(move) * final_xf
adjust_coi = move[2] != 0
else:
adjust_coi = False
fly = None if _optional_int(self, 'disable_fly', 0) else state.fly
if self.__fly_velocity:
fly = self.__fly_velocity.apply(fly, dt, alpha)
if fly:
if _optional_bool(self, 'fly_mode_lock_view', False):
decomp_rot = last_transform.ExtractRotation().Decompose(Gf.Vec3d.ZAxis(), Gf.Vec3d.YAxis(), Gf.Vec3d.XAxis())
rot_z = Gf.Rotation(Gf.Vec3d.ZAxis(), decomp_rot[0])
rot_y = Gf.Rotation(Gf.Vec3d.YAxis(), decomp_rot[1])
rot_x = Gf.Rotation(Gf.Vec3d.XAxis(), decomp_rot[2])
last_transform_tr = Gf.Matrix4d().SetTranslate(last_transform.ExtractTranslation())
last_transform_rt_0 = Gf.Matrix4d().SetRotate(rot_x)
last_transform_rt_1 = Gf.Matrix4d().SetRotate(rot_y * rot_z)
if up_axis[2]:
fly[1], fly[2] = -fly[2], fly[1]
elif Gf.Dot(Gf.Vec3d.ZAxis(), last_transform.TransformDir((0, 0, 1))) < 0:
fly[1], fly[2] = -fly[1], -fly[2]
flight_xf = xforms.get_translation(fly)
last_transform = last_transform_rt_0 * flight_xf * last_transform_rt_1 * last_transform_tr
else:
final_xf = xforms.get_translation(fly) * final_xf
transform = final_xf * last_transform
# If zooming out in Z, adjust the center-of-interest and pixel-to-world in 'ndc_scale'
self.set_ints(self.__adjust_center_of_interest, [adjust_coi])
if adjust_coi:
center_of_interest = center_of_interest or Gf.Vec3d(*self.get_as_floats(self.__center_of_interest))
coi = Gf.Matrix4d(*self.get_as_floats('initial_transform')).Transform(center_of_interest)
coi = transform.GetInverse().Transform(coi)
self.calculate_pixel_to_world(coi)
self.set_floats(self.__transform, _flatten_matrix(transform))
super()._item_changed(self.__transform)
def _broadcast_mode(self, mode: str):
if mode == self.__mode:
return
viewport_id = _optional_int(self, 'viewport_id', None)
if viewport_id is None:
return
        # Send a signal that contains the viewport_id and mode (carb requires a homogeneous array, so as strings)
self.__settings.set("/exts/omni.kit.manipulator.camera/viewportMode", [str(viewport_id), mode])
self.__mode = mode
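# --- Illustrative usage sketch: observing the mode broadcast above. The
# subscription call mirrors the ones already used in this file; the logging
# in the callback body is an example choice.
def _example_listen_viewport_mode():
    settings = carb.settings.get_settings()
    def _changed(item, event_type):
        # Payload is a homogeneous string array: [viewport_id, mode]
        value = settings.get('/exts/omni.kit.manipulator.camera/viewportMode')
        carb.log_info(f'viewport camera mode changed: {value}')
    return settings.subscribe_to_node_change_events(
        '/exts/omni.kit.manipulator.camera/viewportMode', _changed)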
| 32,559 | Python | 45.781609 | 143 | 0.589729 |
omniverse-code/kit/exts/omni.kit.manipulator.camera/omni/kit/manipulator/camera/manipulator.py | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ['CameraManipulatorBase', 'adjust_center_of_interest']
from omni.ui import scene as sc
from .gestures import build_gestures
from .model import CameraManipulatorModel, _optional_bool, _flatten_matrix
from pxr import Gf
# Common math to adjust the center-of-interest
def adjust_center_of_interest(model: CameraManipulatorModel, initial_transform: Gf.Matrix4d, final_transform: Gf.Matrix4d):
# Adjust the center-of-interest if requested.
# For object-centric movement we always adjust it if an object was hit
object_centric = _optional_bool(model, 'object_centric_movement')
coi_picked = model.get_as_floats('center_of_interest_picked') if object_centric else False
adjust_center_of_interest = (object_centric and coi_picked) or _optional_bool(model, 'adjust_center_of_interest')
if not adjust_center_of_interest:
return None, None
    # When adjusting the center of interest we'll operate on a direction and length (in camera-space),
    # which helps avoid introducing drift as we jump through the different spaces to update it.
# Final camera position
world_cam_pos = final_transform.Transform(Gf.Vec3d(0, 0, 0))
# center_of_interest_start is in camera-space
center_of_interest_start = Gf.Vec3d(*model.get_as_floats('center_of_interest_start'))
# Save the direction
center_of_interest_dir = center_of_interest_start.GetNormalized()
if coi_picked:
# Keep original center-of-interest direction, but adjust its length to the picked position
world_coi = Gf.Vec3d(coi_picked[0], coi_picked[1], coi_picked[2])
# TODO: Setting to keep subsequent movement focused on screen-center or move it to the object.
if False:
# Save the center-of-interest to the hit-point by adjusting direction
center_of_interest_dir = final_transform.GetInverse().Transform(world_coi).GetNormalized()
else:
# Move center-of-interest to world space at initial transform
world_coi = initial_transform.Transform(center_of_interest_start)
# Now get the length between final camera-position and the world-space-coi,
# and apply that to the direction.
center_of_interest_end = center_of_interest_dir * (world_cam_pos - world_coi).GetLength()
return center_of_interest_start, center_of_interest_end
# Base class, responsible for building up the gestures
class CameraManipulatorBase(sc.Manipulator):
def __init__(self, bindings: dict = None, model: sc.AbstractManipulatorModel = None, *args, **kwargs):
super().__init__(*args, **kwargs)
self._screen = None
# Provide some defaults
self.model = model or CameraManipulatorModel()
self.bindings = bindings
# Provide a slot for a user to fill in with a GestureManager but don't use anything by default
self.manager = None
self.gestures = []
self.__transform = None
self.__gamepad = None
def _on_began(self, model: CameraManipulatorModel, *args, **kwargs):
pass
def on_build(self):
# Need to hold a reference to this or the sc.Screen would be destroyed when out of scope
self.__transform = sc.Transform()
with self.__transform:
self._screen = sc.Screen(gestures=self.gestures or build_gestures(self.model, self.bindings, self.manager, self._on_began))
def destroy(self):
if self.__gamepad:
self.__gamepad.destroy()
self.__gamepad = None
if self.__transform:
self.__transform.clear()
self.__transform = None
self._screen = None
if hasattr(self.model, 'destroy'):
self.model.destroy()
@property
def gamepad_enabled(self) -> bool:
return self.__gamepad is not None
@gamepad_enabled.setter
def gamepad_enabled(self, value: bool):
if value:
if not self.__gamepad:
from .gamepad import GamePadController
self.__gamepad = GamePadController(self)
elif self.__gamepad:
self.__gamepad.destroy()
self.__gamepad = None
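# A short usage sketch (hypothetical): gamepad input is opt-in, so a host
# application would typically toggle it after construction:
#
#   manipulator = CameraManipulatorBase()
#   manipulator.gamepad_enabled = True   # lazily creates a GamePadController
#   manipulator.gamepad_enabled = False  # tears it down again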
# We have all the imports already, so provide a simple omni.ui.scene camera manipulator that one can use.
# Takes an omni.ui.scene view and center-of-interest and applies model changes to that view
class SceneViewCameraManipulator(CameraManipulatorBase):
def __init__(self, center_of_interest, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__center_of_interest = center_of_interest
def _on_began(self, model: CameraManipulatorModel, mouse):
model.set_floats('center_of_interest', [self.__center_of_interest[0], self.__center_of_interest[1], self.__center_of_interest[2]])
if _optional_bool(model, 'orthographic'):
model.set_ints('disable_tumble', [1])
model.set_ints('disable_look', [1])
def on_model_updated(self, item):
model = self.model
if item == model.get_item('transform'):
final_transform = Gf.Matrix4d(*model.get_as_floats(item))
initial_transform = Gf.Matrix4d(*model.get_as_floats('initial_transform'))
# Adjust our center-of-interest
coi_start, coi_end = adjust_center_of_interest(model, initial_transform, final_transform)
            if coi_end is not None:
self.__center_of_interest = coi_end
# omni.ui.scene.SceneView.CameraModel expects 'view', but we operate on 'transform'
# The following will push our transform changes into the SceneView.model.view
sv_model = self.scene_view.model
view = sv_model.get_item('view')
sv_model.set_floats(view, _flatten_matrix(final_transform.GetInverse()))
sv_model._item_changed(view)
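# A minimal usage sketch (hypothetical) inside an omni.ui.scene.SceneView scene,
# assuming `center_of_interest` is expressed in camera-space:
#
#   from omni.ui import scene as sc
#   scene_view = sc.SceneView()
#   with scene_view.scene:
#       SceneViewCameraManipulator(Gf.Vec3d(0, 0, -10))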
omniverse-code/kit/exts/omni.kit.manipulator.camera/omni/kit/manipulator/camera/gesturebase.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ['CameraGestureBase']
from omni.ui import scene as sc
from .model import _accumulate_values, _optional_bool, _optional_floats, _flatten_matrix
from .flight_mode import get_keyboard_input
import carb.settings
from pxr import Gf
import time
from typing import Callable, Sequence
# Base class for camera transform manipulation/gesture
#
class CameraGestureBase(sc.DragGesture):
def __init__(self, model: sc.AbstractManipulatorModel, configure_model: Callable = None, name: str = None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.name = name if name else self.__class__.__name__
self.model = model
# XXX: Need a manipulator on_began method
self.__configure_model = configure_model
self.__prev_mouse = None
self.__prev_mouse_time = None
self.__keyboard = None
self.__fly_active = None
def destroy(self):
self.model = None
self._disable_flight()
super().destroy()
@property
def center_of_interest(self):
return Gf.Vec3d(self.model.get_as_floats('center_of_interest'))
@property
def initial_transform(self):
return Gf.Matrix4d(*self.model.get_as_floats('initial_transform'))
@property
def last_transform(self):
return Gf.Matrix4d(*self.model.get_as_floats('transform'))
@property
def projection(self):
return Gf.Matrix4d(*self.model.get_as_floats('projection'))
@property
def orthographic(self):
return _optional_bool(self.model, 'orthographic')
@property
def disable_pan(self):
return _optional_bool(self.model, 'disable_pan')
@property
def disable_tumble(self):
return _optional_bool(self.model, 'disable_tumble')
@property
def disable_look(self):
return _optional_bool(self.model, 'disable_look')
@property
def disable_zoom(self):
return _optional_bool(self.model, 'disable_zoom')
@property
    def inertia(self):
inertia = _optional_bool(self.model, 'inertia_enabled')
if not inertia:
return 0
inertia = _optional_floats(self.model, 'inertia_seconds')
return inertia[0] if inertia else 0
@property
def up_axis(self):
# Assume Y-up if not specified
return _optional_bool(self.model, 'up_axis', 1)
@staticmethod
def __conform_speed(values):
if values:
vlen = len(values)
if vlen == 1:
return (values[0], values[0], values[0])
if vlen == 2:
return (values[0], values[1], 0)
return values
return (1, 1, 1)
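    # For example: [2.0] -> (2.0, 2.0, 2.0); [1.0, 0.5] -> (1.0, 0.5, 0);
    # None / empty -> (1, 1, 1); a full 3-tuple passes through unchanged.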
def get_rotation_speed(self, secondary):
model = self.model
rotation_speed = self.__conform_speed(_optional_floats(model, 'rotation_speed'))
secondary_speed = self.__conform_speed(_optional_floats(model, secondary))
return (rotation_speed[0] * secondary_speed[0],
rotation_speed[1] * secondary_speed[1],
rotation_speed[2] * secondary_speed[2])
@property
def tumble_speed(self):
return self.get_rotation_speed('tumble_speed')
@property
def look_speed(self):
return self.get_rotation_speed('look_speed')
@property
def move_speed(self):
return self.__conform_speed(_optional_floats(self.model, 'move_speed'))
@property
def world_speed(self):
model = self.model
ndc_scale = self.__conform_speed(_optional_floats(model, 'ndc_scale'))
world_speed = self.__conform_speed(_optional_floats(model, 'world_speed'))
return Gf.CompMult(world_speed, ndc_scale)
def _disable_flight(self):
if self.__keyboard:
self.__keyboard.destroy()
def _setup_keyboard(self, model, exit_mode: bool) -> bool:
"""Setup keyboard and return whether the manipualtor mode (fly) was broadcast to consumers"""
self.__keyboard = get_keyboard_input(model, self.__keyboard)
if self.__keyboard:
# If the keyboard is active, broadcast that fly mode has been entered
if self.__keyboard.active:
self.__fly_active = True
model._broadcast_mode("fly")
return True
# Check if fly mode was exited
if self.__fly_active:
exit_mode = self.name.replace('Gesture', '').lower() if exit_mode else ""
model._broadcast_mode(exit_mode)
return True
return False
    # omni.ui.scene Gesture interface
    # We abstract on top of this due to asynchronous picking: we don't want a
    # gesture to begin until the object/world-space query has completed.
    # This 'delay' could be made a setting, but would wind up 'snapping' on the
    # transition from a Camera's centerOfInterest to the new world-space position.
def on_began(self, mouse: Sequence[float] = None):
model = self.model
# Setup flight mode and possibly broadcast that mode to any consumers
        was_broadcast = self._setup_keyboard(model, False)
        # If fly mode was not broadcast, then broadcast this gesture's mode
        if not was_broadcast:
# LookGesture => look
manip_mode = self.name.replace('Gesture', '').lower()
model._broadcast_mode(manip_mode)
mouse = mouse if mouse else self.sender.gesture_payload.mouse
if self.__configure_model:
self.__configure_model(model, mouse)
self.__prev_mouse = mouse
xf = model.get_as_floats('transform')
if xf:
            # Save an immutable copy of the transform for the undoable end-event
model.set_floats('initial_transform', xf.copy())
coi = model.get_as_floats('center_of_interest')
if coi:
            # Save an immutable copy of center_of_interest for end adjustment if desired (avoiding space conversions)
model.set_floats('center_of_interest_start', coi.copy())
model._item_changed('center_of_interest')
model.set_ints('interaction_active', [1])
def on_changed(self, mouse: Sequence[float] = None):
self._setup_keyboard(self.model, True)
self.__last_change = time.time()
cur_mouse = mouse if mouse else self.sender.gesture_payload.mouse
mouse_moved = (cur_mouse[0] - self.__prev_mouse[0], cur_mouse[1] - self.__prev_mouse[1])
# if (mouse_moved[0] != 0) or (mouse_moved[1] != 0):
self.__prev_mouse = cur_mouse
self.on_mouse_move(mouse_moved)
def on_ended(self):
model = self.model
final_position = True
        # Broadcast that the camera manipulation mode is now none
model._broadcast_mode("")
if self.__keyboard:
self.__keyboard = self.__keyboard.end()
final_position = self.__keyboard is None
self.__prev_mouse = None
self.__prev_mouse_time = None
if final_position:
if model._start_external_events(False):
model._stop_external_events(False)
self.__apply_as_undoable()
model.set_ints('adjust_center_of_interest', [])
model.set_floats('current_aperture', [])
model.set_ints('interaction_active', [0])
# model.set_floats('center_of_interest_start', [])
# model.set_floats('center_of_interest_picked', [])
def dirty_items(self, model: sc.AbstractManipulatorModel):
model = self.model
cur_item = model.get_item('transform')
if model.get_as_floats('initial_transform') != model.get_as_floats(cur_item):
return [cur_item]
def __apply_as_undoable(self):
model = self.model
dirty_items = self.dirty_items(model)
if dirty_items:
model.set_ints('interaction_ended', [1])
try:
for item in dirty_items:
model._item_changed(item)
except:
raise
finally:
model.set_ints('interaction_ended', [0])
def _accumulate_values(self, key: str, x: float, y: float, z: float):
item = _accumulate_values(self.model, key, x, y, z)
if item:
self.model._item_changed(None if self.__keyboard else item)
omniverse-code/kit/exts/omni.kit.manipulator.camera/omni/kit/manipulator/camera/gamepad.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from .model import _accumulate_values
import omni.kit.app
from omni.ui import scene as sc
import carb
import asyncio
from typing import Dict, List, Sequence, Set
# Setting per action mode (i.e):
# /exts/omni.kit.manipulator.camera/gamePad/fly/deadZone
# /exts/omni.kit.manipulator.camera/gamePad/look/deadZone
ACTION_MODE_SETTING_KEYS = {"scale", "deadZone"}
ACTION_MODE_SETTING_ROOT = "/exts/omni.kit.manipulator.camera/gamePad"
# Setting per action trigger (i.e):
# /exts/omni.kit.manipulator.camera/gamePad/button/a/scale
ACTION_TRIGGER_SETTING_KEYS = {"scale"}
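# For example (a hypothetical runtime tweak, not something this module does itself),
# the fly-mode dead-zone and the A-button scale could be tuned via:
#
#   carb.settings.get_settings().set(f"{ACTION_MODE_SETTING_ROOT}/fly/deadZone", 0.15)
#   carb.settings.get_settings().set(f"{ACTION_MODE_SETTING_ROOT}/button/a/scale", 2.0)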
__all__ = ['GamePadController']
class ValueMapper:
def __init__(self, mode: str, trigger: str, index: int, sub_index: int):
self.__mode: str = mode
self.__trigger: str = trigger
self.__index: int = index
self.__sub_index = sub_index
@property
def mode(self) -> str:
return self.__mode
@property
def trigger(self) -> str:
return self.__trigger
@property
def index(self) -> int:
return self.__index
@property
def sub_index(self) -> int:
return self.__sub_index
class ModeSettings:
def __init__(self, action_mode: str, settings: carb.settings.ISettings):
self.__scale: float = 1.0
self.__dead_zone: float = 1e-04
self.__action_mode = action_mode
self.__setting_subs: Sequence[carb.settings.SubscriptionId] = []
for setting_key in ACTION_MODE_SETTING_KEYS:
sp = self.__get_setting_path(setting_key)
self.__setting_subs.append(
settings.subscribe_to_node_change_events(sp, lambda *args, k=setting_key: self.__setting_changed(*args, setting_key=k))
)
self.__setting_changed(None, carb.settings.ChangeEventType.CHANGED, setting_key=setting_key)
def __del__(self):
self.destroy()
def __get_setting_path(self, setting_key: str):
return f"{ACTION_MODE_SETTING_ROOT}/{self.__action_mode}/{setting_key}"
def __setting_changed(self, item: carb.dictionary.Item, event_type: carb.settings.ChangeEventType, setting_key: str):
if event_type == carb.settings.ChangeEventType.CHANGED:
setting_path = self.__get_setting_path(setting_key)
if setting_key == "scale":
self.__scale = carb.settings.get_settings().get(setting_path)
if self.__scale is None:
self.__scale = 1.0
elif setting_key == "deadZone":
# Use absolute value, no negative dead-zones and clamp to 1.0
dead_zone = carb.settings.get_settings().get(setting_path)
self.__dead_zone = min(abs(dead_zone) or 1e-04, 1.0) if (dead_zone is not None) else 0.0
def destroy(self, settings: carb.settings.ISettings = None):
settings = settings or carb.settings.get_settings()
for setting_sub in self.__setting_subs:
settings.unsubscribe_to_change_events(setting_sub)
self.__setting_subs = tuple()
    def get_value(self, value: float, axis_idx: int) -> float:
        # Legacy implementation, which re-scales the input value into the range remaining above dead_zone
        value = (value - self.__dead_zone) / (1.0 - self.__dead_zone)
        value = max(0, min(1, value))
        return value * self.__scale
        # A somewhat simpler (currently unused) alternative that gates, rather than re-scales, by the dead-zone:
        # if abs(value) > self.__dead_zone:
        #     return value * self.__scale
        # return 0
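    # Worked example (assuming dead_zone=0.2, scale=1.0): an input of 0.6 maps to
    # (0.6 - 0.2) / (1.0 - 0.2) = 0.5, and anything at or below 0.2 clamps to 0.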
def _limit_camera_velocity(value: float, settings: carb.settings.ISettings, context_name: str):
    cam_limit = settings.get('/exts/omni.kit.viewport.window/cameraSpeedLimit')
    if cam_limit and (context_name in cam_limit):
vel_min = settings.get('/persistent/app/viewport/camVelocityMin')
if vel_min is not None:
value = max(vel_min, value)
vel_max = settings.get('/persistent/app/viewport/camVelocityMax')
if vel_max is not None:
value = min(vel_max, value)
return value
def _adjust_flight_speed(xyz_value: Sequence[float]):
y = xyz_value[1]
if y == 0.0:
return
import math
settings = carb.settings.get_settings()
value = settings.get('/persistent/app/viewport/camMoveVelocity') or 1
scaler = settings.get('/persistent/app/viewport/camVelocityScalerMultAmount') or 1.1
scaler = 1.0 + (max(scaler, 1.0 + 1e-8) - 1.0) * abs(y)
if y < 0:
value = value / scaler
elif y > 0:
value = value * scaler
if math.isfinite(value) and (value > 1e-8):
value = _limit_camera_velocity(value, settings, 'gamepad')
settings.set('/persistent/app/viewport/camMoveVelocity', value)
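# Worked example: with the default scaler of 1.1, a full positive deflection
# (y == 1.0) multiplies camMoveVelocity by 1.1 per event, while y == -1.0 divides
# it by 1.1; partial deflections interpolate the multiplier toward 1.0.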
class GamePadController:
def __init__(self, manipulator: sc.Manipulator):
self.__manipulator: sc.Manipulator = manipulator
self.__gp_event_sub: Dict[carb.input.Gamepad, int] = {}
self.__compressed_events: Dict[int, float] = {}
self.__action_modes: Dict[str, List[float]] = {}
self.__app_event_sub: carb.events.ISubscription = None
self.__mode_settings: Dict[str, ModeSettings] = {}
self.__value_actions: Dict[carb.input.GamepadInput, ValueMapper] = {}
self.__setting_subs: Sequence[carb.settings.SubscriptionId] = []
# Some button presses need synthetic events because unlike keyboard input, carb gamepad doesn't repeat.
        # event 1 left pressed: value = 0.5
# event 2 right pressed: value = 0.5
# these should cancel, but there is no notification of left event until it changes from 0.5
# This is all handled in __gamepad_event
trigger_synth = {carb.input.GamepadInput.RIGHT_TRIGGER, carb.input.GamepadInput.LEFT_TRIGGER}
shoulder_synth = {carb.input.GamepadInput.RIGHT_SHOULDER, carb.input.GamepadInput.LEFT_SHOULDER}
self.__synthetic_state_init = {
carb.input.GamepadInput.RIGHT_TRIGGER: trigger_synth,
carb.input.GamepadInput.LEFT_TRIGGER: trigger_synth,
carb.input.GamepadInput.RIGHT_SHOULDER: shoulder_synth,
carb.input.GamepadInput.LEFT_SHOULDER: shoulder_synth,
}
self.__synthetic_state = self.__synthetic_state_init.copy()
self.__init_gamepad_action(None, carb.settings.ChangeEventType.CHANGED)
self.__gp_connect_sub = self._iinput.subscribe_to_gamepad_connection_events(self.__gamepad_connection)
def __init_gamepad_action(self, item: carb.dictionary.Item, event_type: carb.settings.ChangeEventType):
if event_type != carb.settings.ChangeEventType.CHANGED:
return
self.__value_actions: Dict[carb.input.GamepadInput, ValueMapper] = {}
settings = carb.settings.get_settings()
create_subs = not bool(self.__setting_subs)
gamepad_action_paths = []
gamepad_input_names = ["rightStick", "leftStick", "dPad", "trigger", "shoulder", "button/a", "button/b", "button/x", "button/y"]
for gamepad_input in gamepad_input_names:
action_setting_path = f"{ACTION_MODE_SETTING_ROOT}/{gamepad_input}/action"
gamepad_action_paths.append(action_setting_path)
if create_subs:
self.__setting_subs.append(
settings.subscribe_to_node_change_events(action_setting_path, self.__init_gamepad_action)
)
        # TODO: Maybe need more configurable/robust action mapping
def action_mapping_4(action_mode: str):
action_modes = action_mode.split(".")
if len(action_modes) != 1:
carb.log_error(f"Action mapping '{action_mode}' for quad input is invalid, using '{action_modes[0]}'")
action_mode = action_modes[0]
if action_mode == "look":
return action_mode, (0, 1), (0, 1, 0, 1)
return action_mode, (0, 2), (1, 0, 1, 0)
def action_mapping_2(action_mode: str):
action_modes = action_mode.split(".")
if len(action_modes) != 2:
action_modes = (action_modes[0], "x")
carb.log_error(f"Action mapping '{action_mode}' for dual input is invalid, using '{action_modes[0]}.x'")
axis = {'x': 0, 'y': 1, 'z': 2}.get(action_modes[1], 0)
return action_modes[0], axis, (0, 1)
def action_mapping_1(action_mode: str):
action_modes = action_mode.split(".")
if len(action_modes) != 2:
action_modes = (action_modes[0], "x")
carb.log_error(f"Action mapping '{action_mode}' for dual input is invalid, using '{action_modes[0]}.x'")
axis = {'x': 0, 'y': 1, 'z': 2}.get(action_modes[1], 0)
return action_modes[0], axis, 0
        # Go through the list of named events and set up the action based on its value
right_stick_action = settings.get(gamepad_action_paths[0])
if right_stick_action:
right_stick_action, axis, sub_idx = action_mapping_4(right_stick_action)
self.__value_actions[carb.input.GamepadInput.RIGHT_STICK_LEFT] = ValueMapper(right_stick_action, gamepad_input_names[0], axis[0], sub_idx[0])
self.__value_actions[carb.input.GamepadInput.RIGHT_STICK_RIGHT] = ValueMapper(right_stick_action, gamepad_input_names[0], axis[0], sub_idx[1])
self.__value_actions[carb.input.GamepadInput.RIGHT_STICK_UP] = ValueMapper(right_stick_action, gamepad_input_names[0], axis[1], sub_idx[2])
self.__value_actions[carb.input.GamepadInput.RIGHT_STICK_DOWN] = ValueMapper(right_stick_action, gamepad_input_names[0], axis[1], sub_idx[3])
left_stick_action = settings.get(gamepad_action_paths[1])
if left_stick_action:
left_stick_action, axis, sub_idx = action_mapping_4(left_stick_action)
self.__value_actions[carb.input.GamepadInput.LEFT_STICK_LEFT] = ValueMapper(left_stick_action, gamepad_input_names[1], axis[0], sub_idx[0])
self.__value_actions[carb.input.GamepadInput.LEFT_STICK_RIGHT] = ValueMapper(left_stick_action, gamepad_input_names[1], axis[0], sub_idx[1])
self.__value_actions[carb.input.GamepadInput.LEFT_STICK_UP] = ValueMapper(left_stick_action, gamepad_input_names[1], axis[1], sub_idx[2])
self.__value_actions[carb.input.GamepadInput.LEFT_STICK_DOWN] = ValueMapper(left_stick_action, gamepad_input_names[1], axis[1], sub_idx[3])
dpad_action = settings.get(gamepad_action_paths[2])
if dpad_action:
dpad_action, axis, sub_idx = action_mapping_4(dpad_action)
self.__value_actions[carb.input.GamepadInput.DPAD_LEFT] = ValueMapper(dpad_action, gamepad_input_names[2], axis[0], sub_idx[0])
self.__value_actions[carb.input.GamepadInput.DPAD_RIGHT] = ValueMapper(dpad_action, gamepad_input_names[2], axis[0], sub_idx[1])
self.__value_actions[carb.input.GamepadInput.DPAD_UP] = ValueMapper(dpad_action, gamepad_input_names[2], axis[1], sub_idx[2])
self.__value_actions[carb.input.GamepadInput.DPAD_DOWN] = ValueMapper(dpad_action, gamepad_input_names[2], axis[1], sub_idx[3])
trigger_action = settings.get(gamepad_action_paths[3])
if trigger_action:
trigger_action, axis, sub_idx = action_mapping_2(trigger_action)
self.__value_actions[carb.input.GamepadInput.RIGHT_TRIGGER] = ValueMapper(trigger_action, gamepad_input_names[3], axis, sub_idx[0])
self.__value_actions[carb.input.GamepadInput.LEFT_TRIGGER] = ValueMapper(trigger_action, gamepad_input_names[3], axis, sub_idx[1])
shoulder_action = settings.get(gamepad_action_paths[4])
if shoulder_action:
shoulder_action, axis, sub_idx = action_mapping_2(shoulder_action)
self.__value_actions[carb.input.GamepadInput.RIGHT_SHOULDER] = ValueMapper(shoulder_action, gamepad_input_names[4], axis, sub_idx[0])
self.__value_actions[carb.input.GamepadInput.LEFT_SHOULDER] = ValueMapper(shoulder_action, gamepad_input_names[4], axis, sub_idx[1])
button_action = settings.get(gamepad_action_paths[5])
if button_action:
button_action, axis, sub_idx = action_mapping_1(button_action)
self.__value_actions[carb.input.GamepadInput.A] = ValueMapper(button_action, gamepad_input_names[5], axis, sub_idx)
button_action = settings.get(gamepad_action_paths[6])
if button_action:
button_action, axis, sub_idx = action_mapping_1(button_action)
self.__value_actions[carb.input.GamepadInput.B] = ValueMapper(button_action, gamepad_input_names[6], axis, sub_idx)
button_action = settings.get(gamepad_action_paths[7])
if button_action:
button_action, axis, sub_idx = action_mapping_1(button_action)
self.__value_actions[carb.input.GamepadInput.X] = ValueMapper(button_action, gamepad_input_names[7], axis, sub_idx)
button_action = settings.get(gamepad_action_paths[8])
if button_action:
button_action, axis, sub_idx = action_mapping_1(button_action)
self.__value_actions[carb.input.GamepadInput.Y] = ValueMapper(button_action, gamepad_input_names[8], axis, sub_idx)
for value_mapper in self.__value_actions.values():
action_mode = value_mapper.mode
if self.__mode_settings.get(action_mode) is None:
self.__mode_settings[action_mode] = ModeSettings(action_mode, settings)
action_trigger = value_mapper.trigger
if self.__mode_settings.get(action_trigger) is None:
self.__mode_settings[action_trigger] = ModeSettings(action_trigger, settings)
def __del__(self):
self.destroy()
@property
def _iinput(self):
return carb.input.acquire_input_interface()
async def __apply_events(self):
# Grab the events to apply and reset the state to empty
events, self.__compressed_events = self.__compressed_events, {}
# Reset the synthetic state
self.__synthetic_state = self.__synthetic_state_init.copy()
if not events:
return
manipulator = self.__manipulator
if not manipulator:
return
model = manipulator.model
manipulator._on_began(model, None)
# Map the action to +/- values per x, y, z components
action_modes: Dict[str, Dict[int, List[float]]] = {}
for input, value in events.items():
action = self.__value_actions.get(input)
if not action:
continue
            # Must exist; raises KeyError otherwise
            mode_setting = self.__mode_settings[action.mode]
trigger_setting = self.__mode_settings[action.trigger]
# Get the dict for this action storing +/- values per x, y, z
pos_neg_value_dict = action_modes.get(action.mode) or {}
# Get the +/- values for the x, y, z component
pos_neg_values = pos_neg_value_dict.get(action.index) or [0, 0]
# Scale the value by the action's scaling factor
            value = mode_setting.get_value(value, action.index)
# Scale the value by the trigger's scaling factor
value = trigger_setting.get_value(value, action.index)
# Store the +/- value into the proper slot '+' into 0, '-' into 1
pos_neg_values[action.sub_index] += value
# Store back into the dict mapping x, y, z to +/- values
pos_neg_value_dict[action.index] = pos_neg_values
# Store back into the dict storing the +/- values per x, y, z into the action
action_modes[action.mode] = pos_neg_value_dict
# Collapse the +/- values per individual action and x, y, z into a single total
for action_mode, pos_neg_value_dict in action_modes.items():
# Some components may not have been touched but need to preserve last value
xyz_value = self.__action_modes.get(action_mode) or [0, 0, 0]
for xyz_index, pos_neg_value in pos_neg_value_dict.items():
xyz_value[xyz_index] = pos_neg_value[0] - pos_neg_value[1]
# Apply model speed to anything but fly (that is handled by model itself)
if action_mode != "fly":
model_speed = model.get_item(f"{action_mode}_speed")
if model_speed is not None:
model_speed = model.get_as_floats(model_speed)
if model_speed is not None:
for i in range(len(model_speed)):
xyz_value[i] *= model_speed[i]
# Store the final values
self.__action_modes[action_mode] = xyz_value
# Prune any actions that now do nothing (has 0 for x, y, and z)
self.__action_modes = {
action_mode: xyz_value for action_mode, xyz_value in self.__action_modes.items() if (xyz_value[0] or xyz_value[1] or xyz_value[2])
}
has_data: bool = bool(self.__action_modes)
if has_data:
self.__apply_gamepad_state()
if hasattr(model, '_start_external_events'):
if has_data:
                self.__start_external_events(model)
else:
self.__stop_external_events(model)
    def __start_external_events(self, model):
if self.__app_event_sub:
return
_broadcast_mode = getattr(model, '_broadcast_mode', None)
if _broadcast_mode:
_broadcast_mode("gamepad")
self.__app_event_sub = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(
self.__apply_gamepad_state,
name=f"omni.kit.manipulator.camera.GamePadController.{id(self)}",
# order=omni.kit.app.UPDATE_ORDER_PYTHON_ASYNC_FUTURE_END_UPDATE
)
model._start_external_events(True)
def __stop_external_events(self, model):
if self.__app_event_sub:
_broadcast_mode = getattr(model, '_broadcast_mode', None)
if _broadcast_mode:
_broadcast_mode("")
self.__app_event_sub = None
self.__action_modes = {}
model._stop_external_events(True)
def __apply_gamepad_state(self, *args, **kwargs):
manipulator = self.__manipulator
model = manipulator.model
# manipulator._on_began(model, None)
for action_mode, xyz_value in self.__action_modes.items():
if action_mode == "fly":
model.set_floats("fly", xyz_value)
continue
elif action_mode == "speed":
_adjust_flight_speed(xyz_value)
continue
item = _accumulate_values(model, action_mode, xyz_value[0], xyz_value[1], xyz_value[2])
if item:
model._item_changed(item)
def __gamepad_event(self, event: carb.input.GamepadEvent):
event_input = event.input
self.__compressed_events[event_input] = event.value
# Gamepad does not get repeat events, so on certain button presses there needs to be a 'synthetic' event
# that represents the inverse-key (left/right) based on its last/current state.
synth_state = self.__synthetic_state.get(event.input)
if synth_state:
for synth_input in synth_state:
del self.__synthetic_state[synth_input]
if synth_input != event_input:
self.__compressed_events[synth_input] = self._iinput.get_gamepad_value(event.gamepad, synth_input)
asyncio.ensure_future(self.__apply_events())
def __gamepad_connection(self, event: carb.input.GamepadConnectionEvent):
e_type = event.type
e_gamepad = event.gamepad
if e_type == carb.input.GamepadConnectionEventType.DISCONNECTED:
e_gamepad_sub = self.__gp_event_sub.get(e_gamepad)
if e_gamepad_sub:
self._iinput.unsubscribe_to_gamepad_events(e_gamepad, e_gamepad_sub)
del self.__gp_event_sub[e_gamepad]
elif e_type == carb.input.GamepadConnectionEventType.CONNECTED:
if self.__gp_event_sub.get(e_gamepad):
carb.log_error("Gamepad connected event, but already subscribed")
return
gp_event_sub = self._iinput.subscribe_to_gamepad_events(e_gamepad, self.__gamepad_event)
if gp_event_sub:
self.__gp_event_sub[e_gamepad] = gp_event_sub
def destroy(self):
iinput = self._iinput
settings = carb.settings.get_settings()
# Remove gamepad connected subscriptions
if self.__gp_connect_sub:
iinput.unsubscribe_to_gamepad_connection_events(self.__gp_connect_sub)
self.__gp_connect_sub = None
# Remove gamepad event subscriptions
for gamepad, gamepad_sub in self.__gp_event_sub.items():
iinput.unsubscribe_to_gamepad_events(gamepad, gamepad_sub)
self.__gp_event_sub = {}
# Remove any pending state on the model
model = self.__manipulator.model if self.__manipulator else None
if model:
self.__stop_external_events(model)
self.__manipulator = None
# Remove any settings subscriptions
for setting_sub in self.__setting_subs:
settings.unsubscribe_to_change_events(setting_sub)
self.__setting_subs = []
# Destroy any mode/action specific settings
for action_mode, mode_settings in self.__mode_settings.items():
mode_settings.destroy(settings)
self.__mode_settings = {}
omniverse-code/kit/exts/omni.kit.manipulator.camera/omni/kit/manipulator/camera/gestures.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ['build_gestures', 'PanGesture', 'TumbleGesture', 'LookGesture', 'ZoomGesture']
from omni.ui import scene as sc
from .gesturebase import CameraGestureBase
from pxr import Gf
import carb
from typing import Callable
kDefaultKeyBindings = {
'PanGesture': 'Any MiddleButton',
'TumbleGesture': 'Alt LeftButton',
'ZoomGesture': 'Alt RightButton',
'LookGesture': 'RightButton'
}
def build_gestures(model: sc.AbstractManipulatorModel,
bindings: dict = None,
manager: sc.GestureManager = None,
configure_model: Callable = None):
def _parse_binding(binding_str: str):
keys = binding_str.split(' ')
button = {
'LeftButton': 0,
'RightButton': 1,
'MiddleButton': 2
}.get(keys.pop())
modifiers = 0
for mod_str in keys:
mod_bit = {
'Shift': carb.input.KEYBOARD_MODIFIER_FLAG_SHIFT,
'Ctrl': carb.input.KEYBOARD_MODIFIER_FLAG_CONTROL,
'Alt': carb.input.KEYBOARD_MODIFIER_FLAG_ALT,
'Super': carb.input.KEYBOARD_MODIFIER_FLAG_SUPER,
'Any': 0xffffffff,
}.get(mod_str)
if not mod_bit:
raise RuntimeError(f'Unparseable binding: {binding_str}')
modifiers = modifiers | mod_bit
return (button, modifiers)
if not bindings:
bindings = kDefaultKeyBindings
gestures = []
for gesture, binding in bindings.items():
instantiator = globals().get(gesture)
if not instantiator:
carb.log_warn(f'Gesture "{gesture}" was not found for key-binding: "{binding}"')
continue
button, modifers = _parse_binding(binding)
gestures.append(instantiator(model, configure_model, mouse_button=button, modifiers=modifers, manager=manager))
return gestures
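# A hypothetical usage sketch: rebind tumble to Ctrl+LeftButton while keeping the
# default look binding (assumes `model` is a CameraManipulatorModel):
#
#   gestures = build_gestures(model, bindings={
#       'TumbleGesture': 'Ctrl LeftButton',
#       'LookGesture': 'RightButton',
#   })
#
# Unknown gesture names are skipped with a warning; the 'Any' modifier matches all.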
class PanGesture(CameraGestureBase):
def on_mouse_move(self, mouse_moved):
if self.disable_pan:
return
world_speed = self.world_speed
move_speed = self.move_speed
self._accumulate_values('move', mouse_moved[0] * 0.5 * world_speed[0] * move_speed[0],
mouse_moved[1] * 0.5 * world_speed[1] * move_speed[1],
0)
class TumbleGesture(CameraGestureBase):
def on_mouse_move(self, mouse_moved):
if self.disable_tumble:
return
        # Mouse moved is [-1,1], so make a full drag across the viewport a 180-degree tumble
speed = self.tumble_speed
self._accumulate_values('tumble', mouse_moved[0] * speed[0] * -90,
mouse_moved[1] * speed[1] * 90,
0)
class LookGesture(CameraGestureBase):
def on_mouse_move(self, mouse_moved):
if self.disable_look:
return
        # Mouse moved is [-1,1], so make a full drag across the viewport a 180-degree look
speed = self.look_speed
self._accumulate_values('look', mouse_moved[0] * speed[0] * -90,
mouse_moved[1] * speed[1] * 90,
0)
class OrthoZoomAperture():
def __init__(self, model: sc.AbstractManipulatorModel, apertures):
self.__values = apertures.copy()
def apply(self, model: sc.AbstractManipulatorModel, distance: float):
# TODO ortho-speed
for i in range(2):
self.__values[i] -= distance * 2
model.set_floats('current_aperture', self.__values)
model._item_changed(model.get_item('current_aperture'))
def dirty_items(self, model: sc.AbstractManipulatorModel):
cur_ap = model.get_item('current_aperture')
if model.get_as_floats('initial_aperture') != model.get_as_floats(cur_ap):
return [cur_ap]
class OrthoZoomProjection():
def __init__(self, model: sc.AbstractManipulatorModel, projection):
self.__projection = projection.copy()
def apply(self, model: sc.AbstractManipulatorModel, distance: float):
# TODO ortho-speed
distance /= 3.0
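        # Recover the ortho frustum terms from the column-major projection matrix:
        # rml = right-minus-left (width), tmb = top-minus-bottom (height),
        # rpl / tpb = the right-plus-left / top-plus-bottom off-center offsets.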
rml = (2.0 / self.__projection[0])
tmb = (2.0 / self.__projection[5])
aspect = tmb / rml
rpl = rml * -self.__projection[12]
tpb = tmb * self.__projection[13]
rml -= distance
tmb -= distance * aspect
rpl += distance
tpb += distance
self.__projection[0] = 2.0 / rml
self.__projection[5] = 2.0 / tmb
#self.__projection[12] = -rpl / rml
#self.__projection[13] = tpb / tmb
model.set_floats('projection', self.__projection)
# Trigger recomputation of ndc_speed
model._item_changed(model.get_item('projection'))
    def dirty_items(self, model: sc.AbstractManipulatorModel):
        # Unlike the aperture variant, the projection is always reported as dirty;
        # applying an orthographic zoom always mutates it
        proj = model.get_item('projection')
        return [proj]
class ZoomGesture(CameraGestureBase):
def dirty_items(self, model: sc.AbstractManipulatorModel):
return super().dirty_items(model) if not self.__orth_zoom else self.__orth_zoom.dirty_items(model)
def __setup_ortho_zoom(self):
apertures = self.model.get_as_floats('initial_aperture')
if apertures:
self.__orth_zoom = OrthoZoomAperture(self.model, apertures)
return True
projection = self.model.get_as_floats('projection')
if projection:
self.__orth_zoom = OrthoZoomProjection(self.model, projection)
return True
carb.log_warn("Orthographic zoom needs a projection or aperture")
return False
def on_began(self, *args, **kwargs):
super().on_began(*args, **kwargs)
# Setup an orthographic movement (aperture adjustment) if needed
self.__orth_zoom = False
if self.orthographic:
self.__setup_ortho_zoom()
# self.model.set_ints('adjust_center_of_interest', [1])
        # TODO: Setting to zoom toward view-center (fixed direction) or toward the current mouse/world interest
        self.__direction = Gf.Vec3d(self.center_of_interest.GetNormalized()) if False else None
def on_mouse_move(self, mouse_moved):
if self.disable_zoom:
return
# Compute length/radius from gesture start
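        # Note: 1.41421356 is sqrt(2), scaling the summed x/y delta as a diagonal screen-space distance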
distance = (mouse_moved[0] + mouse_moved[1]) * self.world_speed.GetLength() * 1.41421356
distance *= self.move_speed[2]
if self.__orth_zoom:
self.__orth_zoom.apply(self.model, distance)
return
# Zoom into view-enter or current mouse/world interest
direction = self.__direction if self.__direction else Gf.Vec3d(self.center_of_interest.GetNormalized())
amount = direction * distance
self._accumulate_values('move', amount[0], amount[1], amount[2])