in_source_id (string, 13–58 chars) | issue (string, 3–241k chars) | before_files (list, 0–3 items) | after_files (list, 0–3 items) | pr_diff (string, 109–107M chars, nullable)
---|---|---|---|---|
certbot__certbot-6134 | Reuse ACMEv1 accounts for ACMEv2 production
The lines removed following the discussion at https://github.com/certbot/certbot/pull/5902#discussion_r192532446 need to be added back, so that the ACMEv2 production endpoint can reuse existing ACMEv1 production accounts.
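For context, `LE_REUSE_SERVERS` in `certbot/constants.py` maps a newer ACME endpoint to the older endpoint whose stored accounts it may reuse. Below is a minimal sketch of the mapping with the production entry restored, plus a lookup helper; `find_reusable_server` is purely illustrative and not a real certbot function.
```
# Endpoint pairs from certbot/constants.py; the first entry is the
# production mapping this issue asks to restore.
LE_REUSE_SERVERS = {
    'acme-v02.api.letsencrypt.org/directory':
        'acme-v01.api.letsencrypt.org/directory',
    'acme-staging-v02.api.letsencrypt.org/directory':
        'acme-staging.api.letsencrypt.org/directory',
}


def find_reusable_server(server):
    """Hypothetical helper: return the older endpoint whose accounts
    `server` may reuse, or None if no reuse mapping exists."""
    return LE_REUSE_SERVERS.get(server)


# ACMEv2 production falls back to ACMEv1 production accounts:
assert (find_reusable_server('acme-v02.api.letsencrypt.org/directory')
        == 'acme-v01.api.letsencrypt.org/directory')
```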
| [
{
"content": "\"\"\"Certbot constants.\"\"\"\nimport logging\nimport os\nimport pkg_resources\n\nfrom acme import challenges\n\n\nSETUPTOOLS_PLUGINS_ENTRY_POINT = \"certbot.plugins\"\n\"\"\"Setuptools entry point group name for plugins.\"\"\"\n\nOLD_SETUPTOOLS_PLUGINS_ENTRY_POINT = \"letsencrypt.plugins\"\n\"\"\"Plugins Setuptools entry point before rename.\"\"\"\n\nCLI_DEFAULTS = dict(\n config_files=[\n \"/etc/letsencrypt/cli.ini\",\n # http://freedesktop.org/wiki/Software/xdg-user-dirs/\n os.path.join(os.environ.get(\"XDG_CONFIG_HOME\", \"~/.config\"),\n \"letsencrypt\", \"cli.ini\"),\n ],\n\n # Main parser\n verbose_count=-int(logging.INFO / 10),\n text_mode=False,\n max_log_backups=1000,\n noninteractive_mode=False,\n force_interactive=False,\n domains=[],\n certname=None,\n dry_run=False,\n register_unsafely_without_email=False,\n update_registration=False,\n email=None,\n eff_email=None,\n reinstall=False,\n expand=False,\n renew_by_default=False,\n renew_with_new_domains=False,\n autorenew=True,\n allow_subset_of_names=False,\n tos=False,\n account=None,\n duplicate=False,\n os_packages_only=False,\n no_self_upgrade=False,\n no_bootstrap=False,\n quiet=False,\n staging=False,\n debug=False,\n debug_challenges=False,\n no_verify_ssl=False,\n tls_sni_01_port=challenges.TLSSNI01Response.PORT,\n tls_sni_01_address=\"\",\n http01_port=challenges.HTTP01Response.PORT,\n http01_address=\"\",\n break_my_certs=False,\n rsa_key_size=2048,\n must_staple=False,\n redirect=None,\n hsts=None,\n uir=None,\n staple=None,\n strict_permissions=False,\n pref_challs=[],\n validate_hooks=True,\n directory_hooks=True,\n reuse_key=False,\n disable_renew_updates=False,\n\n # Subparsers\n num=None,\n user_agent=None,\n user_agent_comment=None,\n csr=None,\n reason=0,\n delete_after_revoke=None,\n rollback_checkpoints=1,\n init=False,\n prepare=False,\n ifaces=None,\n\n # Path parsers\n auth_cert_path=\"./cert.pem\",\n auth_chain_path=\"./chain.pem\",\n key_path=None,\n config_dir=\"/etc/letsencrypt\",\n work_dir=\"/var/lib/letsencrypt\",\n logs_dir=\"/var/log/letsencrypt\",\n server=\"https://acme-v01.api.letsencrypt.org/directory\",\n\n # Plugins parsers\n configurator=None,\n authenticator=None,\n installer=None,\n apache=False,\n nginx=False,\n standalone=False,\n manual=False,\n webroot=False,\n dns_cloudflare=False,\n dns_cloudxns=False,\n dns_digitalocean=False,\n dns_dnsimple=False,\n dns_dnsmadeeasy=False,\n dns_google=False,\n dns_luadns=False,\n dns_nsone=False,\n dns_rfc2136=False,\n dns_route53=False\n\n)\nSTAGING_URI = \"https://acme-staging-v02.api.letsencrypt.org/directory\"\n\n# The set of reasons for revoking a certificate is defined in RFC 5280 in\n# section 5.3.1. The reasons that users are allowed to submit are restricted to\n# those accepted by the ACME server implementation. 
They are listed in\n# `letsencrypt.boulder.revocation.reasons.go`.\nREVOCATION_REASONS = {\n \"unspecified\": 0,\n \"keycompromise\": 1,\n \"affiliationchanged\": 3,\n \"superseded\": 4,\n \"cessationofoperation\": 5}\n\n\"\"\"Defaults for CLI flags and `.IConfig` attributes.\"\"\"\n\nQUIET_LOGGING_LEVEL = logging.WARNING\n\"\"\"Logging level to use in quiet mode.\"\"\"\n\nRENEWER_DEFAULTS = dict(\n renewer_enabled=\"yes\",\n renew_before_expiry=\"30 days\",\n # This value should ensure that there is never a deployment delay by\n # default.\n deploy_before_expiry=\"99 years\",\n)\n\"\"\"Defaults for renewer script.\"\"\"\n\n\nENHANCEMENTS = [\"redirect\", \"ensure-http-header\", \"ocsp-stapling\", \"spdy\"]\n\"\"\"List of possible :class:`certbot.interfaces.IInstaller`\nenhancements.\n\nList of expected options parameters:\n- redirect: None\n- ensure-http-header: name of header (i.e. Strict-Transport-Security)\n- ocsp-stapling: certificate chain file path\n- spdy: TODO\n\n\"\"\"\n\nARCHIVE_DIR = \"archive\"\n\"\"\"Archive directory, relative to `IConfig.config_dir`.\"\"\"\n\nCONFIG_DIRS_MODE = 0o755\n\"\"\"Directory mode for ``.IConfig.config_dir`` et al.\"\"\"\n\nACCOUNTS_DIR = \"accounts\"\n\"\"\"Directory where all accounts are saved.\"\"\"\n\nLE_REUSE_SERVERS = {\n 'acme-staging-v02.api.letsencrypt.org/directory':\n 'acme-staging.api.letsencrypt.org/directory'\n}\n\"\"\"Servers that can reuse accounts from other servers.\"\"\"\n\nBACKUP_DIR = \"backups\"\n\"\"\"Directory (relative to `IConfig.work_dir`) where backups are kept.\"\"\"\n\nCSR_DIR = \"csr\"\n\"\"\"See `.IConfig.csr_dir`.\"\"\"\n\nIN_PROGRESS_DIR = \"IN_PROGRESS\"\n\"\"\"Directory used before a permanent checkpoint is finalized (relative to\n`IConfig.work_dir`).\"\"\"\n\nKEY_DIR = \"keys\"\n\"\"\"Directory (relative to `IConfig.config_dir`) where keys are saved.\"\"\"\n\nLIVE_DIR = \"live\"\n\"\"\"Live directory, relative to `IConfig.config_dir`.\"\"\"\n\nTEMP_CHECKPOINT_DIR = \"temp_checkpoint\"\n\"\"\"Temporary checkpoint directory (relative to `IConfig.work_dir`).\"\"\"\n\nRENEWAL_CONFIGS_DIR = \"renewal\"\n\"\"\"Renewal configs directory, relative to `IConfig.config_dir`.\"\"\"\n\nRENEWAL_HOOKS_DIR = \"renewal-hooks\"\n\"\"\"Basename of directory containing hooks to run with the renew command.\"\"\"\n\nRENEWAL_PRE_HOOKS_DIR = \"pre\"\n\"\"\"Basename of directory containing pre-hooks to run with the renew command.\"\"\"\n\nRENEWAL_DEPLOY_HOOKS_DIR = \"deploy\"\n\"\"\"Basename of directory containing deploy-hooks to run with the renew command.\"\"\"\n\nRENEWAL_POST_HOOKS_DIR = \"post\"\n\"\"\"Basename of directory containing post-hooks to run with the renew command.\"\"\"\n\nFORCE_INTERACTIVE_FLAG = \"--force-interactive\"\n\"\"\"Flag to disable TTY checking in IDisplay.\"\"\"\n\nEFF_SUBSCRIBE_URI = \"https://supporters.eff.org/subscribe/certbot\"\n\"\"\"EFF URI used to submit the e-mail address of users who opt-in.\"\"\"\n\nSSL_DHPARAMS_DEST = \"ssl-dhparams.pem\"\n\"\"\"Name of the ssl_dhparams file as saved in `IConfig.config_dir`.\"\"\"\n\nSSL_DHPARAMS_SRC = pkg_resources.resource_filename(\n \"certbot\", \"ssl-dhparams.pem\")\n\"\"\"Path to the nginx ssl_dhparams file found in the Certbot distribution.\"\"\"\n\nUPDATED_SSL_DHPARAMS_DIGEST = \".updated-ssl-dhparams-pem-digest.txt\"\n\"\"\"Name of the hash of the updated or informed ssl_dhparams as saved in `IConfig.config_dir`.\"\"\"\n\nALL_SSL_DHPARAMS_HASHES = [\n '9ba6429597aeed2d8617a7705b56e96d044f64b07971659382e426675105654b',\n]\n\"\"\"SHA256 hashes of the 
contents of all versions of SSL_DHPARAMS_SRC\"\"\"\n",
"path": "certbot/constants.py"
}
] | [
{
"content": "\"\"\"Certbot constants.\"\"\"\nimport logging\nimport os\nimport pkg_resources\n\nfrom acme import challenges\n\n\nSETUPTOOLS_PLUGINS_ENTRY_POINT = \"certbot.plugins\"\n\"\"\"Setuptools entry point group name for plugins.\"\"\"\n\nOLD_SETUPTOOLS_PLUGINS_ENTRY_POINT = \"letsencrypt.plugins\"\n\"\"\"Plugins Setuptools entry point before rename.\"\"\"\n\nCLI_DEFAULTS = dict(\n config_files=[\n \"/etc/letsencrypt/cli.ini\",\n # http://freedesktop.org/wiki/Software/xdg-user-dirs/\n os.path.join(os.environ.get(\"XDG_CONFIG_HOME\", \"~/.config\"),\n \"letsencrypt\", \"cli.ini\"),\n ],\n\n # Main parser\n verbose_count=-int(logging.INFO / 10),\n text_mode=False,\n max_log_backups=1000,\n noninteractive_mode=False,\n force_interactive=False,\n domains=[],\n certname=None,\n dry_run=False,\n register_unsafely_without_email=False,\n update_registration=False,\n email=None,\n eff_email=None,\n reinstall=False,\n expand=False,\n renew_by_default=False,\n renew_with_new_domains=False,\n autorenew=True,\n allow_subset_of_names=False,\n tos=False,\n account=None,\n duplicate=False,\n os_packages_only=False,\n no_self_upgrade=False,\n no_bootstrap=False,\n quiet=False,\n staging=False,\n debug=False,\n debug_challenges=False,\n no_verify_ssl=False,\n tls_sni_01_port=challenges.TLSSNI01Response.PORT,\n tls_sni_01_address=\"\",\n http01_port=challenges.HTTP01Response.PORT,\n http01_address=\"\",\n break_my_certs=False,\n rsa_key_size=2048,\n must_staple=False,\n redirect=None,\n hsts=None,\n uir=None,\n staple=None,\n strict_permissions=False,\n pref_challs=[],\n validate_hooks=True,\n directory_hooks=True,\n reuse_key=False,\n disable_renew_updates=False,\n\n # Subparsers\n num=None,\n user_agent=None,\n user_agent_comment=None,\n csr=None,\n reason=0,\n delete_after_revoke=None,\n rollback_checkpoints=1,\n init=False,\n prepare=False,\n ifaces=None,\n\n # Path parsers\n auth_cert_path=\"./cert.pem\",\n auth_chain_path=\"./chain.pem\",\n key_path=None,\n config_dir=\"/etc/letsencrypt\",\n work_dir=\"/var/lib/letsencrypt\",\n logs_dir=\"/var/log/letsencrypt\",\n server=\"https://acme-v01.api.letsencrypt.org/directory\",\n\n # Plugins parsers\n configurator=None,\n authenticator=None,\n installer=None,\n apache=False,\n nginx=False,\n standalone=False,\n manual=False,\n webroot=False,\n dns_cloudflare=False,\n dns_cloudxns=False,\n dns_digitalocean=False,\n dns_dnsimple=False,\n dns_dnsmadeeasy=False,\n dns_google=False,\n dns_luadns=False,\n dns_nsone=False,\n dns_rfc2136=False,\n dns_route53=False\n\n)\nSTAGING_URI = \"https://acme-staging-v02.api.letsencrypt.org/directory\"\n\n# The set of reasons for revoking a certificate is defined in RFC 5280 in\n# section 5.3.1. The reasons that users are allowed to submit are restricted to\n# those accepted by the ACME server implementation. 
They are listed in\n# `letsencrypt.boulder.revocation.reasons.go`.\nREVOCATION_REASONS = {\n \"unspecified\": 0,\n \"keycompromise\": 1,\n \"affiliationchanged\": 3,\n \"superseded\": 4,\n \"cessationofoperation\": 5}\n\n\"\"\"Defaults for CLI flags and `.IConfig` attributes.\"\"\"\n\nQUIET_LOGGING_LEVEL = logging.WARNING\n\"\"\"Logging level to use in quiet mode.\"\"\"\n\nRENEWER_DEFAULTS = dict(\n renewer_enabled=\"yes\",\n renew_before_expiry=\"30 days\",\n # This value should ensure that there is never a deployment delay by\n # default.\n deploy_before_expiry=\"99 years\",\n)\n\"\"\"Defaults for renewer script.\"\"\"\n\n\nENHANCEMENTS = [\"redirect\", \"ensure-http-header\", \"ocsp-stapling\", \"spdy\"]\n\"\"\"List of possible :class:`certbot.interfaces.IInstaller`\nenhancements.\n\nList of expected options parameters:\n- redirect: None\n- ensure-http-header: name of header (i.e. Strict-Transport-Security)\n- ocsp-stapling: certificate chain file path\n- spdy: TODO\n\n\"\"\"\n\nARCHIVE_DIR = \"archive\"\n\"\"\"Archive directory, relative to `IConfig.config_dir`.\"\"\"\n\nCONFIG_DIRS_MODE = 0o755\n\"\"\"Directory mode for ``.IConfig.config_dir`` et al.\"\"\"\n\nACCOUNTS_DIR = \"accounts\"\n\"\"\"Directory where all accounts are saved.\"\"\"\n\nLE_REUSE_SERVERS = {\n 'acme-v02.api.letsencrypt.org/directory': 'acme-v01.api.letsencrypt.org/directory',\n 'acme-staging-v02.api.letsencrypt.org/directory':\n 'acme-staging.api.letsencrypt.org/directory'\n}\n\"\"\"Servers that can reuse accounts from other servers.\"\"\"\n\nBACKUP_DIR = \"backups\"\n\"\"\"Directory (relative to `IConfig.work_dir`) where backups are kept.\"\"\"\n\nCSR_DIR = \"csr\"\n\"\"\"See `.IConfig.csr_dir`.\"\"\"\n\nIN_PROGRESS_DIR = \"IN_PROGRESS\"\n\"\"\"Directory used before a permanent checkpoint is finalized (relative to\n`IConfig.work_dir`).\"\"\"\n\nKEY_DIR = \"keys\"\n\"\"\"Directory (relative to `IConfig.config_dir`) where keys are saved.\"\"\"\n\nLIVE_DIR = \"live\"\n\"\"\"Live directory, relative to `IConfig.config_dir`.\"\"\"\n\nTEMP_CHECKPOINT_DIR = \"temp_checkpoint\"\n\"\"\"Temporary checkpoint directory (relative to `IConfig.work_dir`).\"\"\"\n\nRENEWAL_CONFIGS_DIR = \"renewal\"\n\"\"\"Renewal configs directory, relative to `IConfig.config_dir`.\"\"\"\n\nRENEWAL_HOOKS_DIR = \"renewal-hooks\"\n\"\"\"Basename of directory containing hooks to run with the renew command.\"\"\"\n\nRENEWAL_PRE_HOOKS_DIR = \"pre\"\n\"\"\"Basename of directory containing pre-hooks to run with the renew command.\"\"\"\n\nRENEWAL_DEPLOY_HOOKS_DIR = \"deploy\"\n\"\"\"Basename of directory containing deploy-hooks to run with the renew command.\"\"\"\n\nRENEWAL_POST_HOOKS_DIR = \"post\"\n\"\"\"Basename of directory containing post-hooks to run with the renew command.\"\"\"\n\nFORCE_INTERACTIVE_FLAG = \"--force-interactive\"\n\"\"\"Flag to disable TTY checking in IDisplay.\"\"\"\n\nEFF_SUBSCRIBE_URI = \"https://supporters.eff.org/subscribe/certbot\"\n\"\"\"EFF URI used to submit the e-mail address of users who opt-in.\"\"\"\n\nSSL_DHPARAMS_DEST = \"ssl-dhparams.pem\"\n\"\"\"Name of the ssl_dhparams file as saved in `IConfig.config_dir`.\"\"\"\n\nSSL_DHPARAMS_SRC = pkg_resources.resource_filename(\n \"certbot\", \"ssl-dhparams.pem\")\n\"\"\"Path to the nginx ssl_dhparams file found in the Certbot distribution.\"\"\"\n\nUPDATED_SSL_DHPARAMS_DIGEST = \".updated-ssl-dhparams-pem-digest.txt\"\n\"\"\"Name of the hash of the updated or informed ssl_dhparams as saved in `IConfig.config_dir`.\"\"\"\n\nALL_SSL_DHPARAMS_HASHES = [\n 
'9ba6429597aeed2d8617a7705b56e96d044f64b07971659382e426675105654b',\n]\n\"\"\"SHA256 hashes of the contents of all versions of SSL_DHPARAMS_SRC\"\"\"\n",
"path": "certbot/constants.py"
}
] | diff --git a/certbot/constants.py b/certbot/constants.py
index 93bc269af63..e4ce0147d70 100644
--- a/certbot/constants.py
+++ b/certbot/constants.py
@@ -160,6 +160,7 @@
"""Directory where all accounts are saved."""
LE_REUSE_SERVERS = {
+ 'acme-v02.api.letsencrypt.org/directory': 'acme-v01.api.letsencrypt.org/directory',
'acme-staging-v02.api.letsencrypt.org/directory':
'acme-staging.api.letsencrypt.org/directory'
}
diff --git a/certbot/tests/account_test.py b/certbot/tests/account_test.py
index a8059fbcf5d..a4fe5edb78c 100644
--- a/certbot/tests/account_test.py
+++ b/certbot/tests/account_test.py
@@ -218,12 +218,18 @@ def test_find_all_server_downgrade(self):
self._set_server('https://acme-staging.api.letsencrypt.org/directory')
self.assertEqual([], self.storage.find_all())
- def test_upgrade_version(self):
+ def test_upgrade_version_staging(self):
self._set_server('https://acme-staging.api.letsencrypt.org/directory')
self.storage.save(self.acc, self.mock_client)
self._set_server('https://acme-staging-v02.api.letsencrypt.org/directory')
self.assertEqual([self.acc], self.storage.find_all())
+ def test_upgrade_version_production(self):
+ self._set_server('https://acme-v01.api.letsencrypt.org/directory')
+ self.storage.save(self.acc, self.mock_client)
+ self._set_server('https://acme-v02.api.letsencrypt.org/directory')
+ self.assertEqual([self.acc], self.storage.find_all())
+
@mock.patch('os.rmdir')
def test_corrupted_account(self, mock_rmdir):
# pylint: disable=protected-access
|
xonsh__xonsh-3623 | Include signature of functions in help (? in Python mode)
In xonsh:
```
$ from pathlib import Path
$ p = Path.cwd()
$ p.mkdir?
Type: method
String form: <bound method Path.mkdir of PosixPath('/home/pierre')>
File: /usr/lib/python3.8/pathlib.py
Docstring: Create a new directory at this given path.
<bound method Path.mkdir of PosixPath('/home/pierre')>
```
I tend to think the IPython output is better, in particular because it includes the signature:
```
$ ipython
Python 3.8.2 (default, Apr 29 2020, 17:54:06)
Type 'copyright', 'credits' or 'license' for more information
IPython 7.13.0 -- An enhanced Interactive Python. Type '?' for help.
In [1]: from pathlib import Path
In [2]: p = Path.cwd()
In [3]: p.mkdir?
Signature: p.mkdir(mode=511, parents=False, exist_ok=False)
Docstring: Create a new directory at this given path.
File: ~/.pyenv/versions/3.8.2/lib/python3.8/pathlib.py
Type: method
```
Could the signature also be included in xonsh?
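For reference, the signature line IPython prints can be produced with the standard library's `inspect.signature`, which accepts bound methods and already omits the implicit `self`. A minimal standalone sketch, independent of xonsh internals:
```
import inspect
from pathlib import Path

p = Path.cwd()

# inspect.signature handles bound methods directly and drops the
# implicit `self`, so the printed line matches IPython's output:
sig = inspect.signature(p.mkdir)
print("Signature: p.mkdir" + str(sig))
# Signature: p.mkdir(mode=511, parents=False, exist_ok=False)
```
This is essentially what the patched `_getdef` in `xonsh/inspectors.py` below does with `oname + str(inspect.signature(obj))`.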
| [
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"Tools for inspecting Python objects.\n\nThis file was forked from the IPython project:\n\n* Copyright (c) 2008-2014, IPython Development Team\n* Copyright (C) 2001-2007 Fernando Perez <[email protected]>\n* Copyright (c) 2001, Janko Hauser <[email protected]>\n* Copyright (c) 2001, Nathaniel Gray <[email protected]>\n\"\"\"\nimport os\nimport io\nimport sys\nimport types\nimport inspect\nimport itertools\nimport linecache\nimport collections\n\nfrom xonsh.lazyasd import LazyObject\nfrom xonsh.tokenize import detect_encoding\nfrom xonsh.openpy import read_py_file\nfrom xonsh.tools import cast_unicode, safe_hasattr, indent, print_color, format_color\nfrom xonsh.platform import HAS_PYGMENTS, PYTHON_VERSION_INFO\nfrom xonsh.lazyimps import pygments, pyghooks\nfrom xonsh.style_tools import partial_color_tokenize\n\n\n# builtin docstrings to ignore\n_func_call_docstring = LazyObject(\n lambda: types.FunctionType.__call__.__doc__, globals(), \"_func_call_docstring\"\n)\n_object_init_docstring = LazyObject(\n lambda: object.__init__.__doc__, globals(), \"_object_init_docstring\"\n)\n\n_builtin_type_docstrings = LazyObject(\n lambda: {\n inspect.getdoc(t)\n for t in (types.ModuleType, types.MethodType, types.FunctionType, property)\n },\n globals(),\n \"_builtin_type_docstrings\",\n)\n\n_builtin_func_type = LazyObject(lambda: type(all), globals(), \"_builtin_func_type\")\n# Bound methods have the same type as builtin functions\n_builtin_meth_type = LazyObject(\n lambda: type(str.upper), globals(), \"_builtin_meth_type\"\n)\n\ninfo_fields = LazyObject(\n lambda: [\n \"type_name\",\n \"base_class\",\n \"string_form\",\n \"namespace\",\n \"length\",\n \"file\",\n \"definition\",\n \"docstring\",\n \"source\",\n \"init_definition\",\n \"class_docstring\",\n \"init_docstring\",\n \"call_def\",\n \"call_docstring\",\n # These won't be printed but will be used to determine how to\n # format the object\n \"ismagic\",\n \"isalias\",\n \"isclass\",\n \"argspec\",\n \"found\",\n \"name\",\n ],\n globals(),\n \"info_fields\",\n)\n\n\ndef object_info(**kw):\n \"\"\"Make an object info dict with all fields present.\"\"\"\n infodict = dict(itertools.zip_longest(info_fields, [None]))\n infodict.update(kw)\n return infodict\n\n\ndef get_encoding(obj):\n \"\"\"Get encoding for python source file defining obj\n\n Returns None if obj is not defined in a sourcefile.\n \"\"\"\n ofile = find_file(obj)\n # run contents of file through pager starting at line where the object\n # is defined, as long as the file isn't binary and is actually on the\n # filesystem.\n if ofile is None:\n return None\n elif ofile.endswith((\".so\", \".dll\", \".pyd\")):\n return None\n elif not os.path.isfile(ofile):\n return None\n else:\n # Print only text files, not extension binaries. Note that\n # getsourcelines returns lineno with 1-offset and page() uses\n # 0-offset, so we must adjust.\n with io.open(ofile, \"rb\") as buf: # Tweaked to use io.open for Python 2\n encoding, _ = detect_encoding(buf.readline)\n return encoding\n\n\ndef getdoc(obj):\n \"\"\"Stable wrapper around inspect.getdoc.\n\n This can't crash because of attribute problems.\n\n It also attempts to call a getdoc() method on the given object. This\n allows objects which provide their docstrings via non-standard mechanisms\n (like Pyro proxies) to still be inspected by ipython's ? 
system.\"\"\"\n # Allow objects to offer customized documentation via a getdoc method:\n try:\n ds = obj.getdoc()\n except Exception: # pylint:disable=broad-except\n pass\n else:\n # if we get extra info, we add it to the normal docstring.\n if isinstance(ds, str):\n return inspect.cleandoc(ds)\n\n try:\n docstr = inspect.getdoc(obj)\n encoding = get_encoding(obj)\n return cast_unicode(docstr, encoding=encoding)\n except Exception: # pylint:disable=broad-except\n # Harden against an inspect failure, which can occur with\n # SWIG-wrapped extensions.\n raise\n\n\ndef getsource(obj, is_binary=False):\n \"\"\"Wrapper around inspect.getsource.\n\n This can be modified by other projects to provide customized source\n extraction.\n\n Inputs:\n\n - obj: an object whose source code we will attempt to extract.\n\n Optional inputs:\n\n - is_binary: whether the object is known to come from a binary source.\n This implementation will skip returning any output for binary objects,\n but custom extractors may know how to meaningfully process them.\"\"\"\n\n if is_binary:\n return None\n else:\n # get source if obj was decorated with @decorator\n if hasattr(obj, \"__wrapped__\"):\n obj = obj.__wrapped__\n try:\n src = inspect.getsource(obj)\n except TypeError:\n if hasattr(obj, \"__class__\"):\n src = inspect.getsource(obj.__class__)\n encoding = get_encoding(obj)\n return cast_unicode(src, encoding=encoding)\n\n\ndef is_simple_callable(obj):\n \"\"\"True if obj is a function ()\"\"\"\n return (\n inspect.isfunction(obj)\n or inspect.ismethod(obj)\n or isinstance(obj, _builtin_func_type)\n or isinstance(obj, _builtin_meth_type)\n )\n\n\ndef getargspec(obj):\n \"\"\"Wrapper around :func:`inspect.getfullargspec` on Python 3, and\n :func:inspect.getargspec` on Python 2.\n\n In addition to functions and methods, this can also handle objects with a\n ``__call__`` attribute.\n \"\"\"\n if safe_hasattr(obj, \"__call__\") and not is_simple_callable(obj):\n obj = obj.__call__\n\n return inspect.getfullargspec(obj)\n\n\ndef format_argspec(argspec):\n \"\"\"Format argspect, convenience wrapper around inspect's.\n\n This takes a dict instead of ordered arguments and calls\n inspect.format_argspec with the arguments in the necessary order.\n \"\"\"\n return inspect.formatargspec(\n argspec[\"args\"], argspec[\"varargs\"], argspec[\"varkw\"], argspec[\"defaults\"]\n )\n\n\ndef call_tip(oinfo, format_call=True):\n \"\"\"Extract call tip data from an oinfo dict.\n\n Parameters\n ----------\n oinfo : dict\n\n format_call : bool, optional\n If True, the call line is formatted and returned as a string. If not, a\n tuple of (name, argspec) is returned.\n\n Returns\n -------\n call_info : None, str or (str, dict) tuple.\n When format_call is True, the whole call information is formatted as a\n single string. Otherwise, the object's name and its argspec dict are\n returned. If no call information is available, None is returned.\n\n docstring : str or None\n The most relevant docstring for calling purposes is returned, if\n available. 
The priority is: call docstring for callable instances, then\n constructor docstring for classes, then main object's docstring otherwise\n (regular functions).\n \"\"\"\n # Get call definition\n argspec = oinfo.get(\"argspec\")\n if argspec is None:\n call_line = None\n else:\n # Callable objects will have 'self' as their first argument, prune\n # it out if it's there for clarity (since users do *not* pass an\n # extra first argument explicitly).\n try:\n has_self = argspec[\"args\"][0] == \"self\"\n except (KeyError, IndexError):\n pass\n else:\n if has_self:\n argspec[\"args\"] = argspec[\"args\"][1:]\n\n call_line = oinfo[\"name\"] + format_argspec(argspec)\n\n # Now get docstring.\n # The priority is: call docstring, constructor docstring, main one.\n doc = oinfo.get(\"call_docstring\")\n if doc is None:\n doc = oinfo.get(\"init_docstring\")\n if doc is None:\n doc = oinfo.get(\"docstring\", \"\")\n\n return call_line, doc\n\n\ndef find_file(obj):\n \"\"\"Find the absolute path to the file where an object was defined.\n\n This is essentially a robust wrapper around `inspect.getabsfile`.\n\n Returns None if no file can be found.\n\n Parameters\n ----------\n obj : any Python object\n\n Returns\n -------\n fname : str\n The absolute path to the file where the object was defined.\n \"\"\"\n # get source if obj was decorated with @decorator\n if safe_hasattr(obj, \"__wrapped__\"):\n obj = obj.__wrapped__\n\n fname = None\n try:\n fname = inspect.getabsfile(obj)\n except TypeError:\n # For an instance, the file that matters is where its class was\n # declared.\n if hasattr(obj, \"__class__\"):\n try:\n fname = inspect.getabsfile(obj.__class__)\n except TypeError:\n # Can happen for builtins\n pass\n except: # pylint:disable=bare-except\n pass\n return cast_unicode(fname)\n\n\ndef find_source_lines(obj):\n \"\"\"Find the line number in a file where an object was defined.\n\n This is essentially a robust wrapper around `inspect.getsourcelines`.\n\n Returns None if no file can be found.\n\n Parameters\n ----------\n obj : any Python object\n\n Returns\n -------\n lineno : int\n The line number where the object definition starts.\n \"\"\"\n # get source if obj was decorated with @decorator\n if safe_hasattr(obj, \"__wrapped__\"):\n obj = obj.__wrapped__\n\n try:\n try:\n lineno = inspect.getsourcelines(obj)[1]\n except TypeError:\n # For instances, try the class object like getsource() does\n if hasattr(obj, \"__class__\"):\n lineno = inspect.getsourcelines(obj.__class__)[1]\n else:\n lineno = None\n except: # pylint:disable=bare-except\n return None\n\n return lineno\n\n\nif PYTHON_VERSION_INFO < (3, 5, 0):\n FrameInfo = collections.namedtuple(\n \"FrameInfo\",\n [\"frame\", \"filename\", \"lineno\", \"function\", \"code_context\", \"index\"],\n )\n\n def getouterframes(frame, context=1):\n \"\"\"Wrapper for getouterframes so that it acts like the Python v3.5 version.\"\"\"\n return [FrameInfo(*f) for f in inspect.getouterframes(frame, context=context)]\n\n\nelse:\n getouterframes = inspect.getouterframes\n\n\nclass Inspector(object):\n \"\"\"Inspects objects.\"\"\"\n\n def __init__(self, str_detail_level=0):\n self.str_detail_level = str_detail_level\n\n def _getdef(self, obj, oname=\"\"):\n \"\"\"Return the call signature for any callable object.\n\n If any exception is generated, None is returned instead and the\n exception is suppressed.\n \"\"\"\n try:\n hdef = oname + inspect.signature(*getargspec(obj))\n return cast_unicode(hdef)\n except: # pylint:disable=bare-except\n return None\n\n 
def noinfo(self, msg, oname):\n \"\"\"Generic message when no information is found.\"\"\"\n print(\"No %s found\" % msg, end=\" \")\n if oname:\n print(\"for %s\" % oname)\n else:\n print()\n\n def pdef(self, obj, oname=\"\"):\n \"\"\"Print the call signature for any callable object.\n\n If the object is a class, print the constructor information.\n \"\"\"\n\n if not callable(obj):\n print(\"Object is not callable.\")\n return\n\n header = \"\"\n\n if inspect.isclass(obj):\n header = self.__head(\"Class constructor information:\\n\")\n obj = obj.__init__\n\n output = self._getdef(obj, oname)\n if output is None:\n self.noinfo(\"definition header\", oname)\n else:\n print(header, output, end=\" \", file=sys.stdout)\n\n def pdoc(self, obj, oname=\"\"):\n \"\"\"Print the docstring for any object.\n\n Optional\n\n -formatter: a function to run the docstring through for specially\n formatted docstrings.\n \"\"\"\n\n head = self.__head # For convenience\n lines = []\n ds = getdoc(obj)\n if ds:\n lines.append(head(\"Class docstring:\"))\n lines.append(indent(ds))\n if inspect.isclass(obj) and hasattr(obj, \"__init__\"):\n init_ds = getdoc(obj.__init__)\n if init_ds is not None:\n lines.append(head(\"Init docstring:\"))\n lines.append(indent(init_ds))\n elif hasattr(obj, \"__call__\"):\n call_ds = getdoc(obj.__call__)\n if call_ds:\n lines.append(head(\"Call docstring:\"))\n lines.append(indent(call_ds))\n\n if not lines:\n self.noinfo(\"documentation\", oname)\n else:\n print(\"\\n\".join(lines))\n\n def psource(self, obj, oname=\"\"):\n \"\"\"Print the source code for an object.\"\"\"\n # Flush the source cache because inspect can return out-of-date source\n linecache.checkcache()\n try:\n src = getsource(obj)\n except: # pylint:disable=bare-except\n self.noinfo(\"source\", oname)\n else:\n print(src)\n\n def pfile(self, obj, oname=\"\"):\n \"\"\"Show the whole file where an object was defined.\"\"\"\n lineno = find_source_lines(obj)\n if lineno is None:\n self.noinfo(\"file\", oname)\n return\n\n ofile = find_file(obj)\n # run contents of file through pager starting at line where the object\n # is defined, as long as the file isn't binary and is actually on the\n # filesystem.\n if ofile.endswith((\".so\", \".dll\", \".pyd\")):\n print(\"File %r is binary, not printing.\" % ofile)\n elif not os.path.isfile(ofile):\n print(\"File %r does not exist, not printing.\" % ofile)\n else:\n # Print only text files, not extension binaries. Note that\n # getsourcelines returns lineno with 1-offset and page() uses\n # 0-offset, so we must adjust.\n o = read_py_file(ofile, skip_encoding_cookie=False)\n print(o, lineno - 1)\n\n def _format_fields_str(self, fields, title_width=0):\n \"\"\"Formats a list of fields for display using color strings.\n\n Parameters\n ----------\n fields : list\n A list of 2-tuples: (field_title, field_content)\n title_width : int\n How many characters to pad titles to. 
Default to longest title.\n \"\"\"\n out = []\n if title_width == 0:\n title_width = max(len(title) + 2 for title, _ in fields)\n for title, content in fields:\n title_len = len(title)\n title = \"{BOLD_RED}\" + title + \":{NO_COLOR}\"\n if len(content.splitlines()) > 1:\n title += \"\\n\"\n else:\n title += \" \".ljust(title_width - title_len)\n out.append(cast_unicode(title) + cast_unicode(content))\n return format_color(\"\\n\".join(out) + \"\\n\")\n\n def _format_fields_tokens(self, fields, title_width=0):\n \"\"\"Formats a list of fields for display using color tokens from\n pygments.\n\n Parameters\n ----------\n fields : list\n A list of 2-tuples: (field_title, field_content)\n title_width : int\n How many characters to pad titles to. Default to longest title.\n \"\"\"\n out = []\n if title_width == 0:\n title_width = max(len(title) + 2 for title, _ in fields)\n for title, content in fields:\n title_len = len(title)\n title = \"{BOLD_RED}\" + title + \":{NO_COLOR}\"\n if not isinstance(content, str) or len(content.splitlines()) > 1:\n title += \"\\n\"\n else:\n title += \" \".ljust(title_width - title_len)\n out += partial_color_tokenize(title)\n if isinstance(content, str):\n out[-1] = (out[-1][0], out[-1][1] + content + \"\\n\")\n else:\n out += content\n out[-1] = (out[-1][0], out[-1][1] + \"\\n\")\n out[-1] = (out[-1][0], out[-1][1] + \"\\n\")\n return out\n\n def _format_fields(self, fields, title_width=0):\n \"\"\"Formats a list of fields for display using color tokens from\n pygments.\n\n Parameters\n ----------\n fields : list\n A list of 2-tuples: (field_title, field_content)\n title_width : int\n How many characters to pad titles to. Default to longest title.\n \"\"\"\n if HAS_PYGMENTS:\n rtn = self._format_fields_tokens(fields, title_width=title_width)\n else:\n rtn = self._format_fields_str(fields, title_width=title_width)\n return rtn\n\n # The fields to be displayed by pinfo: (fancy_name, key_in_info_dict)\n pinfo_fields1 = [(\"Type\", \"type_name\")]\n\n pinfo_fields2 = [(\"String form\", \"string_form\")]\n\n pinfo_fields3 = [\n (\"Length\", \"length\"),\n (\"File\", \"file\"),\n (\"Definition\", \"definition\"),\n ]\n\n pinfo_fields_obj = [\n (\"Class docstring\", \"class_docstring\"),\n (\"Init docstring\", \"init_docstring\"),\n (\"Call def\", \"call_def\"),\n (\"Call docstring\", \"call_docstring\"),\n ]\n\n def pinfo(self, obj, oname=\"\", info=None, detail_level=0):\n \"\"\"Show detailed information about an object.\n\n Parameters\n ----------\n obj : object\n oname : str, optional\n name of the variable pointing to the object.\n info : dict, optional\n a structure with some information fields which may have been\n precomputed already.\n detail_level : int, optional\n if set to 1, more information is given.\n \"\"\"\n info = self.info(obj, oname=oname, info=info, detail_level=detail_level)\n displayfields = []\n\n def add_fields(fields):\n for title, key in fields:\n field = info[key]\n if field is not None:\n displayfields.append((title, field.rstrip()))\n\n add_fields(self.pinfo_fields1)\n add_fields(self.pinfo_fields2)\n\n # Namespace\n if info[\"namespace\"] is not None and info[\"namespace\"] != \"Interactive\":\n displayfields.append((\"Namespace\", info[\"namespace\"].rstrip()))\n\n add_fields(self.pinfo_fields3)\n if info[\"isclass\"] and info[\"init_definition\"]:\n displayfields.append((\"Init definition\", info[\"init_definition\"].rstrip()))\n\n # Source or docstring, depending on detail level and whether\n # source found.\n if detail_level > 0 and 
info[\"source\"] is not None:\n displayfields.append((\"Source\", cast_unicode(info[\"source\"])))\n elif info[\"docstring\"] is not None:\n displayfields.append((\"Docstring\", info[\"docstring\"]))\n\n # Constructor info for classes\n if info[\"isclass\"]:\n if info[\"init_docstring\"] is not None:\n displayfields.append((\"Init docstring\", info[\"init_docstring\"]))\n\n # Info for objects:\n else:\n add_fields(self.pinfo_fields_obj)\n\n # Finally send to printer/pager:\n if displayfields:\n print_color(self._format_fields(displayfields))\n\n def info(self, obj, oname=\"\", info=None, detail_level=0):\n \"\"\"Compute a dict with detailed information about an object.\n\n Optional arguments:\n\n - oname: name of the variable pointing to the object.\n\n - info: a structure with some information fields which may have been\n precomputed already.\n\n - detail_level: if set to 1, more information is given.\n \"\"\"\n obj_type = type(obj)\n if info is None:\n ismagic = 0\n isalias = 0\n ospace = \"\"\n else:\n ismagic = info.ismagic\n isalias = info.isalias\n ospace = info.namespace\n # Get docstring, special-casing aliases:\n if isalias:\n if not callable(obj):\n if len(obj) >= 2 and isinstance(obj[1], str):\n ds = \"Alias to the system command:\\n {0}\".format(obj[1])\n else: # pylint:disable=bare-except\n ds = \"Alias: \" + str(obj)\n else:\n ds = \"Alias to \" + str(obj)\n if obj.__doc__:\n ds += \"\\nDocstring:\\n\" + obj.__doc__\n else:\n ds = getdoc(obj)\n if ds is None:\n ds = \"<no docstring>\"\n\n # store output in a dict, we initialize it here and fill it as we go\n out = dict(name=oname, found=True, isalias=isalias, ismagic=ismagic)\n\n string_max = 200 # max size of strings to show (snipped if longer)\n shalf = int((string_max - 5) / 2)\n\n if ismagic:\n obj_type_name = \"Magic function\"\n elif isalias:\n obj_type_name = \"System alias\"\n else:\n obj_type_name = obj_type.__name__\n out[\"type_name\"] = obj_type_name\n\n try:\n bclass = obj.__class__\n out[\"base_class\"] = str(bclass)\n except: # pylint:disable=bare-except\n pass\n\n # String form, but snip if too long in ? form (full in ??)\n if detail_level >= self.str_detail_level:\n try:\n ostr = str(obj)\n str_head = \"string_form\"\n if not detail_level and len(ostr) > string_max:\n ostr = ostr[:shalf] + \" <...> \" + ostr[-shalf:]\n ostr = (\"\\n\" + \" \" * len(str_head.expandtabs())).join(\n q.strip() for q in ostr.split(\"\\n\")\n )\n out[str_head] = ostr\n except: # pylint:disable=bare-except\n pass\n\n if ospace:\n out[\"namespace\"] = ospace\n\n # Length (for strings and lists)\n try:\n out[\"length\"] = str(len(obj))\n except: # pylint:disable=bare-except\n pass\n\n # Filename where object was defined\n binary_file = False\n fname = find_file(obj)\n if fname is None:\n # if anything goes wrong, we don't want to show source, so it's as\n # if the file was binary\n binary_file = True\n else:\n if fname.endswith((\".so\", \".dll\", \".pyd\")):\n binary_file = True\n elif fname.endswith(\"<string>\"):\n fname = \"Dynamically generated function. \" \"No source code available.\"\n out[\"file\"] = fname\n\n # Docstrings only in detail 0 mode, since source contains them (we\n # avoid repetitions). 
If source fails, we add them back, see below.\n if ds and detail_level == 0:\n out[\"docstring\"] = ds\n\n # Original source code for any callable\n if detail_level:\n # Flush the source cache because inspect can return out-of-date\n # source\n linecache.checkcache()\n source = None\n try:\n try:\n source = getsource(obj, binary_file)\n except TypeError:\n if hasattr(obj, \"__class__\"):\n source = getsource(obj.__class__, binary_file)\n if source is not None:\n source = source.rstrip()\n if HAS_PYGMENTS:\n lexer = pyghooks.XonshLexer()\n source = list(pygments.lex(source, lexer=lexer))\n out[\"source\"] = source\n except Exception: # pylint:disable=broad-except\n pass\n\n if ds and source is None:\n out[\"docstring\"] = ds\n\n # Constructor docstring for classes\n if inspect.isclass(obj):\n out[\"isclass\"] = True\n # reconstruct the function definition and print it:\n try:\n obj_init = obj.__init__\n except AttributeError:\n init_def = init_ds = None\n else:\n init_def = self._getdef(obj_init, oname)\n init_ds = getdoc(obj_init)\n # Skip Python's auto-generated docstrings\n if init_ds == _object_init_docstring:\n init_ds = None\n\n if init_def or init_ds:\n if init_def:\n out[\"init_definition\"] = init_def\n if init_ds:\n out[\"init_docstring\"] = init_ds\n\n # and class docstring for instances:\n else:\n # reconstruct the function definition and print it:\n defln = self._getdef(obj, oname)\n if defln:\n out[\"definition\"] = defln\n\n # First, check whether the instance docstring is identical to the\n # class one, and print it separately if they don't coincide. In\n # most cases they will, but it's nice to print all the info for\n # objects which use instance-customized docstrings.\n if ds:\n try:\n cls = getattr(obj, \"__class__\")\n except: # pylint:disable=bare-except\n class_ds = None\n else:\n class_ds = getdoc(cls)\n # Skip Python's auto-generated docstrings\n if class_ds in _builtin_type_docstrings:\n class_ds = None\n if class_ds and ds != class_ds:\n out[\"class_docstring\"] = class_ds\n\n # Next, try to show constructor docstrings\n try:\n init_ds = getdoc(obj.__init__)\n # Skip Python's auto-generated docstrings\n if init_ds == _object_init_docstring:\n init_ds = None\n except AttributeError:\n init_ds = None\n if init_ds:\n out[\"init_docstring\"] = init_ds\n\n # Call form docstring for callable instances\n if safe_hasattr(obj, \"__call__\") and not is_simple_callable(obj):\n call_def = self._getdef(obj.__call__, oname)\n if call_def:\n call_def = call_def\n # it may never be the case that call def and definition\n # differ, but don't include the same signature twice\n if call_def != out.get(\"definition\"):\n out[\"call_def\"] = call_def\n call_ds = getdoc(obj.__call__)\n # Skip Python's auto-generated docstrings\n if call_ds == _func_call_docstring:\n call_ds = None\n if call_ds:\n out[\"call_docstring\"] = call_ds\n\n # Compute the object's argspec as a callable. 
The key is to decide\n # whether to pull it from the object itself, from its __init__ or\n # from its __call__ method.\n\n if inspect.isclass(obj):\n # Old-style classes need not have an __init__\n callable_obj = getattr(obj, \"__init__\", None)\n elif callable(obj):\n callable_obj = obj\n else:\n callable_obj = None\n\n if callable_obj:\n try:\n argspec = getargspec(callable_obj)\n except (TypeError, AttributeError):\n # For extensions/builtins we can't retrieve the argspec\n pass\n else:\n # named tuples' _asdict() method returns an OrderedDict, but we\n # we want a normal\n out[\"argspec\"] = argspec_dict = dict(argspec._asdict())\n # We called this varkw before argspec became a named tuple.\n # With getfullargspec it's also called varkw.\n if \"varkw\" not in argspec_dict:\n argspec_dict[\"varkw\"] = argspec_dict.pop(\"keywords\")\n\n return object_info(**out)\n",
"path": "xonsh/inspectors.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"Tools for inspecting Python objects.\n\nThis file was forked from the IPython project:\n\n* Copyright (c) 2008-2014, IPython Development Team\n* Copyright (C) 2001-2007 Fernando Perez <[email protected]>\n* Copyright (c) 2001, Janko Hauser <[email protected]>\n* Copyright (c) 2001, Nathaniel Gray <[email protected]>\n\"\"\"\nimport os\nimport io\nimport sys\nimport types\nimport inspect\nimport itertools\nimport linecache\nimport collections\n\nfrom xonsh.lazyasd import LazyObject\nfrom xonsh.tokenize import detect_encoding\nfrom xonsh.openpy import read_py_file\nfrom xonsh.tools import cast_unicode, safe_hasattr, indent, print_color, format_color\nfrom xonsh.platform import HAS_PYGMENTS, PYTHON_VERSION_INFO\nfrom xonsh.lazyimps import pygments, pyghooks\nfrom xonsh.style_tools import partial_color_tokenize\n\n\n# builtin docstrings to ignore\n_func_call_docstring = LazyObject(\n lambda: types.FunctionType.__call__.__doc__, globals(), \"_func_call_docstring\"\n)\n_object_init_docstring = LazyObject(\n lambda: object.__init__.__doc__, globals(), \"_object_init_docstring\"\n)\n\n_builtin_type_docstrings = LazyObject(\n lambda: {\n inspect.getdoc(t)\n for t in (types.ModuleType, types.MethodType, types.FunctionType, property)\n },\n globals(),\n \"_builtin_type_docstrings\",\n)\n\n_builtin_func_type = LazyObject(lambda: type(all), globals(), \"_builtin_func_type\")\n# Bound methods have the same type as builtin functions\n_builtin_meth_type = LazyObject(\n lambda: type(str.upper), globals(), \"_builtin_meth_type\"\n)\n\ninfo_fields = LazyObject(\n lambda: [\n \"type_name\",\n \"base_class\",\n \"string_form\",\n \"namespace\",\n \"length\",\n \"file\",\n \"definition\",\n \"docstring\",\n \"source\",\n \"init_definition\",\n \"class_docstring\",\n \"init_docstring\",\n \"call_def\",\n \"call_docstring\",\n # These won't be printed but will be used to determine how to\n # format the object\n \"ismagic\",\n \"isalias\",\n \"isclass\",\n \"argspec\",\n \"found\",\n \"name\",\n ],\n globals(),\n \"info_fields\",\n)\n\n\ndef object_info(**kw):\n \"\"\"Make an object info dict with all fields present.\"\"\"\n infodict = dict(itertools.zip_longest(info_fields, [None]))\n infodict.update(kw)\n return infodict\n\n\ndef get_encoding(obj):\n \"\"\"Get encoding for python source file defining obj\n\n Returns None if obj is not defined in a sourcefile.\n \"\"\"\n ofile = find_file(obj)\n # run contents of file through pager starting at line where the object\n # is defined, as long as the file isn't binary and is actually on the\n # filesystem.\n if ofile is None:\n return None\n elif ofile.endswith((\".so\", \".dll\", \".pyd\")):\n return None\n elif not os.path.isfile(ofile):\n return None\n else:\n # Print only text files, not extension binaries. Note that\n # getsourcelines returns lineno with 1-offset and page() uses\n # 0-offset, so we must adjust.\n with io.open(ofile, \"rb\") as buf: # Tweaked to use io.open for Python 2\n encoding, _ = detect_encoding(buf.readline)\n return encoding\n\n\ndef getdoc(obj):\n \"\"\"Stable wrapper around inspect.getdoc.\n\n This can't crash because of attribute problems.\n\n It also attempts to call a getdoc() method on the given object. This\n allows objects which provide their docstrings via non-standard mechanisms\n (like Pyro proxies) to still be inspected by ipython's ? 
system.\"\"\"\n # Allow objects to offer customized documentation via a getdoc method:\n try:\n ds = obj.getdoc()\n except Exception: # pylint:disable=broad-except\n pass\n else:\n # if we get extra info, we add it to the normal docstring.\n if isinstance(ds, str):\n return inspect.cleandoc(ds)\n\n try:\n docstr = inspect.getdoc(obj)\n encoding = get_encoding(obj)\n return cast_unicode(docstr, encoding=encoding)\n except Exception: # pylint:disable=broad-except\n # Harden against an inspect failure, which can occur with\n # SWIG-wrapped extensions.\n raise\n\n\ndef getsource(obj, is_binary=False):\n \"\"\"Wrapper around inspect.getsource.\n\n This can be modified by other projects to provide customized source\n extraction.\n\n Inputs:\n\n - obj: an object whose source code we will attempt to extract.\n\n Optional inputs:\n\n - is_binary: whether the object is known to come from a binary source.\n This implementation will skip returning any output for binary objects,\n but custom extractors may know how to meaningfully process them.\"\"\"\n\n if is_binary:\n return None\n else:\n # get source if obj was decorated with @decorator\n if hasattr(obj, \"__wrapped__\"):\n obj = obj.__wrapped__\n try:\n src = inspect.getsource(obj)\n except TypeError:\n if hasattr(obj, \"__class__\"):\n src = inspect.getsource(obj.__class__)\n encoding = get_encoding(obj)\n return cast_unicode(src, encoding=encoding)\n\n\ndef is_simple_callable(obj):\n \"\"\"True if obj is a function ()\"\"\"\n return (\n inspect.isfunction(obj)\n or inspect.ismethod(obj)\n or isinstance(obj, _builtin_func_type)\n or isinstance(obj, _builtin_meth_type)\n )\n\n\ndef getargspec(obj):\n \"\"\"Wrapper around :func:`inspect.getfullargspec` on Python 3, and\n :func:inspect.getargspec` on Python 2.\n\n In addition to functions and methods, this can also handle objects with a\n ``__call__`` attribute.\n \"\"\"\n if safe_hasattr(obj, \"__call__\") and not is_simple_callable(obj):\n obj = obj.__call__\n\n return inspect.getfullargspec(obj)\n\n\ndef format_argspec(argspec):\n \"\"\"Format argspect, convenience wrapper around inspect's.\n\n This takes a dict instead of ordered arguments and calls\n inspect.format_argspec with the arguments in the necessary order.\n \"\"\"\n return inspect.formatargspec(\n argspec[\"args\"], argspec[\"varargs\"], argspec[\"varkw\"], argspec[\"defaults\"]\n )\n\n\ndef call_tip(oinfo, format_call=True):\n \"\"\"Extract call tip data from an oinfo dict.\n\n Parameters\n ----------\n oinfo : dict\n\n format_call : bool, optional\n If True, the call line is formatted and returned as a string. If not, a\n tuple of (name, argspec) is returned.\n\n Returns\n -------\n call_info : None, str or (str, dict) tuple.\n When format_call is True, the whole call information is formatted as a\n single string. Otherwise, the object's name and its argspec dict are\n returned. If no call information is available, None is returned.\n\n docstring : str or None\n The most relevant docstring for calling purposes is returned, if\n available. 
The priority is: call docstring for callable instances, then\n constructor docstring for classes, then main object's docstring otherwise\n (regular functions).\n \"\"\"\n # Get call definition\n argspec = oinfo.get(\"argspec\")\n if argspec is None:\n call_line = None\n else:\n # Callable objects will have 'self' as their first argument, prune\n # it out if it's there for clarity (since users do *not* pass an\n # extra first argument explicitly).\n try:\n has_self = argspec[\"args\"][0] == \"self\"\n except (KeyError, IndexError):\n pass\n else:\n if has_self:\n argspec[\"args\"] = argspec[\"args\"][1:]\n\n call_line = oinfo[\"name\"] + format_argspec(argspec)\n\n # Now get docstring.\n # The priority is: call docstring, constructor docstring, main one.\n doc = oinfo.get(\"call_docstring\")\n if doc is None:\n doc = oinfo.get(\"init_docstring\")\n if doc is None:\n doc = oinfo.get(\"docstring\", \"\")\n\n return call_line, doc\n\n\ndef find_file(obj):\n \"\"\"Find the absolute path to the file where an object was defined.\n\n This is essentially a robust wrapper around `inspect.getabsfile`.\n\n Returns None if no file can be found.\n\n Parameters\n ----------\n obj : any Python object\n\n Returns\n -------\n fname : str\n The absolute path to the file where the object was defined.\n \"\"\"\n # get source if obj was decorated with @decorator\n if safe_hasattr(obj, \"__wrapped__\"):\n obj = obj.__wrapped__\n\n fname = None\n try:\n fname = inspect.getabsfile(obj)\n except TypeError:\n # For an instance, the file that matters is where its class was\n # declared.\n if hasattr(obj, \"__class__\"):\n try:\n fname = inspect.getabsfile(obj.__class__)\n except TypeError:\n # Can happen for builtins\n pass\n except: # pylint:disable=bare-except\n pass\n return cast_unicode(fname)\n\n\ndef find_source_lines(obj):\n \"\"\"Find the line number in a file where an object was defined.\n\n This is essentially a robust wrapper around `inspect.getsourcelines`.\n\n Returns None if no file can be found.\n\n Parameters\n ----------\n obj : any Python object\n\n Returns\n -------\n lineno : int\n The line number where the object definition starts.\n \"\"\"\n # get source if obj was decorated with @decorator\n if safe_hasattr(obj, \"__wrapped__\"):\n obj = obj.__wrapped__\n\n try:\n try:\n lineno = inspect.getsourcelines(obj)[1]\n except TypeError:\n # For instances, try the class object like getsource() does\n if hasattr(obj, \"__class__\"):\n lineno = inspect.getsourcelines(obj.__class__)[1]\n else:\n lineno = None\n except: # pylint:disable=bare-except\n return None\n\n return lineno\n\n\nif PYTHON_VERSION_INFO < (3, 5, 0):\n FrameInfo = collections.namedtuple(\n \"FrameInfo\",\n [\"frame\", \"filename\", \"lineno\", \"function\", \"code_context\", \"index\"],\n )\n\n def getouterframes(frame, context=1):\n \"\"\"Wrapper for getouterframes so that it acts like the Python v3.5 version.\"\"\"\n return [FrameInfo(*f) for f in inspect.getouterframes(frame, context=context)]\n\n\nelse:\n getouterframes = inspect.getouterframes\n\n\nclass Inspector(object):\n \"\"\"Inspects objects.\"\"\"\n\n def __init__(self, str_detail_level=0):\n self.str_detail_level = str_detail_level\n\n def _getdef(self, obj, oname=\"\"):\n \"\"\"Return the call signature for any callable object.\n\n If any exception is generated, None is returned instead and the\n exception is suppressed.\n \"\"\"\n try:\n hdef = oname + str(inspect.signature(obj))\n return cast_unicode(hdef)\n except: # pylint:disable=bare-except\n return None\n\n def 
noinfo(self, msg, oname):\n \"\"\"Generic message when no information is found.\"\"\"\n print(\"No %s found\" % msg, end=\" \")\n if oname:\n print(\"for %s\" % oname)\n else:\n print()\n\n def pdef(self, obj, oname=\"\"):\n \"\"\"Print the call signature for any callable object.\n\n If the object is a class, print the constructor information.\n \"\"\"\n\n if not callable(obj):\n print(\"Object is not callable.\")\n return\n\n header = \"\"\n\n if inspect.isclass(obj):\n header = self.__head(\"Class constructor information:\\n\")\n obj = obj.__init__\n\n output = self._getdef(obj, oname)\n if output is None:\n self.noinfo(\"definition header\", oname)\n else:\n print(header, output, end=\" \", file=sys.stdout)\n\n def pdoc(self, obj, oname=\"\"):\n \"\"\"Print the docstring for any object.\n\n Optional\n\n -formatter: a function to run the docstring through for specially\n formatted docstrings.\n \"\"\"\n\n head = self.__head # For convenience\n lines = []\n ds = getdoc(obj)\n if ds:\n lines.append(head(\"Class docstring:\"))\n lines.append(indent(ds))\n if inspect.isclass(obj) and hasattr(obj, \"__init__\"):\n init_ds = getdoc(obj.__init__)\n if init_ds is not None:\n lines.append(head(\"Init docstring:\"))\n lines.append(indent(init_ds))\n elif hasattr(obj, \"__call__\"):\n call_ds = getdoc(obj.__call__)\n if call_ds:\n lines.append(head(\"Call docstring:\"))\n lines.append(indent(call_ds))\n\n if not lines:\n self.noinfo(\"documentation\", oname)\n else:\n print(\"\\n\".join(lines))\n\n def psource(self, obj, oname=\"\"):\n \"\"\"Print the source code for an object.\"\"\"\n # Flush the source cache because inspect can return out-of-date source\n linecache.checkcache()\n try:\n src = getsource(obj)\n except: # pylint:disable=bare-except\n self.noinfo(\"source\", oname)\n else:\n print(src)\n\n def pfile(self, obj, oname=\"\"):\n \"\"\"Show the whole file where an object was defined.\"\"\"\n lineno = find_source_lines(obj)\n if lineno is None:\n self.noinfo(\"file\", oname)\n return\n\n ofile = find_file(obj)\n # run contents of file through pager starting at line where the object\n # is defined, as long as the file isn't binary and is actually on the\n # filesystem.\n if ofile.endswith((\".so\", \".dll\", \".pyd\")):\n print(\"File %r is binary, not printing.\" % ofile)\n elif not os.path.isfile(ofile):\n print(\"File %r does not exist, not printing.\" % ofile)\n else:\n # Print only text files, not extension binaries. Note that\n # getsourcelines returns lineno with 1-offset and page() uses\n # 0-offset, so we must adjust.\n o = read_py_file(ofile, skip_encoding_cookie=False)\n print(o, lineno - 1)\n\n def _format_fields_str(self, fields, title_width=0):\n \"\"\"Formats a list of fields for display using color strings.\n\n Parameters\n ----------\n fields : list\n A list of 2-tuples: (field_title, field_content)\n title_width : int\n How many characters to pad titles to. 
Default to longest title.\n \"\"\"\n out = []\n if title_width == 0:\n title_width = max(len(title) + 2 for title, _ in fields)\n for title, content in fields:\n title_len = len(title)\n title = \"{BOLD_RED}\" + title + \":{NO_COLOR}\"\n if len(content.splitlines()) > 1:\n title += \"\\n\"\n else:\n title += \" \".ljust(title_width - title_len)\n out.append(cast_unicode(title) + cast_unicode(content))\n return format_color(\"\\n\".join(out) + \"\\n\")\n\n def _format_fields_tokens(self, fields, title_width=0):\n \"\"\"Formats a list of fields for display using color tokens from\n pygments.\n\n Parameters\n ----------\n fields : list\n A list of 2-tuples: (field_title, field_content)\n title_width : int\n How many characters to pad titles to. Default to longest title.\n \"\"\"\n out = []\n if title_width == 0:\n title_width = max(len(title) + 2 for title, _ in fields)\n for title, content in fields:\n title_len = len(title)\n title = \"{BOLD_RED}\" + title + \":{NO_COLOR}\"\n if not isinstance(content, str) or len(content.splitlines()) > 1:\n title += \"\\n\"\n else:\n title += \" \".ljust(title_width - title_len)\n out += partial_color_tokenize(title)\n if isinstance(content, str):\n out[-1] = (out[-1][0], out[-1][1] + content + \"\\n\")\n else:\n out += content\n out[-1] = (out[-1][0], out[-1][1] + \"\\n\")\n out[-1] = (out[-1][0], out[-1][1] + \"\\n\")\n return out\n\n def _format_fields(self, fields, title_width=0):\n \"\"\"Formats a list of fields for display using color tokens from\n pygments.\n\n Parameters\n ----------\n fields : list\n A list of 2-tuples: (field_title, field_content)\n title_width : int\n How many characters to pad titles to. Default to longest title.\n \"\"\"\n if HAS_PYGMENTS:\n rtn = self._format_fields_tokens(fields, title_width=title_width)\n else:\n rtn = self._format_fields_str(fields, title_width=title_width)\n return rtn\n\n # The fields to be displayed by pinfo: (fancy_name, key_in_info_dict)\n pinfo_fields1 = [(\"Type\", \"type_name\")]\n\n pinfo_fields2 = [(\"String form\", \"string_form\")]\n\n pinfo_fields3 = [\n (\"Length\", \"length\"),\n (\"File\", \"file\"),\n (\"Definition\", \"definition\"),\n ]\n\n pinfo_fields_obj = [\n (\"Class docstring\", \"class_docstring\"),\n (\"Init docstring\", \"init_docstring\"),\n (\"Call def\", \"call_def\"),\n (\"Call docstring\", \"call_docstring\"),\n ]\n\n def pinfo(self, obj, oname=\"\", info=None, detail_level=0):\n \"\"\"Show detailed information about an object.\n\n Parameters\n ----------\n obj : object\n oname : str, optional\n name of the variable pointing to the object.\n info : dict, optional\n a structure with some information fields which may have been\n precomputed already.\n detail_level : int, optional\n if set to 1, more information is given.\n \"\"\"\n info = self.info(obj, oname=oname, info=info, detail_level=detail_level)\n displayfields = []\n\n def add_fields(fields):\n for title, key in fields:\n field = info[key]\n if field is not None:\n displayfields.append((title, field.rstrip()))\n\n add_fields(self.pinfo_fields1)\n add_fields(self.pinfo_fields2)\n\n # Namespace\n if info[\"namespace\"] is not None and info[\"namespace\"] != \"Interactive\":\n displayfields.append((\"Namespace\", info[\"namespace\"].rstrip()))\n\n add_fields(self.pinfo_fields3)\n if info[\"isclass\"] and info[\"init_definition\"]:\n displayfields.append((\"Init definition\", info[\"init_definition\"].rstrip()))\n\n # Source or docstring, depending on detail level and whether\n # source found.\n if detail_level > 0 and 
info[\"source\"] is not None:\n displayfields.append((\"Source\", cast_unicode(info[\"source\"])))\n elif info[\"docstring\"] is not None:\n displayfields.append((\"Docstring\", info[\"docstring\"]))\n\n # Constructor info for classes\n if info[\"isclass\"]:\n if info[\"init_docstring\"] is not None:\n displayfields.append((\"Init docstring\", info[\"init_docstring\"]))\n\n # Info for objects:\n else:\n add_fields(self.pinfo_fields_obj)\n\n # Finally send to printer/pager:\n if displayfields:\n print_color(self._format_fields(displayfields))\n\n def info(self, obj, oname=\"\", info=None, detail_level=0):\n \"\"\"Compute a dict with detailed information about an object.\n\n Optional arguments:\n\n - oname: name of the variable pointing to the object.\n\n - info: a structure with some information fields which may have been\n precomputed already.\n\n - detail_level: if set to 1, more information is given.\n \"\"\"\n obj_type = type(obj)\n if info is None:\n ismagic = 0\n isalias = 0\n ospace = \"\"\n else:\n ismagic = info.ismagic\n isalias = info.isalias\n ospace = info.namespace\n # Get docstring, special-casing aliases:\n if isalias:\n if not callable(obj):\n if len(obj) >= 2 and isinstance(obj[1], str):\n ds = \"Alias to the system command:\\n {0}\".format(obj[1])\n else: # pylint:disable=bare-except\n ds = \"Alias: \" + str(obj)\n else:\n ds = \"Alias to \" + str(obj)\n if obj.__doc__:\n ds += \"\\nDocstring:\\n\" + obj.__doc__\n else:\n ds = getdoc(obj)\n if ds is None:\n ds = \"<no docstring>\"\n\n # store output in a dict, we initialize it here and fill it as we go\n out = dict(name=oname, found=True, isalias=isalias, ismagic=ismagic)\n\n string_max = 200 # max size of strings to show (snipped if longer)\n shalf = int((string_max - 5) / 2)\n\n if ismagic:\n obj_type_name = \"Magic function\"\n elif isalias:\n obj_type_name = \"System alias\"\n else:\n obj_type_name = obj_type.__name__\n out[\"type_name\"] = obj_type_name\n\n try:\n bclass = obj.__class__\n out[\"base_class\"] = str(bclass)\n except: # pylint:disable=bare-except\n pass\n\n # String form, but snip if too long in ? form (full in ??)\n if detail_level >= self.str_detail_level:\n try:\n ostr = str(obj)\n str_head = \"string_form\"\n if not detail_level and len(ostr) > string_max:\n ostr = ostr[:shalf] + \" <...> \" + ostr[-shalf:]\n ostr = (\"\\n\" + \" \" * len(str_head.expandtabs())).join(\n q.strip() for q in ostr.split(\"\\n\")\n )\n out[str_head] = ostr\n except: # pylint:disable=bare-except\n pass\n\n if ospace:\n out[\"namespace\"] = ospace\n\n # Length (for strings and lists)\n try:\n out[\"length\"] = str(len(obj))\n except: # pylint:disable=bare-except\n pass\n\n # Filename where object was defined\n binary_file = False\n fname = find_file(obj)\n if fname is None:\n # if anything goes wrong, we don't want to show source, so it's as\n # if the file was binary\n binary_file = True\n else:\n if fname.endswith((\".so\", \".dll\", \".pyd\")):\n binary_file = True\n elif fname.endswith(\"<string>\"):\n fname = \"Dynamically generated function. \" \"No source code available.\"\n out[\"file\"] = fname\n\n # Docstrings only in detail 0 mode, since source contains them (we\n # avoid repetitions). 
If source fails, we add them back, see below.\n if ds and detail_level == 0:\n out[\"docstring\"] = ds\n\n # Original source code for any callable\n if detail_level:\n # Flush the source cache because inspect can return out-of-date\n # source\n linecache.checkcache()\n source = None\n try:\n try:\n source = getsource(obj, binary_file)\n except TypeError:\n if hasattr(obj, \"__class__\"):\n source = getsource(obj.__class__, binary_file)\n if source is not None:\n source = source.rstrip()\n if HAS_PYGMENTS:\n lexer = pyghooks.XonshLexer()\n source = list(pygments.lex(source, lexer=lexer))\n out[\"source\"] = source\n except Exception: # pylint:disable=broad-except\n pass\n\n if ds and source is None:\n out[\"docstring\"] = ds\n\n # Constructor docstring for classes\n if inspect.isclass(obj):\n out[\"isclass\"] = True\n # reconstruct the function definition and print it:\n try:\n obj_init = obj.__init__\n except AttributeError:\n init_def = init_ds = None\n else:\n init_def = self._getdef(obj_init, oname)\n init_ds = getdoc(obj_init)\n # Skip Python's auto-generated docstrings\n if init_ds == _object_init_docstring:\n init_ds = None\n\n if init_def or init_ds:\n if init_def:\n out[\"init_definition\"] = init_def\n if init_ds:\n out[\"init_docstring\"] = init_ds\n\n # and class docstring for instances:\n else:\n # reconstruct the function definition and print it:\n defln = self._getdef(obj, oname)\n if defln:\n out[\"definition\"] = defln\n\n # First, check whether the instance docstring is identical to the\n # class one, and print it separately if they don't coincide. In\n # most cases they will, but it's nice to print all the info for\n # objects which use instance-customized docstrings.\n if ds:\n try:\n cls = getattr(obj, \"__class__\")\n except: # pylint:disable=bare-except\n class_ds = None\n else:\n class_ds = getdoc(cls)\n # Skip Python's auto-generated docstrings\n if class_ds in _builtin_type_docstrings:\n class_ds = None\n if class_ds and ds != class_ds:\n out[\"class_docstring\"] = class_ds\n\n # Next, try to show constructor docstrings\n try:\n init_ds = getdoc(obj.__init__)\n # Skip Python's auto-generated docstrings\n if init_ds == _object_init_docstring:\n init_ds = None\n except AttributeError:\n init_ds = None\n if init_ds:\n out[\"init_docstring\"] = init_ds\n\n # Call form docstring for callable instances\n if safe_hasattr(obj, \"__call__\") and not is_simple_callable(obj):\n call_def = self._getdef(obj.__call__, oname)\n if call_def:\n call_def = call_def\n # it may never be the case that call def and definition\n # differ, but don't include the same signature twice\n if call_def != out.get(\"definition\"):\n out[\"call_def\"] = call_def\n call_ds = getdoc(obj.__call__)\n # Skip Python's auto-generated docstrings\n if call_ds == _func_call_docstring:\n call_ds = None\n if call_ds:\n out[\"call_docstring\"] = call_ds\n\n # Compute the object's argspec as a callable. 
The key is to decide\n # whether to pull it from the object itself, from its __init__ or\n # from its __call__ method.\n\n if inspect.isclass(obj):\n # Old-style classes need not have an __init__\n callable_obj = getattr(obj, \"__init__\", None)\n elif callable(obj):\n callable_obj = obj\n else:\n callable_obj = None\n\n if callable_obj:\n try:\n argspec = getargspec(callable_obj)\n except (TypeError, AttributeError):\n # For extensions/builtins we can't retrieve the argspec\n pass\n else:\n # named tuples' _asdict() method returns an OrderedDict, but we\n # we want a normal\n out[\"argspec\"] = argspec_dict = dict(argspec._asdict())\n # We called this varkw before argspec became a named tuple.\n # With getfullargspec it's also called varkw.\n if \"varkw\" not in argspec_dict:\n argspec_dict[\"varkw\"] = argspec_dict.pop(\"keywords\")\n\n return object_info(**out)\n",
"path": "xonsh/inspectors.py"
}
] | diff --git a/news/fix-help-def.rst b/news/fix-help-def.rst
new file mode 100644
index 0000000000..767a18ae7c
--- /dev/null
+++ b/news/fix-help-def.rst
@@ -0,0 +1,23 @@
+**Added:**
+
+* <news item>
+
+**Changed:**
+
+* <news item>
+
+**Deprecated:**
+
+* <news item>
+
+**Removed:**
+
+* <news item>
+
+**Fixed:**
+
+* Fixed help operator not displaying definition for callables.
+
+**Security:**
+
+* <news item>
diff --git a/xonsh/inspectors.py b/xonsh/inspectors.py
index a2151dbd37..05c3d91397 100644
--- a/xonsh/inspectors.py
+++ b/xonsh/inspectors.py
@@ -356,7 +356,7 @@ def _getdef(self, obj, oname=""):
exception is suppressed.
"""
try:
- hdef = oname + inspect.signature(*getargspec(obj))
+ hdef = oname + str(inspect.signature(obj))
return cast_unicode(hdef)
except: # pylint:disable=bare-except
return None
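
A side note on the one-line fix above: `str()` on an `inspect.Signature` renders the callable's parenthesized parameter list, whereas splatting the `getargspec()` tuple into `inspect.signature()` passes four positional arguments and would presumably raise a `TypeError`. A minimal standard-library illustration, independent of the patch (the `greet` function is a hypothetical stand-in):

```python
import inspect

def greet(name, punctuation="!"):
    """Toy callable used only to render a signature."""
    return "Hello, " + name + punctuation

# str(Signature) yields the parenthesized parameter list, which is what
# the fixed line concatenates onto the object's name:
print("greet" + str(inspect.signature(greet)))
# -> greet(name, punctuation='!')
```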
|
cloud-custodian__cloud-custodian-1979 | provisioning lambda to vpc appears to be broken still
Hi,
I've tested https://github.com/capitalone/cloud-custodian/pull/1919.
I see the validation now passes, but the VPC attributes (`security_groups`, `subnets`) are still not being passed when the Lambda function is created.
Policy:
```
policies:
- name: vpc-test-sandbox
resource: ec2
mode:
type: config-rule
role: arn:aws:iam::{{ACCOUNT}}:role/service-role/tscloud_lambda_role
timeout: 180
security_groups: [sg-ea399290]
subnets: [subnet-3c3d8367,subnet-a09275c6,subnet-278bb56e]
description: |
Testing vpc provisioning
filters:
- "tag:c7n_testing": present
actions:
- type: mark-for-op
tag: c7n_tag_compliance
op: terminate
days: 1
```
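For reference, a minimal boto3 sketch of what the `security_groups`/`subnets` keys in the mode block above should translate into when c7n calls the Lambda API. The values are copied from the policy; the code archive path is a placeholder:

```python
import boto3

client = boto3.client('lambda')
# VpcConfig is what should end up in the create_function call whenever
# both security_groups and subnets are set in the policy's mode block.
client.create_function(
    FunctionName='custodian-vpc-test-sandbox',
    Runtime='python2.7',  # c7n's default runtime
    Role='arn:aws:iam::{{ACCOUNT}}:role/service-role/tscloud_lambda_role',
    Handler='custodian_policy.run',  # c7n's policy handler entry point
    Code={'ZipFile': open('custodian-policy.zip', 'rb').read()},  # placeholder archive
    Timeout=180,
    VpcConfig={
        'SubnetIds': ['subnet-3c3d8367', 'subnet-a09275c6', 'subnet-278bb56e'],
        'SecurityGroupIds': ['sg-ea399290'],
    },
)
```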
Deploy:
```
$ custodian run -s /tmp/c7n --cache-period 0 vpc-test.yml
2018-01-18 07:48:42,873: custodian.policy:INFO Provisioning policy lambda vpc-test-sandbox
2018-01-18 07:48:43,233: custodian.lambda:INFO Publishing custodian policy lambda function custodian-vpc-test-sandbox
```
Get function vpc config:
```
$ aws lambda get-function --function-name custodian-vpc-test-sandbox | jq '.Configuration.VpcConfig'
null
```
Expected result:
```
$ aws lambda get-function --function-name custodian-vpc-test-sandbox | jq '.Configuration.VpcConfig'
{
"SubnetIds": [
"subnet-3c3d8367",
"subnet-a09275c6",
"subnet-278bb56e"
],
"SecurityGroupIds": [
"sg-ea399290"
],
"VpcId": "vpc-d3519bb5"
}
```
The role I'm using has access to VPC provisioning, as far as I can tell, via the canned AWS managed policy AWSLambdaVPCAccessExecutionRole. I'm able to use the same role to add the VPC config manually after provisioning with c7n.
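
For what it's worth, the `PolicyLambda` properties in `c7n/mu.py` currently hardcode `security_groups`/`subnets` to `None`, so `get_config()` never emits a `VpcConfig` for policy lambdas. A sketch of the change I'd expect, mirroring how `LambdaFunction` reads the same keys from `func_data`:

```python
class PolicyLambda(AbstractLambdaFunction):
    # ... other properties unchanged ...

    @property
    def security_groups(self):
        # was: return None
        return self.policy.data['mode'].get('security_groups', None)

    @property
    def subnets(self):
        # was: return None
        return self.policy.data['mode'].get('subnets', None)
```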
| [
{
"content": "# Copyright 2015-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nCloud Custodian Lambda Provisioning Support\n\ndocs/lambda.rst\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport abc\nimport base64\nimport hashlib\nimport importlib\nimport io\nimport json\nimport logging\nimport os\nimport time\nimport tempfile\nimport zipfile\n\nfrom boto3.s3.transfer import S3Transfer, TransferConfig\nfrom botocore.exceptions import ClientError\n\nfrom concurrent.futures import ThreadPoolExecutor\n\n# Static event mapping to help simplify cwe rules creation\nfrom c7n.cwe import CloudWatchEvents\nfrom c7n.logs_support import _timestamp_from_string\nfrom c7n.utils import parse_s3, local_session\n\n\nlog = logging.getLogger('custodian.lambda')\n\n\nclass PythonPackageArchive(object):\n \"\"\"Creates a zip file for python lambda functions.\n\n :param tuple modules: the Python modules to add to the archive\n\n Amazon doesn't give us straightforward docs here, only `an example\n <http://docs.aws.amazon.com/lambda/latest/dg/with-s3-example-deployment-pkg.html#with-s3-example-deployment-pkg-python>`_,\n from which we can infer that they simply unzip the file into a directory on\n ``sys.path``. So what we do is locate all of the ``modules`` specified, and\n add all of the ``.py`` files we find for these modules to a zip file.\n\n In addition to the modules specified during instantiation, you can add\n arbitrary additional files to the archive using :py:func:`add_file` and\n :py:func:`add_contents`. For example, since we only add ``*.py`` files for\n you, you'll need to manually add files for any compiled extension modules\n that your Lambda requires.\n\n \"\"\"\n\n def __init__(self, *modules):\n self._temp_archive_file = tempfile.NamedTemporaryFile()\n self._zip_file = zipfile.ZipFile(\n self._temp_archive_file, mode='w',\n compression=zipfile.ZIP_DEFLATED)\n self._closed = False\n self.add_modules(*modules)\n\n @property\n def path(self):\n return self._temp_archive_file.name\n\n @property\n def size(self):\n if not self._closed:\n raise ValueError(\"Archive not closed, size not accurate\")\n return os.stat(self._temp_archive_file.name).st_size\n\n def add_modules(self, *modules):\n \"\"\"Add the named Python modules to the archive. For consistency's sake\n we only add ``*.py`` files, not ``*.pyc``. We also don't add other\n files, including compiled modules. You'll have to add such files\n manually using :py:meth:`add_file`.\n \"\"\"\n for module_name in modules:\n module = importlib.import_module(module_name)\n\n if hasattr(module, '__path__'):\n # https://docs.python.org/3/reference/import.html#module-path\n for directory in module.__path__:\n self.add_directory(directory)\n if not hasattr(module, '__file__'):\n\n # Likely a namespace package. 
Try to add *.pth files so\n # submodules are importable under Python 2.7.\n\n sitedir = list(module.__path__)[0].rsplit('/', 1)[0]\n for filename in os.listdir(sitedir):\n s = filename.startswith\n e = filename.endswith\n if s(module_name) and e('-nspkg.pth'):\n self.add_file(os.path.join(sitedir, filename))\n\n elif hasattr(module, '__file__'):\n # https://docs.python.org/3/reference/import.html#__file__\n path = module.__file__\n\n if path.endswith('.pyc'):\n _path = path[:-1]\n if not os.path.isfile(_path):\n raise ValueError(\n 'Could not find a *.py source file behind ' + path)\n path = _path\n\n if not path.endswith('.py'):\n raise ValueError(\n 'We need a *.py source file instead of ' + path)\n\n self.add_file(path)\n\n def add_directory(self, path):\n \"\"\"Add ``*.py`` files under the directory ``path`` to the archive.\n \"\"\"\n for root, dirs, files in os.walk(path):\n arc_prefix = os.path.relpath(root, os.path.dirname(path))\n for f in files:\n if not f.endswith('.py'):\n continue\n f_path = os.path.join(root, f)\n dest_path = os.path.join(arc_prefix, f)\n self.add_file(f_path, dest_path)\n\n def add_file(self, src, dest=None):\n \"\"\"Add the file at ``src`` to the archive.\n\n If ``dest`` is ``None`` then it is added under just the original\n filename. So ``add_file('foo/bar.txt')`` ends up at ``bar.txt`` in the\n archive, while ``add_file('bar.txt', 'foo/bar.txt')`` ends up at\n ``foo/bar.txt``.\n\n \"\"\"\n dest = dest or os.path.basename(src)\n with open(src, 'rb') as fp:\n contents = fp.read()\n self.add_contents(dest, contents)\n\n def add_py_file(self, src, dest=None):\n \"\"\"This is a special case of :py:meth:`add_file` that helps for adding\n a ``py`` when a ``pyc`` may be present as well. So for example, if\n ``__file__`` is ``foo.pyc`` and you do:\n\n .. code-block:: python\n\n archive.add_py_file(__file__)\n\n then this method will add ``foo.py`` instead if it exists, and raise\n ``IOError`` if it doesn't.\n\n \"\"\"\n src = src[:-1] if src.endswith('.pyc') else src\n self.add_file(src, dest)\n\n def add_contents(self, dest, contents):\n \"\"\"Add file contents to the archive under ``dest``.\n\n If ``dest`` is a path, it will be added compressed and world-readable\n (user-writeable). You may also pass a :py:class:`~zipfile.ZipInfo` for\n custom behavior.\n\n \"\"\"\n assert not self._closed, \"Archive closed\"\n if not isinstance(dest, zipfile.ZipInfo):\n dest = zinfo(dest) # see for some caveats\n self._zip_file.writestr(dest, contents)\n\n def close(self):\n \"\"\"Close the zip file.\n\n Note underlying tempfile is removed when archive is garbage collected.\n \"\"\"\n self._closed = True\n self._zip_file.close()\n log.debug(\n \"Created custodian lambda archive size: %0.2fmb\",\n (os.path.getsize(self._temp_archive_file.name) / (\n 1024.0 * 1024.0)))\n return self\n\n def remove(self):\n \"\"\"Dispose of the temp file for garbage collection.\"\"\"\n if self._temp_archive_file:\n self._temp_archive_file = None\n\n def get_checksum(self):\n \"\"\"Return the b64 encoded sha256 checksum of the archive.\"\"\"\n assert self._closed, \"Archive not closed\"\n with open(self._temp_archive_file.name, 'rb') as fh:\n return base64.b64encode(checksum(fh, hashlib.sha256()))\n\n def get_bytes(self):\n \"\"\"Return the entire zip file as a byte string. 
\"\"\"\n assert self._closed, \"Archive not closed\"\n return open(self._temp_archive_file.name, 'rb').read()\n\n def get_reader(self):\n \"\"\"Return a read-only :py:class:`~zipfile.ZipFile`.\"\"\"\n assert self._closed, \"Archive not closed\"\n buf = io.BytesIO(self.get_bytes())\n return zipfile.ZipFile(buf, mode='r')\n\n def get_filenames(self):\n \"\"\"Return a list of filenames in the archive.\"\"\"\n return [n.filename for n in self.get_reader().filelist]\n\n\ndef checksum(fh, hasher, blocksize=65536):\n buf = fh.read(blocksize)\n while len(buf) > 0:\n hasher.update(buf)\n buf = fh.read(blocksize)\n return hasher.digest()\n\n\ndef custodian_archive(packages=None):\n \"\"\"Create a lambda code archive for running custodian.\n\n Lambda archive currently always includes `c7n` and `pkg_resources`. Add additional\n packages in the mode block\n\n Example policy that includes additional packages\n\n .. code-block:: yaml\n\n policy:\n name: lambda-archive-example\n resource: s3\n mode:\n packages:\n - botocore\n\n Kwargs:\n packages (set): List of additional packages to include in the lambda archive.\n \"\"\"\n modules = {'c7n', 'pkg_resources'}\n if packages:\n modules = filter(None, modules.union(packages))\n return PythonPackageArchive(*modules)\n\n\nclass LambdaManager(object):\n \"\"\" Provides CRUD operations around lambda functions\n \"\"\"\n\n def __init__(self, session_factory, s3_asset_path=None):\n self.session_factory = session_factory\n self.client = self.session_factory().client('lambda')\n self.s3_asset_path = s3_asset_path\n\n def list_functions(self, prefix=None):\n p = self.client.get_paginator('list_functions')\n for rp in p.paginate():\n for f in rp.get('Functions', []):\n if not prefix:\n yield f\n elif f['FunctionName'].startswith(prefix):\n yield f\n\n def publish(self, func, alias=None, role=None, s3_uri=None):\n result, changed = self._create_or_update(\n func, role, s3_uri, qualifier=alias)\n func.arn = result['FunctionArn']\n if alias and changed:\n func.alias = self.publish_alias(result, alias)\n elif alias:\n func.alias = \"%s:%s\" % (func.arn, alias)\n else:\n func.alias = func.arn\n\n for e in func.get_events(self.session_factory):\n if e.add(func):\n log.debug(\n \"Added event source: %s to function: %s\",\n e, func.alias)\n return result\n\n def remove(self, func, alias=None):\n for e in func.get_events(self.session_factory):\n e.remove(func)\n log.info(\"Removing lambda function %s\", func.name)\n try:\n self.client.delete_function(FunctionName=func.name)\n except ClientError as e:\n if e.response['Error']['Code'] != 'ResourceNotFoundException':\n raise\n\n def metrics(self, funcs, start, end, period=5 * 60):\n\n def func_metrics(f):\n metrics = local_session(self.session_factory).client('cloudwatch')\n values = {}\n for m in ('Errors', 'Invocations', 'Durations', 'Throttles'):\n values[m] = metrics.get_metric_statistics(\n Namespace=\"AWS/Lambda\",\n Dimensions=[{\n 'Name': 'FunctionName',\n 'Value': (\n isinstance(f, dict) and f['FunctionName'] or f.name)}],\n Statistics=[\"Sum\"],\n StartTime=start,\n EndTime=end,\n Period=period,\n MetricName=m)['Datapoints']\n return values\n\n with ThreadPoolExecutor(max_workers=3) as w:\n results = list(w.map(func_metrics, funcs))\n for m, f in zip(results, funcs):\n if isinstance(f, dict):\n f['Metrics'] = m\n return results\n\n def logs(self, func, start, end):\n logs = self.session_factory().client('logs')\n group_name = \"/aws/lambda/%s\" % func.name\n log.info(\"Fetching logs from group: %s\" % group_name)\n 
try:\n logs.describe_log_groups(\n logGroupNamePrefix=group_name)\n except ClientError as e:\n if e.response['Error']['Code'] == 'ResourceNotFoundException':\n return\n raise\n try:\n log_streams = logs.describe_log_streams(\n logGroupName=group_name,\n orderBy=\"LastEventTime\", limit=3, descending=True)\n except ClientError as e:\n if e.response['Error']['Code'] == 'ResourceNotFoundException':\n return\n raise\n start = _timestamp_from_string(start)\n end = _timestamp_from_string(end)\n for s in reversed(log_streams['logStreams']):\n result = logs.get_log_events(\n logGroupName=group_name,\n logStreamName=s['logStreamName'],\n startTime=start,\n endTime=end,\n )\n for e in result['events']:\n yield e\n\n @staticmethod\n def delta_function(old_config, new_config):\n for k in new_config:\n if k not in old_config or new_config[k] != old_config[k]:\n return True\n\n @staticmethod\n def diff_tags(old_tags, new_tags):\n add = {}\n remove = set()\n for k,v in new_tags.items():\n if k not in old_tags or old_tags[k] != v:\n add[k] = v\n for k in old_tags:\n if k not in new_tags:\n remove.add(k)\n return add, list(remove)\n\n def _create_or_update(self, func, role=None, s3_uri=None, qualifier=None):\n role = func.role or role\n assert role, \"Lambda function role must be specified\"\n archive = func.get_archive()\n existing = self.get(func.name, qualifier)\n\n if s3_uri:\n # TODO: support versioned buckets\n bucket, key = self._upload_func(s3_uri, func, archive)\n code_ref = {'S3Bucket': bucket, 'S3Key': key}\n else:\n code_ref = {'ZipFile': archive.get_bytes()}\n\n changed = False\n if existing:\n old_config = existing['Configuration']\n if archive.get_checksum() != old_config['CodeSha256']:\n log.debug(\"Updating function %s code\", func.name)\n params = dict(FunctionName=func.name, Publish=True)\n params.update(code_ref)\n result = self.client.update_function_code(**params)\n changed = True\n # TODO/Consider also set publish above to false, and publish\n # after configuration change?\n\n new_config = func.get_config()\n new_config['Role'] = role\n new_tags = new_config.pop('Tags', {})\n\n if self.delta_function(old_config, new_config):\n log.debug(\"Updating function: %s config\" % func.name)\n result = self.client.update_function_configuration(**new_config)\n changed = True\n\n # tag dance\n base_arn = old_config['FunctionArn']\n if base_arn.count(':') > 6: # trim version/alias\n base_arn = base_arn.rsplit(':', 1)[0]\n\n old_tags = self.client.list_tags(Resource=base_arn)['Tags']\n tags_to_add, tags_to_remove = self.diff_tags(old_tags, new_tags)\n\n if tags_to_add:\n log.debug(\"Adding/updating tags: %s config\" % func.name)\n self.client.tag_resource(\n Resource=base_arn, Tags=tags_to_add)\n if tags_to_remove:\n log.debug(\"Removing tags: %s config\" % func.name)\n self.client.untag_resource(\n Resource=base_arn, TagKeys=tags_to_remove)\n\n if not changed:\n result = old_config\n else:\n log.info('Publishing custodian policy lambda function %s', func.name)\n params = func.get_config()\n params.update({'Publish': True, 'Code': code_ref, 'Role': role})\n result = self.client.create_function(**params)\n changed = True\n\n return result, changed\n\n def _upload_func(self, s3_uri, func, archive):\n _, bucket, key_prefix = parse_s3(s3_uri)\n key = \"%s/%s\" % (key_prefix, func.name)\n transfer = S3Transfer(\n self.session_factory().client('s3'),\n config=TransferConfig(\n multipart_threshold=1024 * 1024 * 4))\n transfer.upload_file(\n archive.path,\n bucket=bucket,\n key=key,\n extra_args={\n 
'ServerSideEncryption': 'AES256'})\n return bucket, key\n\n def publish_alias(self, func_data, alias):\n \"\"\"Create or update an alias for the given function.\n \"\"\"\n if not alias:\n return func_data['FunctionArn']\n func_name = func_data['FunctionName']\n func_version = func_data['Version']\n\n exists = resource_exists(\n self.client.get_alias, FunctionName=func_name, Name=alias)\n\n if not exists:\n log.debug(\"Publishing custodian lambda alias %s\", alias)\n alias_result = self.client.create_alias(\n FunctionName=func_name,\n Name=alias,\n FunctionVersion=func_version)\n else:\n if (exists['FunctionVersion'] == func_version and\n exists['Name'] == alias):\n return exists['AliasArn']\n log.debug('Updating custodian lambda alias %s', alias)\n alias_result = self.client.update_alias(\n FunctionName=func_name,\n Name=alias,\n FunctionVersion=func_version)\n return alias_result['AliasArn']\n\n def get(self, func_name, qualifier=None):\n params = {'FunctionName': func_name}\n if qualifier:\n params['Qualifier'] = qualifier\n return resource_exists(\n self.client.get_function, **params)\n\n\ndef resource_exists(op, NotFound=\"ResourceNotFoundException\", *args, **kw):\n try:\n return op(*args, **kw)\n except ClientError as e:\n if e.response['Error']['Code'] == NotFound:\n return False\n raise\n\n\nclass AbstractLambdaFunction:\n \"\"\"Abstract base class for lambda functions.\"\"\"\n __metaclass__ = abc.ABCMeta\n\n alias = None\n\n @abc.abstractproperty\n def name(self):\n \"\"\"Name for the lambda function\"\"\"\n\n @abc.abstractproperty\n def runtime(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def description(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def handler(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def memory_size(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def timeout(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def role(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def subnets(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def security_groups(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def dead_letter_config(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def environment(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def kms_key_arn(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def tracing_config(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def tags(self):\n \"\"\" \"\"\"\n\n @abc.abstractmethod\n def get_events(self, session_factory):\n \"\"\"event sources that should be bound to this lambda.\"\"\"\n\n @abc.abstractmethod\n def get_archive(self):\n \"\"\"Return the lambda distribution archive object.\"\"\"\n\n def get_config(self):\n conf = {\n 'FunctionName': self.name,\n 'MemorySize': self.memory_size,\n 'Role': self.role,\n 'Description': self.description,\n 'Runtime': self.runtime,\n 'Handler': self.handler,\n 'Timeout': self.timeout,\n 'DeadLetterConfig': self.dead_letter_config,\n 'Environment': self.environment,\n 'KMSKeyArn': self.kms_key_arn,\n 'TracingConfig': self.tracing_config,\n 'Tags': self.tags}\n if self.subnets and self.security_groups:\n conf['VpcConfig'] = {\n 'SubnetIds': self.subnets,\n 'SecurityGroupIds': self.security_groups}\n return conf\n\n\nclass LambdaFunction(AbstractLambdaFunction):\n\n def __init__(self, func_data, archive):\n self.func_data = func_data\n required = set((\n 'name', 'handler', 'memory_size',\n 'timeout', 'role', 'runtime',\n 'description'))\n missing = required.difference(func_data)\n if missing:\n raise ValueError(\"Missing required keys %s\" % \" 
\".join(missing))\n self.archive = archive\n\n @property\n def name(self):\n return self.func_data['name']\n\n @property\n def description(self):\n return self.func_data['description']\n\n @property\n def handler(self):\n return self.func_data['handler']\n\n @property\n def memory_size(self):\n return self.func_data['memory_size']\n\n @property\n def timeout(self):\n return self.func_data['timeout']\n\n @property\n def runtime(self):\n return self.func_data['runtime']\n\n @property\n def role(self):\n return self.func_data['role']\n\n @property\n def security_groups(self):\n return self.func_data.get('security_groups', None)\n\n @property\n def subnets(self):\n return self.func_data.get('subnets', None)\n\n @property\n def dead_letter_config(self):\n return self.func_data.get('dead_letter_config', {})\n\n @property\n def environment(self):\n return self.func_data.get('environment', {})\n\n @property\n def kms_key_arn(self):\n return self.func_data.get('kms_key_arn', '')\n\n @property\n def tracing_config(self):\n return self.func_data.get('tracing_config', {})\n\n @property\n def tags(self):\n return self.func_data.get('tags', {})\n\n def get_events(self, session_factory):\n return self.func_data.get('events', ())\n\n def get_archive(self):\n return self.archive\n\n\nPolicyHandlerTemplate = \"\"\"\\\nfrom c7n import handler\n\ndef run(event, context):\n return handler.dispatch_event(event, context)\n\n\"\"\"\n\n\nclass PolicyLambda(AbstractLambdaFunction):\n \"\"\"Wraps a custodian policy to turn it into a lambda function.\n \"\"\"\n handler = \"custodian_policy.run\"\n\n def __init__(self, policy):\n self.policy = policy\n self.archive = custodian_archive(packages=self.packages)\n\n @property\n def name(self):\n return \"custodian-%s\" % self.policy.name\n\n @property\n def description(self):\n return self.policy.data.get(\n 'description', 'cloud-custodian lambda policy')\n\n @property\n def role(self):\n return self.policy.data['mode'].get('role', '')\n\n @property\n def runtime(self):\n return self.policy.data['mode'].get('runtime', 'python2.7')\n\n @property\n def memory_size(self):\n return self.policy.data['mode'].get('memory', 512)\n\n @property\n def timeout(self):\n return self.policy.data['mode'].get('timeout', 60)\n\n @property\n def security_groups(self):\n return None\n\n @property\n def subnets(self):\n return None\n\n @property\n def dead_letter_config(self):\n return self.policy.data['mode'].get('dead_letter_config', {})\n\n @property\n def environment(self):\n return self.policy.data['mode'].get('environment', {})\n\n @property\n def kms_key_arn(self):\n return self.policy.data['mode'].get('kms_key_arn', '')\n\n @property\n def tracing_config(self):\n return self.policy.data['mode'].get('tracing_config', {})\n\n @property\n def tags(self):\n return self.policy.data['mode'].get('tags', {})\n\n @property\n def packages(self):\n return self.policy.data['mode'].get('packages')\n\n def get_events(self, session_factory):\n events = []\n if self.policy.data['mode']['type'] == 'config-rule':\n events.append(\n ConfigRule(self.policy.data['mode'], session_factory))\n else:\n events.append(\n CloudWatchEventSource(\n self.policy.data['mode'], session_factory))\n return events\n\n def get_archive(self):\n self.archive.add_contents(\n 'config.json', json.dumps(\n {'policies': [self.policy.data]}, indent=2))\n self.archive.add_contents('custodian_policy.py', PolicyHandlerTemplate)\n self.archive.close()\n return self.archive\n\n\ndef zinfo(fname):\n \"\"\"Amazon lambda exec environment 
setup can break itself\n if zip files aren't constructed a particular way.\n\n ie. It respects file perm attributes from the zip including\n those that prevent lambda from working. Namely lambda\n extracts code as one user, and executes code as a different\n user. Without permissions for the executing user to read\n the file the lambda function is broken.\n\n Python's default zipfile.writestr does a 0600 perm which\n we modify here as a workaround.\n \"\"\"\n info = zipfile.ZipInfo(fname)\n # Grant other users permissions to read\n # http://unix.stackexchange.com/questions/14705/\n info.external_attr = 0o644 << 16\n return info\n\n\nclass CloudWatchEventSource(object):\n \"\"\"Subscribe a lambda to cloud watch events.\n\n Cloud watch events supports a number of different event\n sources, from periodic timers with cron syntax, to\n real time instance state notifications, cloud trail\n events, and realtime asg membership changes.\n\n Event Pattern for Instance State\n\n .. code-block:: json\n\n {\n \"source\": [\"aws.ec2\"],\n \"detail-type\": [\"EC2 Instance State-change Notification\"],\n \"detail\": { \"state\": [\"pending\"]}\n }\n\n Event Pattern for Cloud Trail API\n\n .. code-block:: json\n\n {\n \"detail-type\": [\"AWS API Call via CloudTrail\"],\n \"detail\": {\n \"eventSource\": [\"s3.amazonaws.com\"],\n \"eventName\": [\"CreateBucket\", \"DeleteBucket\"]\n }\n }\n \"\"\"\n ASG_EVENT_MAPPING = {\n 'launch-success': 'EC2 Instance Launch Successful',\n 'launch-failure': 'EC2 Instance Launch Unsuccessful',\n 'terminate-success': 'EC2 Instance Terminate Successful',\n 'terminate-failure': 'EC2 Instance Terminate Unsuccessful'}\n\n def __init__(self, data, session_factory, prefix=\"custodian-\"):\n self.session_factory = session_factory\n self.session = session_factory()\n self.client = self.session.client('events')\n self.data = data\n self.prefix = prefix\n\n def _make_notification_id(self, function_name):\n if not function_name.startswith(self.prefix):\n return \"%s%s\" % (self.prefix, function_name)\n return function_name\n\n def get(self, rule_name):\n return resource_exists(\n self.client.describe_rule,\n Name=self._make_notification_id(rule_name))\n\n @staticmethod\n def delta(src, tgt):\n \"\"\"Given two cwe rules determine if the configuration is the same.\n\n Name is already implied.\n \"\"\"\n for k in ['State', 'EventPattern', 'ScheduleExpression']:\n if src.get(k) != tgt.get(k):\n return True\n return False\n\n def __repr__(self):\n return \"<CWEvent Type:%s Events:%s>\" % (\n self.data.get('type'),\n ', '.join(map(str, self.data.get('events', []))))\n\n def resolve_cloudtrail_payload(self, payload):\n sources = self.data.get('sources', [])\n events = []\n for e in self.data.get('events'):\n if not isinstance(e, dict):\n events.append(e)\n event_info = CloudWatchEvents.get(e)\n if event_info is None:\n continue\n else:\n event_info = e\n events.append(e['event'])\n sources.append(event_info['source'])\n\n payload['detail'] = {\n 'eventSource': list(set(sources)),\n 'eventName': events}\n\n def render_event_pattern(self):\n event_type = self.data.get('type')\n payload = {}\n if event_type == 'cloudtrail':\n payload['detail-type'] = ['AWS API Call via CloudTrail']\n self.resolve_cloudtrail_payload(payload)\n\n if event_type == 'cloudtrail':\n if 'signin.amazonaws.com' in payload['detail']['eventSource']:\n payload['detail-type'] = ['AWS Console Sign In via CloudTrail']\n elif event_type == 'guard-duty':\n payload['source'] = ['aws.guardduty']\n payload['detail-type'] = 
['GuardDuty Finding']\n if 'resource-filter' in self.data:\n payload.update({\n 'detail': {'resource': {'resourceType': [self.data['resource-filter']]}}})\n elif event_type == \"ec2-instance-state\":\n payload['source'] = ['aws.ec2']\n payload['detail-type'] = [\n \"EC2 Instance State-change Notification\"]\n # Technically could let empty be all events, but likely misconfig\n payload['detail'] = {\"state\": self.data.get('events', [])}\n elif event_type == \"asg-instance-state\":\n payload['source'] = ['aws.autoscaling']\n events = []\n for e in self.data.get('events', []):\n events.append(self.ASG_EVENT_MAPPING.get(e, e))\n payload['detail-type'] = events\n elif event_type == 'periodic':\n pass\n else:\n raise ValueError(\n \"Unknown lambda event source type: %s\" % event_type)\n if not payload:\n return None\n return json.dumps(payload)\n\n def add(self, func):\n params = dict(\n Name=func.name, Description=func.description, State='ENABLED')\n\n pattern = self.render_event_pattern()\n if pattern:\n params['EventPattern'] = pattern\n schedule = self.data.get('schedule')\n if schedule:\n params['ScheduleExpression'] = schedule\n\n rule = self.get(func.name)\n\n if rule and self.delta(rule, params):\n log.debug(\"Updating cwe rule for %s\" % self)\n response = self.client.put_rule(**params)\n elif not rule:\n log.debug(\"Creating cwe rule for %s\" % (self))\n response = self.client.put_rule(**params)\n else:\n response = {'RuleArn': rule['Arn']}\n\n try:\n self.session.client('lambda').add_permission(\n FunctionName=func.name,\n StatementId=func.name,\n SourceArn=response['RuleArn'],\n Action='lambda:InvokeFunction',\n Principal='events.amazonaws.com')\n log.debug('Added lambda invoke cwe rule permission')\n except ClientError as e:\n if e.response['Error']['Code'] != 'ResourceConflictException':\n raise\n\n # Add Targets\n found = False\n response = self.client.list_targets_by_rule(Rule=func.name)\n # CWE seems to be quite picky about function arns (no aliases/versions)\n func_arn = func.arn\n\n if func_arn.count(':') > 6:\n func_arn, version = func_arn.rsplit(':', 1)\n for t in response['Targets']:\n if func_arn == t['Arn']:\n found = True\n\n if found:\n return\n\n log.debug('Creating cwe rule target for %s on func:%s' % (\n self, func_arn))\n\n self.client.put_targets(\n Rule=func.name, Targets=[{\"Id\": func.name, \"Arn\": func_arn}])\n\n return True\n\n def update(self, func):\n self.add(func)\n\n def pause(self, func):\n try:\n self.client.disable_rule(Name=func.name)\n except Exception:\n pass\n\n def resume(self, func):\n try:\n self.client.enable_rule(Name=func.name)\n except Exception:\n pass\n\n def remove(self, func):\n if self.get(func.name):\n log.info(\"Removing cwe targets and rule %s\", func.name)\n try:\n targets = self.client.list_targets_by_rule(\n Rule=func.name)['Targets']\n self.client.remove_targets(\n Rule=func.name,\n Ids=[t['Id'] for t in targets])\n except ClientError as e:\n log.warning(\n \"Could not remove targets for rule %s error: %s\",\n func.name, e)\n self.client.delete_rule(Name=func.name)\n\n\nclass BucketLambdaNotification(object):\n \"\"\" Subscribe a lambda to bucket notifications directly. 
\"\"\"\n\n def __init__(self, data, session_factory, bucket):\n self.data = data\n self.session_factory = session_factory\n self.session = session_factory()\n self.bucket = bucket\n\n def delta(self, src, tgt):\n for k in ['Id', 'LambdaFunctionArn', 'Events', 'Filters']:\n if src.get(k) != tgt.get(k):\n return True\n return False\n\n def _get_notifies(self, s3, func):\n notifies = s3.get_bucket_notification_configuration(\n Bucket=self.bucket['Name'])\n found = False\n for f in notifies.get('LambdaFunctionConfigurations', []):\n if f['Id'] != func.name:\n continue\n found = f\n return notifies, found\n\n def add(self, func):\n s3 = self.session.client('s3')\n notifies, found = self._get_notifies(s3, func)\n notifies.pop('ResponseMetadata', None)\n func_arn = func.arn\n if func_arn.rsplit(':', 1)[-1].isdigit():\n func_arn = func_arn.rsplit(':', 1)[0]\n n_params = {\n 'Id': func.name,\n 'LambdaFunctionArn': func_arn,\n 'Events': self.data.get('events', ['s3:ObjectCreated:*'])}\n if self.data.get('filters'):\n n_params['Filters'] = {\n 'Key': {'FilterRules': self.filters}}\n\n if found:\n if self.delta(found, n_params):\n notifies['LambdaFunctionConfigurations'].remove(found)\n else:\n log.info(\"Bucket lambda notification present\")\n return\n\n lambda_client = self.session.client('lambda')\n params = dict(\n FunctionName=func.name,\n StatementId=self.bucket['Name'],\n Action='lambda:InvokeFunction',\n Principal='s3.amazonaws.com')\n if self.data.get('account_s3'):\n params['SourceAccount'] = self.data['account_s3']\n params['SourceArn'] = 'arn:aws:s3:::*'\n else:\n params['SourceArn'] = 'arn:aws:s3:::%' % self.bucket['Name']\n try:\n lambda_client.add_permission(**params)\n except ClientError as e:\n if e.response['Error']['Code'] != 'ResourceConflictException':\n raise\n\n notifies.setdefault('LambdaFunctionConfigurations', []).append(n_params)\n s3.put_bucket_notification_configuration(\n Bucket=self.bucket['Name'], NotificationConfiguration=notifies)\n\n return True\n\n def remove(self, func):\n s3 = self.session.client('s3')\n notifies, found = self._get_notifies(s3, func)\n if not found:\n return\n\n lambda_client = self.session.client('lambda')\n try:\n response = lambda_client.remove_permission(\n FunctionName=func['FunctionName'],\n StatementId=self.bucket['Name'])\n log.debug(\"Removed lambda permission result: %s\" % response)\n except ClientError as e:\n if e.response['Error']['Code'] != 'ResourceNotFoundException':\n raise\n\n notifies['LambdaFunctionConfigurations'].remove(found)\n s3.put_bucket_notification_configuration(\n Bucket=self.bucket['Name'],\n NotificationConfiguration=notifies)\n\n\nclass CloudWatchLogSubscription(object):\n \"\"\" Subscribe a lambda to a log group[s]\n \"\"\"\n\n iam_delay = 1.5\n\n def __init__(self, session_factory, log_groups, filter_pattern):\n self.log_groups = log_groups\n self.filter_pattern = filter_pattern\n self.session_factory = session_factory\n self.session = session_factory()\n self.client = self.session.client('logs')\n\n def add(self, func):\n lambda_client = self.session.client('lambda')\n for group in self.log_groups:\n log.info(\n \"Creating subscription filter for %s\" % group['logGroupName'])\n region = group['arn'].split(':', 4)[3]\n try:\n lambda_client.add_permission(\n FunctionName=func.name,\n StatementId=group['logGroupName'][1:].replace('/', '-'),\n SourceArn=group['arn'],\n Action='lambda:InvokeFunction',\n Principal='logs.%s.amazonaws.com' % region)\n log.debug(\"Added lambda ipo nvoke log group permission\")\n # iam 
eventual consistency and propagation\n time.sleep(self.iam_delay)\n except ClientError as e:\n if e.response['Error']['Code'] != 'ResourceConflictException':\n raise\n # Consistent put semantics / ie no op if extant\n self.client.put_subscription_filter(\n logGroupName=group['logGroupName'],\n filterName=func.name,\n filterPattern=self.filter_pattern,\n destinationArn=func.alias or func.arn)\n\n def remove(self, func):\n lambda_client = self.session.client('lambda')\n for group in self.log_groups:\n try:\n response = lambda_client.remove_permission(\n FunctionName=func.name,\n StatementId=group['logGroupName'][1:].replace('/', '-'))\n log.debug(\"Removed lambda permission result: %s\" % response)\n except ClientError as e:\n if e.response['Error']['Code'] != 'ResourceNotFoundException':\n raise\n\n try:\n response = self.client.delete_subscription_filter(\n logGroupName=group['logGroupName'], filterName=func.name)\n log.debug(\"Removed subscription filter from: %s\",\n group['logGroupName'])\n except ClientError as e:\n if e.response['Error']['Code'] != 'ResourceNotFoundException':\n raise\n\n\nclass SNSSubscription(object):\n \"\"\" Subscribe a lambda to one or more SNS topics.\n \"\"\"\n\n iam_delay = 1.5\n\n def __init__(self, session_factory, topic_arns):\n self.topic_arns = topic_arns\n self.session_factory = session_factory\n self.session = session_factory()\n self.client = self.session.client('sns')\n\n @staticmethod\n def _parse_arn(arn):\n parts = arn.split(':')\n region, topic_name = parts[3], parts[5]\n statement_id = 'sns-topic-' + topic_name\n return region, topic_name, statement_id\n\n def add(self, func):\n lambda_client = self.session.client('lambda')\n for arn in self.topic_arns:\n region, topic_name, statement_id = self._parse_arn(arn)\n\n log.info(\"Subscribing %s to %s\" % (func.name, topic_name))\n\n # Add permission to lambda for sns invocation.\n try:\n lambda_client.add_permission(\n FunctionName=func.name,\n StatementId='sns-topic-' + topic_name,\n SourceArn=arn,\n Action='lambda:InvokeFunction',\n Principal='sns.amazonaws.com')\n log.debug(\"Added permission for sns to invoke lambda\")\n # iam eventual consistency and propagation\n time.sleep(self.iam_delay)\n except ClientError as e:\n if e.response['Error']['Code'] != 'ResourceConflictException':\n raise\n\n # Subscribe the lambda to the topic.\n topic = self.session.resource('sns').Topic(arn)\n topic.subscribe(Protocol='lambda', Endpoint=func.arn) # idempotent\n\n def remove(self, func):\n lambda_client = self.session.client('lambda')\n for topic_arn in self.topic_arns:\n region, topic_name, statement_id = self._parse_arn(topic_arn)\n\n try:\n response = lambda_client.remove_permission(\n FunctionName=func.name,\n StatementId=statement_id)\n log.debug(\"Removed lambda permission result: %s\" % response)\n except ClientError as e:\n if e.response['Error']['Code'] != 'ResourceNotFoundException':\n raise\n\n paginator = self.client.get_paginator('list_subscriptions_by_topic')\n\n class Done(Exception):\n pass\n try:\n for page in paginator.paginate(TopicArn=topic_arn):\n for subscription in page['Subscriptions']:\n if subscription['Endpoint'] != func.arn:\n continue\n try:\n response = self.client.unsubscribe(\n SubscriptionArn=subscription['SubscriptionArn'])\n log.debug(\"Unsubscribed %s from %s\" %\n (func.name, topic_name))\n except ClientError as e:\n code = e.response['Error']['Code']\n if code != 'ResourceNotFoundException':\n raise\n raise Done # break out of both for loops\n except Done:\n pass\n\n\nclass 
BucketSNSNotification(SNSSubscription):\n \"\"\" Subscribe a lambda to bucket notifications via SNS. \"\"\"\n\n def __init__(self, session_factory, bucket, topic=None):\n # NB: We are overwriting __init__ vs. extending.\n self.session_factory = session_factory\n self.session = session_factory()\n self.topic_arns = self.get_topic(bucket) if topic is None else [topic]\n self.client = self.session.client('sns')\n\n def get_topic(self, bucket):\n session = local_session(self.session_factory)\n sns = session.client('sns')\n s3 = session.client('s3')\n\n notifies = bucket['Notification']\n if 'TopicConfigurations' not in notifies:\n notifies['TopicConfigurations'] = []\n all_topics = notifies['TopicConfigurations']\n topic_arns = [t['TopicArn'] for t in all_topics\n if 's3:ObjectCreated:*' in t['Events']]\n if not topic_arns:\n # No suitable existing topic. Create one.\n topic_arn = sns.create_topic(Name=bucket['Name'])['TopicArn']\n policy = {\n 'Statement': [{\n 'Action': 'SNS:Publish',\n 'Effect': 'Allow',\n 'Resource': topic_arn,\n 'Principal': {'Service': 's3.amazonaws.com'}}]}\n sns.set_topic_attributes(\n TopicArn=topic_arn,\n AttributeName='Policy',\n AttributeValue=json.dumps(policy))\n notifies['TopicConfigurations'].append({\n 'TopicArn': topic_arn,\n 'Events': ['s3:ObjectCreated:*']})\n s3.put_bucket_notification_configuration(Bucket=bucket['Name'],\n NotificationConfiguration=notifies)\n topic_arns = [topic_arn]\n return topic_arns\n\n\nclass ConfigRule(object):\n \"\"\"Use a lambda as a custom config rule.\n\n \"\"\"\n\n def __init__(self, data, session_factory):\n self.data = data\n self.session_factory = session_factory\n self.session = session_factory()\n self.client = self.session.client('config')\n\n def __repr__(self):\n return \"<ConfigRule>\"\n\n def get_rule_params(self, func):\n # config does not support versions/aliases on lambda funcs\n func_arn = func.arn\n if func_arn.count(':') > 6:\n func_arn, version = func_arn.rsplit(':', 1)\n\n params = dict(\n ConfigRuleName=func.name,\n Description=func.description,\n Source={\n 'Owner': 'CUSTOM_LAMBDA',\n 'SourceIdentifier': func_arn,\n 'SourceDetails': [{\n 'EventSource': 'aws.config',\n 'MessageType': 'ConfigurationItemChangeNotification'}]\n }\n )\n\n if isinstance(func, PolicyLambda):\n manager = func.policy.get_resource_manager()\n if hasattr(manager.get_model(), 'config_type'):\n config_type = manager.get_model().config_type\n else:\n raise Exception(\"You may have attempted to deploy a config \"\n \"based lambda function with an unsupported config type. 
\"\n \"The most recent AWS config types are here: http://docs.aws\"\n \".amazon.com/config/latest/developerguide/resource\"\n \"-config-reference.html.\")\n params['Scope'] = {\n 'ComplianceResourceTypes': [config_type]}\n else:\n params['Scope']['ComplianceResourceTypes'] = self.data.get(\n 'resource-types', ())\n return params\n\n def get(self, rule_name):\n rules = resource_exists(\n self.client.describe_config_rules,\n ConfigRuleNames=[rule_name],\n NotFound=\"NoSuchConfigRuleException\")\n if not rules:\n return rules\n return rules['ConfigRules'][0]\n\n @staticmethod\n def delta(rule, params):\n # doesn't seem like we have anything mutable at the moment,\n # since we restrict params, maybe reusing the same policy name\n # with a different resource type.\n if rule['Scope'] != params['Scope']:\n return True\n if rule['Source'] != params['Source']:\n return True\n if rule.get('Description', '') != rule.get('Description', ''):\n return True\n return False\n\n def add(self, func):\n rule = self.get(func.name)\n params = self.get_rule_params(func)\n\n if rule and self.delta(rule, params):\n log.debug(\"Updating config rule for %s\" % self)\n rule.update(params)\n return self.client.put_config_rule(ConfigRule=rule)\n elif rule:\n log.debug(\"Config rule up to date\")\n return\n try:\n self.session.client('lambda').add_permission(\n FunctionName=func.name,\n StatementId=func.name,\n SourceAccount=func.arn.split(':')[4],\n Action='lambda:InvokeFunction',\n Principal='config.amazonaws.com')\n except ClientError as e:\n if e.response['Error']['Code'] != 'ResourceConflictException':\n raise\n\n log.debug(\"Adding config rule for %s\" % func.name)\n return self.client.put_config_rule(ConfigRule=params)\n\n def remove(self, func):\n rule = self.get(func.name)\n if not rule:\n return\n log.info(\"Removing config rule for %s\", func.name)\n try:\n self.client.delete_config_rule(\n ConfigRuleName=func.name)\n except ClientError as e:\n if e.response['Error']['Code'] != 'ResourceNotFoundException':\n raise\n",
"path": "c7n/mu.py"
}
] | [
{
"content": "# Copyright 2015-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nCloud Custodian Lambda Provisioning Support\n\ndocs/lambda.rst\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport abc\nimport base64\nimport hashlib\nimport importlib\nimport io\nimport json\nimport logging\nimport os\nimport time\nimport tempfile\nimport zipfile\n\nfrom boto3.s3.transfer import S3Transfer, TransferConfig\nfrom botocore.exceptions import ClientError\n\nfrom concurrent.futures import ThreadPoolExecutor\n\n# Static event mapping to help simplify cwe rules creation\nfrom c7n.cwe import CloudWatchEvents\nfrom c7n.logs_support import _timestamp_from_string\nfrom c7n.utils import parse_s3, local_session\n\n\nlog = logging.getLogger('custodian.lambda')\n\n\nclass PythonPackageArchive(object):\n \"\"\"Creates a zip file for python lambda functions.\n\n :param tuple modules: the Python modules to add to the archive\n\n Amazon doesn't give us straightforward docs here, only `an example\n <http://docs.aws.amazon.com/lambda/latest/dg/with-s3-example-deployment-pkg.html#with-s3-example-deployment-pkg-python>`_,\n from which we can infer that they simply unzip the file into a directory on\n ``sys.path``. So what we do is locate all of the ``modules`` specified, and\n add all of the ``.py`` files we find for these modules to a zip file.\n\n In addition to the modules specified during instantiation, you can add\n arbitrary additional files to the archive using :py:func:`add_file` and\n :py:func:`add_contents`. For example, since we only add ``*.py`` files for\n you, you'll need to manually add files for any compiled extension modules\n that your Lambda requires.\n\n \"\"\"\n\n def __init__(self, *modules):\n self._temp_archive_file = tempfile.NamedTemporaryFile()\n self._zip_file = zipfile.ZipFile(\n self._temp_archive_file, mode='w',\n compression=zipfile.ZIP_DEFLATED)\n self._closed = False\n self.add_modules(*modules)\n\n @property\n def path(self):\n return self._temp_archive_file.name\n\n @property\n def size(self):\n if not self._closed:\n raise ValueError(\"Archive not closed, size not accurate\")\n return os.stat(self._temp_archive_file.name).st_size\n\n def add_modules(self, *modules):\n \"\"\"Add the named Python modules to the archive. For consistency's sake\n we only add ``*.py`` files, not ``*.pyc``. We also don't add other\n files, including compiled modules. You'll have to add such files\n manually using :py:meth:`add_file`.\n \"\"\"\n for module_name in modules:\n module = importlib.import_module(module_name)\n\n if hasattr(module, '__path__'):\n # https://docs.python.org/3/reference/import.html#module-path\n for directory in module.__path__:\n self.add_directory(directory)\n if not hasattr(module, '__file__'):\n\n # Likely a namespace package. 
Try to add *.pth files so\n # submodules are importable under Python 2.7.\n\n sitedir = list(module.__path__)[0].rsplit('/', 1)[0]\n for filename in os.listdir(sitedir):\n s = filename.startswith\n e = filename.endswith\n if s(module_name) and e('-nspkg.pth'):\n self.add_file(os.path.join(sitedir, filename))\n\n elif hasattr(module, '__file__'):\n # https://docs.python.org/3/reference/import.html#__file__\n path = module.__file__\n\n if path.endswith('.pyc'):\n _path = path[:-1]\n if not os.path.isfile(_path):\n raise ValueError(\n 'Could not find a *.py source file behind ' + path)\n path = _path\n\n if not path.endswith('.py'):\n raise ValueError(\n 'We need a *.py source file instead of ' + path)\n\n self.add_file(path)\n\n def add_directory(self, path):\n \"\"\"Add ``*.py`` files under the directory ``path`` to the archive.\n \"\"\"\n for root, dirs, files in os.walk(path):\n arc_prefix = os.path.relpath(root, os.path.dirname(path))\n for f in files:\n if not f.endswith('.py'):\n continue\n f_path = os.path.join(root, f)\n dest_path = os.path.join(arc_prefix, f)\n self.add_file(f_path, dest_path)\n\n def add_file(self, src, dest=None):\n \"\"\"Add the file at ``src`` to the archive.\n\n If ``dest`` is ``None`` then it is added under just the original\n filename. So ``add_file('foo/bar.txt')`` ends up at ``bar.txt`` in the\n archive, while ``add_file('bar.txt', 'foo/bar.txt')`` ends up at\n ``foo/bar.txt``.\n\n \"\"\"\n dest = dest or os.path.basename(src)\n with open(src, 'rb') as fp:\n contents = fp.read()\n self.add_contents(dest, contents)\n\n def add_py_file(self, src, dest=None):\n \"\"\"This is a special case of :py:meth:`add_file` that helps for adding\n a ``py`` when a ``pyc`` may be present as well. So for example, if\n ``__file__`` is ``foo.pyc`` and you do:\n\n .. code-block:: python\n\n archive.add_py_file(__file__)\n\n then this method will add ``foo.py`` instead if it exists, and raise\n ``IOError`` if it doesn't.\n\n \"\"\"\n src = src[:-1] if src.endswith('.pyc') else src\n self.add_file(src, dest)\n\n def add_contents(self, dest, contents):\n \"\"\"Add file contents to the archive under ``dest``.\n\n If ``dest`` is a path, it will be added compressed and world-readable\n (user-writeable). You may also pass a :py:class:`~zipfile.ZipInfo` for\n custom behavior.\n\n \"\"\"\n assert not self._closed, \"Archive closed\"\n if not isinstance(dest, zipfile.ZipInfo):\n dest = zinfo(dest) # see for some caveats\n self._zip_file.writestr(dest, contents)\n\n def close(self):\n \"\"\"Close the zip file.\n\n Note underlying tempfile is removed when archive is garbage collected.\n \"\"\"\n self._closed = True\n self._zip_file.close()\n log.debug(\n \"Created custodian lambda archive size: %0.2fmb\",\n (os.path.getsize(self._temp_archive_file.name) / (\n 1024.0 * 1024.0)))\n return self\n\n def remove(self):\n \"\"\"Dispose of the temp file for garbage collection.\"\"\"\n if self._temp_archive_file:\n self._temp_archive_file = None\n\n def get_checksum(self):\n \"\"\"Return the b64 encoded sha256 checksum of the archive.\"\"\"\n assert self._closed, \"Archive not closed\"\n with open(self._temp_archive_file.name, 'rb') as fh:\n return base64.b64encode(checksum(fh, hashlib.sha256()))\n\n def get_bytes(self):\n \"\"\"Return the entire zip file as a byte string. 
\"\"\"\n assert self._closed, \"Archive not closed\"\n return open(self._temp_archive_file.name, 'rb').read()\n\n def get_reader(self):\n \"\"\"Return a read-only :py:class:`~zipfile.ZipFile`.\"\"\"\n assert self._closed, \"Archive not closed\"\n buf = io.BytesIO(self.get_bytes())\n return zipfile.ZipFile(buf, mode='r')\n\n def get_filenames(self):\n \"\"\"Return a list of filenames in the archive.\"\"\"\n return [n.filename for n in self.get_reader().filelist]\n\n\ndef checksum(fh, hasher, blocksize=65536):\n buf = fh.read(blocksize)\n while len(buf) > 0:\n hasher.update(buf)\n buf = fh.read(blocksize)\n return hasher.digest()\n\n\ndef custodian_archive(packages=None):\n \"\"\"Create a lambda code archive for running custodian.\n\n Lambda archive currently always includes `c7n` and `pkg_resources`. Add additional\n packages in the mode block\n\n Example policy that includes additional packages\n\n .. code-block:: yaml\n\n policy:\n name: lambda-archive-example\n resource: s3\n mode:\n packages:\n - botocore\n\n Kwargs:\n packages (set): List of additional packages to include in the lambda archive.\n \"\"\"\n modules = {'c7n', 'pkg_resources'}\n if packages:\n modules = filter(None, modules.union(packages))\n return PythonPackageArchive(*modules)\n\n\nclass LambdaManager(object):\n \"\"\" Provides CRUD operations around lambda functions\n \"\"\"\n\n def __init__(self, session_factory, s3_asset_path=None):\n self.session_factory = session_factory\n self.client = self.session_factory().client('lambda')\n self.s3_asset_path = s3_asset_path\n\n def list_functions(self, prefix=None):\n p = self.client.get_paginator('list_functions')\n for rp in p.paginate():\n for f in rp.get('Functions', []):\n if not prefix:\n yield f\n elif f['FunctionName'].startswith(prefix):\n yield f\n\n def publish(self, func, alias=None, role=None, s3_uri=None):\n result, changed = self._create_or_update(\n func, role, s3_uri, qualifier=alias)\n func.arn = result['FunctionArn']\n if alias and changed:\n func.alias = self.publish_alias(result, alias)\n elif alias:\n func.alias = \"%s:%s\" % (func.arn, alias)\n else:\n func.alias = func.arn\n\n for e in func.get_events(self.session_factory):\n if e.add(func):\n log.debug(\n \"Added event source: %s to function: %s\",\n e, func.alias)\n return result\n\n def remove(self, func, alias=None):\n for e in func.get_events(self.session_factory):\n e.remove(func)\n log.info(\"Removing lambda function %s\", func.name)\n try:\n self.client.delete_function(FunctionName=func.name)\n except ClientError as e:\n if e.response['Error']['Code'] != 'ResourceNotFoundException':\n raise\n\n def metrics(self, funcs, start, end, period=5 * 60):\n\n def func_metrics(f):\n metrics = local_session(self.session_factory).client('cloudwatch')\n values = {}\n for m in ('Errors', 'Invocations', 'Durations', 'Throttles'):\n values[m] = metrics.get_metric_statistics(\n Namespace=\"AWS/Lambda\",\n Dimensions=[{\n 'Name': 'FunctionName',\n 'Value': (\n isinstance(f, dict) and f['FunctionName'] or f.name)}],\n Statistics=[\"Sum\"],\n StartTime=start,\n EndTime=end,\n Period=period,\n MetricName=m)['Datapoints']\n return values\n\n with ThreadPoolExecutor(max_workers=3) as w:\n results = list(w.map(func_metrics, funcs))\n for m, f in zip(results, funcs):\n if isinstance(f, dict):\n f['Metrics'] = m\n return results\n\n def logs(self, func, start, end):\n logs = self.session_factory().client('logs')\n group_name = \"/aws/lambda/%s\" % func.name\n log.info(\"Fetching logs from group: %s\" % group_name)\n 
try:\n logs.describe_log_groups(\n logGroupNamePrefix=group_name)\n except ClientError as e:\n if e.response['Error']['Code'] == 'ResourceNotFoundException':\n return\n raise\n try:\n log_streams = logs.describe_log_streams(\n logGroupName=group_name,\n orderBy=\"LastEventTime\", limit=3, descending=True)\n except ClientError as e:\n if e.response['Error']['Code'] == 'ResourceNotFoundException':\n return\n raise\n start = _timestamp_from_string(start)\n end = _timestamp_from_string(end)\n for s in reversed(log_streams['logStreams']):\n result = logs.get_log_events(\n logGroupName=group_name,\n logStreamName=s['logStreamName'],\n startTime=start,\n endTime=end,\n )\n for e in result['events']:\n yield e\n\n @staticmethod\n def delta_function(old_config, new_config):\n for k in new_config:\n if k not in old_config or new_config[k] != old_config[k]:\n return True\n\n @staticmethod\n def diff_tags(old_tags, new_tags):\n add = {}\n remove = set()\n for k,v in new_tags.items():\n if k not in old_tags or old_tags[k] != v:\n add[k] = v\n for k in old_tags:\n if k not in new_tags:\n remove.add(k)\n return add, list(remove)\n\n def _create_or_update(self, func, role=None, s3_uri=None, qualifier=None):\n role = func.role or role\n assert role, \"Lambda function role must be specified\"\n archive = func.get_archive()\n existing = self.get(func.name, qualifier)\n\n if s3_uri:\n # TODO: support versioned buckets\n bucket, key = self._upload_func(s3_uri, func, archive)\n code_ref = {'S3Bucket': bucket, 'S3Key': key}\n else:\n code_ref = {'ZipFile': archive.get_bytes()}\n\n changed = False\n if existing:\n old_config = existing['Configuration']\n if archive.get_checksum() != old_config['CodeSha256']:\n log.debug(\"Updating function %s code\", func.name)\n params = dict(FunctionName=func.name, Publish=True)\n params.update(code_ref)\n result = self.client.update_function_code(**params)\n changed = True\n # TODO/Consider also set publish above to false, and publish\n # after configuration change?\n\n new_config = func.get_config()\n new_config['Role'] = role\n new_tags = new_config.pop('Tags', {})\n\n if self.delta_function(old_config, new_config):\n log.debug(\"Updating function: %s config\" % func.name)\n result = self.client.update_function_configuration(**new_config)\n changed = True\n\n # tag dance\n base_arn = old_config['FunctionArn']\n if base_arn.count(':') > 6: # trim version/alias\n base_arn = base_arn.rsplit(':', 1)[0]\n\n old_tags = self.client.list_tags(Resource=base_arn)['Tags']\n tags_to_add, tags_to_remove = self.diff_tags(old_tags, new_tags)\n\n if tags_to_add:\n log.debug(\"Adding/updating tags: %s config\" % func.name)\n self.client.tag_resource(\n Resource=base_arn, Tags=tags_to_add)\n if tags_to_remove:\n log.debug(\"Removing tags: %s config\" % func.name)\n self.client.untag_resource(\n Resource=base_arn, TagKeys=tags_to_remove)\n\n if not changed:\n result = old_config\n else:\n log.info('Publishing custodian policy lambda function %s', func.name)\n params = func.get_config()\n params.update({'Publish': True, 'Code': code_ref, 'Role': role})\n result = self.client.create_function(**params)\n changed = True\n\n return result, changed\n\n def _upload_func(self, s3_uri, func, archive):\n _, bucket, key_prefix = parse_s3(s3_uri)\n key = \"%s/%s\" % (key_prefix, func.name)\n transfer = S3Transfer(\n self.session_factory().client('s3'),\n config=TransferConfig(\n multipart_threshold=1024 * 1024 * 4))\n transfer.upload_file(\n archive.path,\n bucket=bucket,\n key=key,\n extra_args={\n 
'ServerSideEncryption': 'AES256'})\n return bucket, key\n\n def publish_alias(self, func_data, alias):\n \"\"\"Create or update an alias for the given function.\n \"\"\"\n if not alias:\n return func_data['FunctionArn']\n func_name = func_data['FunctionName']\n func_version = func_data['Version']\n\n exists = resource_exists(\n self.client.get_alias, FunctionName=func_name, Name=alias)\n\n if not exists:\n log.debug(\"Publishing custodian lambda alias %s\", alias)\n alias_result = self.client.create_alias(\n FunctionName=func_name,\n Name=alias,\n FunctionVersion=func_version)\n else:\n if (exists['FunctionVersion'] == func_version and\n exists['Name'] == alias):\n return exists['AliasArn']\n log.debug('Updating custodian lambda alias %s', alias)\n alias_result = self.client.update_alias(\n FunctionName=func_name,\n Name=alias,\n FunctionVersion=func_version)\n return alias_result['AliasArn']\n\n def get(self, func_name, qualifier=None):\n params = {'FunctionName': func_name}\n if qualifier:\n params['Qualifier'] = qualifier\n return resource_exists(\n self.client.get_function, **params)\n\n\ndef resource_exists(op, NotFound=\"ResourceNotFoundException\", *args, **kw):\n try:\n return op(*args, **kw)\n except ClientError as e:\n if e.response['Error']['Code'] == NotFound:\n return False\n raise\n\n\nclass AbstractLambdaFunction:\n \"\"\"Abstract base class for lambda functions.\"\"\"\n __metaclass__ = abc.ABCMeta\n\n alias = None\n\n @abc.abstractproperty\n def name(self):\n \"\"\"Name for the lambda function\"\"\"\n\n @abc.abstractproperty\n def runtime(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def description(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def handler(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def memory_size(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def timeout(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def role(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def subnets(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def security_groups(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def dead_letter_config(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def environment(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def kms_key_arn(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def tracing_config(self):\n \"\"\" \"\"\"\n\n @abc.abstractproperty\n def tags(self):\n \"\"\" \"\"\"\n\n @abc.abstractmethod\n def get_events(self, session_factory):\n \"\"\"event sources that should be bound to this lambda.\"\"\"\n\n @abc.abstractmethod\n def get_archive(self):\n \"\"\"Return the lambda distribution archive object.\"\"\"\n\n def get_config(self):\n conf = {\n 'FunctionName': self.name,\n 'MemorySize': self.memory_size,\n 'Role': self.role,\n 'Description': self.description,\n 'Runtime': self.runtime,\n 'Handler': self.handler,\n 'Timeout': self.timeout,\n 'DeadLetterConfig': self.dead_letter_config,\n 'Environment': self.environment,\n 'KMSKeyArn': self.kms_key_arn,\n 'TracingConfig': self.tracing_config,\n 'Tags': self.tags}\n if self.subnets and self.security_groups:\n conf['VpcConfig'] = {\n 'SubnetIds': self.subnets,\n 'SecurityGroupIds': self.security_groups}\n return conf\n\n\nclass LambdaFunction(AbstractLambdaFunction):\n\n def __init__(self, func_data, archive):\n self.func_data = func_data\n required = set((\n 'name', 'handler', 'memory_size',\n 'timeout', 'role', 'runtime',\n 'description'))\n missing = required.difference(func_data)\n if missing:\n raise ValueError(\"Missing required keys %s\" % \" 
\".join(missing))\n self.archive = archive\n\n @property\n def name(self):\n return self.func_data['name']\n\n @property\n def description(self):\n return self.func_data['description']\n\n @property\n def handler(self):\n return self.func_data['handler']\n\n @property\n def memory_size(self):\n return self.func_data['memory_size']\n\n @property\n def timeout(self):\n return self.func_data['timeout']\n\n @property\n def runtime(self):\n return self.func_data['runtime']\n\n @property\n def role(self):\n return self.func_data['role']\n\n @property\n def security_groups(self):\n return self.func_data.get('security_groups', None)\n\n @property\n def subnets(self):\n return self.func_data.get('subnets', None)\n\n @property\n def dead_letter_config(self):\n return self.func_data.get('dead_letter_config', {})\n\n @property\n def environment(self):\n return self.func_data.get('environment', {})\n\n @property\n def kms_key_arn(self):\n return self.func_data.get('kms_key_arn', '')\n\n @property\n def tracing_config(self):\n return self.func_data.get('tracing_config', {})\n\n @property\n def tags(self):\n return self.func_data.get('tags', {})\n\n def get_events(self, session_factory):\n return self.func_data.get('events', ())\n\n def get_archive(self):\n return self.archive\n\n\nPolicyHandlerTemplate = \"\"\"\\\nfrom c7n import handler\n\ndef run(event, context):\n return handler.dispatch_event(event, context)\n\n\"\"\"\n\n\nclass PolicyLambda(AbstractLambdaFunction):\n \"\"\"Wraps a custodian policy to turn it into a lambda function.\n \"\"\"\n handler = \"custodian_policy.run\"\n\n def __init__(self, policy):\n self.policy = policy\n self.archive = custodian_archive(packages=self.packages)\n\n @property\n def name(self):\n return \"custodian-%s\" % self.policy.name\n\n @property\n def description(self):\n return self.policy.data.get(\n 'description', 'cloud-custodian lambda policy')\n\n @property\n def role(self):\n return self.policy.data['mode'].get('role', '')\n\n @property\n def runtime(self):\n return self.policy.data['mode'].get('runtime', 'python2.7')\n\n @property\n def memory_size(self):\n return self.policy.data['mode'].get('memory', 512)\n\n @property\n def timeout(self):\n return self.policy.data['mode'].get('timeout', 60)\n\n @property\n def security_groups(self):\n return self.policy.data['mode'].get('security_groups', None)\n\n @property\n def subnets(self):\n return self.policy.data['mode'].get('subnets', None)\n\n @property\n def dead_letter_config(self):\n return self.policy.data['mode'].get('dead_letter_config', {})\n\n @property\n def environment(self):\n return self.policy.data['mode'].get('environment', {})\n\n @property\n def kms_key_arn(self):\n return self.policy.data['mode'].get('kms_key_arn', '')\n\n @property\n def tracing_config(self):\n return self.policy.data['mode'].get('tracing_config', {})\n\n @property\n def tags(self):\n return self.policy.data['mode'].get('tags', {})\n\n @property\n def packages(self):\n return self.policy.data['mode'].get('packages')\n\n def get_events(self, session_factory):\n events = []\n if self.policy.data['mode']['type'] == 'config-rule':\n events.append(\n ConfigRule(self.policy.data['mode'], session_factory))\n else:\n events.append(\n CloudWatchEventSource(\n self.policy.data['mode'], session_factory))\n return events\n\n def get_archive(self):\n self.archive.add_contents(\n 'config.json', json.dumps(\n {'policies': [self.policy.data]}, indent=2))\n self.archive.add_contents('custodian_policy.py', PolicyHandlerTemplate)\n 
self.archive.close()\n return self.archive\n\n\ndef zinfo(fname):\n \"\"\"Amazon lambda exec environment setup can break itself\n if zip files aren't constructed a particular way.\n\n ie. It respects file perm attributes from the zip including\n those that prevent lambda from working. Namely lambda\n extracts code as one user, and executes code as a different\n user. Without permissions for the executing user to read\n the file the lambda function is broken.\n\n Python's default zipfile.writestr does a 0600 perm which\n we modify here as a workaround.\n \"\"\"\n info = zipfile.ZipInfo(fname)\n # Grant other users permissions to read\n # http://unix.stackexchange.com/questions/14705/\n info.external_attr = 0o644 << 16\n return info\n\n\nclass CloudWatchEventSource(object):\n \"\"\"Subscribe a lambda to cloud watch events.\n\n Cloud watch events supports a number of different event\n sources, from periodic timers with cron syntax, to\n real time instance state notifications, cloud trail\n events, and realtime asg membership changes.\n\n Event Pattern for Instance State\n\n .. code-block:: json\n\n {\n \"source\": [\"aws.ec2\"],\n \"detail-type\": [\"EC2 Instance State-change Notification\"],\n \"detail\": { \"state\": [\"pending\"]}\n }\n\n Event Pattern for Cloud Trail API\n\n .. code-block:: json\n\n {\n \"detail-type\": [\"AWS API Call via CloudTrail\"],\n \"detail\": {\n \"eventSource\": [\"s3.amazonaws.com\"],\n \"eventName\": [\"CreateBucket\", \"DeleteBucket\"]\n }\n }\n \"\"\"\n ASG_EVENT_MAPPING = {\n 'launch-success': 'EC2 Instance Launch Successful',\n 'launch-failure': 'EC2 Instance Launch Unsuccessful',\n 'terminate-success': 'EC2 Instance Terminate Successful',\n 'terminate-failure': 'EC2 Instance Terminate Unsuccessful'}\n\n def __init__(self, data, session_factory, prefix=\"custodian-\"):\n self.session_factory = session_factory\n self.session = session_factory()\n self.client = self.session.client('events')\n self.data = data\n self.prefix = prefix\n\n def _make_notification_id(self, function_name):\n if not function_name.startswith(self.prefix):\n return \"%s%s\" % (self.prefix, function_name)\n return function_name\n\n def get(self, rule_name):\n return resource_exists(\n self.client.describe_rule,\n Name=self._make_notification_id(rule_name))\n\n @staticmethod\n def delta(src, tgt):\n \"\"\"Given two cwe rules determine if the configuration is the same.\n\n Name is already implied.\n \"\"\"\n for k in ['State', 'EventPattern', 'ScheduleExpression']:\n if src.get(k) != tgt.get(k):\n return True\n return False\n\n def __repr__(self):\n return \"<CWEvent Type:%s Events:%s>\" % (\n self.data.get('type'),\n ', '.join(map(str, self.data.get('events', []))))\n\n def resolve_cloudtrail_payload(self, payload):\n sources = self.data.get('sources', [])\n events = []\n for e in self.data.get('events'):\n if not isinstance(e, dict):\n events.append(e)\n event_info = CloudWatchEvents.get(e)\n if event_info is None:\n continue\n else:\n event_info = e\n events.append(e['event'])\n sources.append(event_info['source'])\n\n payload['detail'] = {\n 'eventSource': list(set(sources)),\n 'eventName': events}\n\n def render_event_pattern(self):\n event_type = self.data.get('type')\n payload = {}\n if event_type == 'cloudtrail':\n payload['detail-type'] = ['AWS API Call via CloudTrail']\n self.resolve_cloudtrail_payload(payload)\n\n if event_type == 'cloudtrail':\n if 'signin.amazonaws.com' in payload['detail']['eventSource']:\n payload['detail-type'] = ['AWS Console Sign In via CloudTrail']\n 
elif event_type == 'guard-duty':\n payload['source'] = ['aws.guardduty']\n payload['detail-type'] = ['GuardDuty Finding']\n if 'resource-filter' in self.data:\n payload.update({\n 'detail': {'resource': {'resourceType': [self.data['resource-filter']]}}})\n elif event_type == \"ec2-instance-state\":\n payload['source'] = ['aws.ec2']\n payload['detail-type'] = [\n \"EC2 Instance State-change Notification\"]\n # Technically could let empty be all events, but likely misconfig\n payload['detail'] = {\"state\": self.data.get('events', [])}\n elif event_type == \"asg-instance-state\":\n payload['source'] = ['aws.autoscaling']\n events = []\n for e in self.data.get('events', []):\n events.append(self.ASG_EVENT_MAPPING.get(e, e))\n payload['detail-type'] = events\n elif event_type == 'periodic':\n pass\n else:\n raise ValueError(\n \"Unknown lambda event source type: %s\" % event_type)\n if not payload:\n return None\n return json.dumps(payload)\n\n def add(self, func):\n params = dict(\n Name=func.name, Description=func.description, State='ENABLED')\n\n pattern = self.render_event_pattern()\n if pattern:\n params['EventPattern'] = pattern\n schedule = self.data.get('schedule')\n if schedule:\n params['ScheduleExpression'] = schedule\n\n rule = self.get(func.name)\n\n if rule and self.delta(rule, params):\n log.debug(\"Updating cwe rule for %s\" % self)\n response = self.client.put_rule(**params)\n elif not rule:\n log.debug(\"Creating cwe rule for %s\" % (self))\n response = self.client.put_rule(**params)\n else:\n response = {'RuleArn': rule['Arn']}\n\n try:\n self.session.client('lambda').add_permission(\n FunctionName=func.name,\n StatementId=func.name,\n SourceArn=response['RuleArn'],\n Action='lambda:InvokeFunction',\n Principal='events.amazonaws.com')\n log.debug('Added lambda invoke cwe rule permission')\n except ClientError as e:\n if e.response['Error']['Code'] != 'ResourceConflictException':\n raise\n\n # Add Targets\n found = False\n response = self.client.list_targets_by_rule(Rule=func.name)\n # CWE seems to be quite picky about function arns (no aliases/versions)\n func_arn = func.arn\n\n if func_arn.count(':') > 6:\n func_arn, version = func_arn.rsplit(':', 1)\n for t in response['Targets']:\n if func_arn == t['Arn']:\n found = True\n\n if found:\n return\n\n log.debug('Creating cwe rule target for %s on func:%s' % (\n self, func_arn))\n\n self.client.put_targets(\n Rule=func.name, Targets=[{\"Id\": func.name, \"Arn\": func_arn}])\n\n return True\n\n def update(self, func):\n self.add(func)\n\n def pause(self, func):\n try:\n self.client.disable_rule(Name=func.name)\n except Exception:\n pass\n\n def resume(self, func):\n try:\n self.client.enable_rule(Name=func.name)\n except Exception:\n pass\n\n def remove(self, func):\n if self.get(func.name):\n log.info(\"Removing cwe targets and rule %s\", func.name)\n try:\n targets = self.client.list_targets_by_rule(\n Rule=func.name)['Targets']\n self.client.remove_targets(\n Rule=func.name,\n Ids=[t['Id'] for t in targets])\n except ClientError as e:\n log.warning(\n \"Could not remove targets for rule %s error: %s\",\n func.name, e)\n self.client.delete_rule(Name=func.name)\n\n\nclass BucketLambdaNotification(object):\n \"\"\" Subscribe a lambda to bucket notifications directly. 
\"\"\"\n\n def __init__(self, data, session_factory, bucket):\n self.data = data\n self.session_factory = session_factory\n self.session = session_factory()\n self.bucket = bucket\n\n def delta(self, src, tgt):\n for k in ['Id', 'LambdaFunctionArn', 'Events', 'Filters']:\n if src.get(k) != tgt.get(k):\n return True\n return False\n\n def _get_notifies(self, s3, func):\n notifies = s3.get_bucket_notification_configuration(\n Bucket=self.bucket['Name'])\n found = False\n for f in notifies.get('LambdaFunctionConfigurations', []):\n if f['Id'] != func.name:\n continue\n found = f\n return notifies, found\n\n def add(self, func):\n s3 = self.session.client('s3')\n notifies, found = self._get_notifies(s3, func)\n notifies.pop('ResponseMetadata', None)\n func_arn = func.arn\n if func_arn.rsplit(':', 1)[-1].isdigit():\n func_arn = func_arn.rsplit(':', 1)[0]\n n_params = {\n 'Id': func.name,\n 'LambdaFunctionArn': func_arn,\n 'Events': self.data.get('events', ['s3:ObjectCreated:*'])}\n if self.data.get('filters'):\n n_params['Filters'] = {\n 'Key': {'FilterRules': self.filters}}\n\n if found:\n if self.delta(found, n_params):\n notifies['LambdaFunctionConfigurations'].remove(found)\n else:\n log.info(\"Bucket lambda notification present\")\n return\n\n lambda_client = self.session.client('lambda')\n params = dict(\n FunctionName=func.name,\n StatementId=self.bucket['Name'],\n Action='lambda:InvokeFunction',\n Principal='s3.amazonaws.com')\n if self.data.get('account_s3'):\n params['SourceAccount'] = self.data['account_s3']\n params['SourceArn'] = 'arn:aws:s3:::*'\n else:\n params['SourceArn'] = 'arn:aws:s3:::%' % self.bucket['Name']\n try:\n lambda_client.add_permission(**params)\n except ClientError as e:\n if e.response['Error']['Code'] != 'ResourceConflictException':\n raise\n\n notifies.setdefault('LambdaFunctionConfigurations', []).append(n_params)\n s3.put_bucket_notification_configuration(\n Bucket=self.bucket['Name'], NotificationConfiguration=notifies)\n\n return True\n\n def remove(self, func):\n s3 = self.session.client('s3')\n notifies, found = self._get_notifies(s3, func)\n if not found:\n return\n\n lambda_client = self.session.client('lambda')\n try:\n response = lambda_client.remove_permission(\n FunctionName=func['FunctionName'],\n StatementId=self.bucket['Name'])\n log.debug(\"Removed lambda permission result: %s\" % response)\n except ClientError as e:\n if e.response['Error']['Code'] != 'ResourceNotFoundException':\n raise\n\n notifies['LambdaFunctionConfigurations'].remove(found)\n s3.put_bucket_notification_configuration(\n Bucket=self.bucket['Name'],\n NotificationConfiguration=notifies)\n\n\nclass CloudWatchLogSubscription(object):\n \"\"\" Subscribe a lambda to a log group[s]\n \"\"\"\n\n iam_delay = 1.5\n\n def __init__(self, session_factory, log_groups, filter_pattern):\n self.log_groups = log_groups\n self.filter_pattern = filter_pattern\n self.session_factory = session_factory\n self.session = session_factory()\n self.client = self.session.client('logs')\n\n def add(self, func):\n lambda_client = self.session.client('lambda')\n for group in self.log_groups:\n log.info(\n \"Creating subscription filter for %s\" % group['logGroupName'])\n region = group['arn'].split(':', 4)[3]\n try:\n lambda_client.add_permission(\n FunctionName=func.name,\n StatementId=group['logGroupName'][1:].replace('/', '-'),\n SourceArn=group['arn'],\n Action='lambda:InvokeFunction',\n Principal='logs.%s.amazonaws.com' % region)\n log.debug(\"Added lambda ipo nvoke log group permission\")\n # iam 
eventual consistency and propagation\n time.sleep(self.iam_delay)\n except ClientError as e:\n if e.response['Error']['Code'] != 'ResourceConflictException':\n raise\n # Consistent put semantics / ie no op if extant\n self.client.put_subscription_filter(\n logGroupName=group['logGroupName'],\n filterName=func.name,\n filterPattern=self.filter_pattern,\n destinationArn=func.alias or func.arn)\n\n def remove(self, func):\n lambda_client = self.session.client('lambda')\n for group in self.log_groups:\n try:\n response = lambda_client.remove_permission(\n FunctionName=func.name,\n StatementId=group['logGroupName'][1:].replace('/', '-'))\n log.debug(\"Removed lambda permission result: %s\" % response)\n except ClientError as e:\n if e.response['Error']['Code'] != 'ResourceNotFoundException':\n raise\n\n try:\n response = self.client.delete_subscription_filter(\n logGroupName=group['logGroupName'], filterName=func.name)\n log.debug(\"Removed subscription filter from: %s\",\n group['logGroupName'])\n except ClientError as e:\n if e.response['Error']['Code'] != 'ResourceNotFoundException':\n raise\n\n\nclass SNSSubscription(object):\n \"\"\" Subscribe a lambda to one or more SNS topics.\n \"\"\"\n\n iam_delay = 1.5\n\n def __init__(self, session_factory, topic_arns):\n self.topic_arns = topic_arns\n self.session_factory = session_factory\n self.session = session_factory()\n self.client = self.session.client('sns')\n\n @staticmethod\n def _parse_arn(arn):\n parts = arn.split(':')\n region, topic_name = parts[3], parts[5]\n statement_id = 'sns-topic-' + topic_name\n return region, topic_name, statement_id\n\n def add(self, func):\n lambda_client = self.session.client('lambda')\n for arn in self.topic_arns:\n region, topic_name, statement_id = self._parse_arn(arn)\n\n log.info(\"Subscribing %s to %s\" % (func.name, topic_name))\n\n # Add permission to lambda for sns invocation.\n try:\n lambda_client.add_permission(\n FunctionName=func.name,\n StatementId='sns-topic-' + topic_name,\n SourceArn=arn,\n Action='lambda:InvokeFunction',\n Principal='sns.amazonaws.com')\n log.debug(\"Added permission for sns to invoke lambda\")\n # iam eventual consistency and propagation\n time.sleep(self.iam_delay)\n except ClientError as e:\n if e.response['Error']['Code'] != 'ResourceConflictException':\n raise\n\n # Subscribe the lambda to the topic.\n topic = self.session.resource('sns').Topic(arn)\n topic.subscribe(Protocol='lambda', Endpoint=func.arn) # idempotent\n\n def remove(self, func):\n lambda_client = self.session.client('lambda')\n for topic_arn in self.topic_arns:\n region, topic_name, statement_id = self._parse_arn(topic_arn)\n\n try:\n response = lambda_client.remove_permission(\n FunctionName=func.name,\n StatementId=statement_id)\n log.debug(\"Removed lambda permission result: %s\" % response)\n except ClientError as e:\n if e.response['Error']['Code'] != 'ResourceNotFoundException':\n raise\n\n paginator = self.client.get_paginator('list_subscriptions_by_topic')\n\n class Done(Exception):\n pass\n try:\n for page in paginator.paginate(TopicArn=topic_arn):\n for subscription in page['Subscriptions']:\n if subscription['Endpoint'] != func.arn:\n continue\n try:\n response = self.client.unsubscribe(\n SubscriptionArn=subscription['SubscriptionArn'])\n log.debug(\"Unsubscribed %s from %s\" %\n (func.name, topic_name))\n except ClientError as e:\n code = e.response['Error']['Code']\n if code != 'ResourceNotFoundException':\n raise\n raise Done # break out of both for loops\n except Done:\n pass\n\n\nclass 
BucketSNSNotification(SNSSubscription):\n \"\"\" Subscribe a lambda to bucket notifications via SNS. \"\"\"\n\n def __init__(self, session_factory, bucket, topic=None):\n # NB: We are overwriting __init__ vs. extending.\n self.session_factory = session_factory\n self.session = session_factory()\n self.topic_arns = self.get_topic(bucket) if topic is None else [topic]\n self.client = self.session.client('sns')\n\n def get_topic(self, bucket):\n session = local_session(self.session_factory)\n sns = session.client('sns')\n s3 = session.client('s3')\n\n notifies = bucket['Notification']\n if 'TopicConfigurations' not in notifies:\n notifies['TopicConfigurations'] = []\n all_topics = notifies['TopicConfigurations']\n topic_arns = [t['TopicArn'] for t in all_topics\n if 's3:ObjectCreated:*' in t['Events']]\n if not topic_arns:\n # No suitable existing topic. Create one.\n topic_arn = sns.create_topic(Name=bucket['Name'])['TopicArn']\n policy = {\n 'Statement': [{\n 'Action': 'SNS:Publish',\n 'Effect': 'Allow',\n 'Resource': topic_arn,\n 'Principal': {'Service': 's3.amazonaws.com'}}]}\n sns.set_topic_attributes(\n TopicArn=topic_arn,\n AttributeName='Policy',\n AttributeValue=json.dumps(policy))\n notifies['TopicConfigurations'].append({\n 'TopicArn': topic_arn,\n 'Events': ['s3:ObjectCreated:*']})\n s3.put_bucket_notification_configuration(Bucket=bucket['Name'],\n NotificationConfiguration=notifies)\n topic_arns = [topic_arn]\n return topic_arns\n\n\nclass ConfigRule(object):\n \"\"\"Use a lambda as a custom config rule.\n\n \"\"\"\n\n def __init__(self, data, session_factory):\n self.data = data\n self.session_factory = session_factory\n self.session = session_factory()\n self.client = self.session.client('config')\n\n def __repr__(self):\n return \"<ConfigRule>\"\n\n def get_rule_params(self, func):\n # config does not support versions/aliases on lambda funcs\n func_arn = func.arn\n if func_arn.count(':') > 6:\n func_arn, version = func_arn.rsplit(':', 1)\n\n params = dict(\n ConfigRuleName=func.name,\n Description=func.description,\n Source={\n 'Owner': 'CUSTOM_LAMBDA',\n 'SourceIdentifier': func_arn,\n 'SourceDetails': [{\n 'EventSource': 'aws.config',\n 'MessageType': 'ConfigurationItemChangeNotification'}]\n }\n )\n\n if isinstance(func, PolicyLambda):\n manager = func.policy.get_resource_manager()\n if hasattr(manager.get_model(), 'config_type'):\n config_type = manager.get_model().config_type\n else:\n raise Exception(\"You may have attempted to deploy a config \"\n \"based lambda function with an unsupported config type. 
\"\n \"The most recent AWS config types are here: http://docs.aws\"\n \".amazon.com/config/latest/developerguide/resource\"\n \"-config-reference.html.\")\n params['Scope'] = {\n 'ComplianceResourceTypes': [config_type]}\n else:\n params['Scope']['ComplianceResourceTypes'] = self.data.get(\n 'resource-types', ())\n return params\n\n def get(self, rule_name):\n rules = resource_exists(\n self.client.describe_config_rules,\n ConfigRuleNames=[rule_name],\n NotFound=\"NoSuchConfigRuleException\")\n if not rules:\n return rules\n return rules['ConfigRules'][0]\n\n @staticmethod\n def delta(rule, params):\n # doesn't seem like we have anything mutable at the moment,\n # since we restrict params, maybe reusing the same policy name\n # with a different resource type.\n if rule['Scope'] != params['Scope']:\n return True\n if rule['Source'] != params['Source']:\n return True\n if rule.get('Description', '') != rule.get('Description', ''):\n return True\n return False\n\n def add(self, func):\n rule = self.get(func.name)\n params = self.get_rule_params(func)\n\n if rule and self.delta(rule, params):\n log.debug(\"Updating config rule for %s\" % self)\n rule.update(params)\n return self.client.put_config_rule(ConfigRule=rule)\n elif rule:\n log.debug(\"Config rule up to date\")\n return\n try:\n self.session.client('lambda').add_permission(\n FunctionName=func.name,\n StatementId=func.name,\n SourceAccount=func.arn.split(':')[4],\n Action='lambda:InvokeFunction',\n Principal='config.amazonaws.com')\n except ClientError as e:\n if e.response['Error']['Code'] != 'ResourceConflictException':\n raise\n\n log.debug(\"Adding config rule for %s\" % func.name)\n return self.client.put_config_rule(ConfigRule=params)\n\n def remove(self, func):\n rule = self.get(func.name)\n if not rule:\n return\n log.info(\"Removing config rule for %s\", func.name)\n try:\n self.client.delete_config_rule(\n ConfigRuleName=func.name)\n except ClientError as e:\n if e.response['Error']['Code'] != 'ResourceNotFoundException':\n raise\n",
"path": "c7n/mu.py"
}
] | diff --git a/c7n/mu.py b/c7n/mu.py
index fcff0833ac0..d595b3a4c43 100644
--- a/c7n/mu.py
+++ b/c7n/mu.py
@@ -707,11 +707,11 @@ def timeout(self):
@property
def security_groups(self):
- return None
+ return self.policy.data['mode'].get('security_groups', None)
@property
def subnets(self):
- return None
+ return self.policy.data['mode'].get('subnets', None)
@property
def dead_letter_config(self):
|
translate__pootle-5179 | Severe performance degradation of sync_stores
Earlier today we updated our production translation server (merged the 'Raw font' PR branch with master and switched to master), so this release includes the recent optimization-related changes. Immediately afterwards, our sync cycle time increased from the typical 18-20 minutes to 1.5 hours. I had to write a tool to analyze and compare our logs from past sync cycles and extract timing information. Here's the output:

Some comments on the screenshot: it compares the main steps of our sync cycle across four logs (vertical columns). The first log is from a morning run, some time before the release; the second is from soon after the release, and the last two are the most recent. As you can see, the `pull-ts` step is the culprit: it went from a mere 3 minutes to more than an hour. During this step, all we do is run `manage.py sync_stores --skip-missing --project=<project_id>` and wait for it to finish.
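For reference, the pr_diff at the end of this record points at `update_newer()` in `pootle/apps/pootle_store/syncer.py`. A minimal standalone sketch of that off-by-one (the function and attribute names come from the record; the free-standing form is illustrative, not the real method):

```python
# Sketch of the comparison behind the slowdown. With '>=', a store whose
# last_sync_revision already equals the newest unit revision is still
# reported as needing a sync, so every store is rewritten on every run.
def update_newer(file_exists, last_revision, last_sync_revision):
    old = (not file_exists) or (last_revision >= last_sync_revision)
    fixed = (not file_exists) or (last_revision > last_sync_revision)
    return old, fixed

# An already-synced store: the old check resyncs it, the fixed one skips it.
print(update_newer(True, 100, 100))  # (True, False)
```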
| [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport logging\nimport os\nfrom collections import namedtuple\n\nfrom translate.storage.factory import getclass\n\nfrom django.utils.functional import cached_property\n\nfrom pootle.core.delegate import format_classes\nfrom pootle.core.log import log\nfrom pootle.core.url_helpers import split_pootle_path\n\nfrom .models import Unit\nfrom .util import get_change_str\n\n\nclass UnitSyncer(object):\n\n def __init__(self, unit):\n self.unit = unit\n\n @property\n def context(self):\n return self.unit.getcontext()\n\n @property\n def developer_notes(self):\n return self.unit.getnotes(origin=\"developer\")\n\n @property\n def isfuzzy(self):\n return self.unit.isfuzzy()\n\n @property\n def isobsolete(self):\n return self.unit.isobsolete()\n\n @property\n def locations(self):\n return self.unit.getlocations()\n\n @property\n def source(self):\n return self.unit.source\n\n @property\n def target(self):\n return self.unit.target\n\n @property\n def translator_notes(self):\n return self.unit.getnotes(origin=\"translator\")\n\n @property\n def unitid(self):\n return self.unit.getid()\n\n @property\n def unit_class(self):\n return self.unit.store.syncer.unit_class\n\n def convert(self, unitclass=None):\n newunit = self.create_unit(\n unitclass or self.unit_class)\n self.set_target(newunit)\n self.set_fuzzy(newunit)\n self.set_locations(newunit)\n self.set_developer_notes(newunit)\n self.set_translator_notes(newunit)\n self.set_unitid(newunit)\n self.set_context(newunit)\n self.set_obsolete(newunit)\n return newunit\n\n def create_unit(self, unitclass):\n return unitclass(self.source)\n\n def set_context(self, newunit):\n newunit.setcontext(self.context)\n\n def set_developer_notes(self, newunit):\n notes = self.developer_notes\n if notes:\n newunit.addnote(notes, origin=\"developer\")\n\n def set_fuzzy(self, newunit):\n newunit.markfuzzy(self.isfuzzy)\n\n def set_locations(self, newunit):\n locations = self.locations\n if locations:\n newunit.addlocations(locations)\n\n def set_obsolete(self, newunit):\n if self.isobsolete:\n newunit.makeobsolete()\n\n def set_target(self, newunit):\n newunit.target = self.target\n\n def set_translator_notes(self, newunit):\n notes = self.translator_notes\n if notes:\n newunit.addnote(notes, origin=\"translator\")\n\n def set_unitid(self, newunit):\n newunit.setid(self.unitid)\n\n\nclass StoreSyncer(object):\n unit_sync_class = UnitSyncer\n\n def __init__(self, store):\n self.store = store\n\n @cached_property\n def disk_store(self):\n return self.store.file.store\n\n @property\n def translation_project(self):\n return self.store.translation_project\n\n @property\n def language(self):\n return self.translation_project.language\n\n @property\n def project(self):\n return self.translation_project.project\n\n @property\n def source_language(self):\n return self.project.source_language\n\n @property\n def store_file_path(self):\n return os.path.join(\n self.translation_project.abs_real_path,\n *split_pootle_path(self.store.pootle_path)[2:])\n\n @property\n def relative_file_path(self):\n path_parts = split_pootle_path(self.store.pootle_path)\n path_prefix = [path_parts[1]]\n if self.project.get_treestyle() != \"gnu\":\n path_prefix.append(path_parts[0])\n return 
os.path.join(*(path_prefix + list(path_parts[2:])))\n\n @property\n def unit_class(self):\n return self.file_class.UnitClass\n\n @cached_property\n def file_class(self):\n # get a plugin adapted file_class\n fileclass = format_classes.gather().get(\n str(self.store.filetype.extension))\n if fileclass:\n return fileclass\n if self.store.is_template:\n # namedtuple is equiv here of object() with name attr\n return self._getclass(\n namedtuple(\"instance\", \"name\")(\n name=\".\".join(\n [os.path.splitext(self.store.name)[0],\n str(self.store.filetype.extension)])))\n return self._getclass(self.store)\n\n def convert(self, fileclass=None):\n \"\"\"export to fileclass\"\"\"\n fileclass = fileclass or self.file_class\n logging.debug(\n u\"Converting %s to %s\",\n self.store.pootle_path,\n fileclass)\n output = fileclass()\n output.settargetlanguage(self.language.code)\n # FIXME: we should add some headers\n for unit in self.store.units.iterator():\n output.addunit(\n self.unit_sync_class(unit).convert(output.UnitClass))\n return output\n\n def _getclass(self, obj):\n try:\n return getclass(obj)\n except ValueError:\n raise ValueError(\n \"Unable to find conversion class for Store '%s'\"\n % self.store.name)\n\n def get_new_units(self, old_ids, new_ids):\n return self.store.findid_bulk(\n [self.dbid_index.get(uid)\n for uid\n in new_ids - old_ids])\n\n def get_units_to_obsolete(self, old_ids, new_ids):\n for uid in old_ids - new_ids:\n unit = self.disk_store.findid(uid)\n if unit and not unit.isobsolete():\n yield unit\n\n def obsolete_unit(self, unit, conservative):\n deleted = not unit.istranslated()\n obsoleted = (\n not deleted\n and not conservative)\n if obsoleted:\n unit.makeobsolete()\n deleted = not unit.isobsolete()\n if deleted:\n del unit\n return obsoleted, deleted\n\n def update_structure(self, obsolete_units, new_units, conservative):\n obsolete = 0\n deleted = 0\n added = 0\n for unit in obsolete_units:\n _obsolete, _deleted = self.obsolete_unit(unit, conservative)\n if _obsolete:\n obsolete += 1\n if _deleted:\n deleted += 1\n for unit in new_units:\n newunit = unit.convert(self.disk_store.UnitClass)\n self.disk_store.addunit(newunit)\n added += 1\n return obsolete, deleted, added\n\n def create_store_file(self, last_revision, user):\n logging.debug(u\"Creating file %s\", self.store.pootle_path)\n store = self.convert()\n if not os.path.exists(os.path.dirname(self.store_file_path)):\n os.makedirs(os.path.dirname(self.store_file_path))\n self.store.file = self.relative_file_path\n store.savefile(self.store_file_path)\n log(u\"Created file for %s [revision: %d]\" %\n (self.store.pootle_path, last_revision))\n self.update_store_header(user=user)\n self.store.file.savestore()\n self.store.file_mtime = self.store.get_file_mtime()\n self.store.last_sync_revision = last_revision\n self.store.save()\n\n def update_newer(self, last_revision):\n return (\n not self.store.file.exists()\n or (last_revision >= self.store.last_sync_revision))\n\n @cached_property\n def dbid_index(self):\n \"\"\"build a quick mapping index between unit ids and database ids\"\"\"\n return dict(\n self.store.unit_set.live().values_list('unitid', 'id'))\n\n def sync(self, update_structure=False, conservative=True,\n user=None, only_newer=True):\n last_revision = self.store.get_max_unit_revision()\n\n # TODO only_newer -> not force\n if only_newer and not self.update_newer(last_revision):\n logging.info(\n u\"[sync] No updates for %s after [revision: %d]\",\n self.store.pootle_path, 
self.store.last_sync_revision)\n return\n\n if not self.store.file.exists():\n self.create_store_file(last_revision, user)\n return\n\n if conservative and self.store.is_template:\n return\n\n file_changed, changes = self.sync_store(\n last_revision,\n update_structure,\n conservative)\n self.save_store(\n last_revision,\n user,\n changes,\n (file_changed or not conservative))\n\n def sync_store(self, last_revision, update_structure, conservative):\n logging.info(u\"Syncing %s\", self.store.pootle_path)\n old_ids = set(self.disk_store.getids())\n new_ids = set(self.dbid_index.keys())\n file_changed = False\n changes = {}\n if update_structure:\n obsolete_units = self.get_units_to_obsolete(old_ids, new_ids)\n new_units = self.get_new_units(old_ids, new_ids)\n if obsolete_units or new_units:\n file_changed = True\n (changes['obsolete'],\n changes['deleted'],\n changes['added']) = self.update_structure(\n obsolete_units,\n new_units,\n conservative=conservative)\n changes[\"updated\"] = self.sync_units(\n self.get_common_units(\n set(self.dbid_index.get(uid)\n for uid\n in old_ids & new_ids),\n last_revision,\n conservative))\n return bool(file_changed or any(changes.values())), changes\n\n def save_store(self, last_revision, user, changes, updated):\n # TODO conservative -> not overwrite\n if updated:\n self.update_store_header(user=user)\n self.store.file.savestore()\n self.store.file_mtime = self.store.get_file_mtime()\n log(u\"[sync] File saved; %s units in %s [revision: %d]\" %\n (get_change_str(changes),\n self.store.pootle_path,\n last_revision))\n else:\n logging.info(\n u\"[sync] nothing changed in %s [revision: %d]\",\n self.store.pootle_path,\n last_revision)\n self.store.last_sync_revision = last_revision\n self.store.save()\n\n def get_revision_filters(self, last_revision):\n # Get units modified after last sync and before this sync started\n filter_by = {\n 'revision__lte': last_revision,\n 'store': self.store}\n # Sync all units if first sync\n if self.store.last_sync_revision is not None:\n filter_by.update({'revision__gt': self.store.last_sync_revision})\n return filter_by\n\n def get_modified_units(self, last_revision):\n return set(\n Unit.objects.filter(**self.get_revision_filters(last_revision))\n .values_list('id', flat=True).distinct()\n if last_revision > self.store.last_sync_revision\n else [])\n\n def get_common_units(self, common_dbids, last_revision, conservative):\n if conservative:\n # Sync only modified units\n common_dbids &= self.get_modified_units(last_revision)\n return self.store.findid_bulk(list(common_dbids))\n\n def sync_units(self, units):\n updated = 0\n for unit in units:\n match = self.disk_store.findid(unit.getid())\n if match is not None:\n changed = unit.sync(match)\n if changed:\n updated += 1\n return updated\n\n def update_store_header(self, **kwargs_):\n self.disk_store.settargetlanguage(self.language.code)\n self.disk_store.setsourcelanguage(self.source_language.code)\n",
"path": "pootle/apps/pootle_store/syncer.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport logging\nimport os\nfrom collections import namedtuple\n\nfrom translate.storage.factory import getclass\n\nfrom django.utils.functional import cached_property\n\nfrom pootle.core.delegate import format_classes\nfrom pootle.core.log import log\nfrom pootle.core.url_helpers import split_pootle_path\n\nfrom .models import Unit\nfrom .util import get_change_str\n\n\nclass UnitSyncer(object):\n\n def __init__(self, unit):\n self.unit = unit\n\n @property\n def context(self):\n return self.unit.getcontext()\n\n @property\n def developer_notes(self):\n return self.unit.getnotes(origin=\"developer\")\n\n @property\n def isfuzzy(self):\n return self.unit.isfuzzy()\n\n @property\n def isobsolete(self):\n return self.unit.isobsolete()\n\n @property\n def locations(self):\n return self.unit.getlocations()\n\n @property\n def source(self):\n return self.unit.source\n\n @property\n def target(self):\n return self.unit.target\n\n @property\n def translator_notes(self):\n return self.unit.getnotes(origin=\"translator\")\n\n @property\n def unitid(self):\n return self.unit.getid()\n\n @property\n def unit_class(self):\n return self.unit.store.syncer.unit_class\n\n def convert(self, unitclass=None):\n newunit = self.create_unit(\n unitclass or self.unit_class)\n self.set_target(newunit)\n self.set_fuzzy(newunit)\n self.set_locations(newunit)\n self.set_developer_notes(newunit)\n self.set_translator_notes(newunit)\n self.set_unitid(newunit)\n self.set_context(newunit)\n self.set_obsolete(newunit)\n return newunit\n\n def create_unit(self, unitclass):\n return unitclass(self.source)\n\n def set_context(self, newunit):\n newunit.setcontext(self.context)\n\n def set_developer_notes(self, newunit):\n notes = self.developer_notes\n if notes:\n newunit.addnote(notes, origin=\"developer\")\n\n def set_fuzzy(self, newunit):\n newunit.markfuzzy(self.isfuzzy)\n\n def set_locations(self, newunit):\n locations = self.locations\n if locations:\n newunit.addlocations(locations)\n\n def set_obsolete(self, newunit):\n if self.isobsolete:\n newunit.makeobsolete()\n\n def set_target(self, newunit):\n newunit.target = self.target\n\n def set_translator_notes(self, newunit):\n notes = self.translator_notes\n if notes:\n newunit.addnote(notes, origin=\"translator\")\n\n def set_unitid(self, newunit):\n newunit.setid(self.unitid)\n\n\nclass StoreSyncer(object):\n unit_sync_class = UnitSyncer\n\n def __init__(self, store):\n self.store = store\n\n @cached_property\n def disk_store(self):\n return self.store.file.store\n\n @property\n def translation_project(self):\n return self.store.translation_project\n\n @property\n def language(self):\n return self.translation_project.language\n\n @property\n def project(self):\n return self.translation_project.project\n\n @property\n def source_language(self):\n return self.project.source_language\n\n @property\n def store_file_path(self):\n return os.path.join(\n self.translation_project.abs_real_path,\n *split_pootle_path(self.store.pootle_path)[2:])\n\n @property\n def relative_file_path(self):\n path_parts = split_pootle_path(self.store.pootle_path)\n path_prefix = [path_parts[1]]\n if self.project.get_treestyle() != \"gnu\":\n path_prefix.append(path_parts[0])\n return 
os.path.join(*(path_prefix + list(path_parts[2:])))\n\n @property\n def unit_class(self):\n return self.file_class.UnitClass\n\n @cached_property\n def file_class(self):\n # get a plugin adapted file_class\n fileclass = format_classes.gather().get(\n str(self.store.filetype.extension))\n if fileclass:\n return fileclass\n if self.store.is_template:\n # namedtuple is equiv here of object() with name attr\n return self._getclass(\n namedtuple(\"instance\", \"name\")(\n name=\".\".join(\n [os.path.splitext(self.store.name)[0],\n str(self.store.filetype.extension)])))\n return self._getclass(self.store)\n\n def convert(self, fileclass=None):\n \"\"\"export to fileclass\"\"\"\n fileclass = fileclass or self.file_class\n logging.debug(\n u\"Converting %s to %s\",\n self.store.pootle_path,\n fileclass)\n output = fileclass()\n output.settargetlanguage(self.language.code)\n # FIXME: we should add some headers\n for unit in self.store.units.iterator():\n output.addunit(\n self.unit_sync_class(unit).convert(output.UnitClass))\n return output\n\n def _getclass(self, obj):\n try:\n return getclass(obj)\n except ValueError:\n raise ValueError(\n \"Unable to find conversion class for Store '%s'\"\n % self.store.name)\n\n def get_new_units(self, old_ids, new_ids):\n return self.store.findid_bulk(\n [self.dbid_index.get(uid)\n for uid\n in new_ids - old_ids])\n\n def get_units_to_obsolete(self, old_ids, new_ids):\n for uid in old_ids - new_ids:\n unit = self.disk_store.findid(uid)\n if unit and not unit.isobsolete():\n yield unit\n\n def obsolete_unit(self, unit, conservative):\n deleted = not unit.istranslated()\n obsoleted = (\n not deleted\n and not conservative)\n if obsoleted:\n unit.makeobsolete()\n deleted = not unit.isobsolete()\n if deleted:\n del unit\n return obsoleted, deleted\n\n def update_structure(self, obsolete_units, new_units, conservative):\n obsolete = 0\n deleted = 0\n added = 0\n for unit in obsolete_units:\n _obsolete, _deleted = self.obsolete_unit(unit, conservative)\n if _obsolete:\n obsolete += 1\n if _deleted:\n deleted += 1\n for unit in new_units:\n newunit = unit.convert(self.disk_store.UnitClass)\n self.disk_store.addunit(newunit)\n added += 1\n return obsolete, deleted, added\n\n def create_store_file(self, last_revision, user):\n logging.debug(u\"Creating file %s\", self.store.pootle_path)\n store = self.convert()\n if not os.path.exists(os.path.dirname(self.store_file_path)):\n os.makedirs(os.path.dirname(self.store_file_path))\n self.store.file = self.relative_file_path\n store.savefile(self.store_file_path)\n log(u\"Created file for %s [revision: %d]\" %\n (self.store.pootle_path, last_revision))\n self.update_store_header(user=user)\n self.store.file.savestore()\n self.store.file_mtime = self.store.get_file_mtime()\n self.store.last_sync_revision = last_revision\n self.store.save()\n\n def update_newer(self, last_revision):\n return (\n not self.store.file.exists()\n or last_revision > self.store.last_sync_revision\n )\n\n @cached_property\n def dbid_index(self):\n \"\"\"build a quick mapping index between unit ids and database ids\"\"\"\n return dict(\n self.store.unit_set.live().values_list('unitid', 'id'))\n\n def sync(self, update_structure=False, conservative=True,\n user=None, only_newer=True):\n last_revision = self.store.get_max_unit_revision()\n\n # TODO only_newer -> not force\n if only_newer and not self.update_newer(last_revision):\n logging.info(\n u\"[sync] No updates for %s after [revision: %d]\",\n self.store.pootle_path, 
self.store.last_sync_revision)\n return\n\n if not self.store.file.exists():\n self.create_store_file(last_revision, user)\n return\n\n if conservative and self.store.is_template:\n return\n\n file_changed, changes = self.sync_store(\n last_revision,\n update_structure,\n conservative)\n self.save_store(\n last_revision,\n user,\n changes,\n (file_changed or not conservative))\n\n def sync_store(self, last_revision, update_structure, conservative):\n logging.info(u\"Syncing %s\", self.store.pootle_path)\n old_ids = set(self.disk_store.getids())\n new_ids = set(self.dbid_index.keys())\n file_changed = False\n changes = {}\n if update_structure:\n obsolete_units = self.get_units_to_obsolete(old_ids, new_ids)\n new_units = self.get_new_units(old_ids, new_ids)\n if obsolete_units or new_units:\n file_changed = True\n (changes['obsolete'],\n changes['deleted'],\n changes['added']) = self.update_structure(\n obsolete_units,\n new_units,\n conservative=conservative)\n changes[\"updated\"] = self.sync_units(\n self.get_common_units(\n set(self.dbid_index.get(uid)\n for uid\n in old_ids & new_ids),\n last_revision,\n conservative))\n return bool(file_changed or any(changes.values())), changes\n\n def save_store(self, last_revision, user, changes, updated):\n # TODO conservative -> not overwrite\n if updated:\n self.update_store_header(user=user)\n self.store.file.savestore()\n self.store.file_mtime = self.store.get_file_mtime()\n log(u\"[sync] File saved; %s units in %s [revision: %d]\" %\n (get_change_str(changes),\n self.store.pootle_path,\n last_revision))\n else:\n logging.info(\n u\"[sync] nothing changed in %s [revision: %d]\",\n self.store.pootle_path,\n last_revision)\n self.store.last_sync_revision = last_revision\n self.store.save()\n\n def get_revision_filters(self, last_revision):\n # Get units modified after last sync and before this sync started\n filter_by = {\n 'revision__lte': last_revision,\n 'store': self.store}\n # Sync all units if first sync\n if self.store.last_sync_revision is not None:\n filter_by.update({'revision__gt': self.store.last_sync_revision})\n return filter_by\n\n def get_modified_units(self, last_revision):\n return set(\n Unit.objects.filter(**self.get_revision_filters(last_revision))\n .values_list('id', flat=True).distinct()\n if last_revision > self.store.last_sync_revision\n else [])\n\n def get_common_units(self, common_dbids, last_revision, conservative):\n if conservative:\n # Sync only modified units\n common_dbids &= self.get_modified_units(last_revision)\n return self.store.findid_bulk(list(common_dbids))\n\n def sync_units(self, units):\n updated = 0\n for unit in units:\n match = self.disk_store.findid(unit.getid())\n if match is not None:\n changed = unit.sync(match)\n if changed:\n updated += 1\n return updated\n\n def update_store_header(self, **kwargs_):\n self.disk_store.settargetlanguage(self.language.code)\n self.disk_store.setsourcelanguage(self.source_language.code)\n",
"path": "pootle/apps/pootle_store/syncer.py"
}
] | diff --git a/pootle/apps/pootle_store/syncer.py b/pootle/apps/pootle_store/syncer.py
index 6642c88f9a4..97cb9a449c6 100644
--- a/pootle/apps/pootle_store/syncer.py
+++ b/pootle/apps/pootle_store/syncer.py
@@ -256,7 +256,8 @@ def create_store_file(self, last_revision, user):
def update_newer(self, last_revision):
return (
not self.store.file.exists()
- or (last_revision >= self.store.last_sync_revision))
+ or last_revision > self.store.last_sync_revision
+ )
@cached_property
def dbid_index(self):
|
twisted__twisted-12027 | twisted uses the deprecated three-argument form of generator.throw
**Describe the incorrect behavior you saw**
`src/twisted/python/failure.py` does `return g.throw(self.type, self.value, self.tb)`. This is deprecated in Python 3.12: https://github.com/python/cpython/issues/96348 , https://docs.python.org/3.12/whatsnew/3.12.html#deprecated . This is a particular problem for https://github.com/fedora-infra/fedora-messaging consumers - they print a deprecation warning about once a second.
**Describe how to cause this behavior**
Try using twisted with Python 3.12, in a way that hits this codepath (I'm not sure precisely how to do that other than "use a fedora-messaging consumer", sorry). You get a deprecation warning:
```
Oct 27 23:11:08 openqa-lab01.iad2.fedoraproject.org fedora-messaging[172628]: /usr/lib/python3.12/site-packages/twisted/python/failure.py:518: DeprecationWarning: the (type, exc, tb) signature of throw() is deprecated, use the single-arg signature instead.
Oct 27 23:11:08 openqa-lab01.iad2.fedoraproject.org fedora-messaging[172628]: return g.throw(self.type, self.value, self.tb)
```
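For a standalone reproduction outside fedora-messaging, here is a minimal sketch of my own (any suspended generator will do) that triggers the same warning on Python 3.12:

```python
import sys
import warnings

def gen():
    yield  # suspend here so throw() can be delivered into the generator

g = gen()
next(g)

try:
    raise ValueError("boom")
except ValueError:
    exc_type, exc_value, exc_tb = sys.exc_info()

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    try:
        g.throw(exc_type, exc_value, exc_tb)  # deprecated three-argument form
    except ValueError:
        pass

# On 3.12 this prints the DeprecationWarning; on older versions the list is empty.
print([str(w.message) for w in caught])
```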
**Describe the correct behavior you'd like to see**
No deprecation warning.
**Testing environment**
Python 3.12 on Fedora 39; I don't think anything else is relevant. The code on current git trunk still uses the deprecated form.
**Additional context**
I would send a pull request changing this, but I'm unclear on [whether it's safe to just use the modern one-argument form on Python 3.8](https://github.com/python/cpython/issues/96348#issuecomment-1783626480).
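For what it's worth, a hedged sketch of the single-argument form (my reading of the linked CPython thread, not necessarily the patch Twisted will adopt): `BaseException.with_traceback()` has been available for the entire Python 3 line, so attaching the traceback to the instance and passing only the instance should behave the same on 3.8+:

```python
# Sketch only: mirrors Failure.throwExceptionIntoGenerator, assuming
# self.value is an exception instance (the common case in failure.py).
def throwExceptionIntoGenerator(self, g):
    return g.throw(self.value.with_traceback(self.tb))
```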
| [
{
"content": "# -*- test-case-name: twisted.test.test_failure -*-\n# See also test suite twisted.test.test_pbfailure\n\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\n\"\"\"\nAsynchronous-friendly error mechanism.\n\nSee L{Failure}.\n\"\"\"\n\n\n# System Imports\nimport builtins\nimport copy\nimport inspect\nimport linecache\nimport sys\nfrom inspect import getmro\nfrom io import StringIO\nfrom typing import Callable, NoReturn, TypeVar\n\nimport opcode\n\nfrom twisted.python import reflect\n\n_T_Callable = TypeVar(\"_T_Callable\", bound=Callable[..., object])\n\ncount = 0\ntraceupLength = 4\n\n\nclass DefaultException(Exception):\n pass\n\n\ndef format_frames(frames, write, detail=\"default\"):\n \"\"\"\n Format and write frames.\n\n @param frames: is a list of frames as used by Failure.frames, with\n each frame being a list of\n (funcName, fileName, lineNumber, locals.items(), globals.items())\n @type frames: list\n @param write: this will be called with formatted strings.\n @type write: callable\n @param detail: Four detail levels are available:\n default, brief, verbose, and verbose-vars-not-captured.\n C{Failure.printDetailedTraceback} uses the latter when the caller asks\n for verbose, but no vars were captured, so that an explicit warning\n about the missing data is shown.\n @type detail: string\n \"\"\"\n if detail not in (\"default\", \"brief\", \"verbose\", \"verbose-vars-not-captured\"):\n raise ValueError(\n \"Detail must be default, brief, verbose, or \"\n \"verbose-vars-not-captured. (not %r)\" % (detail,)\n )\n w = write\n if detail == \"brief\":\n for method, filename, lineno, localVars, globalVars in frames:\n w(f\"{filename}:{lineno}:{method}\\n\")\n elif detail == \"default\":\n for method, filename, lineno, localVars, globalVars in frames:\n w(f' File \"{filename}\", line {lineno}, in {method}\\n')\n w(\" %s\\n\" % linecache.getline(filename, lineno).strip())\n elif detail == \"verbose-vars-not-captured\":\n for method, filename, lineno, localVars, globalVars in frames:\n w(\"%s:%d: %s(...)\\n\" % (filename, lineno, method))\n w(\" [Capture of Locals and Globals disabled (use captureVars=True)]\\n\")\n elif detail == \"verbose\":\n for method, filename, lineno, localVars, globalVars in frames:\n w(\"%s:%d: %s(...)\\n\" % (filename, lineno, method))\n w(\" [ Locals ]\\n\")\n # Note: the repr(val) was (self.pickled and val) or repr(val)))\n for name, val in localVars:\n w(f\" {name} : {repr(val)}\\n\")\n w(\" ( Globals )\\n\")\n for name, val in globalVars:\n w(f\" {name} : {repr(val)}\\n\")\n\n\n# slyphon: i have a need to check for this value in trial\n# so I made it a module-level constant\nEXCEPTION_CAUGHT_HERE = \"--- <exception caught here> ---\"\n\n\nclass NoCurrentExceptionError(Exception):\n \"\"\"\n Raised when trying to create a Failure from the current interpreter\n exception state and there is no current exception state.\n \"\"\"\n\n\ndef _Traceback(stackFrames, tbFrames):\n \"\"\"\n Construct a fake traceback object using a list of frames.\n\n It should have the same API as stdlib to allow interaction with\n other tools.\n\n @param stackFrames: [(methodname, filename, lineno, locals, globals), ...]\n @param tbFrames: [(methodname, filename, lineno, locals, globals), ...]\n \"\"\"\n assert len(tbFrames) > 0, \"Must pass some frames\"\n # We deliberately avoid using recursion here, as the frames list may be\n # long.\n\n # 'stackFrames' is a list of frames above (ie, older than) the point the\n # exception was caught, with oldest at 
the start. Start by building these\n # into a linked list of _Frame objects (with the f_back links pointing back\n # towards the oldest frame).\n stack = None\n for sf in stackFrames:\n stack = _Frame(sf, stack)\n\n # 'tbFrames' is a list of frames from the point the exception was caught,\n # down to where it was thrown, with the oldest at the start. Add these to\n # the linked list of _Frames, but also wrap each one with a _Traceback\n # frame which is linked in the opposite direction (towards the newest\n # frame).\n stack = _Frame(tbFrames[0], stack)\n firstTb = tb = _TracebackFrame(stack)\n for sf in tbFrames[1:]:\n stack = _Frame(sf, stack)\n tb.tb_next = _TracebackFrame(stack)\n tb = tb.tb_next\n\n # Return the first _TracebackFrame.\n return firstTb\n\n\n# The set of attributes for _TracebackFrame, _Frame and _Code were taken from\n# https://docs.python.org/3.11/library/inspect.html Other Pythons may have a\n# few more attributes that should be added if needed.\nclass _TracebackFrame:\n \"\"\"\n Fake traceback object which can be passed to functions in the standard\n library L{traceback} module.\n \"\"\"\n\n def __init__(self, frame):\n \"\"\"\n @param frame: _Frame object\n \"\"\"\n self.tb_frame = frame\n self.tb_lineno = frame.f_lineno\n self.tb_lasti = frame.f_lasti\n self.tb_next = None\n\n\nclass _Frame:\n \"\"\"\n A fake frame object, used by L{_Traceback}.\n\n @ivar f_code: fake L{code<types.CodeType>} object\n @ivar f_lineno: line number\n @ivar f_globals: fake f_globals dictionary (usually empty)\n @ivar f_locals: fake f_locals dictionary (usually empty)\n @ivar f_back: previous stack frame (towards the caller)\n \"\"\"\n\n def __init__(self, frameinfo, back):\n \"\"\"\n @param frameinfo: (methodname, filename, lineno, locals, globals)\n @param back: previous (older) stack frame\n @type back: C{frame}\n \"\"\"\n name, filename, lineno, localz, globalz = frameinfo\n self.f_code = _Code(name, filename)\n self.f_lineno = lineno\n self.f_globals = dict(globalz or {})\n self.f_locals = dict(localz or {})\n self.f_back = back\n self.f_lasti = 0\n self.f_builtins = vars(builtins).copy()\n self.f_trace = None\n\n\nclass _Code:\n \"\"\"\n A fake code object, used by L{_Traceback} via L{_Frame}.\n\n It is intended to have the same API as the stdlib code type to allow\n interoperation with other tools based on that interface.\n \"\"\"\n\n def __init__(self, name, filename):\n self.co_name = name\n self.co_filename = filename\n self.co_lnotab = b\"\"\n self.co_firstlineno = 0\n self.co_argcount = 0\n self.co_varnames = []\n self.co_code = b\"\"\n self.co_cellvars = ()\n self.co_consts = ()\n self.co_flags = 0\n self.co_freevars = ()\n self.co_posonlyargcount = 0\n self.co_kwonlyargcount = 0\n self.co_names = ()\n self.co_nlocals = 0\n self.co_stacksize = 0\n\n def co_positions(self):\n return ((None, None, None, None),)\n\n\n_inlineCallbacksExtraneous = []\n\n\ndef _extraneous(f: _T_Callable) -> _T_Callable:\n \"\"\"\n Mark the given callable as extraneous to inlineCallbacks exception\n reporting; don't show these functions.\n\n @param f: a function that you NEVER WANT TO SEE AGAIN in ANY TRACEBACK\n reported by Failure.\n\n @type f: function\n\n @return: f\n \"\"\"\n _inlineCallbacksExtraneous.append(f.__code__)\n return f\n\n\nclass Failure(BaseException):\n \"\"\"\n A basic abstraction for an error that has occurred.\n\n This is necessary because Python's built-in error mechanisms are\n inconvenient for asynchronous communication.\n\n The C{stack} and C{frame} attributes contain 
frames. Each frame is a tuple\n of (funcName, fileName, lineNumber, localsItems, globalsItems), where\n localsItems and globalsItems are the contents of\n C{locals().items()}/C{globals().items()} for that frame, or an empty tuple\n if those details were not captured.\n\n @ivar value: The exception instance responsible for this failure.\n @ivar type: The exception's class.\n @ivar stack: list of frames, innermost last, excluding C{Failure.__init__}.\n @ivar frames: list of frames, innermost first.\n \"\"\"\n\n pickled = 0\n stack = None\n\n # The opcode of \"yield\" in Python bytecode. We need this in\n # _findFailure in order to identify whether an exception was\n # thrown by a throwExceptionIntoGenerator.\n # on PY3, b'a'[0] == 97 while in py2 b'a'[0] == b'a' opcodes\n # are stored in bytes so we need to properly account for this\n # difference.\n _yieldOpcode = opcode.opmap[\"YIELD_VALUE\"]\n\n def __init__(self, exc_value=None, exc_type=None, exc_tb=None, captureVars=False):\n \"\"\"\n Initialize me with an explanation of the error.\n\n By default, this will use the current C{exception}\n (L{sys.exc_info}()). However, if you want to specify a\n particular kind of failure, you can pass an exception as an\n argument.\n\n If no C{exc_value} is passed, then an \"original\" C{Failure} will\n be searched for. If the current exception handler that this\n C{Failure} is being constructed in is handling an exception\n raised by L{raiseException}, then this C{Failure} will act like\n the original C{Failure}.\n\n For C{exc_tb} only L{traceback} instances or L{None} are allowed.\n If L{None} is supplied for C{exc_value}, the value of C{exc_tb} is\n ignored, otherwise if C{exc_tb} is L{None}, it will be found from\n execution context (ie, L{sys.exc_info}).\n\n @param captureVars: if set, capture locals and globals of stack\n frames. This is pretty slow, and makes no difference unless you\n are going to use L{printDetailedTraceback}.\n \"\"\"\n global count\n count = count + 1\n self.count = count\n self.type = self.value = tb = None\n self.captureVars = captureVars\n\n if isinstance(exc_value, str) and exc_type is None:\n raise TypeError(\"Strings are not supported by Failure\")\n\n stackOffset = 0\n\n if exc_value is None:\n exc_value = self._findFailure()\n\n if exc_value is None:\n self.type, self.value, tb = sys.exc_info()\n if self.type is None:\n raise NoCurrentExceptionError()\n stackOffset = 1\n elif exc_type is None:\n if isinstance(exc_value, Exception):\n self.type = exc_value.__class__\n else:\n # Allow arbitrary objects.\n self.type = type(exc_value)\n self.value = exc_value\n else:\n self.type = exc_type\n self.value = exc_value\n\n if isinstance(self.value, Failure):\n self._extrapolate(self.value)\n return\n\n if hasattr(self.value, \"__failure__\"):\n # For exceptions propagated through coroutine-awaiting (see\n # Deferred.send, AKA Deferred.__next__), which can't be raised as\n # Failure because that would mess up the ability to except: them:\n self._extrapolate(self.value.__failure__)\n\n # Clean up the inherently circular reference established by storing\n # the failure there. This should make the common case of a Twisted\n # / Deferred-returning coroutine somewhat less hard on the garbage\n # collector.\n del self.value.__failure__\n return\n\n if tb is None:\n if exc_tb:\n tb = exc_tb\n elif getattr(self.value, \"__traceback__\", None):\n # Python 3\n tb = self.value.__traceback__\n\n frames = self.frames = []\n stack = self.stack = []\n\n # Added 2003-06-23 by Chris Armstrong. 
Yes, I actually have a\n # use case where I need this traceback object, and I've made\n # sure that it'll be cleaned up.\n self.tb = tb\n\n if tb:\n f = tb.tb_frame\n elif not isinstance(self.value, Failure):\n # We don't do frame introspection since it's expensive,\n # and if we were passed a plain exception with no\n # traceback, it's not useful anyway\n f = stackOffset = None\n\n while stackOffset and f:\n # This excludes this Failure.__init__ frame from the\n # stack, leaving it to start with our caller instead.\n f = f.f_back\n stackOffset -= 1\n\n # Keeps the *full* stack. Formerly in spread.pb.print_excFullStack:\n #\n # The need for this function arises from the fact that several\n # PB classes have the peculiar habit of discarding exceptions\n # with bareword \"except:\"s. This premature exception\n # catching means tracebacks generated here don't tend to show\n # what called upon the PB object.\n\n while f:\n if captureVars:\n localz = f.f_locals.copy()\n if f.f_locals is f.f_globals:\n globalz = {}\n else:\n globalz = f.f_globals.copy()\n for d in globalz, localz:\n if \"__builtins__\" in d:\n del d[\"__builtins__\"]\n localz = localz.items()\n globalz = globalz.items()\n else:\n localz = globalz = ()\n stack.insert(\n 0,\n (\n f.f_code.co_name,\n f.f_code.co_filename,\n f.f_lineno,\n localz,\n globalz,\n ),\n )\n f = f.f_back\n\n while tb is not None:\n f = tb.tb_frame\n if captureVars:\n localz = f.f_locals.copy()\n if f.f_locals is f.f_globals:\n globalz = {}\n else:\n globalz = f.f_globals.copy()\n for d in globalz, localz:\n if \"__builtins__\" in d:\n del d[\"__builtins__\"]\n localz = list(localz.items())\n globalz = list(globalz.items())\n else:\n localz = globalz = ()\n frames.append(\n (\n f.f_code.co_name,\n f.f_code.co_filename,\n tb.tb_lineno,\n localz,\n globalz,\n )\n )\n tb = tb.tb_next\n if inspect.isclass(self.type) and issubclass(self.type, Exception):\n parentCs = getmro(self.type)\n self.parents = list(map(reflect.qual, parentCs))\n else:\n self.parents = [self.type]\n\n def _extrapolate(self, otherFailure):\n \"\"\"\n Extrapolate from one failure into another, copying its stack frames.\n\n @param otherFailure: Another L{Failure}, whose traceback information,\n if any, should be preserved as part of the stack presented by this\n one.\n @type otherFailure: L{Failure}\n \"\"\"\n # Copy all infos from that failure (including self.frames).\n self.__dict__ = copy.copy(otherFailure.__dict__)\n\n # If we are re-throwing a Failure, we merge the stack-trace stored in\n # the failure with the current exception's stack. This integrated with\n # throwExceptionIntoGenerator and allows to provide full stack trace,\n # even if we go through several layers of inlineCallbacks.\n _, _, tb = sys.exc_info()\n frames = []\n while tb is not None:\n f = tb.tb_frame\n if f.f_code not in _inlineCallbacksExtraneous:\n frames.append(\n (f.f_code.co_name, f.f_code.co_filename, tb.tb_lineno, (), ())\n )\n tb = tb.tb_next\n # Merging current stack with stack stored in the Failure.\n frames.extend(self.frames)\n self.frames = frames\n\n def trap(self, *errorTypes):\n \"\"\"\n Trap this failure if its type is in a predetermined list.\n\n This allows you to trap a Failure in an error callback. 
It will be\n automatically re-raised if it is not a type that you expect.\n\n The reason for having this particular API is because it's very useful\n in Deferred errback chains::\n\n def _ebFoo(self, failure):\n r = failure.trap(Spam, Eggs)\n print('The Failure is due to either Spam or Eggs!')\n if r == Spam:\n print('Spam did it!')\n elif r == Eggs:\n print('Eggs did it!')\n\n If the failure is not a Spam or an Eggs, then the Failure will be\n 'passed on' to the next errback. In Python 2 the Failure will be\n raised; in Python 3 the underlying exception will be re-raised.\n\n @type errorTypes: L{Exception}\n \"\"\"\n error = self.check(*errorTypes)\n if not error:\n self.raiseException()\n return error\n\n def check(self, *errorTypes):\n \"\"\"\n Check if this failure's type is in a predetermined list.\n\n @type errorTypes: list of L{Exception} classes or\n fully-qualified class names.\n @returns: the matching L{Exception} type, or None if no match.\n \"\"\"\n for error in errorTypes:\n err = error\n if inspect.isclass(error) and issubclass(error, Exception):\n err = reflect.qual(error)\n if err in self.parents:\n return error\n return None\n\n def raiseException(self) -> NoReturn:\n \"\"\"\n raise the original exception, preserving traceback\n information if available.\n \"\"\"\n raise self.value.with_traceback(self.tb)\n\n @_extraneous\n def throwExceptionIntoGenerator(self, g):\n \"\"\"\n Throw the original exception into the given generator,\n preserving traceback information if available.\n\n @return: The next value yielded from the generator.\n @raise StopIteration: If there are no more values in the generator.\n @raise anything else: Anything that the generator raises.\n \"\"\"\n # Note that the actual magic to find the traceback information\n # is done in _findFailure.\n return g.throw(self.type, self.value, self.tb)\n\n @classmethod\n def _findFailure(cls):\n \"\"\"\n Find the failure that represents the exception currently in context.\n \"\"\"\n tb = sys.exc_info()[-1]\n if not tb:\n return\n\n secondLastTb = None\n lastTb = tb\n while lastTb.tb_next:\n secondLastTb = lastTb\n lastTb = lastTb.tb_next\n\n lastFrame = lastTb.tb_frame\n\n # NOTE: f_locals.get('self') is used rather than\n # f_locals['self'] because psyco frames do not contain\n # anything in their locals() dicts. 
psyco makes debugging\n # difficult anyhow, so losing the Failure objects (and thus\n # the tracebacks) here when it is used is not that big a deal.\n\n # Handle raiseException-originated exceptions\n if lastFrame.f_code is cls.raiseException.__code__:\n return lastFrame.f_locals.get(\"self\")\n\n # Handle throwExceptionIntoGenerator-originated exceptions\n # this is tricky, and differs if the exception was caught\n # inside the generator, or above it:\n\n # It is only really originating from\n # throwExceptionIntoGenerator if the bottom of the traceback\n # is a yield.\n # Pyrex and Cython extensions create traceback frames\n # with no co_code, but they can't yield so we know it's okay to\n # just return here.\n if (not lastFrame.f_code.co_code) or lastFrame.f_code.co_code[\n lastTb.tb_lasti\n ] != cls._yieldOpcode:\n return\n\n # If the exception was caught above the generator.throw\n # (outside the generator), it will appear in the tb (as the\n # second last item):\n if secondLastTb:\n frame = secondLastTb.tb_frame\n if frame.f_code is cls.throwExceptionIntoGenerator.__code__:\n return frame.f_locals.get(\"self\")\n\n # If the exception was caught below the generator.throw\n # (inside the generator), it will appear in the frames' linked\n # list, above the top-level traceback item (which must be the\n # generator frame itself, thus its caller is\n # throwExceptionIntoGenerator).\n frame = tb.tb_frame.f_back\n if frame and frame.f_code is cls.throwExceptionIntoGenerator.__code__:\n return frame.f_locals.get(\"self\")\n\n def __repr__(self) -> str:\n return \"<{} {}: {}>\".format(\n reflect.qual(self.__class__),\n reflect.qual(self.type),\n self.getErrorMessage(),\n )\n\n def __str__(self) -> str:\n return \"[Failure instance: %s]\" % self.getBriefTraceback()\n\n def __getstate__(self):\n \"\"\"Avoid pickling objects in the traceback.\"\"\"\n if self.pickled:\n return self.__dict__\n c = self.__dict__.copy()\n\n c[\"frames\"] = [\n [\n v[0],\n v[1],\n v[2],\n _safeReprVars(v[3]),\n _safeReprVars(v[4]),\n ]\n for v in self.frames\n ]\n\n # Added 2003-06-23. See comment above in __init__\n c[\"tb\"] = None\n\n if self.stack is not None:\n # XXX: This is a band-aid. I can't figure out where these\n # (failure.stack is None) instances are coming from.\n c[\"stack\"] = [\n [\n v[0],\n v[1],\n v[2],\n _safeReprVars(v[3]),\n _safeReprVars(v[4]),\n ]\n for v in self.stack\n ]\n\n c[\"pickled\"] = 1\n return c\n\n def cleanFailure(self):\n \"\"\"\n Remove references to other objects, replacing them with strings.\n\n On Python 3, this will also set the C{__traceback__} attribute of the\n exception instance to L{None}.\n \"\"\"\n self.__dict__ = self.__getstate__()\n if getattr(self.value, \"__traceback__\", None):\n # Python 3\n self.value.__traceback__ = None\n\n def getTracebackObject(self):\n \"\"\"\n Get an object that represents this Failure's stack that can be passed\n to traceback.extract_tb.\n\n If the original traceback object is still present, return that. If this\n traceback object has been lost but we still have the information,\n return a fake traceback object (see L{_Traceback}). 
If there is no\n traceback information at all, return None.\n \"\"\"\n if self.tb is not None:\n return self.tb\n elif len(self.frames) > 0:\n return _Traceback(self.stack, self.frames)\n else:\n return None\n\n def getErrorMessage(self) -> str:\n \"\"\"\n Get a string of the exception which caused this Failure.\n \"\"\"\n if isinstance(self.value, Failure):\n return self.value.getErrorMessage()\n return reflect.safe_str(self.value)\n\n def getBriefTraceback(self) -> str:\n io = StringIO()\n self.printBriefTraceback(file=io)\n return io.getvalue()\n\n def getTraceback(self, elideFrameworkCode: int = 0, detail: str = \"default\") -> str:\n io = StringIO()\n self.printTraceback(\n file=io, elideFrameworkCode=elideFrameworkCode, detail=detail\n )\n return io.getvalue()\n\n def printTraceback(self, file=None, elideFrameworkCode=False, detail=\"default\"):\n \"\"\"\n Emulate Python's standard error reporting mechanism.\n\n @param file: If specified, a file-like object to which to write the\n traceback.\n\n @param elideFrameworkCode: A flag indicating whether to attempt to\n remove uninteresting frames from within Twisted itself from the\n output.\n\n @param detail: A string indicating how much information to include\n in the traceback. Must be one of C{'brief'}, C{'default'}, or\n C{'verbose'}.\n \"\"\"\n if file is None:\n from twisted.python import log\n\n file = log.logerr\n w = file.write\n\n if detail == \"verbose\" and not self.captureVars:\n # We don't have any locals or globals, so rather than show them as\n # empty make the output explicitly say that we don't have them at\n # all.\n formatDetail = \"verbose-vars-not-captured\"\n else:\n formatDetail = detail\n\n # Preamble\n if detail == \"verbose\":\n w(\n \"*--- Failure #%d%s---\\n\"\n % (self.count, (self.pickled and \" (pickled) \") or \" \")\n )\n elif detail == \"brief\":\n if self.frames:\n hasFrames = \"Traceback\"\n else:\n hasFrames = \"Traceback (failure with no frames)\"\n w(\n \"%s: %s: %s\\n\"\n % (hasFrames, reflect.safe_str(self.type), reflect.safe_str(self.value))\n )\n else:\n w(\"Traceback (most recent call last):\\n\")\n\n # Frames, formatted in appropriate style\n if self.frames:\n if not elideFrameworkCode:\n format_frames(self.stack[-traceupLength:], w, formatDetail)\n w(f\"{EXCEPTION_CAUGHT_HERE}\\n\")\n format_frames(self.frames, w, formatDetail)\n elif not detail == \"brief\":\n # Yeah, it's not really a traceback, despite looking like one...\n w(\"Failure: \")\n\n # Postamble, if any\n if not detail == \"brief\":\n w(f\"{reflect.qual(self.type)}: {reflect.safe_str(self.value)}\\n\")\n\n # Chaining\n if isinstance(self.value, Failure):\n # TODO: indentation for chained failures?\n file.write(\" (chained Failure)\\n\")\n self.value.printTraceback(file, elideFrameworkCode, detail)\n if detail == \"verbose\":\n w(\"*--- End of Failure #%d ---\\n\" % self.count)\n\n def printBriefTraceback(self, file=None, elideFrameworkCode=0):\n \"\"\"\n Print a traceback as densely as possible.\n \"\"\"\n self.printTraceback(file, elideFrameworkCode, detail=\"brief\")\n\n def printDetailedTraceback(self, file=None, elideFrameworkCode=0):\n \"\"\"\n Print a traceback with detailed locals and globals information.\n \"\"\"\n self.printTraceback(file, elideFrameworkCode, detail=\"verbose\")\n\n\ndef _safeReprVars(varsDictItems):\n \"\"\"\n Convert a list of (name, object) pairs into (name, repr) pairs.\n\n L{twisted.python.reflect.safe_repr} is used to generate the repr, so no\n exceptions will be raised by faulty C{__repr__} 
methods.\n\n @param varsDictItems: a sequence of (name, value) pairs as returned by e.g.\n C{locals().items()}.\n @returns: a sequence of (name, repr) pairs.\n \"\"\"\n return [(name, reflect.safe_repr(obj)) for (name, obj) in varsDictItems]\n\n\n# slyphon: make post-morteming exceptions tweakable\n\nDO_POST_MORTEM = True\n\n\ndef _debuginit(\n self,\n exc_value=None,\n exc_type=None,\n exc_tb=None,\n captureVars=False,\n Failure__init__=Failure.__init__,\n):\n \"\"\"\n Initialize failure object, possibly spawning pdb.\n \"\"\"\n if (exc_value, exc_type, exc_tb) == (None, None, None):\n exc = sys.exc_info()\n if not exc[0] == self.__class__ and DO_POST_MORTEM:\n try:\n strrepr = str(exc[1])\n except BaseException:\n strrepr = \"broken str\"\n print(\n \"Jumping into debugger for post-mortem of exception '{}':\".format(\n strrepr\n )\n )\n import pdb\n\n pdb.post_mortem(exc[2])\n Failure__init__(self, exc_value, exc_type, exc_tb, captureVars)\n\n\ndef startDebugMode():\n \"\"\"\n Enable debug hooks for Failures.\n \"\"\"\n Failure.__init__ = _debuginit\n",
"path": "src/twisted/python/failure.py"
}
] | [
{
"content": "# -*- test-case-name: twisted.test.test_failure -*-\n# See also test suite twisted.test.test_pbfailure\n\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\n\"\"\"\nAsynchronous-friendly error mechanism.\n\nSee L{Failure}.\n\"\"\"\n\n\n# System Imports\nimport builtins\nimport copy\nimport inspect\nimport linecache\nimport sys\nfrom inspect import getmro\nfrom io import StringIO\nfrom typing import Callable, NoReturn, TypeVar\n\nimport opcode\n\nfrom twisted.python import reflect\n\n_T_Callable = TypeVar(\"_T_Callable\", bound=Callable[..., object])\n\ncount = 0\ntraceupLength = 4\n\n\nclass DefaultException(Exception):\n pass\n\n\ndef format_frames(frames, write, detail=\"default\"):\n \"\"\"\n Format and write frames.\n\n @param frames: is a list of frames as used by Failure.frames, with\n each frame being a list of\n (funcName, fileName, lineNumber, locals.items(), globals.items())\n @type frames: list\n @param write: this will be called with formatted strings.\n @type write: callable\n @param detail: Four detail levels are available:\n default, brief, verbose, and verbose-vars-not-captured.\n C{Failure.printDetailedTraceback} uses the latter when the caller asks\n for verbose, but no vars were captured, so that an explicit warning\n about the missing data is shown.\n @type detail: string\n \"\"\"\n if detail not in (\"default\", \"brief\", \"verbose\", \"verbose-vars-not-captured\"):\n raise ValueError(\n \"Detail must be default, brief, verbose, or \"\n \"verbose-vars-not-captured. (not %r)\" % (detail,)\n )\n w = write\n if detail == \"brief\":\n for method, filename, lineno, localVars, globalVars in frames:\n w(f\"{filename}:{lineno}:{method}\\n\")\n elif detail == \"default\":\n for method, filename, lineno, localVars, globalVars in frames:\n w(f' File \"{filename}\", line {lineno}, in {method}\\n')\n w(\" %s\\n\" % linecache.getline(filename, lineno).strip())\n elif detail == \"verbose-vars-not-captured\":\n for method, filename, lineno, localVars, globalVars in frames:\n w(\"%s:%d: %s(...)\\n\" % (filename, lineno, method))\n w(\" [Capture of Locals and Globals disabled (use captureVars=True)]\\n\")\n elif detail == \"verbose\":\n for method, filename, lineno, localVars, globalVars in frames:\n w(\"%s:%d: %s(...)\\n\" % (filename, lineno, method))\n w(\" [ Locals ]\\n\")\n # Note: the repr(val) was (self.pickled and val) or repr(val)))\n for name, val in localVars:\n w(f\" {name} : {repr(val)}\\n\")\n w(\" ( Globals )\\n\")\n for name, val in globalVars:\n w(f\" {name} : {repr(val)}\\n\")\n\n\n# slyphon: i have a need to check for this value in trial\n# so I made it a module-level constant\nEXCEPTION_CAUGHT_HERE = \"--- <exception caught here> ---\"\n\n\nclass NoCurrentExceptionError(Exception):\n \"\"\"\n Raised when trying to create a Failure from the current interpreter\n exception state and there is no current exception state.\n \"\"\"\n\n\ndef _Traceback(stackFrames, tbFrames):\n \"\"\"\n Construct a fake traceback object using a list of frames.\n\n It should have the same API as stdlib to allow interaction with\n other tools.\n\n @param stackFrames: [(methodname, filename, lineno, locals, globals), ...]\n @param tbFrames: [(methodname, filename, lineno, locals, globals), ...]\n \"\"\"\n assert len(tbFrames) > 0, \"Must pass some frames\"\n # We deliberately avoid using recursion here, as the frames list may be\n # long.\n\n # 'stackFrames' is a list of frames above (ie, older than) the point the\n # exception was caught, with oldest at 
the start. Start by building these\n # into a linked list of _Frame objects (with the f_back links pointing back\n # towards the oldest frame).\n stack = None\n for sf in stackFrames:\n stack = _Frame(sf, stack)\n\n # 'tbFrames' is a list of frames from the point the exception was caught,\n # down to where it was thrown, with the oldest at the start. Add these to\n # the linked list of _Frames, but also wrap each one with a _Traceback\n # frame which is linked in the opposite direction (towards the newest\n # frame).\n stack = _Frame(tbFrames[0], stack)\n firstTb = tb = _TracebackFrame(stack)\n for sf in tbFrames[1:]:\n stack = _Frame(sf, stack)\n tb.tb_next = _TracebackFrame(stack)\n tb = tb.tb_next\n\n # Return the first _TracebackFrame.\n return firstTb\n\n\n# The set of attributes for _TracebackFrame, _Frame and _Code were taken from\n# https://docs.python.org/3.11/library/inspect.html Other Pythons may have a\n# few more attributes that should be added if needed.\nclass _TracebackFrame:\n \"\"\"\n Fake traceback object which can be passed to functions in the standard\n library L{traceback} module.\n \"\"\"\n\n def __init__(self, frame):\n \"\"\"\n @param frame: _Frame object\n \"\"\"\n self.tb_frame = frame\n self.tb_lineno = frame.f_lineno\n self.tb_lasti = frame.f_lasti\n self.tb_next = None\n\n\nclass _Frame:\n \"\"\"\n A fake frame object, used by L{_Traceback}.\n\n @ivar f_code: fake L{code<types.CodeType>} object\n @ivar f_lineno: line number\n @ivar f_globals: fake f_globals dictionary (usually empty)\n @ivar f_locals: fake f_locals dictionary (usually empty)\n @ivar f_back: previous stack frame (towards the caller)\n \"\"\"\n\n def __init__(self, frameinfo, back):\n \"\"\"\n @param frameinfo: (methodname, filename, lineno, locals, globals)\n @param back: previous (older) stack frame\n @type back: C{frame}\n \"\"\"\n name, filename, lineno, localz, globalz = frameinfo\n self.f_code = _Code(name, filename)\n self.f_lineno = lineno\n self.f_globals = dict(globalz or {})\n self.f_locals = dict(localz or {})\n self.f_back = back\n self.f_lasti = 0\n self.f_builtins = vars(builtins).copy()\n self.f_trace = None\n\n\nclass _Code:\n \"\"\"\n A fake code object, used by L{_Traceback} via L{_Frame}.\n\n It is intended to have the same API as the stdlib code type to allow\n interoperation with other tools based on that interface.\n \"\"\"\n\n def __init__(self, name, filename):\n self.co_name = name\n self.co_filename = filename\n self.co_lnotab = b\"\"\n self.co_firstlineno = 0\n self.co_argcount = 0\n self.co_varnames = []\n self.co_code = b\"\"\n self.co_cellvars = ()\n self.co_consts = ()\n self.co_flags = 0\n self.co_freevars = ()\n self.co_posonlyargcount = 0\n self.co_kwonlyargcount = 0\n self.co_names = ()\n self.co_nlocals = 0\n self.co_stacksize = 0\n\n def co_positions(self):\n return ((None, None, None, None),)\n\n\n_inlineCallbacksExtraneous = []\n\n\ndef _extraneous(f: _T_Callable) -> _T_Callable:\n \"\"\"\n Mark the given callable as extraneous to inlineCallbacks exception\n reporting; don't show these functions.\n\n @param f: a function that you NEVER WANT TO SEE AGAIN in ANY TRACEBACK\n reported by Failure.\n\n @type f: function\n\n @return: f\n \"\"\"\n _inlineCallbacksExtraneous.append(f.__code__)\n return f\n\n\nclass Failure(BaseException):\n \"\"\"\n A basic abstraction for an error that has occurred.\n\n This is necessary because Python's built-in error mechanisms are\n inconvenient for asynchronous communication.\n\n The C{stack} and C{frame} attributes contain 
frames. Each frame is a tuple\n of (funcName, fileName, lineNumber, localsItems, globalsItems), where\n localsItems and globalsItems are the contents of\n C{locals().items()}/C{globals().items()} for that frame, or an empty tuple\n if those details were not captured.\n\n @ivar value: The exception instance responsible for this failure.\n @ivar type: The exception's class.\n @ivar stack: list of frames, innermost last, excluding C{Failure.__init__}.\n @ivar frames: list of frames, innermost first.\n \"\"\"\n\n pickled = 0\n stack = None\n\n # The opcode of \"yield\" in Python bytecode. We need this in\n # _findFailure in order to identify whether an exception was\n # thrown by a throwExceptionIntoGenerator.\n # on PY3, b'a'[0] == 97 while in py2 b'a'[0] == b'a' opcodes\n # are stored in bytes so we need to properly account for this\n # difference.\n _yieldOpcode = opcode.opmap[\"YIELD_VALUE\"]\n\n def __init__(self, exc_value=None, exc_type=None, exc_tb=None, captureVars=False):\n \"\"\"\n Initialize me with an explanation of the error.\n\n By default, this will use the current C{exception}\n (L{sys.exc_info}()). However, if you want to specify a\n particular kind of failure, you can pass an exception as an\n argument.\n\n If no C{exc_value} is passed, then an \"original\" C{Failure} will\n be searched for. If the current exception handler that this\n C{Failure} is being constructed in is handling an exception\n raised by L{raiseException}, then this C{Failure} will act like\n the original C{Failure}.\n\n For C{exc_tb} only L{traceback} instances or L{None} are allowed.\n If L{None} is supplied for C{exc_value}, the value of C{exc_tb} is\n ignored, otherwise if C{exc_tb} is L{None}, it will be found from\n execution context (ie, L{sys.exc_info}).\n\n @param captureVars: if set, capture locals and globals of stack\n frames. This is pretty slow, and makes no difference unless you\n are going to use L{printDetailedTraceback}.\n \"\"\"\n global count\n count = count + 1\n self.count = count\n self.type = self.value = tb = None\n self.captureVars = captureVars\n\n if isinstance(exc_value, str) and exc_type is None:\n raise TypeError(\"Strings are not supported by Failure\")\n\n stackOffset = 0\n\n if exc_value is None:\n exc_value = self._findFailure()\n\n if exc_value is None:\n self.type, self.value, tb = sys.exc_info()\n if self.type is None:\n raise NoCurrentExceptionError()\n stackOffset = 1\n elif exc_type is None:\n if isinstance(exc_value, Exception):\n self.type = exc_value.__class__\n else:\n # Allow arbitrary objects.\n self.type = type(exc_value)\n self.value = exc_value\n else:\n self.type = exc_type\n self.value = exc_value\n\n if isinstance(self.value, Failure):\n self._extrapolate(self.value)\n return\n\n if hasattr(self.value, \"__failure__\"):\n # For exceptions propagated through coroutine-awaiting (see\n # Deferred.send, AKA Deferred.__next__), which can't be raised as\n # Failure because that would mess up the ability to except: them:\n self._extrapolate(self.value.__failure__)\n\n # Clean up the inherently circular reference established by storing\n # the failure there. This should make the common case of a Twisted\n # / Deferred-returning coroutine somewhat less hard on the garbage\n # collector.\n del self.value.__failure__\n return\n\n if tb is None:\n if exc_tb:\n tb = exc_tb\n elif getattr(self.value, \"__traceback__\", None):\n # Python 3\n tb = self.value.__traceback__\n\n frames = self.frames = []\n stack = self.stack = []\n\n # Added 2003-06-23 by Chris Armstrong. 
Yes, I actually have a\n # use case where I need this traceback object, and I've made\n # sure that it'll be cleaned up.\n self.tb = tb\n\n if tb:\n f = tb.tb_frame\n elif not isinstance(self.value, Failure):\n # We don't do frame introspection since it's expensive,\n # and if we were passed a plain exception with no\n # traceback, it's not useful anyway\n f = stackOffset = None\n\n while stackOffset and f:\n # This excludes this Failure.__init__ frame from the\n # stack, leaving it to start with our caller instead.\n f = f.f_back\n stackOffset -= 1\n\n # Keeps the *full* stack. Formerly in spread.pb.print_excFullStack:\n #\n # The need for this function arises from the fact that several\n # PB classes have the peculiar habit of discarding exceptions\n # with bareword \"except:\"s. This premature exception\n # catching means tracebacks generated here don't tend to show\n # what called upon the PB object.\n\n while f:\n if captureVars:\n localz = f.f_locals.copy()\n if f.f_locals is f.f_globals:\n globalz = {}\n else:\n globalz = f.f_globals.copy()\n for d in globalz, localz:\n if \"__builtins__\" in d:\n del d[\"__builtins__\"]\n localz = localz.items()\n globalz = globalz.items()\n else:\n localz = globalz = ()\n stack.insert(\n 0,\n (\n f.f_code.co_name,\n f.f_code.co_filename,\n f.f_lineno,\n localz,\n globalz,\n ),\n )\n f = f.f_back\n\n while tb is not None:\n f = tb.tb_frame\n if captureVars:\n localz = f.f_locals.copy()\n if f.f_locals is f.f_globals:\n globalz = {}\n else:\n globalz = f.f_globals.copy()\n for d in globalz, localz:\n if \"__builtins__\" in d:\n del d[\"__builtins__\"]\n localz = list(localz.items())\n globalz = list(globalz.items())\n else:\n localz = globalz = ()\n frames.append(\n (\n f.f_code.co_name,\n f.f_code.co_filename,\n tb.tb_lineno,\n localz,\n globalz,\n )\n )\n tb = tb.tb_next\n if inspect.isclass(self.type) and issubclass(self.type, Exception):\n parentCs = getmro(self.type)\n self.parents = list(map(reflect.qual, parentCs))\n else:\n self.parents = [self.type]\n\n def _extrapolate(self, otherFailure):\n \"\"\"\n Extrapolate from one failure into another, copying its stack frames.\n\n @param otherFailure: Another L{Failure}, whose traceback information,\n if any, should be preserved as part of the stack presented by this\n one.\n @type otherFailure: L{Failure}\n \"\"\"\n # Copy all infos from that failure (including self.frames).\n self.__dict__ = copy.copy(otherFailure.__dict__)\n\n # If we are re-throwing a Failure, we merge the stack-trace stored in\n # the failure with the current exception's stack. This integrated with\n # throwExceptionIntoGenerator and allows to provide full stack trace,\n # even if we go through several layers of inlineCallbacks.\n _, _, tb = sys.exc_info()\n frames = []\n while tb is not None:\n f = tb.tb_frame\n if f.f_code not in _inlineCallbacksExtraneous:\n frames.append(\n (f.f_code.co_name, f.f_code.co_filename, tb.tb_lineno, (), ())\n )\n tb = tb.tb_next\n # Merging current stack with stack stored in the Failure.\n frames.extend(self.frames)\n self.frames = frames\n\n def trap(self, *errorTypes):\n \"\"\"\n Trap this failure if its type is in a predetermined list.\n\n This allows you to trap a Failure in an error callback. 
It will be\n automatically re-raised if it is not a type that you expect.\n\n The reason for having this particular API is because it's very useful\n in Deferred errback chains::\n\n def _ebFoo(self, failure):\n r = failure.trap(Spam, Eggs)\n print('The Failure is due to either Spam or Eggs!')\n if r == Spam:\n print('Spam did it!')\n elif r == Eggs:\n print('Eggs did it!')\n\n If the failure is not a Spam or an Eggs, then the Failure will be\n 'passed on' to the next errback. In Python 2 the Failure will be\n raised; in Python 3 the underlying exception will be re-raised.\n\n @type errorTypes: L{Exception}\n \"\"\"\n error = self.check(*errorTypes)\n if not error:\n self.raiseException()\n return error\n\n def check(self, *errorTypes):\n \"\"\"\n Check if this failure's type is in a predetermined list.\n\n @type errorTypes: list of L{Exception} classes or\n fully-qualified class names.\n @returns: the matching L{Exception} type, or None if no match.\n \"\"\"\n for error in errorTypes:\n err = error\n if inspect.isclass(error) and issubclass(error, Exception):\n err = reflect.qual(error)\n if err in self.parents:\n return error\n return None\n\n def raiseException(self) -> NoReturn:\n \"\"\"\n raise the original exception, preserving traceback\n information if available.\n \"\"\"\n raise self.value.with_traceback(self.tb)\n\n @_extraneous\n def throwExceptionIntoGenerator(self, g):\n \"\"\"\n Throw the original exception into the given generator,\n preserving traceback information if available.\n\n @return: The next value yielded from the generator.\n @raise StopIteration: If there are no more values in the generator.\n @raise anything else: Anything that the generator raises.\n \"\"\"\n # Note that the actual magic to find the traceback information\n # is done in _findFailure.\n return g.throw(self.value.with_traceback(self.tb))\n\n @classmethod\n def _findFailure(cls):\n \"\"\"\n Find the failure that represents the exception currently in context.\n \"\"\"\n tb = sys.exc_info()[-1]\n if not tb:\n return\n\n secondLastTb = None\n lastTb = tb\n while lastTb.tb_next:\n secondLastTb = lastTb\n lastTb = lastTb.tb_next\n\n lastFrame = lastTb.tb_frame\n\n # NOTE: f_locals.get('self') is used rather than\n # f_locals['self'] because psyco frames do not contain\n # anything in their locals() dicts. 
psyco makes debugging\n # difficult anyhow, so losing the Failure objects (and thus\n # the tracebacks) here when it is used is not that big a deal.\n\n # Handle raiseException-originated exceptions\n if lastFrame.f_code is cls.raiseException.__code__:\n return lastFrame.f_locals.get(\"self\")\n\n # Handle throwExceptionIntoGenerator-originated exceptions\n # this is tricky, and differs if the exception was caught\n # inside the generator, or above it:\n\n # It is only really originating from\n # throwExceptionIntoGenerator if the bottom of the traceback\n # is a yield.\n # Pyrex and Cython extensions create traceback frames\n # with no co_code, but they can't yield so we know it's okay to\n # just return here.\n if (not lastFrame.f_code.co_code) or lastFrame.f_code.co_code[\n lastTb.tb_lasti\n ] != cls._yieldOpcode:\n return\n\n # If the exception was caught above the generator.throw\n # (outside the generator), it will appear in the tb (as the\n # second last item):\n if secondLastTb:\n frame = secondLastTb.tb_frame\n if frame.f_code is cls.throwExceptionIntoGenerator.__code__:\n return frame.f_locals.get(\"self\")\n\n # If the exception was caught below the generator.throw\n # (inside the generator), it will appear in the frames' linked\n # list, above the top-level traceback item (which must be the\n # generator frame itself, thus its caller is\n # throwExceptionIntoGenerator).\n frame = tb.tb_frame.f_back\n if frame and frame.f_code is cls.throwExceptionIntoGenerator.__code__:\n return frame.f_locals.get(\"self\")\n\n def __repr__(self) -> str:\n return \"<{} {}: {}>\".format(\n reflect.qual(self.__class__),\n reflect.qual(self.type),\n self.getErrorMessage(),\n )\n\n def __str__(self) -> str:\n return \"[Failure instance: %s]\" % self.getBriefTraceback()\n\n def __getstate__(self):\n \"\"\"Avoid pickling objects in the traceback.\"\"\"\n if self.pickled:\n return self.__dict__\n c = self.__dict__.copy()\n\n c[\"frames\"] = [\n [\n v[0],\n v[1],\n v[2],\n _safeReprVars(v[3]),\n _safeReprVars(v[4]),\n ]\n for v in self.frames\n ]\n\n # Added 2003-06-23. See comment above in __init__\n c[\"tb\"] = None\n\n if self.stack is not None:\n # XXX: This is a band-aid. I can't figure out where these\n # (failure.stack is None) instances are coming from.\n c[\"stack\"] = [\n [\n v[0],\n v[1],\n v[2],\n _safeReprVars(v[3]),\n _safeReprVars(v[4]),\n ]\n for v in self.stack\n ]\n\n c[\"pickled\"] = 1\n return c\n\n def cleanFailure(self):\n \"\"\"\n Remove references to other objects, replacing them with strings.\n\n On Python 3, this will also set the C{__traceback__} attribute of the\n exception instance to L{None}.\n \"\"\"\n self.__dict__ = self.__getstate__()\n if getattr(self.value, \"__traceback__\", None):\n # Python 3\n self.value.__traceback__ = None\n\n def getTracebackObject(self):\n \"\"\"\n Get an object that represents this Failure's stack that can be passed\n to traceback.extract_tb.\n\n If the original traceback object is still present, return that. If this\n traceback object has been lost but we still have the information,\n return a fake traceback object (see L{_Traceback}). 
If there is no\n traceback information at all, return None.\n \"\"\"\n if self.tb is not None:\n return self.tb\n elif len(self.frames) > 0:\n return _Traceback(self.stack, self.frames)\n else:\n return None\n\n def getErrorMessage(self) -> str:\n \"\"\"\n Get a string of the exception which caused this Failure.\n \"\"\"\n if isinstance(self.value, Failure):\n return self.value.getErrorMessage()\n return reflect.safe_str(self.value)\n\n def getBriefTraceback(self) -> str:\n io = StringIO()\n self.printBriefTraceback(file=io)\n return io.getvalue()\n\n def getTraceback(self, elideFrameworkCode: int = 0, detail: str = \"default\") -> str:\n io = StringIO()\n self.printTraceback(\n file=io, elideFrameworkCode=elideFrameworkCode, detail=detail\n )\n return io.getvalue()\n\n def printTraceback(self, file=None, elideFrameworkCode=False, detail=\"default\"):\n \"\"\"\n Emulate Python's standard error reporting mechanism.\n\n @param file: If specified, a file-like object to which to write the\n traceback.\n\n @param elideFrameworkCode: A flag indicating whether to attempt to\n remove uninteresting frames from within Twisted itself from the\n output.\n\n @param detail: A string indicating how much information to include\n in the traceback. Must be one of C{'brief'}, C{'default'}, or\n C{'verbose'}.\n \"\"\"\n if file is None:\n from twisted.python import log\n\n file = log.logerr\n w = file.write\n\n if detail == \"verbose\" and not self.captureVars:\n # We don't have any locals or globals, so rather than show them as\n # empty make the output explicitly say that we don't have them at\n # all.\n formatDetail = \"verbose-vars-not-captured\"\n else:\n formatDetail = detail\n\n # Preamble\n if detail == \"verbose\":\n w(\n \"*--- Failure #%d%s---\\n\"\n % (self.count, (self.pickled and \" (pickled) \") or \" \")\n )\n elif detail == \"brief\":\n if self.frames:\n hasFrames = \"Traceback\"\n else:\n hasFrames = \"Traceback (failure with no frames)\"\n w(\n \"%s: %s: %s\\n\"\n % (hasFrames, reflect.safe_str(self.type), reflect.safe_str(self.value))\n )\n else:\n w(\"Traceback (most recent call last):\\n\")\n\n # Frames, formatted in appropriate style\n if self.frames:\n if not elideFrameworkCode:\n format_frames(self.stack[-traceupLength:], w, formatDetail)\n w(f\"{EXCEPTION_CAUGHT_HERE}\\n\")\n format_frames(self.frames, w, formatDetail)\n elif not detail == \"brief\":\n # Yeah, it's not really a traceback, despite looking like one...\n w(\"Failure: \")\n\n # Postamble, if any\n if not detail == \"brief\":\n w(f\"{reflect.qual(self.type)}: {reflect.safe_str(self.value)}\\n\")\n\n # Chaining\n if isinstance(self.value, Failure):\n # TODO: indentation for chained failures?\n file.write(\" (chained Failure)\\n\")\n self.value.printTraceback(file, elideFrameworkCode, detail)\n if detail == \"verbose\":\n w(\"*--- End of Failure #%d ---\\n\" % self.count)\n\n def printBriefTraceback(self, file=None, elideFrameworkCode=0):\n \"\"\"\n Print a traceback as densely as possible.\n \"\"\"\n self.printTraceback(file, elideFrameworkCode, detail=\"brief\")\n\n def printDetailedTraceback(self, file=None, elideFrameworkCode=0):\n \"\"\"\n Print a traceback with detailed locals and globals information.\n \"\"\"\n self.printTraceback(file, elideFrameworkCode, detail=\"verbose\")\n\n\ndef _safeReprVars(varsDictItems):\n \"\"\"\n Convert a list of (name, object) pairs into (name, repr) pairs.\n\n L{twisted.python.reflect.safe_repr} is used to generate the repr, so no\n exceptions will be raised by faulty C{__repr__} 
methods.\n\n @param varsDictItems: a sequence of (name, value) pairs as returned by e.g.\n C{locals().items()}.\n @returns: a sequence of (name, repr) pairs.\n \"\"\"\n return [(name, reflect.safe_repr(obj)) for (name, obj) in varsDictItems]\n\n\n# slyphon: make post-morteming exceptions tweakable\n\nDO_POST_MORTEM = True\n\n\ndef _debuginit(\n self,\n exc_value=None,\n exc_type=None,\n exc_tb=None,\n captureVars=False,\n Failure__init__=Failure.__init__,\n):\n \"\"\"\n Initialize failure object, possibly spawning pdb.\n \"\"\"\n if (exc_value, exc_type, exc_tb) == (None, None, None):\n exc = sys.exc_info()\n if not exc[0] == self.__class__ and DO_POST_MORTEM:\n try:\n strrepr = str(exc[1])\n except BaseException:\n strrepr = \"broken str\"\n print(\n \"Jumping into debugger for post-mortem of exception '{}':\".format(\n strrepr\n )\n )\n import pdb\n\n pdb.post_mortem(exc[2])\n Failure__init__(self, exc_value, exc_type, exc_tb, captureVars)\n\n\ndef startDebugMode():\n \"\"\"\n Enable debug hooks for Failures.\n \"\"\"\n Failure.__init__ = _debuginit\n",
"path": "src/twisted/python/failure.py"
}
] | diff --git a/src/twisted/newsfragments/12026.bugfix b/src/twisted/newsfragments/12026.bugfix
new file mode 100644
index 00000000000..4f06cf79a4a
--- /dev/null
+++ b/src/twisted/newsfragments/12026.bugfix
@@ -0,0 +1 @@
+twisted.python.failure.Failure now throws exceptions into generators without triggering a deprecation warning on Python 3.12.
diff --git a/src/twisted/python/failure.py b/src/twisted/python/failure.py
index ca893ca4c94..c006d555e55 100644
--- a/src/twisted/python/failure.py
+++ b/src/twisted/python/failure.py
@@ -516,7 +516,7 @@ def throwExceptionIntoGenerator(self, g):
"""
# Note that the actual magic to find the traceback information
# is done in _findFailure.
- return g.throw(self.type, self.value, self.tb)
+ return g.throw(self.value.with_traceback(self.tb))
@classmethod
def _findFailure(cls):
|
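For context, here is a minimal standalone sketch (assumed for illustration, not taken from the Twisted source or its tests) of what the one-line change above addresses: on Python 3.12 the three-argument form of `generator.throw()` emits a `DeprecationWarning`, while passing a single exception instance that carries its traceback is equivalent and warning-free.

```python
import sys

def gen():
    try:
        yield
    except ValueError as e:
        yield f"caught {e}"

g = gen()
next(g)  # advance the generator to its first yield

try:
    raise ValueError("boom")
except ValueError as exc:
    tb = sys.exc_info()[2]
    # Old form, deprecated on Python 3.12:
    #   g.throw(type(exc), exc, tb)
    # New form used by the fix -- a single exception instance carrying its traceback:
    print(g.throw(exc.with_traceback(tb)))  # -> "caught boom"
```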
googleapis__google-cloud-python-2533 | Pubsub message getting wrong attribute for publishTime
According to the [REST docs](https://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage), a `PubsubMessage` has the field `publishTime`.
In [message.py](https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/pubsub/google/cloud/pubsub/message.py), `from_api_repr` instead reads the field `publishTimestamp`, as shown below:
```
instance._service_timestamp = api_repr.get('publishTimestamp')
```
The current tests are self-confirming of this issue, since they simply set up the `api_repr` fixture with `publishTimestamp` as well.
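For illustration, here is a hypothetical API representation following the REST docs (the field names come from the docs; the values are made up), showing why the current lookup silently returns `None`:

``` python
api_repr = {
    'data': 'REVBREJFRUY=',  # base64 for b'DEADBEEF'
    'messageId': '12345',
    'publishTime': '2016-03-18T19:38:22.001393427Z',
}

# The documented field is present:
assert api_repr.get('publishTime') == '2016-03-18T19:38:22.001393427Z'
# The misspelled key is absent, so .get() silently yields None:
assert api_repr.get('publishTimestamp') is None
```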
A quick fix would be to adjust the following:
**message.py**
``` python
@classmethod
def from_api_repr(cls, api_repr):
"""Factory: construct message from API representation.
:type api_repr: dict or None
:param api_repr: The API representation of the message
:rtype: :class:`Message`
:returns: The message created from the response.
"""
data = base64.b64decode(api_repr.get('data', b''))
instance = cls(
data=data, message_id=api_repr['messageId'],
attributes=api_repr.get('attributes'))
instance._service_timestamp = api_repr.get('publishTime')
return instance
```
**test_message.py**
``` python
def test_from_api_repr_no_attributes(self):
from base64 import b64encode as b64
DATA = b'DEADBEEF'
B64_DATA = b64(DATA)
MESSAGE_ID = '12345'
TIMESTAMP = '2016-03-18-19:38:22.001393427Z'
api_repr = {
'data': B64_DATA,
'messageId': MESSAGE_ID,
'publishTime': TIMESTAMP,
}
message = self._getTargetClass().from_api_repr(api_repr)
self.assertEqual(message.data, DATA)
self.assertEqual(message.message_id, MESSAGE_ID)
self.assertEqual(message.attributes, {})
self.assertEqual(message.service_timestamp, TIMESTAMP)
def test_from_api_repr_w_attributes(self):
from base64 import b64encode as b64
DATA = b'DEADBEEF'
B64_DATA = b64(DATA)
MESSAGE_ID = '12345'
ATTRS = {'a': 'b'}
TIMESTAMP = '2016-03-18-19:38:22.001393427Z'
api_repr = {
'data': B64_DATA,
'messageId': MESSAGE_ID,
'publishTime': TIMESTAMP,
'attributes': ATTRS,
}
message = self._getTargetClass().from_api_repr(api_repr)
self.assertEqual(message.data, DATA)
self.assertEqual(message.message_id, MESSAGE_ID)
self.assertEqual(message.service_timestamp, TIMESTAMP)
self.assertEqual(message.attributes, ATTRS)
```
I don't currently have a contributor license agreement signed, but I will work on that. In the meantime, I'm hoping that someone can pick this up.
| [
{
"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Define API Topics.\"\"\"\n\nimport base64\n\nfrom google.cloud._helpers import _rfc3339_to_datetime\n\n\nclass Message(object):\n \"\"\"Messages can be published to a topic and received by subscribers.\n\n See:\n https://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage\n\n :type data: bytes\n :param data: the payload of the message.\n\n :type message_id: string\n :param message_id: An ID assigned to the message by the API.\n\n :type attributes: dict or None\n :param attributes: Extra metadata associated by the publisher with the\n message.\n \"\"\"\n _service_timestamp = None\n\n def __init__(self, data, message_id, attributes=None):\n self.data = data\n self.message_id = message_id\n self._attributes = attributes\n\n @property\n def attributes(self):\n \"\"\"Lazily-constructed attribute dictionary.\"\"\"\n if self._attributes is None:\n self._attributes = {}\n return self._attributes\n\n @property\n def timestamp(self):\n \"\"\"Return sortable timestamp from attributes, if passed.\n\n Allows sorting messages in publication order (assuming consistent\n clocks across all publishers).\n\n :rtype: :class:`datetime.datetime`\n :returns: timestamp (in UTC timezone) parsed from RFC 3339 timestamp\n :raises: ValueError if timestamp not in ``attributes``, or if it does\n not match the RFC 3339 format.\n \"\"\"\n stamp = self.attributes.get('timestamp')\n if stamp is None:\n raise ValueError('No timestamp')\n return _rfc3339_to_datetime(stamp)\n\n @property\n def service_timestamp(self):\n \"\"\"Return server-set timestamp.\n\n :rtype: string\n :returns: timestamp (in UTC timezone) in RFC 3339 format\n \"\"\"\n return self._service_timestamp\n\n @classmethod\n def from_api_repr(cls, api_repr):\n \"\"\"Factory: construct message from API representation.\n\n :type api_repr: dict or None\n :param api_repr: The API representation of the message\n\n :rtype: :class:`Message`\n :returns: The message created from the response.\n \"\"\"\n data = base64.b64decode(api_repr.get('data', b''))\n instance = cls(\n data=data, message_id=api_repr['messageId'],\n attributes=api_repr.get('attributes'))\n instance._service_timestamp = api_repr.get('publishTimestamp')\n return instance\n",
"path": "pubsub/google/cloud/pubsub/message.py"
}
] | [
{
"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Define API Topics.\"\"\"\n\nimport base64\n\nfrom google.cloud._helpers import _rfc3339_to_datetime\n\n\nclass Message(object):\n \"\"\"Messages can be published to a topic and received by subscribers.\n\n See:\n https://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage\n\n :type data: bytes\n :param data: the payload of the message.\n\n :type message_id: string\n :param message_id: An ID assigned to the message by the API.\n\n :type attributes: dict or None\n :param attributes: Extra metadata associated by the publisher with the\n message.\n \"\"\"\n _service_timestamp = None\n\n def __init__(self, data, message_id, attributes=None):\n self.data = data\n self.message_id = message_id\n self._attributes = attributes\n\n @property\n def attributes(self):\n \"\"\"Lazily-constructed attribute dictionary.\"\"\"\n if self._attributes is None:\n self._attributes = {}\n return self._attributes\n\n @property\n def timestamp(self):\n \"\"\"Return sortable timestamp from attributes, if passed.\n\n Allows sorting messages in publication order (assuming consistent\n clocks across all publishers).\n\n :rtype: :class:`datetime.datetime`\n :returns: timestamp (in UTC timezone) parsed from RFC 3339 timestamp\n :raises: ValueError if timestamp not in ``attributes``, or if it does\n not match the RFC 3339 format.\n \"\"\"\n stamp = self.attributes.get('timestamp')\n if stamp is None:\n raise ValueError('No timestamp')\n return _rfc3339_to_datetime(stamp)\n\n @property\n def service_timestamp(self):\n \"\"\"Return server-set timestamp.\n\n :rtype: string\n :returns: timestamp (in UTC timezone) in RFC 3339 format\n \"\"\"\n return self._service_timestamp\n\n @classmethod\n def from_api_repr(cls, api_repr):\n \"\"\"Factory: construct message from API representation.\n\n :type api_repr: dict or None\n :param api_repr: The API representation of the message\n\n :rtype: :class:`Message`\n :returns: The message created from the response.\n \"\"\"\n data = base64.b64decode(api_repr.get('data', b''))\n instance = cls(\n data=data, message_id=api_repr['messageId'],\n attributes=api_repr.get('attributes'))\n instance._service_timestamp = api_repr.get('publishTime')\n return instance\n",
"path": "pubsub/google/cloud/pubsub/message.py"
}
] | diff --git a/pubsub/google/cloud/pubsub/message.py b/pubsub/google/cloud/pubsub/message.py
index 2f810baa5e2e..b20b901639be 100644
--- a/pubsub/google/cloud/pubsub/message.py
+++ b/pubsub/google/cloud/pubsub/message.py
@@ -89,5 +89,5 @@ def from_api_repr(cls, api_repr):
instance = cls(
data=data, message_id=api_repr['messageId'],
attributes=api_repr.get('attributes'))
- instance._service_timestamp = api_repr.get('publishTimestamp')
+ instance._service_timestamp = api_repr.get('publishTime')
return instance
diff --git a/pubsub/unit_tests/test_message.py b/pubsub/unit_tests/test_message.py
index 5d08972e5430..8187eea3cf06 100644
--- a/pubsub/unit_tests/test_message.py
+++ b/pubsub/unit_tests/test_message.py
@@ -98,7 +98,7 @@ def test_from_api_repr_no_attributes(self):
api_repr = {
'data': B64_DATA,
'messageId': MESSAGE_ID,
- 'publishTimestamp': TIMESTAMP,
+ 'publishTime': TIMESTAMP,
}
message = self._getTargetClass().from_api_repr(api_repr)
self.assertEqual(message.data, DATA)
@@ -116,7 +116,7 @@ def test_from_api_repr_w_attributes(self):
api_repr = {
'data': B64_DATA,
'messageId': MESSAGE_ID,
- 'publishTimestamp': TIMESTAMP,
+ 'publishTime': TIMESTAMP,
'attributes': ATTRS,
}
message = self._getTargetClass().from_api_repr(api_repr)
diff --git a/system_tests/pubsub.py b/system_tests/pubsub.py
index 5e4343066de0..25f24427d0f7 100644
--- a/system_tests/pubsub.py
+++ b/system_tests/pubsub.py
@@ -198,10 +198,14 @@ def suction(self):
message1, message2 = sorted(hoover.received,
key=operator.attrgetter('timestamp'))
+
self.assertEqual(message1.data, MESSAGE_1)
self.assertEqual(message1.attributes['extra'], EXTRA_1)
+ self.assertIsNotNone(message1.service_timestamp)
+
self.assertEqual(message2.data, MESSAGE_2)
self.assertEqual(message2.attributes['extra'], EXTRA_2)
+ self.assertIsNotNone(message2.service_timestamp)
def _maybe_emulator_skip(self):
# NOTE: This method is necessary because ``Config.IN_EMULATOR``
|
Lightning-Universe__lightning-flash-1486 | The type of `n_gram` is mislabeled as `bool`; it should be `int`.
## 🐛 Bug
In the Translation Task, the type of `n_gram` is mislabeled as `bool`; it should be `int`.
### To Reproduce
```
flash translation from_hf_datasets --help
```
The following error is raised:
```
translation: error: Configuration check failed :: Parser key "model.n_gram": Expected a <class 'bool'> but got "4"
```
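For clarity, here is a minimal sketch of the failure mode. The `coerce` helper below is hypothetical, a stand-in for the CLI's annotation-driven parsing (the real parser is jsonargparse): a parameter annotated as `bool` rejects the integer string `"4"`, while an `int` annotation accepts it.

```python
def coerce(value: str, annotation: type):
    # Stand-in for annotation-driven CLI parsing: the annotation on
    # TranslationTask.__init__ decides how the raw string is validated.
    if annotation is bool:
        if value.lower() not in ("true", "false"):
            raise TypeError(f'Expected a {bool} but got "{value}"')
        return value.lower() == "true"
    return annotation(value)

coerce("4", int)   # -> 4, works once n_gram is annotated as int
coerce("4", bool)  # -> TypeError, mirroring the reported CLI error
```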
| [
{
"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Dict, Optional, Union\n\nfrom torchmetrics import BLEUScore\n\nfrom flash.core.utilities.imports import _TM_GREATER_EQUAL_0_7_0\nfrom flash.core.utilities.types import LOSS_FN_TYPE, LR_SCHEDULER_TYPE, METRICS_TYPE, OPTIMIZER_TYPE\nfrom flash.text.seq2seq.core.model import Seq2SeqTask\n\n\nclass TranslationTask(Seq2SeqTask):\n \"\"\"The ``TranslationTask`` is a :class:`~flash.Task` for Seq2Seq text translation. For more details, see\n :ref:`translation`.\n\n You can change the backbone to any translation model from `HuggingFace/transformers\n <https://huggingface.co/models?filter=pytorch&pipeline_tag=translation>`__ using the ``backbone`` argument.\n\n Args:\n backbone: backbone model to use for the task.\n max_source_length: The maximum length to pad / truncate input sequences to.\n max_target_length: The maximum length to pad / truncate target sequences to.\n padding: The type of padding to apply. One of: \"longest\" or ``True``, \"max_length\", \"do_not_pad\" or\n ``False``.\n loss_fn: Loss function for training.\n optimizer: Optimizer to use for training.\n lr_scheduler: The LR scheduler to use during training.\n metrics: Metrics to compute for training and evaluation. Defauls to calculating the BLEU metric.\n Changing this argument currently has no effect.\n learning_rate: Learning rate to use for training, defaults to `1e-5`\n num_beams: Number of beams to use in validation when generating predictions. Defaults to `4`\n n_gram: Maximum n_grams to use in metric calculation. Defaults to `4`\n smooth: Apply smoothing in BLEU calculation. 
Defaults to `True`\n enable_ort: Enable Torch ONNX Runtime Optimization: https://onnxruntime.ai/docs/#onnx-runtime-for-training\n \"\"\"\n\n def __init__(\n self,\n backbone: str = \"t5-small\",\n tokenizer_kwargs: Optional[Dict[str, Any]] = None,\n max_source_length: int = 128,\n max_target_length: int = 128,\n padding: Union[str, bool] = \"max_length\",\n loss_fn: LOSS_FN_TYPE = None,\n optimizer: OPTIMIZER_TYPE = \"Adam\",\n lr_scheduler: LR_SCHEDULER_TYPE = None,\n metrics: METRICS_TYPE = None,\n learning_rate: Optional[float] = None,\n num_beams: Optional[int] = 4,\n n_gram: bool = 4,\n smooth: bool = True,\n enable_ort: bool = False,\n ):\n self.save_hyperparameters()\n super().__init__(\n backbone=backbone,\n tokenizer_kwargs=tokenizer_kwargs,\n max_source_length=max_source_length,\n max_target_length=max_target_length,\n padding=padding,\n loss_fn=loss_fn,\n optimizer=optimizer,\n lr_scheduler=lr_scheduler,\n metrics=metrics,\n learning_rate=learning_rate,\n num_beams=num_beams,\n enable_ort=enable_ort,\n )\n self.bleu = BLEUScore(\n n_gram=n_gram,\n smooth=smooth,\n )\n\n @property\n def task(self) -> str:\n return \"translation\"\n\n def compute_metrics(self, generated_tokens, batch, prefix):\n reference_corpus = self.decode(batch[\"labels\"])\n # wrap targets in list as score expects a list of potential references\n reference_corpus = [[reference] for reference in reference_corpus]\n\n translate_corpus = self.decode(generated_tokens)\n translate_corpus = [line for line in translate_corpus]\n\n if _TM_GREATER_EQUAL_0_7_0:\n result = self.bleu(translate_corpus, reference_corpus)\n else:\n result = self.bleu(reference_corpus, translate_corpus)\n self.log(f\"{prefix}_bleu_score\", result, on_step=False, on_epoch=True, prog_bar=True)\n",
"path": "flash/text/seq2seq/translation/model.py"
}
] | [
{
"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Dict, Optional, Union\n\nfrom torchmetrics import BLEUScore\n\nfrom flash.core.utilities.imports import _TM_GREATER_EQUAL_0_7_0\nfrom flash.core.utilities.types import LOSS_FN_TYPE, LR_SCHEDULER_TYPE, METRICS_TYPE, OPTIMIZER_TYPE\nfrom flash.text.seq2seq.core.model import Seq2SeqTask\n\n\nclass TranslationTask(Seq2SeqTask):\n \"\"\"The ``TranslationTask`` is a :class:`~flash.Task` for Seq2Seq text translation. For more details, see\n :ref:`translation`.\n\n You can change the backbone to any translation model from `HuggingFace/transformers\n <https://huggingface.co/models?filter=pytorch&pipeline_tag=translation>`__ using the ``backbone`` argument.\n\n Args:\n backbone: backbone model to use for the task.\n max_source_length: The maximum length to pad / truncate input sequences to.\n max_target_length: The maximum length to pad / truncate target sequences to.\n padding: The type of padding to apply. One of: \"longest\" or ``True``, \"max_length\", \"do_not_pad\" or\n ``False``.\n loss_fn: Loss function for training.\n optimizer: Optimizer to use for training.\n lr_scheduler: The LR scheduler to use during training.\n metrics: Metrics to compute for training and evaluation. Defauls to calculating the BLEU metric.\n Changing this argument currently has no effect.\n learning_rate: Learning rate to use for training, defaults to `1e-5`\n num_beams: Number of beams to use in validation when generating predictions. Defaults to `4`\n n_gram: Maximum n_grams to use in metric calculation. Defaults to `4`\n smooth: Apply smoothing in BLEU calculation. 
Defaults to `True`\n enable_ort: Enable Torch ONNX Runtime Optimization: https://onnxruntime.ai/docs/#onnx-runtime-for-training\n \"\"\"\n\n def __init__(\n self,\n backbone: str = \"t5-small\",\n tokenizer_kwargs: Optional[Dict[str, Any]] = None,\n max_source_length: int = 128,\n max_target_length: int = 128,\n padding: Union[str, bool] = \"max_length\",\n loss_fn: LOSS_FN_TYPE = None,\n optimizer: OPTIMIZER_TYPE = \"Adam\",\n lr_scheduler: LR_SCHEDULER_TYPE = None,\n metrics: METRICS_TYPE = None,\n learning_rate: Optional[float] = None,\n num_beams: Optional[int] = 4,\n n_gram: int = 4,\n smooth: bool = True,\n enable_ort: bool = False,\n ):\n self.save_hyperparameters()\n super().__init__(\n backbone=backbone,\n tokenizer_kwargs=tokenizer_kwargs,\n max_source_length=max_source_length,\n max_target_length=max_target_length,\n padding=padding,\n loss_fn=loss_fn,\n optimizer=optimizer,\n lr_scheduler=lr_scheduler,\n metrics=metrics,\n learning_rate=learning_rate,\n num_beams=num_beams,\n enable_ort=enable_ort,\n )\n self.bleu = BLEUScore(\n n_gram=n_gram,\n smooth=smooth,\n )\n\n @property\n def task(self) -> str:\n return \"translation\"\n\n def compute_metrics(self, generated_tokens, batch, prefix):\n reference_corpus = self.decode(batch[\"labels\"])\n # wrap targets in list as score expects a list of potential references\n reference_corpus = [[reference] for reference in reference_corpus]\n\n translate_corpus = self.decode(generated_tokens)\n translate_corpus = [line for line in translate_corpus]\n\n if _TM_GREATER_EQUAL_0_7_0:\n result = self.bleu(translate_corpus, reference_corpus)\n else:\n result = self.bleu(reference_corpus, translate_corpus)\n self.log(f\"{prefix}_bleu_score\", result, on_step=False, on_epoch=True, prog_bar=True)\n",
"path": "flash/text/seq2seq/translation/model.py"
}
] | diff --git a/flash/text/seq2seq/translation/model.py b/flash/text/seq2seq/translation/model.py
index 0c421999b4..d6365d3864 100644
--- a/flash/text/seq2seq/translation/model.py
+++ b/flash/text/seq2seq/translation/model.py
@@ -58,7 +58,7 @@ def __init__(
metrics: METRICS_TYPE = None,
learning_rate: Optional[float] = None,
num_beams: Optional[int] = 4,
- n_gram: bool = 4,
+ n_gram: int = 4,
smooth: bool = True,
enable_ort: bool = False,
):
|
Pycord-Development__pycord-795 | discord.Permissions.all() create/edit = 401 unauthorized
### Summary
`discord.Permissions.all()` seems to create something different from (or more than) a manually constructed, fully permissioned `Permissions` object.
### Reproduction Steps
My bot gets a 401 unauthorized error when trying to create or edit a role with a `discord.Permissions.all()` object, but succeeds when creating a blank `discord.Permissions()` object and using the `update()` method to set _all_ permissions to `True`.
### Minimal Reproducible Code
```python
# fails 401 unauthorized
admin_permissions = discord.Permissions.all()
await self.bot.guild.create_role(
name='Admin',
permissions=admin_permissions,
)
# Succeeds
admin_permissions = discord.Permissions()
admin_permissions.update(
add_reactions=True,
administrator=True,
attach_files=True,
ban_members=True,
change_nickname=True,
connect=True,
create_instant_invite=True,
create_private_threads=True,
create_public_threads=True,
deafen_members=True,
embed_links=True,
external_emojis=True,
external_stickers=True,
kick_members=True,
manage_channels=True,
manage_emojis=True,
manage_emojis_and_stickers=True,
manage_events=True,
manage_guild=True,
manage_messages=True,
manage_nicknames=True,
manage_permissions=True,
manage_roles=True,
manage_threads=True,
manage_webhooks=True,
mention_everyone=True,
moderate_members=True,
move_members=True,
mute_members=True,
priority_speaker=True,
read_message_history=True,
read_messages=True,
request_to_speak=True,
send_messages=True,
send_messages_in_threads=True,
send_tts_messages=True,
speak=True,
start_embedded_activities=True,
stream=True,
use_application_commands=True,
use_external_emojis=True,
use_external_stickers=True,
use_slash_commands=True,
use_voice_activation=False,
view_audit_log=True,
view_channel=True,
view_guild_insights=True,
)
await self.bot.guild.create_role(
name='Admin',
permissions=admin_permissions,
)
```
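A quick way to confirm that the two objects above really differ is to compare their raw `.value` bitfields. This is a hypothetical check, assuming the affected py-cord alpha is installed; `VALID_FLAGS` is the flag registry visible in the library source quoted below:

```python
import discord

print(discord.Permissions.all().value)  # -1 on the affected version
full = discord.Permissions(**{name: True for name in discord.Permissions.VALID_FLAGS})
print(full.value)  # a large positive integer with all defined bits set
```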
### Expected Results
Using `discord.Permissions.all()` should create a fully permissioned `Permissions` object that can actually be used to create or edit a role without error.
### Actual Results
401 unauthorized
### Intents
All of them
### System Information
- Python v3.9.9-final
- py-cord v2.0.0-alpha
- py-cord pkg_resources: v2.0.0a4757+g02e346db
- aiohttp v3.8.1
- system info: Linux 4.9.0-17-amd64 #1 SMP Debian 4.9.290-1 (2021-12-12)
### Checklist
- [X] I have searched the open issues for duplicates.
- [X] I have shown the entire traceback, if possible.
- [X] I have removed my token from display, if visible.
### Additional Context
_No response_
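For what it's worth, a library-free sketch of why `-1` misbehaves: Python integers are arbitrary-precision, so `-1` does not mean "the 41 permission bits are set"; it is literally the value `-1`, and that raw negative value is what gets serialized into the API payload (which is presumably why the request is rejected):

```python
ALL_BITS = (1 << 41) - 1  # the 41 permission bits defined as of this report

print(ALL_BITS)                   # 2199023255551: an explicit all-bits value
print(-1)                         # -1: the raw value Permissions.all() carries
print(-1 & ALL_BITS == ALL_BITS)  # True: every defined bit is "set" in -1,
                                  # but the negative value itself is still sent
```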
| [
{
"content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-2021 Rapptz\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Callable, Any, ClassVar, Dict, Iterator, Set, TYPE_CHECKING, Tuple, Type, TypeVar, Optional\nfrom .flags import BaseFlags, flag_value, fill_with_flags, alias_flag_value\n\n__all__ = (\n 'Permissions',\n 'PermissionOverwrite',\n)\n\n# A permission alias works like a regular flag but is marked\n# So the PermissionOverwrite knows to work with it\nclass permission_alias(alias_flag_value):\n alias: str\n\n\ndef make_permission_alias(alias: str) -> Callable[[Callable[[Any], int]], permission_alias]:\n def decorator(func: Callable[[Any], int]) -> permission_alias:\n ret = permission_alias(func)\n ret.alias = alias\n return ret\n\n return decorator\n\nP = TypeVar('P', bound='Permissions')\n\n@fill_with_flags()\nclass Permissions(BaseFlags):\n \"\"\"Wraps up the Discord permission value.\n\n The properties provided are two way. You can set and retrieve individual\n bits using the properties as if they were regular bools. This allows\n you to edit permissions.\n\n .. versionchanged:: 1.3\n You can now use keyword arguments to initialize :class:`Permissions`\n similar to :meth:`update`.\n\n .. container:: operations\n\n .. describe:: x == y\n\n Checks if two permissions are equal.\n .. describe:: x != y\n\n Checks if two permissions are not equal.\n .. describe:: x <= y\n\n Checks if a permission is a subset of another permission.\n .. describe:: x >= y\n\n Checks if a permission is a superset of another permission.\n .. describe:: x < y\n\n Checks if a permission is a strict subset of another permission.\n .. describe:: x > y\n\n Checks if a permission is a strict superset of another permission.\n .. describe:: hash(x)\n\n Return the permission's hash.\n .. describe:: iter(x)\n\n Returns an iterator of ``(perm, value)`` pairs. This allows it\n to be, for example, constructed as a dict or a list of pairs.\n Note that aliases are not shown.\n\n Attributes\n -----------\n value: :class:`int`\n The raw value. This value is a bit array field of a 53-bit integer\n representing the currently available permissions. 
You should query\n permissions via the properties rather than using this raw value.\n \"\"\"\n\n __slots__ = ()\n\n def __init__(self, permissions: int = 0, **kwargs: bool):\n if not isinstance(permissions, int):\n raise TypeError(f'Expected int parameter, received {permissions.__class__.__name__} instead.')\n\n self.value = permissions\n for key, value in kwargs.items():\n if key not in self.VALID_FLAGS:\n raise TypeError(f'{key!r} is not a valid permission name.')\n setattr(self, key, value)\n\n def is_subset(self, other: Permissions) -> bool:\n \"\"\"Returns ``True`` if self has the same or fewer permissions as other.\"\"\"\n if isinstance(other, Permissions):\n return (self.value & other.value) == self.value\n else:\n raise TypeError(f\"cannot compare {self.__class__.__name__} with {other.__class__.__name__}\")\n\n def is_superset(self, other: Permissions) -> bool:\n \"\"\"Returns ``True`` if self has the same or more permissions as other.\"\"\"\n if isinstance(other, Permissions):\n return (self.value | other.value) == self.value\n else:\n raise TypeError(f\"cannot compare {self.__class__.__name__} with {other.__class__.__name__}\")\n\n def is_strict_subset(self, other: Permissions) -> bool:\n \"\"\"Returns ``True`` if the permissions on other are a strict subset of those on self.\"\"\"\n return self.is_subset(other) and self != other\n\n def is_strict_superset(self, other: Permissions) -> bool:\n \"\"\"Returns ``True`` if the permissions on other are a strict superset of those on self.\"\"\"\n return self.is_superset(other) and self != other\n\n __le__: Callable[[Permissions], bool] = is_subset\n __ge__: Callable[[Permissions], bool] = is_superset\n __lt__: Callable[[Permissions], bool] = is_strict_subset\n __gt__: Callable[[Permissions], bool] = is_strict_superset\n\n @classmethod\n def none(cls: Type[P]) -> P:\n \"\"\"A factory method that creates a :class:`Permissions` with all\n permissions set to ``False``.\"\"\"\n return cls(0)\n\n @classmethod\n def all(cls: Type[P]) -> P:\n \"\"\"A factory method that creates a :class:`Permissions` with all\n permissions set to ``True``.\n \"\"\"\n return cls(-1)\n\n @classmethod\n def all_channel(cls: Type[P]) -> P:\n \"\"\"A :class:`Permissions` with all channel-specific permissions set to\n ``True`` and the guild-specific ones set to ``False``. The guild-specific\n permissions are currently:\n\n - :attr:`manage_emojis`\n - :attr:`view_audit_log`\n - :attr:`view_guild_insights`\n - :attr:`manage_guild`\n - :attr:`change_nickname`\n - :attr:`manage_nicknames`\n - :attr:`kick_members`\n - :attr:`ban_members`\n - :attr:`administrator`\n\n .. versionchanged:: 1.7\n Added :attr:`stream`, :attr:`priority_speaker` and :attr:`use_slash_commands` permissions.\n\n .. versionchanged:: 2.0\n Added :attr:`create_public_threads`, :attr:`create_private_threads`, :attr:`manage_threads`,\n :attr:`use_external_stickers`, :attr:`send_messages_in_threads` and\n :attr:`request_to_speak` permissions.\n \"\"\"\n return cls(0b111110110110011111101111111111101010001)\n\n @classmethod\n def general(cls: Type[P]) -> P:\n \"\"\"A factory method that creates a :class:`Permissions` with all\n \"General\" permissions from the official Discord UI set to ``True``.\n\n .. 
versionchanged:: 1.7\n Permission :attr:`read_messages` is now included in the general permissions, but\n permissions :attr:`administrator`, :attr:`create_instant_invite`, :attr:`kick_members`,\n :attr:`ban_members`, :attr:`change_nickname` and :attr:`manage_nicknames` are\n no longer part of the general permissions.\n \"\"\"\n return cls(0b01110000000010000000010010110000)\n\n @classmethod\n def membership(cls: Type[P]) -> P:\n \"\"\"A factory method that creates a :class:`Permissions` with all\n \"Membership\" permissions from the official Discord UI set to ``True``.\n\n .. versionadded:: 1.7\n \"\"\"\n return cls(0b00001100000000000000000000000111)\n\n @classmethod\n def text(cls: Type[P]) -> P:\n \"\"\"A factory method that creates a :class:`Permissions` with all\n \"Text\" permissions from the official Discord UI set to ``True``.\n\n .. versionchanged:: 1.7\n Permission :attr:`read_messages` is no longer part of the text permissions.\n Added :attr:`use_slash_commands` permission.\n\n .. versionchanged:: 2.0\n Added :attr:`create_public_threads`, :attr:`create_private_threads`, :attr:`manage_threads`,\n :attr:`send_messages_in_threads` and :attr:`use_external_stickers` permissions.\n \"\"\"\n return cls(0b111110010000000000001111111100001000000)\n\n @classmethod\n def voice(cls: Type[P]) -> P:\n \"\"\"A factory method that creates a :class:`Permissions` with all\n \"Voice\" permissions from the official Discord UI set to ``True``.\"\"\"\n return cls(0b00000011111100000000001100000000)\n\n @classmethod\n def stage(cls: Type[P]) -> P:\n \"\"\"A factory method that creates a :class:`Permissions` with all\n \"Stage Channel\" permissions from the official Discord UI set to ``True``.\n\n .. versionadded:: 1.7\n \"\"\"\n return cls(1 << 32)\n\n @classmethod\n def stage_moderator(cls: Type[P]) -> P:\n \"\"\"A factory method that creates a :class:`Permissions` with all\n \"Stage Moderator\" permissions from the official Discord UI set to ``True``.\n\n .. versionadded:: 1.7\n \"\"\"\n return cls(0b100000001010000000000000000000000)\n\n @classmethod\n def advanced(cls: Type[P]) -> P:\n \"\"\"A factory method that creates a :class:`Permissions` with all\n \"Advanced\" permissions from the official Discord UI set to ``True``.\n\n .. versionadded:: 1.7\n \"\"\"\n return cls(1 << 3)\n\n def update(self, **kwargs: bool) -> None:\n r\"\"\"Bulk updates this permission object.\n\n Allows you to set multiple attributes by using keyword\n arguments. The names must be equivalent to the properties\n listed. Extraneous key/value pairs will be silently ignored.\n\n Parameters\n ------------\n \\*\\*kwargs\n A list of key/value pairs to bulk update permissions with.\n \"\"\"\n for key, value in kwargs.items():\n if key in self.VALID_FLAGS:\n setattr(self, key, value)\n\n def handle_overwrite(self, allow: int, deny: int) -> None:\n # Basically this is what's happening here.\n # We have an original bit array, e.g. 1010\n # Then we have another bit array that is 'denied', e.g. 1111\n # And then we have the last one which is 'allowed', e.g. 
0101\n # We want original OP denied to end up resulting in\n # whatever is in denied to be set to 0.\n # So 1010 OP 1111 -> 0000\n # Then we take this value and look at the allowed values.\n # And whatever is allowed is set to 1.\n # So 0000 OP2 0101 -> 0101\n # The OP is base & ~denied.\n # The OP2 is base | allowed.\n self.value = (self.value & ~deny) | allow\n\n @flag_value\n def create_instant_invite(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if the user can create instant invites.\"\"\"\n return 1 << 0\n\n @flag_value\n def kick_members(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if the user can kick users from the guild.\"\"\"\n return 1 << 1\n\n @flag_value\n def ban_members(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can ban users from the guild.\"\"\"\n return 1 << 2\n\n @flag_value\n def administrator(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user is an administrator. This role overrides all other permissions.\n\n This also bypasses all channel-specific overrides.\n \"\"\"\n return 1 << 3\n\n @flag_value\n def manage_channels(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can edit, delete, or create channels in the guild.\n\n This also corresponds to the \"Manage Channel\" channel-specific override.\"\"\"\n return 1 << 4\n\n @flag_value\n def manage_guild(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can edit guild properties.\"\"\"\n return 1 << 5\n\n @flag_value\n def add_reactions(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can add reactions to messages.\"\"\"\n return 1 << 6\n\n @flag_value\n def view_audit_log(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can view the guild's audit log.\"\"\"\n return 1 << 7\n\n @flag_value\n def priority_speaker(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can be more easily heard while talking.\"\"\"\n return 1 << 8\n\n @flag_value\n def stream(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can stream in a voice channel.\"\"\"\n return 1 << 9\n\n @flag_value\n def read_messages(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can read messages from all or specific text channels.\"\"\"\n return 1 << 10\n\n @make_permission_alias('read_messages')\n def view_channel(self) -> int:\n \"\"\":class:`bool`: An alias for :attr:`read_messages`.\n\n .. versionadded:: 1.3\n \"\"\"\n return 1 << 10\n\n @flag_value\n def send_messages(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can send messages from all or specific text channels.\"\"\"\n return 1 << 11\n\n @flag_value\n def send_tts_messages(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can send TTS messages from all or specific text channels.\"\"\"\n return 1 << 12\n\n @flag_value\n def manage_messages(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can delete or pin messages in a text channel.\n\n .. 
note::\n\n Note that there are currently no ways to edit other people's messages.\n \"\"\"\n return 1 << 13\n\n @flag_value\n def embed_links(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user's messages will automatically be embedded by Discord.\"\"\"\n return 1 << 14\n\n @flag_value\n def attach_files(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can send files in their messages.\"\"\"\n return 1 << 15\n\n @flag_value\n def read_message_history(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can read a text channel's previous messages.\"\"\"\n return 1 << 16\n\n @flag_value\n def mention_everyone(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user's @everyone or @here will mention everyone in the text channel.\"\"\"\n return 1 << 17\n\n @flag_value\n def external_emojis(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can use emojis from other guilds.\"\"\"\n return 1 << 18\n\n @make_permission_alias('external_emojis')\n def use_external_emojis(self) -> int:\n \"\"\":class:`bool`: An alias for :attr:`external_emojis`.\n\n .. versionadded:: 1.3\n \"\"\"\n return 1 << 18\n\n @flag_value\n def view_guild_insights(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can view the guild's insights.\n\n .. versionadded:: 1.3\n \"\"\"\n return 1 << 19\n\n @flag_value\n def connect(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can connect to a voice channel.\"\"\"\n return 1 << 20\n\n @flag_value\n def speak(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can speak in a voice channel.\"\"\"\n return 1 << 21\n\n @flag_value\n def mute_members(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can mute other users.\"\"\"\n return 1 << 22\n\n @flag_value\n def deafen_members(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can deafen other users.\"\"\"\n return 1 << 23\n\n @flag_value\n def move_members(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can move users between other voice channels.\"\"\"\n return 1 << 24\n\n @flag_value\n def use_voice_activation(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can use voice activation in voice channels.\"\"\"\n return 1 << 25\n\n @flag_value\n def change_nickname(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can change their nickname in the guild.\"\"\"\n return 1 << 26\n\n @flag_value\n def manage_nicknames(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can change other user's nickname in the guild.\"\"\"\n return 1 << 27\n\n @flag_value\n def manage_roles(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can create or edit roles less than their role's position.\n\n This also corresponds to the \"Manage Permissions\" channel-specific override.\n \"\"\"\n return 1 << 28\n\n @make_permission_alias('manage_roles')\n def manage_permissions(self) -> int:\n \"\"\":class:`bool`: An alias for :attr:`manage_roles`.\n\n .. versionadded:: 1.3\n \"\"\"\n return 1 << 28\n\n @flag_value\n def manage_webhooks(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can create, edit, or delete webhooks.\"\"\"\n return 1 << 29\n\n @flag_value\n def manage_emojis(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can create, edit, or delete emojis.\"\"\"\n return 1 << 30\n\n @make_permission_alias('manage_emojis')\n def manage_emojis_and_stickers(self) -> int:\n \"\"\":class:`bool`: An alias for :attr:`manage_emojis`.\n\n .. 
versionadded:: 2.0\n \"\"\"\n return 1 << 30\n\n @flag_value\n def use_slash_commands(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can use slash commands.\n\n .. versionadded:: 1.7\n \"\"\"\n return 1 << 31\n \n @make_permission_alias('use_slash_commands')\n def use_application_commands(self) -> int:\n \"\"\":class:`bool`: An alias for :attr:`use_slash_commands`.\n\n .. versionadded:: 2.0\n \"\"\"\n return 1 << 31\n\n @flag_value\n def request_to_speak(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can request to speak in a stage channel.\n\n .. versionadded:: 1.7\n \"\"\"\n return 1 << 32\n\n @flag_value\n def manage_events(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can manage guild events.\n\n .. versionadded:: 2.0\n \"\"\"\n return 1 << 33\n\n @flag_value\n def manage_threads(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can manage threads.\n\n .. versionadded:: 2.0\n \"\"\"\n return 1 << 34\n\n @flag_value\n def create_public_threads(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can create public threads.\n\n .. versionadded:: 2.0\n \"\"\"\n return 1 << 35\n\n @flag_value\n def create_private_threads(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can create private threads.\n\n .. versionadded:: 2.0\n \"\"\"\n return 1 << 36\n\n @flag_value\n def external_stickers(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can use stickers from other guilds.\n\n .. versionadded:: 2.0\n \"\"\"\n return 1 << 37\n\n @make_permission_alias('external_stickers')\n def use_external_stickers(self) -> int:\n \"\"\":class:`bool`: An alias for :attr:`external_stickers`.\n\n .. versionadded:: 2.0\n \"\"\"\n return 1 << 37\n\n @flag_value\n def send_messages_in_threads(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can send messages in threads.\n\n .. versionadded:: 2.0\n \"\"\"\n return 1 << 38\n \n @flag_value\n def start_embedded_activities(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can launch an activity flagged 'EMBEDDED' in a voice channel.\n\n .. versionadded:: 2.0\n \"\"\"\n return 1 << 39\n \n @flag_value\n def moderate_members(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can moderate members (timeout).\n\n .. versionadded:: 2.0\n \"\"\"\n return 1 << 40\n\nPO = TypeVar('PO', bound='PermissionOverwrite')\n\ndef _augment_from_permissions(cls):\n cls.VALID_NAMES = set(Permissions.VALID_FLAGS)\n aliases = set()\n\n # make descriptors for all the valid names and aliases\n for name, value in Permissions.__dict__.items():\n if isinstance(value, permission_alias):\n key = value.alias\n aliases.add(name)\n elif isinstance(value, flag_value):\n key = name\n else:\n continue\n\n # god bless Python\n def getter(self, x=key):\n return self._values.get(x)\n\n def setter(self, value, x=key):\n self._set(x, value)\n\n prop = property(getter, setter)\n setattr(cls, name, prop)\n\n cls.PURE_FLAGS = cls.VALID_NAMES - aliases\n return cls\n\n\n@_augment_from_permissions\nclass PermissionOverwrite:\n r\"\"\"A type that is used to represent a channel specific permission.\n\n Unlike a regular :class:`Permissions`\\, the default value of a\n permission is equivalent to ``None`` and not ``False``. 
Setting\n a value to ``False`` is **explicitly** denying that permission,\n while setting a value to ``True`` is **explicitly** allowing\n that permission.\n\n The values supported by this are the same as :class:`Permissions`\n with the added possibility of it being set to ``None``.\n\n .. container:: operations\n\n .. describe:: x == y\n\n Checks if two overwrites are equal.\n .. describe:: x != y\n\n Checks if two overwrites are not equal.\n .. describe:: iter(x)\n\n Returns an iterator of ``(perm, value)`` pairs. This allows it\n to be, for example, constructed as a dict or a list of pairs.\n Note that aliases are not shown.\n\n Parameters\n -----------\n \\*\\*kwargs\n Set the value of permissions by their name.\n \"\"\"\n\n __slots__ = ('_values',)\n\n if TYPE_CHECKING:\n VALID_NAMES: ClassVar[Set[str]]\n PURE_FLAGS: ClassVar[Set[str]]\n # I wish I didn't have to do this\n create_instant_invite: Optional[bool]\n kick_members: Optional[bool]\n ban_members: Optional[bool]\n administrator: Optional[bool]\n manage_channels: Optional[bool]\n manage_guild: Optional[bool]\n add_reactions: Optional[bool]\n view_audit_log: Optional[bool]\n priority_speaker: Optional[bool]\n stream: Optional[bool]\n read_messages: Optional[bool]\n view_channel: Optional[bool]\n send_messages: Optional[bool]\n send_tts_messages: Optional[bool]\n manage_messages: Optional[bool]\n embed_links: Optional[bool]\n attach_files: Optional[bool]\n read_message_history: Optional[bool]\n mention_everyone: Optional[bool]\n external_emojis: Optional[bool]\n use_external_emojis: Optional[bool]\n view_guild_insights: Optional[bool]\n connect: Optional[bool]\n speak: Optional[bool]\n mute_members: Optional[bool]\n deafen_members: Optional[bool]\n move_members: Optional[bool]\n use_voice_activation: Optional[bool]\n change_nickname: Optional[bool]\n manage_nicknames: Optional[bool]\n manage_roles: Optional[bool]\n manage_permissions: Optional[bool]\n manage_webhooks: Optional[bool]\n manage_emojis: Optional[bool]\n manage_emojis_and_stickers: Optional[bool]\n use_slash_commands: Optional[bool]\n request_to_speak: Optional[bool]\n manage_events: Optional[bool]\n manage_threads: Optional[bool]\n create_public_threads: Optional[bool]\n create_private_threads: Optional[bool]\n send_messages_in_threads: Optional[bool]\n external_stickers: Optional[bool]\n use_external_stickers: Optional[bool]\n start_embedded_activities: Optional[bool]\n moderate_members: Optional[bool]\n\n def __init__(self, **kwargs: Optional[bool]):\n self._values: Dict[str, Optional[bool]] = {}\n\n for key, value in kwargs.items():\n if key not in self.VALID_NAMES:\n raise ValueError(f'no permission called {key}.')\n\n setattr(self, key, value)\n\n def __eq__(self, other: Any) -> bool:\n return isinstance(other, PermissionOverwrite) and self._values == other._values\n\n def _set(self, key: str, value: Optional[bool]) -> None:\n if value not in (True, None, False):\n raise TypeError(f'Expected bool or NoneType, received {value.__class__.__name__}')\n\n if value is None:\n self._values.pop(key, None)\n else:\n self._values[key] = value\n\n def pair(self) -> Tuple[Permissions, Permissions]:\n \"\"\"Tuple[:class:`Permissions`, :class:`Permissions`]: Returns the (allow, deny) pair from this overwrite.\"\"\"\n\n allow = Permissions.none()\n deny = Permissions.none()\n\n for key, value in self._values.items():\n if value is True:\n setattr(allow, key, True)\n elif value is False:\n setattr(deny, key, True)\n\n return allow, deny\n\n @classmethod\n def from_pair(cls: 
Type[PO], allow: Permissions, deny: Permissions) -> PO:\n \"\"\"Creates an overwrite from an allow/deny pair of :class:`Permissions`.\"\"\"\n ret = cls()\n for key, value in allow:\n if value is True:\n setattr(ret, key, True)\n\n for key, value in deny:\n if value is True:\n setattr(ret, key, False)\n\n return ret\n\n def is_empty(self) -> bool:\n \"\"\"Checks if the permission overwrite is currently empty.\n\n An empty permission overwrite is one that has no overwrites set\n to ``True`` or ``False``.\n\n Returns\n -------\n :class:`bool`\n Indicates if the overwrite is empty.\n \"\"\"\n return len(self._values) == 0\n\n def update(self, **kwargs: bool) -> None:\n r\"\"\"Bulk updates this permission overwrite object.\n\n Allows you to set multiple attributes by using keyword\n arguments. The names must be equivalent to the properties\n listed. Extraneous key/value pairs will be silently ignored.\n\n Parameters\n ------------\n \\*\\*kwargs\n A list of key/value pairs to bulk update with.\n \"\"\"\n for key, value in kwargs.items():\n if key not in self.VALID_NAMES:\n continue\n\n setattr(self, key, value)\n\n def __iter__(self) -> Iterator[Tuple[str, Optional[bool]]]:\n for key in self.PURE_FLAGS:\n yield key, self._values.get(key)\n",
"path": "discord/permissions.py"
}
] | [
{
"content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-2021 Rapptz\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Callable, Any, ClassVar, Dict, Iterator, Set, TYPE_CHECKING, Tuple, Type, TypeVar, Optional\nfrom .flags import BaseFlags, flag_value, fill_with_flags, alias_flag_value\n\n__all__ = (\n 'Permissions',\n 'PermissionOverwrite',\n)\n\n# A permission alias works like a regular flag but is marked\n# So the PermissionOverwrite knows to work with it\nclass permission_alias(alias_flag_value):\n alias: str\n\n\ndef make_permission_alias(alias: str) -> Callable[[Callable[[Any], int]], permission_alias]:\n def decorator(func: Callable[[Any], int]) -> permission_alias:\n ret = permission_alias(func)\n ret.alias = alias\n return ret\n\n return decorator\n\nP = TypeVar('P', bound='Permissions')\n\n@fill_with_flags()\nclass Permissions(BaseFlags):\n \"\"\"Wraps up the Discord permission value.\n\n The properties provided are two way. You can set and retrieve individual\n bits using the properties as if they were regular bools. This allows\n you to edit permissions.\n\n .. versionchanged:: 1.3\n You can now use keyword arguments to initialize :class:`Permissions`\n similar to :meth:`update`.\n\n .. container:: operations\n\n .. describe:: x == y\n\n Checks if two permissions are equal.\n .. describe:: x != y\n\n Checks if two permissions are not equal.\n .. describe:: x <= y\n\n Checks if a permission is a subset of another permission.\n .. describe:: x >= y\n\n Checks if a permission is a superset of another permission.\n .. describe:: x < y\n\n Checks if a permission is a strict subset of another permission.\n .. describe:: x > y\n\n Checks if a permission is a strict superset of another permission.\n .. describe:: hash(x)\n\n Return the permission's hash.\n .. describe:: iter(x)\n\n Returns an iterator of ``(perm, value)`` pairs. This allows it\n to be, for example, constructed as a dict or a list of pairs.\n Note that aliases are not shown.\n\n Attributes\n -----------\n value: :class:`int`\n The raw value. This value is a bit array field of a 53-bit integer\n representing the currently available permissions. 
You should query\n permissions via the properties rather than using this raw value.\n \"\"\"\n\n __slots__ = ()\n\n def __init__(self, permissions: int = 0, **kwargs: bool):\n if not isinstance(permissions, int):\n raise TypeError(f'Expected int parameter, received {permissions.__class__.__name__} instead.')\n\n self.value = permissions\n for key, value in kwargs.items():\n if key not in self.VALID_FLAGS:\n raise TypeError(f'{key!r} is not a valid permission name.')\n setattr(self, key, value)\n\n def is_subset(self, other: Permissions) -> bool:\n \"\"\"Returns ``True`` if self has the same or fewer permissions as other.\"\"\"\n if isinstance(other, Permissions):\n return (self.value & other.value) == self.value\n else:\n raise TypeError(f\"cannot compare {self.__class__.__name__} with {other.__class__.__name__}\")\n\n def is_superset(self, other: Permissions) -> bool:\n \"\"\"Returns ``True`` if self has the same or more permissions as other.\"\"\"\n if isinstance(other, Permissions):\n return (self.value | other.value) == self.value\n else:\n raise TypeError(f\"cannot compare {self.__class__.__name__} with {other.__class__.__name__}\")\n\n def is_strict_subset(self, other: Permissions) -> bool:\n \"\"\"Returns ``True`` if the permissions on other are a strict subset of those on self.\"\"\"\n return self.is_subset(other) and self != other\n\n def is_strict_superset(self, other: Permissions) -> bool:\n \"\"\"Returns ``True`` if the permissions on other are a strict superset of those on self.\"\"\"\n return self.is_superset(other) and self != other\n\n __le__: Callable[[Permissions], bool] = is_subset\n __ge__: Callable[[Permissions], bool] = is_superset\n __lt__: Callable[[Permissions], bool] = is_strict_subset\n __gt__: Callable[[Permissions], bool] = is_strict_superset\n\n @classmethod\n def none(cls: Type[P]) -> P:\n \"\"\"A factory method that creates a :class:`Permissions` with all\n permissions set to ``False``.\"\"\"\n return cls(0)\n\n @classmethod\n def all(cls: Type[P]) -> P:\n \"\"\"A factory method that creates a :class:`Permissions` with all\n permissions set to ``True``.\n \"\"\"\n return cls(0b11111111111111111111111111111111111111111)\n\n @classmethod\n def all_channel(cls: Type[P]) -> P:\n \"\"\"A :class:`Permissions` with all channel-specific permissions set to\n ``True`` and the guild-specific ones set to ``False``. The guild-specific\n permissions are currently:\n\n - :attr:`manage_emojis`\n - :attr:`view_audit_log`\n - :attr:`view_guild_insights`\n - :attr:`manage_guild`\n - :attr:`change_nickname`\n - :attr:`manage_nicknames`\n - :attr:`kick_members`\n - :attr:`ban_members`\n - :attr:`administrator`\n\n .. versionchanged:: 1.7\n Added :attr:`stream`, :attr:`priority_speaker` and :attr:`use_slash_commands` permissions.\n\n .. versionchanged:: 2.0\n Added :attr:`create_public_threads`, :attr:`create_private_threads`, :attr:`manage_threads`,\n :attr:`use_external_stickers`, :attr:`send_messages_in_threads` and\n :attr:`request_to_speak` permissions.\n \"\"\"\n return cls(0b111110110110011111101111111111101010001)\n\n @classmethod\n def general(cls: Type[P]) -> P:\n \"\"\"A factory method that creates a :class:`Permissions` with all\n \"General\" permissions from the official Discord UI set to ``True``.\n\n .. 
versionchanged:: 1.7\n Permission :attr:`read_messages` is now included in the general permissions, but\n permissions :attr:`administrator`, :attr:`create_instant_invite`, :attr:`kick_members`,\n :attr:`ban_members`, :attr:`change_nickname` and :attr:`manage_nicknames` are\n no longer part of the general permissions.\n \"\"\"\n return cls(0b01110000000010000000010010110000)\n\n @classmethod\n def membership(cls: Type[P]) -> P:\n \"\"\"A factory method that creates a :class:`Permissions` with all\n \"Membership\" permissions from the official Discord UI set to ``True``.\n\n .. versionadded:: 1.7\n \"\"\"\n return cls(0b00001100000000000000000000000111)\n\n @classmethod\n def text(cls: Type[P]) -> P:\n \"\"\"A factory method that creates a :class:`Permissions` with all\n \"Text\" permissions from the official Discord UI set to ``True``.\n\n .. versionchanged:: 1.7\n Permission :attr:`read_messages` is no longer part of the text permissions.\n Added :attr:`use_slash_commands` permission.\n\n .. versionchanged:: 2.0\n Added :attr:`create_public_threads`, :attr:`create_private_threads`, :attr:`manage_threads`,\n :attr:`send_messages_in_threads` and :attr:`use_external_stickers` permissions.\n \"\"\"\n return cls(0b111110010000000000001111111100001000000)\n\n @classmethod\n def voice(cls: Type[P]) -> P:\n \"\"\"A factory method that creates a :class:`Permissions` with all\n \"Voice\" permissions from the official Discord UI set to ``True``.\"\"\"\n return cls(0b00000011111100000000001100000000)\n\n @classmethod\n def stage(cls: Type[P]) -> P:\n \"\"\"A factory method that creates a :class:`Permissions` with all\n \"Stage Channel\" permissions from the official Discord UI set to ``True``.\n\n .. versionadded:: 1.7\n \"\"\"\n return cls(1 << 32)\n\n @classmethod\n def stage_moderator(cls: Type[P]) -> P:\n \"\"\"A factory method that creates a :class:`Permissions` with all\n \"Stage Moderator\" permissions from the official Discord UI set to ``True``.\n\n .. versionadded:: 1.7\n \"\"\"\n return cls(0b100000001010000000000000000000000)\n\n @classmethod\n def advanced(cls: Type[P]) -> P:\n \"\"\"A factory method that creates a :class:`Permissions` with all\n \"Advanced\" permissions from the official Discord UI set to ``True``.\n\n .. versionadded:: 1.7\n \"\"\"\n return cls(1 << 3)\n\n def update(self, **kwargs: bool) -> None:\n r\"\"\"Bulk updates this permission object.\n\n Allows you to set multiple attributes by using keyword\n arguments. The names must be equivalent to the properties\n listed. Extraneous key/value pairs will be silently ignored.\n\n Parameters\n ------------\n \\*\\*kwargs\n A list of key/value pairs to bulk update permissions with.\n \"\"\"\n for key, value in kwargs.items():\n if key in self.VALID_FLAGS:\n setattr(self, key, value)\n\n def handle_overwrite(self, allow: int, deny: int) -> None:\n # Basically this is what's happening here.\n # We have an original bit array, e.g. 1010\n # Then we have another bit array that is 'denied', e.g. 1111\n # And then we have the last one which is 'allowed', e.g. 
0101\n # We want original OP denied to end up resulting in\n # whatever is in denied to be set to 0.\n # So 1010 OP 1111 -> 0000\n # Then we take this value and look at the allowed values.\n # And whatever is allowed is set to 1.\n # So 0000 OP2 0101 -> 0101\n # The OP is base & ~denied.\n # The OP2 is base | allowed.\n self.value = (self.value & ~deny) | allow\n\n @flag_value\n def create_instant_invite(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if the user can create instant invites.\"\"\"\n return 1 << 0\n\n @flag_value\n def kick_members(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if the user can kick users from the guild.\"\"\"\n return 1 << 1\n\n @flag_value\n def ban_members(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can ban users from the guild.\"\"\"\n return 1 << 2\n\n @flag_value\n def administrator(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user is an administrator. This role overrides all other permissions.\n\n This also bypasses all channel-specific overrides.\n \"\"\"\n return 1 << 3\n\n @flag_value\n def manage_channels(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can edit, delete, or create channels in the guild.\n\n This also corresponds to the \"Manage Channel\" channel-specific override.\"\"\"\n return 1 << 4\n\n @flag_value\n def manage_guild(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can edit guild properties.\"\"\"\n return 1 << 5\n\n @flag_value\n def add_reactions(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can add reactions to messages.\"\"\"\n return 1 << 6\n\n @flag_value\n def view_audit_log(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can view the guild's audit log.\"\"\"\n return 1 << 7\n\n @flag_value\n def priority_speaker(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can be more easily heard while talking.\"\"\"\n return 1 << 8\n\n @flag_value\n def stream(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can stream in a voice channel.\"\"\"\n return 1 << 9\n\n @flag_value\n def read_messages(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can read messages from all or specific text channels.\"\"\"\n return 1 << 10\n\n @make_permission_alias('read_messages')\n def view_channel(self) -> int:\n \"\"\":class:`bool`: An alias for :attr:`read_messages`.\n\n .. versionadded:: 1.3\n \"\"\"\n return 1 << 10\n\n @flag_value\n def send_messages(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can send messages from all or specific text channels.\"\"\"\n return 1 << 11\n\n @flag_value\n def send_tts_messages(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can send TTS messages from all or specific text channels.\"\"\"\n return 1 << 12\n\n @flag_value\n def manage_messages(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can delete or pin messages in a text channel.\n\n .. 
note::\n\n Note that there are currently no ways to edit other people's messages.\n \"\"\"\n return 1 << 13\n\n @flag_value\n def embed_links(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user's messages will automatically be embedded by Discord.\"\"\"\n return 1 << 14\n\n @flag_value\n def attach_files(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can send files in their messages.\"\"\"\n return 1 << 15\n\n @flag_value\n def read_message_history(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can read a text channel's previous messages.\"\"\"\n return 1 << 16\n\n @flag_value\n def mention_everyone(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user's @everyone or @here will mention everyone in the text channel.\"\"\"\n return 1 << 17\n\n @flag_value\n def external_emojis(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can use emojis from other guilds.\"\"\"\n return 1 << 18\n\n @make_permission_alias('external_emojis')\n def use_external_emojis(self) -> int:\n \"\"\":class:`bool`: An alias for :attr:`external_emojis`.\n\n .. versionadded:: 1.3\n \"\"\"\n return 1 << 18\n\n @flag_value\n def view_guild_insights(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can view the guild's insights.\n\n .. versionadded:: 1.3\n \"\"\"\n return 1 << 19\n\n @flag_value\n def connect(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can connect to a voice channel.\"\"\"\n return 1 << 20\n\n @flag_value\n def speak(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can speak in a voice channel.\"\"\"\n return 1 << 21\n\n @flag_value\n def mute_members(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can mute other users.\"\"\"\n return 1 << 22\n\n @flag_value\n def deafen_members(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can deafen other users.\"\"\"\n return 1 << 23\n\n @flag_value\n def move_members(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can move users between other voice channels.\"\"\"\n return 1 << 24\n\n @flag_value\n def use_voice_activation(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can use voice activation in voice channels.\"\"\"\n return 1 << 25\n\n @flag_value\n def change_nickname(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can change their nickname in the guild.\"\"\"\n return 1 << 26\n\n @flag_value\n def manage_nicknames(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can change other user's nickname in the guild.\"\"\"\n return 1 << 27\n\n @flag_value\n def manage_roles(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can create or edit roles less than their role's position.\n\n This also corresponds to the \"Manage Permissions\" channel-specific override.\n \"\"\"\n return 1 << 28\n\n @make_permission_alias('manage_roles')\n def manage_permissions(self) -> int:\n \"\"\":class:`bool`: An alias for :attr:`manage_roles`.\n\n .. versionadded:: 1.3\n \"\"\"\n return 1 << 28\n\n @flag_value\n def manage_webhooks(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can create, edit, or delete webhooks.\"\"\"\n return 1 << 29\n\n @flag_value\n def manage_emojis(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can create, edit, or delete emojis.\"\"\"\n return 1 << 30\n\n @make_permission_alias('manage_emojis')\n def manage_emojis_and_stickers(self) -> int:\n \"\"\":class:`bool`: An alias for :attr:`manage_emojis`.\n\n .. 
versionadded:: 2.0\n \"\"\"\n return 1 << 30\n\n @flag_value\n def use_slash_commands(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can use slash commands.\n\n .. versionadded:: 1.7\n \"\"\"\n return 1 << 31\n \n @make_permission_alias('use_slash_commands')\n def use_application_commands(self) -> int:\n \"\"\":class:`bool`: An alias for :attr:`use_slash_commands`.\n\n .. versionadded:: 2.0\n \"\"\"\n return 1 << 31\n\n @flag_value\n def request_to_speak(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can request to speak in a stage channel.\n\n .. versionadded:: 1.7\n \"\"\"\n return 1 << 32\n\n @flag_value\n def manage_events(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can manage guild events.\n\n .. versionadded:: 2.0\n \"\"\"\n return 1 << 33\n\n @flag_value\n def manage_threads(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can manage threads.\n\n .. versionadded:: 2.0\n \"\"\"\n return 1 << 34\n\n @flag_value\n def create_public_threads(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can create public threads.\n\n .. versionadded:: 2.0\n \"\"\"\n return 1 << 35\n\n @flag_value\n def create_private_threads(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can create private threads.\n\n .. versionadded:: 2.0\n \"\"\"\n return 1 << 36\n\n @flag_value\n def external_stickers(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can use stickers from other guilds.\n\n .. versionadded:: 2.0\n \"\"\"\n return 1 << 37\n\n @make_permission_alias('external_stickers')\n def use_external_stickers(self) -> int:\n \"\"\":class:`bool`: An alias for :attr:`external_stickers`.\n\n .. versionadded:: 2.0\n \"\"\"\n return 1 << 37\n\n @flag_value\n def send_messages_in_threads(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can send messages in threads.\n\n .. versionadded:: 2.0\n \"\"\"\n return 1 << 38\n \n @flag_value\n def start_embedded_activities(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can launch an activity flagged 'EMBEDDED' in a voice channel.\n\n .. versionadded:: 2.0\n \"\"\"\n return 1 << 39\n \n @flag_value\n def moderate_members(self) -> int:\n \"\"\":class:`bool`: Returns ``True`` if a user can moderate members (timeout).\n\n .. versionadded:: 2.0\n \"\"\"\n return 1 << 40\n\nPO = TypeVar('PO', bound='PermissionOverwrite')\n\ndef _augment_from_permissions(cls):\n cls.VALID_NAMES = set(Permissions.VALID_FLAGS)\n aliases = set()\n\n # make descriptors for all the valid names and aliases\n for name, value in Permissions.__dict__.items():\n if isinstance(value, permission_alias):\n key = value.alias\n aliases.add(name)\n elif isinstance(value, flag_value):\n key = name\n else:\n continue\n\n # god bless Python\n def getter(self, x=key):\n return self._values.get(x)\n\n def setter(self, value, x=key):\n self._set(x, value)\n\n prop = property(getter, setter)\n setattr(cls, name, prop)\n\n cls.PURE_FLAGS = cls.VALID_NAMES - aliases\n return cls\n\n\n@_augment_from_permissions\nclass PermissionOverwrite:\n r\"\"\"A type that is used to represent a channel specific permission.\n\n Unlike a regular :class:`Permissions`\\, the default value of a\n permission is equivalent to ``None`` and not ``False``. 
Setting\n a value to ``False`` is **explicitly** denying that permission,\n while setting a value to ``True`` is **explicitly** allowing\n that permission.\n\n The values supported by this are the same as :class:`Permissions`\n with the added possibility of it being set to ``None``.\n\n .. container:: operations\n\n .. describe:: x == y\n\n Checks if two overwrites are equal.\n .. describe:: x != y\n\n Checks if two overwrites are not equal.\n .. describe:: iter(x)\n\n Returns an iterator of ``(perm, value)`` pairs. This allows it\n to be, for example, constructed as a dict or a list of pairs.\n Note that aliases are not shown.\n\n Parameters\n -----------\n \\*\\*kwargs\n Set the value of permissions by their name.\n \"\"\"\n\n __slots__ = ('_values',)\n\n if TYPE_CHECKING:\n VALID_NAMES: ClassVar[Set[str]]\n PURE_FLAGS: ClassVar[Set[str]]\n # I wish I didn't have to do this\n create_instant_invite: Optional[bool]\n kick_members: Optional[bool]\n ban_members: Optional[bool]\n administrator: Optional[bool]\n manage_channels: Optional[bool]\n manage_guild: Optional[bool]\n add_reactions: Optional[bool]\n view_audit_log: Optional[bool]\n priority_speaker: Optional[bool]\n stream: Optional[bool]\n read_messages: Optional[bool]\n view_channel: Optional[bool]\n send_messages: Optional[bool]\n send_tts_messages: Optional[bool]\n manage_messages: Optional[bool]\n embed_links: Optional[bool]\n attach_files: Optional[bool]\n read_message_history: Optional[bool]\n mention_everyone: Optional[bool]\n external_emojis: Optional[bool]\n use_external_emojis: Optional[bool]\n view_guild_insights: Optional[bool]\n connect: Optional[bool]\n speak: Optional[bool]\n mute_members: Optional[bool]\n deafen_members: Optional[bool]\n move_members: Optional[bool]\n use_voice_activation: Optional[bool]\n change_nickname: Optional[bool]\n manage_nicknames: Optional[bool]\n manage_roles: Optional[bool]\n manage_permissions: Optional[bool]\n manage_webhooks: Optional[bool]\n manage_emojis: Optional[bool]\n manage_emojis_and_stickers: Optional[bool]\n use_slash_commands: Optional[bool]\n request_to_speak: Optional[bool]\n manage_events: Optional[bool]\n manage_threads: Optional[bool]\n create_public_threads: Optional[bool]\n create_private_threads: Optional[bool]\n send_messages_in_threads: Optional[bool]\n external_stickers: Optional[bool]\n use_external_stickers: Optional[bool]\n start_embedded_activities: Optional[bool]\n moderate_members: Optional[bool]\n\n def __init__(self, **kwargs: Optional[bool]):\n self._values: Dict[str, Optional[bool]] = {}\n\n for key, value in kwargs.items():\n if key not in self.VALID_NAMES:\n raise ValueError(f'no permission called {key}.')\n\n setattr(self, key, value)\n\n def __eq__(self, other: Any) -> bool:\n return isinstance(other, PermissionOverwrite) and self._values == other._values\n\n def _set(self, key: str, value: Optional[bool]) -> None:\n if value not in (True, None, False):\n raise TypeError(f'Expected bool or NoneType, received {value.__class__.__name__}')\n\n if value is None:\n self._values.pop(key, None)\n else:\n self._values[key] = value\n\n def pair(self) -> Tuple[Permissions, Permissions]:\n \"\"\"Tuple[:class:`Permissions`, :class:`Permissions`]: Returns the (allow, deny) pair from this overwrite.\"\"\"\n\n allow = Permissions.none()\n deny = Permissions.none()\n\n for key, value in self._values.items():\n if value is True:\n setattr(allow, key, True)\n elif value is False:\n setattr(deny, key, True)\n\n return allow, deny\n\n @classmethod\n def from_pair(cls: 
Type[PO], allow: Permissions, deny: Permissions) -> PO:\n \"\"\"Creates an overwrite from an allow/deny pair of :class:`Permissions`.\"\"\"\n ret = cls()\n for key, value in allow:\n if value is True:\n setattr(ret, key, True)\n\n for key, value in deny:\n if value is True:\n setattr(ret, key, False)\n\n return ret\n\n def is_empty(self) -> bool:\n \"\"\"Checks if the permission overwrite is currently empty.\n\n An empty permission overwrite is one that has no overwrites set\n to ``True`` or ``False``.\n\n Returns\n -------\n :class:`bool`\n Indicates if the overwrite is empty.\n \"\"\"\n return len(self._values) == 0\n\n def update(self, **kwargs: bool) -> None:\n r\"\"\"Bulk updates this permission overwrite object.\n\n Allows you to set multiple attributes by using keyword\n arguments. The names must be equivalent to the properties\n listed. Extraneous key/value pairs will be silently ignored.\n\n Parameters\n ------------\n \\*\\*kwargs\n A list of key/value pairs to bulk update with.\n \"\"\"\n for key, value in kwargs.items():\n if key not in self.VALID_NAMES:\n continue\n\n setattr(self, key, value)\n\n def __iter__(self) -> Iterator[Tuple[str, Optional[bool]]]:\n for key in self.PURE_FLAGS:\n yield key, self._values.get(key)\n",
"path": "discord/permissions.py"
}
] | diff --git a/discord/permissions.py b/discord/permissions.py
index 72217d609f..c0db3445ec 100644
--- a/discord/permissions.py
+++ b/discord/permissions.py
@@ -148,7 +148,7 @@ def all(cls: Type[P]) -> P:
"""A factory method that creates a :class:`Permissions` with all
permissions set to ``True``.
"""
- return cls(-1)
+ return cls(0b11111111111111111111111111111111111111111)
@classmethod
def all_channel(cls: Type[P]) -> P:
|
pallets__werkzeug-2129 | Stronger typing for `request.headers.get` method
Since v2 and the typing stubs, the `get` method of the `Headers` data structure always returns an `Optional[str]`. However, in some cases we can know statically that it will return a `str`. Indeed, when a default string parameter is given, the function always returns a `str`.
Here's a preview of my code before v2:
```py
auth_token = flask.request.headers.get('Authorization', '').replace('Bearer ', '')
```
Now that `werkzeug` is typed, the `.replace` call raises a typing error when analyzed with `mypy`:
```
error: Item "None" of "Optional[str]" has no attribute "replace"
```
I can fix it like this:
```py
auth_token = typing.cast(str, flask.request.headers.get('Authorization', '')).replace('Bearer ', '')
```
or even like this:
```py
auth_header = flask.request.headers.get('Authorization', '')
if auth_header:
auth_token = auth_header.replace('Bearer ', '')
else:
auth_token = ''
```
But I feel like none of these workarounds should be required. I'll send a typing fix.
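For reference, here is a minimal sketch of how `typing.overload` can express the desired behaviour. This is a standalone stand-in, not werkzeug's actual `Headers` (which also accepts extra parameters such as `type`):

```py
import typing as t

T = t.TypeVar("T")


class Headers:
    """Standalone stand-in used only to illustrate the overloads."""

    def __init__(self) -> None:
        self._store: t.Dict[str, str] = {}

    @t.overload
    def get(self, key: str) -> t.Optional[str]:
        ...

    @t.overload
    def get(self, key: str, default: str) -> str:
        ...

    @t.overload
    def get(self, key: str, default: T) -> t.Union[str, T]:
        ...

    def get(self, key: str, default: t.Any = None) -> t.Any:
        return self._store.get(key.lower(), default)


headers = Headers()
# mypy now infers `str`, so `.replace` type-checks without a cast:
auth_token = headers.get("Authorization", "").replace("Bearer ", "")
```

With these overloads, `headers.get("Authorization", "")` is inferred as `str`, while a plain `headers.get("Authorization")` stays `Optional[str]`.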
| [
{
"content": "import typing as t\nfrom datetime import datetime\n\nfrom .._internal import _to_str\nfrom ..datastructures import Accept\nfrom ..datastructures import Authorization\nfrom ..datastructures import CharsetAccept\nfrom ..datastructures import ETags\nfrom ..datastructures import Headers\nfrom ..datastructures import HeaderSet\nfrom ..datastructures import IfRange\nfrom ..datastructures import ImmutableList\nfrom ..datastructures import ImmutableMultiDict\nfrom ..datastructures import LanguageAccept\nfrom ..datastructures import MIMEAccept\nfrom ..datastructures import MultiDict\nfrom ..datastructures import Range\nfrom ..datastructures import RequestCacheControl\nfrom ..http import parse_accept_header\nfrom ..http import parse_authorization_header\nfrom ..http import parse_cache_control_header\nfrom ..http import parse_cookie\nfrom ..http import parse_date\nfrom ..http import parse_etags\nfrom ..http import parse_if_range_header\nfrom ..http import parse_list_header\nfrom ..http import parse_options_header\nfrom ..http import parse_range_header\nfrom ..http import parse_set_header\nfrom ..urls import url_decode\nfrom ..user_agent import UserAgent\nfrom ..useragents import _UserAgent as _DeprecatedUserAgent\nfrom ..utils import cached_property\nfrom ..utils import header_property\nfrom .utils import get_current_url\nfrom .utils import get_host\n\n\nclass Request:\n \"\"\"Represents the non-IO parts of a HTTP request, including the\n method, URL info, and headers.\n\n This class is not meant for general use. It should only be used when\n implementing WSGI, ASGI, or another HTTP application spec. Werkzeug\n provides a WSGI implementation at :cls:`werkzeug.wrappers.Request`.\n\n :param method: The method the request was made with, such as\n ``GET``.\n :param scheme: The URL scheme of the protocol the request used, such\n as ``https`` or ``wss``.\n :param server: The address of the server. ``(host, port)``,\n ``(path, None)`` for unix sockets, or ``None`` if not known.\n :param root_path: The prefix that the application is mounted under.\n This is prepended to generated URLs, but is not part of route\n matching.\n :param path: The path part of the URL after ``root_path``.\n :param query_string: The part of the URL after the \"?\".\n :param headers: The headers received with the request.\n :param remote_addr: The address of the client sending the request.\n\n .. versionadded:: 2.0\n \"\"\"\n\n #: The charset used to decode most data in the request.\n charset = \"utf-8\"\n\n #: the error handling procedure for errors, defaults to 'replace'\n encoding_errors = \"replace\"\n\n #: the class to use for `args` and `form`. The default is an\n #: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports\n #: multiple values per key. alternatively it makes sense to use an\n #: :class:`~werkzeug.datastructures.ImmutableOrderedMultiDict` which\n #: preserves order or a :class:`~werkzeug.datastructures.ImmutableDict`\n #: which is the fastest but only remembers the last key. It is also\n #: possible to use mutable structures, but this is not recommended.\n #:\n #: .. versionadded:: 0.6\n parameter_storage_class: t.Type[MultiDict] = ImmutableMultiDict\n\n #: The type to be used for dict values from the incoming WSGI\n #: environment. (For example for :attr:`cookies`.) By default an\n #: :class:`~werkzeug.datastructures.ImmutableMultiDict` is used.\n #:\n #: .. versionchanged:: 1.0.0\n #: Changed to ``ImmutableMultiDict`` to support multiple values.\n #:\n #: .. 
versionadded:: 0.6\n dict_storage_class: t.Type[MultiDict] = ImmutableMultiDict\n\n #: the type to be used for list values from the incoming WSGI environment.\n #: By default an :class:`~werkzeug.datastructures.ImmutableList` is used\n #: (for example for :attr:`access_list`).\n #:\n #: .. versionadded:: 0.6\n list_storage_class: t.Type[t.List] = ImmutableList\n\n user_agent_class = _DeprecatedUserAgent\n \"\"\"The class used and returned by the :attr:`user_agent` property to\n parse the header. Defaults to\n :class:`~werkzeug.user_agent.UserAgent`, which does no parsing. An\n extension can provide a subclass that uses a parser to provide other\n data.\n\n .. versionadded:: 2.0\n \"\"\"\n\n #: Valid host names when handling requests. By default all hosts are\n #: trusted, which means that whatever the client says the host is\n #: will be accepted.\n #:\n #: Because ``Host`` and ``X-Forwarded-Host`` headers can be set to\n #: any value by a malicious client, it is recommended to either set\n #: this property or implement similar validation in the proxy (if\n #: the application is being run behind one).\n #:\n #: .. versionadded:: 0.9\n trusted_hosts: t.Optional[t.List[str]] = None\n\n def __init__(\n self,\n method: str,\n scheme: str,\n server: t.Optional[t.Tuple[str, t.Optional[int]]],\n root_path: str,\n path: str,\n query_string: bytes,\n headers: Headers,\n remote_addr: t.Optional[str],\n ) -> None:\n #: The method the request was made with, such as ``GET``.\n self.method = method.upper()\n #: The URL scheme of the protocol the request used, such as\n #: ``https`` or ``wss``.\n self.scheme = scheme\n #: The address of the server. ``(host, port)``, ``(path, None)``\n #: for unix sockets, or ``None`` if not known.\n self.server = server\n #: The prefix that the application is mounted under, without a\n #: trailing slash. :attr:`path` comes after this.\n self.root_path = root_path.rstrip(\"/\")\n #: The path part of the URL after :attr:`root_path`. This is the\n #: path used for routing within the application.\n self.path = \"/\" + path.lstrip(\"/\")\n #: The part of the URL after the \"?\". This is the raw value, use\n #: :attr:`args` for the parsed values.\n self.query_string = query_string\n #: The headers received with the request.\n self.headers = headers\n #: The address of the client sending the request.\n self.remote_addr = remote_addr\n\n def __repr__(self) -> str:\n try:\n url = self.url\n except Exception as e:\n url = f\"(invalid URL: {e})\"\n\n return f\"<{type(self).__name__} {url!r} [{self.method}]>\"\n\n @property\n def url_charset(self) -> str:\n \"\"\"The charset that is assumed for URLs. Defaults to the value\n of :attr:`charset`.\n\n .. versionadded:: 0.6\n \"\"\"\n return self.charset\n\n @cached_property\n def args(self) -> \"MultiDict[str, str]\":\n \"\"\"The parsed URL parameters (the part in the URL after the question\n mark).\n\n By default an\n :class:`~werkzeug.datastructures.ImmutableMultiDict`\n is returned from this function. This can be changed by setting\n :attr:`parameter_storage_class` to a different type. 
This might\n be necessary if the order of the form data is important.\n \"\"\"\n return url_decode(\n self.query_string,\n self.url_charset,\n errors=self.encoding_errors,\n cls=self.parameter_storage_class,\n )\n\n @cached_property\n def access_route(self) -> t.List[str]:\n \"\"\"If a forwarded header exists this is a list of all ip addresses\n from the client ip to the last proxy server.\n \"\"\"\n if \"X-Forwarded-For\" in self.headers:\n return self.list_storage_class(\n parse_list_header(self.headers[\"X-Forwarded-For\"])\n )\n elif self.remote_addr is not None:\n return self.list_storage_class([self.remote_addr])\n return self.list_storage_class()\n\n @cached_property\n def full_path(self) -> str:\n \"\"\"Requested path, including the query string.\"\"\"\n return f\"{self.path}?{_to_str(self.query_string, self.url_charset)}\"\n\n @property\n def is_secure(self) -> bool:\n \"\"\"``True`` if the request was made with a secure protocol\n (HTTPS or WSS).\n \"\"\"\n return self.scheme in {\"https\", \"wss\"}\n\n @cached_property\n def url(self) -> str:\n \"\"\"The full request URL with the scheme, host, root path, path,\n and query string.\"\"\"\n return get_current_url(\n self.scheme, self.host, self.root_path, self.path, self.query_string\n )\n\n @cached_property\n def base_url(self) -> str:\n \"\"\"Like :attr:`url` but without the query string.\"\"\"\n return get_current_url(self.scheme, self.host, self.root_path, self.path)\n\n @cached_property\n def root_url(self) -> str:\n \"\"\"The request URL scheme, host, and root path. This is the root\n that the application is accessed from.\n \"\"\"\n return get_current_url(self.scheme, self.host, self.root_path)\n\n @cached_property\n def host_url(self) -> str:\n \"\"\"The request URL scheme and host only.\"\"\"\n return get_current_url(self.scheme, self.host)\n\n @cached_property\n def host(self) -> str:\n \"\"\"The host name the request was made to, including the port if\n it's non-standard. 
Validated with :attr:`trusted_hosts`.\n \"\"\"\n return get_host(\n self.scheme, self.headers.get(\"host\"), self.server, self.trusted_hosts\n )\n\n @cached_property\n def cookies(self) -> \"ImmutableMultiDict[str, str]\":\n \"\"\"A :class:`dict` with the contents of all cookies transmitted with\n the request.\"\"\"\n wsgi_combined_cookie = \";\".join(self.headers.getlist(\"Cookie\"))\n return parse_cookie( # type: ignore\n wsgi_combined_cookie,\n self.charset,\n self.encoding_errors,\n cls=self.dict_storage_class,\n )\n\n # Common Descriptors\n\n content_type = header_property[str](\n \"Content-Type\",\n doc=\"\"\"The Content-Type entity-header field indicates the media\n type of the entity-body sent to the recipient or, in the case of\n the HEAD method, the media type that would have been sent had\n the request been a GET.\"\"\",\n read_only=True,\n )\n\n @cached_property\n def content_length(self) -> t.Optional[int]:\n \"\"\"The Content-Length entity-header field indicates the size of the\n entity-body in bytes or, in the case of the HEAD method, the size of\n the entity-body that would have been sent had the request been a\n GET.\n \"\"\"\n if self.headers.get(\"Transfer-Encoding\", \"\") == \"chunked\":\n return None\n\n content_length = self.headers.get(\"Content-Length\")\n if content_length is not None:\n try:\n return max(0, int(content_length))\n except (ValueError, TypeError):\n pass\n\n return None\n\n content_encoding = header_property[str](\n \"Content-Encoding\",\n doc=\"\"\"The Content-Encoding entity-header field is used as a\n modifier to the media-type. When present, its value indicates\n what additional content codings have been applied to the\n entity-body, and thus what decoding mechanisms must be applied\n in order to obtain the media-type referenced by the Content-Type\n header field.\n\n .. versionadded:: 0.9\"\"\",\n read_only=True,\n )\n content_md5 = header_property[str](\n \"Content-MD5\",\n doc=\"\"\"The Content-MD5 entity-header field, as defined in\n RFC 1864, is an MD5 digest of the entity-body for the purpose of\n providing an end-to-end message integrity check (MIC) of the\n entity-body. (Note: a MIC is good for detecting accidental\n modification of the entity-body in transit, but is not proof\n against malicious attacks.)\n\n .. versionadded:: 0.9\"\"\",\n read_only=True,\n )\n referrer = header_property[str](\n \"Referer\",\n doc=\"\"\"The Referer[sic] request-header field allows the client\n to specify, for the server's benefit, the address (URI) of the\n resource from which the Request-URI was obtained (the\n \"referrer\", although the header field is misspelled).\"\"\",\n read_only=True,\n )\n date = header_property(\n \"Date\",\n None,\n parse_date,\n doc=\"\"\"The Date general-header field represents the date and\n time at which the message was originated, having the same\n semantics as orig-date in RFC 822.\n\n .. 
versionchanged:: 2.0\n The datetime object is timezone-aware.\n \"\"\",\n read_only=True,\n )\n max_forwards = header_property(\n \"Max-Forwards\",\n None,\n int,\n doc=\"\"\"The Max-Forwards request-header field provides a\n mechanism with the TRACE and OPTIONS methods to limit the number\n of proxies or gateways that can forward the request to the next\n inbound server.\"\"\",\n read_only=True,\n )\n\n def _parse_content_type(self) -> None:\n if not hasattr(self, \"_parsed_content_type\"):\n self._parsed_content_type = parse_options_header(\n self.headers.get(\"Content-Type\", \"\")\n )\n\n @property\n def mimetype(self) -> str:\n \"\"\"Like :attr:`content_type`, but without parameters (eg, without\n charset, type etc.) and always lowercase. For example if the content\n type is ``text/HTML; charset=utf-8`` the mimetype would be\n ``'text/html'``.\n \"\"\"\n self._parse_content_type()\n return self._parsed_content_type[0].lower()\n\n @property\n def mimetype_params(self) -> t.Dict[str, str]:\n \"\"\"The mimetype parameters as dict. For example if the content\n type is ``text/html; charset=utf-8`` the params would be\n ``{'charset': 'utf-8'}``.\n \"\"\"\n self._parse_content_type()\n return self._parsed_content_type[1]\n\n @cached_property\n def pragma(self) -> HeaderSet:\n \"\"\"The Pragma general-header field is used to include\n implementation-specific directives that might apply to any recipient\n along the request/response chain. All pragma directives specify\n optional behavior from the viewpoint of the protocol; however, some\n systems MAY require that behavior be consistent with the directives.\n \"\"\"\n return parse_set_header(self.headers.get(\"Pragma\", \"\"))\n\n # Accept\n\n @cached_property\n def accept_mimetypes(self) -> MIMEAccept:\n \"\"\"List of mimetypes this client supports as\n :class:`~werkzeug.datastructures.MIMEAccept` object.\n \"\"\"\n return parse_accept_header(self.headers.get(\"Accept\"), MIMEAccept)\n\n @cached_property\n def accept_charsets(self) -> CharsetAccept:\n \"\"\"List of charsets this client supports as\n :class:`~werkzeug.datastructures.CharsetAccept` object.\n \"\"\"\n return parse_accept_header(self.headers.get(\"Accept-Charset\"), CharsetAccept)\n\n @cached_property\n def accept_encodings(self) -> Accept:\n \"\"\"List of encodings this client accepts. Encodings in a HTTP term\n are compression encodings such as gzip. For charsets have a look at\n :attr:`accept_charset`.\n \"\"\"\n return parse_accept_header(self.headers.get(\"Accept-Encoding\"))\n\n @cached_property\n def accept_languages(self) -> LanguageAccept:\n \"\"\"List of languages this client accepts as\n :class:`~werkzeug.datastructures.LanguageAccept` object.\n\n .. 
versionchanged 0.5\n In previous versions this was a regular\n :class:`~werkzeug.datastructures.Accept` object.\n \"\"\"\n return parse_accept_header(self.headers.get(\"Accept-Language\"), LanguageAccept)\n\n # ETag\n\n @cached_property\n def cache_control(self) -> RequestCacheControl:\n \"\"\"A :class:`~werkzeug.datastructures.RequestCacheControl` object\n for the incoming cache control headers.\n \"\"\"\n cache_control = self.headers.get(\"Cache-Control\")\n return parse_cache_control_header(cache_control, None, RequestCacheControl)\n\n @cached_property\n def if_match(self) -> ETags:\n \"\"\"An object containing all the etags in the `If-Match` header.\n\n :rtype: :class:`~werkzeug.datastructures.ETags`\n \"\"\"\n return parse_etags(self.headers.get(\"If-Match\"))\n\n @cached_property\n def if_none_match(self) -> ETags:\n \"\"\"An object containing all the etags in the `If-None-Match` header.\n\n :rtype: :class:`~werkzeug.datastructures.ETags`\n \"\"\"\n return parse_etags(self.headers.get(\"If-None-Match\"))\n\n @cached_property\n def if_modified_since(self) -> t.Optional[datetime]:\n \"\"\"The parsed `If-Modified-Since` header as a datetime object.\n\n .. versionchanged:: 2.0\n The datetime object is timezone-aware.\n \"\"\"\n return parse_date(self.headers.get(\"If-Modified-Since\"))\n\n @cached_property\n def if_unmodified_since(self) -> t.Optional[datetime]:\n \"\"\"The parsed `If-Unmodified-Since` header as a datetime object.\n\n .. versionchanged:: 2.0\n The datetime object is timezone-aware.\n \"\"\"\n return parse_date(self.headers.get(\"If-Unmodified-Since\"))\n\n @cached_property\n def if_range(self) -> IfRange:\n \"\"\"The parsed ``If-Range`` header.\n\n .. versionchanged:: 2.0\n ``IfRange.date`` is timezone-aware.\n\n .. versionadded:: 0.7\n \"\"\"\n return parse_if_range_header(self.headers.get(\"If-Range\"))\n\n @cached_property\n def range(self) -> t.Optional[Range]:\n \"\"\"The parsed `Range` header.\n\n .. versionadded:: 0.7\n\n :rtype: :class:`~werkzeug.datastructures.Range`\n \"\"\"\n return parse_range_header(self.headers.get(\"Range\"))\n\n # User Agent\n\n @cached_property\n def user_agent(self) -> UserAgent:\n \"\"\"The user agent. Use ``user_agent.string`` to get the header\n value. Set :attr:`user_agent_class` to a subclass of\n :class:`~werkzeug.user_agent.UserAgent` to provide parsing for\n the other properties or other extended data.\n\n .. versionchanged:: 2.0\n The built in parser is deprecated and will be removed in\n Werkzeug 2.1. A ``UserAgent`` subclass must be set to parse\n data from the string.\n \"\"\"\n return self.user_agent_class(t.cast(str, self.headers.get(\"User-Agent\", \"\")))\n\n # Authorization\n\n @cached_property\n def authorization(self) -> t.Optional[Authorization]:\n \"\"\"The `Authorization` object in parsed form.\"\"\"\n return parse_authorization_header(self.headers.get(\"Authorization\"))\n\n # CORS\n\n origin = header_property[str](\n \"Origin\",\n doc=(\n \"The host that the request originated from. Set\"\n \" :attr:`~CORSResponseMixin.access_control_allow_origin` on\"\n \" the response to indicate which origins are allowed.\"\n ),\n read_only=True,\n )\n\n access_control_request_headers = header_property(\n \"Access-Control-Request-Headers\",\n load_func=parse_set_header,\n doc=(\n \"Sent with a preflight request to indicate which headers\"\n \" will be sent with the cross origin request. 
Set\"\n \" :attr:`~CORSResponseMixin.access_control_allow_headers`\"\n \" on the response to indicate which headers are allowed.\"\n ),\n read_only=True,\n )\n\n access_control_request_method = header_property[str](\n \"Access-Control-Request-Method\",\n doc=(\n \"Sent with a preflight request to indicate which method\"\n \" will be used for the cross origin request. Set\"\n \" :attr:`~CORSResponseMixin.access_control_allow_methods`\"\n \" on the response to indicate which methods are allowed.\"\n ),\n read_only=True,\n )\n\n @property\n def is_json(self) -> bool:\n \"\"\"Check if the mimetype indicates JSON data, either\n :mimetype:`application/json` or :mimetype:`application/*+json`.\n \"\"\"\n mt = self.mimetype\n return (\n mt == \"application/json\"\n or mt.startswith(\"application/\")\n and mt.endswith(\"+json\")\n )\n",
"path": "src/werkzeug/sansio/request.py"
}
] | [
{
"content": "import typing as t\nfrom datetime import datetime\n\nfrom .._internal import _to_str\nfrom ..datastructures import Accept\nfrom ..datastructures import Authorization\nfrom ..datastructures import CharsetAccept\nfrom ..datastructures import ETags\nfrom ..datastructures import Headers\nfrom ..datastructures import HeaderSet\nfrom ..datastructures import IfRange\nfrom ..datastructures import ImmutableList\nfrom ..datastructures import ImmutableMultiDict\nfrom ..datastructures import LanguageAccept\nfrom ..datastructures import MIMEAccept\nfrom ..datastructures import MultiDict\nfrom ..datastructures import Range\nfrom ..datastructures import RequestCacheControl\nfrom ..http import parse_accept_header\nfrom ..http import parse_authorization_header\nfrom ..http import parse_cache_control_header\nfrom ..http import parse_cookie\nfrom ..http import parse_date\nfrom ..http import parse_etags\nfrom ..http import parse_if_range_header\nfrom ..http import parse_list_header\nfrom ..http import parse_options_header\nfrom ..http import parse_range_header\nfrom ..http import parse_set_header\nfrom ..urls import url_decode\nfrom ..user_agent import UserAgent\nfrom ..useragents import _UserAgent as _DeprecatedUserAgent\nfrom ..utils import cached_property\nfrom ..utils import header_property\nfrom .utils import get_current_url\nfrom .utils import get_host\n\n\nclass Request:\n \"\"\"Represents the non-IO parts of a HTTP request, including the\n method, URL info, and headers.\n\n This class is not meant for general use. It should only be used when\n implementing WSGI, ASGI, or another HTTP application spec. Werkzeug\n provides a WSGI implementation at :cls:`werkzeug.wrappers.Request`.\n\n :param method: The method the request was made with, such as\n ``GET``.\n :param scheme: The URL scheme of the protocol the request used, such\n as ``https`` or ``wss``.\n :param server: The address of the server. ``(host, port)``,\n ``(path, None)`` for unix sockets, or ``None`` if not known.\n :param root_path: The prefix that the application is mounted under.\n This is prepended to generated URLs, but is not part of route\n matching.\n :param path: The path part of the URL after ``root_path``.\n :param query_string: The part of the URL after the \"?\".\n :param headers: The headers received with the request.\n :param remote_addr: The address of the client sending the request.\n\n .. versionadded:: 2.0\n \"\"\"\n\n #: The charset used to decode most data in the request.\n charset = \"utf-8\"\n\n #: the error handling procedure for errors, defaults to 'replace'\n encoding_errors = \"replace\"\n\n #: the class to use for `args` and `form`. The default is an\n #: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports\n #: multiple values per key. alternatively it makes sense to use an\n #: :class:`~werkzeug.datastructures.ImmutableOrderedMultiDict` which\n #: preserves order or a :class:`~werkzeug.datastructures.ImmutableDict`\n #: which is the fastest but only remembers the last key. It is also\n #: possible to use mutable structures, but this is not recommended.\n #:\n #: .. versionadded:: 0.6\n parameter_storage_class: t.Type[MultiDict] = ImmutableMultiDict\n\n #: The type to be used for dict values from the incoming WSGI\n #: environment. (For example for :attr:`cookies`.) By default an\n #: :class:`~werkzeug.datastructures.ImmutableMultiDict` is used.\n #:\n #: .. versionchanged:: 1.0.0\n #: Changed to ``ImmutableMultiDict`` to support multiple values.\n #:\n #: .. 
versionadded:: 0.6\n dict_storage_class: t.Type[MultiDict] = ImmutableMultiDict\n\n #: the type to be used for list values from the incoming WSGI environment.\n #: By default an :class:`~werkzeug.datastructures.ImmutableList` is used\n #: (for example for :attr:`access_list`).\n #:\n #: .. versionadded:: 0.6\n list_storage_class: t.Type[t.List] = ImmutableList\n\n user_agent_class = _DeprecatedUserAgent\n \"\"\"The class used and returned by the :attr:`user_agent` property to\n parse the header. Defaults to\n :class:`~werkzeug.user_agent.UserAgent`, which does no parsing. An\n extension can provide a subclass that uses a parser to provide other\n data.\n\n .. versionadded:: 2.0\n \"\"\"\n\n #: Valid host names when handling requests. By default all hosts are\n #: trusted, which means that whatever the client says the host is\n #: will be accepted.\n #:\n #: Because ``Host`` and ``X-Forwarded-Host`` headers can be set to\n #: any value by a malicious client, it is recommended to either set\n #: this property or implement similar validation in the proxy (if\n #: the application is being run behind one).\n #:\n #: .. versionadded:: 0.9\n trusted_hosts: t.Optional[t.List[str]] = None\n\n def __init__(\n self,\n method: str,\n scheme: str,\n server: t.Optional[t.Tuple[str, t.Optional[int]]],\n root_path: str,\n path: str,\n query_string: bytes,\n headers: Headers,\n remote_addr: t.Optional[str],\n ) -> None:\n #: The method the request was made with, such as ``GET``.\n self.method = method.upper()\n #: The URL scheme of the protocol the request used, such as\n #: ``https`` or ``wss``.\n self.scheme = scheme\n #: The address of the server. ``(host, port)``, ``(path, None)``\n #: for unix sockets, or ``None`` if not known.\n self.server = server\n #: The prefix that the application is mounted under, without a\n #: trailing slash. :attr:`path` comes after this.\n self.root_path = root_path.rstrip(\"/\")\n #: The path part of the URL after :attr:`root_path`. This is the\n #: path used for routing within the application.\n self.path = \"/\" + path.lstrip(\"/\")\n #: The part of the URL after the \"?\". This is the raw value, use\n #: :attr:`args` for the parsed values.\n self.query_string = query_string\n #: The headers received with the request.\n self.headers = headers\n #: The address of the client sending the request.\n self.remote_addr = remote_addr\n\n def __repr__(self) -> str:\n try:\n url = self.url\n except Exception as e:\n url = f\"(invalid URL: {e})\"\n\n return f\"<{type(self).__name__} {url!r} [{self.method}]>\"\n\n @property\n def url_charset(self) -> str:\n \"\"\"The charset that is assumed for URLs. Defaults to the value\n of :attr:`charset`.\n\n .. versionadded:: 0.6\n \"\"\"\n return self.charset\n\n @cached_property\n def args(self) -> \"MultiDict[str, str]\":\n \"\"\"The parsed URL parameters (the part in the URL after the question\n mark).\n\n By default an\n :class:`~werkzeug.datastructures.ImmutableMultiDict`\n is returned from this function. This can be changed by setting\n :attr:`parameter_storage_class` to a different type. 
This might\n be necessary if the order of the form data is important.\n \"\"\"\n return url_decode(\n self.query_string,\n self.url_charset,\n errors=self.encoding_errors,\n cls=self.parameter_storage_class,\n )\n\n @cached_property\n def access_route(self) -> t.List[str]:\n \"\"\"If a forwarded header exists this is a list of all ip addresses\n from the client ip to the last proxy server.\n \"\"\"\n if \"X-Forwarded-For\" in self.headers:\n return self.list_storage_class(\n parse_list_header(self.headers[\"X-Forwarded-For\"])\n )\n elif self.remote_addr is not None:\n return self.list_storage_class([self.remote_addr])\n return self.list_storage_class()\n\n @cached_property\n def full_path(self) -> str:\n \"\"\"Requested path, including the query string.\"\"\"\n return f\"{self.path}?{_to_str(self.query_string, self.url_charset)}\"\n\n @property\n def is_secure(self) -> bool:\n \"\"\"``True`` if the request was made with a secure protocol\n (HTTPS or WSS).\n \"\"\"\n return self.scheme in {\"https\", \"wss\"}\n\n @cached_property\n def url(self) -> str:\n \"\"\"The full request URL with the scheme, host, root path, path,\n and query string.\"\"\"\n return get_current_url(\n self.scheme, self.host, self.root_path, self.path, self.query_string\n )\n\n @cached_property\n def base_url(self) -> str:\n \"\"\"Like :attr:`url` but without the query string.\"\"\"\n return get_current_url(self.scheme, self.host, self.root_path, self.path)\n\n @cached_property\n def root_url(self) -> str:\n \"\"\"The request URL scheme, host, and root path. This is the root\n that the application is accessed from.\n \"\"\"\n return get_current_url(self.scheme, self.host, self.root_path)\n\n @cached_property\n def host_url(self) -> str:\n \"\"\"The request URL scheme and host only.\"\"\"\n return get_current_url(self.scheme, self.host)\n\n @cached_property\n def host(self) -> str:\n \"\"\"The host name the request was made to, including the port if\n it's non-standard. 
Validated with :attr:`trusted_hosts`.\n \"\"\"\n return get_host(\n self.scheme, self.headers.get(\"host\"), self.server, self.trusted_hosts\n )\n\n @cached_property\n def cookies(self) -> \"ImmutableMultiDict[str, str]\":\n \"\"\"A :class:`dict` with the contents of all cookies transmitted with\n the request.\"\"\"\n wsgi_combined_cookie = \";\".join(self.headers.getlist(\"Cookie\"))\n return parse_cookie( # type: ignore\n wsgi_combined_cookie,\n self.charset,\n self.encoding_errors,\n cls=self.dict_storage_class,\n )\n\n # Common Descriptors\n\n content_type = header_property[str](\n \"Content-Type\",\n doc=\"\"\"The Content-Type entity-header field indicates the media\n type of the entity-body sent to the recipient or, in the case of\n the HEAD method, the media type that would have been sent had\n the request been a GET.\"\"\",\n read_only=True,\n )\n\n @cached_property\n def content_length(self) -> t.Optional[int]:\n \"\"\"The Content-Length entity-header field indicates the size of the\n entity-body in bytes or, in the case of the HEAD method, the size of\n the entity-body that would have been sent had the request been a\n GET.\n \"\"\"\n if self.headers.get(\"Transfer-Encoding\", \"\") == \"chunked\":\n return None\n\n content_length = self.headers.get(\"Content-Length\")\n if content_length is not None:\n try:\n return max(0, int(content_length))\n except (ValueError, TypeError):\n pass\n\n return None\n\n content_encoding = header_property[str](\n \"Content-Encoding\",\n doc=\"\"\"The Content-Encoding entity-header field is used as a\n modifier to the media-type. When present, its value indicates\n what additional content codings have been applied to the\n entity-body, and thus what decoding mechanisms must be applied\n in order to obtain the media-type referenced by the Content-Type\n header field.\n\n .. versionadded:: 0.9\"\"\",\n read_only=True,\n )\n content_md5 = header_property[str](\n \"Content-MD5\",\n doc=\"\"\"The Content-MD5 entity-header field, as defined in\n RFC 1864, is an MD5 digest of the entity-body for the purpose of\n providing an end-to-end message integrity check (MIC) of the\n entity-body. (Note: a MIC is good for detecting accidental\n modification of the entity-body in transit, but is not proof\n against malicious attacks.)\n\n .. versionadded:: 0.9\"\"\",\n read_only=True,\n )\n referrer = header_property[str](\n \"Referer\",\n doc=\"\"\"The Referer[sic] request-header field allows the client\n to specify, for the server's benefit, the address (URI) of the\n resource from which the Request-URI was obtained (the\n \"referrer\", although the header field is misspelled).\"\"\",\n read_only=True,\n )\n date = header_property(\n \"Date\",\n None,\n parse_date,\n doc=\"\"\"The Date general-header field represents the date and\n time at which the message was originated, having the same\n semantics as orig-date in RFC 822.\n\n .. 
versionchanged:: 2.0\n The datetime object is timezone-aware.\n \"\"\",\n read_only=True,\n )\n max_forwards = header_property(\n \"Max-Forwards\",\n None,\n int,\n doc=\"\"\"The Max-Forwards request-header field provides a\n mechanism with the TRACE and OPTIONS methods to limit the number\n of proxies or gateways that can forward the request to the next\n inbound server.\"\"\",\n read_only=True,\n )\n\n def _parse_content_type(self) -> None:\n if not hasattr(self, \"_parsed_content_type\"):\n self._parsed_content_type = parse_options_header(\n self.headers.get(\"Content-Type\", \"\")\n )\n\n @property\n def mimetype(self) -> str:\n \"\"\"Like :attr:`content_type`, but without parameters (eg, without\n charset, type etc.) and always lowercase. For example if the content\n type is ``text/HTML; charset=utf-8`` the mimetype would be\n ``'text/html'``.\n \"\"\"\n self._parse_content_type()\n return self._parsed_content_type[0].lower()\n\n @property\n def mimetype_params(self) -> t.Dict[str, str]:\n \"\"\"The mimetype parameters as dict. For example if the content\n type is ``text/html; charset=utf-8`` the params would be\n ``{'charset': 'utf-8'}``.\n \"\"\"\n self._parse_content_type()\n return self._parsed_content_type[1]\n\n @cached_property\n def pragma(self) -> HeaderSet:\n \"\"\"The Pragma general-header field is used to include\n implementation-specific directives that might apply to any recipient\n along the request/response chain. All pragma directives specify\n optional behavior from the viewpoint of the protocol; however, some\n systems MAY require that behavior be consistent with the directives.\n \"\"\"\n return parse_set_header(self.headers.get(\"Pragma\", \"\"))\n\n # Accept\n\n @cached_property\n def accept_mimetypes(self) -> MIMEAccept:\n \"\"\"List of mimetypes this client supports as\n :class:`~werkzeug.datastructures.MIMEAccept` object.\n \"\"\"\n return parse_accept_header(self.headers.get(\"Accept\"), MIMEAccept)\n\n @cached_property\n def accept_charsets(self) -> CharsetAccept:\n \"\"\"List of charsets this client supports as\n :class:`~werkzeug.datastructures.CharsetAccept` object.\n \"\"\"\n return parse_accept_header(self.headers.get(\"Accept-Charset\"), CharsetAccept)\n\n @cached_property\n def accept_encodings(self) -> Accept:\n \"\"\"List of encodings this client accepts. Encodings in a HTTP term\n are compression encodings such as gzip. For charsets have a look at\n :attr:`accept_charset`.\n \"\"\"\n return parse_accept_header(self.headers.get(\"Accept-Encoding\"))\n\n @cached_property\n def accept_languages(self) -> LanguageAccept:\n \"\"\"List of languages this client accepts as\n :class:`~werkzeug.datastructures.LanguageAccept` object.\n\n .. 
versionchanged 0.5\n In previous versions this was a regular\n :class:`~werkzeug.datastructures.Accept` object.\n \"\"\"\n return parse_accept_header(self.headers.get(\"Accept-Language\"), LanguageAccept)\n\n # ETag\n\n @cached_property\n def cache_control(self) -> RequestCacheControl:\n \"\"\"A :class:`~werkzeug.datastructures.RequestCacheControl` object\n for the incoming cache control headers.\n \"\"\"\n cache_control = self.headers.get(\"Cache-Control\")\n return parse_cache_control_header(cache_control, None, RequestCacheControl)\n\n @cached_property\n def if_match(self) -> ETags:\n \"\"\"An object containing all the etags in the `If-Match` header.\n\n :rtype: :class:`~werkzeug.datastructures.ETags`\n \"\"\"\n return parse_etags(self.headers.get(\"If-Match\"))\n\n @cached_property\n def if_none_match(self) -> ETags:\n \"\"\"An object containing all the etags in the `If-None-Match` header.\n\n :rtype: :class:`~werkzeug.datastructures.ETags`\n \"\"\"\n return parse_etags(self.headers.get(\"If-None-Match\"))\n\n @cached_property\n def if_modified_since(self) -> t.Optional[datetime]:\n \"\"\"The parsed `If-Modified-Since` header as a datetime object.\n\n .. versionchanged:: 2.0\n The datetime object is timezone-aware.\n \"\"\"\n return parse_date(self.headers.get(\"If-Modified-Since\"))\n\n @cached_property\n def if_unmodified_since(self) -> t.Optional[datetime]:\n \"\"\"The parsed `If-Unmodified-Since` header as a datetime object.\n\n .. versionchanged:: 2.0\n The datetime object is timezone-aware.\n \"\"\"\n return parse_date(self.headers.get(\"If-Unmodified-Since\"))\n\n @cached_property\n def if_range(self) -> IfRange:\n \"\"\"The parsed ``If-Range`` header.\n\n .. versionchanged:: 2.0\n ``IfRange.date`` is timezone-aware.\n\n .. versionadded:: 0.7\n \"\"\"\n return parse_if_range_header(self.headers.get(\"If-Range\"))\n\n @cached_property\n def range(self) -> t.Optional[Range]:\n \"\"\"The parsed `Range` header.\n\n .. versionadded:: 0.7\n\n :rtype: :class:`~werkzeug.datastructures.Range`\n \"\"\"\n return parse_range_header(self.headers.get(\"Range\"))\n\n # User Agent\n\n @cached_property\n def user_agent(self) -> UserAgent:\n \"\"\"The user agent. Use ``user_agent.string`` to get the header\n value. Set :attr:`user_agent_class` to a subclass of\n :class:`~werkzeug.user_agent.UserAgent` to provide parsing for\n the other properties or other extended data.\n\n .. versionchanged:: 2.0\n The built in parser is deprecated and will be removed in\n Werkzeug 2.1. A ``UserAgent`` subclass must be set to parse\n data from the string.\n \"\"\"\n return self.user_agent_class(self.headers.get(\"User-Agent\", \"\"))\n\n # Authorization\n\n @cached_property\n def authorization(self) -> t.Optional[Authorization]:\n \"\"\"The `Authorization` object in parsed form.\"\"\"\n return parse_authorization_header(self.headers.get(\"Authorization\"))\n\n # CORS\n\n origin = header_property[str](\n \"Origin\",\n doc=(\n \"The host that the request originated from. Set\"\n \" :attr:`~CORSResponseMixin.access_control_allow_origin` on\"\n \" the response to indicate which origins are allowed.\"\n ),\n read_only=True,\n )\n\n access_control_request_headers = header_property(\n \"Access-Control-Request-Headers\",\n load_func=parse_set_header,\n doc=(\n \"Sent with a preflight request to indicate which headers\"\n \" will be sent with the cross origin request. 
Set\"\n \" :attr:`~CORSResponseMixin.access_control_allow_headers`\"\n \" on the response to indicate which headers are allowed.\"\n ),\n read_only=True,\n )\n\n access_control_request_method = header_property[str](\n \"Access-Control-Request-Method\",\n doc=(\n \"Sent with a preflight request to indicate which method\"\n \" will be used for the cross origin request. Set\"\n \" :attr:`~CORSResponseMixin.access_control_allow_methods`\"\n \" on the response to indicate which methods are allowed.\"\n ),\n read_only=True,\n )\n\n @property\n def is_json(self) -> bool:\n \"\"\"Check if the mimetype indicates JSON data, either\n :mimetype:`application/json` or :mimetype:`application/*+json`.\n \"\"\"\n mt = self.mimetype\n return (\n mt == \"application/json\"\n or mt.startswith(\"application/\")\n and mt.endswith(\"+json\")\n )\n",
"path": "src/werkzeug/sansio/request.py"
}
] | diff --git a/CHANGES.rst b/CHANGES.rst
index 85ccae878..f8a2d0f93 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -14,6 +14,7 @@ Unreleased
decorated with it report the correct type. :issue:`2113`
- Fix multipart parsing bug when boundary contains special regex
characters. :issue:`2125`
+- Enhance type annotation for ``headers.get``. :issue:`2128`
Version 2.0.0
diff --git a/src/werkzeug/datastructures.pyi b/src/werkzeug/datastructures.pyi
index 7279d3a73..292e1a262 100644
--- a/src/werkzeug/datastructures.pyi
+++ b/src/werkzeug/datastructures.pyi
@@ -220,6 +220,8 @@ class Headers(Dict[str, str]):
def __getitem__(self, key: str, _get_mode: Literal[True] = ...) -> str: ...
def __eq__(self, other: object) -> bool: ...
@overload # type: ignore
+ def get(self, key: str, default: str) -> str: ...
+ @overload
def get(self, key: str, default: Optional[str] = None) -> Optional[str]: ...
@overload
def get(
diff --git a/src/werkzeug/sansio/request.py b/src/werkzeug/sansio/request.py
index 6f2bceb16..2c21a2134 100644
--- a/src/werkzeug/sansio/request.py
+++ b/src/werkzeug/sansio/request.py
@@ -491,7 +491,7 @@ def user_agent(self) -> UserAgent:
Werkzeug 2.1. A ``UserAgent`` subclass must be set to parse
data from the string.
"""
- return self.user_agent_class(t.cast(str, self.headers.get("User-Agent", "")))
+ return self.user_agent_class(self.headers.get("User-Agent", ""))
# Authorization
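The substance of this change is the extra `@overload` on `Headers.get`: once a `str` default narrows the return type to `str`, the `t.cast` in `user_agent` becomes redundant. Below is a minimal, self-contained sketch of the pattern; it is a toy class, not werkzeug's actual stub, which lives in `datastructures.pyi` and carries more overloads.

```python
import typing as t


class Headers:
    """Toy stand-in for werkzeug's Headers, only to demonstrate the overloads."""

    def __init__(self, data: t.Dict[str, str]) -> None:
        self._data = data

    @t.overload
    def get(self, key: str, default: str) -> str: ...
    @t.overload
    def get(self, key: str, default: t.Optional[str] = None) -> t.Optional[str]: ...
    def get(self, key: str, default: t.Optional[str] = None) -> t.Optional[str]:
        # Runtime behavior is unchanged; only the type checker sees the overloads.
        return self._data.get(key, default)


headers = Headers({"Host": "example.org"})
ua: str = headers.get("User-Agent", "")             # str default -> inferred str
maybe: t.Optional[str] = headers.get("User-Agent")  # no default -> may be None
```

Overload order matters here: the `str`-default overload is listed first so a checker selects it before falling through to the `Optional[str]` variant, mirroring the order in the diff above.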
|
spyder-ide__spyder-3909 | Profiler error when no filename is passed to it
Hello!
* Spyder Version: Git version (4dev)
* Python Version: 3.5
* Operating system: Arch Linux
When I click on Run > Profile, it produces this error:
```
Traceback (most recent call last):
  File "/usr/lib/python3.5/site-packages/spyder_profiler/profiler.py", line 138, in run_profiler
    self.analyze(self.main.editor.get_current_filename())
  File "/usr/lib/python3.5/site-packages/spyder_profiler/profiler.py", line 155, in analyze
    pythonpath=pythonpath)
TypeError: analyze() got multiple values for argument 'wdir'
```
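The traceback is the classic symptom of passing `self` explicitly to a bound method: in `self.profiler.analyze(self, filename, wdir=wdir, ...)`, the plugin instance lands in the `filename` slot, `filename` slides into the positional `wdir` slot, and the `wdir=` keyword then collides with it. A minimal sketch of the same failure follows; the signature is inferred from the call site, not taken verbatim from Spyder's actual `ProfilerWidget`.

```python
class ProfilerWidget:
    def analyze(self, filename, wdir=None, args=None, pythonpath=None):
        return filename, wdir


widget = ProfilerWidget()
plugin = object()  # stands in for the Profiler plugin passing itself by mistake

widget.analyze("script.py", wdir="/tmp")  # fine: filename and wdir as intended
try:
    # The bound call already supplies self, so `plugin` binds to `filename`,
    # "script.py" binds positionally to `wdir`, and wdir= duplicates it.
    widget.analyze(plugin, "script.py", wdir="/tmp")
except TypeError as exc:
    print(exc)  # analyze() got multiple values for argument 'wdir'
```

Dropping the stray `self` is exactly the one-line change in this record's diff.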
| [
{
"content": "# -*- coding:utf-8 -*-\n#\n# Copyright © Spyder Project Contributors\n# based on p_pylint.py by Pierre Raybaut\n#\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"Profiler Plugin.\"\"\"\n\n# Standard library imports\nimport os.path as osp\n\n# Third party imports\nfrom qtpy.QtCore import Qt\nfrom qtpy.QtWidgets import QGroupBox, QLabel, QVBoxLayout\n\n# Local imports\nfrom spyder.config.base import get_translation\nfrom spyder.api.plugins import SpyderPluginWidget\nfrom spyder.api.preferences import PluginConfigPage\nfrom spyder.plugins.runconfig import get_run_configuration\nfrom spyder.utils import icon_manager as ima\nfrom spyder.utils.qthelpers import create_action\nfrom .widgets.profilergui import (ProfilerWidget, is_profiler_installed)\n\n\n_ = get_translation(\"profiler\", \"spyder_profiler\")\n\n\nclass ProfilerConfigPage(PluginConfigPage):\n def setup_page(self):\n results_group = QGroupBox(_(\"Results\"))\n results_label1 = QLabel(_(\"Profiler plugin results \"\n \"(the output of python's profile/cProfile)\\n\"\n \"are stored here:\"))\n results_label1.setWordWrap(True)\n\n # Warning: do not try to regroup the following QLabel contents with\n # widgets above -- this string was isolated here in a single QLabel\n # on purpose: to fix Issue 863\n results_label2 = QLabel(ProfilerWidget.DATAPATH)\n\n results_label2.setTextInteractionFlags(Qt.TextSelectableByMouse)\n results_label2.setWordWrap(True)\n\n results_layout = QVBoxLayout()\n results_layout.addWidget(results_label1)\n results_layout.addWidget(results_label2)\n results_group.setLayout(results_layout)\n\n vlayout = QVBoxLayout()\n vlayout.addWidget(results_group)\n vlayout.addStretch(1)\n self.setLayout(vlayout)\n\n\nclass Profiler(SpyderPluginWidget):\n \"\"\"Profiler (after python's profile and pstats).\"\"\"\n\n CONF_SECTION = 'profiler'\n CONFIGWIDGET_CLASS = ProfilerConfigPage\n\n def __init__(self, parent=None):\n SpyderPluginWidget.__init__(self, parent)\n\n max_entries = self.get_option('max_entries', 50)\n self.profiler = ProfilerWidget(self, max_entries)\n\n layout = QVBoxLayout()\n layout.addWidget(self.profiler)\n self.setLayout(layout)\n\n # Initialize plugin\n self.initialize_plugin()\n \n #------ SpyderPluginWidget API --------------------------------------------- \n def get_plugin_title(self):\n \"\"\"Return widget title\"\"\"\n return _(\"Profiler\")\n\n def get_plugin_icon(self):\n \"\"\"Return widget icon\"\"\"\n path = osp.join(self.PLUGIN_PATH, self.IMG_PATH)\n return ima.icon('profiler', icon_path=path)\n\n def get_focus_widget(self):\n \"\"\"\n Return the widget to give focus to when\n this plugin's dockwidget is raised on top-level\n \"\"\"\n return self.profiler.datatree\n \n def get_plugin_actions(self):\n \"\"\"Return a list of actions related to plugin\"\"\"\n return []\n\n def on_first_registration(self):\n \"\"\"Action to be performed on first plugin registration\"\"\"\n self.main.tabify_plugins(self.main.help, self)\n self.dockwidget.hide()\n\n def register_plugin(self):\n \"\"\"Register plugin in Spyder's main window\"\"\"\n self.profiler.datatree.sig_edit_goto.connect(self.main.editor.load)\n self.profiler.redirect_stdio.connect(\n self.main.redirect_internalshell_stdio)\n self.main.add_dockwidget(self)\n\n profiler_act = create_action(self, _(\"Profile\"),\n icon=self.get_plugin_icon(),\n triggered=self.run_profiler)\n profiler_act.setEnabled(is_profiler_installed())\n self.register_shortcut(profiler_act, context=\"Profiler\",\n 
name=\"Run profiler\")\n \n self.main.run_menu_actions += [profiler_act]\n self.main.editor.pythonfile_dependent_actions += [profiler_act]\n\n def refresh_plugin(self):\n \"\"\"Refresh profiler widget\"\"\"\n #self.remove_obsolete_items() # FIXME: not implemented yet\n \n def closing_plugin(self, cancelable=False):\n \"\"\"Perform actions before parent main window is closed\"\"\"\n return True\n \n def apply_plugin_settings(self, options):\n \"\"\"Apply configuration file's plugin settings\"\"\"\n # The history depth option will be applied at \n # next Spyder startup, which is soon enough\n pass\n \n #------ Public API --------------------------------------------------------- \n def run_profiler(self):\n \"\"\"Run profiler\"\"\"\n if self.main.editor.save():\n self.analyze(self.main.editor.get_current_filename())\n\n def analyze(self, filename):\n \"\"\"Reimplement analyze method\"\"\"\n if self.dockwidget and not self.ismaximized:\n self.dockwidget.setVisible(True)\n self.dockwidget.setFocus()\n self.dockwidget.raise_()\n pythonpath = self.main.get_spyder_pythonpath()\n runconf = get_run_configuration(filename)\n wdir, args = None, []\n if runconf is not None:\n if runconf.wdir_enabled:\n wdir = runconf.wdir\n if runconf.args_enabled:\n args = runconf.args\n self.profiler.analyze(self, filename, wdir=wdir, args=args,\n pythonpath=pythonpath)\n",
"path": "spyder_profiler/profiler.py"
}
] | [
{
"content": "# -*- coding:utf-8 -*-\n#\n# Copyright © Spyder Project Contributors\n# based on p_pylint.py by Pierre Raybaut\n#\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"Profiler Plugin.\"\"\"\n\n# Standard library imports\nimport os.path as osp\n\n# Third party imports\nfrom qtpy.QtCore import Qt\nfrom qtpy.QtWidgets import QGroupBox, QLabel, QVBoxLayout\n\n# Local imports\nfrom spyder.config.base import get_translation\nfrom spyder.api.plugins import SpyderPluginWidget\nfrom spyder.api.preferences import PluginConfigPage\nfrom spyder.plugins.runconfig import get_run_configuration\nfrom spyder.utils import icon_manager as ima\nfrom spyder.utils.qthelpers import create_action\nfrom .widgets.profilergui import (ProfilerWidget, is_profiler_installed)\n\n\n_ = get_translation(\"profiler\", \"spyder_profiler\")\n\n\nclass ProfilerConfigPage(PluginConfigPage):\n def setup_page(self):\n results_group = QGroupBox(_(\"Results\"))\n results_label1 = QLabel(_(\"Profiler plugin results \"\n \"(the output of python's profile/cProfile)\\n\"\n \"are stored here:\"))\n results_label1.setWordWrap(True)\n\n # Warning: do not try to regroup the following QLabel contents with\n # widgets above -- this string was isolated here in a single QLabel\n # on purpose: to fix Issue 863\n results_label2 = QLabel(ProfilerWidget.DATAPATH)\n\n results_label2.setTextInteractionFlags(Qt.TextSelectableByMouse)\n results_label2.setWordWrap(True)\n\n results_layout = QVBoxLayout()\n results_layout.addWidget(results_label1)\n results_layout.addWidget(results_label2)\n results_group.setLayout(results_layout)\n\n vlayout = QVBoxLayout()\n vlayout.addWidget(results_group)\n vlayout.addStretch(1)\n self.setLayout(vlayout)\n\n\nclass Profiler(SpyderPluginWidget):\n \"\"\"Profiler (after python's profile and pstats).\"\"\"\n\n CONF_SECTION = 'profiler'\n CONFIGWIDGET_CLASS = ProfilerConfigPage\n\n def __init__(self, parent=None):\n SpyderPluginWidget.__init__(self, parent)\n\n max_entries = self.get_option('max_entries', 50)\n self.profiler = ProfilerWidget(self, max_entries)\n\n layout = QVBoxLayout()\n layout.addWidget(self.profiler)\n self.setLayout(layout)\n\n # Initialize plugin\n self.initialize_plugin()\n \n #------ SpyderPluginWidget API --------------------------------------------- \n def get_plugin_title(self):\n \"\"\"Return widget title\"\"\"\n return _(\"Profiler\")\n\n def get_plugin_icon(self):\n \"\"\"Return widget icon\"\"\"\n path = osp.join(self.PLUGIN_PATH, self.IMG_PATH)\n return ima.icon('profiler', icon_path=path)\n\n def get_focus_widget(self):\n \"\"\"\n Return the widget to give focus to when\n this plugin's dockwidget is raised on top-level\n \"\"\"\n return self.profiler.datatree\n \n def get_plugin_actions(self):\n \"\"\"Return a list of actions related to plugin\"\"\"\n return []\n\n def on_first_registration(self):\n \"\"\"Action to be performed on first plugin registration\"\"\"\n self.main.tabify_plugins(self.main.help, self)\n self.dockwidget.hide()\n\n def register_plugin(self):\n \"\"\"Register plugin in Spyder's main window\"\"\"\n self.profiler.datatree.sig_edit_goto.connect(self.main.editor.load)\n self.profiler.redirect_stdio.connect(\n self.main.redirect_internalshell_stdio)\n self.main.add_dockwidget(self)\n\n profiler_act = create_action(self, _(\"Profile\"),\n icon=self.get_plugin_icon(),\n triggered=self.run_profiler)\n profiler_act.setEnabled(is_profiler_installed())\n self.register_shortcut(profiler_act, context=\"Profiler\",\n 
name=\"Run profiler\")\n \n self.main.run_menu_actions += [profiler_act]\n self.main.editor.pythonfile_dependent_actions += [profiler_act]\n\n def refresh_plugin(self):\n \"\"\"Refresh profiler widget\"\"\"\n #self.remove_obsolete_items() # FIXME: not implemented yet\n \n def closing_plugin(self, cancelable=False):\n \"\"\"Perform actions before parent main window is closed\"\"\"\n return True\n \n def apply_plugin_settings(self, options):\n \"\"\"Apply configuration file's plugin settings\"\"\"\n # The history depth option will be applied at \n # next Spyder startup, which is soon enough\n pass\n \n #------ Public API --------------------------------------------------------- \n def run_profiler(self):\n \"\"\"Run profiler\"\"\"\n if self.main.editor.save():\n self.analyze(self.main.editor.get_current_filename())\n\n def analyze(self, filename):\n \"\"\"Reimplement analyze method\"\"\"\n if self.dockwidget and not self.ismaximized:\n self.dockwidget.setVisible(True)\n self.dockwidget.setFocus()\n self.dockwidget.raise_()\n pythonpath = self.main.get_spyder_pythonpath()\n runconf = get_run_configuration(filename)\n wdir, args = None, []\n if runconf is not None:\n if runconf.wdir_enabled:\n wdir = runconf.wdir\n if runconf.args_enabled:\n args = runconf.args\n self.profiler.analyze(filename, wdir=wdir, args=args,\n pythonpath=pythonpath)\n",
"path": "spyder_profiler/profiler.py"
}
] | diff --git a/spyder_profiler/profiler.py b/spyder_profiler/profiler.py
index 9de18dab36c..5736ae9e5f7 100644
--- a/spyder_profiler/profiler.py
+++ b/spyder_profiler/profiler.py
@@ -151,5 +151,5 @@ def analyze(self, filename):
wdir = runconf.wdir
if runconf.args_enabled:
args = runconf.args
- self.profiler.analyze(self, filename, wdir=wdir, args=args,
+ self.profiler.analyze(filename, wdir=wdir, args=args,
pythonpath=pythonpath)
|
kymatio__kymatio-217 | `Scattering3D` crashes for `J != 2`
This was introduced by concatenation along the wrong axis in 36bfb11f29c0e7a3cddfc1eedc23ba84e9c17ea8. Instead of concatenating along axis 1 (that is, the spatial dimension), the concatenation should be along axis 2 (that is, the filter index/scale dimension).
It was not caught by the test suite since all tests use `J = 2`, the one value for which the number of first-order scales (3) equals the number of second-order scale pairs (2+1). We should modify the test suite so that it no longer relies on this coincidence.
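Concretely: for scales `j = 0..J` there are `J + 1` first-order coefficients per degree `l` but `J(J + 1)/2` second-order pairs `(j_1, j_2)` with `j_2 > j_1`, and the two counts coincide only at `J = 2`. A shape-only sketch of why the wrong `torch.cat` axis is silent in that case; tensor sizes are illustrative, following the `(batch, coefficients, scales, l)` layout built in `forward`.

```python
import torch

B, K, L = 2, 8, 2  # batch, flattened spatial coefficients, max degree (illustrative)

for J in (2, 3):
    n1 = J + 1             # first-order scales: j_1 in 0..J
    n2 = J * (J + 1) // 2  # second-order pairs: j_2 > j_1
    s1 = torch.zeros(B, K, n1, L + 1)
    s2 = torch.zeros(B, K, n2, L + 1)
    torch.cat([s1, s2], dim=-2)  # fixed axis: the scale dimension, works for any J
    try:
        torch.cat([s1, s2], dim=1)  # buggy axis: the spatial dimension
        print(f"J={J}: wrong axis goes unnoticed, n1 == n2 == {n1}")
    except RuntimeError:
        print(f"J={J}: wrong axis fails, {n1} vs {n2} scales")
```

This is also why the fix in this record changes the final `torch.cat(..., 1)` to `torch.cat(..., -2)`.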
| [
{
"content": "# Authors: Louis Thiry, Georgios Exarchakis\n# Scientific Ancestry: Louis Thiry, Georgios Exarchakis, Matthew Hirn, Michael Eickenberg\n\n__all__ = ['Scattering3D']\n\nimport torch\nfrom .utils import compute_integrals, subsample\n \nfrom .backend import cdgmm3d, fft, complex_modulus, to_complex\nfrom .filter_bank import solid_harmonic_filter_bank, gaussian_filter_bank\n\n# TODO remove \"import backend\" below after implementing skcuda backend\nfrom kymatio.scattering3d import backend\n\n\nclass Scattering3D(object):\n \"\"\"3D Solid Harmonic scattering .\n\n This class implements solid harmonic scattering on an input 3D image.\n For details see https://arxiv.org/abs/1805.00571.\n\n Instantiates and initializes a 3d solid harmonic scattering object.\n\n Parameters\n ----------\n J: int\n number of scales\n shape: tuple of int\n shape (M, N, O) of the input signal\n L: int\n number of l values\n\n \"\"\"\n def __init__(self, J, shape, L, sigma_0):\n if backend.NAME == \"skcuda\":\n raise NotImplementedError(backend.skcuda_notimplementederror)\n super(Scattering3D, self).__init__()\n self.J = J\n self.shape = shape\n self.L = L\n self.sigma_0 = sigma_0\n\n self.build()\n\n def build(self):\n self.M, self.N, self.O = self.shape\n self.filters = solid_harmonic_filter_bank(\n self.M, self.N, self.O, self.J, self.L, self.sigma_0)\n self.gaussian_filters = gaussian_filter_bank(\n self.M, self.N, self.O, self.J + 1, self.sigma_0)\n\n def _fft_convolve(self, input_array, filter_array):\n \"\"\"\n Computes the fourier space convolution of the input_array, \n given in signal space, with a filter, given in fourier space.\n\n Parameters\n ----------\n\n input_array: torch tensor\n size (batchsize, M, N, O, 2)\n filter_array: torch tensor\n size (M, N, O, 2)\n\n Returns\n -------\n\n output: the result of the convolution of input_array with filter\n\n \"\"\"\n return fft(cdgmm3d(fft(input_array, inverse=False), filter_array), inverse=True)\n\n def _low_pass_filter(self, input_array, j):\n \"\"\"\n Computes the convolution of input_array with a lowpass filter phi_j\n\n Parameters\n ----------\n input_array : tensor\n size (batchsize, M, N, O, 2)\n\n j: int \n\n Returns\n -------\n output: the result of input_array :math:`\\\\star phi_J`\n\n \"\"\"\n cuda = isinstance(input_array, torch.cuda.FloatTensor)\n low_pass = self.gaussian_filters[j]\n if cuda:\n low_pass = low_pass.cuda()\n return self._fft_convolve(input_array, low_pass)\n\n def _compute_standard_scattering_coefs(self, input_array):\n \"\"\"\n Computes the convolution of input_array with a lowpass filter phi_J \n and downsamples by a factor J.\n\n Parameters\n ----------\n input_array: torch tensor of size (batchsize, M, N, O, 2)\n\n Returns\n -------\n output: the result of input_array \\\\star phi_J downsampled by a factor J\n\n \"\"\"\n convolved_input = self._low_pass_filter(input_array, self.J)\n return subsample(convolved_input, self.J).view(\n input_array.size(0), -1, 1)\n\n def _compute_local_scattering_coefs(self, input_array, points, j):\n \"\"\"\n Computes the convolution of input_array with a lowpass filter phi_j and \n and returns the value of the output at particular points\n\n Parameters\n ----------\n input_array: torch tensor\n size (batchsize, M, N, O, 2)\n points: torch tensor\n size (batchsize, number of points, 3)\n j: int\n the lowpass scale j of phi_j\n\n Returns\n -------\n output: torch tensor of size (batchsize, number of points, 1) with\n the values of the lowpass filtered moduli at the points given.\n\n 
\"\"\"\n local_coefs = torch.zeros(input_array.size(0), points.size(1), 1)\n convolved_input = self._low_pass_filter(input_array, j+1)\n for i in range(input_array.size(0)):\n for j in range(points[i].size(0)):\n x, y, z = points[i, j, 0], points[i, j, 1], points[i, j, 2]\n local_coefs[i, j, 0] = convolved_input[\n i, int(x), int(y), int(z), 0]\n return local_coefs\n\n def _compute_scattering_coefs(self, input_array, method, args, j):\n \"\"\"\n Computes the scattering coefficients out with any of the three methods \n 'standard', 'local', 'integral'\n\n Parameters\n ----------\n input_array : torch tensor\n size (batchsize, M, N, O, 2)\n method : string\n method name with three possible values (\"standard\", \"local\", \"integral\")\n args : dict\n method specific arguments. It methods is equal to \"standard\", then one\n expects the array args['integral_powers'] to be a list that holds\n the exponents the moduli. It should be raised to before calculating\n the integrals. If method is equal to \"local\", args['points'] must contain\n a torch tensor of size (batchsize, number of points, 3) the points in\n coordinate space at which you want the moduli sampled\n j : int\n lowpass scale j of :math:`\\\\phi_j`\n\n Returns\n -------\n output: torch tensor \n The scattering coefficients as given by different methods.\n\n \"\"\"\n methods = ['standard', 'local', 'integral']\n if (not method in methods):\n raise(ValueError('method must be in {}'.format(methods)))\n if method == 'integral':\n return compute_integrals(input_array[..., 0],\n args['integral_powers'])\n elif method == 'local':\n return self._compute_local_scattering_coefs(input_array,\n args['points'], j)\n elif method == 'standard':\n return self._compute_standard_scattering_coefs(input_array)\n\n def _rotation_covariant_convolution_and_modulus(self, input_array, l, j):\n \"\"\"\n Computes the convolution with a set of solid harmonics of scale j and \n degree l and returns the square root of their squared sum over m\n\n Parameters\n ----------\n input_array : tensor\n size (batchsize, M, N, O, 2)\n l : int\n solid harmonic degree l\n\n j : int\n solid harmonic scale j\n\n Returns\n -------\n\n output : torch tensor\n tensor of the same size as input_array. It holds the output of\n the operation::\n\n .. math:: \\\\sqrt{\\\\sum_m (\\\\text{input}_\\\\text{array} \\\\star \\\\psi_{j,l,m})^2)}\n\n which is covariant to 3D translations and rotations\n\n \"\"\"\n cuda = input_array.is_cuda\n filters_l_j = self.filters[l][j]\n if cuda:\n filters_l_j = filters_l_j.cuda()\n convolution_modulus = input_array.new(input_array.size()).fill_(0)\n for m in range(filters_l_j.size(0)):\n convolution_modulus[..., 0] += (self._fft_convolve(\n input_array, filters_l_j[m]) ** 2).sum(-1)\n return torch.sqrt(convolution_modulus)\n\n def _convolution_and_modulus(self, input_array, l, j, m=0):\n \"\"\"\n Computes the convolution with a set of solid harmonics of scale j and \n degree l and returns the square root of their squared sum over m\n\n Parameters\n ----------\n input_array: torch tensor\n size (batchsize, M, N, O, 2)\n l : int\n solid harmonic degree l\n j : int\n solid harmonic scale j\n m : int, optional\n solid harmonic rank m (defaults to 0)\n\n Returns\n -------\n output: torch tensor \n tensor of the same size as input_array. It holds the output of the\n operation::\n\n .. 
math:: \\\\text{input}_\\\\text{array} \\\\star \\\\psi_{j,l,m})\n\n \"\"\"\n cuda = isinstance(input_array, torch.cuda.FloatTensor)\n filters_l_m_j = self.filters[l][j][m]\n if cuda:\n filters_l_m_j = filters_l_m_j.cuda()\n return complex_modulus(self._fft_convolve(input_array, filters_l_m_j))\n\n def _check_input(self, input_array):\n if not torch.is_tensor(input_array):\n raise(TypeError(\n 'The input should be a torch.cuda.FloatTensor, '\n 'a torch.FloatTensor or a torch.DoubleTensor'))\n\n if (not input_array.is_contiguous()):\n input_array = input_array.contiguous()\n\n if((input_array.size(-1) != self.O or input_array.size(-2) != self.N \n or input_array.size(-3) != self.M)):\n raise (RuntimeError(\n 'Tensor must be of spatial size (%i,%i,%i)!' % (\n self.M, self.N, self.O)))\n\n if (input_array.dim() != 4):\n raise (RuntimeError('Input tensor must be 4D'))\n\n def forward(self, input_array, order_2=True, rotation_covariant=True,\n method='standard', points=None, integral_powers=(.5, 1., 2.)):\n \"\"\"\n The forward pass of 3D solid harmonic scattering\n\n Parameters\n ----------\n input_array: torch tensor \n input of size (batchsize, M, N, O)\n order_2: bool, optional\n if set to False|True it also excludes|includes second order\n scattering coefficients (default: True).\n rotation_covariant: bool, optional\n if set to True the first order moduli take the form::\n\n .. math:: \\\\sqrt(\\\\sum_m (input_array \\\\star \\\\psi_{j,l,m})^2))\n\n if set to False the first order moduli take the form::\n\n .. math:: input_array \\\\star \\\\psi_{j,l,m})\n\n The second order moduli change analogously\n Defaut: True\n method: string, optional\n specifies the method for obtaining scattering coefficients\n (\"standard\",\"local\",\"integral\"). Default: \"standard\"\n points: array-like, optional\n List of locations in which to sample wavelet moduli. Used when\n method == 'local'\n\n integral_powers: array-like\n List of exponents to the power of which moduli are raised before\n integration. Used with method == 'standard', method == 'integral'\n\n Returns\n -------\n output: tuple | torch tensor\n if order_2 is false it returns a torch tensor with the\n first order scattering coefficients\n if order_2 is true it returns a tuple with two elements,\n the first and second order scattering coefficients\n\n \"\"\"\n self._check_input(input_array)\n if rotation_covariant:\n convolution_and_modulus = (\n self._rotation_covariant_convolution_and_modulus)\n else:\n convolution_and_modulus = self._convolution_and_modulus\n\n compute_scattering_coefs = self._compute_scattering_coefs\n\n s_order_1 = []\n s_order_2 = []\n _input = to_complex(input_array)\n\n method_args = dict(points=points, integral_powers=integral_powers)\n\n for l in range(self.L+1):\n s_order_1_l, s_order_2_l = [], []\n for j_1 in range(self.J+1):\n conv_modulus = convolution_and_modulus(_input, l, j_1)\n s_order_1_l.append(compute_scattering_coefs(\n conv_modulus, method, method_args, j_1))\n if not order_2:\n continue\n for j_2 in range(j_1+1, self.J+1):\n conv_modulus_2 = convolution_and_modulus(\n conv_modulus, l, j_2)\n s_order_2_l.append(compute_scattering_coefs(\n conv_modulus_2, method, method_args, j_2))\n s_order_1.append(torch.cat(s_order_1_l, -1))\n if order_2:\n s_order_2.append(torch.cat(s_order_2_l, -1))\n\n if order_2:\n return torch.cat(\n [torch.stack(s_order_1, dim=-1),\n torch.stack(s_order_2, dim=-1)], 1)\n else:\n return torch.stack(s_order_1, dim=-1)\n\n\n __call__ = forward\n\n",
"path": "kymatio/scattering3d/scattering3d.py"
}
] | [
{
"content": "# Authors: Louis Thiry, Georgios Exarchakis\n# Scientific Ancestry: Louis Thiry, Georgios Exarchakis, Matthew Hirn, Michael Eickenberg\n\n__all__ = ['Scattering3D']\n\nimport torch\nfrom .utils import compute_integrals, subsample\n \nfrom .backend import cdgmm3d, fft, complex_modulus, to_complex\nfrom .filter_bank import solid_harmonic_filter_bank, gaussian_filter_bank\n\n# TODO remove \"import backend\" below after implementing skcuda backend\nfrom kymatio.scattering3d import backend\n\n\nclass Scattering3D(object):\n \"\"\"3D Solid Harmonic scattering .\n\n This class implements solid harmonic scattering on an input 3D image.\n For details see https://arxiv.org/abs/1805.00571.\n\n Instantiates and initializes a 3d solid harmonic scattering object.\n\n Parameters\n ----------\n J: int\n number of scales\n shape: tuple of int\n shape (M, N, O) of the input signal\n L: int\n number of l values\n\n \"\"\"\n def __init__(self, J, shape, L, sigma_0):\n if backend.NAME == \"skcuda\":\n raise NotImplementedError(backend.skcuda_notimplementederror)\n super(Scattering3D, self).__init__()\n self.J = J\n self.shape = shape\n self.L = L\n self.sigma_0 = sigma_0\n\n self.build()\n\n def build(self):\n self.M, self.N, self.O = self.shape\n self.filters = solid_harmonic_filter_bank(\n self.M, self.N, self.O, self.J, self.L, self.sigma_0)\n self.gaussian_filters = gaussian_filter_bank(\n self.M, self.N, self.O, self.J + 1, self.sigma_0)\n\n def _fft_convolve(self, input_array, filter_array):\n \"\"\"\n Computes the fourier space convolution of the input_array, \n given in signal space, with a filter, given in fourier space.\n\n Parameters\n ----------\n\n input_array: torch tensor\n size (batchsize, M, N, O, 2)\n filter_array: torch tensor\n size (M, N, O, 2)\n\n Returns\n -------\n\n output: the result of the convolution of input_array with filter\n\n \"\"\"\n return fft(cdgmm3d(fft(input_array, inverse=False), filter_array), inverse=True)\n\n def _low_pass_filter(self, input_array, j):\n \"\"\"\n Computes the convolution of input_array with a lowpass filter phi_j\n\n Parameters\n ----------\n input_array : tensor\n size (batchsize, M, N, O, 2)\n\n j: int \n\n Returns\n -------\n output: the result of input_array :math:`\\\\star phi_J`\n\n \"\"\"\n cuda = isinstance(input_array, torch.cuda.FloatTensor)\n low_pass = self.gaussian_filters[j]\n if cuda:\n low_pass = low_pass.cuda()\n return self._fft_convolve(input_array, low_pass)\n\n def _compute_standard_scattering_coefs(self, input_array):\n \"\"\"\n Computes the convolution of input_array with a lowpass filter phi_J \n and downsamples by a factor J.\n\n Parameters\n ----------\n input_array: torch tensor of size (batchsize, M, N, O, 2)\n\n Returns\n -------\n output: the result of input_array \\\\star phi_J downsampled by a factor J\n\n \"\"\"\n convolved_input = self._low_pass_filter(input_array, self.J)\n return subsample(convolved_input, self.J).view(\n input_array.size(0), -1, 1)\n\n def _compute_local_scattering_coefs(self, input_array, points, j):\n \"\"\"\n Computes the convolution of input_array with a lowpass filter phi_j and \n and returns the value of the output at particular points\n\n Parameters\n ----------\n input_array: torch tensor\n size (batchsize, M, N, O, 2)\n points: torch tensor\n size (batchsize, number of points, 3)\n j: int\n the lowpass scale j of phi_j\n\n Returns\n -------\n output: torch tensor of size (batchsize, number of points, 1) with\n the values of the lowpass filtered moduli at the points given.\n\n 
\"\"\"\n local_coefs = torch.zeros(input_array.size(0), points.size(1), 1)\n convolved_input = self._low_pass_filter(input_array, j+1)\n for i in range(input_array.size(0)):\n for j in range(points[i].size(0)):\n x, y, z = points[i, j, 0], points[i, j, 1], points[i, j, 2]\n local_coefs[i, j, 0] = convolved_input[\n i, int(x), int(y), int(z), 0]\n return local_coefs\n\n def _compute_scattering_coefs(self, input_array, method, args, j):\n \"\"\"\n Computes the scattering coefficients out with any of the three methods \n 'standard', 'local', 'integral'\n\n Parameters\n ----------\n input_array : torch tensor\n size (batchsize, M, N, O, 2)\n method : string\n method name with three possible values (\"standard\", \"local\", \"integral\")\n args : dict\n method specific arguments. It methods is equal to \"standard\", then one\n expects the array args['integral_powers'] to be a list that holds\n the exponents the moduli. It should be raised to before calculating\n the integrals. If method is equal to \"local\", args['points'] must contain\n a torch tensor of size (batchsize, number of points, 3) the points in\n coordinate space at which you want the moduli sampled\n j : int\n lowpass scale j of :math:`\\\\phi_j`\n\n Returns\n -------\n output: torch tensor \n The scattering coefficients as given by different methods.\n\n \"\"\"\n methods = ['standard', 'local', 'integral']\n if (not method in methods):\n raise(ValueError('method must be in {}'.format(methods)))\n if method == 'integral':\n return compute_integrals(input_array[..., 0],\n args['integral_powers'])\n elif method == 'local':\n return self._compute_local_scattering_coefs(input_array,\n args['points'], j)\n elif method == 'standard':\n return self._compute_standard_scattering_coefs(input_array)\n\n def _rotation_covariant_convolution_and_modulus(self, input_array, l, j):\n \"\"\"\n Computes the convolution with a set of solid harmonics of scale j and \n degree l and returns the square root of their squared sum over m\n\n Parameters\n ----------\n input_array : tensor\n size (batchsize, M, N, O, 2)\n l : int\n solid harmonic degree l\n\n j : int\n solid harmonic scale j\n\n Returns\n -------\n\n output : torch tensor\n tensor of the same size as input_array. It holds the output of\n the operation::\n\n .. math:: \\\\sqrt{\\\\sum_m (\\\\text{input}_\\\\text{array} \\\\star \\\\psi_{j,l,m})^2)}\n\n which is covariant to 3D translations and rotations\n\n \"\"\"\n cuda = input_array.is_cuda\n filters_l_j = self.filters[l][j]\n if cuda:\n filters_l_j = filters_l_j.cuda()\n convolution_modulus = input_array.new(input_array.size()).fill_(0)\n for m in range(filters_l_j.size(0)):\n convolution_modulus[..., 0] += (self._fft_convolve(\n input_array, filters_l_j[m]) ** 2).sum(-1)\n return torch.sqrt(convolution_modulus)\n\n def _convolution_and_modulus(self, input_array, l, j, m=0):\n \"\"\"\n Computes the convolution with a set of solid harmonics of scale j and \n degree l and returns the square root of their squared sum over m\n\n Parameters\n ----------\n input_array: torch tensor\n size (batchsize, M, N, O, 2)\n l : int\n solid harmonic degree l\n j : int\n solid harmonic scale j\n m : int, optional\n solid harmonic rank m (defaults to 0)\n\n Returns\n -------\n output: torch tensor \n tensor of the same size as input_array. It holds the output of the\n operation::\n\n .. 
math:: \\\\text{input}_\\\\text{array} \\\\star \\\\psi_{j,l,m})\n\n \"\"\"\n cuda = isinstance(input_array, torch.cuda.FloatTensor)\n filters_l_m_j = self.filters[l][j][m]\n if cuda:\n filters_l_m_j = filters_l_m_j.cuda()\n return complex_modulus(self._fft_convolve(input_array, filters_l_m_j))\n\n def _check_input(self, input_array):\n if not torch.is_tensor(input_array):\n raise(TypeError(\n 'The input should be a torch.cuda.FloatTensor, '\n 'a torch.FloatTensor or a torch.DoubleTensor'))\n\n if (not input_array.is_contiguous()):\n input_array = input_array.contiguous()\n\n if((input_array.size(-1) != self.O or input_array.size(-2) != self.N \n or input_array.size(-3) != self.M)):\n raise (RuntimeError(\n 'Tensor must be of spatial size (%i,%i,%i)!' % (\n self.M, self.N, self.O)))\n\n if (input_array.dim() != 4):\n raise (RuntimeError('Input tensor must be 4D'))\n\n def forward(self, input_array, order_2=True, rotation_covariant=True,\n method='standard', points=None, integral_powers=(.5, 1., 2.)):\n \"\"\"\n The forward pass of 3D solid harmonic scattering\n\n Parameters\n ----------\n input_array: torch tensor \n input of size (batchsize, M, N, O)\n order_2: bool, optional\n if set to False|True it also excludes|includes second order\n scattering coefficients (default: True).\n rotation_covariant: bool, optional\n if set to True the first order moduli take the form::\n\n .. math:: \\\\sqrt(\\\\sum_m (input_array \\\\star \\\\psi_{j,l,m})^2))\n\n if set to False the first order moduli take the form::\n\n .. math:: input_array \\\\star \\\\psi_{j,l,m})\n\n The second order moduli change analogously\n Defaut: True\n method: string, optional\n specifies the method for obtaining scattering coefficients\n (\"standard\",\"local\",\"integral\"). Default: \"standard\"\n points: array-like, optional\n List of locations in which to sample wavelet moduli. Used when\n method == 'local'\n\n integral_powers: array-like\n List of exponents to the power of which moduli are raised before\n integration. Used with method == 'standard', method == 'integral'\n\n Returns\n -------\n output: tuple | torch tensor\n if order_2 is false it returns a torch tensor with the\n first order scattering coefficients\n if order_2 is true it returns a tuple with two elements,\n the first and second order scattering coefficients\n\n \"\"\"\n self._check_input(input_array)\n if rotation_covariant:\n convolution_and_modulus = (\n self._rotation_covariant_convolution_and_modulus)\n else:\n convolution_and_modulus = self._convolution_and_modulus\n\n compute_scattering_coefs = self._compute_scattering_coefs\n\n s_order_1 = []\n s_order_2 = []\n _input = to_complex(input_array)\n\n method_args = dict(points=points, integral_powers=integral_powers)\n\n for l in range(self.L+1):\n s_order_1_l, s_order_2_l = [], []\n for j_1 in range(self.J+1):\n conv_modulus = convolution_and_modulus(_input, l, j_1)\n s_order_1_l.append(compute_scattering_coefs(\n conv_modulus, method, method_args, j_1))\n if not order_2:\n continue\n for j_2 in range(j_1+1, self.J+1):\n conv_modulus_2 = convolution_and_modulus(\n conv_modulus, l, j_2)\n s_order_2_l.append(compute_scattering_coefs(\n conv_modulus_2, method, method_args, j_2))\n s_order_1.append(torch.cat(s_order_1_l, -1))\n if order_2:\n s_order_2.append(torch.cat(s_order_2_l, -1))\n\n if order_2:\n return torch.cat(\n [torch.stack(s_order_1, dim=-1),\n torch.stack(s_order_2, dim=-1)], -2)\n else:\n return torch.stack(s_order_1, dim=-1)\n\n\n __call__ = forward\n\n",
"path": "kymatio/scattering3d/scattering3d.py"
}
] | diff --git a/kymatio/scattering3d/scattering3d.py b/kymatio/scattering3d/scattering3d.py
index 2a7349b30..1e9f635bf 100644
--- a/kymatio/scattering3d/scattering3d.py
+++ b/kymatio/scattering3d/scattering3d.py
@@ -342,7 +342,7 @@ def forward(self, input_array, order_2=True, rotation_covariant=True,
if order_2:
return torch.cat(
[torch.stack(s_order_1, dim=-1),
- torch.stack(s_order_2, dim=-1)], 1)
+ torch.stack(s_order_2, dim=-1)], -2)
else:
return torch.stack(s_order_1, dim=-1)
diff --git a/kymatio/scattering3d/tests/test_scattering3d.py b/kymatio/scattering3d/tests/test_scattering3d.py
index d7f5e8979..1ce8c8d00 100644
--- a/kymatio/scattering3d/tests/test_scattering3d.py
+++ b/kymatio/scattering3d/tests/test_scattering3d.py
@@ -78,6 +78,19 @@ def test_against_standard_computations():
x, order_2=True, method='integral',
integral_powers=integral_powers)
+ # WARNING: These are hard-coded values for the setting J = 2.
+ n_order_1 = 3
+ n_order_2 = 3
+
+ # Extract orders and make order axis the slowest in accordance with
+ # the stored reference scattering transform.
+ order_1 = orders_1_and_2[:,:,0:n_order_1,:]
+ order_2 = orders_1_and_2[:,:,n_order_1:n_order_1+n_order_2,:]
+
+ order_1 = order_1.reshape((batch_size, -1))
+ order_2 = order_2.reshape((batch_size, -1))
+
+ orders_1_and_2 = torch.cat((order_1, order_2), 1)
order_0 = order_0.cpu().numpy().reshape((batch_size, -1))
start = 0
@@ -122,3 +135,20 @@ def test_solid_harmonic_scattering():
for l in range(1, L+1):
err = torch.abs(s[0, 0, j, l] - k ** l).sum()/(1e-6+s[0, 0, j, l].abs().sum())
assert err<1e-4
+
+def test_larger_scales():
+ if backend.NAME == "skcuda":
+ warnings.warn(("The skcuda backend is not yet implemented for 3D "
+ "scattering, but that's ok (for now)."), RuntimeWarning,
+ stacklevel=2)
+ return
+
+ shape = (32, 32, 32)
+ L = 3
+ sigma_0 = 1
+
+ x = torch.randn((1,) + shape)
+
+ for J in range(3, 4+1):
+ scattering = Scattering3D(J=J, shape=shape, L=L, sigma_0=sigma_0)
+ Sx = scattering(x, method='integral')
|
paperless-ngx__paperless-ngx-6303 | [BUG] At Splitting: Custom field leads to "The following error occurred while storing document 15_2-2.pdf after parsing: CustomField matching query does not exist."
### Description
Document with a custom field leads to an error message on splitting. No workflow is active.
"The following error occurred while storing document 15_2-2.pdf after parsing: CustomField matching query does not exist."
### Steps to reproduce
1. Go to Documents
2. Edit document
3. Split the document into separate pages
4. Error message appears
### Webserver logs
```bash
[2024-04-06 11:24:55,324] [DEBUG] [paperless.tasks] Skipping plugin CollatePlugin
[2024-04-06 11:24:55,325] [DEBUG] [paperless.tasks] Executing plugin BarcodePlugin
[2024-04-06 11:24:55,325] [DEBUG] [paperless.barcodes] Scanning for barcodes using PYZBAR
[2024-04-06 11:24:55,542] [INFO] [paperless.tasks] BarcodePlugin completed with no message
[2024-04-06 11:24:55,545] [DEBUG] [paperless.tasks] Executing plugin WorkflowTriggerPlugin
[2024-04-06 11:24:55,547] [INFO] [paperless.tasks] WorkflowTriggerPlugin completed with:
[2024-04-06 11:24:55,554] [INFO] [paperless.consumer] Consuming 15_2-2.pdf
[2024-04-06 11:24:55,556] [DEBUG] [paperless.consumer] Detected mime type: application/pdf
[2024-04-06 11:24:55,562] [DEBUG] [paperless.consumer] Parser: RasterisedDocumentParser
[2024-04-06 11:24:55,565] [DEBUG] [paperless.consumer] Parsing 15_2-2.pdf...
[2024-04-06 11:24:55,572] [INFO] [paperless.parsing.tesseract] pdftotext exited 0
[2024-04-06 11:24:55,711] [DEBUG] [paperless.parsing.tesseract] Calling OCRmyPDF with args: {'input_file': PosixPath('/tmp/paperless/paperless-ngx2bzkbofi/15_2-2.pdf'), 'output_file': PosixPath('/tmp/paperless/paperless-6d3y7xqr/archive.pdf'), 'use_threads': True, 'jobs': 4, 'language': 'deu+eng', 'output_type': 'pdfa', 'progress_bar': False, 'color_conversion_strategy': 'RGB', 'skip_text': True, 'clean': True, 'deskew': True, 'rotate_pages': True, 'rotate_pages_threshold': 6.0, 'sidecar': PosixPath('/tmp/paperless/paperless-6d3y7xqr/sidecar.txt')}
[2024-04-06 11:24:55,809] [INFO] [ocrmypdf._pipeline] skipping all processing on this page
[2024-04-06 11:24:55,812] [INFO] [ocrmypdf._pipelines.ocr] Postprocessing...
[2024-04-06 11:24:55,916] [INFO] [ocrmypdf._pipeline] Image optimization ratio: 1.00 savings: 0.0%
[2024-04-06 11:24:55,917] [INFO] [ocrmypdf._pipeline] Total file size ratio: 0.72 savings: -39.6%
[2024-04-06 11:24:55,918] [INFO] [ocrmypdf._pipelines._common] Output file is a PDF/A-2B (as expected)
[2024-04-06 11:24:55,922] [DEBUG] [paperless.parsing.tesseract] Incomplete sidecar file: discarding.
[2024-04-06 11:24:55,946] [INFO] [paperless.parsing.tesseract] pdftotext exited 0
[2024-04-06 11:24:55,946] [DEBUG] [paperless.consumer] Generating thumbnail for 15_2-2.pdf...
[2024-04-06 11:24:55,950] [DEBUG] [paperless.parsing] Execute: convert -density 300 -scale 500x5000> -alpha remove -strip -auto-orient -define pdf:use-cropbox=true /tmp/paperless/paperless-6d3y7xqr/archive.pdf[0] /tmp/paperless/paperless-6d3y7xqr/convert.webp
[2024-04-06 11:24:56,727] [INFO] [paperless.parsing] convert exited 0
[2024-04-06 11:24:56,797] [DEBUG] [paperless.consumer] Saving record to database
[2024-04-06 11:24:56,798] [DEBUG] [paperless.consumer] Creation date from st_mtime: 2024-04-06 11:24:52.835078+02:00
[2024-04-06 11:24:56,831] [ERROR] [paperless.consumer] The following error occurred while storing document 15_2-2.pdf after parsing: CustomField matching query does not exist.
Traceback (most recent call last):
File "/usr/local/lib/python3.11/site-packages/asgiref/sync.py", line 327, in main_wrap
raise exc_info[1]
File "/usr/src/paperless/src/documents/consumer.py", line 675, in try_consume_file
document = self._store(text=text, date=date, mime_type=mime_type)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/src/paperless/src/documents/consumer.py", line 851, in _store
self.apply_overrides(document)
File "/usr/src/paperless/src/documents/consumer.py", line 905, in apply_overrides
field = CustomField.objects.get(pk=field_id)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/django/db/models/manager.py", line 87, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/django/db/models/query.py", line 637, in get
raise self.model.DoesNotExist(
documents.models.CustomField.DoesNotExist: CustomField matching query does not exist.
[2024-04-06 11:24:56,833] [DEBUG] [paperless.parsing.tesseract] Deleting directory /tmp/paperless/paperless-6d3y7xqr
```
### Browser logs
_No response_
### Paperless-ngx version
2.7.0
### Host OS
Docker on Synology NAS - DSM 7.2
### Installation method
Docker - official image
### Browser
Firefox
### Configuration changes
_No response_
### Other
_No response_
### Please confirm the following
- [X] I believe this issue is a bug that affects all users of Paperless-ngx, not something specific to my installation.
- [X] I have already searched for relevant existing issues and discussions before opening this report.
- [X] I have updated the title field above with a concise description.
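For anyone tracing the root cause: the `CustomField.objects.get(pk=field_id)` call in the traceback fails because `DocumentMetadataOverrides.from_document` collects the primary keys of the document's `CustomFieldInstance` rows (`values_list("id", flat=True)`), while the consumer then treats those values as `CustomField` primary keys. Below is a minimal, framework-free sketch of that mismatch; the dataclasses are simplified stand-ins for the paperless-ngx models, not the real Django models.

```python
# Sketch of the id mix-up behind "CustomField matching query does not exist".
from dataclasses import dataclass


@dataclass
class CustomField:
    """The field definition whose pk the consumer looks up."""
    id: int
    name: str


@dataclass
class CustomFieldInstance:
    """The row linking a document to a field; it has its own pk."""
    id: int            # instance pk -- NOT a CustomField pk
    field: CustomField


fields_by_pk = {1: CustomField(id=1, name="Invoice number")}
doc_instances = [CustomFieldInstance(id=7, field=fields_by_pk[1])]

# Buggy collection (values_list("id", ...)): the instance pks leak out ...
buggy_ids = [inst.id for inst in doc_instances]          # [7]
# ... and a later lookup of CustomField by pk cannot find them:
assert not all(pk in fields_by_pk for pk in buggy_ids)   # 7 -> DoesNotExist

# Fixed collection (values_list("field", ...)): the related field's pk is used.
fixed_ids = [inst.field.id for inst in doc_instances]    # [1]
assert all(pk in fields_by_pk for pk in fixed_ids)
```

The one-line patch in the diff below swaps `"id"` for `"field"` so the related `CustomField` pk is collected instead.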
| [
{
"content": "import dataclasses\nimport datetime\nfrom enum import IntEnum\nfrom pathlib import Path\nfrom typing import Optional\n\nimport magic\nfrom guardian.shortcuts import get_groups_with_perms\nfrom guardian.shortcuts import get_users_with_perms\n\n\[email protected]\nclass DocumentMetadataOverrides:\n \"\"\"\n Manages overrides for document fields which normally would\n be set from content or matching. All fields default to None,\n meaning no override is happening\n \"\"\"\n\n filename: Optional[str] = None\n title: Optional[str] = None\n correspondent_id: Optional[int] = None\n document_type_id: Optional[int] = None\n tag_ids: Optional[list[int]] = None\n storage_path_id: Optional[int] = None\n created: Optional[datetime.datetime] = None\n asn: Optional[int] = None\n owner_id: Optional[int] = None\n view_users: Optional[list[int]] = None\n view_groups: Optional[list[int]] = None\n change_users: Optional[list[int]] = None\n change_groups: Optional[list[int]] = None\n custom_field_ids: Optional[list[int]] = None\n\n def update(self, other: \"DocumentMetadataOverrides\") -> \"DocumentMetadataOverrides\":\n \"\"\"\n Merges two DocumentMetadataOverrides objects such that object B's overrides\n are applied to object A or merged if multiple are accepted.\n\n The update is an in-place modification of self\n \"\"\"\n # only if empty\n if other.title is not None:\n self.title = other.title\n if other.correspondent_id is not None:\n self.correspondent_id = other.correspondent_id\n if other.document_type_id is not None:\n self.document_type_id = other.document_type_id\n if other.storage_path_id is not None:\n self.storage_path_id = other.storage_path_id\n if other.owner_id is not None:\n self.owner_id = other.owner_id\n\n # merge\n if self.tag_ids is None:\n self.tag_ids = other.tag_ids\n elif other.tag_ids is not None:\n self.tag_ids.extend(other.tag_ids)\n self.tag_ids = list(set(self.tag_ids))\n\n if self.view_users is None:\n self.view_users = other.view_users\n elif other.view_users is not None:\n self.view_users.extend(other.view_users)\n self.view_users = list(set(self.view_users))\n\n if self.view_groups is None:\n self.view_groups = other.view_groups\n elif other.view_groups is not None:\n self.view_groups.extend(other.view_groups)\n self.view_groups = list(set(self.view_groups))\n\n if self.change_users is None:\n self.change_users = other.change_users\n elif other.change_users is not None:\n self.change_users.extend(other.change_users)\n self.change_users = list(set(self.change_users))\n\n if self.change_groups is None:\n self.change_groups = other.change_groups\n elif other.change_groups is not None:\n self.change_groups.extend(other.change_groups)\n self.change_groups = list(set(self.change_groups))\n\n if self.custom_field_ids is None:\n self.custom_field_ids = other.custom_field_ids\n elif other.custom_field_ids is not None:\n self.custom_field_ids.extend(other.custom_field_ids)\n self.custom_field_ids = list(set(self.custom_field_ids))\n\n return self\n\n @staticmethod\n def from_document(doc) -> \"DocumentMetadataOverrides\":\n \"\"\"\n Fills in the overrides from a document object\n \"\"\"\n overrides = DocumentMetadataOverrides()\n overrides.title = doc.title\n overrides.correspondent_id = doc.correspondent.id if doc.correspondent else None\n overrides.document_type_id = doc.document_type.id if doc.document_type else None\n overrides.storage_path_id = doc.storage_path.id if doc.storage_path else None\n overrides.owner_id = doc.owner.id if doc.owner else None\n 
overrides.tag_ids = list(doc.tags.values_list(\"id\", flat=True))\n\n overrides.view_users = list(\n get_users_with_perms(\n doc,\n only_with_perms_in=[\"view_document\"],\n ).values_list(\"id\", flat=True),\n )\n overrides.change_users = list(\n get_users_with_perms(\n doc,\n only_with_perms_in=[\"change_document\"],\n ).values_list(\"id\", flat=True),\n )\n overrides.custom_field_ids = list(\n doc.custom_fields.values_list(\"id\", flat=True),\n )\n\n groups_with_perms = get_groups_with_perms(\n doc,\n attach_perms=True,\n )\n overrides.view_groups = [\n group.id\n for group in groups_with_perms\n if \"view_document\" in groups_with_perms[group]\n ]\n overrides.change_groups = [\n group.id\n for group in groups_with_perms\n if \"change_document\" in groups_with_perms[group]\n ]\n\n return overrides\n\n\nclass DocumentSource(IntEnum):\n \"\"\"\n The source of an incoming document. May have other uses in the future\n \"\"\"\n\n ConsumeFolder = 1\n ApiUpload = 2\n MailFetch = 3\n\n\[email protected]\nclass ConsumableDocument:\n \"\"\"\n Encapsulates an incoming document, either from consume folder, API upload\n or mail fetching and certain useful operations on it.\n \"\"\"\n\n source: DocumentSource\n original_file: Path\n mailrule_id: Optional[int] = None\n mime_type: str = dataclasses.field(init=False, default=None)\n\n def __post_init__(self):\n \"\"\"\n After a dataclass is initialized, this is called to finalize some data\n 1. Make sure the original path is an absolute, fully qualified path\n 2. Get the mime type of the file\n \"\"\"\n # Always fully qualify the path first thing\n # Just in case, convert to a path if it's a str\n self.original_file = Path(self.original_file).resolve()\n\n # Get the file type once at init\n # Note this function isn't called when the object is unpickled\n self.mime_type = magic.from_file(self.original_file, mime=True)\n",
"path": "src/documents/data_models.py"
}
] | [
{
"content": "import dataclasses\nimport datetime\nfrom enum import IntEnum\nfrom pathlib import Path\nfrom typing import Optional\n\nimport magic\nfrom guardian.shortcuts import get_groups_with_perms\nfrom guardian.shortcuts import get_users_with_perms\n\n\[email protected]\nclass DocumentMetadataOverrides:\n \"\"\"\n Manages overrides for document fields which normally would\n be set from content or matching. All fields default to None,\n meaning no override is happening\n \"\"\"\n\n filename: Optional[str] = None\n title: Optional[str] = None\n correspondent_id: Optional[int] = None\n document_type_id: Optional[int] = None\n tag_ids: Optional[list[int]] = None\n storage_path_id: Optional[int] = None\n created: Optional[datetime.datetime] = None\n asn: Optional[int] = None\n owner_id: Optional[int] = None\n view_users: Optional[list[int]] = None\n view_groups: Optional[list[int]] = None\n change_users: Optional[list[int]] = None\n change_groups: Optional[list[int]] = None\n custom_field_ids: Optional[list[int]] = None\n\n def update(self, other: \"DocumentMetadataOverrides\") -> \"DocumentMetadataOverrides\":\n \"\"\"\n Merges two DocumentMetadataOverrides objects such that object B's overrides\n are applied to object A or merged if multiple are accepted.\n\n The update is an in-place modification of self\n \"\"\"\n # only if empty\n if other.title is not None:\n self.title = other.title\n if other.correspondent_id is not None:\n self.correspondent_id = other.correspondent_id\n if other.document_type_id is not None:\n self.document_type_id = other.document_type_id\n if other.storage_path_id is not None:\n self.storage_path_id = other.storage_path_id\n if other.owner_id is not None:\n self.owner_id = other.owner_id\n\n # merge\n if self.tag_ids is None:\n self.tag_ids = other.tag_ids\n elif other.tag_ids is not None:\n self.tag_ids.extend(other.tag_ids)\n self.tag_ids = list(set(self.tag_ids))\n\n if self.view_users is None:\n self.view_users = other.view_users\n elif other.view_users is not None:\n self.view_users.extend(other.view_users)\n self.view_users = list(set(self.view_users))\n\n if self.view_groups is None:\n self.view_groups = other.view_groups\n elif other.view_groups is not None:\n self.view_groups.extend(other.view_groups)\n self.view_groups = list(set(self.view_groups))\n\n if self.change_users is None:\n self.change_users = other.change_users\n elif other.change_users is not None:\n self.change_users.extend(other.change_users)\n self.change_users = list(set(self.change_users))\n\n if self.change_groups is None:\n self.change_groups = other.change_groups\n elif other.change_groups is not None:\n self.change_groups.extend(other.change_groups)\n self.change_groups = list(set(self.change_groups))\n\n if self.custom_field_ids is None:\n self.custom_field_ids = other.custom_field_ids\n elif other.custom_field_ids is not None:\n self.custom_field_ids.extend(other.custom_field_ids)\n self.custom_field_ids = list(set(self.custom_field_ids))\n\n return self\n\n @staticmethod\n def from_document(doc) -> \"DocumentMetadataOverrides\":\n \"\"\"\n Fills in the overrides from a document object\n \"\"\"\n overrides = DocumentMetadataOverrides()\n overrides.title = doc.title\n overrides.correspondent_id = doc.correspondent.id if doc.correspondent else None\n overrides.document_type_id = doc.document_type.id if doc.document_type else None\n overrides.storage_path_id = doc.storage_path.id if doc.storage_path else None\n overrides.owner_id = doc.owner.id if doc.owner else None\n 
overrides.tag_ids = list(doc.tags.values_list(\"id\", flat=True))\n\n overrides.view_users = list(\n get_users_with_perms(\n doc,\n only_with_perms_in=[\"view_document\"],\n ).values_list(\"id\", flat=True),\n )\n overrides.change_users = list(\n get_users_with_perms(\n doc,\n only_with_perms_in=[\"change_document\"],\n ).values_list(\"id\", flat=True),\n )\n overrides.custom_field_ids = list(\n doc.custom_fields.values_list(\"field\", flat=True),\n )\n\n groups_with_perms = get_groups_with_perms(\n doc,\n attach_perms=True,\n )\n overrides.view_groups = [\n group.id\n for group in groups_with_perms\n if \"view_document\" in groups_with_perms[group]\n ]\n overrides.change_groups = [\n group.id\n for group in groups_with_perms\n if \"change_document\" in groups_with_perms[group]\n ]\n\n return overrides\n\n\nclass DocumentSource(IntEnum):\n \"\"\"\n The source of an incoming document. May have other uses in the future\n \"\"\"\n\n ConsumeFolder = 1\n ApiUpload = 2\n MailFetch = 3\n\n\[email protected]\nclass ConsumableDocument:\n \"\"\"\n Encapsulates an incoming document, either from consume folder, API upload\n or mail fetching and certain useful operations on it.\n \"\"\"\n\n source: DocumentSource\n original_file: Path\n mailrule_id: Optional[int] = None\n mime_type: str = dataclasses.field(init=False, default=None)\n\n def __post_init__(self):\n \"\"\"\n After a dataclass is initialized, this is called to finalize some data\n 1. Make sure the original path is an absolute, fully qualified path\n 2. Get the mime type of the file\n \"\"\"\n # Always fully qualify the path first thing\n # Just in case, convert to a path if it's a str\n self.original_file = Path(self.original_file).resolve()\n\n # Get the file type once at init\n # Note this function isn't called when the object is unpickled\n self.mime_type = magic.from_file(self.original_file, mime=True)\n",
"path": "src/documents/data_models.py"
}
] | diff --git a/src/documents/data_models.py b/src/documents/data_models.py
index 22572709ff3..b99c8511d85 100644
--- a/src/documents/data_models.py
+++ b/src/documents/data_models.py
@@ -116,7 +116,7 @@ def from_document(doc) -> "DocumentMetadataOverrides":
).values_list("id", flat=True),
)
overrides.custom_field_ids = list(
- doc.custom_fields.values_list("id", flat=True),
+ doc.custom_fields.values_list("field", flat=True),
)
groups_with_perms = get_groups_with_perms(
|
wagtail__wagtail-432 | Bug - Redirect loop
There seems to be a redirect-loop bug when a user (created without any admin/editor roles) attempts to log in. I was trying out the demo, and apparently the admin interface does not cater for users without any roles.
It would be nice to know if there is a workaround for this issue. Thanks!
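The loop is visible from the two views involved: the `login` view in `account.py` below redirects any authenticated user to `wagtailadmin_home`, while the admin views are guarded by `permission_required('wagtailadmin.access_admin')`, which sends users lacking that permission straight back to the login page. Here is a minimal, framework-free sketch; `User` and the two view functions are stubs standing in for `django.contrib.auth` and the real wagtailadmin views.

```python
# Sketch of the redirect loop for a logged-in user with no admin role.
from dataclasses import dataclass, field


@dataclass
class User:
    is_authenticated: bool
    perms: frozenset = field(default_factory=frozenset)

    def has_perm(self, perm: str) -> bool:
        return perm in self.perms


def login_view(user: User, patched: bool) -> str:
    # Unpatched: any authenticated user is sent to the dashboard.
    # Patched: only users who may actually access the admin are.
    if user.is_authenticated and (
        not patched or user.has_perm("wagtailadmin.access_admin")
    ):
        return "redirect: wagtailadmin_home"
    return "render: login page"


def home_view(user: User) -> str:
    # permission_required('wagtailadmin.access_admin') bounces everyone
    # without the permission back to the login page.
    if not user.has_perm("wagtailadmin.access_admin"):
        return "redirect: wagtailadmin_login"
    return "render: dashboard"


roleless = User(is_authenticated=True)  # logged in, no admin/editor role

# Unpatched: login -> home -> login -> ...  (the reported redirect loop)
assert login_view(roleless, patched=False) == "redirect: wagtailadmin_home"
assert home_view(roleless) == "redirect: wagtailadmin_login"

# Patched: the login page simply renders again, breaking the loop.
assert login_view(roleless, patched=True) == "render: login page"
```

The patch in the diff below adds exactly that `has_perm('wagtailadmin.access_admin')` guard to the login view.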
| [
{
"content": "from django.conf import settings\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth.forms import SetPasswordForm\nfrom django.contrib.auth.decorators import permission_required\nfrom django.contrib.auth.views import logout as auth_logout, login as auth_login\nfrom django.utils.translation import ugettext as _ \nfrom django.views.decorators.debug import sensitive_post_parameters\nfrom django.views.decorators.cache import never_cache\n\nfrom wagtail.wagtailadmin import forms\nfrom wagtail.wagtailusers.forms import NotificationPreferencesForm\nfrom wagtail.wagtailusers.models import UserProfile\nfrom wagtail.wagtailcore.models import UserPagePermissionsProxy\n\n\n@permission_required('wagtailadmin.access_admin')\ndef account(request):\n user_perms = UserPagePermissionsProxy(request.user)\n show_notification_preferences = user_perms.can_edit_pages() or user_perms.can_publish_pages()\n\n return render(request, 'wagtailadmin/account/account.html', {\n 'show_change_password': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True) and request.user.has_usable_password(),\n 'show_notification_preferences': show_notification_preferences\n })\n\n\n@permission_required('wagtailadmin.access_admin')\ndef change_password(request):\n can_change_password = request.user.has_usable_password()\n\n if can_change_password:\n if request.POST:\n form = SetPasswordForm(request.user, request.POST)\n\n if form.is_valid():\n form.save()\n\n messages.success(request, _(\"Your password has been changed successfully!\"))\n return redirect('wagtailadmin_account')\n else:\n form = SetPasswordForm(request.user)\n else:\n form = None\n\n return render(request, 'wagtailadmin/account/change_password.html', {\n 'form': form,\n 'can_change_password': can_change_password,\n })\n\n\n@permission_required('wagtailadmin.access_admin')\ndef notification_preferences(request):\n\n if request.POST:\n form = NotificationPreferencesForm(request.POST, instance=UserProfile.get_for_user(request.user))\n\n if form.is_valid():\n form.save()\n messages.success(request, _(\"Your preferences have been updated successfully!\"))\n return redirect('wagtailadmin_account')\n else:\n form = NotificationPreferencesForm(instance=UserProfile.get_for_user(request.user))\n\n # quick-and-dirty catch-all in case the form has been rendered with no\n # fields, as the user has no customisable permissions\n if not form.fields:\n return redirect('wagtailadmin_account')\n\n return render(request, 'wagtailadmin/account/notification_preferences.html', {\n 'form': form,\n })\n\n\n@sensitive_post_parameters()\n@never_cache\ndef login(request):\n if request.user.is_authenticated():\n return redirect('wagtailadmin_home')\n else:\n return auth_login(request,\n template_name='wagtailadmin/login.html',\n authentication_form=forms.LoginForm,\n extra_context={\n 'show_password_reset': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True),\n },\n )\n\n\ndef logout(request):\n response = auth_logout(request, next_page = 'wagtailadmin_login')\n\n # By default, logging out will generate a fresh sessionid cookie. 
We want to use the\n # absence of sessionid as an indication that front-end pages are being viewed by a\n # non-logged-in user and are therefore cacheable, so we forcibly delete the cookie here.\n response.delete_cookie(settings.SESSION_COOKIE_NAME,\n domain=settings.SESSION_COOKIE_DOMAIN,\n path=settings.SESSION_COOKIE_PATH)\n\n # HACK: pretend that the session hasn't been modified, so that SessionMiddleware\n # won't override the above and write a new cookie.\n request.session.modified = False\n\n return response\n",
"path": "wagtail/wagtailadmin/views/account.py"
}
] | [
{
"content": "from django.conf import settings\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth.forms import SetPasswordForm\nfrom django.contrib.auth.decorators import permission_required\nfrom django.contrib.auth.views import logout as auth_logout, login as auth_login\nfrom django.utils.translation import ugettext as _ \nfrom django.views.decorators.debug import sensitive_post_parameters\nfrom django.views.decorators.cache import never_cache\n\nfrom wagtail.wagtailadmin import forms\nfrom wagtail.wagtailusers.forms import NotificationPreferencesForm\nfrom wagtail.wagtailusers.models import UserProfile\nfrom wagtail.wagtailcore.models import UserPagePermissionsProxy\n\n\n@permission_required('wagtailadmin.access_admin')\ndef account(request):\n user_perms = UserPagePermissionsProxy(request.user)\n show_notification_preferences = user_perms.can_edit_pages() or user_perms.can_publish_pages()\n\n return render(request, 'wagtailadmin/account/account.html', {\n 'show_change_password': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True) and request.user.has_usable_password(),\n 'show_notification_preferences': show_notification_preferences\n })\n\n\n@permission_required('wagtailadmin.access_admin')\ndef change_password(request):\n can_change_password = request.user.has_usable_password()\n\n if can_change_password:\n if request.POST:\n form = SetPasswordForm(request.user, request.POST)\n\n if form.is_valid():\n form.save()\n\n messages.success(request, _(\"Your password has been changed successfully!\"))\n return redirect('wagtailadmin_account')\n else:\n form = SetPasswordForm(request.user)\n else:\n form = None\n\n return render(request, 'wagtailadmin/account/change_password.html', {\n 'form': form,\n 'can_change_password': can_change_password,\n })\n\n\n@permission_required('wagtailadmin.access_admin')\ndef notification_preferences(request):\n\n if request.POST:\n form = NotificationPreferencesForm(request.POST, instance=UserProfile.get_for_user(request.user))\n\n if form.is_valid():\n form.save()\n messages.success(request, _(\"Your preferences have been updated successfully!\"))\n return redirect('wagtailadmin_account')\n else:\n form = NotificationPreferencesForm(instance=UserProfile.get_for_user(request.user))\n\n # quick-and-dirty catch-all in case the form has been rendered with no\n # fields, as the user has no customisable permissions\n if not form.fields:\n return redirect('wagtailadmin_account')\n\n return render(request, 'wagtailadmin/account/notification_preferences.html', {\n 'form': form,\n })\n\n\n@sensitive_post_parameters()\n@never_cache\ndef login(request):\n if request.user.is_authenticated() and request.user.has_perm('wagtailadmin.access_admin'):\n return redirect('wagtailadmin_home')\n else:\n return auth_login(request,\n template_name='wagtailadmin/login.html',\n authentication_form=forms.LoginForm,\n extra_context={\n 'show_password_reset': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True),\n },\n )\n\n\ndef logout(request):\n response = auth_logout(request, next_page = 'wagtailadmin_login')\n\n # By default, logging out will generate a fresh sessionid cookie. 
We want to use the\n # absence of sessionid as an indication that front-end pages are being viewed by a\n # non-logged-in user and are therefore cacheable, so we forcibly delete the cookie here.\n response.delete_cookie(settings.SESSION_COOKIE_NAME,\n domain=settings.SESSION_COOKIE_DOMAIN,\n path=settings.SESSION_COOKIE_PATH)\n\n # HACK: pretend that the session hasn't been modified, so that SessionMiddleware\n # won't override the above and write a new cookie.\n request.session.modified = False\n\n return response\n",
"path": "wagtail/wagtailadmin/views/account.py"
}
] | diff --git a/wagtail/wagtailadmin/tests/test_account_management.py b/wagtail/wagtailadmin/tests/test_account_management.py
index 391baad9d28d..048ef6fbc1e3 100644
--- a/wagtail/wagtailadmin/tests/test_account_management.py
+++ b/wagtail/wagtailadmin/tests/test_account_management.py
@@ -14,16 +14,10 @@ class TestAuthentication(TestCase, WagtailTestUtils):
"""
This tests that users can login and logout of the admin interface
"""
- def setUp(self):
- self.login()
-
def test_login_view(self):
"""
This tests that the login view responds with a login page
"""
- # Logout so we can test the login view
- self.client.logout()
-
# Get login page
response = self.client.get(reverse('wagtailadmin_login'))
@@ -36,8 +30,8 @@ def test_login_view_post(self):
This posts user credentials to the login view and checks that
the user was logged in successfully
"""
- # Logout so we can test the login view
- self.client.logout()
+ # Create user to log in with
+ user = User.objects.create_superuser(username='test', email='[email protected]', password='password')
# Post credentials to the login page
post_data = {
@@ -59,16 +53,40 @@ def test_already_logged_in_redirect(self):
redirected to the admin dashboard if they try to access the login
page
"""
+ # Login
+ self.login()
+
# Get login page
response = self.client.get(reverse('wagtailadmin_login'))
# Check that the user was redirected to the dashboard
self.assertRedirects(response, reverse('wagtailadmin_home'))
+ def test_logged_in_as_non_privileged_user_doesnt_redirect(self):
+ """
+ This tests that if the user is logged in but hasn't got permission
+ to access the admin, they are not redirected to the admin
+
+ This tests issue #431
+ """
+ # Login as unprivileged user
+ User.objects.create(username='unprivileged', password='123')
+ self.client.login(username='unprivileged', password='123')
+
+ # Get login page
+ response = self.client.get(reverse('wagtailadmin_login'))
+
+ # Check that the user recieved a login page and was not redirected
+ self.assertEqual(response.status_code, 200)
+ self.assertTemplateUsed(response, 'wagtailadmin/login.html')
+
def test_logout(self):
"""
This tests that the user can logout
"""
+ # Login
+ self.login()
+
# Get logout page
response = self.client.get(reverse('wagtailadmin_logout'))
@@ -83,9 +101,6 @@ def test_not_logged_in_redirect(self):
This tests that a not logged in user is redirected to the
login page
"""
- # Logout
- self.client.logout()
-
# Get dashboard
response = self.client.get(reverse('wagtailadmin_home'))
@@ -98,9 +113,6 @@ def test_not_logged_in_redirect_default_settings(self):
redirects to the correct place when the user has not set
the LOGIN_URL setting correctly
"""
- # Logout
- self.client.logout()
-
# Get dashboard with default LOGIN_URL setting
with self.settings(LOGIN_URL='django.contrib.auth.views.login'):
response = self.client.get(reverse('wagtailadmin_home'))
diff --git a/wagtail/wagtailadmin/views/account.py b/wagtail/wagtailadmin/views/account.py
index 65e8dbeec71f..9dfa15b9e851 100644
--- a/wagtail/wagtailadmin/views/account.py
+++ b/wagtail/wagtailadmin/views/account.py
@@ -75,7 +75,7 @@ def notification_preferences(request):
@sensitive_post_parameters()
@never_cache
def login(request):
- if request.user.is_authenticated():
+ if request.user.is_authenticated() and request.user.has_perm('wagtailadmin.access_admin'):
return redirect('wagtailadmin_home')
else:
return auth_login(request,
|
conda__conda-4651 | Package cache error with conda 4.3.12 on win64
The following error occurs when I try to create a new environment with conda 4.3.12 on win64:
`$ C:\Program Files\Anaconda3\Scripts\conda-script.py create -p C:\hub\temp\venv python=3.5`
Traceback (most recent call last):
File "C:\Program Files\Anaconda3\lib\site-packages\conda\exceptions.py", line 616, in conda_exception_handler
return_value = func(*args, **kwargs)
File "C:\Program Files\Anaconda3\lib\site-packages\conda\cli\main.py", line 137, in _main
exit_code = args.func(args, p)
File "C:\Program Files\Anaconda3\lib\site-packages\conda\cli\main_create.py", line 68, in execute
install(args, parser, 'create')
File "C:\Program Files\Anaconda3\lib\site-packages\conda\cli\install.py", line 222, in install
unknown=index_args['unknown'], prefix=prefix)
File "C:\Program Files\Anaconda3\lib\site-packages\conda\core\index.py", line 125, in get_index
index = fetch_index(channel_priority_map, use_cache=use_cache)
File "C:\Program Files\Anaconda3\lib\site-packages\conda\core\index.py", line 558, in fetch_index
repodatas = _collect_repodatas(use_cache, tasks)
File "C:\Program Files\Anaconda3\lib\site-packages\conda\core\index.py", line 547, in _collect_repodatas
repodatas = _collect_repodatas_serial(use_cache, tasks)
File "C:\Program Files\Anaconda3\lib\site-packages\conda\core\index.py", line 520, in _collect_repodatas_serial
for url, schan, pri in tasks]
File "C:\Program Files\Anaconda3\lib\site-packages\conda\core\index.py", line 520, in <listcomp>
for url, schan, pri in tasks]
File "C:\Program Files\Anaconda3\lib\site-packages\conda\core\index.py", line 144, in func
res = f(*args, **kwargs)
File "C:\Program Files\Anaconda3\lib\site-packages\conda\core\index.py", line 464, in fetch_repodata
cache_path = join(cache_dir or create_cache_dir(), cache_fn_url(url))
File "C:\Program Files\Anaconda3\lib\site-packages\conda\core\index.py", line 596, in create_cache_dir
assert pkgs_dir == context.pkgs_dirs[0], (pkgs_dir, context.pkgs_dirs)
AssertionError: ('C:\\Users\\xxx\\AppData\\Local\\conda\\conda\\pkgs', ('C:\\Program Files\\Anaconda3\\pkgs', 'C:\\Users\\xxx\\AppData\\Local\\conda\\conda\\pkgs'))
Conda info is:
platform : win-64
conda version : 4.3.12
conda is private : False
conda-env version : 4.3.12
conda-build version : 2.1.4
python version : 3.6.0.final.0
requests version : 2.12.4
root environment : C:\Program Files\Anaconda3 (read only)
default environment : C:\Program Files\Anaconda3
envs directories : C:\Program Files\Anaconda3\envs
C:\Users\xxx\AppData\Local\conda\conda\envs
C:\Users\xxx\.conda\envs
package cache : C:\Program Files\Anaconda3\pkgs
C:\Users\xxx\AppData\Local\conda\conda\pkgs
channel URLs : [...]
config file : C:\Program Files\Anaconda3\.condarc
offline mode : False
user-agent : conda/4.3.12 requests/2.12.4 CPython/3.6.0 Windows/10 Windows/10.0.10240
Somehow the Program Files cache dir is prioritized even though it is not writable.
It works with 4.3.9 and throws a different error with 4.3.11 (part of that stack trace below):
File "C:\Program Files\Anaconda3\lib\site-packages\conda\core\index.py", line 494, in fetch_repodata
touch(cache_path)
File "C:\Program Files\Anaconda3\lib\site-packages\conda\gateways\disk\update.py", line 64, in touch
utime(path, None)
PermissionError: [WinError 5] Access is denied: 'C:\\Program Files\\Anaconda3\\pkgs\\cache\\3e904a53.json'
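The AssertionError comes from `create_cache_dir()` (visible in `conda/core/index.py` below), which picks the first *writable* package cache but then asserts that it is also the first *configured* one, which is impossible when the root install is read-only. A small sketch of that logic follows; the paths mirror the traceback, `is_writable()` is an illustrative stand-in for conda's real permission probe, and the "fix direction" at the end is only a plausible reading, not necessarily the shipped patch.

```python
# Sketch of why create_cache_dir() fails for a read-only root install.
pkgs_dirs = (
    r"C:\Program Files\Anaconda3\pkgs",              # read-only root install
    r"C:\Users\xxx\AppData\Local\conda\conda\pkgs",  # writable user dir
)


def is_writable(path: str) -> bool:
    # Assumed for illustration: only the root install is read-only.
    return "Program Files" not in path


def first_writable(dirs):
    return next(d for d in dirs if is_writable(d))


pkgs_dir = first_writable(pkgs_dirs)  # -> the user dir, not pkgs_dirs[0]

# conda 4.3.12 asserted that the first writable cache is also the first
# configured one, which cannot hold here:
try:
    assert pkgs_dir == pkgs_dirs[0], (pkgs_dir, pkgs_dirs)
except AssertionError as exc:
    print("reproduced:", exc)  # the same tuple as in the traceback above

# Plausible fix direction: drop the assertion and put the repodata cache
# under the first writable package directory instead.
cache_dir = pkgs_dir + r"\cache"
print("cache dir:", cache_dir)
```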
| [
{
"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport bz2\nfrom contextlib import closing\nfrom functools import wraps\nimport hashlib\nimport json\nfrom logging import DEBUG, getLogger\nfrom mmap import ACCESS_READ, mmap\nfrom os import makedirs\nfrom os.path import getmtime, isfile, join, split as path_split, dirname\nimport pickle\nimport re\nfrom textwrap import dedent\nfrom time import time\nimport warnings\n\nfrom requests.exceptions import ConnectionError, HTTPError, SSLError\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\n\nfrom .linked_data import linked_data\nfrom .package_cache import PackageCache\nfrom .. import CondaError\nfrom .._vendor.auxlib.entity import EntityEncoder\nfrom .._vendor.auxlib.ish import dals\nfrom .._vendor.auxlib.logz import stringify\nfrom ..base.constants import CONDA_HOMEPAGE_URL, MAX_CHANNEL_PRIORITY\nfrom ..base.context import context\nfrom ..common.compat import (ensure_binary, ensure_text_type, ensure_unicode, iteritems,\n itervalues)\nfrom ..common.url import join_url\nfrom ..connection import CondaSession\nfrom ..exceptions import CondaHTTPError, CondaRuntimeError\nfrom ..gateways.disk.delete import rm_rf\nfrom ..gateways.disk.read import read_index_json\nfrom ..gateways.disk.update import touch\nfrom ..models.channel import Channel, prioritize_channels\nfrom ..models.dist import Dist\nfrom ..models.index_record import EMPTY_LINK, IndexRecord, Priority\n\ntry:\n from cytoolz.itertoolz import take\nexcept ImportError:\n from .._vendor.toolz.itertoolz import take\n\n\nlog = getLogger(__name__)\ndotlog = getLogger('dotupdate')\nstdoutlog = getLogger('stdoutlog')\nstderrlog = getLogger('stderrlog')\n\nfail_unknown_host = False\n\n\nREPODATA_PICKLE_VERSION = 1\nREPODATA_HEADER_RE = b'\"(_etag|_mod|_cache_control)\":[ ]?\"(.*)\"'\n\n\ndef supplement_index_with_prefix(index, prefix, channels):\n # type: (Dict[Dist, IndexRecord], str, Set[canonical_channel]) -> None\n # supplement index with information from prefix/conda-meta\n assert prefix\n maxp = len(channels) + 1\n for dist, info in iteritems(linked_data(prefix)):\n if dist in index:\n # The downloaded repodata takes priority, so we do not overwrite.\n # We do, however, copy the link information so that the solver\n # knows this package is installed.\n old_record = index[dist]\n link = info.get('link') or EMPTY_LINK\n index[dist] = IndexRecord.from_objects(old_record, link=link)\n else:\n # If the package is not in the repodata, use the local data. If\n # the 'depends' field is not present, we need to set it; older\n # installations are likely to have this.\n depends = info.get('depends') or ()\n # If the channel is known but the package is not in the index, it\n # is because 1) the channel is unavailable offline, or 2) it no\n # longer contains this package. Either way, we should prefer any\n # other version of the package to this one. 
On the other hand, if\n # it is in a channel we don't know about, assign it a value just\n # above the priority of all known channels.\n priority = MAX_CHANNEL_PRIORITY if dist.channel in channels else maxp\n index[dist] = IndexRecord.from_objects(info, depends=depends, priority=priority)\n\n\ndef supplement_index_with_cache(index, channels):\n # type: (Dict[Dist, IndexRecord], Set[canonical_channel]) -> None\n # supplement index with packages from the cache\n maxp = len(channels) + 1\n for pc_entry in PackageCache.get_all_extracted_entries():\n dist = pc_entry.dist\n if dist in index:\n # The downloaded repodata takes priority\n continue\n pkg_dir = pc_entry.extracted_package_dir\n index_json_record = read_index_json(pkg_dir)\n # See the discussion above about priority assignments.\n priority = MAX_CHANNEL_PRIORITY if dist.channel in channels else maxp\n index_json_record.fn = dist.to_filename()\n index_json_record.schannel = dist.channel\n index_json_record.priority = priority\n index_json_record.url = dist.to_url()\n index[dist] = index_json_record\n\n\ndef get_index(channel_urls=(), prepend=True, platform=None,\n use_local=False, use_cache=False, unknown=None, prefix=None):\n \"\"\"\n Return the index of packages available on the channels\n\n If prepend=False, only the channels passed in as arguments are used.\n If platform=None, then the current platform is used.\n If prefix is supplied, then the packages installed in that prefix are added.\n \"\"\"\n if use_local:\n channel_urls = ['local'] + list(channel_urls)\n if prepend:\n channel_urls += context.channels\n if context.offline and unknown is None:\n unknown = True\n\n channel_priority_map = prioritize_channels(channel_urls, platform=platform)\n index = fetch_index(channel_priority_map, use_cache=use_cache)\n\n if prefix or unknown:\n known_channels = {chnl for chnl, _ in itervalues(channel_priority_map)}\n if prefix:\n supplement_index_with_prefix(index, prefix, known_channels)\n if unknown:\n supplement_index_with_cache(index, known_channels)\n return index\n\n\n# We need a decorator so that the dot gets printed *after* the repodata is fetched\nclass dotlog_on_return(object):\n def __init__(self, msg):\n self.msg = msg\n\n def __call__(self, f):\n @wraps(f)\n def func(*args, **kwargs):\n res = f(*args, **kwargs)\n dotlog.debug(\"%s args %s kwargs %s\" % (self.msg, args, kwargs))\n return res\n return func\n\n\ndef read_mod_and_etag(path):\n with open(path, 'rb') as f:\n try:\n with closing(mmap(f.fileno(), 0, access=ACCESS_READ)) as m:\n match_objects = take(3, re.finditer(REPODATA_HEADER_RE, m))\n result = dict(map(ensure_unicode, mo.groups()) for mo in match_objects)\n return result\n except (BufferError, ValueError):\n # BufferError: cannot close exported pointers exist\n # https://github.com/conda/conda/issues/4592\n # ValueError: cannot mmap an empty file\n return {}\n\n\ndef get_cache_control_max_age(cache_control_value):\n max_age = re.search(r\"max-age=(\\d+)\", cache_control_value)\n return int(max_age.groups()[0]) if max_age else 0\n\n\nclass Response304ContentUnchanged(Exception):\n pass\n\n\ndef fetch_repodata_remote_request(session, url, etag, mod_stamp):\n if not context.ssl_verify:\n warnings.simplefilter('ignore', InsecureRequestWarning)\n\n session = session or CondaSession()\n\n headers = {}\n if etag:\n headers[\"If-None-Match\"] = etag\n if mod_stamp:\n headers[\"If-Modified-Since\"] = mod_stamp\n\n if 'repo.continuum.io' in url or url.startswith(\"file://\"):\n filename = 'repodata.json.bz2'\n 
headers['Accept-Encoding'] = 'identity'\n else:\n headers['Accept-Encoding'] = 'gzip, deflate, compress, identity'\n headers['Content-Type'] = 'application/json'\n filename = 'repodata.json'\n\n try:\n timeout = context.remote_connect_timeout_secs, context.remote_read_timeout_secs\n resp = session.get(join_url(url, filename), headers=headers, proxies=session.proxies,\n timeout=timeout)\n if log.isEnabledFor(DEBUG):\n log.debug(stringify(resp))\n resp.raise_for_status()\n\n if resp.status_code == 304:\n raise Response304ContentUnchanged()\n\n def maybe_decompress(filename, resp_content):\n return ensure_text_type(bz2.decompress(resp_content)\n if filename.endswith('.bz2')\n else resp_content).strip()\n json_str = maybe_decompress(filename, resp.content)\n fetched_repodata = json.loads(json_str) if json_str else {}\n fetched_repodata['_url'] = url\n add_http_value_to_dict(resp, 'Etag', fetched_repodata, '_etag')\n add_http_value_to_dict(resp, 'Last-Modified', fetched_repodata, '_mod')\n add_http_value_to_dict(resp, 'Cache-Control', fetched_repodata, '_cache_control')\n return fetched_repodata\n\n except ValueError as e:\n raise CondaRuntimeError(\"Invalid index file: {0}: {1}\".format(join_url(url, filename), e))\n\n except (ConnectionError, HTTPError, SSLError) as e:\n # status_code might not exist on SSLError\n status_code = getattr(e.response, 'status_code', None)\n if status_code == 404:\n if not url.endswith('/noarch'):\n return None\n else:\n if context.allow_non_channel_urls:\n help_message = dedent(\"\"\"\n WARNING: The remote server could not find the noarch directory for the\n requested channel with url: %s\n\n It is possible you have given conda an invalid channel. Please double-check\n your conda configuration using `conda config --show`.\n\n If the requested url is in fact a valid conda channel, please request that the\n channel administrator create `noarch/repodata.json` and associated\n `noarch/repodata.json.bz2` files, even if `noarch/repodata.json` is empty.\n $ mkdir noarch\n $ echo '{}' > noarch/repodata.json\n $ bzip2 -k noarch/repodata.json\n \"\"\") % dirname(url)\n stderrlog.warn(help_message)\n return None\n else:\n help_message = dals(\"\"\"\n The remote server could not find the noarch directory for the\n requested channel with url: %s\n\n As of conda 4.3, a valid channel must contain a `noarch/repodata.json` and\n associated `noarch/repodata.json.bz2` file, even if `noarch/repodata.json` is\n empty. please request that the channel administrator create\n `noarch/repodata.json` and associated `noarch/repodata.json.bz2` files.\n $ mkdir noarch\n $ echo '{}' > noarch/repodata.json\n $ bzip2 -k noarch/repodata.json\n\n You will need to adjust your conda configuration to proceed.\n Use `conda config --show` to view your configuration's current state.\n Further configuration help can be found at <%s>.\n \"\"\") % (dirname(url), join_url(CONDA_HOMEPAGE_URL, 'docs/config.html'))\n\n elif status_code == 403:\n if not url.endswith('/noarch'):\n return None\n else:\n if context.allow_non_channel_urls:\n help_message = dedent(\"\"\"\n WARNING: The remote server could not find the noarch directory for the\n requested channel with url: %s\n\n It is possible you have given conda an invalid channel. 
Please double-check\n your conda configuration using `conda config --show`.\n\n If the requested url is in fact a valid conda channel, please request that the\n channel administrator create `noarch/repodata.json` and associated\n `noarch/repodata.json.bz2` files, even if `noarch/repodata.json` is empty.\n $ mkdir noarch\n $ echo '{}' > noarch/repodata.json\n $ bzip2 -k noarch/repodata.json\n \"\"\") % dirname(url)\n stderrlog.warn(help_message)\n return None\n else:\n help_message = dals(\"\"\"\n The remote server could not find the noarch directory for the\n requested channel with url: %s\n\n As of conda 4.3, a valid channel must contain a `noarch/repodata.json` and\n associated `noarch/repodata.json.bz2` file, even if `noarch/repodata.json` is\n empty. please request that the channel administrator create\n `noarch/repodata.json` and associated `noarch/repodata.json.bz2` files.\n $ mkdir noarch\n $ echo '{}' > noarch/repodata.json\n $ bzip2 -k noarch/repodata.json\n\n You will need to adjust your conda configuration to proceed.\n Use `conda config --show` to view your configuration's current state.\n Further configuration help can be found at <%s>.\n \"\"\") % (dirname(url), join_url(CONDA_HOMEPAGE_URL, 'docs/config.html'))\n\n elif status_code == 401:\n channel = Channel(url)\n if channel.token:\n help_message = dals(\"\"\"\n The token '%s' given for the URL is invalid.\n\n If this token was pulled from anaconda-client, you will need to use\n anaconda-client to reauthenticate.\n\n If you supplied this token to conda directly, you will need to adjust your\n conda configuration to proceed.\n\n Use `conda config --show` to view your configuration's current state.\n Further configuration help can be found at <%s>.\n \"\"\") % (channel.token, join_url(CONDA_HOMEPAGE_URL, 'docs/config.html'))\n\n elif context.channel_alias.location in url:\n # Note, this will not trigger if the binstar configured url does\n # not match the conda configured one.\n help_message = dals(\"\"\"\n The remote server has indicated you are using invalid credentials for this channel.\n\n If the remote site is anaconda.org or follows the Anaconda Server API, you\n will need to\n (a) login to the site with `anaconda login`, or\n (b) provide conda with a valid token directly.\n\n Further configuration help can be found at <%s>.\n \"\"\") % join_url(CONDA_HOMEPAGE_URL, 'docs/config.html')\n\n else:\n help_message = dals(\"\"\"\n The credentials you have provided for this URL are invalid.\n\n You will need to modify your conda configuration to proceed.\n Use `conda config --show` to view your configuration's current state.\n Further configuration help can be found at <%s>.\n \"\"\") % join_url(CONDA_HOMEPAGE_URL, 'docs/config.html')\n\n elif status_code is not None and 500 <= status_code < 600:\n help_message = dals(\"\"\"\n An remote server error occurred when trying to retrieve this URL.\n\n A 500-type error (e.g. 500, 501, 502, 503, etc.) indicates the server failed to\n fulfill a valid request. The problem may be spurious, and will resolve itself if you\n try your request again. 
If the problem persists, consider notifying the maintainer\n of the remote server.\n \"\"\")\n\n else:\n help_message = dals(\"\"\"\n An HTTP error occurred when trying to retrieve this URL.\n HTTP errors are often intermittent, and a simple retry will get you on your way.\n %r\n \"\"\") % e\n\n raise CondaHTTPError(help_message,\n getattr(e.response, 'url', None),\n status_code,\n getattr(e.response, 'reason', None),\n getattr(e.response, 'elapsed', None),\n e.response)\n\n\ndef write_pickled_repodata(cache_path, repodata):\n # Don't bother to pickle empty channels\n if not repodata.get('packages'):\n return\n try:\n with open(get_pickle_path(cache_path), 'wb') as f:\n pickle.dump(repodata, f)\n except Exception as e:\n import traceback\n log.debug(\"Failed to dump pickled repodata.\\n%s\", traceback.format_exc())\n\n\ndef read_pickled_repodata(cache_path, channel_url, schannel, priority, etag, mod_stamp):\n pickle_path = get_pickle_path(cache_path)\n # Don't trust pickled data if there is no accompanying json data\n if not isfile(pickle_path) or not isfile(cache_path):\n return None\n try:\n if isfile(pickle_path):\n log.debug(\"found pickle file %s\", pickle_path)\n with open(pickle_path, 'rb') as f:\n repodata = pickle.load(f)\n except Exception as e:\n import traceback\n log.debug(\"Failed to load pickled repodata.\\n%s\", traceback.format_exc())\n rm_rf(pickle_path)\n return None\n\n def _check_pickled_valid():\n yield repodata.get('_url') == channel_url\n yield repodata.get('_schannel') == schannel\n yield repodata.get('_add_pip') == context.add_pip_as_python_dependency\n yield repodata.get('_mod') == mod_stamp\n yield repodata.get('_etag') == etag\n yield repodata.get('_pickle_version') == REPODATA_PICKLE_VERSION\n\n if not all(_check_pickled_valid()):\n return None\n\n if int(repodata['_priority']) != priority:\n log.debug(\"setting priority for %s to '%d'\", repodata.get('_url'), priority)\n repodata['_priority']._priority = priority\n\n return repodata\n\n\ndef read_local_repodata(cache_path, channel_url, schannel, priority, etag, mod_stamp):\n local_repodata = read_pickled_repodata(cache_path, channel_url, schannel, priority,\n etag, mod_stamp)\n if local_repodata:\n return local_repodata\n with open(cache_path) as f:\n try:\n local_repodata = json.load(f)\n except ValueError as e:\n # ValueError: Expecting object: line 11750 column 6 (char 303397)\n log.debug(\"Error for cache path: '%s'\\n%r\", cache_path, e)\n message = dals(\"\"\"\n An error occurred when loading cached repodata. 
Executing\n `conda clean --index-cache` will remove cached repodata files\n so they can be downloaded again.\n \"\"\")\n raise CondaError(message)\n else:\n process_repodata(local_repodata, channel_url, schannel, priority)\n write_pickled_repodata(cache_path, local_repodata)\n return local_repodata\n\n\ndef process_repodata(repodata, channel_url, schannel, priority):\n opackages = repodata.setdefault('packages', {})\n if not opackages:\n return repodata\n\n repodata['_add_pip'] = add_pip = context.add_pip_as_python_dependency\n repodata['_pickle_version'] = REPODATA_PICKLE_VERSION\n repodata['_priority'] = priority = Priority(priority)\n repodata['_schannel'] = schannel\n\n meta_in_common = { # just need to make this once, then apply with .update()\n 'arch': repodata.get('info', {}).get('arch'),\n 'channel': channel_url,\n 'platform': repodata.get('info', {}).get('platform'),\n 'priority': priority,\n 'schannel': schannel,\n }\n packages = {}\n for fn, info in iteritems(opackages):\n info['fn'] = fn\n info['url'] = join_url(channel_url, fn)\n if add_pip and info['name'] == 'python' and info['version'].startswith(('2.', '3.')):\n info['depends'].append('pip')\n info.update(meta_in_common)\n rec = IndexRecord(**info)\n packages[Dist(rec)] = rec\n repodata['packages'] = packages\n\n\n@dotlog_on_return(\"fetching repodata:\")\ndef fetch_repodata(url, schannel, priority,\n cache_dir=None, use_cache=False, session=None):\n cache_path = join(cache_dir or create_cache_dir(), cache_fn_url(url))\n\n try:\n mtime = getmtime(cache_path)\n except (IOError, OSError):\n log.debug(\"No local cache found for %s at %s\", url, cache_path)\n if use_cache:\n return {'packages': {}}\n else:\n mod_etag_headers = {}\n else:\n mod_etag_headers = read_mod_and_etag(cache_path)\n\n if context.local_repodata_ttl > 1:\n max_age = context.local_repodata_ttl\n elif context.local_repodata_ttl == 1:\n max_age = get_cache_control_max_age(mod_etag_headers.get('_cache_control', ''))\n else:\n max_age = 0\n\n timeout = mtime + max_age - time()\n if (timeout > 0 or context.offline) and not url.startswith('file://'):\n log.debug(\"Using cached repodata for %s at %s. Timeout in %d sec\",\n url, cache_path, timeout)\n return read_local_repodata(cache_path, url, schannel, priority,\n mod_etag_headers.get('_etag'), mod_etag_headers.get('_mod'))\n\n log.debug(\"Locally invalidating cached repodata for %s at %s\", url, cache_path)\n\n try:\n assert url is not None, url\n repodata = fetch_repodata_remote_request(session, url,\n mod_etag_headers.get('_etag'),\n mod_etag_headers.get('_mod'))\n except Response304ContentUnchanged:\n log.debug(\"304 NOT MODIFIED for '%s'. 
Updating mtime and loading from disk\", url)\n touch(cache_path)\n return read_local_repodata(cache_path, url, schannel, priority,\n mod_etag_headers.get('_etag'), mod_etag_headers.get('_mod'))\n if repodata is None:\n return None\n\n with open(cache_path, 'w') as fo:\n json.dump(repodata, fo, indent=2, sort_keys=True, cls=EntityEncoder)\n\n process_repodata(repodata, url, schannel, priority)\n write_pickled_repodata(cache_path, repodata)\n return repodata\n\n\ndef _collect_repodatas_serial(use_cache, tasks):\n # type: (bool, List[str]) -> List[Sequence[str, Option[Dict[Dist, IndexRecord]]]]\n session = CondaSession()\n repodatas = [(url, fetch_repodata(url, schan, pri,\n use_cache=use_cache,\n session=session))\n for url, schan, pri in tasks]\n return repodatas\n\n\ndef _collect_repodatas_concurrent(executor, use_cache, tasks):\n futures = tuple(executor.submit(fetch_repodata, url, schan, pri,\n use_cache=use_cache,\n session=CondaSession())\n for url, schan, pri in tasks)\n repodatas = [(t[0], f.result()) for t, f in zip(tasks, futures)]\n return repodatas\n\n\ndef _collect_repodatas(use_cache, tasks):\n repodatas = executor = None\n if context.concurrent:\n try:\n import concurrent.futures\n executor = concurrent.futures.ThreadPoolExecutor(10)\n repodatas = _collect_repodatas_concurrent(executor, use_cache, tasks)\n except (ImportError, RuntimeError) as e:\n # concurrent.futures is only available in Python >= 3.2 or if futures is installed\n # RuntimeError is thrown if number of threads are limited by OS\n log.debug(repr(e))\n if executor:\n executor.shutdown(wait=True)\n if repodatas is None:\n repodatas = _collect_repodatas_serial(use_cache, tasks)\n return repodatas\n\n\ndef fetch_index(channel_urls, use_cache=False, index=None):\n # type: (prioritize_channels(), bool, bool, Dict[Dist, IndexRecord]) -> Dict[Dist, IndexRecord]\n log.debug('channel_urls=' + repr(channel_urls))\n if not context.json:\n stdoutlog.info(\"Fetching package metadata ...\")\n\n tasks = [(url,) + cdata for url, cdata in iteritems(channel_urls)]\n repodatas = _collect_repodatas(use_cache, tasks)\n # type: List[Sequence[str, Option[Dict[Dist, IndexRecord]]]]\n # this is sorta a lie; actually more primitve types\n\n if index is None:\n index = {}\n for _, repodata in repodatas:\n if repodata:\n index.update(repodata.get('packages', {}))\n\n if not context.json:\n stdoutlog.info('\\n')\n return index\n\n\ndef cache_fn_url(url):\n # url must be right-padded with '/' to not invalidate any existing caches\n if not url.endswith('/'):\n url += '/'\n md5 = hashlib.md5(ensure_binary(url)).hexdigest()\n return '%s.json' % (md5[:8],)\n\n\ndef get_pickle_path(cache_path):\n cache_dir, cache_base = path_split(cache_path)\n return join(cache_dir, cache_base.replace('.json', '.q'))\n\n\ndef add_http_value_to_dict(resp, http_key, d, dict_key):\n value = resp.headers.get(http_key)\n if value:\n d[dict_key] = value\n\n\ndef create_cache_dir():\n pkgs_dir = PackageCache.first_writable(context.pkgs_dirs).pkgs_dir\n assert pkgs_dir == context.pkgs_dirs[0], (pkgs_dir, context.pkgs_dirs)\n cache_dir = join(PackageCache.first_writable(context.pkgs_dirs).pkgs_dir, 'cache')\n try:\n makedirs(cache_dir)\n except OSError:\n pass\n return cache_dir\n\n\ndef dist_str_in_index(index, dist_str):\n return Dist(dist_str) in index\n",
"path": "conda/core/index.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport bz2\nfrom contextlib import closing\nfrom functools import wraps\nimport hashlib\nimport json\nfrom logging import DEBUG, getLogger\nfrom mmap import ACCESS_READ, mmap\nfrom os import makedirs\nfrom os.path import getmtime, isfile, join, split as path_split, dirname\nimport pickle\nimport re\nfrom textwrap import dedent\nfrom time import time\nimport warnings\n\nfrom requests.exceptions import ConnectionError, HTTPError, SSLError\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\n\nfrom .linked_data import linked_data\nfrom .package_cache import PackageCache\nfrom .. import CondaError\nfrom .._vendor.auxlib.entity import EntityEncoder\nfrom .._vendor.auxlib.ish import dals\nfrom .._vendor.auxlib.logz import stringify\nfrom ..base.constants import CONDA_HOMEPAGE_URL, MAX_CHANNEL_PRIORITY\nfrom ..base.context import context\nfrom ..common.compat import (ensure_binary, ensure_text_type, ensure_unicode, iteritems,\n itervalues)\nfrom ..common.url import join_url\nfrom ..connection import CondaSession\nfrom ..exceptions import CondaHTTPError, CondaRuntimeError\nfrom ..gateways.disk.delete import rm_rf\nfrom ..gateways.disk.read import read_index_json\nfrom ..gateways.disk.update import touch\nfrom ..models.channel import Channel, prioritize_channels\nfrom ..models.dist import Dist\nfrom ..models.index_record import EMPTY_LINK, IndexRecord, Priority\n\ntry:\n from cytoolz.itertoolz import take\nexcept ImportError:\n from .._vendor.toolz.itertoolz import take\n\n\nlog = getLogger(__name__)\ndotlog = getLogger('dotupdate')\nstdoutlog = getLogger('stdoutlog')\nstderrlog = getLogger('stderrlog')\n\nfail_unknown_host = False\n\n\nREPODATA_PICKLE_VERSION = 1\nREPODATA_HEADER_RE = b'\"(_etag|_mod|_cache_control)\":[ ]?\"(.*)\"'\n\n\ndef supplement_index_with_prefix(index, prefix, channels):\n # type: (Dict[Dist, IndexRecord], str, Set[canonical_channel]) -> None\n # supplement index with information from prefix/conda-meta\n assert prefix\n maxp = len(channels) + 1\n for dist, info in iteritems(linked_data(prefix)):\n if dist in index:\n # The downloaded repodata takes priority, so we do not overwrite.\n # We do, however, copy the link information so that the solver\n # knows this package is installed.\n old_record = index[dist]\n link = info.get('link') or EMPTY_LINK\n index[dist] = IndexRecord.from_objects(old_record, link=link)\n else:\n # If the package is not in the repodata, use the local data. If\n # the 'depends' field is not present, we need to set it; older\n # installations are likely to have this.\n depends = info.get('depends') or ()\n # If the channel is known but the package is not in the index, it\n # is because 1) the channel is unavailable offline, or 2) it no\n # longer contains this package. Either way, we should prefer any\n # other version of the package to this one. 
On the other hand, if\n # it is in a channel we don't know about, assign it a value just\n # above the priority of all known channels.\n priority = MAX_CHANNEL_PRIORITY if dist.channel in channels else maxp\n index[dist] = IndexRecord.from_objects(info, depends=depends, priority=priority)\n\n\ndef supplement_index_with_cache(index, channels):\n # type: (Dict[Dist, IndexRecord], Set[canonical_channel]) -> None\n # supplement index with packages from the cache\n maxp = len(channels) + 1\n for pc_entry in PackageCache.get_all_extracted_entries():\n dist = pc_entry.dist\n if dist in index:\n # The downloaded repodata takes priority\n continue\n pkg_dir = pc_entry.extracted_package_dir\n index_json_record = read_index_json(pkg_dir)\n # See the discussion above about priority assignments.\n priority = MAX_CHANNEL_PRIORITY if dist.channel in channels else maxp\n index_json_record.fn = dist.to_filename()\n index_json_record.schannel = dist.channel\n index_json_record.priority = priority\n index_json_record.url = dist.to_url()\n index[dist] = index_json_record\n\n\ndef get_index(channel_urls=(), prepend=True, platform=None,\n use_local=False, use_cache=False, unknown=None, prefix=None):\n \"\"\"\n Return the index of packages available on the channels\n\n If prepend=False, only the channels passed in as arguments are used.\n If platform=None, then the current platform is used.\n If prefix is supplied, then the packages installed in that prefix are added.\n \"\"\"\n if use_local:\n channel_urls = ['local'] + list(channel_urls)\n if prepend:\n channel_urls += context.channels\n if context.offline and unknown is None:\n unknown = True\n\n channel_priority_map = prioritize_channels(channel_urls, platform=platform)\n index = fetch_index(channel_priority_map, use_cache=use_cache)\n\n if prefix or unknown:\n known_channels = {chnl for chnl, _ in itervalues(channel_priority_map)}\n if prefix:\n supplement_index_with_prefix(index, prefix, known_channels)\n if unknown:\n supplement_index_with_cache(index, known_channels)\n return index\n\n\n# We need a decorator so that the dot gets printed *after* the repodata is fetched\nclass dotlog_on_return(object):\n def __init__(self, msg):\n self.msg = msg\n\n def __call__(self, f):\n @wraps(f)\n def func(*args, **kwargs):\n res = f(*args, **kwargs)\n dotlog.debug(\"%s args %s kwargs %s\" % (self.msg, args, kwargs))\n return res\n return func\n\n\ndef read_mod_and_etag(path):\n with open(path, 'rb') as f:\n try:\n with closing(mmap(f.fileno(), 0, access=ACCESS_READ)) as m:\n match_objects = take(3, re.finditer(REPODATA_HEADER_RE, m))\n result = dict(map(ensure_unicode, mo.groups()) for mo in match_objects)\n return result\n except (BufferError, ValueError):\n # BufferError: cannot close exported pointers exist\n # https://github.com/conda/conda/issues/4592\n # ValueError: cannot mmap an empty file\n return {}\n\n\ndef get_cache_control_max_age(cache_control_value):\n max_age = re.search(r\"max-age=(\\d+)\", cache_control_value)\n return int(max_age.groups()[0]) if max_age else 0\n\n\nclass Response304ContentUnchanged(Exception):\n pass\n\n\ndef fetch_repodata_remote_request(session, url, etag, mod_stamp):\n if not context.ssl_verify:\n warnings.simplefilter('ignore', InsecureRequestWarning)\n\n session = session or CondaSession()\n\n headers = {}\n if etag:\n headers[\"If-None-Match\"] = etag\n if mod_stamp:\n headers[\"If-Modified-Since\"] = mod_stamp\n\n if 'repo.continuum.io' in url or url.startswith(\"file://\"):\n filename = 'repodata.json.bz2'\n 
headers['Accept-Encoding'] = 'identity'\n else:\n headers['Accept-Encoding'] = 'gzip, deflate, compress, identity'\n headers['Content-Type'] = 'application/json'\n filename = 'repodata.json'\n\n try:\n timeout = context.remote_connect_timeout_secs, context.remote_read_timeout_secs\n resp = session.get(join_url(url, filename), headers=headers, proxies=session.proxies,\n timeout=timeout)\n if log.isEnabledFor(DEBUG):\n log.debug(stringify(resp))\n resp.raise_for_status()\n\n if resp.status_code == 304:\n raise Response304ContentUnchanged()\n\n def maybe_decompress(filename, resp_content):\n return ensure_text_type(bz2.decompress(resp_content)\n if filename.endswith('.bz2')\n else resp_content).strip()\n json_str = maybe_decompress(filename, resp.content)\n fetched_repodata = json.loads(json_str) if json_str else {}\n fetched_repodata['_url'] = url\n add_http_value_to_dict(resp, 'Etag', fetched_repodata, '_etag')\n add_http_value_to_dict(resp, 'Last-Modified', fetched_repodata, '_mod')\n add_http_value_to_dict(resp, 'Cache-Control', fetched_repodata, '_cache_control')\n return fetched_repodata\n\n except ValueError as e:\n raise CondaRuntimeError(\"Invalid index file: {0}: {1}\".format(join_url(url, filename), e))\n\n except (ConnectionError, HTTPError, SSLError) as e:\n # status_code might not exist on SSLError\n status_code = getattr(e.response, 'status_code', None)\n if status_code == 404:\n if not url.endswith('/noarch'):\n return None\n else:\n if context.allow_non_channel_urls:\n help_message = dedent(\"\"\"\n WARNING: The remote server could not find the noarch directory for the\n requested channel with url: %s\n\n It is possible you have given conda an invalid channel. Please double-check\n your conda configuration using `conda config --show`.\n\n If the requested url is in fact a valid conda channel, please request that the\n channel administrator create `noarch/repodata.json` and associated\n `noarch/repodata.json.bz2` files, even if `noarch/repodata.json` is empty.\n $ mkdir noarch\n $ echo '{}' > noarch/repodata.json\n $ bzip2 -k noarch/repodata.json\n \"\"\") % dirname(url)\n stderrlog.warn(help_message)\n return None\n else:\n help_message = dals(\"\"\"\n The remote server could not find the noarch directory for the\n requested channel with url: %s\n\n As of conda 4.3, a valid channel must contain a `noarch/repodata.json` and\n associated `noarch/repodata.json.bz2` file, even if `noarch/repodata.json` is\n empty. please request that the channel administrator create\n `noarch/repodata.json` and associated `noarch/repodata.json.bz2` files.\n $ mkdir noarch\n $ echo '{}' > noarch/repodata.json\n $ bzip2 -k noarch/repodata.json\n\n You will need to adjust your conda configuration to proceed.\n Use `conda config --show` to view your configuration's current state.\n Further configuration help can be found at <%s>.\n \"\"\") % (dirname(url), join_url(CONDA_HOMEPAGE_URL, 'docs/config.html'))\n\n elif status_code == 403:\n if not url.endswith('/noarch'):\n return None\n else:\n if context.allow_non_channel_urls:\n help_message = dedent(\"\"\"\n WARNING: The remote server could not find the noarch directory for the\n requested channel with url: %s\n\n It is possible you have given conda an invalid channel. 
Please double-check\n your conda configuration using `conda config --show`.\n\n If the requested url is in fact a valid conda channel, please request that the\n channel administrator create `noarch/repodata.json` and associated\n `noarch/repodata.json.bz2` files, even if `noarch/repodata.json` is empty.\n $ mkdir noarch\n $ echo '{}' > noarch/repodata.json\n $ bzip2 -k noarch/repodata.json\n \"\"\") % dirname(url)\n stderrlog.warn(help_message)\n return None\n else:\n help_message = dals(\"\"\"\n The remote server could not find the noarch directory for the\n requested channel with url: %s\n\n As of conda 4.3, a valid channel must contain a `noarch/repodata.json` and\n associated `noarch/repodata.json.bz2` file, even if `noarch/repodata.json` is\n empty. please request that the channel administrator create\n `noarch/repodata.json` and associated `noarch/repodata.json.bz2` files.\n $ mkdir noarch\n $ echo '{}' > noarch/repodata.json\n $ bzip2 -k noarch/repodata.json\n\n You will need to adjust your conda configuration to proceed.\n Use `conda config --show` to view your configuration's current state.\n Further configuration help can be found at <%s>.\n \"\"\") % (dirname(url), join_url(CONDA_HOMEPAGE_URL, 'docs/config.html'))\n\n elif status_code == 401:\n channel = Channel(url)\n if channel.token:\n help_message = dals(\"\"\"\n The token '%s' given for the URL is invalid.\n\n If this token was pulled from anaconda-client, you will need to use\n anaconda-client to reauthenticate.\n\n If you supplied this token to conda directly, you will need to adjust your\n conda configuration to proceed.\n\n Use `conda config --show` to view your configuration's current state.\n Further configuration help can be found at <%s>.\n \"\"\") % (channel.token, join_url(CONDA_HOMEPAGE_URL, 'docs/config.html'))\n\n elif context.channel_alias.location in url:\n # Note, this will not trigger if the binstar configured url does\n # not match the conda configured one.\n help_message = dals(\"\"\"\n The remote server has indicated you are using invalid credentials for this channel.\n\n If the remote site is anaconda.org or follows the Anaconda Server API, you\n will need to\n (a) login to the site with `anaconda login`, or\n (b) provide conda with a valid token directly.\n\n Further configuration help can be found at <%s>.\n \"\"\") % join_url(CONDA_HOMEPAGE_URL, 'docs/config.html')\n\n else:\n help_message = dals(\"\"\"\n The credentials you have provided for this URL are invalid.\n\n You will need to modify your conda configuration to proceed.\n Use `conda config --show` to view your configuration's current state.\n Further configuration help can be found at <%s>.\n \"\"\") % join_url(CONDA_HOMEPAGE_URL, 'docs/config.html')\n\n elif status_code is not None and 500 <= status_code < 600:\n help_message = dals(\"\"\"\n An remote server error occurred when trying to retrieve this URL.\n\n A 500-type error (e.g. 500, 501, 502, 503, etc.) indicates the server failed to\n fulfill a valid request. The problem may be spurious, and will resolve itself if you\n try your request again. 
If the problem persists, consider notifying the maintainer\n of the remote server.\n \"\"\")\n\n else:\n help_message = dals(\"\"\"\n An HTTP error occurred when trying to retrieve this URL.\n HTTP errors are often intermittent, and a simple retry will get you on your way.\n %r\n \"\"\") % e\n\n raise CondaHTTPError(help_message,\n getattr(e.response, 'url', None),\n status_code,\n getattr(e.response, 'reason', None),\n getattr(e.response, 'elapsed', None),\n e.response)\n\n\ndef write_pickled_repodata(cache_path, repodata):\n # Don't bother to pickle empty channels\n if not repodata.get('packages'):\n return\n try:\n with open(get_pickle_path(cache_path), 'wb') as f:\n pickle.dump(repodata, f)\n except Exception as e:\n import traceback\n log.debug(\"Failed to dump pickled repodata.\\n%s\", traceback.format_exc())\n\n\ndef read_pickled_repodata(cache_path, channel_url, schannel, priority, etag, mod_stamp):\n pickle_path = get_pickle_path(cache_path)\n # Don't trust pickled data if there is no accompanying json data\n if not isfile(pickle_path) or not isfile(cache_path):\n return None\n try:\n if isfile(pickle_path):\n log.debug(\"found pickle file %s\", pickle_path)\n with open(pickle_path, 'rb') as f:\n repodata = pickle.load(f)\n except Exception as e:\n import traceback\n log.debug(\"Failed to load pickled repodata.\\n%s\", traceback.format_exc())\n rm_rf(pickle_path)\n return None\n\n def _check_pickled_valid():\n yield repodata.get('_url') == channel_url\n yield repodata.get('_schannel') == schannel\n yield repodata.get('_add_pip') == context.add_pip_as_python_dependency\n yield repodata.get('_mod') == mod_stamp\n yield repodata.get('_etag') == etag\n yield repodata.get('_pickle_version') == REPODATA_PICKLE_VERSION\n\n if not all(_check_pickled_valid()):\n return None\n\n if int(repodata['_priority']) != priority:\n log.debug(\"setting priority for %s to '%d'\", repodata.get('_url'), priority)\n repodata['_priority']._priority = priority\n\n return repodata\n\n\ndef read_local_repodata(cache_path, channel_url, schannel, priority, etag, mod_stamp):\n local_repodata = read_pickled_repodata(cache_path, channel_url, schannel, priority,\n etag, mod_stamp)\n if local_repodata:\n return local_repodata\n with open(cache_path) as f:\n try:\n local_repodata = json.load(f)\n except ValueError as e:\n # ValueError: Expecting object: line 11750 column 6 (char 303397)\n log.debug(\"Error for cache path: '%s'\\n%r\", cache_path, e)\n message = dals(\"\"\"\n An error occurred when loading cached repodata. 
Executing\n `conda clean --index-cache` will remove cached repodata files\n so they can be downloaded again.\n \"\"\")\n raise CondaError(message)\n else:\n process_repodata(local_repodata, channel_url, schannel, priority)\n write_pickled_repodata(cache_path, local_repodata)\n return local_repodata\n\n\ndef process_repodata(repodata, channel_url, schannel, priority):\n opackages = repodata.setdefault('packages', {})\n if not opackages:\n return repodata\n\n repodata['_add_pip'] = add_pip = context.add_pip_as_python_dependency\n repodata['_pickle_version'] = REPODATA_PICKLE_VERSION\n repodata['_priority'] = priority = Priority(priority)\n repodata['_schannel'] = schannel\n\n meta_in_common = { # just need to make this once, then apply with .update()\n 'arch': repodata.get('info', {}).get('arch'),\n 'channel': channel_url,\n 'platform': repodata.get('info', {}).get('platform'),\n 'priority': priority,\n 'schannel': schannel,\n }\n packages = {}\n for fn, info in iteritems(opackages):\n info['fn'] = fn\n info['url'] = join_url(channel_url, fn)\n if add_pip and info['name'] == 'python' and info['version'].startswith(('2.', '3.')):\n info['depends'].append('pip')\n info.update(meta_in_common)\n rec = IndexRecord(**info)\n packages[Dist(rec)] = rec\n repodata['packages'] = packages\n\n\n@dotlog_on_return(\"fetching repodata:\")\ndef fetch_repodata(url, schannel, priority,\n cache_dir=None, use_cache=False, session=None):\n cache_path = join(cache_dir or create_cache_dir(), cache_fn_url(url))\n\n try:\n mtime = getmtime(cache_path)\n except (IOError, OSError):\n log.debug(\"No local cache found for %s at %s\", url, cache_path)\n if use_cache:\n return {'packages': {}}\n else:\n mod_etag_headers = {}\n else:\n mod_etag_headers = read_mod_and_etag(cache_path)\n\n if context.local_repodata_ttl > 1:\n max_age = context.local_repodata_ttl\n elif context.local_repodata_ttl == 1:\n max_age = get_cache_control_max_age(mod_etag_headers.get('_cache_control', ''))\n else:\n max_age = 0\n\n timeout = mtime + max_age - time()\n if (timeout > 0 or context.offline) and not url.startswith('file://'):\n log.debug(\"Using cached repodata for %s at %s. Timeout in %d sec\",\n url, cache_path, timeout)\n return read_local_repodata(cache_path, url, schannel, priority,\n mod_etag_headers.get('_etag'), mod_etag_headers.get('_mod'))\n\n log.debug(\"Locally invalidating cached repodata for %s at %s\", url, cache_path)\n\n try:\n assert url is not None, url\n repodata = fetch_repodata_remote_request(session, url,\n mod_etag_headers.get('_etag'),\n mod_etag_headers.get('_mod'))\n except Response304ContentUnchanged:\n log.debug(\"304 NOT MODIFIED for '%s'. 
Updating mtime and loading from disk\", url)\n touch(cache_path)\n return read_local_repodata(cache_path, url, schannel, priority,\n mod_etag_headers.get('_etag'), mod_etag_headers.get('_mod'))\n if repodata is None:\n return None\n\n with open(cache_path, 'w') as fo:\n json.dump(repodata, fo, indent=2, sort_keys=True, cls=EntityEncoder)\n\n process_repodata(repodata, url, schannel, priority)\n write_pickled_repodata(cache_path, repodata)\n return repodata\n\n\ndef _collect_repodatas_serial(use_cache, tasks):\n # type: (bool, List[str]) -> List[Sequence[str, Option[Dict[Dist, IndexRecord]]]]\n session = CondaSession()\n repodatas = [(url, fetch_repodata(url, schan, pri,\n use_cache=use_cache,\n session=session))\n for url, schan, pri in tasks]\n return repodatas\n\n\ndef _collect_repodatas_concurrent(executor, use_cache, tasks):\n futures = tuple(executor.submit(fetch_repodata, url, schan, pri,\n use_cache=use_cache,\n session=CondaSession())\n for url, schan, pri in tasks)\n repodatas = [(t[0], f.result()) for t, f in zip(tasks, futures)]\n return repodatas\n\n\ndef _collect_repodatas(use_cache, tasks):\n repodatas = executor = None\n if context.concurrent:\n try:\n import concurrent.futures\n executor = concurrent.futures.ThreadPoolExecutor(10)\n repodatas = _collect_repodatas_concurrent(executor, use_cache, tasks)\n except (ImportError, RuntimeError) as e:\n # concurrent.futures is only available in Python >= 3.2 or if futures is installed\n # RuntimeError is thrown if number of threads are limited by OS\n log.debug(repr(e))\n if executor:\n executor.shutdown(wait=True)\n if repodatas is None:\n repodatas = _collect_repodatas_serial(use_cache, tasks)\n return repodatas\n\n\ndef fetch_index(channel_urls, use_cache=False, index=None):\n # type: (prioritize_channels(), bool, bool, Dict[Dist, IndexRecord]) -> Dict[Dist, IndexRecord]\n log.debug('channel_urls=' + repr(channel_urls))\n if not context.json:\n stdoutlog.info(\"Fetching package metadata ...\")\n\n tasks = [(url,) + cdata for url, cdata in iteritems(channel_urls)]\n repodatas = _collect_repodatas(use_cache, tasks)\n # type: List[Sequence[str, Option[Dict[Dist, IndexRecord]]]]\n # this is sorta a lie; actually more primitve types\n\n if index is None:\n index = {}\n for _, repodata in repodatas:\n if repodata:\n index.update(repodata.get('packages', {}))\n\n if not context.json:\n stdoutlog.info('\\n')\n return index\n\n\ndef cache_fn_url(url):\n # url must be right-padded with '/' to not invalidate any existing caches\n if not url.endswith('/'):\n url += '/'\n md5 = hashlib.md5(ensure_binary(url)).hexdigest()\n return '%s.json' % (md5[:8],)\n\n\ndef get_pickle_path(cache_path):\n cache_dir, cache_base = path_split(cache_path)\n return join(cache_dir, cache_base.replace('.json', '.q'))\n\n\ndef add_http_value_to_dict(resp, http_key, d, dict_key):\n value = resp.headers.get(http_key)\n if value:\n d[dict_key] = value\n\n\ndef create_cache_dir():\n cache_dir = join(PackageCache.first_writable(context.pkgs_dirs).pkgs_dir, 'cache')\n try:\n makedirs(cache_dir)\n except OSError:\n pass\n return cache_dir\n\n\ndef dist_str_in_index(index, dist_str):\n return Dist(dist_str) in index\n",
"path": "conda/core/index.py"
}
] | diff --git a/conda/core/index.py b/conda/core/index.py
index e29465fbda7..e3bda240772 100644
--- a/conda/core/index.py
+++ b/conda/core/index.py
@@ -591,8 +591,6 @@ def add_http_value_to_dict(resp, http_key, d, dict_key):
def create_cache_dir():
- pkgs_dir = PackageCache.first_writable(context.pkgs_dirs).pkgs_dir
- assert pkgs_dir == context.pkgs_dirs[0], (pkgs_dir, context.pkgs_dirs)
cache_dir = join(PackageCache.first_writable(context.pkgs_dirs).pkgs_dir, 'cache')
try:
makedirs(cache_dir)
|
dotkom__onlineweb4-1920 | PaymentPrice description is not required by manager but required for __str__
## What kind of an issue is this?
- [x] Bug report
## What is the expected behaviour?
That you can create a PaymentPrice as its model defines it: with a required price and an optional description.
## What is the current behaviour?
Creating a PaymentPrice with a required price but no description makes the system fail with the error message
```
TypeError at /admin/payment/payment/add/
unsupported operand type(s) for +: 'NoneType' and 'str'
```
## How do you reproduce this problem?
* Create an event with an attached attendance
* Navigate to /admin/payment/payment/add/
* Add a payment to the attendance event you created, without a description (but with a price)
* The system crashes with the error message `unsupported operand type(s) for +: 'NoneType' and 'str'`
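
The crash comes from `PaymentPrice.__str__`, which concatenates `self.description` with a string even though the field is declared `null=True, blank=True`. The fix that landed (see the diff further down in this record) simply falls back to showing only the price when no description is set:

```python
def __str__(self):
    # description is optional (null=True, blank=True), so avoid
    # concatenating None with a str by falling back to the price alone.
    if not self.description:
        return str(self.price) + "kr"
    return self.description + " (" + str(self.price) + "kr)"
```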
| [
{
"content": "# -*- coding: utf-8 -*-\n\nimport uuid\n\nfrom django.conf import settings\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext as _\nfrom rest_framework.exceptions import NotAcceptable\n\nfrom apps.events.models import AttendanceEvent, Attendee\nfrom apps.marks.models import Suspension\n\nUser = settings.AUTH_USER_MODEL\n\n\nclass Payment(models.Model):\n\n TYPE_CHOICES = (\n (1, _('Umiddelbar')),\n (2, _('Frist')),\n (3, _('Utsettelse')),\n )\n\n # Make sure these exist in settings if they are to be used.\n STRIPE_KEY_CHOICES = (\n ('arrkom', 'arrkom'),\n ('prokom', 'prokom'),\n ('trikom', 'trikom'),\n ('fagkom', 'fagkom'),\n )\n\n content_type = models.ForeignKey(ContentType)\n object_id = models.PositiveIntegerField()\n content_object = GenericForeignKey()\n stripe_key = models.CharField(\n _('stripe key'),\n max_length=10,\n choices=STRIPE_KEY_CHOICES,\n default=\"arrkom\"\n )\n\n payment_type = models.SmallIntegerField(_('type'), choices=TYPE_CHOICES)\n\n # Optional fields depending on payment type\n deadline = models.DateTimeField(_(\"frist\"), blank=True, null=True)\n active = models.BooleanField(default=True)\n delay = models.SmallIntegerField(_('utsettelse'), blank=True, null=True, default=2)\n\n # For logging and history\n added_date = models.DateTimeField(_(\"opprettet dato\"), auto_now=True)\n changed_date = models.DateTimeField(auto_now=True, editable=False)\n last_changed_by = models.ForeignKey(User, editable=False, null=True) # Blank and null is temperarly\n\n def paid_users(self):\n return [payment_relation.user for payment_relation in self.paymentrelation_set.filter(refunded=False)]\n\n def payment_delays(self):\n return self.paymentdelay_set.filter(active=True)\n\n def payment_delay_users(self):\n return [payment_delay.user for payment_delay in self.payment_delays()]\n\n def create_payment_delay(self, user, deadline):\n payment_delays = self.paymentdelay_set.filter(payment=self, user=user)\n\n if payment_delays:\n for payment_delay in payment_delays:\n payment_delay.valid_to = deadline\n payment_delay.save()\n else:\n PaymentDelay.objects.create(payment=self, user=user, valid_to=deadline)\n\n def description(self):\n if self._is_type(AttendanceEvent):\n return self.content_object.event.title\n\n def get_receipt_description(self):\n receipt_description = \"\"\n description = [' '] * 30\n temp = self.description()[0:25]\n description[0:len(temp)+1] = list(temp)\n for c in description:\n receipt_description += c\n return receipt_description\n\n def responsible_mail(self):\n if self._is_type(AttendanceEvent):\n event_type = self.content_object.event.event_type\n if event_type == 1 or event_type == 4: # Sosialt & Utflukt\n return settings.EMAIL_ARRKOM\n elif event_type == 2: # Bedpres\n return settings.EMAIL_BEDKOM\n elif event_type == 3: # Kurs\n return settings.EMAIL_FAGKOM\n elif event_type == 5: # Ekskursjon\n return settings.EMAIL_EKSKOM\n else:\n return settings.DEFAULT_FROM_EMAIL\n else:\n return settings.DEFAULT_FROM_EMAIL\n\n def handle_payment(self, user):\n if self._is_type(AttendanceEvent):\n attendee = Attendee.objects.filter(event=self.content_object, user=user)\n\n # Delete payment delay objects for the user if there are any\n delays = PaymentDelay.objects.filter(payment=self, user=user)\n for delay in delays:\n delay.delete()\n\n # If the user is suspended because of 
a lack of payment the suspension is deactivated.\n suspensions = Suspension.objects.filter(payment_id=self.id, user=user)\n for suspension in suspensions:\n suspension.active = False\n suspension.save()\n\n if attendee:\n attendee[0].paid = True\n attendee[0].save()\n else:\n Attendee.objects.create(event=self.content_object, user=user, paid=True)\n\n def handle_refund(self, host, payment_relation):\n payment_relation.refunded = True\n payment_relation.save()\n\n if self._is_type(AttendanceEvent):\n self.content_object.notify_waiting_list(\n host=host, unattended_user=payment_relation.user)\n Attendee.objects.get(event=self.content_object,\n user=payment_relation.user).delete()\n\n def check_refund(self, payment_relation):\n if self._is_type(AttendanceEvent):\n attendance_event = self.content_object\n if attendance_event.unattend_deadline < timezone.now():\n return False, _(\"Fristen for og melde seg av har utgått\")\n if len(Attendee.objects.filter(event=attendance_event, user=payment_relation.user)) == 0:\n return False, _(\"Du er ikke påmeldt dette arrangementet.\")\n if attendance_event.event.event_start < timezone.now():\n return False, _(\"Dette arrangementet har allerede startet.\")\n\n return True, ''\n\n return False, 'Refund checks not implemented'\n\n def prices(self):\n return self.paymentprice_set.all()\n\n def price(self):\n # TODO implement group based pricing\n if self.paymentprice_set.count() > 0:\n return self.paymentprice_set.all()[0]\n return None\n\n def _is_type(self, model_type):\n return ContentType.objects.get_for_model(model_type) == self.content_type\n\n def __str__(self):\n return self.description()\n\n class Meta(object):\n unique_together = ('content_type', 'object_id')\n\n verbose_name = _(\"betaling\")\n verbose_name_plural = _(\"betalinger\")\n\n\nclass PaymentPrice(models.Model):\n payment = models.ForeignKey(Payment)\n price = models.IntegerField(_(\"pris\"))\n description = models.CharField(max_length=128, null=True, blank=True)\n\n def __str__(self):\n return self.description + \" (\" + str(self.price) + \"kr)\"\n\n class Meta(object):\n verbose_name = _(\"pris\")\n verbose_name_plural = _(\"priser\")\n\n\nclass PaymentRelation(models.Model):\n payment = models.ForeignKey(Payment)\n payment_price = models.ForeignKey(PaymentPrice)\n user = models.ForeignKey(User)\n datetime = models.DateTimeField(auto_now=True)\n refunded = models.BooleanField(default=False)\n\n unique_id = models.CharField(max_length=128, null=True, blank=True)\n stripe_id = models.CharField(max_length=128)\n\n def save(self, *args, **kwargs):\n if not self.unique_id:\n self.unique_id = str(uuid.uuid4())\n super(PaymentRelation, self).save(*args, **kwargs)\n\n def __str__(self):\n return self.payment.description() + \" - \" + str(self.user)\n\n class Meta(object):\n verbose_name = _(\"betalingsrelasjon\")\n verbose_name_plural = _(\"betalingsrelasjoner\")\n\n\nclass PaymentDelay(models.Model):\n payment = models.ForeignKey(Payment)\n user = models.ForeignKey(User)\n valid_to = models.DateTimeField()\n\n active = models.BooleanField(default=True)\n\n def __str__(self):\n return self.payment.description() + \" - \" + str(self.user)\n\n class Meta(object):\n unique_together = ('payment', 'user')\n\n verbose_name = _('betalingsutsettelse')\n verbose_name_plural = _('betalingsutsettelser')\n\n\nclass PaymentTransaction(models.Model):\n user = models.ForeignKey(User)\n amount = models.IntegerField(null=True, blank=True)\n used_stripe = models.BooleanField(default=False)\n\n datetime = 
models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return str(self.user) + \" - \" + str(self.amount) + \"(\" + str(self.datetime) + \")\"\n\n def save(self, *args, **kwargs):\n if not self.pk:\n self.user.saldo = self.user.saldo + self.amount\n\n if self.user.saldo < 0:\n raise NotAcceptable(\"Insufficient funds\")\n\n self.user.save()\n super(PaymentTransaction, self).save(*args, **kwargs)\n\n class Meta:\n ordering = ['-datetime']\n verbose_name = _('transaksjon')\n verbose_name_plural = _('transaksjoner')\n",
"path": "apps/payment/models.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\nimport uuid\n\nfrom django.conf import settings\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext as _\nfrom rest_framework.exceptions import NotAcceptable\n\nfrom apps.events.models import AttendanceEvent, Attendee\nfrom apps.marks.models import Suspension\n\nUser = settings.AUTH_USER_MODEL\n\n\nclass Payment(models.Model):\n\n TYPE_CHOICES = (\n (1, _('Umiddelbar')),\n (2, _('Frist')),\n (3, _('Utsettelse')),\n )\n\n # Make sure these exist in settings if they are to be used.\n STRIPE_KEY_CHOICES = (\n ('arrkom', 'arrkom'),\n ('prokom', 'prokom'),\n ('trikom', 'trikom'),\n ('fagkom', 'fagkom'),\n )\n\n content_type = models.ForeignKey(ContentType)\n object_id = models.PositiveIntegerField()\n content_object = GenericForeignKey()\n stripe_key = models.CharField(\n _('stripe key'),\n max_length=10,\n choices=STRIPE_KEY_CHOICES,\n default=\"arrkom\"\n )\n\n payment_type = models.SmallIntegerField(_('type'), choices=TYPE_CHOICES)\n\n # Optional fields depending on payment type\n deadline = models.DateTimeField(_(\"frist\"), blank=True, null=True)\n active = models.BooleanField(default=True)\n delay = models.SmallIntegerField(_('utsettelse'), blank=True, null=True, default=2)\n\n # For logging and history\n added_date = models.DateTimeField(_(\"opprettet dato\"), auto_now=True)\n changed_date = models.DateTimeField(auto_now=True, editable=False)\n last_changed_by = models.ForeignKey(User, editable=False, null=True) # Blank and null is temperarly\n\n def paid_users(self):\n return [payment_relation.user for payment_relation in self.paymentrelation_set.filter(refunded=False)]\n\n def payment_delays(self):\n return self.paymentdelay_set.filter(active=True)\n\n def payment_delay_users(self):\n return [payment_delay.user for payment_delay in self.payment_delays()]\n\n def create_payment_delay(self, user, deadline):\n payment_delays = self.paymentdelay_set.filter(payment=self, user=user)\n\n if payment_delays:\n for payment_delay in payment_delays:\n payment_delay.valid_to = deadline\n payment_delay.save()\n else:\n PaymentDelay.objects.create(payment=self, user=user, valid_to=deadline)\n\n def description(self):\n if self._is_type(AttendanceEvent):\n return self.content_object.event.title\n\n def get_receipt_description(self):\n receipt_description = \"\"\n description = [' '] * 30\n temp = self.description()[0:25]\n description[0:len(temp)+1] = list(temp)\n for c in description:\n receipt_description += c\n return receipt_description\n\n def responsible_mail(self):\n if self._is_type(AttendanceEvent):\n event_type = self.content_object.event.event_type\n if event_type == 1 or event_type == 4: # Sosialt & Utflukt\n return settings.EMAIL_ARRKOM\n elif event_type == 2: # Bedpres\n return settings.EMAIL_BEDKOM\n elif event_type == 3: # Kurs\n return settings.EMAIL_FAGKOM\n elif event_type == 5: # Ekskursjon\n return settings.EMAIL_EKSKOM\n else:\n return settings.DEFAULT_FROM_EMAIL\n else:\n return settings.DEFAULT_FROM_EMAIL\n\n def handle_payment(self, user):\n if self._is_type(AttendanceEvent):\n attendee = Attendee.objects.filter(event=self.content_object, user=user)\n\n # Delete payment delay objects for the user if there are any\n delays = PaymentDelay.objects.filter(payment=self, user=user)\n for delay in delays:\n delay.delete()\n\n # If the user is suspended because of 
a lack of payment the suspension is deactivated.\n suspensions = Suspension.objects.filter(payment_id=self.id, user=user)\n for suspension in suspensions:\n suspension.active = False\n suspension.save()\n\n if attendee:\n attendee[0].paid = True\n attendee[0].save()\n else:\n Attendee.objects.create(event=self.content_object, user=user, paid=True)\n\n def handle_refund(self, host, payment_relation):\n payment_relation.refunded = True\n payment_relation.save()\n\n if self._is_type(AttendanceEvent):\n self.content_object.notify_waiting_list(\n host=host, unattended_user=payment_relation.user)\n Attendee.objects.get(event=self.content_object,\n user=payment_relation.user).delete()\n\n def check_refund(self, payment_relation):\n if self._is_type(AttendanceEvent):\n attendance_event = self.content_object\n if attendance_event.unattend_deadline < timezone.now():\n return False, _(\"Fristen for og melde seg av har utgått\")\n if len(Attendee.objects.filter(event=attendance_event, user=payment_relation.user)) == 0:\n return False, _(\"Du er ikke påmeldt dette arrangementet.\")\n if attendance_event.event.event_start < timezone.now():\n return False, _(\"Dette arrangementet har allerede startet.\")\n\n return True, ''\n\n return False, 'Refund checks not implemented'\n\n def prices(self):\n return self.paymentprice_set.all()\n\n def price(self):\n # TODO implement group based pricing\n if self.paymentprice_set.count() > 0:\n return self.paymentprice_set.all()[0]\n return None\n\n def _is_type(self, model_type):\n return ContentType.objects.get_for_model(model_type) == self.content_type\n\n def __str__(self):\n return self.description()\n\n class Meta(object):\n unique_together = ('content_type', 'object_id')\n\n verbose_name = _(\"betaling\")\n verbose_name_plural = _(\"betalinger\")\n\n\nclass PaymentPrice(models.Model):\n payment = models.ForeignKey(Payment)\n price = models.IntegerField(_(\"pris\"))\n description = models.CharField(max_length=128, null=True, blank=True)\n\n def __str__(self):\n if not self.description:\n return str(self.price) + \"kr\"\n return self.description + \" (\" + str(self.price) + \"kr)\"\n\n class Meta(object):\n verbose_name = _(\"pris\")\n verbose_name_plural = _(\"priser\")\n\n\nclass PaymentRelation(models.Model):\n payment = models.ForeignKey(Payment)\n payment_price = models.ForeignKey(PaymentPrice)\n user = models.ForeignKey(User)\n datetime = models.DateTimeField(auto_now=True)\n refunded = models.BooleanField(default=False)\n\n unique_id = models.CharField(max_length=128, null=True, blank=True)\n stripe_id = models.CharField(max_length=128)\n\n def save(self, *args, **kwargs):\n if not self.unique_id:\n self.unique_id = str(uuid.uuid4())\n super(PaymentRelation, self).save(*args, **kwargs)\n\n def __str__(self):\n return self.payment.description() + \" - \" + str(self.user)\n\n class Meta(object):\n verbose_name = _(\"betalingsrelasjon\")\n verbose_name_plural = _(\"betalingsrelasjoner\")\n\n\nclass PaymentDelay(models.Model):\n payment = models.ForeignKey(Payment)\n user = models.ForeignKey(User)\n valid_to = models.DateTimeField()\n\n active = models.BooleanField(default=True)\n\n def __str__(self):\n return self.payment.description() + \" - \" + str(self.user)\n\n class Meta(object):\n unique_together = ('payment', 'user')\n\n verbose_name = _('betalingsutsettelse')\n verbose_name_plural = _('betalingsutsettelser')\n\n\nclass PaymentTransaction(models.Model):\n user = models.ForeignKey(User)\n amount = models.IntegerField(null=True, blank=True)\n 
used_stripe = models.BooleanField(default=False)\n\n datetime = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return str(self.user) + \" - \" + str(self.amount) + \"(\" + str(self.datetime) + \")\"\n\n def save(self, *args, **kwargs):\n if not self.pk:\n self.user.saldo = self.user.saldo + self.amount\n\n if self.user.saldo < 0:\n raise NotAcceptable(\"Insufficient funds\")\n\n self.user.save()\n super(PaymentTransaction, self).save(*args, **kwargs)\n\n class Meta:\n ordering = ['-datetime']\n verbose_name = _('transaksjon')\n verbose_name_plural = _('transaksjoner')\n",
"path": "apps/payment/models.py"
}
] | diff --git a/apps/payment/models.py b/apps/payment/models.py
index f55f8748d..12d57d3bb 100644
--- a/apps/payment/models.py
+++ b/apps/payment/models.py
@@ -175,6 +175,8 @@ class PaymentPrice(models.Model):
description = models.CharField(max_length=128, null=True, blank=True)
def __str__(self):
+ if not self.description:
+ return str(self.price) + "kr"
return self.description + " (" + str(self.price) + "kr)"
class Meta(object):
|
googleapis__python-bigquery-1567 | Warning on OpenTelemetry when some attributes are not set
#### Environment details
- OS type and version: Windows 10 21H2
- Python version: `3.9.4`
- pip version: `22.2.2`
- `google-cloud-bigquery` version: `3.3.5`
#### Steps to reproduce
1. Set up a query job with OpenTelemetry enabled
2. See warning in the console: `Invalid type NoneType for attribute value. Expected one of ['bool', 'str', 'bytes', 'int', 'float'] or a sequence of those types`
#### Code example
```python
import logging
from google.cloud import bigquery
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import (
SimpleSpanProcessor,
ConsoleSpanExporter,
)
provider = TracerProvider()
simple_processor = SimpleSpanProcessor(ConsoleSpanExporter())
provider.add_span_processor(simple_processor)
trace.set_tracer_provider(provider)
logging.basicConfig(level=10)
# Construct a BigQuery client object.
client = bigquery.Client()
query = "SELECT 1;"
query_job = client.query(query)
```
#### Stack trace
```
DEBUG:google.auth._default:Checking None for explicit credentials as part of auth process...
DEBUG:google.auth._default:Checking Cloud SDK credentials as part of auth process...
DEBUG:google.auth._default:Checking None for explicit credentials as part of auth process...
DEBUG:google.auth._default:Checking Cloud SDK credentials as part of auth process...
WARNING:opentelemetry.attributes:Invalid type NoneType for attribute value. Expected one of ['bool', 'str', 'bytes', 'int', 'float'] or a sequence of those types
WARNING:opentelemetry.attributes:Invalid type NoneType for attribute value. Expected one of ['bool', 'str', 'bytes', 'int', 'float'] or a sequence of those types
DEBUG:urllib3.util.retry:Converted retries value: 3 -> Retry(total=3, connect=None, read=None, redirect=None, status=None)
DEBUG:google.auth.transport.requests:Making request: POST https://oauth2.googleapis.com/token
DEBUG:urllib3.connectionpool:Starting new HTTPS connection (1): oauth2.googleapis.com:443
DEBUG:urllib3.connectionpool:https://oauth2.googleapis.com:443 "POST /token HTTP/1.1" 200 None
DEBUG:urllib3.connectionpool:Starting new HTTPS connection (1): bigquery.googleapis.com:443
DEBUG:urllib3.connectionpool:https://bigquery.googleapis.com:443 "POST /bigquery/v2/projects/my-project/jobs?prettyPrint=false HTTP/1.1" 200 None
{
"name": "BigQuery.job.begin",
"context": {
"trace_id": "0x192a0e4ec554c63f68525922208fed88",
"span_id": "0xfa29f0363122c4c4",
"trace_state": "[]"
},
"kind": "SpanKind.INTERNAL",
"parent_id": null,
"start_time": "2022-10-12T09:41:57.259114Z",
"end_time": "2022-10-12T09:41:57.934410Z",
"status": {
"status_code": "UNSET"
},
"attributes": {
"db.system": "BigQuery",
"db.name": "my-project",
"job_id": "fc1581e3-708b-4b51-9a05-e3ad52c68dec",
"hasErrors": false,
"num_child_jobs": 0,
"path": "/projects/my-project/jobs"
},
"events": [],
"links": [],
"resource": {
"attributes": {
"telemetry.sdk.language": "python",
"telemetry.sdk.name": "opentelemetry",
"telemetry.sdk.version": "1.13.0",
"service.name": "unknown_service"
},
"schema_url": ""
}
}
```
#### Analysis
The warnings appear when the `location` and job `state` attributes are `None`: OpenTelemetry only accepts attribute values of type `bool`, `str`, `bytes`, `int`, or `float` (or sequences of those), so unset attributes should be dropped rather than passed through as `None`.
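
The fix that landed (see the diff below) filters the merged attribute dict inside `_get_final_span_attributes` before it is returned to the tracer, keeping only attributes with concrete values:

```python
# Drop attributes whose value is None; OpenTelemetry rejects NoneType
# and only accepts bool/str/bytes/int/float (or sequences of those).
filtered = {k: v for k, v in final_attributes.items() if v is not None}
final_attributes.clear()
final_attributes.update(filtered)
```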
| [
{
"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom contextlib import contextmanager\nfrom google.api_core.exceptions import GoogleAPICallError # type: ignore\n\nlogger = logging.getLogger(__name__)\ntry:\n from opentelemetry import trace # type: ignore\n from opentelemetry.instrumentation.utils import http_status_to_status_code # type: ignore\n from opentelemetry.trace.status import Status # type: ignore\n\n HAS_OPENTELEMETRY = True\n _warned_telemetry = True\n\nexcept ImportError:\n HAS_OPENTELEMETRY = False\n _warned_telemetry = False\n\n_default_attributes = {\n \"db.system\": \"BigQuery\"\n} # static, default values assigned to all spans\n\n\n@contextmanager\ndef create_span(name, attributes=None, client=None, job_ref=None):\n \"\"\"Creates a ContextManager for a Span to be exported to the configured exporter.\n If no configuration exists yields None.\n\n Args:\n name (str): Name that will be set for the span being created\n attributes (Optional[dict]):\n Additional attributes that pertain to\n the specific API call (i.e. not a default attribute)\n client (Optional[google.cloud.bigquery.client.Client]):\n Pass in a Client object to extract any attributes that may be\n relevant to it and add them to the created spans.\n job_ref (Optional[google.cloud.bigquery.job._AsyncJob])\n Pass in a _AsyncJob object to extract any attributes that may be\n relevant to it and add them to the created spans.\n\n Yields:\n opentelemetry.trace.Span: Yields the newly created Span.\n\n Raises:\n google.api_core.exceptions.GoogleAPICallError:\n Raised if a span could not be yielded or issue with call to\n OpenTelemetry.\n \"\"\"\n global _warned_telemetry\n final_attributes = _get_final_span_attributes(attributes, client, job_ref)\n if not HAS_OPENTELEMETRY:\n if not _warned_telemetry:\n logger.debug(\n \"This service is instrumented using OpenTelemetry. 
\"\n \"OpenTelemetry or one of its components could not be imported; \"\n \"please add compatible versions of opentelemetry-api and \"\n \"opentelemetry-instrumentation packages in order to get BigQuery \"\n \"Tracing data.\"\n )\n _warned_telemetry = True\n\n yield None\n return\n tracer = trace.get_tracer(__name__)\n\n # yield new span value\n with tracer.start_as_current_span(name=name, attributes=final_attributes) as span:\n try:\n yield span\n except GoogleAPICallError as error:\n if error.code is not None:\n span.set_status(Status(http_status_to_status_code(error.code)))\n raise\n\n\ndef _get_final_span_attributes(attributes=None, client=None, job_ref=None):\n final_attributes = {}\n final_attributes.update(_default_attributes.copy())\n if client:\n client_attributes = _set_client_attributes(client)\n final_attributes.update(client_attributes)\n if job_ref:\n job_attributes = _set_job_attributes(job_ref)\n final_attributes.update(job_attributes)\n if attributes:\n final_attributes.update(attributes)\n return final_attributes\n\n\ndef _set_client_attributes(client):\n return {\"db.name\": client.project, \"location\": client.location}\n\n\ndef _set_job_attributes(job_ref):\n job_attributes = {\n \"db.name\": job_ref.project,\n \"job_id\": job_ref.job_id,\n \"state\": job_ref.state,\n }\n\n job_attributes[\"hasErrors\"] = job_ref.error_result is not None\n\n if job_ref.created is not None:\n job_attributes[\"timeCreated\"] = job_ref.created.isoformat()\n\n if job_ref.started is not None:\n job_attributes[\"timeStarted\"] = job_ref.started.isoformat()\n\n if job_ref.ended is not None:\n job_attributes[\"timeEnded\"] = job_ref.ended.isoformat()\n\n if job_ref.location is not None:\n job_attributes[\"location\"] = job_ref.location\n\n if job_ref.parent_job_id is not None:\n job_attributes[\"parent_job_id\"] = job_ref.parent_job_id\n\n if job_ref.num_child_jobs is not None:\n job_attributes[\"num_child_jobs\"] = job_ref.num_child_jobs\n\n return job_attributes\n",
"path": "google/cloud/bigquery/opentelemetry_tracing.py"
}
] | [
{
"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom contextlib import contextmanager\nfrom google.api_core.exceptions import GoogleAPICallError # type: ignore\n\nlogger = logging.getLogger(__name__)\ntry:\n from opentelemetry import trace # type: ignore\n from opentelemetry.instrumentation.utils import http_status_to_status_code # type: ignore\n from opentelemetry.trace.status import Status # type: ignore\n\n HAS_OPENTELEMETRY = True\n _warned_telemetry = True\n\nexcept ImportError:\n HAS_OPENTELEMETRY = False\n _warned_telemetry = False\n\n_default_attributes = {\n \"db.system\": \"BigQuery\"\n} # static, default values assigned to all spans\n\n\n@contextmanager\ndef create_span(name, attributes=None, client=None, job_ref=None):\n \"\"\"Creates a ContextManager for a Span to be exported to the configured exporter.\n If no configuration exists yields None.\n\n Args:\n name (str): Name that will be set for the span being created\n attributes (Optional[dict]):\n Additional attributes that pertain to\n the specific API call (i.e. not a default attribute)\n client (Optional[google.cloud.bigquery.client.Client]):\n Pass in a Client object to extract any attributes that may be\n relevant to it and add them to the created spans.\n job_ref (Optional[google.cloud.bigquery.job._AsyncJob])\n Pass in a _AsyncJob object to extract any attributes that may be\n relevant to it and add them to the created spans.\n\n Yields:\n opentelemetry.trace.Span: Yields the newly created Span.\n\n Raises:\n google.api_core.exceptions.GoogleAPICallError:\n Raised if a span could not be yielded or issue with call to\n OpenTelemetry.\n \"\"\"\n global _warned_telemetry\n final_attributes = _get_final_span_attributes(attributes, client, job_ref)\n if not HAS_OPENTELEMETRY:\n if not _warned_telemetry:\n logger.debug(\n \"This service is instrumented using OpenTelemetry. 
\"\n \"OpenTelemetry or one of its components could not be imported; \"\n \"please add compatible versions of opentelemetry-api and \"\n \"opentelemetry-instrumentation packages in order to get BigQuery \"\n \"Tracing data.\"\n )\n _warned_telemetry = True\n\n yield None\n return\n tracer = trace.get_tracer(__name__)\n\n # yield new span value\n with tracer.start_as_current_span(name=name, attributes=final_attributes) as span:\n try:\n yield span\n except GoogleAPICallError as error:\n if error.code is not None:\n span.set_status(Status(http_status_to_status_code(error.code)))\n raise\n\n\ndef _get_final_span_attributes(attributes=None, client=None, job_ref=None):\n final_attributes = {}\n final_attributes.update(_default_attributes.copy())\n if client:\n client_attributes = _set_client_attributes(client)\n final_attributes.update(client_attributes)\n if job_ref:\n job_attributes = _set_job_attributes(job_ref)\n final_attributes.update(job_attributes)\n if attributes:\n final_attributes.update(attributes)\n\n filtered = {k: v for k, v in final_attributes.items() if v is not None}\n final_attributes.clear()\n final_attributes.update(filtered)\n\n return final_attributes\n\n\ndef _set_client_attributes(client):\n return {\"db.name\": client.project, \"location\": client.location}\n\n\ndef _set_job_attributes(job_ref):\n job_attributes = {\n \"db.name\": job_ref.project,\n \"job_id\": job_ref.job_id,\n \"state\": job_ref.state,\n }\n\n job_attributes[\"hasErrors\"] = job_ref.error_result is not None\n\n if job_ref.created is not None:\n job_attributes[\"timeCreated\"] = job_ref.created.isoformat()\n\n if job_ref.started is not None:\n job_attributes[\"timeStarted\"] = job_ref.started.isoformat()\n\n if job_ref.ended is not None:\n job_attributes[\"timeEnded\"] = job_ref.ended.isoformat()\n\n if job_ref.location is not None:\n job_attributes[\"location\"] = job_ref.location\n\n if job_ref.parent_job_id is not None:\n job_attributes[\"parent_job_id\"] = job_ref.parent_job_id\n\n if job_ref.num_child_jobs is not None:\n job_attributes[\"num_child_jobs\"] = job_ref.num_child_jobs\n\n return job_attributes\n",
"path": "google/cloud/bigquery/opentelemetry_tracing.py"
}
] | diff --git a/google/cloud/bigquery/opentelemetry_tracing.py b/google/cloud/bigquery/opentelemetry_tracing.py
index 3d0a66ba8..0e1187c6b 100644
--- a/google/cloud/bigquery/opentelemetry_tracing.py
+++ b/google/cloud/bigquery/opentelemetry_tracing.py
@@ -97,6 +97,11 @@ def _get_final_span_attributes(attributes=None, client=None, job_ref=None):
final_attributes.update(job_attributes)
if attributes:
final_attributes.update(attributes)
+
+ filtered = {k: v for k, v in final_attributes.items() if v is not None}
+ final_attributes.clear()
+ final_attributes.update(filtered)
+
return final_attributes
|
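The patch above makes `_get_final_span_attributes` drop any attribute whose value is `None` before the span is created. A minimal, self-contained sketch of the same merge-then-filter idea (the function and attribute names here are illustrative, not the actual BigQuery client API):

```python
# Sketch of the merge-then-filter pattern from the diff above: later dicts
# override earlier ones, and keys whose value is None are dropped at the
# end, since None is not a meaningful OpenTelemetry span attribute value.
_DEFAULT_ATTRIBUTES = {"db.system": "BigQuery"}

def merge_span_attributes(*attribute_dicts):
    merged = dict(_DEFAULT_ATTRIBUTES)
    for attrs in attribute_dicts:
        if attrs:
            merged.update(attrs)
    return {k: v for k, v in merged.items() if v is not None}

print(merge_span_attributes(
    {"db.name": "my-project", "location": None},  # client-derived, location unset
    {"job_id": "job-123", "state": None},         # job-derived, state unset
))
# -> {'db.system': 'BigQuery', 'db.name': 'my-project', 'job_id': 'job-123'}
```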
facebookresearch__hydra-1281 | Release new version of Hydra
# 🚀 Feature Request
I would like you to release a version of Hydra that includes this PR: https://github.com/facebookresearch/hydra/pull/1197
## Motivation
Currently I am using Python 3.9 and I can't run Hydra due to a bug that is solved in the above PR.
| [
{
"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\n# Source of truth for Hydra's version\n__version__ = \"1.0.4\"\nfrom hydra import utils\nfrom hydra.errors import MissingConfigException\nfrom hydra.main import main\nfrom hydra.types import TaskFunction\n\n__all__ = [\"__version__\", \"MissingConfigException\", \"main\", \"utils\", \"TaskFunction\"]\n",
"path": "hydra/__init__.py"
}
] | [
{
"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\n# Source of truth for Hydra's version\n__version__ = \"1.0.5\"\nfrom hydra import utils\nfrom hydra.errors import MissingConfigException\nfrom hydra.main import main\nfrom hydra.types import TaskFunction\n\n__all__ = [\"__version__\", \"MissingConfigException\", \"main\", \"utils\", \"TaskFunction\"]\n",
"path": "hydra/__init__.py"
}
] | diff --git a/NEWS.md b/NEWS.md
index fb432038040..a6dcdc8a4f1 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1,3 +1,28 @@
+1.0.5 (2021-01-07)
+==================
+
+### Features
+
+- Support Python 3.9 . ([#1062](https://github.com/facebookresearch/hydra/issues/1062))
+
+### API Change (Renames, deprecations and removals)
+
+- Deprecate support for renaming packages via the command line. ([#1140](https://github.com/facebookresearch/hydra/issues/1140))
+
+### Bug Fixes
+
+- Fixed hydra.job.id and hydra.job.num not getting passed to jobs in multirun ([#1270](https://github.com/facebookresearch/hydra/issues/1270))
+
+### Plugins
+
+- Support `additional_parameters` as an optional param in the Submitit launcher plugin.
+- Add [Optuna](https://optuna.org/) Sweeper plugin
+
+### Maintenance Changes
+
+- Limit OmegaConf depedency to 2.0 versions ([#1253](https://github.com/facebookresearch/hydra/issues/1253))
+
+
1.0.4 (2020-11-17)
==================
diff --git a/hydra/__init__.py b/hydra/__init__.py
index b30a3269ce3..9eb6250cdd0 100644
--- a/hydra/__init__.py
+++ b/hydra/__init__.py
@@ -1,7 +1,7 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Source of truth for Hydra's version
-__version__ = "1.0.4"
+__version__ = "1.0.5"
from hydra import utils
from hydra.errors import MissingConfigException
from hydra.main import main
diff --git a/news/1036.plugin b/news/1036.plugin
deleted file mode 100644
index 3f013e0e57f..00000000000
--- a/news/1036.plugin
+++ /dev/null
@@ -1 +0,0 @@
-Support `additional_parameters` as an optional param in the Submitit launcher plugin.
diff --git a/news/1062.feature b/news/1062.feature
deleted file mode 100644
index 28c5553dcd4..00000000000
--- a/news/1062.feature
+++ /dev/null
@@ -1 +0,0 @@
-Support Python 3.9 .
diff --git a/news/1132.plugin b/news/1132.plugin
deleted file mode 100644
index 21c801a9591..00000000000
--- a/news/1132.plugin
+++ /dev/null
@@ -1 +0,0 @@
-Add [Optuna](https://optuna.org/) Sweeper plugin
diff --git a/news/1140.api_change b/news/1140.api_change
deleted file mode 100644
index 3ff0aa22e14..00000000000
--- a/news/1140.api_change
+++ /dev/null
@@ -1 +0,0 @@
-Deprecate support for renaming packages via the command line.
\ No newline at end of file
diff --git a/news/1253.maintenance b/news/1253.maintenance
deleted file mode 100644
index 064164df20d..00000000000
--- a/news/1253.maintenance
+++ /dev/null
@@ -1,3 +0,0 @@
-Limit OmegaConf depedency to 2.0 versions
-
-
diff --git a/news/1270.bugfix b/news/1270.bugfix
deleted file mode 100644
index eb4023db2f7..00000000000
--- a/news/1270.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fixed hydra.job.id and hydra.job.num not getting passed to jobs in multirun
|
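For anyone blocked on this, a quick sanity check that the installed Hydra carries the Python 3.9 fix, which per the NEWS entry above shipped in 1.0.5. This is only a sketch: it assumes a plain `X.Y.Z` version string, so pre-release suffixes would need real version parsing.

```python
# Sketch: confirm the installed Hydra is at least 1.0.5, the release that
# shipped the Python 3.9 fix. Assumes a plain "major.minor.patch" string.
import hydra

def has_py39_fix(version: str = hydra.__version__) -> bool:
    parts = tuple(int(p) for p in version.split(".")[:3])
    return parts >= (1, 0, 5)

print(hydra.__version__, has_py39_fix())
```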
sopel-irc__sopel-1605 | db: create table "nicknames" fails with MySQL (VARCHAR requires length)
Testing the new database system. According to some quick Google research, as well as a look from @Exirel, "The `String` type requires a length in most DBs".
Config:
````
db_type = mysql
db_user = sopeluser
db_pass = sopelpass
db_host = 127.0.0.1
db_port = 3306
db_name = sopel
````
Traceback:
````
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/compiler.py", line 2845, in visit_create_table
create_column, first_pk=column.primary_key and not first_pk
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/compiler.py", line 350, in process
return obj._compiler_dispatch(self, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/visitors.py", line 91, in _compiler_dispatch
return meth(self, **kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/compiler.py", line 2877, in visit_create_column
text = self.get_column_specification(column, first_pk=first_pk)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/dialects/mysql/base.py", line 1490, in get_column_specification
column.type, type_expression=column
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/compiler.py", line 400, in process
return type_._compiler_dispatch(self, **kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/visitors.py", line 91, in _compiler_dispatch
return meth(self, **kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/compiler.py", line 3342, in visit_string
return self.visit_VARCHAR(type_, **kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/dialects/mysql/base.py", line 1951, in visit_VARCHAR
"VARCHAR requires a length on dialect %s" % self.dialect.name
sqlalchemy.exc.CompileError: VARCHAR requires a length on dialect mysql
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/sopel/cli/run.py", line 64, in run
p = bot.Sopel(settings, daemon=daemon)
File "/usr/local/lib/python3.6/dist-packages/sopel/bot.py", line 123, in __init__
self.db = SopelDB(config)
File "/usr/local/lib/python3.6/dist-packages/sopel/db.py", line 145, in __init__
BASE.metadata.create_all(self.engine)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/schema.py", line 4287, in create_all
ddl.SchemaGenerator, self, checkfirst=checkfirst, tables=tables
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py", line 2033, in _run_visitor
conn._run_visitor(visitorcallable, element, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py", line 1607, in _run_visitor
visitorcallable(self.dialect, self, **kwargs).traverse_single(element)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/visitors.py", line 131, in traverse_single
return meth(obj, **kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/ddl.py", line 781, in visit_metadata
_is_metadata_operation=True,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/visitors.py", line 131, in traverse_single
return meth(obj, **kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/ddl.py", line 826, in visit_table
include_foreign_key_constraints,
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py", line 988, in execute
return meth(self, multiparams, params)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/ddl.py", line 72, in _execute_on_connection
return connection._execute_ddl(self, multiparams, params)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py", line 1043, in _execute_ddl
else None,
File "<string>", line 1, in <lambda>
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/elements.py", line 462, in compile
return self._compiler(dialect, bind=bind, **kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/ddl.py", line 29, in _compiler
return dialect.ddl_compiler(dialect, self, **kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/compiler.py", line 319, in __init__
self.string = self.process(self.statement, **compile_kwargs)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/compiler.py", line 350, in process
return obj._compiler_dispatch(self, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/visitors.py", line 91, in _compiler_dispatch
return meth(self, **kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/compiler.py", line 2857, in visit_create_table
% (table.description, column.name, ce.args[0])
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/compat.py", line 383, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/compat.py", line 128, in reraise
raise value.with_traceback(tb)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/compiler.py", line 2845, in visit_create_table
create_column, first_pk=column.primary_key and not first_pk
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/compiler.py", line 350, in process
return obj._compiler_dispatch(self, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/visitors.py", line 91, in _compiler_dispatch
return meth(self, **kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/compiler.py", line 2877, in visit_create_column
text = self.get_column_specification(column, first_pk=first_pk)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/dialects/mysql/base.py", line 1490, in get_column_specification
column.type, type_expression=column
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/compiler.py", line 400, in process
return type_._compiler_dispatch(self, **kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/visitors.py", line 91, in _compiler_dispatch
return meth(self, **kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/compiler.py", line 3342, in visit_string
return self.visit_VARCHAR(type_, **kw)
File "/usr/local/lib/python3.6/dist-packages/sqlalchemy/dialects/mysql/base.py", line 1951, in visit_VARCHAR
"VARCHAR requires a length on dialect %s" % self.dialect.name
sqlalchemy.exc.CompileError: (in table 'nicknames', column 'slug'): VARCHAR requires a length on dialect mysql
````
| [
{
"content": "# coding=utf-8\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport json\nimport os.path\nimport sys\n\nfrom sopel.tools import Identifier\n\nfrom sqlalchemy import create_engine, Column, ForeignKey, Integer, String\nfrom sqlalchemy.engine.url import URL\nfrom sqlalchemy.exc import OperationalError, SQLAlchemyError\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import scoped_session, sessionmaker\n\nif sys.version_info.major >= 3:\n unicode = str\n basestring = str\n\n\ndef _deserialize(value):\n if value is None:\n return None\n # sqlite likes to return ints for strings that look like ints, even though\n # the column type is string. That's how you do dynamic typing wrong.\n value = unicode(value)\n # Just in case someone's mucking with the DB in a way we can't account for,\n # ignore json parsing errors\n try:\n value = json.loads(value)\n except ValueError:\n pass\n return value\n\n\nBASE = declarative_base()\n\n\nclass NickIDs(BASE):\n \"\"\"\n NickIDs SQLAlchemy Class\n \"\"\"\n __tablename__ = 'nick_ids'\n nick_id = Column(Integer, primary_key=True)\n\n\nclass Nicknames(BASE):\n \"\"\"\n Nicknames SQLAlchemy Class\n \"\"\"\n __tablename__ = 'nicknames'\n nick_id = Column(Integer, ForeignKey('nick_ids.nick_id'), primary_key=True)\n slug = Column(String, primary_key=True)\n canonical = Column(String)\n\n\nclass NickValues(BASE):\n \"\"\"\n NickValues SQLAlchemy Class\n \"\"\"\n __tablename__ = 'nick_values'\n nick_id = Column(Integer, ForeignKey('nick_ids.nick_id'), primary_key=True)\n key = Column(String(255), primary_key=True)\n value = Column(String(255))\n\n\nclass ChannelValues(BASE):\n \"\"\"\n ChannelValues SQLAlchemy Class\n \"\"\"\n __tablename__ = 'channel_values'\n channel = Column(String(255), primary_key=True)\n key = Column(String(255), primary_key=True)\n value = Column(String(255))\n\n\nclass SopelDB(object):\n \"\"\"*Availability: 5.0+*\n\n This defines an interface for basic, common operations on a sqlite\n database. 
It simplifies those common operations, and allows direct access\n to the database, wherever the user has configured it to be.\n\n When configured with a relative filename, it is assumed to be in the same\n directory as the config.\"\"\"\n\n def __init__(self, config):\n # MySQL - mysql://username:password@localhost/db\n # SQLite - sqlite:////home/sopel/.sopel/default.db\n db_type = config.core.db_type\n\n # Handle SQLite explicitly as a default\n if db_type == 'sqlite':\n path = config.core.db_filename\n config_dir, config_file = os.path.split(config.filename)\n config_name, _ = os.path.splitext(config_file)\n if path is None:\n path = os.path.join(config_dir, config_name + '.db')\n path = os.path.expanduser(path)\n if not os.path.isabs(path):\n path = os.path.normpath(os.path.join(config_dir, path))\n self.filename = path\n self.url = 'sqlite:///%s' % path\n # Otherwise, handle all other database engines\n else:\n if db_type == 'mysql':\n drivername = config.core.db_driver or 'mysql'\n elif db_type == 'postgres':\n drivername = config.core.db_driver or 'postgresql'\n elif db_type == 'oracle':\n drivername = config.core.db_driver or 'oracle'\n elif db_type == 'mssql':\n drivername = config.core.db_driver or 'mssql+pymssql'\n elif db_type == 'firebird':\n drivername = config.core.db_driver or 'firebird+fdb'\n elif db_type == 'sybase':\n drivername = config.core.db_driver or 'sybase+pysybase'\n else:\n raise Exception('Unknown db_type')\n\n db_user = config.core.db_user\n db_pass = config.core.db_pass\n db_host = config.core.db_host\n db_port = config.core.db_port # Optional\n db_name = config.core.db_name # Optional, depending on DB\n\n # Ensure we have all our variables defined\n if db_user is None or db_pass is None or db_host is None:\n raise Exception('Please make sure the following core '\n 'configuration values are defined: '\n 'db_user, db_pass, db_host')\n self.url = URL(drivername=drivername, username=db_user, password=db_pass,\n host=db_host, port=db_port, database=db_name)\n\n self.engine = create_engine(self.url)\n\n # Catch any errors connecting to database\n try:\n self.engine.connect()\n except OperationalError:\n print(\"OperationalError: Unable to connect to database.\")\n raise\n\n # Create our tables\n BASE.metadata.create_all(self.engine)\n\n self.ssession = scoped_session(sessionmaker(bind=self.engine))\n\n def connect(self):\n \"\"\"Return a raw database connection object.\"\"\"\n return self.engine.connect()\n\n def execute(self, *args, **kwargs):\n \"\"\"Execute an arbitrary SQL query against the database.\n\n Returns a cursor object, on which things like `.fetchall()` can be\n called per PEP 249.\"\"\"\n with self.connect() as conn:\n return conn.execute(*args, **kwargs)\n\n def get_uri(self):\n \"\"\"Returns a URL for the database, usable to connect with SQLAlchemy.\"\"\"\n return 'sqlite:///{}'.format(self.filename)\n\n # NICK FUNCTIONS\n\n def get_nick_id(self, nick, create=True):\n \"\"\"Return the internal identifier for a given nick.\n\n This identifier is unique to a user, and shared across all of that\n user's aliases. 
If create is True, a new ID will be created if one does\n not already exist\"\"\"\n session = self.ssession()\n slug = nick.lower()\n try:\n nickname = session.query(Nicknames) \\\n .filter(Nicknames.slug == slug) \\\n .one_or_none()\n\n if nickname is None:\n if not create:\n raise ValueError('No ID exists for the given nick')\n # Generate a new ID\n nick_id = NickIDs()\n session.add(nick_id)\n session.commit()\n\n # Create a new Nickname\n nickname = Nicknames(nick_id=nick_id.nick_id, slug=slug, canonical=nick)\n session.add(nickname)\n session.commit()\n return nickname.nick_id\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n session.close()\n\n def alias_nick(self, nick, alias):\n \"\"\"Create an alias for a nick.\n\n Raises ValueError if the alias already exists. If nick does not already\n exist, it will be added along with the alias.\"\"\"\n nick = Identifier(nick)\n alias = Identifier(alias)\n nick_id = self.get_nick_id(nick)\n session = self.ssession()\n try:\n result = session.query(Nicknames) \\\n .filter(Nicknames.slug == alias.lower()) \\\n .filter(Nicknames.canonical == alias) \\\n .one_or_none()\n if result:\n raise ValueError('Given alias is the only entry in its group.')\n nickname = Nicknames(nick_id=nick_id, slug=alias.lower(), canonical=alias)\n session.add(nickname)\n session.commit()\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n session.close()\n\n def set_nick_value(self, nick, key, value):\n \"\"\"Sets the value for a given key to be associated with the nick.\"\"\"\n nick = Identifier(nick)\n value = json.dumps(value, ensure_ascii=False)\n nick_id = self.get_nick_id(nick)\n session = self.ssession()\n try:\n result = session.query(NickValues) \\\n .filter(NickValues.nick_id == nick_id) \\\n .filter(NickValues.key == key) \\\n .one_or_none()\n # NickValue exists, update\n if result:\n result.value = value\n session.commit()\n # DNE - Insert\n else:\n new_nickvalue = NickValues(nick_id=nick_id, key=key, value=value)\n session.add(new_nickvalue)\n session.commit()\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n session.close()\n\n def get_nick_value(self, nick, key):\n \"\"\"Retrieves the value for a given key associated with a nick.\"\"\"\n nick = Identifier(nick)\n session = self.ssession()\n try:\n result = session.query(NickValues) \\\n .filter(Nicknames.nick_id == NickValues.nick_id) \\\n .filter(Nicknames.slug == nick.lower()) \\\n .filter(NickValues.key == key) \\\n .one_or_none()\n if result is not None:\n result = result.value\n return _deserialize(result)\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n session.close()\n\n def unalias_nick(self, alias):\n \"\"\"Removes an alias.\n\n Raises ValueError if there is not at least one other nick in the group.\n To delete an entire group, use `delete_group`.\n \"\"\"\n alias = Identifier(alias)\n nick_id = self.get_nick_id(alias, False)\n session = self.ssession()\n try:\n count = session.query(Nicknames) \\\n .filter(Nicknames.nick_id == nick_id) \\\n .count()\n if count <= 1:\n raise ValueError('Given alias is the only entry in its group.')\n session.query(Nicknames).filter(Nicknames.slug == alias.lower()).delete()\n session.commit()\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n session.close()\n\n def delete_nick_group(self, nick):\n \"\"\"Removes a nickname, and all associated aliases and settings.\"\"\"\n nick = Identifier(nick)\n nick_id = self.get_nick_id(nick, False)\n session = self.ssession()\n try:\n 
session.query(Nicknames).filter(Nicknames.nick_id == nick_id).delete()\n session.query(NickValues).filter(NickValues.nick_id == nick_id).delete()\n session.commit()\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n session.close()\n\n def merge_nick_groups(self, first_nick, second_nick):\n \"\"\"Merges the nick groups for the specified nicks.\n\n Takes two nicks, which may or may not be registered. Unregistered\n nicks will be registered. Keys which are set for only one of the given\n nicks will be preserved. Where multiple nicks have values for a given\n key, the value set for the first nick will be used.\n\n Note that merging of data only applies to the native key-value store.\n If modules define their own tables which rely on the nick table, they\n will need to have their merging done separately.\"\"\"\n first_id = self.get_nick_id(Identifier(first_nick))\n second_id = self.get_nick_id(Identifier(second_nick))\n session = self.ssession()\n try:\n # Get second_id's values\n res = session.query(NickValues).filter(NickValues.nick_id == second_id).all()\n # Update first_id with second_id values if first_id doesn't have that key\n for row in res:\n first_res = session.query(NickValues) \\\n .filter(NickValues.nick_id == first_id) \\\n .filter(NickValues.key == row.key) \\\n .one_or_none()\n if not first_res:\n self.set_nick_value(first_nick, row.key, _deserialize(row.value))\n session.query(NickValues).filter(NickValues.nick_id == second_id).delete()\n session.query(Nicknames) \\\n .filter(Nicknames.nick_id == second_id) \\\n .update({'nick_id': first_id})\n session.commit()\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n session.close()\n\n # CHANNEL FUNCTIONS\n\n def set_channel_value(self, channel, key, value):\n \"\"\"Sets the value for a given key to be associated with the channel.\"\"\"\n channel = Identifier(channel).lower()\n value = json.dumps(value, ensure_ascii=False)\n session = self.ssession()\n try:\n result = session.query(ChannelValues) \\\n .filter(ChannelValues.channel == channel)\\\n .filter(ChannelValues.key == key) \\\n .one_or_none()\n # ChannelValue exists, update\n if result:\n result.value = value\n session.commit()\n # DNE - Insert\n else:\n new_channelvalue = ChannelValues(channel=channel, key=key, value=value)\n session.add(new_channelvalue)\n session.commit()\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n session.close()\n\n def get_channel_value(self, channel, key):\n \"\"\"Retrieves the value for a given key associated with a channel.\"\"\"\n channel = Identifier(channel).lower()\n session = self.ssession()\n try:\n result = session.query(ChannelValues) \\\n .filter(ChannelValues.channel == channel)\\\n .filter(ChannelValues.key == key) \\\n .one_or_none()\n if result is not None:\n result = result.value\n return _deserialize(result)\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n session.close()\n\n # NICK AND CHANNEL FUNCTIONS\n\n def get_nick_or_channel_value(self, name, key):\n \"\"\"Gets the value `key` associated to the nick or channel `name`.\"\"\"\n name = Identifier(name)\n if name.is_nick():\n return self.get_nick_value(name, key)\n else:\n return self.get_channel_value(name, key)\n\n def get_preferred_value(self, names, key):\n \"\"\"Gets the value for the first name which has it set.\n\n `names` is a list of channel and/or user names. 
Returns None if none of\n the names have the key set.\"\"\"\n for name in names:\n value = self.get_nick_or_channel_value(name, key)\n if value is not None:\n return value\n",
"path": "sopel/db.py"
}
] | [
{
"content": "# coding=utf-8\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport json\nimport os.path\nimport sys\n\nfrom sopel.tools import Identifier\n\nfrom sqlalchemy import create_engine, Column, ForeignKey, Integer, String\nfrom sqlalchemy.engine.url import URL\nfrom sqlalchemy.exc import OperationalError, SQLAlchemyError\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import scoped_session, sessionmaker\n\nif sys.version_info.major >= 3:\n unicode = str\n basestring = str\n\n\ndef _deserialize(value):\n if value is None:\n return None\n # sqlite likes to return ints for strings that look like ints, even though\n # the column type is string. That's how you do dynamic typing wrong.\n value = unicode(value)\n # Just in case someone's mucking with the DB in a way we can't account for,\n # ignore json parsing errors\n try:\n value = json.loads(value)\n except ValueError:\n pass\n return value\n\n\nBASE = declarative_base()\n\n\nclass NickIDs(BASE):\n \"\"\"\n NickIDs SQLAlchemy Class\n \"\"\"\n __tablename__ = 'nick_ids'\n nick_id = Column(Integer, primary_key=True)\n\n\nclass Nicknames(BASE):\n \"\"\"\n Nicknames SQLAlchemy Class\n \"\"\"\n __tablename__ = 'nicknames'\n nick_id = Column(Integer, ForeignKey('nick_ids.nick_id'), primary_key=True)\n slug = Column(String(255), primary_key=True)\n canonical = Column(String(255))\n\n\nclass NickValues(BASE):\n \"\"\"\n NickValues SQLAlchemy Class\n \"\"\"\n __tablename__ = 'nick_values'\n nick_id = Column(Integer, ForeignKey('nick_ids.nick_id'), primary_key=True)\n key = Column(String(255), primary_key=True)\n value = Column(String(255))\n\n\nclass ChannelValues(BASE):\n \"\"\"\n ChannelValues SQLAlchemy Class\n \"\"\"\n __tablename__ = 'channel_values'\n channel = Column(String(255), primary_key=True)\n key = Column(String(255), primary_key=True)\n value = Column(String(255))\n\n\nclass SopelDB(object):\n \"\"\"*Availability: 5.0+*\n\n This defines an interface for basic, common operations on a sqlite\n database. 
It simplifies those common operations, and allows direct access\n to the database, wherever the user has configured it to be.\n\n When configured with a relative filename, it is assumed to be in the same\n directory as the config.\"\"\"\n\n def __init__(self, config):\n # MySQL - mysql://username:password@localhost/db\n # SQLite - sqlite:////home/sopel/.sopel/default.db\n db_type = config.core.db_type\n\n # Handle SQLite explicitly as a default\n if db_type == 'sqlite':\n path = config.core.db_filename\n config_dir, config_file = os.path.split(config.filename)\n config_name, _ = os.path.splitext(config_file)\n if path is None:\n path = os.path.join(config_dir, config_name + '.db')\n path = os.path.expanduser(path)\n if not os.path.isabs(path):\n path = os.path.normpath(os.path.join(config_dir, path))\n self.filename = path\n self.url = 'sqlite:///%s' % path\n # Otherwise, handle all other database engines\n else:\n if db_type == 'mysql':\n drivername = config.core.db_driver or 'mysql'\n elif db_type == 'postgres':\n drivername = config.core.db_driver or 'postgresql'\n elif db_type == 'oracle':\n drivername = config.core.db_driver or 'oracle'\n elif db_type == 'mssql':\n drivername = config.core.db_driver or 'mssql+pymssql'\n elif db_type == 'firebird':\n drivername = config.core.db_driver or 'firebird+fdb'\n elif db_type == 'sybase':\n drivername = config.core.db_driver or 'sybase+pysybase'\n else:\n raise Exception('Unknown db_type')\n\n db_user = config.core.db_user\n db_pass = config.core.db_pass\n db_host = config.core.db_host\n db_port = config.core.db_port # Optional\n db_name = config.core.db_name # Optional, depending on DB\n\n # Ensure we have all our variables defined\n if db_user is None or db_pass is None or db_host is None:\n raise Exception('Please make sure the following core '\n 'configuration values are defined: '\n 'db_user, db_pass, db_host')\n self.url = URL(drivername=drivername, username=db_user, password=db_pass,\n host=db_host, port=db_port, database=db_name)\n\n self.engine = create_engine(self.url)\n\n # Catch any errors connecting to database\n try:\n self.engine.connect()\n except OperationalError:\n print(\"OperationalError: Unable to connect to database.\")\n raise\n\n # Create our tables\n BASE.metadata.create_all(self.engine)\n\n self.ssession = scoped_session(sessionmaker(bind=self.engine))\n\n def connect(self):\n \"\"\"Return a raw database connection object.\"\"\"\n return self.engine.connect()\n\n def execute(self, *args, **kwargs):\n \"\"\"Execute an arbitrary SQL query against the database.\n\n Returns a cursor object, on which things like `.fetchall()` can be\n called per PEP 249.\"\"\"\n with self.connect() as conn:\n return conn.execute(*args, **kwargs)\n\n def get_uri(self):\n \"\"\"Returns a URL for the database, usable to connect with SQLAlchemy.\"\"\"\n return 'sqlite:///{}'.format(self.filename)\n\n # NICK FUNCTIONS\n\n def get_nick_id(self, nick, create=True):\n \"\"\"Return the internal identifier for a given nick.\n\n This identifier is unique to a user, and shared across all of that\n user's aliases. 
If create is True, a new ID will be created if one does\n not already exist\"\"\"\n session = self.ssession()\n slug = nick.lower()\n try:\n nickname = session.query(Nicknames) \\\n .filter(Nicknames.slug == slug) \\\n .one_or_none()\n\n if nickname is None:\n if not create:\n raise ValueError('No ID exists for the given nick')\n # Generate a new ID\n nick_id = NickIDs()\n session.add(nick_id)\n session.commit()\n\n # Create a new Nickname\n nickname = Nicknames(nick_id=nick_id.nick_id, slug=slug, canonical=nick)\n session.add(nickname)\n session.commit()\n return nickname.nick_id\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n session.close()\n\n def alias_nick(self, nick, alias):\n \"\"\"Create an alias for a nick.\n\n Raises ValueError if the alias already exists. If nick does not already\n exist, it will be added along with the alias.\"\"\"\n nick = Identifier(nick)\n alias = Identifier(alias)\n nick_id = self.get_nick_id(nick)\n session = self.ssession()\n try:\n result = session.query(Nicknames) \\\n .filter(Nicknames.slug == alias.lower()) \\\n .filter(Nicknames.canonical == alias) \\\n .one_or_none()\n if result:\n raise ValueError('Given alias is the only entry in its group.')\n nickname = Nicknames(nick_id=nick_id, slug=alias.lower(), canonical=alias)\n session.add(nickname)\n session.commit()\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n session.close()\n\n def set_nick_value(self, nick, key, value):\n \"\"\"Sets the value for a given key to be associated with the nick.\"\"\"\n nick = Identifier(nick)\n value = json.dumps(value, ensure_ascii=False)\n nick_id = self.get_nick_id(nick)\n session = self.ssession()\n try:\n result = session.query(NickValues) \\\n .filter(NickValues.nick_id == nick_id) \\\n .filter(NickValues.key == key) \\\n .one_or_none()\n # NickValue exists, update\n if result:\n result.value = value\n session.commit()\n # DNE - Insert\n else:\n new_nickvalue = NickValues(nick_id=nick_id, key=key, value=value)\n session.add(new_nickvalue)\n session.commit()\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n session.close()\n\n def get_nick_value(self, nick, key):\n \"\"\"Retrieves the value for a given key associated with a nick.\"\"\"\n nick = Identifier(nick)\n session = self.ssession()\n try:\n result = session.query(NickValues) \\\n .filter(Nicknames.nick_id == NickValues.nick_id) \\\n .filter(Nicknames.slug == nick.lower()) \\\n .filter(NickValues.key == key) \\\n .one_or_none()\n if result is not None:\n result = result.value\n return _deserialize(result)\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n session.close()\n\n def unalias_nick(self, alias):\n \"\"\"Removes an alias.\n\n Raises ValueError if there is not at least one other nick in the group.\n To delete an entire group, use `delete_group`.\n \"\"\"\n alias = Identifier(alias)\n nick_id = self.get_nick_id(alias, False)\n session = self.ssession()\n try:\n count = session.query(Nicknames) \\\n .filter(Nicknames.nick_id == nick_id) \\\n .count()\n if count <= 1:\n raise ValueError('Given alias is the only entry in its group.')\n session.query(Nicknames).filter(Nicknames.slug == alias.lower()).delete()\n session.commit()\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n session.close()\n\n def delete_nick_group(self, nick):\n \"\"\"Removes a nickname, and all associated aliases and settings.\"\"\"\n nick = Identifier(nick)\n nick_id = self.get_nick_id(nick, False)\n session = self.ssession()\n try:\n 
session.query(Nicknames).filter(Nicknames.nick_id == nick_id).delete()\n session.query(NickValues).filter(NickValues.nick_id == nick_id).delete()\n session.commit()\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n session.close()\n\n def merge_nick_groups(self, first_nick, second_nick):\n \"\"\"Merges the nick groups for the specified nicks.\n\n Takes two nicks, which may or may not be registered. Unregistered\n nicks will be registered. Keys which are set for only one of the given\n nicks will be preserved. Where multiple nicks have values for a given\n key, the value set for the first nick will be used.\n\n Note that merging of data only applies to the native key-value store.\n If modules define their own tables which rely on the nick table, they\n will need to have their merging done separately.\"\"\"\n first_id = self.get_nick_id(Identifier(first_nick))\n second_id = self.get_nick_id(Identifier(second_nick))\n session = self.ssession()\n try:\n # Get second_id's values\n res = session.query(NickValues).filter(NickValues.nick_id == second_id).all()\n # Update first_id with second_id values if first_id doesn't have that key\n for row in res:\n first_res = session.query(NickValues) \\\n .filter(NickValues.nick_id == first_id) \\\n .filter(NickValues.key == row.key) \\\n .one_or_none()\n if not first_res:\n self.set_nick_value(first_nick, row.key, _deserialize(row.value))\n session.query(NickValues).filter(NickValues.nick_id == second_id).delete()\n session.query(Nicknames) \\\n .filter(Nicknames.nick_id == second_id) \\\n .update({'nick_id': first_id})\n session.commit()\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n session.close()\n\n # CHANNEL FUNCTIONS\n\n def set_channel_value(self, channel, key, value):\n \"\"\"Sets the value for a given key to be associated with the channel.\"\"\"\n channel = Identifier(channel).lower()\n value = json.dumps(value, ensure_ascii=False)\n session = self.ssession()\n try:\n result = session.query(ChannelValues) \\\n .filter(ChannelValues.channel == channel)\\\n .filter(ChannelValues.key == key) \\\n .one_or_none()\n # ChannelValue exists, update\n if result:\n result.value = value\n session.commit()\n # DNE - Insert\n else:\n new_channelvalue = ChannelValues(channel=channel, key=key, value=value)\n session.add(new_channelvalue)\n session.commit()\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n session.close()\n\n def get_channel_value(self, channel, key):\n \"\"\"Retrieves the value for a given key associated with a channel.\"\"\"\n channel = Identifier(channel).lower()\n session = self.ssession()\n try:\n result = session.query(ChannelValues) \\\n .filter(ChannelValues.channel == channel)\\\n .filter(ChannelValues.key == key) \\\n .one_or_none()\n if result is not None:\n result = result.value\n return _deserialize(result)\n except SQLAlchemyError:\n session.rollback()\n raise\n finally:\n session.close()\n\n # NICK AND CHANNEL FUNCTIONS\n\n def get_nick_or_channel_value(self, name, key):\n \"\"\"Gets the value `key` associated to the nick or channel `name`.\"\"\"\n name = Identifier(name)\n if name.is_nick():\n return self.get_nick_value(name, key)\n else:\n return self.get_channel_value(name, key)\n\n def get_preferred_value(self, names, key):\n \"\"\"Gets the value for the first name which has it set.\n\n `names` is a list of channel and/or user names. 
Returns None if none of\n the names have the key set.\"\"\"\n for name in names:\n value = self.get_nick_or_channel_value(name, key)\n if value is not None:\n return value\n",
"path": "sopel/db.py"
}
] | diff --git a/sopel/db.py b/sopel/db.py
index 911d700555..6a176a3870 100644
--- a/sopel/db.py
+++ b/sopel/db.py
@@ -50,8 +50,8 @@ class Nicknames(BASE):
"""
__tablename__ = 'nicknames'
nick_id = Column(Integer, ForeignKey('nick_ids.nick_id'), primary_key=True)
- slug = Column(String, primary_key=True)
- canonical = Column(String)
+ slug = Column(String(255), primary_key=True)
+ canonical = Column(String(255))
class NickValues(BASE):
|
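The root cause is a dialect difference: SQLAlchemy's `String` with no length compiles to a bare `VARCHAR`, which SQLite accepts but MySQL's DDL compiler rejects, producing exactly the `CompileError` in the traceback. A small sketch reproducing it (the table here is illustrative, not Sopel's actual schema):

```python
# A length-less String column compiles for SQLite but raises CompileError
# on the MySQL dialect, which is why the patch pins Sopel's nickname
# columns to String(255).
from sqlalchemy import Column, Integer, String
from sqlalchemy.dialects import mysql, sqlite
from sqlalchemy.exc import CompileError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.schema import CreateTable

Base = declarative_base()

class Example(Base):
    __tablename__ = 'example'
    id = Column(Integer, primary_key=True)
    slug = Column(String, primary_key=True)  # bare VARCHAR, no length

ddl = CreateTable(Example.__table__)
print(ddl.compile(dialect=sqlite.dialect()))   # fine: slug VARCHAR NOT NULL
try:
    print(ddl.compile(dialect=mysql.dialect()))
except CompileError as exc:
    print(exc)  # ... VARCHAR requires a length on dialect mysql
```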
NVIDIA__NVFlare-1350 | Bug in prostate_2D example
https://github.com/NVIDIA/NVFlare/blob/8f8f029eeecf58a85d9633357ce1ed4f8f39f655/examples/advanced/prostate/prostate_2D/custom/learners/supervised_monai_prostate_learner.py#L171
`self.transform_valid` is not defined if `cache_rate=0`.
| [
{
"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport os\n\nimport torch\nimport torch.optim as optim\nfrom learners.supervised_learner import SupervisedLearner\nfrom monai.data import CacheDataset, DataLoader, Dataset, load_decathlon_datalist\nfrom monai.inferers import SimpleInferer\nfrom monai.losses import DiceLoss\nfrom monai.metrics import DiceMetric\nfrom monai.networks.nets.unet import UNet\nfrom monai.transforms import (\n Activations,\n AsDiscrete,\n AsDiscreted,\n Compose,\n EnsureChannelFirstd,\n EnsureType,\n EnsureTyped,\n LoadImaged,\n Resized,\n ScaleIntensityRanged,\n)\nfrom utils.custom_client_datalist_json_path import custom_client_datalist_json_path\n\nfrom nvflare.apis.fl_context import FLContext\nfrom nvflare.app_common.app_constant import AppConstants\nfrom nvflare.app_common.pt.pt_fedproxloss import PTFedProxLoss\n\n\nclass SupervisedMonaiProstateLearner(SupervisedLearner):\n def __init__(\n self,\n train_config_filename,\n aggregation_epochs: int = 1,\n train_task_name: str = AppConstants.TASK_TRAIN,\n ):\n \"\"\"MONAI Learner for prostate segmentation task.\n It inherits from SupervisedLearner.\n\n Args:\n train_config_filename: path for config file, this is an addition term for config loading\n aggregation_epochs: the number of training epochs for a round.\n train_task_name: name of the task to train the model.\n\n Returns:\n a Shareable with the updated local model after running `execute()`\n \"\"\"\n super().__init__(\n aggregation_epochs=aggregation_epochs,\n train_task_name=train_task_name,\n )\n self.train_config_filename = train_config_filename\n self.config_info = None\n\n def train_config(self, fl_ctx: FLContext):\n \"\"\"MONAI traning configuration\n Here, we use a json to specify the needed parameters\n \"\"\"\n\n # Load training configurations json\n engine = fl_ctx.get_engine()\n ws = engine.get_workspace()\n app_config_dir = ws.get_app_config_dir(fl_ctx.get_job_id())\n train_config_file_path = os.path.join(app_config_dir, self.train_config_filename)\n if not os.path.isfile(train_config_file_path):\n self.log_error(\n fl_ctx,\n f\"Training configuration file does not exist at {train_config_file_path}\",\n )\n with open(train_config_file_path) as file:\n self.config_info = json.load(file)\n\n # Get the config_info\n self.lr = self.config_info[\"learning_rate\"]\n self.fedproxloss_mu = self.config_info[\"fedproxloss_mu\"]\n cache_rate = self.config_info[\"cache_dataset\"]\n dataset_base_dir = self.config_info[\"dataset_base_dir\"]\n datalist_json_path = self.config_info[\"datalist_json_path\"]\n\n # Get datalist json\n datalist_json_path = custom_client_datalist_json_path(datalist_json_path, self.client_id)\n\n # Set datalist\n train_list = load_decathlon_datalist(\n data_list_file_path=datalist_json_path,\n is_segmentation=True,\n data_list_key=\"training\",\n base_dir=dataset_base_dir,\n )\n valid_list = load_decathlon_datalist(\n 
data_list_file_path=datalist_json_path,\n is_segmentation=True,\n data_list_key=\"validation\",\n base_dir=dataset_base_dir,\n )\n self.log_info(\n fl_ctx,\n f\"Training Size: {len(train_list)}, Validation Size: {len(valid_list)}\",\n )\n\n # Set the training-related context\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n self.model = UNet(\n spatial_dims=2,\n in_channels=1,\n out_channels=1,\n channels=(16, 32, 64, 128, 256),\n strides=(2, 2, 2, 2),\n num_res_units=2,\n ).to(self.device)\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)\n self.criterion = DiceLoss(sigmoid=True)\n\n if self.fedproxloss_mu > 0:\n self.log_info(fl_ctx, f\"using FedProx loss with mu {self.fedproxloss_mu}\")\n self.criterion_prox = PTFedProxLoss(mu=self.fedproxloss_mu)\n\n self.transform = Compose(\n [\n LoadImaged(keys=[\"image\", \"label\"]),\n EnsureChannelFirstd(keys=[\"image\", \"label\"]),\n ScaleIntensityRanged(keys=[\"image\", \"label\"], a_min=0, a_max=255, b_min=0.0, b_max=1.0),\n Resized(\n keys=[\"image\", \"label\"],\n spatial_size=(256, 256),\n mode=(\"bilinear\"),\n align_corners=True,\n ),\n AsDiscreted(keys=[\"label\"], threshold=0.5),\n EnsureTyped(keys=[\"image\", \"label\"]),\n ]\n )\n self.transform_post = Compose([EnsureType(), Activations(sigmoid=True), AsDiscrete(threshold=0.5)])\n\n # Set dataset\n if cache_rate > 0.0:\n self.train_dataset = CacheDataset(\n data=train_list,\n transform=self.transform,\n cache_rate=cache_rate,\n num_workers=4,\n )\n self.valid_dataset = CacheDataset(\n data=valid_list,\n transform=self.transform,\n cache_rate=cache_rate,\n num_workers=4,\n )\n else:\n self.train_dataset = Dataset(\n data=train_list,\n transform=self.transform,\n )\n self.valid_dataset = Dataset(\n data=valid_list,\n transform=self.transform_valid,\n )\n\n self.train_loader = DataLoader(\n self.train_dataset,\n batch_size=1,\n shuffle=True,\n num_workers=2,\n )\n self.valid_loader = DataLoader(\n self.valid_dataset,\n batch_size=1,\n shuffle=False,\n num_workers=2,\n )\n\n # Set inferer and evaluation metric\n self.inferer = SimpleInferer()\n self.valid_metric = DiceMetric(include_background=False, reduction=\"mean\", get_not_nans=False)\n",
"path": "examples/advanced/prostate/prostate_2D/custom/learners/supervised_monai_prostate_learner.py"
}
] | [
{
"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport os\n\nimport torch\nimport torch.optim as optim\nfrom learners.supervised_learner import SupervisedLearner\nfrom monai.data import CacheDataset, DataLoader, Dataset, load_decathlon_datalist\nfrom monai.inferers import SimpleInferer\nfrom monai.losses import DiceLoss\nfrom monai.metrics import DiceMetric\nfrom monai.networks.nets.unet import UNet\nfrom monai.transforms import (\n Activations,\n AsDiscrete,\n AsDiscreted,\n Compose,\n EnsureChannelFirstd,\n EnsureType,\n EnsureTyped,\n LoadImaged,\n Resized,\n ScaleIntensityRanged,\n)\nfrom utils.custom_client_datalist_json_path import custom_client_datalist_json_path\n\nfrom nvflare.apis.fl_context import FLContext\nfrom nvflare.app_common.app_constant import AppConstants\nfrom nvflare.app_common.pt.pt_fedproxloss import PTFedProxLoss\n\n\nclass SupervisedMonaiProstateLearner(SupervisedLearner):\n def __init__(\n self,\n train_config_filename,\n aggregation_epochs: int = 1,\n train_task_name: str = AppConstants.TASK_TRAIN,\n ):\n \"\"\"MONAI Learner for prostate segmentation task.\n It inherits from SupervisedLearner.\n\n Args:\n train_config_filename: path for config file, this is an addition term for config loading\n aggregation_epochs: the number of training epochs for a round.\n train_task_name: name of the task to train the model.\n\n Returns:\n a Shareable with the updated local model after running `execute()`\n \"\"\"\n super().__init__(\n aggregation_epochs=aggregation_epochs,\n train_task_name=train_task_name,\n )\n self.train_config_filename = train_config_filename\n self.config_info = None\n\n def train_config(self, fl_ctx: FLContext):\n \"\"\"MONAI traning configuration\n Here, we use a json to specify the needed parameters\n \"\"\"\n\n # Load training configurations json\n engine = fl_ctx.get_engine()\n ws = engine.get_workspace()\n app_config_dir = ws.get_app_config_dir(fl_ctx.get_job_id())\n train_config_file_path = os.path.join(app_config_dir, self.train_config_filename)\n if not os.path.isfile(train_config_file_path):\n self.log_error(\n fl_ctx,\n f\"Training configuration file does not exist at {train_config_file_path}\",\n )\n with open(train_config_file_path) as file:\n self.config_info = json.load(file)\n\n # Get the config_info\n self.lr = self.config_info[\"learning_rate\"]\n self.fedproxloss_mu = self.config_info[\"fedproxloss_mu\"]\n cache_rate = self.config_info[\"cache_dataset\"]\n dataset_base_dir = self.config_info[\"dataset_base_dir\"]\n datalist_json_path = self.config_info[\"datalist_json_path\"]\n\n # Get datalist json\n datalist_json_path = custom_client_datalist_json_path(datalist_json_path, self.client_id)\n\n # Set datalist\n train_list = load_decathlon_datalist(\n data_list_file_path=datalist_json_path,\n is_segmentation=True,\n data_list_key=\"training\",\n base_dir=dataset_base_dir,\n )\n valid_list = load_decathlon_datalist(\n 
data_list_file_path=datalist_json_path,\n is_segmentation=True,\n data_list_key=\"validation\",\n base_dir=dataset_base_dir,\n )\n self.log_info(\n fl_ctx,\n f\"Training Size: {len(train_list)}, Validation Size: {len(valid_list)}\",\n )\n\n # Set the training-related context\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n self.model = UNet(\n spatial_dims=2,\n in_channels=1,\n out_channels=1,\n channels=(16, 32, 64, 128, 256),\n strides=(2, 2, 2, 2),\n num_res_units=2,\n ).to(self.device)\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)\n self.criterion = DiceLoss(sigmoid=True)\n\n if self.fedproxloss_mu > 0:\n self.log_info(fl_ctx, f\"using FedProx loss with mu {self.fedproxloss_mu}\")\n self.criterion_prox = PTFedProxLoss(mu=self.fedproxloss_mu)\n\n self.transform = Compose(\n [\n LoadImaged(keys=[\"image\", \"label\"]),\n EnsureChannelFirstd(keys=[\"image\", \"label\"]),\n ScaleIntensityRanged(keys=[\"image\", \"label\"], a_min=0, a_max=255, b_min=0.0, b_max=1.0),\n Resized(\n keys=[\"image\", \"label\"],\n spatial_size=(256, 256),\n mode=(\"bilinear\"),\n align_corners=True,\n ),\n AsDiscreted(keys=[\"label\"], threshold=0.5),\n EnsureTyped(keys=[\"image\", \"label\"]),\n ]\n )\n self.transform_post = Compose([EnsureType(), Activations(sigmoid=True), AsDiscrete(threshold=0.5)])\n\n # Set dataset\n if cache_rate > 0.0:\n self.train_dataset = CacheDataset(\n data=train_list,\n transform=self.transform,\n cache_rate=cache_rate,\n num_workers=4,\n )\n self.valid_dataset = CacheDataset(\n data=valid_list,\n transform=self.transform,\n cache_rate=cache_rate,\n num_workers=4,\n )\n else:\n self.train_dataset = Dataset(\n data=train_list,\n transform=self.transform,\n )\n self.valid_dataset = Dataset(\n data=valid_list,\n transform=self.transform,\n )\n\n self.train_loader = DataLoader(\n self.train_dataset,\n batch_size=1,\n shuffle=True,\n num_workers=2,\n )\n self.valid_loader = DataLoader(\n self.valid_dataset,\n batch_size=1,\n shuffle=False,\n num_workers=2,\n )\n\n # Set inferer and evaluation metric\n self.inferer = SimpleInferer()\n self.valid_metric = DiceMetric(include_background=False, reduction=\"mean\", get_not_nans=False)\n",
"path": "examples/advanced/prostate/prostate_2D/custom/learners/supervised_monai_prostate_learner.py"
}
] | diff --git a/examples/advanced/prostate/prostate_2D/custom/learners/supervised_monai_prostate_learner.py b/examples/advanced/prostate/prostate_2D/custom/learners/supervised_monai_prostate_learner.py
index ec4ed0e95f..ca6db1876d 100644
--- a/examples/advanced/prostate/prostate_2D/custom/learners/supervised_monai_prostate_learner.py
+++ b/examples/advanced/prostate/prostate_2D/custom/learners/supervised_monai_prostate_learner.py
@@ -168,7 +168,7 @@ def train_config(self, fl_ctx: FLContext):
)
self.valid_dataset = Dataset(
data=valid_list,
- transform=self.transform_valid,
+ transform=self.transform,
)
self.train_loader = DataLoader(
|
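The bug is an attribute that exists on only one branch: the `cache_rate == 0` path referenced `self.transform_valid`, which is never assigned anywhere in the learner, so it raised `AttributeError` instead of building the validation dataset. A hedged sketch of one way to keep the two paths symmetric (the helper name is illustrative, not part of NVFlare or MONAI):

```python
# Sketch: factor dataset construction into one helper so both cache paths
# receive the same, definitely-assigned transform. This mirrors the fixed
# code, which passes self.transform on both branches.
from monai.data import CacheDataset, Dataset

def make_dataset(data, transform, cache_rate=0.0, num_workers=4):
    if cache_rate > 0.0:
        return CacheDataset(data=data, transform=transform,
                            cache_rate=cache_rate, num_workers=num_workers)
    return Dataset(data=data, transform=transform)

# Usage: both splits share the one transform, so neither branch can hit an
# unassigned attribute.
# train_dataset = make_dataset(train_list, transform, cache_rate)
# valid_dataset = make_dataset(valid_list, transform, cache_rate)
```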
coala__coala-3348 | Wrong doc string syntax in coalib.bearlib.aspects.Root
The doc string of the `Root` aspectclass has a formatting issue at https://github.com/coala/coala/blob/master/coalib/bearlib/aspects/__init__.py#L61
You can see the wrongly rendered result at https://api.coala.io/en/latest/coalib.bearlib.aspects.html#module-coalib.bearlib.aspects
| [
{
"content": "from .base import aspectbase\nfrom .meta import aspectclass\nfrom .taste import Taste, TasteError\n\n__all__ = ['Root', 'Taste', 'TasteError', 'aspectclass']\n\n\nclass Root(aspectbase, metaclass=aspectclass):\n \"\"\"\n The root aspectclass.\n\n Define sub-aspectclasses with class-bound ``.subaspect`` decorator.\n Definition string is taken from doc-string of decorated class.\n Remaining docs are taken from a nested ``docs`` class.\n Tastes are defined as class attributes that are instances of\n :class:`coalib.bearlib.aspectclasses.Taste`.\n\n >>> @Root.subaspect\n ... class Formatting:\n ... \\\"\"\"\n ... A parent aspect for code formatting aspects...\n ... \\\"\"\"\n\n We can now create subaspects like this:\n\n >>> @Formatting.subaspect\n ... class LineLength:\n ... \\\"\"\"\n ... This aspect controls the length of a line...\n ... \\\"\"\"\n ... class docs:\n ... example = \"...\"\n ... example_language = \"...\"\n ... importance_reason = \"...\"\n ... fix_suggestions = \"...\"\n ...\n ... max_line_length = Taste[int](\n ... \"Maximum length allowed for a line.\",\n ... (80, 90, 120), default=80)\n\n The representation will show the full \"path\" to the leaf of the tree:\n\n >>> Root.Formatting.LineLength\n <aspectclass 'Root.Formatting.LineLength'>\n\n We can see, which settings are availables:\n\n >>> Formatting.tastes\n {}\n >>> LineLength.tastes\n {'max_line_length': <....Taste[int] object at ...>}\n\n And instantiate the aspect with the values, they will be automatically\n converted:\n\n >>> Formatting('Python')\n <coalib.bearlib.aspects.Root.Formatting object at 0x...>\n >>> LineLength('Python', max_line_length=\"100\").tastes\n {'max_line_length': 100}\n\n If no settings are given, the defaults will be taken>\n >>> LineLength('Python').tastes\n {'max_line_length': 80}\n\n Tastes can also be made available for only specific languages:\n\n >>> from coalib.bearlib.languages import Language\n >>> @Language\n ... class GreaterTrumpScript:\n ... pass\n\n >>> @Formatting.subaspect\n ... class Greatness:\n ... \\\"\"\"\n ... This aspect controls the greatness of a file...\n ... \\\"\"\"\n ...\n ... min_greatness = Taste[int](\n ... \"Minimum greatness factor needed for a TrumpScript file. \"\n ... \"This is fact.\",\n ... (1000000, 1000000000, 1000000000000), default=1000000,\n ... languages=('GreaterTrumpScript' ,))\n\n >>> Greatness.tastes\n {'min_greatness': <....Taste[int] object at ...>}\n >>> Greatness('GreaterTrumpScript').tastes\n {'min_greatness': 1000000}\n >>> Greatness('GreaterTrumpScript', min_greatness=1000000000000).tastes\n {'min_greatness': 1000000000000}\n\n >>> Greatness('Python').tastes\n {}\n\n >>> Greatness('Python', min_greatness=1000000000)\n ... # doctest: +NORMALIZE_WHITESPACE\n Traceback (most recent call last):\n ...\n coalib.bearlib.aspects.taste.TasteError:\n Root.Formatting.Greatness.min_greatness is not available ...\n\n >>> Greatness('Python').min_greatness\n ... # doctest: +NORMALIZE_WHITESPACE\n Traceback (most recent call last):\n ...\n coalib.bearlib.aspects.taste.TasteError:\n Root.Formatting.Greatness.min_greatness is not available ...\n \"\"\"\n parent = None\n\n _tastes = {}\n",
"path": "coalib/bearlib/aspects/__init__.py"
}
] | [
{
"content": "from .base import aspectbase\nfrom .meta import aspectclass\nfrom .taste import Taste, TasteError\n\n__all__ = ['Root', 'Taste', 'TasteError', 'aspectclass']\n\n\nclass Root(aspectbase, metaclass=aspectclass):\n \"\"\"\n The root aspectclass.\n\n Define sub-aspectclasses with class-bound ``.subaspect`` decorator.\n Definition string is taken from doc-string of decorated class.\n Remaining docs are taken from a nested ``docs`` class.\n Tastes are defined as class attributes that are instances of\n :class:`coalib.bearlib.aspectclasses.Taste`.\n\n >>> @Root.subaspect\n ... class Formatting:\n ... \\\"\"\"\n ... A parent aspect for code formatting aspects...\n ... \\\"\"\"\n\n We can now create subaspects like this:\n\n >>> @Formatting.subaspect\n ... class LineLength:\n ... \\\"\"\"\n ... This aspect controls the length of a line...\n ... \\\"\"\"\n ... class docs:\n ... example = \"...\"\n ... example_language = \"...\"\n ... importance_reason = \"...\"\n ... fix_suggestions = \"...\"\n ...\n ... max_line_length = Taste[int](\n ... \"Maximum length allowed for a line.\",\n ... (80, 90, 120), default=80)\n\n The representation will show the full \"path\" to the leaf of the tree:\n\n >>> Root.Formatting.LineLength\n <aspectclass 'Root.Formatting.LineLength'>\n\n We can see, which settings are availables:\n\n >>> Formatting.tastes\n {}\n >>> LineLength.tastes\n {'max_line_length': <....Taste[int] object at ...>}\n\n And instantiate the aspect with the values, they will be automatically\n converted:\n\n >>> Formatting('Python')\n <coalib.bearlib.aspects.Root.Formatting object at 0x...>\n >>> LineLength('Python', max_line_length=\"100\").tastes\n {'max_line_length': 100}\n\n If no settings are given, the defaults will be taken:\n\n >>> LineLength('Python').tastes\n {'max_line_length': 80}\n\n Tastes can also be made available for only specific languages:\n\n >>> from coalib.bearlib.languages import Language\n >>> @Language\n ... class GreaterTrumpScript:\n ... pass\n\n >>> @Formatting.subaspect\n ... class Greatness:\n ... \\\"\"\"\n ... This aspect controls the greatness of a file...\n ... \\\"\"\"\n ...\n ... min_greatness = Taste[int](\n ... \"Minimum greatness factor needed for a TrumpScript file. \"\n ... \"This is fact.\",\n ... (1000000, 1000000000, 1000000000000), default=1000000,\n ... languages=('GreaterTrumpScript' ,))\n\n >>> Greatness.tastes\n {'min_greatness': <....Taste[int] object at ...>}\n >>> Greatness('GreaterTrumpScript').tastes\n {'min_greatness': 1000000}\n >>> Greatness('GreaterTrumpScript', min_greatness=1000000000000).tastes\n {'min_greatness': 1000000000000}\n\n >>> Greatness('Python').tastes\n {}\n\n >>> Greatness('Python', min_greatness=1000000000)\n ... # doctest: +NORMALIZE_WHITESPACE\n Traceback (most recent call last):\n ...\n coalib.bearlib.aspects.taste.TasteError:\n Root.Formatting.Greatness.min_greatness is not available ...\n\n >>> Greatness('Python').min_greatness\n ... # doctest: +NORMALIZE_WHITESPACE\n Traceback (most recent call last):\n ...\n coalib.bearlib.aspects.taste.TasteError:\n Root.Formatting.Greatness.min_greatness is not available ...\n \"\"\"\n parent = None\n\n _tastes = {}\n",
"path": "coalib/bearlib/aspects/__init__.py"
}
] | diff --git a/coalib/bearlib/aspects/__init__.py b/coalib/bearlib/aspects/__init__.py
index 268bb70319..8322b5fb50 100644
--- a/coalib/bearlib/aspects/__init__.py
+++ b/coalib/bearlib/aspects/__init__.py
@@ -58,7 +58,8 @@ class Root(aspectbase, metaclass=aspectclass):
>>> LineLength('Python', max_line_length="100").tastes
{'max_line_length': 100}
- If no settings are given, the defaults will be taken>
+ If no settings are given, the defaults will be taken:
+
>>> LineLength('Python').tastes
{'max_line_length': 80}
|
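The glitch is a reStructuredText issue rather than a doctest one: with the stray `>` instead of `:` and no blank line, Sphinx glues the `>>>` example onto the preceding paragraph when rendering the docstring. doctest's own parser still finds the example either way, as this small sketch shows:

```python
# doctest extracts the example whether or not a blank line separates it
# from the prose; the blank line added in the fix matters for
# reStructuredText/Sphinx rendering, which needs blank lines between blocks.
import doctest

fixed = """
If no settings are given, the defaults will be taken:

>>> 1 + 1
2
"""

broken = """
If no settings are given, the defaults will be taken>
>>> 1 + 1
2
"""

parser = doctest.DocTestParser()
print(len(parser.get_examples(fixed)), len(parser.get_examples(broken)))  # 1 1
```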
searx__searx-200 | bing_news can't parse dates in other languages
When searching for a French article, the time is noted as "Il y a 5 minutes", and so doesn't match the regex `"^[0-9]+ minute(s|) ago$"`.
Do you see a way to internationalize this detection?
| [
{
"content": "## Bing (News)\n#\n# @website https://www.bing.com/news\n# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),\n# max. 5000 query/month\n#\n# @using-api no (because of query limit)\n# @results HTML (using search portal)\n# @stable no (HTML can change)\n# @parse url, title, content, publishedDate\n\nfrom urllib import urlencode\nfrom cgi import escape\nfrom lxml import html\nfrom datetime import datetime, timedelta\nfrom dateutil import parser\nimport re\n\n# engine dependent config\ncategories = ['news']\npaging = True\nlanguage_support = True\n\n# search-url\nbase_url = 'https://www.bing.com/'\nsearch_string = 'news/search?{query}&first={offset}'\n\n\n# do search-request\ndef request(query, params):\n offset = (params['pageno'] - 1) * 10 + 1\n\n if params['language'] == 'all':\n language = 'en-US'\n else:\n language = params['language'].replace('_', '-')\n\n search_path = search_string.format(\n query=urlencode({'q': query, 'setmkt': language}),\n offset=offset)\n\n params['cookies']['SRCHHPGUSR'] = \\\n 'NEWWND=0&NRSLT=-1&SRCHLANG=' + language.split('-')[0]\n\n params['url'] = base_url + search_path\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n dom = html.fromstring(resp.content)\n\n # parse results\n for result in dom.xpath('//div[@class=\"sn_r\"]'):\n link = result.xpath('.//div[@class=\"newstitle\"]/a')[0]\n url = link.attrib.get('href')\n title = ' '.join(link.xpath('.//text()'))\n contentXPath = result.xpath('.//div[@class=\"sn_txt\"]/div'\n '//span[@class=\"sn_snip\"]//text()')\n if contentXPath is not None:\n content = escape(' '.join(contentXPath))\n\n # parse publishedDate\n publishedDateXPath = result.xpath('.//div[@class=\"sn_txt\"]/div'\n '//span[contains(@class,\"sn_ST\")]'\n '//span[contains(@class,\"sn_tm\")]'\n '//text()')\n if publishedDateXPath is not None:\n publishedDate = escape(' '.join(publishedDateXPath))\n\n if re.match(\"^[0-9]+ minute(s|) ago$\", publishedDate):\n timeNumbers = re.findall(r'\\d+', publishedDate)\n publishedDate = datetime.now()\\\n - timedelta(minutes=int(timeNumbers[0]))\n elif re.match(\"^[0-9]+ hour(s|) ago$\", publishedDate):\n timeNumbers = re.findall(r'\\d+', publishedDate)\n publishedDate = datetime.now()\\\n - timedelta(hours=int(timeNumbers[0]))\n elif re.match(\"^[0-9]+ hour(s|),\"\n \" [0-9]+ minute(s|) ago$\", publishedDate):\n timeNumbers = re.findall(r'\\d+', publishedDate)\n publishedDate = datetime.now()\\\n - timedelta(hours=int(timeNumbers[0]))\\\n - timedelta(minutes=int(timeNumbers[1]))\n elif re.match(\"^[0-9]+ day(s|) ago$\", publishedDate):\n timeNumbers = re.findall(r'\\d+', publishedDate)\n publishedDate = datetime.now()\\\n - timedelta(days=int(timeNumbers[0]))\n else:\n try:\n # FIXME use params['language'] to parse either mm/dd or dd/mm\n publishedDate = parser.parse(publishedDate, dayfirst=False)\n except TypeError:\n # FIXME\n publishedDate = datetime.now()\n\n # append result\n results.append({'url': url,\n 'title': title,\n 'publishedDate': publishedDate,\n 'content': content})\n\n # return results\n return results\n",
"path": "searx/engines/bing_news.py"
}
] | [
{
"content": "## Bing (News)\n#\n# @website https://www.bing.com/news\n# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),\n# max. 5000 query/month\n#\n# @using-api no (because of query limit)\n# @results HTML (using search portal)\n# @stable no (HTML can change)\n# @parse url, title, content, publishedDate\n\nfrom urllib import urlencode\nfrom cgi import escape\nfrom lxml import html\nfrom datetime import datetime, timedelta\nfrom dateutil import parser\nimport re\n\n# engine dependent config\ncategories = ['news']\npaging = True\nlanguage_support = True\n\n# search-url\nbase_url = 'https://www.bing.com/'\nsearch_string = 'news/search?{query}&first={offset}'\n\n\n# do search-request\ndef request(query, params):\n offset = (params['pageno'] - 1) * 10 + 1\n\n if params['language'] == 'all':\n language = 'en-US'\n else:\n language = params['language'].replace('_', '-')\n\n search_path = search_string.format(\n query=urlencode({'q': query, 'setmkt': language}),\n offset=offset)\n\n params['cookies']['_FP'] = \"ui=en-US\"\n\n params['url'] = base_url + search_path\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n dom = html.fromstring(resp.content)\n\n # parse results\n for result in dom.xpath('//div[@class=\"sn_r\"]'):\n link = result.xpath('.//div[@class=\"newstitle\"]/a')[0]\n url = link.attrib.get('href')\n title = ' '.join(link.xpath('.//text()'))\n contentXPath = result.xpath('.//div[@class=\"sn_txt\"]/div'\n '//span[@class=\"sn_snip\"]//text()')\n if contentXPath is not None:\n content = escape(' '.join(contentXPath))\n\n # parse publishedDate\n publishedDateXPath = result.xpath('.//div[@class=\"sn_txt\"]/div'\n '//span[contains(@class,\"sn_ST\")]'\n '//span[contains(@class,\"sn_tm\")]'\n '//text()')\n if publishedDateXPath is not None:\n publishedDate = escape(' '.join(publishedDateXPath))\n\n if re.match(\"^[0-9]+ minute(s|) ago$\", publishedDate):\n timeNumbers = re.findall(r'\\d+', publishedDate)\n publishedDate = datetime.now()\\\n - timedelta(minutes=int(timeNumbers[0]))\n elif re.match(\"^[0-9]+ hour(s|) ago$\", publishedDate):\n timeNumbers = re.findall(r'\\d+', publishedDate)\n publishedDate = datetime.now()\\\n - timedelta(hours=int(timeNumbers[0]))\n elif re.match(\"^[0-9]+ hour(s|),\"\n \" [0-9]+ minute(s|) ago$\", publishedDate):\n timeNumbers = re.findall(r'\\d+', publishedDate)\n publishedDate = datetime.now()\\\n - timedelta(hours=int(timeNumbers[0]))\\\n - timedelta(minutes=int(timeNumbers[1]))\n elif re.match(\"^[0-9]+ day(s|) ago$\", publishedDate):\n timeNumbers = re.findall(r'\\d+', publishedDate)\n publishedDate = datetime.now()\\\n - timedelta(days=int(timeNumbers[0]))\n else:\n try:\n # FIXME use params['language'] to parse either mm/dd or dd/mm\n publishedDate = parser.parse(publishedDate, dayfirst=False)\n except TypeError:\n # FIXME\n publishedDate = datetime.now()\n\n # append result\n results.append({'url': url,\n 'title': title,\n 'publishedDate': publishedDate,\n 'content': content})\n\n # return results\n return results\n",
"path": "searx/engines/bing_news.py"
}
] | diff --git a/searx/engines/bing_news.py b/searx/engines/bing_news.py
index 3dda04cbbc..789a23b898 100644
--- a/searx/engines/bing_news.py
+++ b/searx/engines/bing_news.py
@@ -39,8 +39,7 @@ def request(query, params):
query=urlencode({'q': query, 'setmkt': language}),
offset=offset)
- params['cookies']['SRCHHPGUSR'] = \
- 'NEWWND=0&NRSLT=-1&SRCHLANG=' + language.split('-')[0]
+ params['cookies']['_FP'] = "ui=en-US"
params['url'] = base_url + search_path
return params
|
celery__celery-2598 | CELERY_RESULT_SERIALIZER = 'json' breaks Exception marshaling
Setting `CELERY_RESULT_SERIALIZER = 'json'` and raising an exception in the worker leads to this:
```
/path/to/lib/python2.7/site-packages/celery/result.py in get(self, timeout, propagate, interval, no_ack, follow_parents, EXCEPTION_STATES, PROPAGATE_STATES)
173 status = meta['status']
174 if status in PROPAGATE_STATES and propagate:
--> 175 raise meta['result']
176 return meta['result']
177 wait = get # deprecated alias to :meth:`get`.
TypeError: exceptions must be old-style classes or derived from BaseException, not dict
```
where the contents of `meta['result']` are (in my case):
```
{u'exc_message': u'unknown keys: nam', u'exc_type': u'ValueError'}
```
so it _looks_ like celery could convert the dict to a real exception before raising, but it does not currently. Changing back to `pickle` works as expected.
The bug can be reproduced with the following:
``` python
# jsonresults.py
from celery.app.base import Celery
CELERY_RESULT_SERIALIZER = 'json'
CELERY_RESULT_BACKEND = 'amqp'
app = Celery(config_source=__name__)
@app.task
def hello():
raise ValueError('go away')
```
worker:
```
# python -m celery --app=jsonresults:app worker
```
caller:
``` python
import jsonresults
jsonresults.hello.delay().get()
```
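A hedged sketch of the conversion the report suggests: rebuild a real, raisable exception from the JSON-decoded `{'exc_type': ..., 'exc_message': ...}` dict. This only illustrates the idea; the merged fix instead routes the decoded meta through `self.meta_from_decoded()` in the AMQP backend, as the diff below shows.

```python
# Illustration only (written for Python 3): turn the serialized error dict
# back into an exception instance, falling back to Exception for unknown types.
import builtins


def exception_from_dict(meta):
    exc_type = getattr(builtins, meta['exc_type'], Exception)
    if not (isinstance(exc_type, type) and issubclass(exc_type, BaseException)):
        exc_type = Exception
    return exc_type(meta['exc_message'])


# exception_from_dict({'exc_type': 'ValueError', 'exc_message': 'go away'})
# -> ValueError('go away'), which can be raised instead of the raw dict
```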
| [
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\n celery.backends.amqp\n ~~~~~~~~~~~~~~~~~~~~\n\n The AMQP result backend.\n\n This backend publishes results as messages.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nimport socket\n\nfrom collections import deque\nfrom operator import itemgetter\n\nfrom kombu import Exchange, Queue, Producer, Consumer\n\nfrom celery import states\nfrom celery.exceptions import TimeoutError\nfrom celery.five import range, monotonic\nfrom celery.utils.functional import dictfilter\nfrom celery.utils.log import get_logger\nfrom celery.utils.timeutils import maybe_s_to_ms\n\nfrom .base import BaseBackend\n\n__all__ = ['BacklogLimitExceeded', 'AMQPBackend']\n\nlogger = get_logger(__name__)\n\n\nclass BacklogLimitExceeded(Exception):\n \"\"\"Too much state history to fast-forward.\"\"\"\n\n\ndef repair_uuid(s):\n # Historically the dashes in UUIDS are removed from AMQ entity names,\n # but there is no known reason to. Hopefully we'll be able to fix\n # this in v4.0.\n return '%s-%s-%s-%s-%s' % (s[:8], s[8:12], s[12:16], s[16:20], s[20:])\n\n\nclass NoCacheQueue(Queue):\n can_cache_declaration = False\n\n\nclass AMQPBackend(BaseBackend):\n \"\"\"Publishes results by sending messages.\"\"\"\n Exchange = Exchange\n Queue = NoCacheQueue\n Consumer = Consumer\n Producer = Producer\n\n BacklogLimitExceeded = BacklogLimitExceeded\n\n persistent = True\n supports_autoexpire = True\n supports_native_join = True\n\n retry_policy = {\n 'max_retries': 20,\n 'interval_start': 0,\n 'interval_step': 1,\n 'interval_max': 1,\n }\n\n def __init__(self, app, connection=None, exchange=None, exchange_type=None,\n persistent=None, serializer=None, auto_delete=True, **kwargs):\n super(AMQPBackend, self).__init__(app, **kwargs)\n conf = self.app.conf\n self._connection = connection\n self.persistent = self.prepare_persistent(persistent)\n self.delivery_mode = 2 if self.persistent else 1\n exchange = exchange or conf.CELERY_RESULT_EXCHANGE\n exchange_type = exchange_type or conf.CELERY_RESULT_EXCHANGE_TYPE\n self.exchange = self._create_exchange(\n exchange, exchange_type, self.delivery_mode,\n )\n self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER\n self.auto_delete = auto_delete\n self.queue_arguments = dictfilter({\n 'x-expires': maybe_s_to_ms(self.expires),\n })\n\n def _create_exchange(self, name, type='direct', delivery_mode=2):\n return self.Exchange(name=name,\n type=type,\n delivery_mode=delivery_mode,\n durable=self.persistent,\n auto_delete=False)\n\n def _create_binding(self, task_id):\n name = self.rkey(task_id)\n return self.Queue(name=name,\n exchange=self.exchange,\n routing_key=name,\n durable=self.persistent,\n auto_delete=self.auto_delete,\n queue_arguments=self.queue_arguments)\n\n def revive(self, channel):\n pass\n\n def rkey(self, task_id):\n return task_id.replace('-', '')\n\n def destination_for(self, task_id, request):\n if request:\n return self.rkey(task_id), request.correlation_id or task_id\n return self.rkey(task_id), task_id\n\n def store_result(self, task_id, result, status,\n traceback=None, request=None, **kwargs):\n \"\"\"Send task return value and status.\"\"\"\n routing_key, correlation_id = self.destination_for(task_id, request)\n if not routing_key:\n return\n with self.app.amqp.producer_pool.acquire(block=True) as producer:\n producer.publish(\n {'task_id': task_id, 'status': status,\n 'result': self.encode_result(result, status),\n 'traceback': traceback,\n 'children': self.current_task_children(request)},\n 
exchange=self.exchange,\n routing_key=routing_key,\n correlation_id=correlation_id,\n serializer=self.serializer,\n retry=True, retry_policy=self.retry_policy,\n declare=self.on_reply_declare(task_id),\n delivery_mode=self.delivery_mode,\n )\n return result\n\n def on_reply_declare(self, task_id):\n return [self._create_binding(task_id)]\n\n def wait_for(self, task_id, timeout=None, cache=True,\n no_ack=True, on_interval=None,\n READY_STATES=states.READY_STATES,\n PROPAGATE_STATES=states.PROPAGATE_STATES,\n **kwargs):\n cached_meta = self._cache.get(task_id)\n if cache and cached_meta and \\\n cached_meta['status'] in READY_STATES:\n return cached_meta\n else:\n try:\n return self.consume(task_id, timeout=timeout, no_ack=no_ack,\n on_interval=on_interval)\n except socket.timeout:\n raise TimeoutError('The operation timed out.')\n\n def get_task_meta(self, task_id, backlog_limit=1000):\n # Polling and using basic_get\n with self.app.pool.acquire_channel(block=True) as (_, channel):\n binding = self._create_binding(task_id)(channel)\n binding.declare()\n\n prev = latest = acc = None\n for i in range(backlog_limit): # spool ffwd\n acc = binding.get(\n accept=self.accept, no_ack=False,\n )\n if not acc: # no more messages\n break\n if acc.payload['task_id'] == task_id:\n prev, latest = latest, acc\n if prev:\n # backends are not expected to keep history,\n # so we delete everything except the most recent state.\n prev.ack()\n prev = None\n else:\n raise self.BacklogLimitExceeded(task_id)\n\n if latest:\n payload = self._cache[task_id] = latest.payload\n latest.requeue()\n return payload\n else:\n # no new state, use previous\n try:\n return self._cache[task_id]\n except KeyError:\n # result probably pending.\n return {'status': states.PENDING, 'result': None}\n poll = get_task_meta # XXX compat\n\n def drain_events(self, connection, consumer,\n timeout=None, on_interval=None, now=monotonic, wait=None):\n wait = wait or connection.drain_events\n results = {}\n\n def callback(meta, message):\n if meta['status'] in states.READY_STATES:\n results[meta['task_id']] = meta\n\n consumer.callbacks[:] = [callback]\n time_start = now()\n\n while 1:\n # Total time spent may exceed a single call to wait()\n if timeout and now() - time_start >= timeout:\n raise socket.timeout()\n try:\n wait(timeout=1)\n except socket.timeout:\n pass\n if on_interval:\n on_interval()\n if results: # got event on the wanted channel.\n break\n self._cache.update(results)\n return results\n\n def consume(self, task_id, timeout=None, no_ack=True, on_interval=None):\n wait = self.drain_events\n with self.app.pool.acquire_channel(block=True) as (conn, channel):\n binding = self._create_binding(task_id)\n with self.Consumer(channel, binding,\n no_ack=no_ack, accept=self.accept) as consumer:\n while 1:\n try:\n return wait(\n conn, consumer, timeout, on_interval)[task_id]\n except KeyError:\n continue\n\n def _many_bindings(self, ids):\n return [self._create_binding(task_id) for task_id in ids]\n\n def get_many(self, task_ids, timeout=None, no_ack=True, on_message=None,\n now=monotonic, getfields=itemgetter('status', 'task_id'),\n READY_STATES=states.READY_STATES,\n PROPAGATE_STATES=states.PROPAGATE_STATES, **kwargs):\n with self.app.pool.acquire_channel(block=True) as (conn, channel):\n ids = set(task_ids)\n cached_ids = set()\n mark_cached = cached_ids.add\n for task_id in ids:\n try:\n cached = self._cache[task_id]\n except KeyError:\n pass\n else:\n if cached['status'] in READY_STATES:\n yield task_id, cached\n 
mark_cached(task_id)\n ids.difference_update(cached_ids)\n results = deque()\n push_result = results.append\n push_cache = self._cache.__setitem__\n decode_result = self.meta_from_decoded\n\n def _on_message(message):\n body = decode_result(message.decode())\n if on_message is not None:\n on_message(body)\n state, uid = getfields(body)\n if state in READY_STATES:\n push_result(body) \\\n if uid in task_ids else push_cache(uid, body)\n\n bindings = self._many_bindings(task_ids)\n with self.Consumer(channel, bindings, on_message=_on_message,\n accept=self.accept, no_ack=no_ack):\n wait = conn.drain_events\n popleft = results.popleft\n while ids:\n wait(timeout=timeout)\n while results:\n state = popleft()\n task_id = state['task_id']\n ids.discard(task_id)\n push_cache(task_id, state)\n yield task_id, state\n\n def reload_task_result(self, task_id):\n raise NotImplementedError(\n 'reload_task_result is not supported by this backend.')\n\n def reload_group_result(self, task_id):\n \"\"\"Reload group result, even if it has been previously fetched.\"\"\"\n raise NotImplementedError(\n 'reload_group_result is not supported by this backend.')\n\n def save_group(self, group_id, result):\n raise NotImplementedError(\n 'save_group is not supported by this backend.')\n\n def restore_group(self, group_id, cache=True):\n raise NotImplementedError(\n 'restore_group is not supported by this backend.')\n\n def delete_group(self, group_id):\n raise NotImplementedError(\n 'delete_group is not supported by this backend.')\n\n def __reduce__(self, args=(), kwargs={}):\n kwargs.update(\n connection=self._connection,\n exchange=self.exchange.name,\n exchange_type=self.exchange.type,\n persistent=self.persistent,\n serializer=self.serializer,\n auto_delete=self.auto_delete,\n expires=self.expires,\n )\n return super(AMQPBackend, self).__reduce__(args, kwargs)\n",
"path": "celery/backends/amqp.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\n celery.backends.amqp\n ~~~~~~~~~~~~~~~~~~~~\n\n The AMQP result backend.\n\n This backend publishes results as messages.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nimport socket\n\nfrom collections import deque\nfrom operator import itemgetter\n\nfrom kombu import Exchange, Queue, Producer, Consumer\n\nfrom celery import states\nfrom celery.exceptions import TimeoutError\nfrom celery.five import range, monotonic\nfrom celery.utils.functional import dictfilter\nfrom celery.utils.log import get_logger\nfrom celery.utils.timeutils import maybe_s_to_ms\n\nfrom .base import BaseBackend\n\n__all__ = ['BacklogLimitExceeded', 'AMQPBackend']\n\nlogger = get_logger(__name__)\n\n\nclass BacklogLimitExceeded(Exception):\n \"\"\"Too much state history to fast-forward.\"\"\"\n\n\ndef repair_uuid(s):\n # Historically the dashes in UUIDS are removed from AMQ entity names,\n # but there is no known reason to. Hopefully we'll be able to fix\n # this in v4.0.\n return '%s-%s-%s-%s-%s' % (s[:8], s[8:12], s[12:16], s[16:20], s[20:])\n\n\nclass NoCacheQueue(Queue):\n can_cache_declaration = False\n\n\nclass AMQPBackend(BaseBackend):\n \"\"\"Publishes results by sending messages.\"\"\"\n Exchange = Exchange\n Queue = NoCacheQueue\n Consumer = Consumer\n Producer = Producer\n\n BacklogLimitExceeded = BacklogLimitExceeded\n\n persistent = True\n supports_autoexpire = True\n supports_native_join = True\n\n retry_policy = {\n 'max_retries': 20,\n 'interval_start': 0,\n 'interval_step': 1,\n 'interval_max': 1,\n }\n\n def __init__(self, app, connection=None, exchange=None, exchange_type=None,\n persistent=None, serializer=None, auto_delete=True, **kwargs):\n super(AMQPBackend, self).__init__(app, **kwargs)\n conf = self.app.conf\n self._connection = connection\n self.persistent = self.prepare_persistent(persistent)\n self.delivery_mode = 2 if self.persistent else 1\n exchange = exchange or conf.CELERY_RESULT_EXCHANGE\n exchange_type = exchange_type or conf.CELERY_RESULT_EXCHANGE_TYPE\n self.exchange = self._create_exchange(\n exchange, exchange_type, self.delivery_mode,\n )\n self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER\n self.auto_delete = auto_delete\n self.queue_arguments = dictfilter({\n 'x-expires': maybe_s_to_ms(self.expires),\n })\n\n def _create_exchange(self, name, type='direct', delivery_mode=2):\n return self.Exchange(name=name,\n type=type,\n delivery_mode=delivery_mode,\n durable=self.persistent,\n auto_delete=False)\n\n def _create_binding(self, task_id):\n name = self.rkey(task_id)\n return self.Queue(name=name,\n exchange=self.exchange,\n routing_key=name,\n durable=self.persistent,\n auto_delete=self.auto_delete,\n queue_arguments=self.queue_arguments)\n\n def revive(self, channel):\n pass\n\n def rkey(self, task_id):\n return task_id.replace('-', '')\n\n def destination_for(self, task_id, request):\n if request:\n return self.rkey(task_id), request.correlation_id or task_id\n return self.rkey(task_id), task_id\n\n def store_result(self, task_id, result, status,\n traceback=None, request=None, **kwargs):\n \"\"\"Send task return value and status.\"\"\"\n routing_key, correlation_id = self.destination_for(task_id, request)\n if not routing_key:\n return\n with self.app.amqp.producer_pool.acquire(block=True) as producer:\n producer.publish(\n {'task_id': task_id, 'status': status,\n 'result': self.encode_result(result, status),\n 'traceback': traceback,\n 'children': self.current_task_children(request)},\n 
exchange=self.exchange,\n routing_key=routing_key,\n correlation_id=correlation_id,\n serializer=self.serializer,\n retry=True, retry_policy=self.retry_policy,\n declare=self.on_reply_declare(task_id),\n delivery_mode=self.delivery_mode,\n )\n return result\n\n def on_reply_declare(self, task_id):\n return [self._create_binding(task_id)]\n\n def wait_for(self, task_id, timeout=None, cache=True,\n no_ack=True, on_interval=None,\n READY_STATES=states.READY_STATES,\n PROPAGATE_STATES=states.PROPAGATE_STATES,\n **kwargs):\n cached_meta = self._cache.get(task_id)\n if cache and cached_meta and \\\n cached_meta['status'] in READY_STATES:\n return cached_meta\n else:\n try:\n return self.consume(task_id, timeout=timeout, no_ack=no_ack,\n on_interval=on_interval)\n except socket.timeout:\n raise TimeoutError('The operation timed out.')\n\n def get_task_meta(self, task_id, backlog_limit=1000):\n # Polling and using basic_get\n with self.app.pool.acquire_channel(block=True) as (_, channel):\n binding = self._create_binding(task_id)(channel)\n binding.declare()\n\n prev = latest = acc = None\n for i in range(backlog_limit): # spool ffwd\n acc = binding.get(\n accept=self.accept, no_ack=False,\n )\n if not acc: # no more messages\n break\n if acc.payload['task_id'] == task_id:\n prev, latest = latest, acc\n if prev:\n # backends are not expected to keep history,\n # so we delete everything except the most recent state.\n prev.ack()\n prev = None\n else:\n raise self.BacklogLimitExceeded(task_id)\n\n if latest:\n payload = self._cache[task_id] = latest.payload\n latest.requeue()\n return payload\n else:\n # no new state, use previous\n try:\n return self._cache[task_id]\n except KeyError:\n # result probably pending.\n return {'status': states.PENDING, 'result': None}\n poll = get_task_meta # XXX compat\n\n def drain_events(self, connection, consumer,\n timeout=None, on_interval=None, now=monotonic, wait=None):\n wait = wait or connection.drain_events\n results = {}\n\n def callback(meta, message):\n if meta['status'] in states.READY_STATES:\n results[meta['task_id']] = self.meta_from_decoded(meta)\n\n consumer.callbacks[:] = [callback]\n time_start = now()\n\n while 1:\n # Total time spent may exceed a single call to wait()\n if timeout and now() - time_start >= timeout:\n raise socket.timeout()\n try:\n wait(timeout=1)\n except socket.timeout:\n pass\n if on_interval:\n on_interval()\n if results: # got event on the wanted channel.\n break\n self._cache.update(results)\n return results\n\n def consume(self, task_id, timeout=None, no_ack=True, on_interval=None):\n wait = self.drain_events\n with self.app.pool.acquire_channel(block=True) as (conn, channel):\n binding = self._create_binding(task_id)\n with self.Consumer(channel, binding,\n no_ack=no_ack, accept=self.accept) as consumer:\n while 1:\n try:\n return wait(\n conn, consumer, timeout, on_interval)[task_id]\n except KeyError:\n continue\n\n def _many_bindings(self, ids):\n return [self._create_binding(task_id) for task_id in ids]\n\n def get_many(self, task_ids, timeout=None, no_ack=True, on_message=None,\n now=monotonic, getfields=itemgetter('status', 'task_id'),\n READY_STATES=states.READY_STATES,\n PROPAGATE_STATES=states.PROPAGATE_STATES, **kwargs):\n with self.app.pool.acquire_channel(block=True) as (conn, channel):\n ids = set(task_ids)\n cached_ids = set()\n mark_cached = cached_ids.add\n for task_id in ids:\n try:\n cached = self._cache[task_id]\n except KeyError:\n pass\n else:\n if cached['status'] in READY_STATES:\n yield task_id, 
cached\n mark_cached(task_id)\n ids.difference_update(cached_ids)\n results = deque()\n push_result = results.append\n push_cache = self._cache.__setitem__\n decode_result = self.meta_from_decoded\n\n def _on_message(message):\n body = decode_result(message.decode())\n if on_message is not None:\n on_message(body)\n state, uid = getfields(body)\n if state in READY_STATES:\n push_result(body) \\\n if uid in task_ids else push_cache(uid, body)\n\n bindings = self._many_bindings(task_ids)\n with self.Consumer(channel, bindings, on_message=_on_message,\n accept=self.accept, no_ack=no_ack):\n wait = conn.drain_events\n popleft = results.popleft\n while ids:\n wait(timeout=timeout)\n while results:\n state = popleft()\n task_id = state['task_id']\n ids.discard(task_id)\n push_cache(task_id, state)\n yield task_id, state\n\n def reload_task_result(self, task_id):\n raise NotImplementedError(\n 'reload_task_result is not supported by this backend.')\n\n def reload_group_result(self, task_id):\n \"\"\"Reload group result, even if it has been previously fetched.\"\"\"\n raise NotImplementedError(\n 'reload_group_result is not supported by this backend.')\n\n def save_group(self, group_id, result):\n raise NotImplementedError(\n 'save_group is not supported by this backend.')\n\n def restore_group(self, group_id, cache=True):\n raise NotImplementedError(\n 'restore_group is not supported by this backend.')\n\n def delete_group(self, group_id):\n raise NotImplementedError(\n 'delete_group is not supported by this backend.')\n\n def __reduce__(self, args=(), kwargs={}):\n kwargs.update(\n connection=self._connection,\n exchange=self.exchange.name,\n exchange_type=self.exchange.type,\n persistent=self.persistent,\n serializer=self.serializer,\n auto_delete=self.auto_delete,\n expires=self.expires,\n )\n return super(AMQPBackend, self).__reduce__(args, kwargs)\n",
"path": "celery/backends/amqp.py"
}
] | diff --git a/celery/backends/amqp.py b/celery/backends/amqp.py
index 596a4c667c9..4871e06235a 100644
--- a/celery/backends/amqp.py
+++ b/celery/backends/amqp.py
@@ -195,7 +195,7 @@ def drain_events(self, connection, consumer,
def callback(meta, message):
if meta['status'] in states.READY_STATES:
- results[meta['task_id']] = meta
+ results[meta['task_id']] = self.meta_from_decoded(meta)
consumer.callbacks[:] = [callback]
time_start = now()
diff --git a/celery/tests/backends/test_amqp.py b/celery/tests/backends/test_amqp.py
index 031481c8d25..32bda1c9c89 100644
--- a/celery/tests/backends/test_amqp.py
+++ b/celery/tests/backends/test_amqp.py
@@ -13,6 +13,7 @@
from celery.backends.amqp import AMQPBackend
from celery.exceptions import TimeoutError
from celery.five import Empty, Queue, range
+from celery.result import AsyncResult
from celery.utils import uuid
from celery.tests.case import (
@@ -246,10 +247,20 @@ def test_wait_for(self):
with self.assertRaises(TimeoutError):
b.wait_for(tid, timeout=0.01, cache=False)
- def test_drain_events_remaining_timeouts(self):
+ def test_drain_events_decodes_exceptions_in_meta(self):
+ tid = uuid()
+ b = self.create_backend(serializer="json")
+ b.store_result(tid, RuntimeError("aap"), states.FAILURE)
+ result = AsyncResult(tid, backend=b)
- class Connection(object):
+ with self.assertRaises(Exception) as cm:
+ result.get()
+ self.assertEqual(cm.exception.__class__.__name__, "RuntimeError")
+ self.assertEqual(str(cm.exception), "aap")
+
+ def test_drain_events_remaining_timeouts(self):
+ class Connection(object):
def drain_events(self, timeout=None):
pass
diff --git a/tox.ini b/tox.ini
index 80cfd5c5544..4977e8e4b46 100644
--- a/tox.ini
+++ b/tox.ini
@@ -48,7 +48,7 @@ deps = -r{toxinidir}/requirements/default.txt
-r{toxinidir}/requirements/dev.txt
setenv = C_DEBUG_TEST = 1
commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
- pip install -U -r{toxinidir}/requirements/dev.txt
+ pip install -q -U -r{toxinidir}/requirements/dev.txt
nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
[testenv:pypy3]
@@ -59,7 +59,7 @@ deps = -r{toxinidir}/requirements/default.txt
-r{toxinidir}/requirements/dev.txt
setenv = C_DEBUG_TEST = 1
commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
- pip install -U -r{toxinidir}/requirements/dev.txt
+ pip install -q -U -r{toxinidir}/requirements/dev.txt
nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
[testenv:docs]
|
conda__conda-build-1593 | Python code compiled when it shouldn't be
Ran into an issue earlier, which was correctly identified thanks to @msarahan. It turns out `conda-build` was compiling Python code even though Python was not in `requirements/build`.
xref: https://github.com/conda-forge/git-feedstock/pull/20#issuecomment-267858227
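A minimal sketch of the guard that addresses this, using a simplified signature: byte-compilation should be a no-op when the build prefix contains no Python interpreter, rather than compiling with some other Python. The real `compile_missing_pyc()` (see the diff below) also filters `bin`/`Scripts` entries and honors `skip_compile_pyc` patterns.

```python
# Simplified sketch of the early-return guard added by the fix.
import os
from subprocess import call


def compile_missing_pyc(files, cwd, python_exe):
    # If Python is not in the build environment, there is nothing to
    # byte-compile -- return early instead of compiling anyway.
    if not os.path.isfile(python_exe):
        return
    for f in [fn for fn in files if fn.endswith('.py')]:
        call([python_exe, '-Wi', '-m', 'py_compile', f], cwd=cwd)
```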
| [
{
"content": "from __future__ import absolute_import, division, print_function\n\nfrom collections import defaultdict\nfrom functools import partial\nfrom glob import glob\nimport io\nimport locale\nimport mmap\nimport re\nimport os\nimport fnmatch\nfrom os.path import (basename, dirname, join, splitext, isdir, isfile, exists,\n islink, realpath, relpath, normpath)\nimport stat\nfrom subprocess import call, check_output\nimport sys\ntry:\n from os import readlink\nexcept ImportError:\n readlink = False\n\nfrom conda_build.os_utils import external\nfrom .conda_interface import lchmod\nfrom .conda_interface import walk_prefix\nfrom .conda_interface import md5_file\nfrom .conda_interface import PY3\n\nfrom conda_build import utils\n\nif sys.platform.startswith('linux'):\n from conda_build.os_utils import elf\nelif sys.platform == 'darwin':\n from conda_build.os_utils import macho\n\nSHEBANG_PAT = re.compile(br'^#!.+$', re.M)\n\n\ndef is_obj(path):\n assert sys.platform != 'win32'\n return bool((sys.platform.startswith('linux') and elf.is_elf(path)) or\n (sys.platform == 'darwin' and macho.is_macho(path)))\n\n\ndef fix_shebang(f, prefix, build_python, osx_is_app=False):\n path = join(prefix, f)\n if is_obj(path):\n return\n elif os.path.islink(path):\n return\n elif not os.path.isfile(path):\n return\n\n if os.stat(path).st_size == 0:\n return\n\n with io.open(path, encoding=locale.getpreferredencoding(), mode='r+') as fi:\n try:\n data = fi.read(100)\n except UnicodeDecodeError: # file is binary\n return\n\n # regexp on the memory mapped file so we only read it into\n # memory if the regexp matches.\n mm = mmap.mmap(fi.fileno(), 0)\n m = SHEBANG_PAT.match(mm)\n\n if not (m and b'python' in m.group()):\n return\n\n data = mm[:]\n\n encoding = sys.stdout.encoding or 'utf8'\n\n py_exec = ('/bin/bash ' + prefix + '/bin/python.app'\n if sys.platform == 'darwin' and osx_is_app else\n prefix + '/bin/' + basename(build_python))\n new_data = SHEBANG_PAT.sub(b'#!' 
+ py_exec.encode(encoding), data, count=1)\n if new_data == data:\n return\n print(\"updating shebang:\", f)\n with io.open(path, 'w', encoding=locale.getpreferredencoding()) as fo:\n fo.write(new_data.decode(encoding))\n os.chmod(path, 0o775)\n\n\ndef write_pth(egg_path, config):\n fn = basename(egg_path)\n with open(join(utils.get_site_packages(config.build_prefix),\n '%s.pth' % (fn.split('-')[0])), 'w') as fo:\n fo.write('./%s\\n' % fn)\n\n\ndef remove_easy_install_pth(files, prefix, config, preserve_egg_dir=False):\n \"\"\"\n remove the need for easy-install.pth and finally remove easy-install.pth\n itself\n \"\"\"\n absfiles = [join(prefix, f) for f in files]\n sp_dir = utils.get_site_packages(prefix)\n for egg_path in glob(join(sp_dir, '*-py*.egg')):\n if isdir(egg_path):\n if preserve_egg_dir or not any(join(egg_path, i) in absfiles for i\n in walk_prefix(egg_path, False, windows_forward_slashes=False)):\n write_pth(egg_path, config=config)\n continue\n\n print('found egg dir:', egg_path)\n try:\n os.rename(join(egg_path, 'EGG-INFO'),\n egg_path + '-info')\n except OSError:\n pass\n utils.rm_rf(join(egg_path, 'EGG-INFO'))\n for fn in os.listdir(egg_path):\n if fn == '__pycache__':\n utils.rm_rf(join(egg_path, fn))\n else:\n # this might be a name-space package\n # so the package directory already exists\n # from another installed dependency\n if os.path.exists(join(sp_dir, fn)):\n try:\n utils.copy_into(join(egg_path, fn), join(sp_dir, fn), config.timeout)\n utils.rm_rf(join(egg_path, fn))\n except IOError as e:\n fn = os.path.basename(str(e).split()[-1])\n raise IOError(\"Tried to merge folder {egg_path} into {sp_dir}, but {fn}\"\n \" exists in both locations. Please either add \"\n \"build/preserve_egg_dir: True to meta.yaml, or manually \"\n \"remove the file during your install process to avoid \"\n \"this conflict.\"\n .format(egg_path=egg_path, sp_dir=sp_dir, fn=fn))\n else:\n os.rename(join(egg_path, fn), join(sp_dir, fn))\n\n elif isfile(egg_path):\n if egg_path not in absfiles:\n continue\n print('found egg:', egg_path)\n write_pth(egg_path, config=config)\n\n utils.rm_rf(join(sp_dir, 'easy-install.pth'))\n\n\ndef rm_py_along_so(prefix):\n \"\"\"remove .py (.pyc) files alongside .so or .pyd files\"\"\"\n for root, _, files in os.walk(prefix):\n for fn in files:\n if fn.endswith(('.so', '.pyd')):\n name, _ = splitext(fn)\n for ext in '.py', '.pyc', '.pyo':\n if name + ext in files:\n os.unlink(join(root, name + ext))\n\n\ndef rm_pyo(files, prefix):\n \"\"\"pyo considered harmful: https://www.python.org/dev/peps/pep-0488/\n\n The build may have proceeded with:\n [install]\n optimize = 1\n .. in setup.cfg in which case we can end up with some stdlib __pycache__\n files ending in .opt-N.pyc on Python 3, as well as .pyo files for the\n package's own python. 
\"\"\"\n re_pyo = re.compile(r'.*(?:\\.pyo$|\\.opt-[0-9]\\.pyc)')\n for fn in files:\n if re_pyo.match(fn):\n os.unlink(os.path.join(prefix, fn))\n\n\ndef rm_pyc(files, prefix):\n re_pyc = re.compile(r'.*(?:\\.pyc$)')\n for fn in files:\n if re_pyc.match(fn):\n os.unlink(os.path.join(prefix, fn))\n\n\ndef compile_missing_pyc(files, cwd, python_exe, skip_compile_pyc=()):\n compile_files = []\n skip_compile_pyc_n = [os.path.normpath(skip) for skip in skip_compile_pyc]\n skipped_files = set()\n for skip in skip_compile_pyc_n:\n skipped_files.update(set(fnmatch.filter(files, skip)))\n unskipped_files = set(files) - skipped_files\n for fn in unskipped_files:\n # omit files in Library/bin, Scripts, and the root prefix - they are not generally imported\n if sys.platform == 'win32':\n if any([fn.lower().startswith(start) for start in ['library/bin', 'library\\\\bin',\n 'scripts']]):\n continue\n else:\n if fn.startswith('bin'):\n continue\n cache_prefix = (\"__pycache__\" + os.sep) if PY3 else \"\"\n if (fn.endswith(\".py\") and\n os.path.dirname(fn) + cache_prefix + os.path.basename(fn) + 'c' not in files):\n compile_files.append(fn)\n\n if compile_files:\n if not os.path.isfile(python_exe):\n print('compiling .pyc files... failed as no python interpreter was found')\n else:\n print('compiling .pyc files...')\n for f in compile_files:\n call([python_exe, '-Wi', '-m', 'py_compile', f], cwd=cwd)\n\n\ndef post_process(files, prefix, config, preserve_egg_dir=False, noarch=False, skip_compile_pyc=()):\n rm_pyo(files, prefix)\n if noarch:\n rm_pyc(files, prefix)\n else:\n compile_missing_pyc(files, cwd=prefix, python_exe=config.build_python,\n skip_compile_pyc=skip_compile_pyc)\n remove_easy_install_pth(files, prefix, config, preserve_egg_dir=preserve_egg_dir)\n rm_py_along_so(prefix)\n\n\ndef find_lib(link, prefix, path=None):\n from conda_build.build import prefix_files\n files = prefix_files(prefix)\n if link.startswith(prefix):\n link = normpath(link[len(prefix) + 1:])\n if link not in files:\n sys.exit(\"Error: Could not find %s\" % link)\n return link\n if link.startswith('/'): # but doesn't start with the build prefix\n return\n if link.startswith('@rpath/'):\n # Assume the rpath already points to lib, so there is no need to\n # change it.\n return\n if '/' not in link or link.startswith('@executable_path/'):\n link = basename(link)\n file_names = defaultdict(list)\n for f in files:\n file_names[basename(f)].append(f)\n if link not in file_names:\n sys.exit(\"Error: Could not find %s\" % link)\n if len(file_names[link]) > 1:\n if path and basename(path) == link:\n # The link is for the file itself, just use it\n return path\n # Allow for the possibility of the same library appearing in\n # multiple places.\n md5s = set()\n for f in file_names[link]:\n md5s.add(md5_file(join(prefix, f)))\n if len(md5s) > 1:\n sys.exit(\"Error: Found multiple instances of %s: %s\" % (link, file_names[link]))\n else:\n file_names[link].sort()\n print(\"Found multiple instances of %s (%s). 
\"\n \"Choosing the first one.\" % (link, file_names[link]))\n return file_names[link][0]\n print(\"Don't know how to find %s, skipping\" % link)\n\n\ndef osx_ch_link(path, link_dict, prefix):\n link = link_dict['name']\n print(\"Fixing linking of %s in %s\" % (link, path))\n link_loc = find_lib(link, prefix, path)\n if not link_loc:\n return\n\n lib_to_link = relpath(dirname(link_loc), 'lib')\n # path_to_lib = utils.relative(path[len(prefix) + 1:])\n\n # e.g., if\n # path = '/build_prefix/lib/some/stuff/libstuff.dylib'\n # link_loc = 'lib/things/libthings.dylib'\n\n # then\n\n # lib_to_link = 'things'\n # path_to_lib = '../..'\n\n # @rpath always means 'lib', link will be at\n # @rpath/lib_to_link/basename(link), like @rpath/things/libthings.dylib.\n\n # For when we can't use @rpath, @loader_path means the path to the library\n # ('path'), so from path to link is\n # @loader_path/path_to_lib/lib_to_link/basename(link), like\n # @loader_path/../../things/libthings.dylib.\n\n ret = '@rpath/%s/%s' % (lib_to_link, basename(link))\n\n # XXX: IF the above fails for whatever reason, the below can be used\n # TODO: This might contain redundant ..'s if link and path are both in\n # some subdirectory of lib.\n # ret = '@loader_path/%s/%s/%s' % (path_to_lib, lib_to_link, basename(link))\n\n ret = ret.replace('/./', '/')\n\n return ret\n\n\ndef mk_relative_osx(path, prefix, build_prefix=None):\n '''\n if build_prefix is None, the_n this is a standard conda build. The path\n and all dependencies are in the build_prefix.\n\n if package is built in develop mode, build_prefix is specified. Object\n specified by 'path' needs to relink runtime dependences to libs found in\n build_prefix/lib/. Also, in develop mode, 'path' is not in 'build_prefix'\n '''\n if build_prefix is None:\n assert path.startswith(prefix + '/')\n else:\n prefix = build_prefix\n\n assert sys.platform == 'darwin' and is_obj(path)\n s = macho.install_name_change(path, partial(osx_ch_link, prefix=prefix))\n\n names = macho.otool(path)\n if names:\n # Add an rpath to every executable to increase the chances of it\n # being found.\n rpath = join('@loader_path',\n relpath(join(prefix, 'lib'),\n dirname(path)), '').replace('/./', '/')\n macho.add_rpath(path, rpath, verbose=True)\n\n # 10.7 install_name_tool -delete_rpath causes broken dylibs, I will revisit this ASAP.\n # .. and remove config.build_prefix/lib which was added in-place of\n # DYLD_FALLBACK_LIBRARY_PATH since El Capitan's SIP.\n # macho.delete_rpath(path, config.build_prefix + '/lib', verbose = True)\n\n if s:\n # Skip for stub files, which have to use binary_has_prefix_files to be\n # made relocatable.\n assert_relative_osx(path, prefix)\n\n\ndef mk_relative_linux(f, prefix, rpaths=('lib',)):\n 'Respects the original values and converts abs to $ORIGIN-relative'\n\n elf = join(prefix, f)\n origin = dirname(elf)\n\n patchelf = external.find_executable('patchelf', prefix)\n try:\n existing = check_output([patchelf, '--print-rpath', elf]).decode('utf-8').splitlines()[0]\n except:\n print('patchelf: --print-rpath failed for %s\\n' % (elf))\n return\n existing = existing.split(os.pathsep)\n new = []\n for old in existing:\n if old.startswith('$ORIGIN/'):\n new.append(old)\n elif old.startswith('/'):\n # Test if this absolute path is outside of prefix. That is fatal.\n relpath = os.path.relpath(old, prefix)\n if relpath.startswith('..' 
+ os.sep):\n print('Warning: rpath {0} is outside prefix {1} (removing it)'.format(old, prefix))\n else:\n relpath = '$ORIGIN/' + os.path.relpath(old, origin)\n if relpath not in new:\n new.append(relpath)\n # Ensure that the asked-for paths are also in new.\n for rpath in rpaths:\n if not rpath.startswith('/'):\n # IMHO utils.relative shouldn't exist, but I am too paranoid to remove\n # it, so instead, make sure that what I think it should be replaced by\n # gives the same result and assert if not. Yeah, I am a chicken.\n rel_ours = os.path.normpath(utils.relative(f, rpath))\n rel_stdlib = os.path.normpath(os.path.relpath(rpath, os.path.dirname(f)))\n assert rel_ours == rel_stdlib, \\\n 'utils.relative {0} and relpath {1} disagree for {2}, {3}'.format(\n rel_ours, rel_stdlib, f, rpath)\n rpath = '$ORIGIN/' + rel_stdlib\n if rpath not in new:\n new.append(rpath)\n rpath = ':'.join(new)\n print('patchelf: file: %s\\n setting rpath to: %s' % (elf, rpath))\n call([patchelf, '--force-rpath', '--set-rpath', rpath, elf])\n\n\ndef assert_relative_osx(path, prefix):\n for name in macho.get_dylibs(path):\n assert not name.startswith(prefix), path\n\n\ndef mk_relative(m, f, prefix):\n assert sys.platform != 'win32'\n path = join(prefix, f)\n if not is_obj(path):\n return\n\n if sys.platform.startswith('linux'):\n mk_relative_linux(f, prefix=prefix, rpaths=m.get_value('build/rpaths', ['lib']))\n elif sys.platform == 'darwin':\n mk_relative_osx(path, prefix=prefix)\n\n\ndef fix_permissions(files, prefix):\n print(\"Fixing permissions\")\n for root, dirs, _ in os.walk(prefix):\n for dn in dirs:\n lchmod(join(root, dn), 0o775)\n\n for f in files:\n path = join(prefix, f)\n st = os.lstat(path)\n old_mode = stat.S_IMODE(st.st_mode)\n new_mode = old_mode\n # broadcast execute\n if old_mode & stat.S_IXUSR:\n new_mode = new_mode | stat.S_IXGRP | stat.S_IXOTH\n # ensure user and group can write and all can read\n new_mode = new_mode | stat.S_IWUSR | stat.S_IWGRP | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH # noqa\n if old_mode != new_mode:\n lchmod(path, new_mode)\n\n\ndef post_build(m, files, prefix, build_python, croot):\n print('number of files:', len(files))\n fix_permissions(files, prefix)\n\n for f in files:\n make_hardlink_copy(f, prefix)\n\n if sys.platform == 'win32':\n return\n\n binary_relocation = m.binary_relocation()\n if not binary_relocation:\n print(\"Skipping binary relocation logic\")\n osx_is_app = bool(m.get_value('build/osx_is_app', False))\n\n check_symlinks(files, prefix, croot)\n\n for f in files:\n if f.startswith('bin/'):\n fix_shebang(f, prefix=prefix, build_python=build_python, osx_is_app=osx_is_app)\n if binary_relocation is True or (isinstance(f, list) and f in binary_relocation):\n mk_relative(m, f, prefix)\n\n\ndef check_symlinks(files, prefix, croot):\n if readlink is False:\n return # Not on Unix system\n msgs = []\n real_build_prefix = realpath(prefix)\n for f in files:\n path = join(real_build_prefix, f)\n if islink(path):\n link_path = readlink(path)\n real_link_path = realpath(path)\n # symlinks to binaries outside of the same dir don't work. 
RPATH stuff gets confused\n # because ld.so follows symlinks in RPATHS\n # If condition exists, then copy the file rather than symlink it.\n if (not os.path.dirname(link_path) == os.path.dirname(real_link_path) and\n is_obj(f)):\n os.remove(path)\n utils.copy_into(real_link_path, path)\n elif real_link_path.startswith(real_build_prefix):\n # If the path is in the build prefix, this is fine, but\n # the link needs to be relative\n if not link_path.startswith('.'):\n # Don't change the link structure if it is already a\n # relative link. It's possible that ..'s later in the path\n # can result in a broken link still, but we'll assume that\n # such crazy things don't happen.\n print(\"Making absolute symlink %s -> %s relative\" % (f, link_path))\n os.unlink(path)\n os.symlink(relpath(real_link_path, dirname(path)), path)\n else:\n # Symlinks to absolute paths on the system (like /usr) are fine.\n if real_link_path.startswith(croot):\n msgs.append(\"%s is a symlink to a path that may not \"\n \"exist after the build is completed (%s)\" % (f, link_path))\n\n if msgs:\n for msg in msgs:\n print(\"Error: %s\" % msg, file=sys.stderr)\n sys.exit(1)\n\n\ndef make_hardlink_copy(path, prefix):\n \"\"\"Hardlinks create invalid packages. Copy files to break the link.\n Symlinks are OK, and unaffected here.\"\"\"\n if not os.path.isabs(path) and not os.path.exists(path):\n path = os.path.normpath(os.path.join(prefix, path))\n nlinks = os.lstat(path).st_nlink\n dest = 'tmpfile'\n if os.path.isabs(path):\n dest = os.path.join(os.getcwd(), dest)\n if nlinks > 1:\n # copy file to new name\n utils.copy_into(path, dest)\n # remove old file\n utils.rm_rf(path)\n # rename copy to original filename\n utils.copy_into(dest, path)\n utils.rm_rf(dest)\n\n\ndef get_build_metadata(m, config):\n src_dir = config.work_dir\n\n if \"build\" not in m.meta:\n m.meta[\"build\"] = {}\n if exists(join(src_dir, '__conda_version__.txt')):\n print(\"Deprecation warning: support for __conda_version__ will be removed in Conda build 3.0.\" # noqa\n \"Try Jinja templates instead: \"\n \"http://conda.pydata.org/docs/building/meta-yaml.html#templating-with-jinja\")\n with open(join(src_dir, '__conda_version__.txt')) as f:\n version = f.read().strip()\n print(\"Setting version from __conda_version__.txt: %s\" % version)\n m.meta['package']['version'] = version\n if exists(join(src_dir, '__conda_buildnum__.txt')):\n print(\"Deprecation warning: support for __conda_buildnum__ will be removed in Conda build 3.0.\" # noqa\n \"Try Jinja templates instead: \"\n \"http://conda.pydata.org/docs/building/meta-yaml.html#templating-with-jinja\")\n with open(join(src_dir, '__conda_buildnum__.txt')) as f:\n build_number = f.read().strip()\n print(\"Setting build number from __conda_buildnum__.txt: %s\" %\n build_number)\n m.meta['build']['number'] = build_number\n if exists(join(src_dir, '__conda_buildstr__.txt')):\n print(\"Deprecation warning: support for __conda_buildstr__ will be removed in Conda build 3.0.\" # noqa\n \"Try Jinja templates instead: \"\n \"http://conda.pydata.org/docs/building/meta-yaml.html#templating-with-jinja\")\n with open(join(src_dir, '__conda_buildstr__.txt')) as f:\n buildstr = f.read().strip()\n print(\"Setting version from __conda_buildstr__.txt: %s\" % buildstr)\n m.meta['build']['string'] = buildstr\n",
"path": "conda_build/post.py"
}
] | [
{
"content": "from __future__ import absolute_import, division, print_function\n\nfrom collections import defaultdict\nfrom functools import partial\nfrom glob import glob\nimport io\nimport locale\nimport mmap\nimport re\nimport os\nimport fnmatch\nfrom os.path import (basename, dirname, join, splitext, isdir, isfile, exists,\n islink, realpath, relpath, normpath)\nimport stat\nfrom subprocess import call, check_output\nimport sys\ntry:\n from os import readlink\nexcept ImportError:\n readlink = False\n\nfrom conda_build.os_utils import external\nfrom .conda_interface import lchmod\nfrom .conda_interface import walk_prefix\nfrom .conda_interface import md5_file\nfrom .conda_interface import PY3\n\nfrom conda_build import utils\n\nif sys.platform.startswith('linux'):\n from conda_build.os_utils import elf\nelif sys.platform == 'darwin':\n from conda_build.os_utils import macho\n\nSHEBANG_PAT = re.compile(br'^#!.+$', re.M)\n\n\ndef is_obj(path):\n assert sys.platform != 'win32'\n return bool((sys.platform.startswith('linux') and elf.is_elf(path)) or\n (sys.platform == 'darwin' and macho.is_macho(path)))\n\n\ndef fix_shebang(f, prefix, build_python, osx_is_app=False):\n path = join(prefix, f)\n if is_obj(path):\n return\n elif os.path.islink(path):\n return\n elif not os.path.isfile(path):\n return\n\n if os.stat(path).st_size == 0:\n return\n\n with io.open(path, encoding=locale.getpreferredencoding(), mode='r+') as fi:\n try:\n data = fi.read(100)\n except UnicodeDecodeError: # file is binary\n return\n\n # regexp on the memory mapped file so we only read it into\n # memory if the regexp matches.\n mm = mmap.mmap(fi.fileno(), 0)\n m = SHEBANG_PAT.match(mm)\n\n if not (m and b'python' in m.group()):\n return\n\n data = mm[:]\n\n encoding = sys.stdout.encoding or 'utf8'\n\n py_exec = ('/bin/bash ' + prefix + '/bin/python.app'\n if sys.platform == 'darwin' and osx_is_app else\n prefix + '/bin/' + basename(build_python))\n new_data = SHEBANG_PAT.sub(b'#!' 
+ py_exec.encode(encoding), data, count=1)\n if new_data == data:\n return\n print(\"updating shebang:\", f)\n with io.open(path, 'w', encoding=locale.getpreferredencoding()) as fo:\n fo.write(new_data.decode(encoding))\n os.chmod(path, 0o775)\n\n\ndef write_pth(egg_path, config):\n fn = basename(egg_path)\n with open(join(utils.get_site_packages(config.build_prefix),\n '%s.pth' % (fn.split('-')[0])), 'w') as fo:\n fo.write('./%s\\n' % fn)\n\n\ndef remove_easy_install_pth(files, prefix, config, preserve_egg_dir=False):\n \"\"\"\n remove the need for easy-install.pth and finally remove easy-install.pth\n itself\n \"\"\"\n absfiles = [join(prefix, f) for f in files]\n sp_dir = utils.get_site_packages(prefix)\n for egg_path in glob(join(sp_dir, '*-py*.egg')):\n if isdir(egg_path):\n if preserve_egg_dir or not any(join(egg_path, i) in absfiles for i\n in walk_prefix(egg_path, False, windows_forward_slashes=False)):\n write_pth(egg_path, config=config)\n continue\n\n print('found egg dir:', egg_path)\n try:\n os.rename(join(egg_path, 'EGG-INFO'),\n egg_path + '-info')\n except OSError:\n pass\n utils.rm_rf(join(egg_path, 'EGG-INFO'))\n for fn in os.listdir(egg_path):\n if fn == '__pycache__':\n utils.rm_rf(join(egg_path, fn))\n else:\n # this might be a name-space package\n # so the package directory already exists\n # from another installed dependency\n if os.path.exists(join(sp_dir, fn)):\n try:\n utils.copy_into(join(egg_path, fn), join(sp_dir, fn), config.timeout)\n utils.rm_rf(join(egg_path, fn))\n except IOError as e:\n fn = os.path.basename(str(e).split()[-1])\n raise IOError(\"Tried to merge folder {egg_path} into {sp_dir}, but {fn}\"\n \" exists in both locations. Please either add \"\n \"build/preserve_egg_dir: True to meta.yaml, or manually \"\n \"remove the file during your install process to avoid \"\n \"this conflict.\"\n .format(egg_path=egg_path, sp_dir=sp_dir, fn=fn))\n else:\n os.rename(join(egg_path, fn), join(sp_dir, fn))\n\n elif isfile(egg_path):\n if egg_path not in absfiles:\n continue\n print('found egg:', egg_path)\n write_pth(egg_path, config=config)\n\n utils.rm_rf(join(sp_dir, 'easy-install.pth'))\n\n\ndef rm_py_along_so(prefix):\n \"\"\"remove .py (.pyc) files alongside .so or .pyd files\"\"\"\n for root, _, files in os.walk(prefix):\n for fn in files:\n if fn.endswith(('.so', '.pyd')):\n name, _ = splitext(fn)\n for ext in '.py', '.pyc', '.pyo':\n if name + ext in files:\n os.unlink(join(root, name + ext))\n\n\ndef rm_pyo(files, prefix):\n \"\"\"pyo considered harmful: https://www.python.org/dev/peps/pep-0488/\n\n The build may have proceeded with:\n [install]\n optimize = 1\n .. in setup.cfg in which case we can end up with some stdlib __pycache__\n files ending in .opt-N.pyc on Python 3, as well as .pyo files for the\n package's own python. 
\"\"\"\n re_pyo = re.compile(r'.*(?:\\.pyo$|\\.opt-[0-9]\\.pyc)')\n for fn in files:\n if re_pyo.match(fn):\n os.unlink(os.path.join(prefix, fn))\n\n\ndef rm_pyc(files, prefix):\n re_pyc = re.compile(r'.*(?:\\.pyc$)')\n for fn in files:\n if re_pyc.match(fn):\n os.unlink(os.path.join(prefix, fn))\n\n\ndef compile_missing_pyc(files, cwd, python_exe, skip_compile_pyc=()):\n if not os.path.isfile(python_exe):\n return\n compile_files = []\n skip_compile_pyc_n = [os.path.normpath(skip) for skip in skip_compile_pyc]\n skipped_files = set()\n for skip in skip_compile_pyc_n:\n skipped_files.update(set(fnmatch.filter(files, skip)))\n unskipped_files = set(files) - skipped_files\n for fn in unskipped_files:\n # omit files in Library/bin, Scripts, and the root prefix - they are not generally imported\n if sys.platform == 'win32':\n if any([fn.lower().startswith(start) for start in ['library/bin', 'library\\\\bin',\n 'scripts']]):\n continue\n else:\n if fn.startswith('bin'):\n continue\n cache_prefix = (\"__pycache__\" + os.sep) if PY3 else \"\"\n if (fn.endswith(\".py\") and\n os.path.dirname(fn) + cache_prefix + os.path.basename(fn) + 'c' not in files):\n compile_files.append(fn)\n\n if compile_files:\n if not os.path.isfile(python_exe):\n print('compiling .pyc files... failed as no python interpreter was found')\n else:\n print('compiling .pyc files...')\n for f in compile_files:\n call([python_exe, '-Wi', '-m', 'py_compile', f], cwd=cwd)\n\n\ndef post_process(files, prefix, config, preserve_egg_dir=False, noarch=False, skip_compile_pyc=()):\n rm_pyo(files, prefix)\n if noarch:\n rm_pyc(files, prefix)\n else:\n compile_missing_pyc(files, cwd=prefix, python_exe=config.build_python,\n skip_compile_pyc=skip_compile_pyc)\n remove_easy_install_pth(files, prefix, config, preserve_egg_dir=preserve_egg_dir)\n rm_py_along_so(prefix)\n\n\ndef find_lib(link, prefix, path=None):\n from conda_build.build import prefix_files\n files = prefix_files(prefix)\n if link.startswith(prefix):\n link = normpath(link[len(prefix) + 1:])\n if link not in files:\n sys.exit(\"Error: Could not find %s\" % link)\n return link\n if link.startswith('/'): # but doesn't start with the build prefix\n return\n if link.startswith('@rpath/'):\n # Assume the rpath already points to lib, so there is no need to\n # change it.\n return\n if '/' not in link or link.startswith('@executable_path/'):\n link = basename(link)\n file_names = defaultdict(list)\n for f in files:\n file_names[basename(f)].append(f)\n if link not in file_names:\n sys.exit(\"Error: Could not find %s\" % link)\n if len(file_names[link]) > 1:\n if path and basename(path) == link:\n # The link is for the file itself, just use it\n return path\n # Allow for the possibility of the same library appearing in\n # multiple places.\n md5s = set()\n for f in file_names[link]:\n md5s.add(md5_file(join(prefix, f)))\n if len(md5s) > 1:\n sys.exit(\"Error: Found multiple instances of %s: %s\" % (link, file_names[link]))\n else:\n file_names[link].sort()\n print(\"Found multiple instances of %s (%s). 
\"\n \"Choosing the first one.\" % (link, file_names[link]))\n return file_names[link][0]\n print(\"Don't know how to find %s, skipping\" % link)\n\n\ndef osx_ch_link(path, link_dict, prefix):\n link = link_dict['name']\n print(\"Fixing linking of %s in %s\" % (link, path))\n link_loc = find_lib(link, prefix, path)\n if not link_loc:\n return\n\n lib_to_link = relpath(dirname(link_loc), 'lib')\n # path_to_lib = utils.relative(path[len(prefix) + 1:])\n\n # e.g., if\n # path = '/build_prefix/lib/some/stuff/libstuff.dylib'\n # link_loc = 'lib/things/libthings.dylib'\n\n # then\n\n # lib_to_link = 'things'\n # path_to_lib = '../..'\n\n # @rpath always means 'lib', link will be at\n # @rpath/lib_to_link/basename(link), like @rpath/things/libthings.dylib.\n\n # For when we can't use @rpath, @loader_path means the path to the library\n # ('path'), so from path to link is\n # @loader_path/path_to_lib/lib_to_link/basename(link), like\n # @loader_path/../../things/libthings.dylib.\n\n ret = '@rpath/%s/%s' % (lib_to_link, basename(link))\n\n # XXX: IF the above fails for whatever reason, the below can be used\n # TODO: This might contain redundant ..'s if link and path are both in\n # some subdirectory of lib.\n # ret = '@loader_path/%s/%s/%s' % (path_to_lib, lib_to_link, basename(link))\n\n ret = ret.replace('/./', '/')\n\n return ret\n\n\ndef mk_relative_osx(path, prefix, build_prefix=None):\n '''\n if build_prefix is None, the_n this is a standard conda build. The path\n and all dependencies are in the build_prefix.\n\n if package is built in develop mode, build_prefix is specified. Object\n specified by 'path' needs to relink runtime dependences to libs found in\n build_prefix/lib/. Also, in develop mode, 'path' is not in 'build_prefix'\n '''\n if build_prefix is None:\n assert path.startswith(prefix + '/')\n else:\n prefix = build_prefix\n\n assert sys.platform == 'darwin' and is_obj(path)\n s = macho.install_name_change(path, partial(osx_ch_link, prefix=prefix))\n\n names = macho.otool(path)\n if names:\n # Add an rpath to every executable to increase the chances of it\n # being found.\n rpath = join('@loader_path',\n relpath(join(prefix, 'lib'),\n dirname(path)), '').replace('/./', '/')\n macho.add_rpath(path, rpath, verbose=True)\n\n # 10.7 install_name_tool -delete_rpath causes broken dylibs, I will revisit this ASAP.\n # .. and remove config.build_prefix/lib which was added in-place of\n # DYLD_FALLBACK_LIBRARY_PATH since El Capitan's SIP.\n # macho.delete_rpath(path, config.build_prefix + '/lib', verbose = True)\n\n if s:\n # Skip for stub files, which have to use binary_has_prefix_files to be\n # made relocatable.\n assert_relative_osx(path, prefix)\n\n\ndef mk_relative_linux(f, prefix, rpaths=('lib',)):\n 'Respects the original values and converts abs to $ORIGIN-relative'\n\n elf = join(prefix, f)\n origin = dirname(elf)\n\n patchelf = external.find_executable('patchelf', prefix)\n try:\n existing = check_output([patchelf, '--print-rpath', elf]).decode('utf-8').splitlines()[0]\n except:\n print('patchelf: --print-rpath failed for %s\\n' % (elf))\n return\n existing = existing.split(os.pathsep)\n new = []\n for old in existing:\n if old.startswith('$ORIGIN/'):\n new.append(old)\n elif old.startswith('/'):\n # Test if this absolute path is outside of prefix. That is fatal.\n relpath = os.path.relpath(old, prefix)\n if relpath.startswith('..' 
+ os.sep):\n print('Warning: rpath {0} is outside prefix {1} (removing it)'.format(old, prefix))\n else:\n relpath = '$ORIGIN/' + os.path.relpath(old, origin)\n if relpath not in new:\n new.append(relpath)\n # Ensure that the asked-for paths are also in new.\n for rpath in rpaths:\n if not rpath.startswith('/'):\n # IMHO utils.relative shouldn't exist, but I am too paranoid to remove\n # it, so instead, make sure that what I think it should be replaced by\n # gives the same result and assert if not. Yeah, I am a chicken.\n rel_ours = os.path.normpath(utils.relative(f, rpath))\n rel_stdlib = os.path.normpath(os.path.relpath(rpath, os.path.dirname(f)))\n assert rel_ours == rel_stdlib, \\\n 'utils.relative {0} and relpath {1} disagree for {2}, {3}'.format(\n rel_ours, rel_stdlib, f, rpath)\n rpath = '$ORIGIN/' + rel_stdlib\n if rpath not in new:\n new.append(rpath)\n rpath = ':'.join(new)\n print('patchelf: file: %s\\n setting rpath to: %s' % (elf, rpath))\n call([patchelf, '--force-rpath', '--set-rpath', rpath, elf])\n\n\ndef assert_relative_osx(path, prefix):\n for name in macho.get_dylibs(path):\n assert not name.startswith(prefix), path\n\n\ndef mk_relative(m, f, prefix):\n assert sys.platform != 'win32'\n path = join(prefix, f)\n if not is_obj(path):\n return\n\n if sys.platform.startswith('linux'):\n mk_relative_linux(f, prefix=prefix, rpaths=m.get_value('build/rpaths', ['lib']))\n elif sys.platform == 'darwin':\n mk_relative_osx(path, prefix=prefix)\n\n\ndef fix_permissions(files, prefix):\n print(\"Fixing permissions\")\n for root, dirs, _ in os.walk(prefix):\n for dn in dirs:\n lchmod(join(root, dn), 0o775)\n\n for f in files:\n path = join(prefix, f)\n st = os.lstat(path)\n old_mode = stat.S_IMODE(st.st_mode)\n new_mode = old_mode\n # broadcast execute\n if old_mode & stat.S_IXUSR:\n new_mode = new_mode | stat.S_IXGRP | stat.S_IXOTH\n # ensure user and group can write and all can read\n new_mode = new_mode | stat.S_IWUSR | stat.S_IWGRP | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH # noqa\n if old_mode != new_mode:\n lchmod(path, new_mode)\n\n\ndef post_build(m, files, prefix, build_python, croot):\n print('number of files:', len(files))\n fix_permissions(files, prefix)\n\n for f in files:\n make_hardlink_copy(f, prefix)\n\n if sys.platform == 'win32':\n return\n\n binary_relocation = m.binary_relocation()\n if not binary_relocation:\n print(\"Skipping binary relocation logic\")\n osx_is_app = bool(m.get_value('build/osx_is_app', False))\n\n check_symlinks(files, prefix, croot)\n\n for f in files:\n if f.startswith('bin/'):\n fix_shebang(f, prefix=prefix, build_python=build_python, osx_is_app=osx_is_app)\n if binary_relocation is True or (isinstance(f, list) and f in binary_relocation):\n mk_relative(m, f, prefix)\n\n\ndef check_symlinks(files, prefix, croot):\n if readlink is False:\n return # Not on Unix system\n msgs = []\n real_build_prefix = realpath(prefix)\n for f in files:\n path = join(real_build_prefix, f)\n if islink(path):\n link_path = readlink(path)\n real_link_path = realpath(path)\n # symlinks to binaries outside of the same dir don't work. 
RPATH stuff gets confused\n # because ld.so follows symlinks in RPATHS\n # If condition exists, then copy the file rather than symlink it.\n if (not os.path.dirname(link_path) == os.path.dirname(real_link_path) and\n is_obj(f)):\n os.remove(path)\n utils.copy_into(real_link_path, path)\n elif real_link_path.startswith(real_build_prefix):\n # If the path is in the build prefix, this is fine, but\n # the link needs to be relative\n if not link_path.startswith('.'):\n # Don't change the link structure if it is already a\n # relative link. It's possible that ..'s later in the path\n # can result in a broken link still, but we'll assume that\n # such crazy things don't happen.\n print(\"Making absolute symlink %s -> %s relative\" % (f, link_path))\n os.unlink(path)\n os.symlink(relpath(real_link_path, dirname(path)), path)\n else:\n # Symlinks to absolute paths on the system (like /usr) are fine.\n if real_link_path.startswith(croot):\n msgs.append(\"%s is a symlink to a path that may not \"\n \"exist after the build is completed (%s)\" % (f, link_path))\n\n if msgs:\n for msg in msgs:\n print(\"Error: %s\" % msg, file=sys.stderr)\n sys.exit(1)\n\n\ndef make_hardlink_copy(path, prefix):\n \"\"\"Hardlinks create invalid packages. Copy files to break the link.\n Symlinks are OK, and unaffected here.\"\"\"\n if not os.path.isabs(path) and not os.path.exists(path):\n path = os.path.normpath(os.path.join(prefix, path))\n nlinks = os.lstat(path).st_nlink\n dest = 'tmpfile'\n if os.path.isabs(path):\n dest = os.path.join(os.getcwd(), dest)\n if nlinks > 1:\n # copy file to new name\n utils.copy_into(path, dest)\n # remove old file\n utils.rm_rf(path)\n # rename copy to original filename\n utils.copy_into(dest, path)\n utils.rm_rf(dest)\n\n\ndef get_build_metadata(m, config):\n src_dir = config.work_dir\n\n if \"build\" not in m.meta:\n m.meta[\"build\"] = {}\n if exists(join(src_dir, '__conda_version__.txt')):\n print(\"Deprecation warning: support for __conda_version__ will be removed in Conda build 3.0.\" # noqa\n \"Try Jinja templates instead: \"\n \"http://conda.pydata.org/docs/building/meta-yaml.html#templating-with-jinja\")\n with open(join(src_dir, '__conda_version__.txt')) as f:\n version = f.read().strip()\n print(\"Setting version from __conda_version__.txt: %s\" % version)\n m.meta['package']['version'] = version\n if exists(join(src_dir, '__conda_buildnum__.txt')):\n print(\"Deprecation warning: support for __conda_buildnum__ will be removed in Conda build 3.0.\" # noqa\n \"Try Jinja templates instead: \"\n \"http://conda.pydata.org/docs/building/meta-yaml.html#templating-with-jinja\")\n with open(join(src_dir, '__conda_buildnum__.txt')) as f:\n build_number = f.read().strip()\n print(\"Setting build number from __conda_buildnum__.txt: %s\" %\n build_number)\n m.meta['build']['number'] = build_number\n if exists(join(src_dir, '__conda_buildstr__.txt')):\n print(\"Deprecation warning: support for __conda_buildstr__ will be removed in Conda build 3.0.\" # noqa\n \"Try Jinja templates instead: \"\n \"http://conda.pydata.org/docs/building/meta-yaml.html#templating-with-jinja\")\n with open(join(src_dir, '__conda_buildstr__.txt')) as f:\n buildstr = f.read().strip()\n print(\"Setting version from __conda_buildstr__.txt: %s\" % buildstr)\n m.meta['build']['string'] = buildstr\n",
"path": "conda_build/post.py"
}
] | diff --git a/conda_build/post.py b/conda_build/post.py
index 1c9c67d36b..ef42d9cf1c 100644
--- a/conda_build/post.py
+++ b/conda_build/post.py
@@ -176,6 +176,8 @@ def rm_pyc(files, prefix):
def compile_missing_pyc(files, cwd, python_exe, skip_compile_pyc=()):
+ if not os.path.isfile(python_exe):
+ return
compile_files = []
skip_compile_pyc_n = [os.path.normpath(skip) for skip in skip_compile_pyc]
skipped_files = set()
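For readers skimming the hunk above: the added guard makes `compile_missing_pyc` a no-op when the build prefix contains no Python interpreter (e.g., a pure C/C++ package), presumably so the function does not fail later when `python_exe` is invoked. A standalone sketch of the guarded entry point (the body after the first few lines is elided; the early return is the only change):

```python
import os

def compile_missing_pyc(files, cwd, python_exe, skip_compile_pyc=()):
    # No interpreter in the build prefix (e.g., a non-Python package):
    # nothing to byte-compile, so bail out early instead of invoking a
    # nonexistent python_exe further down.
    if not os.path.isfile(python_exe):
        return
    compile_files = []
    skip_compile_pyc_n = [os.path.normpath(skip) for skip in skip_compile_pyc]
    skipped_files = set()
    ...  # remainder unchanged in conda_build/post.py
```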
|
nautobot__nautobot-2179 | [1.4] Jobs Result Filter - Error when Search field "Status" is empty
### Environment
* Python version: 3.9
* Nautobot version: e02bb08e6c2a (v1.4.0rc1)
### Steps to Reproduce
1. Open the Job Results page
2. Filter by a "Job"
3. Click "Apply"
### Expected Behavior
The Job Results table should be populated with the selected entries.
### Observed Behavior
Error message:
> Invalid filters were specified:
> * status
> * Select a valid choice. is not one of the available choices.
When selecting e.g. "Completed" for the status, it works fine.
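
A plausible root cause, judging from the error (not confirmed in this report): the filter form declares `status` as a single-select `forms.ChoiceField` with a blank choice prepended, while the underlying filterset validates `status` as a multiple-choice filter, so the empty string submitted by the blank option is rejected as an invalid choice. A minimal sketch of a fix along those lines, reusing the `StaticSelect2Multiple` widget and `JobResultStatusChoices` that already appear in `nautobot/extras/forms/forms.py` below (the `BootstrapMixin` base and the form's other fields are omitted):

```python
from django import forms

from nautobot.extras.choices import JobResultStatusChoices
from nautobot.utilities.forms import StaticSelect2Multiple


class JobResultFilterForm(forms.Form):
    # Before: forms.ChoiceField(choices=add_blank_choice(JobResultStatusChoices), ...).
    # Leaving the single-select blank submits status="", which a
    # multiple-choice filter treats as the (invalid) choice "".
    # A multi-select with required=False simply omits the parameter
    # when nothing is chosen, so an empty filter no longer errors.
    status = forms.MultipleChoiceField(
        choices=JobResultStatusChoices,
        required=False,
        widget=StaticSelect2Multiple(),
    )
```

This mirrors how `RelationshipFilterForm.type` is already declared elsewhere in the same file.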
| [
{
"content": "from django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ValidationError\nfrom django.db.models.fields import TextField\nfrom django.forms import ModelMultipleChoiceField, inlineformset_factory\nfrom django.urls.base import reverse\nfrom django.utils.safestring import mark_safe\n\nfrom nautobot.dcim.models import DeviceRole, DeviceType, Location, Platform, Region, Site\nfrom nautobot.tenancy.models import Tenant, TenantGroup\nfrom nautobot.utilities.deprecation import class_deprecated_in_favor_of\nfrom nautobot.utilities.forms import (\n add_blank_choice,\n APISelect,\n APISelectMultiple,\n BootstrapMixin,\n BulkEditForm,\n BulkEditNullBooleanSelect,\n ColorSelect,\n CommentField,\n CSVContentTypeField,\n CSVModelChoiceField,\n CSVModelForm,\n CSVMultipleChoiceField,\n CSVMultipleContentTypeField,\n DateTimePicker,\n DynamicModelChoiceField,\n DynamicModelMultipleChoiceField,\n JSONField,\n MultipleContentTypeField,\n SlugField,\n StaticSelect2,\n StaticSelect2Multiple,\n TagFilterField,\n)\nfrom nautobot.utilities.forms.constants import BOOLEAN_WITH_BLANK_CHOICES\nfrom nautobot.virtualization.models import Cluster, ClusterGroup\nfrom nautobot.extras.choices import (\n JobExecutionType,\n JobResultStatusChoices,\n ObjectChangeActionChoices,\n RelationshipTypeChoices,\n)\nfrom nautobot.extras.constants import JOB_OVERRIDABLE_FIELDS\nfrom nautobot.extras.datasources import get_datasource_content_choices\nfrom nautobot.extras.models import (\n ComputedField,\n ConfigContext,\n ConfigContextSchema,\n CustomField,\n CustomFieldChoice,\n CustomLink,\n DynamicGroup,\n DynamicGroupMembership,\n ExportTemplate,\n GitRepository,\n GraphQLQuery,\n ImageAttachment,\n Job,\n JobHook,\n JobResult,\n Note,\n ObjectChange,\n Relationship,\n RelationshipAssociation,\n ScheduledJob,\n Secret,\n SecretsGroup,\n SecretsGroupAssociation,\n Status,\n Tag,\n Webhook,\n)\nfrom nautobot.extras.registry import registry\nfrom nautobot.extras.utils import ChangeLoggedModelsQuery, FeatureQuery, TaggableClassesQuery\nfrom .base import (\n NautobotBulkEditForm,\n NautobotFilterForm,\n NautobotModelForm,\n)\nfrom .mixins import (\n CustomFieldModelBulkEditFormMixin,\n CustomFieldModelFormMixin,\n RelationshipModelFormMixin,\n)\n\n\n__all__ = (\n \"BaseDynamicGroupMembershipFormSet\",\n \"ComputedFieldForm\",\n \"ComputedFieldFilterForm\",\n \"ConfigContextForm\",\n \"ConfigContextBulkEditForm\",\n \"ConfigContextFilterForm\",\n \"ConfigContextSchemaForm\",\n \"ConfigContextSchemaBulkEditForm\",\n \"ConfigContextSchemaFilterForm\",\n \"CustomFieldForm\",\n \"CustomFieldModelCSVForm\",\n \"CustomFieldBulkCreateForm\", # 2.0 TODO remove this deprecated class\n \"CustomFieldChoiceFormSet\",\n \"CustomLinkForm\",\n \"CustomLinkFilterForm\",\n \"DynamicGroupForm\",\n \"DynamicGroupFilterForm\",\n \"DynamicGroupMembershipFormSet\",\n \"ExportTemplateForm\",\n \"ExportTemplateFilterForm\",\n \"GitRepositoryForm\",\n \"GitRepositoryCSVForm\",\n \"GitRepositoryBulkEditForm\",\n \"GitRepositoryFilterForm\",\n \"GraphQLQueryForm\",\n \"GraphQLQueryFilterForm\",\n \"ImageAttachmentForm\",\n \"JobForm\",\n \"JobEditForm\",\n \"JobFilterForm\",\n \"JobHookForm\",\n \"JobHookFilterForm\",\n \"JobScheduleForm\",\n \"JobResultFilterForm\",\n \"LocalContextFilterForm\",\n \"LocalContextModelForm\",\n \"LocalContextModelBulkEditForm\",\n \"NoteForm\",\n \"ObjectChangeFilterForm\",\n \"PasswordInputWithPlaceholder\",\n 
\"RelationshipForm\",\n \"RelationshipFilterForm\",\n \"RelationshipAssociationFilterForm\",\n \"ScheduledJobFilterForm\",\n \"SecretForm\",\n \"SecretCSVForm\",\n \"SecretFilterForm\",\n \"SecretsGroupForm\",\n \"SecretsGroupFilterForm\",\n \"SecretsGroupAssociationFormSet\",\n \"StatusForm\",\n \"StatusCSVForm\",\n \"StatusFilterForm\",\n \"StatusBulkEditForm\",\n \"TagForm\",\n \"TagCSVForm\",\n \"TagFilterForm\",\n \"TagBulkEditForm\",\n \"WebhookForm\",\n \"WebhookFilterForm\",\n)\n\n\n#\n# Computed Fields\n#\n\n\nclass ComputedFieldForm(BootstrapMixin, forms.ModelForm):\n\n content_type = forms.ModelChoiceField(\n queryset=ContentType.objects.filter(FeatureQuery(\"custom_fields\").get_query()).order_by(\"app_label\", \"model\"),\n required=True,\n label=\"Content Type\",\n )\n slug = SlugField(\n slug_source=\"label\",\n help_text=\"Internal name of this field. Please use underscores rather than dashes.\",\n )\n template = forms.CharField(\n widget=forms.Textarea,\n help_text=(\n \"Jinja2 template code for field value.<br>\"\n \"Use <code>obj</code> to refer to the object to which this computed field is attached.\"\n ),\n )\n\n class Meta:\n model = ComputedField\n fields = (\n \"content_type\",\n \"label\",\n \"slug\",\n \"description\",\n \"template\",\n \"fallback_value\",\n \"weight\",\n \"advanced_ui\",\n )\n\n\nclass ComputedFieldFilterForm(BootstrapMixin, forms.Form):\n model = ComputedField\n q = forms.CharField(required=False, label=\"Search\")\n content_type = CSVContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"custom_fields\").get_query()).order_by(\"app_label\", \"model\"),\n required=False,\n label=\"Content Type\",\n )\n\n\n#\n# Config contexts\n#\n\n\nclass ConfigContextForm(BootstrapMixin, forms.ModelForm):\n regions = DynamicModelMultipleChoiceField(queryset=Region.objects.all(), required=False)\n sites = DynamicModelMultipleChoiceField(queryset=Site.objects.all(), required=False)\n locations = DynamicModelMultipleChoiceField(queryset=Location.objects.all(), required=False)\n roles = DynamicModelMultipleChoiceField(queryset=DeviceRole.objects.all(), required=False)\n device_types = DynamicModelMultipleChoiceField(queryset=DeviceType.objects.all(), required=False)\n platforms = DynamicModelMultipleChoiceField(queryset=Platform.objects.all(), required=False)\n cluster_groups = DynamicModelMultipleChoiceField(queryset=ClusterGroup.objects.all(), required=False)\n clusters = DynamicModelMultipleChoiceField(queryset=Cluster.objects.all(), required=False)\n tenant_groups = DynamicModelMultipleChoiceField(queryset=TenantGroup.objects.all(), required=False)\n tenants = DynamicModelMultipleChoiceField(queryset=Tenant.objects.all(), required=False)\n\n data = JSONField(label=\"\")\n\n class Meta:\n model = ConfigContext\n fields = (\n \"name\",\n \"weight\",\n \"description\",\n \"schema\",\n \"is_active\",\n \"regions\",\n \"sites\",\n \"locations\",\n \"roles\",\n \"device_types\",\n \"platforms\",\n \"cluster_groups\",\n \"clusters\",\n \"tenant_groups\",\n \"tenants\",\n \"tags\",\n \"data\",\n )\n\n\nclass ConfigContextBulkEditForm(BootstrapMixin, BulkEditForm):\n pk = forms.ModelMultipleChoiceField(queryset=ConfigContext.objects.all(), widget=forms.MultipleHiddenInput)\n schema = DynamicModelChoiceField(queryset=ConfigContextSchema.objects.all(), required=False)\n weight = forms.IntegerField(required=False, min_value=0)\n is_active = forms.NullBooleanField(required=False, widget=BulkEditNullBooleanSelect())\n description = 
forms.CharField(required=False, max_length=100)\n\n class Meta:\n nullable_fields = [\n \"description\",\n \"schema\",\n ]\n\n\nclass ConfigContextFilterForm(BootstrapMixin, forms.Form):\n q = forms.CharField(required=False, label=\"Search\")\n # FIXME(glenn) filtering by owner_content_type\n schema = DynamicModelChoiceField(queryset=ConfigContextSchema.objects.all(), to_field_name=\"slug\", required=False)\n region = DynamicModelMultipleChoiceField(queryset=Region.objects.all(), to_field_name=\"slug\", required=False)\n site = DynamicModelMultipleChoiceField(queryset=Site.objects.all(), to_field_name=\"slug\", required=False)\n location = DynamicModelMultipleChoiceField(queryset=Location.objects.all(), to_field_name=\"slug\", required=False)\n role = DynamicModelMultipleChoiceField(queryset=DeviceRole.objects.all(), to_field_name=\"slug\", required=False)\n type = DynamicModelMultipleChoiceField(queryset=DeviceType.objects.all(), to_field_name=\"slug\", required=False)\n platform = DynamicModelMultipleChoiceField(queryset=Platform.objects.all(), to_field_name=\"slug\", required=False)\n cluster_group = DynamicModelMultipleChoiceField(\n queryset=ClusterGroup.objects.all(), to_field_name=\"slug\", required=False\n )\n cluster_id = DynamicModelMultipleChoiceField(queryset=Cluster.objects.all(), required=False, label=\"Cluster\")\n tenant_group = DynamicModelMultipleChoiceField(\n queryset=TenantGroup.objects.all(), to_field_name=\"slug\", required=False\n )\n tenant = DynamicModelMultipleChoiceField(queryset=Tenant.objects.all(), to_field_name=\"slug\", required=False)\n tag = DynamicModelMultipleChoiceField(queryset=Tag.objects.all(), to_field_name=\"slug\", required=False)\n\n\n#\n# Config context schemas\n#\n\n\nclass ConfigContextSchemaForm(NautobotModelForm):\n data_schema = JSONField(label=\"\")\n slug = SlugField()\n\n class Meta:\n model = ConfigContextSchema\n fields = (\n \"name\",\n \"slug\",\n \"description\",\n \"data_schema\",\n )\n\n\nclass ConfigContextSchemaBulkEditForm(NautobotBulkEditForm):\n pk = forms.ModelMultipleChoiceField(queryset=ConfigContextSchema.objects.all(), widget=forms.MultipleHiddenInput)\n description = forms.CharField(required=False, max_length=100)\n\n class Meta:\n nullable_fields = [\n \"description\",\n ]\n\n\nclass ConfigContextSchemaFilterForm(BootstrapMixin, forms.Form):\n q = forms.CharField(required=False, label=\"Search\")\n # FIXME(glenn) filtering by owner_content_type\n\n\n#\n# Custom fields\n#\n\n\n# CustomFieldChoice inline formset for use with providing dynamic rows when creating/editing choices\n# for `CustomField` objects in UI views. Fields/exclude must be set but since we're using all the\n# fields we're just setting `exclude=()` here.\nCustomFieldChoiceFormSet = inlineformset_factory(\n parent_model=CustomField,\n model=CustomFieldChoice,\n exclude=(),\n extra=5,\n widgets={\n \"value\": forms.TextInput(attrs={\"class\": \"form-control\"}),\n \"weight\": forms.NumberInput(attrs={\"class\": \"form-control\"}),\n },\n)\n\n\nclass CustomFieldForm(BootstrapMixin, forms.ModelForm):\n label = forms.CharField(required=True, max_length=50, help_text=\"Name of the field as displayed to users.\")\n slug = SlugField(\n max_length=50,\n slug_source=\"label\",\n help_text=\"Internal name of this field. 
Please use underscores rather than dashes.\",\n )\n description = forms.CharField(\n required=False,\n help_text=\"Also used as the help text when editing models using this custom field.<br>\"\n '<a href=\"https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet\" target=\"_blank\">'\n \"Markdown</a> syntax is supported.\",\n )\n content_types = MultipleContentTypeField(\n feature=\"custom_fields\", help_text=\"The object(s) to which this field applies.\"\n )\n\n class Meta:\n model = CustomField\n fields = (\n \"label\",\n \"slug\",\n \"type\",\n \"weight\",\n \"description\",\n \"required\",\n \"default\",\n \"filter_logic\",\n \"advanced_ui\",\n \"content_types\",\n \"validation_minimum\",\n \"validation_maximum\",\n \"validation_regex\",\n )\n\n\nclass CustomFieldModelCSVForm(CSVModelForm, CustomFieldModelFormMixin):\n \"\"\"Base class for CSV export of models that support custom fields.\"\"\"\n\n def _append_customfield_fields(self):\n\n # Append form fields\n for cf in CustomField.objects.filter(content_types=self.obj_type):\n field_name = \"cf_{}\".format(cf.slug)\n self.fields[field_name] = cf.to_form_field(for_csv_import=True)\n\n # Annotate the field in the list of CustomField form fields\n self.custom_fields.append(field_name)\n\n\n# 2.0 TODO: remove this class\n@class_deprecated_in_favor_of(CustomFieldModelBulkEditFormMixin)\nclass CustomFieldBulkCreateForm(CustomFieldModelBulkEditFormMixin):\n \"\"\"No longer needed as a separate class - use CustomFieldModelBulkEditFormMixin instead.\"\"\"\n\n\n#\n# Custom Links\n#\n\n\nclass CustomLinkForm(BootstrapMixin, forms.ModelForm):\n content_type = forms.ModelChoiceField(\n queryset=ContentType.objects.filter(FeatureQuery(\"custom_links\").get_query()).order_by(\"app_label\", \"model\"),\n label=\"Content Type\",\n )\n\n class Meta:\n model = CustomLink\n fields = (\n \"content_type\",\n \"name\",\n \"text\",\n \"target_url\",\n \"weight\",\n \"group_name\",\n \"button_class\",\n \"new_window\",\n )\n\n\nclass CustomLinkFilterForm(BootstrapMixin, forms.Form):\n model = CustomLink\n q = forms.CharField(required=False, label=\"Search\")\n content_type = CSVContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"custom_links\").get_query()).order_by(\"app_label\", \"model\"),\n required=False,\n label=\"Content Type\",\n )\n\n\n#\n# Dynamic Groups\n#\n\n\nclass DynamicGroupForm(NautobotModelForm):\n \"\"\"DynamicGroup model form.\"\"\"\n\n slug = SlugField()\n content_type = CSVContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"dynamic_groups\").get_query()).order_by(\"app_label\", \"model\"),\n label=\"Content Type\",\n )\n\n class Meta:\n model = DynamicGroup\n fields = [\n \"name\",\n \"slug\",\n \"description\",\n \"content_type\",\n ]\n\n\nclass DynamicGroupMembershipFormSetForm(forms.ModelForm):\n \"\"\"DynamicGroupMembership model form for use inline on DynamicGroupFormSet.\"\"\"\n\n group = DynamicModelChoiceField(\n queryset=DynamicGroup.objects.all(),\n query_params={\"content_type\": \"$content_type\"},\n )\n\n class Meta:\n model = DynamicGroupMembership\n fields = (\"operator\", \"group\", \"weight\")\n\n\n# Inline formset for use with providing dynamic rows when creating/editing memberships of child\n# DynamicGroups to a parent DynamicGroup.\nBaseDynamicGroupMembershipFormSet = inlineformset_factory(\n parent_model=DynamicGroup,\n model=DynamicGroupMembership,\n form=DynamicGroupMembershipFormSetForm,\n extra=4,\n fk_name=\"parent_group\",\n widgets={\n \"operator\": 
StaticSelect2,\n \"weight\": forms.HiddenInput(),\n },\n)\n\n\nclass DynamicGroupMembershipFormSet(BaseDynamicGroupMembershipFormSet):\n \"\"\"\n Inline formset for use with providing dynamic rows when creating/editing memberships of child\n groups to a parent DynamicGroup.\n \"\"\"\n\n\nclass DynamicGroupFilterForm(BootstrapMixin, forms.Form):\n \"\"\"DynamicGroup filter form.\"\"\"\n\n model = DynamicGroup\n q = forms.CharField(required=False, label=\"Search\")\n content_type = MultipleContentTypeField(feature=\"dynamic_groups\", choices_as_strings=True, label=\"Content Type\")\n\n\n#\n# Export Templates\n#\n\n\nclass ExportTemplateForm(BootstrapMixin, forms.ModelForm):\n content_type = forms.ModelChoiceField(\n queryset=ContentType.objects.filter(FeatureQuery(\"export_templates\").get_query()).order_by(\n \"app_label\", \"model\"\n ),\n label=\"Content Type\",\n )\n\n class Meta:\n model = ExportTemplate\n fields = (\n \"content_type\",\n \"name\",\n \"description\",\n \"template_code\",\n \"mime_type\",\n \"file_extension\",\n )\n\n\nclass ExportTemplateFilterForm(BootstrapMixin, forms.Form):\n model = ExportTemplate\n q = forms.CharField(required=False, label=\"Search\")\n content_type = CSVContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"export_templates\").get_query()).order_by(\n \"app_label\", \"model\"\n ),\n required=False,\n label=\"Content Type\",\n )\n\n\n#\n# Git repositories and other data sources\n#\n\n\ndef get_git_datasource_content_choices():\n return get_datasource_content_choices(\"extras.gitrepository\")\n\n\nclass PasswordInputWithPlaceholder(forms.PasswordInput):\n \"\"\"PasswordInput that is populated with a placeholder value if any existing value is present.\"\"\"\n\n def __init__(self, attrs=None, placeholder=\"\", render_value=False):\n if placeholder:\n render_value = True\n self._placeholder = placeholder\n super().__init__(attrs=attrs, render_value=render_value)\n\n def get_context(self, name, value, attrs):\n if value:\n value = self._placeholder\n return super().get_context(name, value, attrs)\n\n\nclass GitRepositoryForm(BootstrapMixin, RelationshipModelFormMixin):\n\n slug = SlugField(help_text=\"Filesystem-friendly unique shorthand\")\n\n remote_url = forms.URLField(\n required=True,\n label=\"Remote URL\",\n help_text=\"Only http:// and https:// URLs are presently supported\",\n )\n\n _token = forms.CharField(\n required=False,\n label=\"Token\",\n widget=PasswordInputWithPlaceholder(placeholder=GitRepository.TOKEN_PLACEHOLDER),\n help_text=\"<em>Deprecated</em> - use a secrets group instead.\",\n )\n\n username = forms.CharField(\n required=False,\n label=\"Username\",\n help_text=\"Username for token authentication.<br><em>Deprecated</em> - use a secrets group instead\",\n )\n\n secrets_group = DynamicModelChoiceField(required=False, queryset=SecretsGroup.objects.all())\n\n provided_contents = forms.MultipleChoiceField(\n required=False,\n label=\"Provides\",\n choices=get_git_datasource_content_choices,\n )\n\n class Meta:\n model = GitRepository\n fields = [\n \"name\",\n \"slug\",\n \"remote_url\",\n \"branch\",\n \"username\",\n \"_token\",\n \"secrets_group\",\n \"provided_contents\",\n \"tags\",\n ]\n\n def clean(self):\n super().clean()\n\n # set dryrun after a successful clean\n if \"_dryrun_create\" in self.data or \"_dryrun_update\" in self.data:\n self.instance.set_dryrun()\n\n\nclass GitRepositoryCSVForm(CSVModelForm):\n secrets_group = CSVModelChoiceField(\n queryset=SecretsGroup.objects.all(),\n 
to_field_name=\"name\",\n required=False,\n help_text=\"Secrets group for repository access (if any)\",\n )\n\n class Meta:\n model = GitRepository\n fields = GitRepository.csv_headers\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"provided_contents\"] = CSVMultipleChoiceField(\n choices=get_git_datasource_content_choices(),\n required=False,\n help_text=mark_safe(\n \"The data types this repository provides. Multiple values must be comma-separated and wrapped in \"\n 'double quotes (e.g. <code>\"extras.job,extras.configcontext\"</code>).'\n ),\n )\n\n\nclass GitRepositoryBulkEditForm(NautobotBulkEditForm):\n pk = forms.ModelMultipleChoiceField(\n queryset=GitRepository.objects.all(),\n widget=forms.MultipleHiddenInput(),\n )\n remote_url = forms.CharField(\n label=\"Remote URL\",\n required=False,\n )\n branch = forms.CharField(\n required=False,\n )\n _token = forms.CharField(\n required=False,\n label=\"Token\",\n widget=PasswordInputWithPlaceholder(placeholder=GitRepository.TOKEN_PLACEHOLDER),\n help_text=\"<em>Deprecated</em> - use a secrets group instead.\",\n )\n username = forms.CharField(\n required=False,\n label=\"Username\",\n help_text=\"<em>Deprecated</em> - use a secrets group instead.\",\n )\n\n secrets_group = DynamicModelChoiceField(required=False, queryset=SecretsGroup.objects.all())\n\n class Meta:\n model = GitRepository\n nullable_fields = [\"secrets_group\"]\n\n\nclass GitRepositoryFilterForm(BootstrapMixin, forms.Form):\n model = GitRepository\n q = forms.CharField(required=False, label=\"Search\")\n name = forms.CharField(required=False)\n branch = forms.CharField(required=False)\n provided_contents = forms.ChoiceField(\n required=False,\n label=\"Provides\",\n choices=add_blank_choice(get_git_datasource_content_choices()),\n )\n\n\n#\n# GraphQL saved queries\n#\n\n\nclass GraphQLQueryForm(BootstrapMixin, forms.ModelForm):\n slug = SlugField()\n query = TextField()\n\n class Meta:\n model = GraphQLQuery\n fields = (\n \"name\",\n \"slug\",\n \"query\",\n )\n\n def get_action_url(self):\n return reverse(\"extras:graphqlquery_add\")\n\n\nclass GraphQLQueryFilterForm(BootstrapMixin, forms.Form):\n model = GraphQLQuery\n q = forms.CharField(required=False, label=\"Search\")\n\n\n#\n# Image attachments\n#\n\n\nclass ImageAttachmentForm(BootstrapMixin, forms.ModelForm):\n class Meta:\n model = ImageAttachment\n fields = [\n \"name\",\n \"image\",\n ]\n\n\n#\n# Jobs\n#\n\n\nclass JobForm(BootstrapMixin, forms.Form):\n \"\"\"\n This form is used to render the user input fields for a Job class. Its fields are dynamically\n controlled by the job definition. 
See `nautobot.extras.jobs.BaseJob.as_form`\n \"\"\"\n\n _commit = forms.BooleanField(\n required=False,\n initial=True,\n label=\"Commit changes\",\n help_text=\"Commit changes to the database (uncheck for a dry-run)\",\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Move _commit to the end of the form\n commit = self.fields.pop(\"_commit\")\n self.fields[\"_commit\"] = commit\n\n @property\n def requires_input(self):\n \"\"\"\n A boolean indicating whether the form requires user input (ignore the _commit field).\n \"\"\"\n return bool(len(self.fields) > 1)\n\n\nclass JobEditForm(NautobotModelForm):\n slug = SlugField()\n\n class Meta:\n model = Job\n fields = [\n \"slug\",\n \"enabled\",\n \"name_override\",\n \"name\",\n \"grouping_override\",\n \"grouping\",\n \"description_override\",\n \"description\",\n \"commit_default_override\",\n \"commit_default\",\n \"hidden_override\",\n \"hidden\",\n \"read_only_override\",\n \"read_only\",\n \"approval_required_override\",\n \"approval_required\",\n \"soft_time_limit_override\",\n \"soft_time_limit\",\n \"time_limit_override\",\n \"time_limit\",\n \"has_sensitive_variables\",\n \"has_sensitive_variables_override\",\n \"tags\",\n ]\n\n def clean(self):\n \"\"\"\n For all overridable fields, if they aren't marked as overridden, revert them to the underlying value if known.\n \"\"\"\n cleaned_data = super().clean() or self.cleaned_data\n job_class = self.instance.job_class\n if job_class is not None:\n for field_name in JOB_OVERRIDABLE_FIELDS:\n if not cleaned_data.get(f\"{field_name}_override\", False):\n cleaned_data[field_name] = getattr(job_class, field_name)\n return cleaned_data\n\n\nclass JobFilterForm(BootstrapMixin, forms.Form):\n model = Job\n q = forms.CharField(required=False, label=\"Search\")\n installed = forms.NullBooleanField(\n initial=True,\n required=False,\n widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES),\n )\n enabled = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n has_sensitive_variables = forms.NullBooleanField(\n required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES)\n )\n commit_default = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n hidden = forms.NullBooleanField(\n initial=False,\n required=False,\n widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES),\n )\n read_only = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n approval_required = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n is_job_hook_receiver = forms.NullBooleanField(\n initial=False,\n required=False,\n widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES),\n )\n tag = TagFilterField(model)\n\n\nclass JobHookForm(BootstrapMixin, forms.ModelForm):\n content_types = MultipleContentTypeField(\n queryset=ChangeLoggedModelsQuery().as_queryset(), required=True, label=\"Content Type(s)\"\n )\n\n class Meta:\n model = JobHook\n fields = (\n \"name\",\n \"content_types\",\n \"job\",\n \"enabled\",\n \"type_create\",\n \"type_update\",\n \"type_delete\",\n )\n\n def clean(self):\n data = super().clean()\n\n conflicts = JobHook.check_for_conflicts(\n instance=self.instance,\n content_types=self.cleaned_data.get(\"content_types\"),\n job=self.cleaned_data.get(\"job\"),\n type_create=self.cleaned_data.get(\"type_create\"),\n type_update=self.cleaned_data.get(\"type_update\"),\n 
type_delete=self.cleaned_data.get(\"type_delete\"),\n )\n\n if conflicts:\n raise ValidationError(conflicts)\n\n return data\n\n\nclass JobHookFilterForm(BootstrapMixin, forms.Form):\n model = JobHook\n q = forms.CharField(required=False, label=\"Search\")\n content_types = MultipleContentTypeField(\n queryset=ChangeLoggedModelsQuery().as_queryset(),\n choices_as_strings=True,\n required=False,\n label=\"Content Type(s)\",\n )\n enabled = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n job = DynamicModelMultipleChoiceField(\n label=\"Job\",\n queryset=Job.objects.all(),\n required=False,\n to_field_name=\"slug\",\n widget=APISelectMultiple(api_url=\"/api/extras/jobs/\", api_version=\"1.3\"),\n )\n type_create = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n type_update = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n type_delete = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n\n\nclass JobScheduleForm(BootstrapMixin, forms.Form):\n \"\"\"\n This form is rendered alongside the JobForm but deals specifically with the fields needed to either\n execute the job immediately, or schedule it for later. Each field name is prefixed with an underscore\n because in the POST body, they share a namespace with the JobForm which includes fields defined by the\n job author, so the underscore prefix helps to avoid name collisions.\n \"\"\"\n\n _schedule_type = forms.ChoiceField(\n choices=JobExecutionType,\n help_text=\"The job can either run immediately, once in the future, or on a recurring schedule.\",\n label=\"Type\",\n )\n _schedule_name = forms.CharField(\n required=False,\n label=\"Schedule name\",\n help_text=\"Name for the job schedule.\",\n )\n _schedule_start_time = forms.DateTimeField(\n required=False,\n label=\"Starting date and time\",\n widget=DateTimePicker(),\n )\n _recurrence_custom_time = forms.CharField(\n required=False,\n label=\"Crontab\",\n help_text=\"Custom crontab syntax (* * * * *)\",\n )\n\n def clean(self):\n \"\"\"\n Validate all required information is present if the job needs to be scheduled\n \"\"\"\n cleaned_data = super().clean()\n\n if \"_schedule_type\" in cleaned_data and cleaned_data.get(\"_schedule_type\") != JobExecutionType.TYPE_IMMEDIATELY:\n if not cleaned_data.get(\"_schedule_name\"):\n raise ValidationError({\"_schedule_name\": \"Please provide a name for the job schedule.\"})\n\n if (\n not cleaned_data.get(\"_schedule_start_time\")\n and cleaned_data.get(\"_schedule_type\") != JobExecutionType.TYPE_CUSTOM\n ) or (\n cleaned_data.get(\"_schedule_start_time\")\n and cleaned_data.get(\"_schedule_start_time\") < ScheduledJob.earliest_possible_time()\n ):\n raise ValidationError(\n {\n \"_schedule_start_time\": \"Please enter a valid date and time greater than or equal to the current date and time.\"\n }\n )\n\n if cleaned_data.get(\"_schedule_type\") == JobExecutionType.TYPE_CUSTOM:\n try:\n ScheduledJob.get_crontab(cleaned_data.get(\"_recurrence_custom_time\"))\n except Exception as e:\n raise ValidationError({\"_recurrence_custom_time\": e})\n\n\nclass JobResultFilterForm(BootstrapMixin, forms.Form):\n model = JobResult\n q = forms.CharField(required=False, label=\"Search\")\n job_model = DynamicModelMultipleChoiceField(\n label=\"Job\",\n queryset=Job.objects.all(),\n required=False,\n to_field_name=\"slug\",\n 
widget=APISelectMultiple(api_url=\"/api/extras/jobs/\", api_version=\"1.3\"),\n )\n # FIXME(glenn) Filtering by obj_type?\n name = forms.CharField(required=False)\n user = DynamicModelMultipleChoiceField(\n queryset=get_user_model().objects.all(),\n required=False,\n label=\"User\",\n widget=APISelectMultiple(\n api_url=\"/api/users/users/\",\n ),\n )\n status = forms.ChoiceField(\n choices=add_blank_choice(JobResultStatusChoices),\n required=False,\n widget=StaticSelect2(),\n )\n\n\nclass ScheduledJobFilterForm(BootstrapMixin, forms.Form):\n model = ScheduledJob\n q = forms.CharField(required=False, label=\"Search\")\n name = forms.CharField(required=False)\n job_model = DynamicModelMultipleChoiceField(\n label=\"Job\",\n queryset=Job.objects.all(),\n required=False,\n to_field_name=\"slug\",\n widget=APISelectMultiple(api_url=\"/api/extras/job-models/\"),\n )\n total_run_count = forms.IntegerField(required=False)\n\n\n#\n# Notes\n#\n\n\nclass NoteForm(BootstrapMixin, forms.ModelForm):\n note = CommentField\n\n class Meta:\n model = Note\n fields = [\"assigned_object_type\", \"assigned_object_id\", \"note\"]\n widgets = {\n \"assigned_object_type\": forms.HiddenInput,\n \"assigned_object_id\": forms.HiddenInput,\n }\n\n\n#\n# Filter form for local config context data\n#\n\n\nclass LocalContextFilterForm(forms.Form):\n local_context_data = forms.NullBooleanField(\n required=False,\n label=\"Has local config context data\",\n widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES),\n )\n local_context_schema = DynamicModelMultipleChoiceField(\n queryset=ConfigContextSchema.objects.all(), to_field_name=\"slug\", required=False\n )\n\n\n#\n# Model form for local config context data\n#\n\n\nclass LocalContextModelForm(forms.ModelForm):\n local_context_schema = DynamicModelChoiceField(queryset=ConfigContextSchema.objects.all(), required=False)\n local_context_data = JSONField(required=False, label=\"\")\n\n\nclass LocalContextModelBulkEditForm(BulkEditForm):\n local_context_schema = DynamicModelChoiceField(queryset=ConfigContextSchema.objects.all(), required=False)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # append nullable fields\n self.nullable_fields.append(\"local_context_schema\")\n\n\n#\n# Change logging\n#\n\n\nclass ObjectChangeFilterForm(BootstrapMixin, forms.Form):\n model = ObjectChange\n q = forms.CharField(required=False, label=\"Search\")\n time__gte = forms.DateTimeField(label=\"After\", required=False, widget=DateTimePicker())\n time__lte = forms.DateTimeField(label=\"Before\", required=False, widget=DateTimePicker())\n action = forms.ChoiceField(\n choices=add_blank_choice(ObjectChangeActionChoices),\n required=False,\n widget=StaticSelect2(),\n )\n user_id = DynamicModelMultipleChoiceField(\n queryset=get_user_model().objects.all(),\n required=False,\n label=\"User\",\n widget=APISelectMultiple(\n api_url=\"/api/users/users/\",\n ),\n )\n changed_object_type_id = DynamicModelMultipleChoiceField(\n queryset=ContentType.objects.all(),\n required=False,\n label=\"Object Type\",\n widget=APISelectMultiple(\n api_url=\"/api/extras/content-types/\",\n ),\n )\n\n\n#\n# Relationship\n#\n\n\nclass RelationshipForm(BootstrapMixin, forms.ModelForm):\n\n slug = SlugField(help_text=\"Internal name of this relationship. 
Please use underscores rather than dashes.\")\n source_type = forms.ModelChoiceField(\n queryset=ContentType.objects.filter(FeatureQuery(\"relationships\").get_query()).order_by(\"app_label\", \"model\"),\n help_text=\"The source object type to which this relationship applies.\",\n )\n source_filter = JSONField(\n required=False,\n help_text=\"Filterset filter matching the applicable source objects of the selected type.<br>\"\n 'Enter in <a href=\"https://json.org/\">JSON</a> format.',\n )\n destination_type = forms.ModelChoiceField(\n queryset=ContentType.objects.filter(FeatureQuery(\"relationships\").get_query()).order_by(\"app_label\", \"model\"),\n help_text=\"The destination object type to which this relationship applies.\",\n )\n destination_filter = JSONField(\n required=False,\n help_text=\"Filterset filter matching the applicable destination objects of the selected type.<br>\"\n 'Enter in <a href=\"https://json.org/\">JSON</a> format.',\n )\n\n class Meta:\n model = Relationship\n fields = [\n \"name\",\n \"slug\",\n \"description\",\n \"type\",\n \"advanced_ui\",\n \"source_type\",\n \"source_label\",\n \"source_hidden\",\n \"source_filter\",\n \"destination_type\",\n \"destination_label\",\n \"destination_hidden\",\n \"destination_filter\",\n ]\n\n def save(self, commit=True):\n\n # TODO add support for owner when a CR is created in the UI\n obj = super().save(commit)\n\n return obj\n\n\nclass RelationshipFilterForm(BootstrapMixin, forms.Form):\n model = Relationship\n\n type = forms.MultipleChoiceField(choices=RelationshipTypeChoices, required=False, widget=StaticSelect2Multiple())\n\n source_type = MultipleContentTypeField(\n feature=\"relationships\", choices_as_strings=True, required=False, label=\"Source Type\"\n )\n\n destination_type = MultipleContentTypeField(\n feature=\"relationships\", choices_as_strings=True, required=False, label=\"Destination Type\"\n )\n\n\nclass RelationshipAssociationFilterForm(BootstrapMixin, forms.Form):\n model = RelationshipAssociation\n\n relationship = DynamicModelMultipleChoiceField(\n queryset=Relationship.objects.all(),\n to_field_name=\"slug\",\n required=False,\n )\n\n source_type = MultipleContentTypeField(\n feature=\"relationships\", choices_as_strings=True, required=False, label=\"Source Type\"\n )\n\n destination_type = MultipleContentTypeField(\n feature=\"relationships\", choices_as_strings=True, required=False, label=\"Destination Type\"\n )\n\n\n#\n# Secrets\n#\n\n\ndef provider_choices():\n return sorted([(slug, provider.name) for slug, provider in registry[\"secrets_providers\"].items()])\n\n\nclass SecretForm(NautobotModelForm):\n \"\"\"Create/update form for `Secret` objects.\"\"\"\n\n slug = SlugField()\n\n provider = forms.ChoiceField(choices=provider_choices, widget=StaticSelect2())\n\n parameters = JSONField(help_text='Enter parameters in <a href=\"https://json.org/\">JSON</a> format.')\n\n class Meta:\n model = Secret\n fields = [\n \"name\",\n \"slug\",\n \"description\",\n \"provider\",\n \"parameters\",\n \"tags\",\n ]\n\n\nclass SecretCSVForm(CustomFieldModelCSVForm):\n class Meta:\n model = Secret\n fields = Secret.csv_headers\n\n\ndef provider_choices_with_blank():\n return add_blank_choice(sorted([(slug, provider.name) for slug, provider in registry[\"secrets_providers\"].items()]))\n\n\nclass SecretFilterForm(NautobotFilterForm):\n model = Secret\n q = forms.CharField(required=False, label=\"Search\")\n provider = forms.MultipleChoiceField(\n choices=provider_choices_with_blank, 
widget=StaticSelect2Multiple(), required=False\n )\n tag = TagFilterField(model)\n\n\n# Inline formset for use with providing dynamic rows when creating/editing assignments of Secrets to SecretsGroups.\nSecretsGroupAssociationFormSet = inlineformset_factory(\n parent_model=SecretsGroup,\n model=SecretsGroupAssociation,\n fields=(\"access_type\", \"secret_type\", \"secret\"),\n extra=5,\n widgets={\n \"access_type\": StaticSelect2,\n \"secret_type\": StaticSelect2,\n \"secret\": APISelect(api_url=\"/api/extras/secrets/\"),\n },\n)\n\n\nclass SecretsGroupForm(NautobotModelForm):\n \"\"\"Create/update form for `SecretsGroup` objects.\"\"\"\n\n slug = SlugField()\n\n class Meta:\n model = SecretsGroup\n fields = [\n \"name\",\n \"slug\",\n \"description\",\n ]\n\n\nclass SecretsGroupFilterForm(NautobotFilterForm):\n model = SecretsGroup\n q = forms.CharField(required=False, label=\"Search\")\n\n\n#\n# Statuses\n#\n\n\nclass StatusForm(NautobotModelForm):\n \"\"\"Generic create/update form for `Status` objects.\"\"\"\n\n slug = SlugField()\n content_types = MultipleContentTypeField(feature=\"statuses\", label=\"Content Type(s)\")\n\n class Meta:\n model = Status\n widgets = {\"color\": ColorSelect()}\n fields = [\"name\", \"slug\", \"description\", \"content_types\", \"color\"]\n\n\nclass StatusCSVForm(CustomFieldModelCSVForm):\n \"\"\"Generic CSV bulk import form for `Status` objects.\"\"\"\n\n content_types = CSVMultipleContentTypeField(\n feature=\"statuses\",\n choices_as_strings=True,\n help_text=mark_safe(\n \"The object types to which this status applies. Multiple values \"\n \"must be comma-separated and wrapped in double quotes. (e.g. \"\n '<code>\"dcim.device,dcim.rack\"</code>)'\n ),\n label=\"Content type(s)\",\n )\n\n class Meta:\n model = Status\n fields = Status.csv_headers\n help_texts = {\n \"color\": mark_safe(\"RGB color in hexadecimal (e.g. 
<code>00ff00</code>)\"),\n }\n\n\nclass StatusFilterForm(NautobotFilterForm):\n \"\"\"Filtering/search form for `Status` objects.\"\"\"\n\n model = Status\n q = forms.CharField(required=False, label=\"Search\")\n content_types = MultipleContentTypeField(\n feature=\"statuses\", choices_as_strings=True, required=False, label=\"Content Type(s)\"\n )\n color = forms.CharField(max_length=6, required=False, widget=ColorSelect())\n\n\nclass StatusBulkEditForm(NautobotBulkEditForm):\n \"\"\"Bulk edit/delete form for `Status` objects.\"\"\"\n\n pk = forms.ModelMultipleChoiceField(queryset=Status.objects.all(), widget=forms.MultipleHiddenInput)\n color = forms.CharField(max_length=6, required=False, widget=ColorSelect())\n content_types = MultipleContentTypeField(feature=\"statuses\", required=False, label=\"Content Type(s)\")\n\n class Meta:\n nullable_fields = []\n\n\n#\n# Tags\n#\n\n\nclass TagForm(NautobotModelForm):\n slug = SlugField()\n content_types = ModelMultipleChoiceField(\n label=\"Content Type(s)\",\n queryset=TaggableClassesQuery().as_queryset,\n )\n\n class Meta:\n model = Tag\n fields = [\"name\", \"slug\", \"color\", \"description\", \"content_types\"]\n\n def clean(self):\n data = super().clean()\n\n if self.instance.present_in_database:\n # check if tag is assigned to any of the removed content_types\n content_types_id = [content_type.id for content_type in self.cleaned_data[\"content_types\"]]\n errors = self.instance.validate_content_types_removal(content_types_id)\n\n if errors:\n raise ValidationError(errors)\n\n return data\n\n\nclass TagCSVForm(CustomFieldModelCSVForm):\n slug = SlugField()\n\n class Meta:\n model = Tag\n fields = Tag.csv_headers\n help_texts = {\n \"color\": mark_safe(\"RGB color in hexadecimal (e.g. <code>00ff00</code>)\"),\n }\n\n\nclass TagFilterForm(NautobotFilterForm):\n model = Tag\n q = forms.CharField(required=False, label=\"Search\")\n content_types = MultipleContentTypeField(\n choices_as_strings=True,\n required=False,\n label=\"Content Type(s)\",\n queryset=TaggableClassesQuery().as_queryset,\n )\n\n\nclass TagBulkEditForm(NautobotBulkEditForm):\n pk = forms.ModelMultipleChoiceField(queryset=Tag.objects.all(), widget=forms.MultipleHiddenInput)\n color = forms.CharField(max_length=6, required=False, widget=ColorSelect())\n description = forms.CharField(max_length=200, required=False)\n\n class Meta:\n nullable_fields = [\"description\"]\n\n\n#\n# Webhooks\n#\n\n\nclass WebhookForm(BootstrapMixin, forms.ModelForm):\n content_types = MultipleContentTypeField(feature=\"webhooks\", required=False, label=\"Content Type(s)\")\n\n class Meta:\n model = Webhook\n fields = (\n \"name\",\n \"content_types\",\n \"enabled\",\n \"type_create\",\n \"type_update\",\n \"type_delete\",\n \"payload_url\",\n \"http_method\",\n \"http_content_type\",\n \"additional_headers\",\n \"body_template\",\n \"secret\",\n \"ssl_verification\",\n \"ca_file_path\",\n )\n\n def clean(self):\n data = super().clean()\n\n conflicts = Webhook.check_for_conflicts(\n instance=self.instance,\n content_types=self.cleaned_data.get(\"content_types\"),\n payload_url=self.cleaned_data.get(\"payload_url\"),\n type_create=self.cleaned_data.get(\"type_create\"),\n type_update=self.cleaned_data.get(\"type_update\"),\n type_delete=self.cleaned_data.get(\"type_delete\"),\n )\n\n if conflicts:\n raise ValidationError(conflicts)\n\n return data\n\n\nclass WebhookFilterForm(BootstrapMixin, forms.Form):\n model = Webhook\n q = forms.CharField(required=False, label=\"Search\")\n content_types = 
MultipleContentTypeField(\n feature=\"webhooks\", choices_as_strings=True, required=False, label=\"Content Type(s)\"\n )\n type_create = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n type_update = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n type_delete = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n enabled = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n",
"path": "nautobot/extras/forms/forms.py"
}
] | [
{
"content": "from django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ValidationError\nfrom django.db.models.fields import TextField\nfrom django.forms import ModelMultipleChoiceField, inlineformset_factory\nfrom django.urls.base import reverse\nfrom django.utils.safestring import mark_safe\n\nfrom nautobot.dcim.models import DeviceRole, DeviceType, Location, Platform, Region, Site\nfrom nautobot.tenancy.models import Tenant, TenantGroup\nfrom nautobot.utilities.deprecation import class_deprecated_in_favor_of\nfrom nautobot.utilities.forms import (\n add_blank_choice,\n APISelect,\n APISelectMultiple,\n BootstrapMixin,\n BulkEditForm,\n BulkEditNullBooleanSelect,\n ColorSelect,\n CommentField,\n CSVContentTypeField,\n CSVModelChoiceField,\n CSVModelForm,\n CSVMultipleChoiceField,\n CSVMultipleContentTypeField,\n DateTimePicker,\n DynamicModelChoiceField,\n DynamicModelMultipleChoiceField,\n JSONField,\n MultipleContentTypeField,\n SlugField,\n StaticSelect2,\n StaticSelect2Multiple,\n TagFilterField,\n)\nfrom nautobot.utilities.forms.constants import BOOLEAN_WITH_BLANK_CHOICES\nfrom nautobot.virtualization.models import Cluster, ClusterGroup\nfrom nautobot.extras.choices import (\n JobExecutionType,\n JobResultStatusChoices,\n ObjectChangeActionChoices,\n RelationshipTypeChoices,\n)\nfrom nautobot.extras.constants import JOB_OVERRIDABLE_FIELDS\nfrom nautobot.extras.datasources import get_datasource_content_choices\nfrom nautobot.extras.models import (\n ComputedField,\n ConfigContext,\n ConfigContextSchema,\n CustomField,\n CustomFieldChoice,\n CustomLink,\n DynamicGroup,\n DynamicGroupMembership,\n ExportTemplate,\n GitRepository,\n GraphQLQuery,\n ImageAttachment,\n Job,\n JobHook,\n JobResult,\n Note,\n ObjectChange,\n Relationship,\n RelationshipAssociation,\n ScheduledJob,\n Secret,\n SecretsGroup,\n SecretsGroupAssociation,\n Status,\n Tag,\n Webhook,\n)\nfrom nautobot.extras.registry import registry\nfrom nautobot.extras.utils import ChangeLoggedModelsQuery, FeatureQuery, TaggableClassesQuery\nfrom .base import (\n NautobotBulkEditForm,\n NautobotFilterForm,\n NautobotModelForm,\n)\nfrom .mixins import (\n CustomFieldModelBulkEditFormMixin,\n CustomFieldModelFormMixin,\n RelationshipModelFormMixin,\n)\n\n\n__all__ = (\n \"BaseDynamicGroupMembershipFormSet\",\n \"ComputedFieldForm\",\n \"ComputedFieldFilterForm\",\n \"ConfigContextForm\",\n \"ConfigContextBulkEditForm\",\n \"ConfigContextFilterForm\",\n \"ConfigContextSchemaForm\",\n \"ConfigContextSchemaBulkEditForm\",\n \"ConfigContextSchemaFilterForm\",\n \"CustomFieldForm\",\n \"CustomFieldModelCSVForm\",\n \"CustomFieldBulkCreateForm\", # 2.0 TODO remove this deprecated class\n \"CustomFieldChoiceFormSet\",\n \"CustomLinkForm\",\n \"CustomLinkFilterForm\",\n \"DynamicGroupForm\",\n \"DynamicGroupFilterForm\",\n \"DynamicGroupMembershipFormSet\",\n \"ExportTemplateForm\",\n \"ExportTemplateFilterForm\",\n \"GitRepositoryForm\",\n \"GitRepositoryCSVForm\",\n \"GitRepositoryBulkEditForm\",\n \"GitRepositoryFilterForm\",\n \"GraphQLQueryForm\",\n \"GraphQLQueryFilterForm\",\n \"ImageAttachmentForm\",\n \"JobForm\",\n \"JobEditForm\",\n \"JobFilterForm\",\n \"JobHookForm\",\n \"JobHookFilterForm\",\n \"JobScheduleForm\",\n \"JobResultFilterForm\",\n \"LocalContextFilterForm\",\n \"LocalContextModelForm\",\n \"LocalContextModelBulkEditForm\",\n \"NoteForm\",\n \"ObjectChangeFilterForm\",\n \"PasswordInputWithPlaceholder\",\n 
\"RelationshipForm\",\n \"RelationshipFilterForm\",\n \"RelationshipAssociationFilterForm\",\n \"ScheduledJobFilterForm\",\n \"SecretForm\",\n \"SecretCSVForm\",\n \"SecretFilterForm\",\n \"SecretsGroupForm\",\n \"SecretsGroupFilterForm\",\n \"SecretsGroupAssociationFormSet\",\n \"StatusForm\",\n \"StatusCSVForm\",\n \"StatusFilterForm\",\n \"StatusBulkEditForm\",\n \"TagForm\",\n \"TagCSVForm\",\n \"TagFilterForm\",\n \"TagBulkEditForm\",\n \"WebhookForm\",\n \"WebhookFilterForm\",\n)\n\n\n#\n# Computed Fields\n#\n\n\nclass ComputedFieldForm(BootstrapMixin, forms.ModelForm):\n\n content_type = forms.ModelChoiceField(\n queryset=ContentType.objects.filter(FeatureQuery(\"custom_fields\").get_query()).order_by(\"app_label\", \"model\"),\n required=True,\n label=\"Content Type\",\n )\n slug = SlugField(\n slug_source=\"label\",\n help_text=\"Internal name of this field. Please use underscores rather than dashes.\",\n )\n template = forms.CharField(\n widget=forms.Textarea,\n help_text=(\n \"Jinja2 template code for field value.<br>\"\n \"Use <code>obj</code> to refer to the object to which this computed field is attached.\"\n ),\n )\n\n class Meta:\n model = ComputedField\n fields = (\n \"content_type\",\n \"label\",\n \"slug\",\n \"description\",\n \"template\",\n \"fallback_value\",\n \"weight\",\n \"advanced_ui\",\n )\n\n\nclass ComputedFieldFilterForm(BootstrapMixin, forms.Form):\n model = ComputedField\n q = forms.CharField(required=False, label=\"Search\")\n content_type = CSVContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"custom_fields\").get_query()).order_by(\"app_label\", \"model\"),\n required=False,\n label=\"Content Type\",\n )\n\n\n#\n# Config contexts\n#\n\n\nclass ConfigContextForm(BootstrapMixin, forms.ModelForm):\n regions = DynamicModelMultipleChoiceField(queryset=Region.objects.all(), required=False)\n sites = DynamicModelMultipleChoiceField(queryset=Site.objects.all(), required=False)\n locations = DynamicModelMultipleChoiceField(queryset=Location.objects.all(), required=False)\n roles = DynamicModelMultipleChoiceField(queryset=DeviceRole.objects.all(), required=False)\n device_types = DynamicModelMultipleChoiceField(queryset=DeviceType.objects.all(), required=False)\n platforms = DynamicModelMultipleChoiceField(queryset=Platform.objects.all(), required=False)\n cluster_groups = DynamicModelMultipleChoiceField(queryset=ClusterGroup.objects.all(), required=False)\n clusters = DynamicModelMultipleChoiceField(queryset=Cluster.objects.all(), required=False)\n tenant_groups = DynamicModelMultipleChoiceField(queryset=TenantGroup.objects.all(), required=False)\n tenants = DynamicModelMultipleChoiceField(queryset=Tenant.objects.all(), required=False)\n\n data = JSONField(label=\"\")\n\n class Meta:\n model = ConfigContext\n fields = (\n \"name\",\n \"weight\",\n \"description\",\n \"schema\",\n \"is_active\",\n \"regions\",\n \"sites\",\n \"locations\",\n \"roles\",\n \"device_types\",\n \"platforms\",\n \"cluster_groups\",\n \"clusters\",\n \"tenant_groups\",\n \"tenants\",\n \"tags\",\n \"data\",\n )\n\n\nclass ConfigContextBulkEditForm(BootstrapMixin, BulkEditForm):\n pk = forms.ModelMultipleChoiceField(queryset=ConfigContext.objects.all(), widget=forms.MultipleHiddenInput)\n schema = DynamicModelChoiceField(queryset=ConfigContextSchema.objects.all(), required=False)\n weight = forms.IntegerField(required=False, min_value=0)\n is_active = forms.NullBooleanField(required=False, widget=BulkEditNullBooleanSelect())\n description = 
forms.CharField(required=False, max_length=100)\n\n class Meta:\n nullable_fields = [\n \"description\",\n \"schema\",\n ]\n\n\nclass ConfigContextFilterForm(BootstrapMixin, forms.Form):\n q = forms.CharField(required=False, label=\"Search\")\n # FIXME(glenn) filtering by owner_content_type\n schema = DynamicModelChoiceField(queryset=ConfigContextSchema.objects.all(), to_field_name=\"slug\", required=False)\n region = DynamicModelMultipleChoiceField(queryset=Region.objects.all(), to_field_name=\"slug\", required=False)\n site = DynamicModelMultipleChoiceField(queryset=Site.objects.all(), to_field_name=\"slug\", required=False)\n location = DynamicModelMultipleChoiceField(queryset=Location.objects.all(), to_field_name=\"slug\", required=False)\n role = DynamicModelMultipleChoiceField(queryset=DeviceRole.objects.all(), to_field_name=\"slug\", required=False)\n type = DynamicModelMultipleChoiceField(queryset=DeviceType.objects.all(), to_field_name=\"slug\", required=False)\n platform = DynamicModelMultipleChoiceField(queryset=Platform.objects.all(), to_field_name=\"slug\", required=False)\n cluster_group = DynamicModelMultipleChoiceField(\n queryset=ClusterGroup.objects.all(), to_field_name=\"slug\", required=False\n )\n cluster_id = DynamicModelMultipleChoiceField(queryset=Cluster.objects.all(), required=False, label=\"Cluster\")\n tenant_group = DynamicModelMultipleChoiceField(\n queryset=TenantGroup.objects.all(), to_field_name=\"slug\", required=False\n )\n tenant = DynamicModelMultipleChoiceField(queryset=Tenant.objects.all(), to_field_name=\"slug\", required=False)\n tag = DynamicModelMultipleChoiceField(queryset=Tag.objects.all(), to_field_name=\"slug\", required=False)\n\n\n#\n# Config context schemas\n#\n\n\nclass ConfigContextSchemaForm(NautobotModelForm):\n data_schema = JSONField(label=\"\")\n slug = SlugField()\n\n class Meta:\n model = ConfigContextSchema\n fields = (\n \"name\",\n \"slug\",\n \"description\",\n \"data_schema\",\n )\n\n\nclass ConfigContextSchemaBulkEditForm(NautobotBulkEditForm):\n pk = forms.ModelMultipleChoiceField(queryset=ConfigContextSchema.objects.all(), widget=forms.MultipleHiddenInput)\n description = forms.CharField(required=False, max_length=100)\n\n class Meta:\n nullable_fields = [\n \"description\",\n ]\n\n\nclass ConfigContextSchemaFilterForm(BootstrapMixin, forms.Form):\n q = forms.CharField(required=False, label=\"Search\")\n # FIXME(glenn) filtering by owner_content_type\n\n\n#\n# Custom fields\n#\n\n\n# CustomFieldChoice inline formset for use with providing dynamic rows when creating/editing choices\n# for `CustomField` objects in UI views. Fields/exclude must be set but since we're using all the\n# fields we're just setting `exclude=()` here.\nCustomFieldChoiceFormSet = inlineformset_factory(\n parent_model=CustomField,\n model=CustomFieldChoice,\n exclude=(),\n extra=5,\n widgets={\n \"value\": forms.TextInput(attrs={\"class\": \"form-control\"}),\n \"weight\": forms.NumberInput(attrs={\"class\": \"form-control\"}),\n },\n)\n\n\nclass CustomFieldForm(BootstrapMixin, forms.ModelForm):\n label = forms.CharField(required=True, max_length=50, help_text=\"Name of the field as displayed to users.\")\n slug = SlugField(\n max_length=50,\n slug_source=\"label\",\n help_text=\"Internal name of this field. 
Please use underscores rather than dashes.\",\n )\n description = forms.CharField(\n required=False,\n help_text=\"Also used as the help text when editing models using this custom field.<br>\"\n '<a href=\"https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet\" target=\"_blank\">'\n \"Markdown</a> syntax is supported.\",\n )\n content_types = MultipleContentTypeField(\n feature=\"custom_fields\", help_text=\"The object(s) to which this field applies.\"\n )\n\n class Meta:\n model = CustomField\n fields = (\n \"label\",\n \"slug\",\n \"type\",\n \"weight\",\n \"description\",\n \"required\",\n \"default\",\n \"filter_logic\",\n \"advanced_ui\",\n \"content_types\",\n \"validation_minimum\",\n \"validation_maximum\",\n \"validation_regex\",\n )\n\n\nclass CustomFieldModelCSVForm(CSVModelForm, CustomFieldModelFormMixin):\n \"\"\"Base class for CSV export of models that support custom fields.\"\"\"\n\n def _append_customfield_fields(self):\n\n # Append form fields\n for cf in CustomField.objects.filter(content_types=self.obj_type):\n field_name = \"cf_{}\".format(cf.slug)\n self.fields[field_name] = cf.to_form_field(for_csv_import=True)\n\n # Annotate the field in the list of CustomField form fields\n self.custom_fields.append(field_name)\n\n\n# 2.0 TODO: remove this class\n@class_deprecated_in_favor_of(CustomFieldModelBulkEditFormMixin)\nclass CustomFieldBulkCreateForm(CustomFieldModelBulkEditFormMixin):\n \"\"\"No longer needed as a separate class - use CustomFieldModelBulkEditFormMixin instead.\"\"\"\n\n\n#\n# Custom Links\n#\n\n\nclass CustomLinkForm(BootstrapMixin, forms.ModelForm):\n content_type = forms.ModelChoiceField(\n queryset=ContentType.objects.filter(FeatureQuery(\"custom_links\").get_query()).order_by(\"app_label\", \"model\"),\n label=\"Content Type\",\n )\n\n class Meta:\n model = CustomLink\n fields = (\n \"content_type\",\n \"name\",\n \"text\",\n \"target_url\",\n \"weight\",\n \"group_name\",\n \"button_class\",\n \"new_window\",\n )\n\n\nclass CustomLinkFilterForm(BootstrapMixin, forms.Form):\n model = CustomLink\n q = forms.CharField(required=False, label=\"Search\")\n content_type = CSVContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"custom_links\").get_query()).order_by(\"app_label\", \"model\"),\n required=False,\n label=\"Content Type\",\n )\n\n\n#\n# Dynamic Groups\n#\n\n\nclass DynamicGroupForm(NautobotModelForm):\n \"\"\"DynamicGroup model form.\"\"\"\n\n slug = SlugField()\n content_type = CSVContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"dynamic_groups\").get_query()).order_by(\"app_label\", \"model\"),\n label=\"Content Type\",\n )\n\n class Meta:\n model = DynamicGroup\n fields = [\n \"name\",\n \"slug\",\n \"description\",\n \"content_type\",\n ]\n\n\nclass DynamicGroupMembershipFormSetForm(forms.ModelForm):\n \"\"\"DynamicGroupMembership model form for use inline on DynamicGroupFormSet.\"\"\"\n\n group = DynamicModelChoiceField(\n queryset=DynamicGroup.objects.all(),\n query_params={\"content_type\": \"$content_type\"},\n )\n\n class Meta:\n model = DynamicGroupMembership\n fields = (\"operator\", \"group\", \"weight\")\n\n\n# Inline formset for use with providing dynamic rows when creating/editing memberships of child\n# DynamicGroups to a parent DynamicGroup.\nBaseDynamicGroupMembershipFormSet = inlineformset_factory(\n parent_model=DynamicGroup,\n model=DynamicGroupMembership,\n form=DynamicGroupMembershipFormSetForm,\n extra=4,\n fk_name=\"parent_group\",\n widgets={\n \"operator\": 
StaticSelect2,\n \"weight\": forms.HiddenInput(),\n },\n)\n\n\nclass DynamicGroupMembershipFormSet(BaseDynamicGroupMembershipFormSet):\n \"\"\"\n Inline formset for use with providing dynamic rows when creating/editing memberships of child\n groups to a parent DynamicGroup.\n \"\"\"\n\n\nclass DynamicGroupFilterForm(BootstrapMixin, forms.Form):\n \"\"\"DynamicGroup filter form.\"\"\"\n\n model = DynamicGroup\n q = forms.CharField(required=False, label=\"Search\")\n content_type = MultipleContentTypeField(feature=\"dynamic_groups\", choices_as_strings=True, label=\"Content Type\")\n\n\n#\n# Export Templates\n#\n\n\nclass ExportTemplateForm(BootstrapMixin, forms.ModelForm):\n content_type = forms.ModelChoiceField(\n queryset=ContentType.objects.filter(FeatureQuery(\"export_templates\").get_query()).order_by(\n \"app_label\", \"model\"\n ),\n label=\"Content Type\",\n )\n\n class Meta:\n model = ExportTemplate\n fields = (\n \"content_type\",\n \"name\",\n \"description\",\n \"template_code\",\n \"mime_type\",\n \"file_extension\",\n )\n\n\nclass ExportTemplateFilterForm(BootstrapMixin, forms.Form):\n model = ExportTemplate\n q = forms.CharField(required=False, label=\"Search\")\n content_type = CSVContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"export_templates\").get_query()).order_by(\n \"app_label\", \"model\"\n ),\n required=False,\n label=\"Content Type\",\n )\n\n\n#\n# Git repositories and other data sources\n#\n\n\ndef get_git_datasource_content_choices():\n return get_datasource_content_choices(\"extras.gitrepository\")\n\n\nclass PasswordInputWithPlaceholder(forms.PasswordInput):\n \"\"\"PasswordInput that is populated with a placeholder value if any existing value is present.\"\"\"\n\n def __init__(self, attrs=None, placeholder=\"\", render_value=False):\n if placeholder:\n render_value = True\n self._placeholder = placeholder\n super().__init__(attrs=attrs, render_value=render_value)\n\n def get_context(self, name, value, attrs):\n if value:\n value = self._placeholder\n return super().get_context(name, value, attrs)\n\n\nclass GitRepositoryForm(BootstrapMixin, RelationshipModelFormMixin):\n\n slug = SlugField(help_text=\"Filesystem-friendly unique shorthand\")\n\n remote_url = forms.URLField(\n required=True,\n label=\"Remote URL\",\n help_text=\"Only http:// and https:// URLs are presently supported\",\n )\n\n _token = forms.CharField(\n required=False,\n label=\"Token\",\n widget=PasswordInputWithPlaceholder(placeholder=GitRepository.TOKEN_PLACEHOLDER),\n help_text=\"<em>Deprecated</em> - use a secrets group instead.\",\n )\n\n username = forms.CharField(\n required=False,\n label=\"Username\",\n help_text=\"Username for token authentication.<br><em>Deprecated</em> - use a secrets group instead\",\n )\n\n secrets_group = DynamicModelChoiceField(required=False, queryset=SecretsGroup.objects.all())\n\n provided_contents = forms.MultipleChoiceField(\n required=False,\n label=\"Provides\",\n choices=get_git_datasource_content_choices,\n )\n\n class Meta:\n model = GitRepository\n fields = [\n \"name\",\n \"slug\",\n \"remote_url\",\n \"branch\",\n \"username\",\n \"_token\",\n \"secrets_group\",\n \"provided_contents\",\n \"tags\",\n ]\n\n def clean(self):\n super().clean()\n\n # set dryrun after a successful clean\n if \"_dryrun_create\" in self.data or \"_dryrun_update\" in self.data:\n self.instance.set_dryrun()\n\n\nclass GitRepositoryCSVForm(CSVModelForm):\n secrets_group = CSVModelChoiceField(\n queryset=SecretsGroup.objects.all(),\n 
to_field_name=\"name\",\n required=False,\n help_text=\"Secrets group for repository access (if any)\",\n )\n\n class Meta:\n model = GitRepository\n fields = GitRepository.csv_headers\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"provided_contents\"] = CSVMultipleChoiceField(\n choices=get_git_datasource_content_choices(),\n required=False,\n help_text=mark_safe(\n \"The data types this repository provides. Multiple values must be comma-separated and wrapped in \"\n 'double quotes (e.g. <code>\"extras.job,extras.configcontext\"</code>).'\n ),\n )\n\n\nclass GitRepositoryBulkEditForm(NautobotBulkEditForm):\n pk = forms.ModelMultipleChoiceField(\n queryset=GitRepository.objects.all(),\n widget=forms.MultipleHiddenInput(),\n )\n remote_url = forms.CharField(\n label=\"Remote URL\",\n required=False,\n )\n branch = forms.CharField(\n required=False,\n )\n _token = forms.CharField(\n required=False,\n label=\"Token\",\n widget=PasswordInputWithPlaceholder(placeholder=GitRepository.TOKEN_PLACEHOLDER),\n help_text=\"<em>Deprecated</em> - use a secrets group instead.\",\n )\n username = forms.CharField(\n required=False,\n label=\"Username\",\n help_text=\"<em>Deprecated</em> - use a secrets group instead.\",\n )\n\n secrets_group = DynamicModelChoiceField(required=False, queryset=SecretsGroup.objects.all())\n\n class Meta:\n model = GitRepository\n nullable_fields = [\"secrets_group\"]\n\n\nclass GitRepositoryFilterForm(BootstrapMixin, forms.Form):\n model = GitRepository\n q = forms.CharField(required=False, label=\"Search\")\n name = forms.CharField(required=False)\n branch = forms.CharField(required=False)\n provided_contents = forms.ChoiceField(\n required=False,\n label=\"Provides\",\n choices=add_blank_choice(get_git_datasource_content_choices()),\n )\n\n\n#\n# GraphQL saved queries\n#\n\n\nclass GraphQLQueryForm(BootstrapMixin, forms.ModelForm):\n slug = SlugField()\n query = TextField()\n\n class Meta:\n model = GraphQLQuery\n fields = (\n \"name\",\n \"slug\",\n \"query\",\n )\n\n def get_action_url(self):\n return reverse(\"extras:graphqlquery_add\")\n\n\nclass GraphQLQueryFilterForm(BootstrapMixin, forms.Form):\n model = GraphQLQuery\n q = forms.CharField(required=False, label=\"Search\")\n\n\n#\n# Image attachments\n#\n\n\nclass ImageAttachmentForm(BootstrapMixin, forms.ModelForm):\n class Meta:\n model = ImageAttachment\n fields = [\n \"name\",\n \"image\",\n ]\n\n\n#\n# Jobs\n#\n\n\nclass JobForm(BootstrapMixin, forms.Form):\n \"\"\"\n This form is used to render the user input fields for a Job class. Its fields are dynamically\n controlled by the job definition. 
See `nautobot.extras.jobs.BaseJob.as_form`\n \"\"\"\n\n _commit = forms.BooleanField(\n required=False,\n initial=True,\n label=\"Commit changes\",\n help_text=\"Commit changes to the database (uncheck for a dry-run)\",\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Move _commit to the end of the form\n commit = self.fields.pop(\"_commit\")\n self.fields[\"_commit\"] = commit\n\n @property\n def requires_input(self):\n \"\"\"\n A boolean indicating whether the form requires user input (ignore the _commit field).\n \"\"\"\n return bool(len(self.fields) > 1)\n\n\nclass JobEditForm(NautobotModelForm):\n slug = SlugField()\n\n class Meta:\n model = Job\n fields = [\n \"slug\",\n \"enabled\",\n \"name_override\",\n \"name\",\n \"grouping_override\",\n \"grouping\",\n \"description_override\",\n \"description\",\n \"commit_default_override\",\n \"commit_default\",\n \"hidden_override\",\n \"hidden\",\n \"read_only_override\",\n \"read_only\",\n \"approval_required_override\",\n \"approval_required\",\n \"soft_time_limit_override\",\n \"soft_time_limit\",\n \"time_limit_override\",\n \"time_limit\",\n \"has_sensitive_variables\",\n \"has_sensitive_variables_override\",\n \"tags\",\n ]\n\n def clean(self):\n \"\"\"\n For all overridable fields, if they aren't marked as overridden, revert them to the underlying value if known.\n \"\"\"\n cleaned_data = super().clean() or self.cleaned_data\n job_class = self.instance.job_class\n if job_class is not None:\n for field_name in JOB_OVERRIDABLE_FIELDS:\n if not cleaned_data.get(f\"{field_name}_override\", False):\n cleaned_data[field_name] = getattr(job_class, field_name)\n return cleaned_data\n\n\nclass JobFilterForm(BootstrapMixin, forms.Form):\n model = Job\n q = forms.CharField(required=False, label=\"Search\")\n installed = forms.NullBooleanField(\n initial=True,\n required=False,\n widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES),\n )\n enabled = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n has_sensitive_variables = forms.NullBooleanField(\n required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES)\n )\n commit_default = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n hidden = forms.NullBooleanField(\n initial=False,\n required=False,\n widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES),\n )\n read_only = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n approval_required = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n is_job_hook_receiver = forms.NullBooleanField(\n initial=False,\n required=False,\n widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES),\n )\n tag = TagFilterField(model)\n\n\nclass JobHookForm(BootstrapMixin, forms.ModelForm):\n content_types = MultipleContentTypeField(\n queryset=ChangeLoggedModelsQuery().as_queryset(), required=True, label=\"Content Type(s)\"\n )\n\n class Meta:\n model = JobHook\n fields = (\n \"name\",\n \"content_types\",\n \"job\",\n \"enabled\",\n \"type_create\",\n \"type_update\",\n \"type_delete\",\n )\n\n def clean(self):\n data = super().clean()\n\n conflicts = JobHook.check_for_conflicts(\n instance=self.instance,\n content_types=self.cleaned_data.get(\"content_types\"),\n job=self.cleaned_data.get(\"job\"),\n type_create=self.cleaned_data.get(\"type_create\"),\n type_update=self.cleaned_data.get(\"type_update\"),\n 
type_delete=self.cleaned_data.get(\"type_delete\"),\n )\n\n if conflicts:\n raise ValidationError(conflicts)\n\n return data\n\n\nclass JobHookFilterForm(BootstrapMixin, forms.Form):\n model = JobHook\n q = forms.CharField(required=False, label=\"Search\")\n content_types = MultipleContentTypeField(\n queryset=ChangeLoggedModelsQuery().as_queryset(),\n choices_as_strings=True,\n required=False,\n label=\"Content Type(s)\",\n )\n enabled = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n job = DynamicModelMultipleChoiceField(\n label=\"Job\",\n queryset=Job.objects.all(),\n required=False,\n to_field_name=\"slug\",\n widget=APISelectMultiple(api_url=\"/api/extras/jobs/\", api_version=\"1.3\"),\n )\n type_create = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n type_update = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n type_delete = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n\n\nclass JobScheduleForm(BootstrapMixin, forms.Form):\n \"\"\"\n This form is rendered alongside the JobForm but deals specifically with the fields needed to either\n execute the job immediately, or schedule it for later. Each field name is prefixed with an underscore\n because in the POST body, they share a namespace with the JobForm which includes fields defined by the\n job author, so the underscore prefix helps to avoid name collisions.\n \"\"\"\n\n _schedule_type = forms.ChoiceField(\n choices=JobExecutionType,\n help_text=\"The job can either run immediately, once in the future, or on a recurring schedule.\",\n label=\"Type\",\n )\n _schedule_name = forms.CharField(\n required=False,\n label=\"Schedule name\",\n help_text=\"Name for the job schedule.\",\n )\n _schedule_start_time = forms.DateTimeField(\n required=False,\n label=\"Starting date and time\",\n widget=DateTimePicker(),\n )\n _recurrence_custom_time = forms.CharField(\n required=False,\n label=\"Crontab\",\n help_text=\"Custom crontab syntax (* * * * *)\",\n )\n\n def clean(self):\n \"\"\"\n Validate all required information is present if the job needs to be scheduled\n \"\"\"\n cleaned_data = super().clean()\n\n if \"_schedule_type\" in cleaned_data and cleaned_data.get(\"_schedule_type\") != JobExecutionType.TYPE_IMMEDIATELY:\n if not cleaned_data.get(\"_schedule_name\"):\n raise ValidationError({\"_schedule_name\": \"Please provide a name for the job schedule.\"})\n\n if (\n not cleaned_data.get(\"_schedule_start_time\")\n and cleaned_data.get(\"_schedule_type\") != JobExecutionType.TYPE_CUSTOM\n ) or (\n cleaned_data.get(\"_schedule_start_time\")\n and cleaned_data.get(\"_schedule_start_time\") < ScheduledJob.earliest_possible_time()\n ):\n raise ValidationError(\n {\n \"_schedule_start_time\": \"Please enter a valid date and time greater than or equal to the current date and time.\"\n }\n )\n\n if cleaned_data.get(\"_schedule_type\") == JobExecutionType.TYPE_CUSTOM:\n try:\n ScheduledJob.get_crontab(cleaned_data.get(\"_recurrence_custom_time\"))\n except Exception as e:\n raise ValidationError({\"_recurrence_custom_time\": e})\n\n\nclass JobResultFilterForm(BootstrapMixin, forms.Form):\n model = JobResult\n q = forms.CharField(required=False, label=\"Search\")\n job_model = DynamicModelMultipleChoiceField(\n label=\"Job\",\n queryset=Job.objects.all(),\n required=False,\n to_field_name=\"slug\",\n 
widget=APISelectMultiple(api_url=\"/api/extras/jobs/\", api_version=\"1.3\"),\n )\n # FIXME(glenn) Filtering by obj_type?\n name = forms.CharField(required=False)\n user = DynamicModelMultipleChoiceField(\n queryset=get_user_model().objects.all(),\n required=False,\n label=\"User\",\n widget=APISelectMultiple(\n api_url=\"/api/users/users/\",\n ),\n )\n status = forms.MultipleChoiceField(\n choices=JobResultStatusChoices,\n required=False,\n widget=StaticSelect2Multiple(),\n )\n\n\nclass ScheduledJobFilterForm(BootstrapMixin, forms.Form):\n model = ScheduledJob\n q = forms.CharField(required=False, label=\"Search\")\n name = forms.CharField(required=False)\n job_model = DynamicModelMultipleChoiceField(\n label=\"Job\",\n queryset=Job.objects.all(),\n required=False,\n to_field_name=\"slug\",\n widget=APISelectMultiple(api_url=\"/api/extras/job-models/\"),\n )\n total_run_count = forms.IntegerField(required=False)\n\n\n#\n# Notes\n#\n\n\nclass NoteForm(BootstrapMixin, forms.ModelForm):\n note = CommentField\n\n class Meta:\n model = Note\n fields = [\"assigned_object_type\", \"assigned_object_id\", \"note\"]\n widgets = {\n \"assigned_object_type\": forms.HiddenInput,\n \"assigned_object_id\": forms.HiddenInput,\n }\n\n\n#\n# Filter form for local config context data\n#\n\n\nclass LocalContextFilterForm(forms.Form):\n local_context_data = forms.NullBooleanField(\n required=False,\n label=\"Has local config context data\",\n widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES),\n )\n local_context_schema = DynamicModelMultipleChoiceField(\n queryset=ConfigContextSchema.objects.all(), to_field_name=\"slug\", required=False\n )\n\n\n#\n# Model form for local config context data\n#\n\n\nclass LocalContextModelForm(forms.ModelForm):\n local_context_schema = DynamicModelChoiceField(queryset=ConfigContextSchema.objects.all(), required=False)\n local_context_data = JSONField(required=False, label=\"\")\n\n\nclass LocalContextModelBulkEditForm(BulkEditForm):\n local_context_schema = DynamicModelChoiceField(queryset=ConfigContextSchema.objects.all(), required=False)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # append nullable fields\n self.nullable_fields.append(\"local_context_schema\")\n\n\n#\n# Change logging\n#\n\n\nclass ObjectChangeFilterForm(BootstrapMixin, forms.Form):\n model = ObjectChange\n q = forms.CharField(required=False, label=\"Search\")\n time__gte = forms.DateTimeField(label=\"After\", required=False, widget=DateTimePicker())\n time__lte = forms.DateTimeField(label=\"Before\", required=False, widget=DateTimePicker())\n action = forms.ChoiceField(\n choices=add_blank_choice(ObjectChangeActionChoices),\n required=False,\n widget=StaticSelect2(),\n )\n user_id = DynamicModelMultipleChoiceField(\n queryset=get_user_model().objects.all(),\n required=False,\n label=\"User\",\n widget=APISelectMultiple(\n api_url=\"/api/users/users/\",\n ),\n )\n changed_object_type_id = DynamicModelMultipleChoiceField(\n queryset=ContentType.objects.all(),\n required=False,\n label=\"Object Type\",\n widget=APISelectMultiple(\n api_url=\"/api/extras/content-types/\",\n ),\n )\n\n\n#\n# Relationship\n#\n\n\nclass RelationshipForm(BootstrapMixin, forms.ModelForm):\n\n slug = SlugField(help_text=\"Internal name of this relationship. 
Please use underscores rather than dashes.\")\n source_type = forms.ModelChoiceField(\n queryset=ContentType.objects.filter(FeatureQuery(\"relationships\").get_query()).order_by(\"app_label\", \"model\"),\n help_text=\"The source object type to which this relationship applies.\",\n )\n source_filter = JSONField(\n required=False,\n help_text=\"Filterset filter matching the applicable source objects of the selected type.<br>\"\n 'Enter in <a href=\"https://json.org/\">JSON</a> format.',\n )\n destination_type = forms.ModelChoiceField(\n queryset=ContentType.objects.filter(FeatureQuery(\"relationships\").get_query()).order_by(\"app_label\", \"model\"),\n help_text=\"The destination object type to which this relationship applies.\",\n )\n destination_filter = JSONField(\n required=False,\n help_text=\"Filterset filter matching the applicable destination objects of the selected type.<br>\"\n 'Enter in <a href=\"https://json.org/\">JSON</a> format.',\n )\n\n class Meta:\n model = Relationship\n fields = [\n \"name\",\n \"slug\",\n \"description\",\n \"type\",\n \"advanced_ui\",\n \"source_type\",\n \"source_label\",\n \"source_hidden\",\n \"source_filter\",\n \"destination_type\",\n \"destination_label\",\n \"destination_hidden\",\n \"destination_filter\",\n ]\n\n def save(self, commit=True):\n\n # TODO add support for owner when a CR is created in the UI\n obj = super().save(commit)\n\n return obj\n\n\nclass RelationshipFilterForm(BootstrapMixin, forms.Form):\n model = Relationship\n\n type = forms.MultipleChoiceField(choices=RelationshipTypeChoices, required=False, widget=StaticSelect2Multiple())\n\n source_type = MultipleContentTypeField(\n feature=\"relationships\", choices_as_strings=True, required=False, label=\"Source Type\"\n )\n\n destination_type = MultipleContentTypeField(\n feature=\"relationships\", choices_as_strings=True, required=False, label=\"Destination Type\"\n )\n\n\nclass RelationshipAssociationFilterForm(BootstrapMixin, forms.Form):\n model = RelationshipAssociation\n\n relationship = DynamicModelMultipleChoiceField(\n queryset=Relationship.objects.all(),\n to_field_name=\"slug\",\n required=False,\n )\n\n source_type = MultipleContentTypeField(\n feature=\"relationships\", choices_as_strings=True, required=False, label=\"Source Type\"\n )\n\n destination_type = MultipleContentTypeField(\n feature=\"relationships\", choices_as_strings=True, required=False, label=\"Destination Type\"\n )\n\n\n#\n# Secrets\n#\n\n\ndef provider_choices():\n return sorted([(slug, provider.name) for slug, provider in registry[\"secrets_providers\"].items()])\n\n\nclass SecretForm(NautobotModelForm):\n \"\"\"Create/update form for `Secret` objects.\"\"\"\n\n slug = SlugField()\n\n provider = forms.ChoiceField(choices=provider_choices, widget=StaticSelect2())\n\n parameters = JSONField(help_text='Enter parameters in <a href=\"https://json.org/\">JSON</a> format.')\n\n class Meta:\n model = Secret\n fields = [\n \"name\",\n \"slug\",\n \"description\",\n \"provider\",\n \"parameters\",\n \"tags\",\n ]\n\n\nclass SecretCSVForm(CustomFieldModelCSVForm):\n class Meta:\n model = Secret\n fields = Secret.csv_headers\n\n\ndef provider_choices_with_blank():\n return add_blank_choice(sorted([(slug, provider.name) for slug, provider in registry[\"secrets_providers\"].items()]))\n\n\nclass SecretFilterForm(NautobotFilterForm):\n model = Secret\n q = forms.CharField(required=False, label=\"Search\")\n provider = forms.MultipleChoiceField(\n choices=provider_choices_with_blank, 
widget=StaticSelect2Multiple(), required=False\n )\n tag = TagFilterField(model)\n\n\n# Inline formset for use with providing dynamic rows when creating/editing assignments of Secrets to SecretsGroups.\nSecretsGroupAssociationFormSet = inlineformset_factory(\n parent_model=SecretsGroup,\n model=SecretsGroupAssociation,\n fields=(\"access_type\", \"secret_type\", \"secret\"),\n extra=5,\n widgets={\n \"access_type\": StaticSelect2,\n \"secret_type\": StaticSelect2,\n \"secret\": APISelect(api_url=\"/api/extras/secrets/\"),\n },\n)\n\n\nclass SecretsGroupForm(NautobotModelForm):\n \"\"\"Create/update form for `SecretsGroup` objects.\"\"\"\n\n slug = SlugField()\n\n class Meta:\n model = SecretsGroup\n fields = [\n \"name\",\n \"slug\",\n \"description\",\n ]\n\n\nclass SecretsGroupFilterForm(NautobotFilterForm):\n model = SecretsGroup\n q = forms.CharField(required=False, label=\"Search\")\n\n\n#\n# Statuses\n#\n\n\nclass StatusForm(NautobotModelForm):\n \"\"\"Generic create/update form for `Status` objects.\"\"\"\n\n slug = SlugField()\n content_types = MultipleContentTypeField(feature=\"statuses\", label=\"Content Type(s)\")\n\n class Meta:\n model = Status\n widgets = {\"color\": ColorSelect()}\n fields = [\"name\", \"slug\", \"description\", \"content_types\", \"color\"]\n\n\nclass StatusCSVForm(CustomFieldModelCSVForm):\n \"\"\"Generic CSV bulk import form for `Status` objects.\"\"\"\n\n content_types = CSVMultipleContentTypeField(\n feature=\"statuses\",\n choices_as_strings=True,\n help_text=mark_safe(\n \"The object types to which this status applies. Multiple values \"\n \"must be comma-separated and wrapped in double quotes. (e.g. \"\n '<code>\"dcim.device,dcim.rack\"</code>)'\n ),\n label=\"Content type(s)\",\n )\n\n class Meta:\n model = Status\n fields = Status.csv_headers\n help_texts = {\n \"color\": mark_safe(\"RGB color in hexadecimal (e.g. 
<code>00ff00</code>)\"),\n }\n\n\nclass StatusFilterForm(NautobotFilterForm):\n \"\"\"Filtering/search form for `Status` objects.\"\"\"\n\n model = Status\n q = forms.CharField(required=False, label=\"Search\")\n content_types = MultipleContentTypeField(\n feature=\"statuses\", choices_as_strings=True, required=False, label=\"Content Type(s)\"\n )\n color = forms.CharField(max_length=6, required=False, widget=ColorSelect())\n\n\nclass StatusBulkEditForm(NautobotBulkEditForm):\n \"\"\"Bulk edit/delete form for `Status` objects.\"\"\"\n\n pk = forms.ModelMultipleChoiceField(queryset=Status.objects.all(), widget=forms.MultipleHiddenInput)\n color = forms.CharField(max_length=6, required=False, widget=ColorSelect())\n content_types = MultipleContentTypeField(feature=\"statuses\", required=False, label=\"Content Type(s)\")\n\n class Meta:\n nullable_fields = []\n\n\n#\n# Tags\n#\n\n\nclass TagForm(NautobotModelForm):\n slug = SlugField()\n content_types = ModelMultipleChoiceField(\n label=\"Content Type(s)\",\n queryset=TaggableClassesQuery().as_queryset,\n )\n\n class Meta:\n model = Tag\n fields = [\"name\", \"slug\", \"color\", \"description\", \"content_types\"]\n\n def clean(self):\n data = super().clean()\n\n if self.instance.present_in_database:\n # check if tag is assigned to any of the removed content_types\n content_types_id = [content_type.id for content_type in self.cleaned_data[\"content_types\"]]\n errors = self.instance.validate_content_types_removal(content_types_id)\n\n if errors:\n raise ValidationError(errors)\n\n return data\n\n\nclass TagCSVForm(CustomFieldModelCSVForm):\n slug = SlugField()\n\n class Meta:\n model = Tag\n fields = Tag.csv_headers\n help_texts = {\n \"color\": mark_safe(\"RGB color in hexadecimal (e.g. <code>00ff00</code>)\"),\n }\n\n\nclass TagFilterForm(NautobotFilterForm):\n model = Tag\n q = forms.CharField(required=False, label=\"Search\")\n content_types = MultipleContentTypeField(\n choices_as_strings=True,\n required=False,\n label=\"Content Type(s)\",\n queryset=TaggableClassesQuery().as_queryset,\n )\n\n\nclass TagBulkEditForm(NautobotBulkEditForm):\n pk = forms.ModelMultipleChoiceField(queryset=Tag.objects.all(), widget=forms.MultipleHiddenInput)\n color = forms.CharField(max_length=6, required=False, widget=ColorSelect())\n description = forms.CharField(max_length=200, required=False)\n\n class Meta:\n nullable_fields = [\"description\"]\n\n\n#\n# Webhooks\n#\n\n\nclass WebhookForm(BootstrapMixin, forms.ModelForm):\n content_types = MultipleContentTypeField(feature=\"webhooks\", required=False, label=\"Content Type(s)\")\n\n class Meta:\n model = Webhook\n fields = (\n \"name\",\n \"content_types\",\n \"enabled\",\n \"type_create\",\n \"type_update\",\n \"type_delete\",\n \"payload_url\",\n \"http_method\",\n \"http_content_type\",\n \"additional_headers\",\n \"body_template\",\n \"secret\",\n \"ssl_verification\",\n \"ca_file_path\",\n )\n\n def clean(self):\n data = super().clean()\n\n conflicts = Webhook.check_for_conflicts(\n instance=self.instance,\n content_types=self.cleaned_data.get(\"content_types\"),\n payload_url=self.cleaned_data.get(\"payload_url\"),\n type_create=self.cleaned_data.get(\"type_create\"),\n type_update=self.cleaned_data.get(\"type_update\"),\n type_delete=self.cleaned_data.get(\"type_delete\"),\n )\n\n if conflicts:\n raise ValidationError(conflicts)\n\n return data\n\n\nclass WebhookFilterForm(BootstrapMixin, forms.Form):\n model = Webhook\n q = forms.CharField(required=False, label=\"Search\")\n content_types = 
MultipleContentTypeField(\n feature=\"webhooks\", choices_as_strings=True, required=False, label=\"Content Type(s)\"\n )\n type_create = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n type_update = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n type_delete = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n enabled = forms.NullBooleanField(required=False, widget=StaticSelect2(choices=BOOLEAN_WITH_BLANK_CHOICES))\n",
"path": "nautobot/extras/forms/forms.py"
}
] | diff --git a/nautobot/docs/release-notes/version-1.4.md b/nautobot/docs/release-notes/version-1.4.md
index a46247abb70..4193c7697d0 100644
--- a/nautobot/docs/release-notes/version-1.4.md
+++ b/nautobot/docs/release-notes/version-1.4.md
@@ -145,6 +145,8 @@ The `settings_and_registry` default context processor was changed to purely `set
### Fixed
+- [#2178](https://github.com/nautobot/nautobot/issues/2178) - Fixed "invalid filter" error when filtering JobResults in the UI.
+
## v1.4.0rc1 (2022-08-10)
### Added
diff --git a/nautobot/extras/forms/forms.py b/nautobot/extras/forms/forms.py
index 053f159f0dd..accdadcca57 100644
--- a/nautobot/extras/forms/forms.py
+++ b/nautobot/extras/forms/forms.py
@@ -949,10 +949,10 @@ class JobResultFilterForm(BootstrapMixin, forms.Form):
api_url="/api/users/users/",
),
)
- status = forms.ChoiceField(
- choices=add_blank_choice(JobResultStatusChoices),
+ status = forms.MultipleChoiceField(
+ choices=JobResultStatusChoices,
required=False,
- widget=StaticSelect2(),
+ widget=StaticSelect2Multiple(),
)
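
For context on why this one-line change fixes the "invalid filter" error: the filterset behind `JobResultFilterForm` treats `status` as a multiple-value filter, so the form field must accept a list of values rather than a single choice. A standalone sketch of the difference (the choice values are hypothetical stand-ins for `JobResultStatusChoices`, and the `settings.configure()` call is only needed to run this outside a Nautobot/Django project):

```python
import django
from django.conf import settings

settings.configure()  # minimal setup so the sketch runs outside a project
django.setup()

from django import forms

# Hypothetical stand-ins for JobResultStatusChoices
CHOICES = [("pending", "Pending"), ("completed", "Completed")]

class SingleStatusForm(forms.Form):
    status = forms.ChoiceField(choices=CHOICES)

class MultiStatusForm(forms.Form):
    status = forms.MultipleChoiceField(choices=CHOICES)

# A UI filter submits repeated parameters (?status=pending&status=completed),
# which reach the form as a list of values:
data = {"status": ["pending", "completed"]}
print(SingleStatusForm(data).is_valid())  # False -- ChoiceField rejects list input
print(MultiStatusForm(data).is_valid())   # True  -- each value is validated against CHOICES
```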
|
pallets__werkzeug-1539 | ProfilerMiddleware's default filename_format causes ValueError when ProfilerMiddleware is used with profile_dir
## Environment
```
$ sw_vers
ProductName: Mac OS X
ProductVersion: 10.13.6
BuildVersion: 17G3025
$ python --version
Python 3.7.2
$ pip freeze
Click==7.0
Flask==1.0.2
itsdangerous==1.1.0
Jinja2==2.10.1
MarkupSafe==1.1.1
Werkzeug==0.15.2
```
The only Python dependency I installed was Flask, since that's the framework I'm most familiar with. The error I'm describing, however, appears to be contained entirely within Werkzeug.
## Observed Behavior
When `ProfilerMiddleware` is used with its `profile_dir` argument, the following error is raised after a request is sent to the server:
```
Error on request:
Traceback (most recent call last):
File "/dev/jlove-bazaarvoice/werkzeug-profiler-bug/.venv/lib/python3.7/site-packages/werkzeug/serving.py", line 302, in run_wsgi
execute(self.server.app)
File "/dev/jlove-bazaarvoice/werkzeug-profiler-bug/.venv/lib/python3.7/site-packages/werkzeug/serving.py", line 290, in execute
application_iter = app(environ, start_response)
File "/dev/jlove-bazaarvoice/werkzeug-profiler-bug/.venv/lib/python3.7/site-packages/flask/app.py", line 2309, in __call__
return self.wsgi_app(environ, start_response)
File "/dev/jlove-bazaarvoice/werkzeug-profiler-bug/.venv/lib/python3.7/site-packages/werkzeug/middleware/profiler.py", line 119, in __call__
time=time.time(),
ValueError: Unknown format code 'd' for object of type 'float'
```
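The root cause is independent of Werkzeug: the `d` format code in `str.format` accepts only integers, while both `elapsed` and `time.time()` are floats. A minimal demonstration:

```python
# The integer-only "d" format code raises on floats...
try:
    "{:06d}".format(12.5)
except ValueError as exc:
    print(exc)  # Unknown format code 'd' for object of type 'float'

# ...while a float format code handles the same value:
print("{:.0f}".format(12.5))  # "12"
```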
## Expected Behavior
No `ValueError`.
## Steps to Reproduce
1. `pip install flask`
2. Save the following file as app.py.
```python
# app.py
from flask import Flask
from werkzeug.middleware.profiler import ProfilerMiddleware
app = Flask(__name__)
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, profile_dir=".")
@app.route("/", methods=["GET"])
def get_index():
return "Hello, world!"
```
3. Start the server with `FLASK_APP=app.py flask run`.
4. Send a request to the server (e.g. http://127.0.0.1:5000/).
## Workaround/Solution
Slightly modify `ProfilerMiddleware`'s default `filename_format`, replacing the integer format code `d` with the float format code `f`. For example:
```python
app.wsgi_app = ProfilerMiddleware(
app.wsgi_app, profile_dir=".", filename_format="{method}.{path}.{elapsed:06f}ms.{time:f}.prof"
)
```
Both instances of `d` need to be replaced because both `elapsed` and `time` are floating-point numbers.
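Per the middleware's docstring, `filename_format` may also be a callable that receives the WSGI `environ` dict and returns a filename, which avoids format codes entirely. A sketch reusing the `app` object from the reproduction above (note that the callable form does not receive the elapsed time):

```python
import time

def profile_filename(environ):
    # Mirror the default naming scheme, but build the string manually.
    path = environ.get("PATH_INFO", "").strip("/").replace("/", ".") or "root"
    return "{}.{}.{:.0f}.prof".format(environ["REQUEST_METHOD"], path, time.time())

app.wsgi_app = ProfilerMiddleware(app.wsgi_app, profile_dir=".", filename_format=profile_filename)
```

The upstream fix (see the diff below) took the simpler route of changing the default to `{method}.{path}.{elapsed:.0f}ms.{time:.0f}.prof`.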
| [
{
"content": "\"\"\"\nApplication Profiler\n====================\n\nThis module provides a middleware that profiles each request with the\n:mod:`cProfile` module. This can help identify bottlenecks in your code\nthat may be slowing down your application.\n\n.. autoclass:: ProfilerMiddleware\n\n:copyright: 2007 Pallets\n:license: BSD-3-Clause\n\"\"\"\nfrom __future__ import print_function\n\nimport os.path\nimport sys\nimport time\nfrom pstats import Stats\n\ntry:\n from cProfile import Profile\nexcept ImportError:\n from profile import Profile\n\n\nclass ProfilerMiddleware(object):\n \"\"\"Wrap a WSGI application and profile the execution of each\n request. Responses are buffered so that timings are more exact.\n\n If ``stream`` is given, :class:`pstats.Stats` are written to it\n after each request. If ``profile_dir`` is given, :mod:`cProfile`\n data files are saved to that directory, one file per request.\n\n The filename can be customized by passing ``filename_format``. If\n it is a string, it will be formatted using :meth:`str.format` with\n the following fields available:\n\n - ``{method}`` - The request method; GET, POST, etc.\n - ``{path}`` - The request path or 'root' should one not exist.\n - ``{elapsed}`` - The elapsed time of the request.\n - ``{time}`` - The time of the request.\n\n If it is a callable, it will be called with the WSGI ``environ``\n dict and should return a filename.\n\n :param app: The WSGI application to wrap.\n :param stream: Write stats to this stream. Disable with ``None``.\n :param sort_by: A tuple of columns to sort stats by. See\n :meth:`pstats.Stats.sort_stats`.\n :param restrictions: A tuple of restrictions to filter stats by. See\n :meth:`pstats.Stats.print_stats`.\n :param profile_dir: Save profile data files to this directory.\n :param filename_format: Format string for profile data file names,\n or a callable returning a name. See explanation above.\n\n .. code-block:: python\n\n from werkzeug.middleware.profiler import ProfilerMiddleware\n app = ProfilerMiddleware(app)\n\n .. versionchanged:: 0.15\n Stats are written even if ``profile_dir`` is given, and can be\n disable by passing ``stream=None``.\n\n .. versionadded:: 0.15\n Added ``filename_format``.\n\n .. 
versionadded:: 0.9\n Added ``restrictions`` and ``profile_dir``.\n \"\"\"\n\n def __init__(\n self,\n app,\n stream=sys.stdout,\n sort_by=(\"time\", \"calls\"),\n restrictions=(),\n profile_dir=None,\n filename_format=\"{method}.{path}.{elapsed:06d}ms.{time:d}.prof\",\n ):\n self._app = app\n self._stream = stream\n self._sort_by = sort_by\n self._restrictions = restrictions\n self._profile_dir = profile_dir\n self._filename_format = filename_format\n\n def __call__(self, environ, start_response):\n response_body = []\n\n def catching_start_response(status, headers, exc_info=None):\n start_response(status, headers, exc_info)\n return response_body.append\n\n def runapp():\n app_iter = self._app(environ, catching_start_response)\n response_body.extend(app_iter)\n\n if hasattr(app_iter, \"close\"):\n app_iter.close()\n\n profile = Profile()\n start = time.time()\n profile.runcall(runapp)\n body = b\"\".join(response_body)\n elapsed = time.time() - start\n\n if self._profile_dir is not None:\n if callable(self._filename_format):\n filename = self._filename_format(environ)\n else:\n filename = self._filename_format.format(\n method=environ[\"REQUEST_METHOD\"],\n path=(\n environ.get(\"PATH_INFO\").strip(\"/\").replace(\"/\", \".\") or \"root\"\n ),\n elapsed=elapsed * 1000.0,\n time=time.time(),\n )\n filename = os.path.join(self._profile_dir, filename)\n profile.dump_stats(filename)\n\n if self._stream is not None:\n stats = Stats(profile, stream=self._stream)\n stats.sort_stats(*self._sort_by)\n print(\"-\" * 80, file=self._stream)\n print(\"PATH: {!r}\".format(environ.get(\"PATH_INFO\", \"\")), file=self._stream)\n stats.print_stats(*self._restrictions)\n print(\"-\" * 80 + \"\\n\", file=self._stream)\n\n return [body]\n",
"path": "src/werkzeug/middleware/profiler.py"
}
] | [
{
"content": "\"\"\"\nApplication Profiler\n====================\n\nThis module provides a middleware that profiles each request with the\n:mod:`cProfile` module. This can help identify bottlenecks in your code\nthat may be slowing down your application.\n\n.. autoclass:: ProfilerMiddleware\n\n:copyright: 2007 Pallets\n:license: BSD-3-Clause\n\"\"\"\nfrom __future__ import print_function\n\nimport os.path\nimport sys\nimport time\nfrom pstats import Stats\n\ntry:\n from cProfile import Profile\nexcept ImportError:\n from profile import Profile\n\n\nclass ProfilerMiddleware(object):\n \"\"\"Wrap a WSGI application and profile the execution of each\n request. Responses are buffered so that timings are more exact.\n\n If ``stream`` is given, :class:`pstats.Stats` are written to it\n after each request. If ``profile_dir`` is given, :mod:`cProfile`\n data files are saved to that directory, one file per request.\n\n The filename can be customized by passing ``filename_format``. If\n it is a string, it will be formatted using :meth:`str.format` with\n the following fields available:\n\n - ``{method}`` - The request method; GET, POST, etc.\n - ``{path}`` - The request path or 'root' should one not exist.\n - ``{elapsed}`` - The elapsed time of the request.\n - ``{time}`` - The time of the request.\n\n If it is a callable, it will be called with the WSGI ``environ``\n dict and should return a filename.\n\n :param app: The WSGI application to wrap.\n :param stream: Write stats to this stream. Disable with ``None``.\n :param sort_by: A tuple of columns to sort stats by. See\n :meth:`pstats.Stats.sort_stats`.\n :param restrictions: A tuple of restrictions to filter stats by. See\n :meth:`pstats.Stats.print_stats`.\n :param profile_dir: Save profile data files to this directory.\n :param filename_format: Format string for profile data file names,\n or a callable returning a name. See explanation above.\n\n .. code-block:: python\n\n from werkzeug.middleware.profiler import ProfilerMiddleware\n app = ProfilerMiddleware(app)\n\n .. versionchanged:: 0.15\n Stats are written even if ``profile_dir`` is given, and can be\n disable by passing ``stream=None``.\n\n .. versionadded:: 0.15\n Added ``filename_format``.\n\n .. 
versionadded:: 0.9\n Added ``restrictions`` and ``profile_dir``.\n \"\"\"\n\n def __init__(\n self,\n app,\n stream=sys.stdout,\n sort_by=(\"time\", \"calls\"),\n restrictions=(),\n profile_dir=None,\n filename_format=\"{method}.{path}.{elapsed:.0f}ms.{time:.0f}.prof\",\n ):\n self._app = app\n self._stream = stream\n self._sort_by = sort_by\n self._restrictions = restrictions\n self._profile_dir = profile_dir\n self._filename_format = filename_format\n\n def __call__(self, environ, start_response):\n response_body = []\n\n def catching_start_response(status, headers, exc_info=None):\n start_response(status, headers, exc_info)\n return response_body.append\n\n def runapp():\n app_iter = self._app(environ, catching_start_response)\n response_body.extend(app_iter)\n\n if hasattr(app_iter, \"close\"):\n app_iter.close()\n\n profile = Profile()\n start = time.time()\n profile.runcall(runapp)\n body = b\"\".join(response_body)\n elapsed = time.time() - start\n\n if self._profile_dir is not None:\n if callable(self._filename_format):\n filename = self._filename_format(environ)\n else:\n filename = self._filename_format.format(\n method=environ[\"REQUEST_METHOD\"],\n path=(\n environ.get(\"PATH_INFO\").strip(\"/\").replace(\"/\", \".\") or \"root\"\n ),\n elapsed=elapsed * 1000.0,\n time=time.time(),\n )\n filename = os.path.join(self._profile_dir, filename)\n profile.dump_stats(filename)\n\n if self._stream is not None:\n stats = Stats(profile, stream=self._stream)\n stats.sort_stats(*self._sort_by)\n print(\"-\" * 80, file=self._stream)\n print(\"PATH: {!r}\".format(environ.get(\"PATH_INFO\", \"\")), file=self._stream)\n stats.print_stats(*self._restrictions)\n print(\"-\" * 80 + \"\\n\", file=self._stream)\n\n return [body]\n",
"path": "src/werkzeug/middleware/profiler.py"
}
] | diff --git a/CHANGES.rst b/CHANGES.rst
index 0de7ba992..49c29ffbf 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -7,12 +7,15 @@ Unreleased
- Properly handle multi-line header folding in development server in
Python 2.7. (:issue:`1080`)
-- Restore the ``response`` argument to :exc:`exceptions.Unauthorized`.
+- Restore the ``response`` argument to :exc:`~exceptions.Unauthorized`.
(:pr:`1527`)
-- :exc:`exceptions.Unauthorized` doesn't add the ``WWW-Authenticate``
+- :exc:`~exceptions.Unauthorized` doesn't add the ``WWW-Authenticate``
header if ``www_authenticate`` is not given. (:issue:`1516`)
- The default URL converter correctly encodes bytes to string rather
than representing them with ``b''``. (:issue:`1502`)
+- Fix the filename format string in
+ :class:`~middleware.profiler.ProfilerMiddleware` to correctly handle
+ float values. (:issue:`1511`)
Version 0.15.2
diff --git a/src/werkzeug/middleware/profiler.py b/src/werkzeug/middleware/profiler.py
index 289879d94..32a14d9fd 100644
--- a/src/werkzeug/middleware/profiler.py
+++ b/src/werkzeug/middleware/profiler.py
@@ -77,7 +77,7 @@ def __init__(
sort_by=("time", "calls"),
restrictions=(),
profile_dir=None,
- filename_format="{method}.{path}.{elapsed:06d}ms.{time:d}.prof",
+ filename_format="{method}.{path}.{elapsed:.0f}ms.{time:.0f}.prof",
):
self._app = app
self._stream = stream
|
bokeh__bokeh-4826 | Expose the wheel zoom speed in Python
The sensitivity of the wheel zoom cannot be configured on the Python side. However, there is a `speed` property which can be set on the JS side. It would be better to expose this property to the Python side too.
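A minimal sketch of what the Python-side property could look like, added to the body of the `WheelZoomTool` model in `bokeh/models/tools.py` shown below (`Float` is already imported in that module); the `1/600` default is an assumption taken from the BokehJS implementation:

```python
from bokeh.core.properties import Float

# Sketch only: a property for the WheelZoomTool class body, making the
# BokehJS zoom sensitivity configurable from Python. The 1/600 default
# is assumed to mirror the JS side.
speed = Float(default=1.0 / 600, help="""
Speed at which the wheel zooms; smaller values zoom more slowly.
""")
```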
| [
{
"content": "\"\"\" Bokeh comes with a number of interactive tools.\n\nThere are five types of tool interactions:\n\n.. hlist::\n :columns: 5\n\n * Pan/Drag\n * Click/Tap\n * Scroll/Pinch\n * Actions\n * Inspectors\n\nFor the first three comprise the category of gesture tools, and only\none tool for each gesture can be active at any given time. The active\ntool is indicated on the toolbar by a highlight next to to the tool.\nActions are immediate or modal operations that are only activated when\ntheir button in the toolbar is pressed. Inspectors are passive tools that\nmerely report information or annotate the plot in some way, and may\nalways be active regardless of what other tools are currently active.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom ..model import Model\nfrom ..core.properties import abstract, Float, Color\nfrom ..core.properties import (\n Any, Auto, Bool, String, Enum, Instance, Either, List, Dict, Tuple, Override\n)\nfrom ..core.enums import Dimension, Location, Anchor\n\nfrom .annotations import BoxAnnotation, PolyAnnotation\nfrom .callbacks import Callback\nfrom .renderers import Renderer\nfrom .layouts import LayoutDOM, Box\n\n\nclass ToolEvents(Model):\n \"\"\"\n\n \"\"\"\n\n geometries = List(Dict(String, Any))\n\n\n@abstract\nclass Tool(Model):\n \"\"\" A base class for all interactive tool types. ``Tool`` is\n not generally useful to instantiate on its own.\n\n \"\"\"\n\n plot = Instance(\".models.plots.Plot\", help=\"\"\"\n The Plot that this tool will act on.\n \"\"\")\n\n\n@abstract\nclass Action(Tool):\n pass\n\n\n@abstract\nclass Drag(Tool):\n pass\n\n\n@abstract\nclass Scroll(Tool):\n pass\n\n\n@abstract\nclass Tap(Tool):\n pass\n\n\n@abstract\nclass Inspection(Tool):\n pass\n\n\n@abstract\nclass ToolbarBase(LayoutDOM):\n \"\"\" A base class for different toolbars. ``ToolbarBase`` is\n not generally useful to instantiate on its own.\n\n \"\"\"\n\n logo = Enum(\"normal\", \"grey\", help=\"\"\"\n What version of the Bokeh logo to display on the toolbar. If\n set to None, no logo will be displayed.\n \"\"\")\n\n tools = List(Instance(Tool), help=\"\"\"\n A list of tools to add to the plot.\n \"\"\")\n\n # This is an odd case. The sizing is custom handled. In the future we will\n # probably set it as `stretch_width` or `stretch_height` depending on its\n # orientation.\n sizing_mode = Override(default=None)\n\n\nclass Toolbar(ToolbarBase):\n \"\"\" Hold tools to display for a single plot.\n\n \"\"\"\n\n active_drag = Either(Auto, Instance(Drag), help=\"\"\"\n Specify a drag tool to be active when the plot is displayed.\n \"\"\")\n\n active_scroll = Either(Auto, Instance(Scroll), help=\"\"\"\n Specify a scroll/pinch tool to be active when the plot is displayed.\n \"\"\")\n\n active_tap = Either(Auto, Instance(Tap), help=\"\"\"\n Specify a tap/click tool to be active when the plot is displayed.\n \"\"\")\n\n\nclass ToolbarBox(Box):\n \"\"\" A layoutable toolbar that can accept the tools of multiple plots, and\n can merge the tools into a single button for convenience.\n\n \"\"\"\n def _check_empty_layout(self):\n # Overriding the children check from Box. As toolbarbox's children\n # are normally set JS side.\n return None\n\n toolbar_location = Enum(Location, default='right', help=\"\"\"\n Should the toolbar be presented as if it was stuck to the `above`, `right`, `left`, `below`\n edge of a plot. 
Default is `right`.\n \"\"\")\n\n tools = List(Instance(Tool), help=\"\"\"\n A list of tools to add to the plot.\n \"\"\")\n\n merge_tools = Bool(default=True, help=\"\"\"\n Merge all the tools together so there is one tool to control all the plots.\n \"\"\")\n\n logo = Enum(\"normal\", \"grey\", help=\"\"\"\n What version of the Bokeh logo to display on the toolbar. If\n set to None, no logo will be displayed.\n \"\"\")\n\n\nclass PanTool(Drag):\n \"\"\" *toolbar icon*: |pan_icon|\n\n The pan tool allows the user to pan a Plot by left-dragging\n a mouse, or on touch devices by dragging a finger or stylus, across\n the plot region.\n\n The pan tool also activates the border regions of a Plot for \"single\n axis\" panning. For instance, dragging in the vertical border or axis\n will effect a pan in the vertical direction only, with the horizontal\n dimension kept fixed.\n\n .. |pan_icon| image:: /_images/icons/Pan.png\n :height: 18pt\n\n \"\"\"\n\n dimensions = List(Enum(Dimension), default=[\"width\", \"height\"], help=\"\"\"\n Which dimensions the pan tool is constrained to act in. By default\n the pan tool will pan in any dimension, but can be configured to only\n pan horizontally across the width of the plot, or vertically across the\n height of the plot.\n \"\"\")\n\n\nclass WheelZoomTool(Scroll):\n \"\"\" *toolbar icon*: |wheel_zoom_icon|\n\n The wheel zoom tool will zoom the plot in and out, centered on the\n current mouse location.\n\n The wheel zoom tool also activates the border regions of a Plot for\n \"single axis\" zooming. For instance, zooming in the vertical border or\n axis will effect a zoom in the vertical direction only, with the\n horizontal dimension kept fixed.\n\n .. |wheel_zoom_icon| image:: /_images/icons/WheelZoom.png\n :height: 18pt\n\n \"\"\"\n\n dimensions = List(Enum(Dimension), default=[\"width\", \"height\"], help=\"\"\"\n Which dimensions the wheel zoom tool is constrained to act in. By\n default the wheel zoom tool will zoom in any dimension, but can be\n configured to only zoom horizontally across the width of the plot, or\n vertically across the height of the plot.\n \"\"\")\n\n\nclass SaveTool(Action):\n \"\"\" *toolbar icon*: |save_icon|\n\n The save tool is an action. When activated, the tool opens a download dialog\n which allows to save an image reproduction of the plot in PNG format. If\n automatic download is not support by a web browser, the tool falls back to\n opening the generated image in a new tab or window. User then can manually\n save it by right clicking on the image and choosing \"Save As\" (or similar)\n menu item.\n\n .. |save_icon| image:: /_images/icons/Save.png\n :height: 18pt\n\n \"\"\"\n\n\nclass ResetTool(Action):\n \"\"\" *toolbar icon*: |reset_icon|\n\n The reset tool is an action. When activated in the toolbar, the tool\n resets the data bounds of the plot to their values when the plot was\n initially created.\n\n Optionally, the reset tool also resets the plat canvas dimensions to\n their original size\n\n .. |reset_icon| image:: /_images/icons/Reset.png\n :height: 18pt\n \"\"\"\n reset_size = Bool(default=True, help=\"\"\"\n Whether activating the Reset tool should also reset the plot's canvas\n dimensions to their original size.\n \"\"\")\n\n\nclass ResizeTool(Drag):\n \"\"\" *toolbar icon*: |resize_icon|\n\n The resize tool allows the user to left-drag a mouse or drag a finger\n to resize the entire plot area on the screen.\n\n .. 
|resize_icon| image:: /_images/icons/Resize.png\n :height: 18pt\n\n \"\"\"\n\n\nclass TapTool(Tap):\n \"\"\" *toolbar icon*: |tap_select_icon|\n\n The tap selection tool allows the user to select at single points by\n left-clicking a mouse, or tapping with a finger.\n\n See :ref:`userguide_styling_selected_unselected_glyphs` for information\n on styling selected and unselected glyphs.\n\n .. |tap_select_icon| image:: /_images/icons/TapSelect.png\n :height: 18pt\n\n .. note::\n Selections can be comprised of multiple regions, even those\n made by different selection tools. Hold down the <<shift>> key\n while making a selection to append the new selection to any\n previous selection that might exist.\n \"\"\"\n\n names = List(String, help=\"\"\"\n A list of names to query for. If set, only renderers that\n have a matching value for their ``name`` attribute will be used.\n \"\"\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n An explicit list of renderers to hit test again. If unset,\n defaults to all renderers on a plot.\n \"\"\")\n\n behavior = Enum(\"select\", \"inspect\", default=\"select\", help=\"\"\"\n This tool can be configured to either make selections or inspections\n on associated data sources. The difference is that selection changes\n propagate across bokeh and other components (e.g. selection glyph)\n will be notified. Inspecions don't act like this, so it's useful to\n configure `callback` when setting `behavior='inspect'`.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A client-side action specification, like opening a URL, showing\n a dialog box, etc. See :class:`~bokeh.models.actions.Action` for details.\n \"\"\")\n\n\n\n\nclass CrosshairTool(Inspection):\n \"\"\" *toolbar icon*: |inspector_icon|\n\n The crosshair tool is a passive inspector tool. It is generally on\n at all times, but can be configured in the inspector's menu\n associated with the *toolbar icon* shown above.\n\n The crosshair tool draws a crosshair annotation over the plot,\n centered on the current mouse position. The crosshair tool may be\n configured to draw across only one dimension by setting the\n ``dimension`` property to only ``width`` or ``height``.\n\n .. |inspector_icon| image:: /_images/icons/Inspector.png\n :height: 18pt\n\n \"\"\"\n\n dimensions = List(Enum(Dimension), default=[\"width\", \"height\"], help=\"\"\"\n Which dimensions the crosshair tool is to track. By default, both a\n vertical and horizontal line will be drawn. If only \"width\" is supplied,\n only a horizontal line will be drawn. If only \"height\" is supplied,\n only a vertical line will be drawn.\n \"\"\")\n\n line_color = Color(default=\"black\", help=\"\"\"\n A color to use to stroke paths with.\n\n Acceptable values are:\n\n - any of the 147 named `CSS colors`_, e.g ``'green'``, ``'indigo'``\n - an RGB(A) hex value, e.g., ``'#FF0000'``, ``'#44444444'``\n - a 3-tuple of integers (r,g,b) between 0 and 255\n - a 4-tuple of (r,g,b,a) where r,g,b are integers between 0..255 and a is between 0..1\n\n .. 
_CSS colors: http://www.w3schools.com/cssref/css_colornames.asp\n\n \"\"\")\n\n line_width = Float(default=1, help=\"\"\"\n Stroke width in units of pixels.\n \"\"\")\n\n line_alpha = Float(default=1.0, help=\"\"\"\n An alpha value to use to stroke paths with.\n\n Acceptable values are floating point numbers between 0 (transparent)\n and 1 (opaque).\n\n \"\"\")\n\nDEFAULT_BOX_OVERLAY = lambda: BoxAnnotation(\n level=\"overlay\",\n render_mode=\"css\",\n top_units=\"screen\",\n left_units=\"screen\",\n bottom_units=\"screen\",\n right_units=\"screen\",\n fill_color=\"lightgrey\",\n fill_alpha=0.5,\n line_color=\"black\",\n line_alpha=1.0,\n line_width=2,\n line_dash=[4, 4]\n)\n\nclass BoxZoomTool(Drag):\n \"\"\" *toolbar icon*: |box_zoom_icon|\n\n The box zoom tool allows users to define a rectangular\n region of a Plot to zoom to by dragging he mouse or a\n finger over the plot region. The end of the drag\n event indicates the selection region is ready.\n\n .. |box_zoom_icon| image:: /_images/icons/BoxZoom.png\n :height: 18pt\n\n \"\"\"\n\n dimensions = List(Enum(Dimension), default=[\"width\", \"height\"], help=\"\"\"\n Which dimensions the zoom box is to be free in. By default,\n users may freely draw zoom boxes with any dimensions. If only\n \"width\" is supplied, the box will be constrained to span the entire\n vertical space of the plot, only the horizontal dimension can be\n controlled. If only \"height\" is supplied, the box will be constrained\n to span the entire horizontal space of the plot, and the vertical\n dimension can be controlled.\n \"\"\")\n\n overlay = Instance(BoxAnnotation, default=DEFAULT_BOX_OVERLAY, help=\"\"\"\n A shaded annotation drawn to indicate the selection region.\n \"\"\")\n\n match_aspect = Bool(default=False, help=\"\"\"\n Whether the box zoom region should be restricted to have the same\n aspect ratio as the plot region.\n\n .. note::\n If the tool is restricted to one dimension, this value has\n no effect.\n\n \"\"\")\n\n\nclass BoxSelectTool(Drag):\n \"\"\" *toolbar icon*: |box_select_icon|\n\n The box selection tool allows users to make selections on a\n Plot by indicating a rectangular region by dragging the\n mouse or a finger over the plot region. The end of the drag\n event indicates the selection region is ready.\n\n See :ref:`userguide_styling_selected_unselected_glyphs` for information\n on styling selected and unselected glyphs.\n\n\n .. |box_select_icon| image:: /_images/icons/BoxSelect.png\n :height: 18pt\n\n \"\"\"\n\n names = List(String, help=\"\"\"\n A list of names to query for. If set, only renderers that\n have a matching value for their ``name`` attribute will be used.\n \"\"\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n An explicit list of renderers to hit test again. If unset,\n defaults to all renderers on a plot.\n \"\"\")\n\n select_every_mousemove = Bool(False, help=\"\"\"\n Whether a selection computation should happen on every mouse\n event, or only once, when the selection region is completed. Default: False\n \"\"\")\n\n dimensions = List(Enum(Dimension), default=[\"width\", \"height\"], help=\"\"\"\n Which dimensions the box selection is to be free in. By default,\n users may freely draw selections boxes with any dimensions. If only\n \"width\" is supplied, the box will be constrained to span the entire\n vertical space of the plot, only the horizontal dimension can be\n controlled. 
If only \"height\" is supplied, the box will be constrained\n to span the entire horizontal space of the plot, and the vertical\n dimension can be controlled.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser on completion of drawing a selection box.\n The cb_data parameter that is available to the Callback code will contain\n one BoxSelectTool-specific field:\n\n :geometry: object containing the coordinates of the selection box\n \"\"\")\n\n overlay = Instance(BoxAnnotation, default=DEFAULT_BOX_OVERLAY, help=\"\"\"\n A shaded annotation drawn to indicate the selection region.\n \"\"\")\n\nDEFAULT_POLY_OVERLAY = lambda: PolyAnnotation(\n level=\"overlay\",\n xs_units=\"screen\",\n ys_units=\"screen\",\n fill_color=\"lightgrey\",\n fill_alpha=0.5,\n line_color=\"black\",\n line_alpha=1.0,\n line_width=2,\n line_dash=[4, 4]\n)\n\nclass LassoSelectTool(Drag):\n \"\"\" *toolbar icon*: |lasso_select_icon|\n\n The lasso selection tool allows users to make selections on a\n Plot by indicating a free-drawn \"lasso\" region by dragging the\n mouse or a finger over the plot region. The end of the drag\n event indicates the selection region is ready.\n\n See :ref:`userguide_styling_selected_unselected_glyphs` for information\n on styling selected and unselected glyphs.\n\n .. note::\n Selections can be comprised of multiple regions, even those\n made by different selection tools. Hold down the <<shift>> key\n while making a selection to append the new selection to any\n previous selection that might exist.\n\n .. |lasso_select_icon| image:: /_images/icons/LassoSelect.png\n :height: 18pt\n \"\"\"\n\n names = List(String, help=\"\"\"\n A list of names to query for. If set, only renderers that\n have a matching value for their ``name`` attribute will be used.\n \"\"\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n An explicit list of renderers to hit test again. If unset,\n defaults to all renderers on a plot.\n \"\"\")\n\n select_every_mousemove = Bool(True, help=\"\"\"\n Whether a selection computation should happen on every mouse\n event, or only once, when the selection region is completed. Default: True\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser on every selection of a lasso area.\n The cb_data parameter that is available to the Callback code will contain\n one LassoSelectTool-specific field:\n\n :geometry: object containing the coordinates of the lasso area\n \"\"\")\n\n overlay = Instance(PolyAnnotation, default=DEFAULT_POLY_OVERLAY, help=\"\"\"\n A shaded annotation drawn to indicate the selection region.\n \"\"\")\n\n\nclass PolySelectTool(Tap):\n \"\"\" *toolbar icon*: |poly_select_icon|\n\n The polygon selection tool allows users to make selections on a\n Plot by indicating a polygonal region with mouse clicks. single\n clicks (or taps) add successive points to the definition of the\n polygon, and a double click (or tap) indicates the selection\n region is ready.\n\n See :ref:`userguide_styling_selected_unselected_glyphs` for information\n on styling selected and unselected glyphs.\n\n .. note::\n Selections can be comprised of multiple regions, even those\n made by different selection tools. Hold down the <<shift>> key\n while making a selection to append the new selection to any\n previous selection that might exist.\n\n .. |poly_select_icon| image:: /_images/icons/PolygonSelect.png\n :height: 18pt\n \"\"\"\n\n names = List(String, help=\"\"\"\n A list of names to query for. 
If set, only renderers that\n have a matching value for their ``name`` attribute will be used.\n \"\"\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n An explicit list of renderers to hit test again. If unset,\n defaults to all renderers on a plot.\n \"\"\")\n\n overlay = Instance(PolyAnnotation, default=DEFAULT_POLY_OVERLAY, help=\"\"\"\n A shaded annotation drawn to indicate the selection region.\n \"\"\")\n\nclass HoverTool(Inspection):\n \"\"\" *toolbar icon*: |inspector_icon|\n\n The hover tool is a passive inspector tool. It is generally on at\n all times, but can be configured in the inspector's menu associated\n with the *toolbar icon* shown above.\n\n By default, the hover tool displays informational tooltips whenever\n the cursor is directly over a glyph. The data to show comes from the\n glyph's data source, and what is to be displayed is configurable with\n the ``tooltips`` attribute that maps display names to columns in the\n data source, or to special known variables.\n\n Here is an example of how to configure and use the hover tool::\n\n # Add tooltip (name, field) pairs to the tool. See below for a\n # description of possible field values.\n hover.tooltips = [\n (\"index\", \"$index\"),\n (\"(x,y)\", \"($x, $y)\"),\n (\"radius\", \"@radius\"),\n (\"fill color\", \"$color[hex, swatch]:fill_color\"),\n (\"foo\", \"@foo\"),\n (\"bar\", \"@bar\"),\n ]\n\n You can also supply a ``Callback`` to the HoverTool, to build custom\n interactions on hover. In this case you may want to turn the tooltips\n off by setting ``tooltips=None``.\n\n .. warning::\n\n Hover tool does not currently work with the following glyphs:\n\n .. hlist::\n :columns: 3\n\n * annulus\n * arc\n * bezier\n * gear\n * image\n * image_rgba\n * image_url\n * multi_line\n * oval\n * patch\n * quadratic\n * ray\n * segment\n * text\n\n .. |hover_icon| image:: /_images/icons/Inspector.png\n :height: 18pt\n \"\"\"\n\n names = List(String, help=\"\"\"\n A list of names to query for. If set, only renderers that\n have a matching value for their ``name`` attribute will be used.\n \"\"\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n An explicit list of renderers to hit test again. If unset,\n defaults to all renderers on a plot.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the input's value changes. The\n cb_data parameter that is available to the Callback code will contain two\n HoverTool specific fields:\n\n :index: object containing the indices of the hovered points in the data source\n :geometry: object containing the coordinates of the hover cursor\n \"\"\")\n\n tooltips = Either(String, List(Tuple(String, String)),\n default=[\n (\"index\",\"$index\"),\n (\"data (x, y)\",\"($x, $y)\"),\n (\"canvas (x, y)\",\"($sx, $sy)\"),\n ], help=\"\"\"\n The (name, field) pairs describing what the hover tool should\n display when there is a hit.\n\n Field names starting with \"@\" are interpreted as columns on the\n data source. 
For instance, \"@temp\" would look up values to display\n from the \"temp\" column of the data source.\n\n Field names starting with \"$\" are special, known fields:\n\n :$index: index of selected point in the data source\n :$x: x-coordinate under the cursor in data space\n :$y: y-coordinate under the cursor in data space\n :$sx: x-coordinate under the cursor in screen (canvas) space\n :$sy: y-coordinate under the cursor in screen (canvas) space\n :$color: color data from data source, with the syntax:\n ``$color[options]:field_name``. The available options\n are: 'hex' (to display the color as a hex value), and\n 'swatch' to also display a small color swatch.\n\n ``None`` is also a valid value for tooltips. This turns off the\n rendering of tooltips. This is mostly useful when supplying other\n actions on hover via the callback property.\n\n .. note::\n The tooltips attribute can also be configured with a mapping type,\n e.g. ``dict`` or ``OrderedDict``. However, if a ``dict`` is used,\n the visual presentation order is unspecified.\n\n \"\"\").accepts(Dict(String, String), lambda d: list(d.items()))\n\n mode = Enum(\"mouse\", \"hline\", \"vline\", help=\"\"\"\n Whether to consider hover pointer as a point (x/y values), or a\n span on h or v directions.\n \"\"\")\n\n point_policy = Enum(\"snap_to_data\", \"follow_mouse\", \"none\", help=\"\"\"\n Whether the tooltip position should snap to the \"center\" (or other anchor)\n position of the associated glyph, or always follow the current mouse cursor\n position.\n \"\"\")\n\n line_policy = Enum(\"prev\", \"next\", \"nearest\", \"interp\", \"none\", help=\"\"\"\n When showing tooltips for lines, whether the tooltip position should be\n the \"previous\" or \"next\" points on the line, the nearest point to the\n current mouse position, or interpolate along the line to the current\n mouse position.\n \"\"\")\n\n anchor = Enum(Anchor, default=\"center\", help=\"\"\"\n If point policy is set to `\"snap_to_data\"`, `anchor` defines the attachment\n point of a tooltip. The default is to attach to the center of a glyph.\n \"\"\")\n\n attachment = Enum(\"horizontal\", \"vertical\", help=\"\"\"\n Whether tooltip's arrow should appear in the horizontal or vertical dimension.\n \"\"\")\n\nDEFAULT_HELP_TIP = \"Click the question mark to learn more about Bokeh plot tools.\"\nDEFAULT_HELP_URL = \"http://bokeh.pydata.org/en/latest/docs/user_guide/tools.html\"\n\nclass HelpTool(Action):\n \"\"\"\n The help tool is a widget designed to replace the hardcoded 'Help' link.\n The hover text can be customized through the ``help_tooltip`` attribute\n and the redirect site overridden as well.\n \"\"\"\n\n help_tooltip = String(default=DEFAULT_HELP_TIP, help=\"\"\"\n Tooltip displayed when hovering over the help icon.\n \"\"\")\n\n redirect = String(default=DEFAULT_HELP_URL, help=\"\"\"\n Site to be redirected through upon click.\n \"\"\")\n\nclass UndoTool(Action):\n \"\"\" *toolbar icon*: |undo_icon|\n\n Undo tool allows to restore previous state of the plot.\n\n .. |undo_icon| image:: /_images/icons/Undo.png\n :height: 18pt\n \"\"\"\n\nclass RedoTool(Action):\n \"\"\" *toolbar icon*: |redo_icon|\n\n Redo tool reverses the last action performed by undo tool.\n\n .. |redo_icon| image:: /_images/icons/Redo.png\n :height: 18pt\n \"\"\"\n",
"path": "bokeh/models/tools.py"
}
] | [
{
"content": "\"\"\" Bokeh comes with a number of interactive tools.\n\nThere are five types of tool interactions:\n\n.. hlist::\n :columns: 5\n\n * Pan/Drag\n * Click/Tap\n * Scroll/Pinch\n * Actions\n * Inspectors\n\nFor the first three comprise the category of gesture tools, and only\none tool for each gesture can be active at any given time. The active\ntool is indicated on the toolbar by a highlight next to to the tool.\nActions are immediate or modal operations that are only activated when\ntheir button in the toolbar is pressed. Inspectors are passive tools that\nmerely report information or annotate the plot in some way, and may\nalways be active regardless of what other tools are currently active.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom ..model import Model\nfrom ..core.properties import abstract, Float, Color\nfrom ..core.properties import (\n Any, Auto, Bool, String, Enum, Instance, Either, List, Dict, Tuple, Override\n)\nfrom ..core.enums import Dimension, Location, Anchor\n\nfrom .annotations import BoxAnnotation, PolyAnnotation\nfrom .callbacks import Callback\nfrom .renderers import Renderer\nfrom .layouts import LayoutDOM, Box\n\n\nclass ToolEvents(Model):\n \"\"\"\n\n \"\"\"\n\n geometries = List(Dict(String, Any))\n\n\n@abstract\nclass Tool(Model):\n \"\"\" A base class for all interactive tool types. ``Tool`` is\n not generally useful to instantiate on its own.\n\n \"\"\"\n\n plot = Instance(\".models.plots.Plot\", help=\"\"\"\n The Plot that this tool will act on.\n \"\"\")\n\n\n@abstract\nclass Action(Tool):\n pass\n\n\n@abstract\nclass Drag(Tool):\n pass\n\n\n@abstract\nclass Scroll(Tool):\n pass\n\n\n@abstract\nclass Tap(Tool):\n pass\n\n\n@abstract\nclass Inspection(Tool):\n pass\n\n\n@abstract\nclass ToolbarBase(LayoutDOM):\n \"\"\" A base class for different toolbars. ``ToolbarBase`` is\n not generally useful to instantiate on its own.\n\n \"\"\"\n\n logo = Enum(\"normal\", \"grey\", help=\"\"\"\n What version of the Bokeh logo to display on the toolbar. If\n set to None, no logo will be displayed.\n \"\"\")\n\n tools = List(Instance(Tool), help=\"\"\"\n A list of tools to add to the plot.\n \"\"\")\n\n # This is an odd case. The sizing is custom handled. In the future we will\n # probably set it as `stretch_width` or `stretch_height` depending on its\n # orientation.\n sizing_mode = Override(default=None)\n\n\nclass Toolbar(ToolbarBase):\n \"\"\" Hold tools to display for a single plot.\n\n \"\"\"\n\n active_drag = Either(Auto, Instance(Drag), help=\"\"\"\n Specify a drag tool to be active when the plot is displayed.\n \"\"\")\n\n active_scroll = Either(Auto, Instance(Scroll), help=\"\"\"\n Specify a scroll/pinch tool to be active when the plot is displayed.\n \"\"\")\n\n active_tap = Either(Auto, Instance(Tap), help=\"\"\"\n Specify a tap/click tool to be active when the plot is displayed.\n \"\"\")\n\n\nclass ToolbarBox(Box):\n \"\"\" A layoutable toolbar that can accept the tools of multiple plots, and\n can merge the tools into a single button for convenience.\n\n \"\"\"\n def _check_empty_layout(self):\n # Overriding the children check from Box. As toolbarbox's children\n # are normally set JS side.\n return None\n\n toolbar_location = Enum(Location, default='right', help=\"\"\"\n Should the toolbar be presented as if it was stuck to the `above`, `right`, `left`, `below`\n edge of a plot. 
Default is `right`.\n \"\"\")\n\n tools = List(Instance(Tool), help=\"\"\"\n A list of tools to add to the plot.\n \"\"\")\n\n merge_tools = Bool(default=True, help=\"\"\"\n Merge all the tools together so there is one tool to control all the plots.\n \"\"\")\n\n logo = Enum(\"normal\", \"grey\", help=\"\"\"\n What version of the Bokeh logo to display on the toolbar. If\n set to None, no logo will be displayed.\n \"\"\")\n\n\nclass PanTool(Drag):\n \"\"\" *toolbar icon*: |pan_icon|\n\n The pan tool allows the user to pan a Plot by left-dragging\n a mouse, or on touch devices by dragging a finger or stylus, across\n the plot region.\n\n The pan tool also activates the border regions of a Plot for \"single\n axis\" panning. For instance, dragging in the vertical border or axis\n will effect a pan in the vertical direction only, with the horizontal\n dimension kept fixed.\n\n .. |pan_icon| image:: /_images/icons/Pan.png\n :height: 18pt\n\n \"\"\"\n\n dimensions = List(Enum(Dimension), default=[\"width\", \"height\"], help=\"\"\"\n Which dimensions the pan tool is constrained to act in. By default\n the pan tool will pan in any dimension, but can be configured to only\n pan horizontally across the width of the plot, or vertically across the\n height of the plot.\n \"\"\")\n\n\nclass WheelZoomTool(Scroll):\n \"\"\" *toolbar icon*: |wheel_zoom_icon|\n\n The wheel zoom tool will zoom the plot in and out, centered on the\n current mouse location.\n\n The wheel zoom tool also activates the border regions of a Plot for\n \"single axis\" zooming. For instance, zooming in the vertical border or\n axis will effect a zoom in the vertical direction only, with the\n horizontal dimension kept fixed.\n\n .. |wheel_zoom_icon| image:: /_images/icons/WheelZoom.png\n :height: 18pt\n\n \"\"\"\n\n dimensions = List(Enum(Dimension), default=[\"width\", \"height\"], help=\"\"\"\n Which dimensions the wheel zoom tool is constrained to act in. By\n default the wheel zoom tool will zoom in any dimension, but can be\n configured to only zoom horizontally across the width of the plot, or\n vertically across the height of the plot.\n \"\"\")\n\n speed = Float(default=1/600, help=\"\"\"\n Speed at which the wheel zooms. Default is 1/600. Optimal range is between \n 0.001 and 0.09. High values will be clipped. Speed may very between browsers.\n \"\"\")\n\n\n\nclass SaveTool(Action):\n \"\"\" *toolbar icon*: |save_icon|\n\n The save tool is an action. When activated, the tool opens a download dialog\n which allows to save an image reproduction of the plot in PNG format. If\n automatic download is not support by a web browser, the tool falls back to\n opening the generated image in a new tab or window. User then can manually\n save it by right clicking on the image and choosing \"Save As\" (or similar)\n menu item.\n\n .. |save_icon| image:: /_images/icons/Save.png\n :height: 18pt\n\n \"\"\"\n\n\nclass ResetTool(Action):\n \"\"\" *toolbar icon*: |reset_icon|\n\n The reset tool is an action. When activated in the toolbar, the tool\n resets the data bounds of the plot to their values when the plot was\n initially created.\n\n Optionally, the reset tool also resets the plat canvas dimensions to\n their original size\n\n .. 
|reset_icon| image:: /_images/icons/Reset.png\n :height: 18pt\n \"\"\"\n reset_size = Bool(default=True, help=\"\"\"\n Whether activating the Reset tool should also reset the plot's canvas\n dimensions to their original size.\n \"\"\")\n\n\nclass ResizeTool(Drag):\n \"\"\" *toolbar icon*: |resize_icon|\n\n The resize tool allows the user to left-drag a mouse or drag a finger\n to resize the entire plot area on the screen.\n\n .. |resize_icon| image:: /_images/icons/Resize.png\n :height: 18pt\n\n \"\"\"\n\n\nclass TapTool(Tap):\n \"\"\" *toolbar icon*: |tap_select_icon|\n\n The tap selection tool allows the user to select at single points by\n left-clicking a mouse, or tapping with a finger.\n\n See :ref:`userguide_styling_selected_unselected_glyphs` for information\n on styling selected and unselected glyphs.\n\n .. |tap_select_icon| image:: /_images/icons/TapSelect.png\n :height: 18pt\n\n .. note::\n Selections can be comprised of multiple regions, even those\n made by different selection tools. Hold down the <<shift>> key\n while making a selection to append the new selection to any\n previous selection that might exist.\n \"\"\"\n\n names = List(String, help=\"\"\"\n A list of names to query for. If set, only renderers that\n have a matching value for their ``name`` attribute will be used.\n \"\"\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n An explicit list of renderers to hit test again. If unset,\n defaults to all renderers on a plot.\n \"\"\")\n\n behavior = Enum(\"select\", \"inspect\", default=\"select\", help=\"\"\"\n This tool can be configured to either make selections or inspections\n on associated data sources. The difference is that selection changes\n propagate across bokeh and other components (e.g. selection glyph)\n will be notified. Inspecions don't act like this, so it's useful to\n configure `callback` when setting `behavior='inspect'`.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A client-side action specification, like opening a URL, showing\n a dialog box, etc. See :class:`~bokeh.models.actions.Action` for details.\n \"\"\")\n\n\n\n\nclass CrosshairTool(Inspection):\n \"\"\" *toolbar icon*: |inspector_icon|\n\n The crosshair tool is a passive inspector tool. It is generally on\n at all times, but can be configured in the inspector's menu\n associated with the *toolbar icon* shown above.\n\n The crosshair tool draws a crosshair annotation over the plot,\n centered on the current mouse position. The crosshair tool may be\n configured to draw across only one dimension by setting the\n ``dimension`` property to only ``width`` or ``height``.\n\n .. |inspector_icon| image:: /_images/icons/Inspector.png\n :height: 18pt\n\n \"\"\"\n\n dimensions = List(Enum(Dimension), default=[\"width\", \"height\"], help=\"\"\"\n Which dimensions the crosshair tool is to track. By default, both a\n vertical and horizontal line will be drawn. If only \"width\" is supplied,\n only a horizontal line will be drawn. If only \"height\" is supplied,\n only a vertical line will be drawn.\n \"\"\")\n\n line_color = Color(default=\"black\", help=\"\"\"\n A color to use to stroke paths with.\n\n Acceptable values are:\n\n - any of the 147 named `CSS colors`_, e.g ``'green'``, ``'indigo'``\n - an RGB(A) hex value, e.g., ``'#FF0000'``, ``'#44444444'``\n - a 3-tuple of integers (r,g,b) between 0 and 255\n - a 4-tuple of (r,g,b,a) where r,g,b are integers between 0..255 and a is between 0..1\n\n .. 
_CSS colors: http://www.w3schools.com/cssref/css_colornames.asp\n\n \"\"\")\n\n line_width = Float(default=1, help=\"\"\"\n Stroke width in units of pixels.\n \"\"\")\n\n line_alpha = Float(default=1.0, help=\"\"\"\n An alpha value to use to stroke paths with.\n\n Acceptable values are floating point numbers between 0 (transparent)\n and 1 (opaque).\n\n \"\"\")\n\nDEFAULT_BOX_OVERLAY = lambda: BoxAnnotation(\n level=\"overlay\",\n render_mode=\"css\",\n top_units=\"screen\",\n left_units=\"screen\",\n bottom_units=\"screen\",\n right_units=\"screen\",\n fill_color=\"lightgrey\",\n fill_alpha=0.5,\n line_color=\"black\",\n line_alpha=1.0,\n line_width=2,\n line_dash=[4, 4]\n)\n\nclass BoxZoomTool(Drag):\n \"\"\" *toolbar icon*: |box_zoom_icon|\n\n The box zoom tool allows users to define a rectangular\n region of a Plot to zoom to by dragging he mouse or a\n finger over the plot region. The end of the drag\n event indicates the selection region is ready.\n\n .. |box_zoom_icon| image:: /_images/icons/BoxZoom.png\n :height: 18pt\n\n \"\"\"\n\n dimensions = List(Enum(Dimension), default=[\"width\", \"height\"], help=\"\"\"\n Which dimensions the zoom box is to be free in. By default,\n users may freely draw zoom boxes with any dimensions. If only\n \"width\" is supplied, the box will be constrained to span the entire\n vertical space of the plot, only the horizontal dimension can be\n controlled. If only \"height\" is supplied, the box will be constrained\n to span the entire horizontal space of the plot, and the vertical\n dimension can be controlled.\n \"\"\")\n\n overlay = Instance(BoxAnnotation, default=DEFAULT_BOX_OVERLAY, help=\"\"\"\n A shaded annotation drawn to indicate the selection region.\n \"\"\")\n\n match_aspect = Bool(default=False, help=\"\"\"\n Whether the box zoom region should be restricted to have the same\n aspect ratio as the plot region.\n\n .. note::\n If the tool is restricted to one dimension, this value has\n no effect.\n\n \"\"\")\n\n\nclass BoxSelectTool(Drag):\n \"\"\" *toolbar icon*: |box_select_icon|\n\n The box selection tool allows users to make selections on a\n Plot by indicating a rectangular region by dragging the\n mouse or a finger over the plot region. The end of the drag\n event indicates the selection region is ready.\n\n See :ref:`userguide_styling_selected_unselected_glyphs` for information\n on styling selected and unselected glyphs.\n\n\n .. |box_select_icon| image:: /_images/icons/BoxSelect.png\n :height: 18pt\n\n \"\"\"\n\n names = List(String, help=\"\"\"\n A list of names to query for. If set, only renderers that\n have a matching value for their ``name`` attribute will be used.\n \"\"\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n An explicit list of renderers to hit test again. If unset,\n defaults to all renderers on a plot.\n \"\"\")\n\n select_every_mousemove = Bool(False, help=\"\"\"\n Whether a selection computation should happen on every mouse\n event, or only once, when the selection region is completed. Default: False\n \"\"\")\n\n dimensions = List(Enum(Dimension), default=[\"width\", \"height\"], help=\"\"\"\n Which dimensions the box selection is to be free in. By default,\n users may freely draw selections boxes with any dimensions. If only\n \"width\" is supplied, the box will be constrained to span the entire\n vertical space of the plot, only the horizontal dimension can be\n controlled. 
If only \"height\" is supplied, the box will be constrained\n to span the entire horizontal space of the plot, and the vertical\n dimension can be controlled.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser on completion of drawing a selection box.\n The cb_data parameter that is available to the Callback code will contain\n one BoxSelectTool-specific field:\n\n :geometry: object containing the coordinates of the selection box\n \"\"\")\n\n overlay = Instance(BoxAnnotation, default=DEFAULT_BOX_OVERLAY, help=\"\"\"\n A shaded annotation drawn to indicate the selection region.\n \"\"\")\n\nDEFAULT_POLY_OVERLAY = lambda: PolyAnnotation(\n level=\"overlay\",\n xs_units=\"screen\",\n ys_units=\"screen\",\n fill_color=\"lightgrey\",\n fill_alpha=0.5,\n line_color=\"black\",\n line_alpha=1.0,\n line_width=2,\n line_dash=[4, 4]\n)\n\nclass LassoSelectTool(Drag):\n \"\"\" *toolbar icon*: |lasso_select_icon|\n\n The lasso selection tool allows users to make selections on a\n Plot by indicating a free-drawn \"lasso\" region by dragging the\n mouse or a finger over the plot region. The end of the drag\n event indicates the selection region is ready.\n\n See :ref:`userguide_styling_selected_unselected_glyphs` for information\n on styling selected and unselected glyphs.\n\n .. note::\n Selections can be comprised of multiple regions, even those\n made by different selection tools. Hold down the <<shift>> key\n while making a selection to append the new selection to any\n previous selection that might exist.\n\n .. |lasso_select_icon| image:: /_images/icons/LassoSelect.png\n :height: 18pt\n \"\"\"\n\n names = List(String, help=\"\"\"\n A list of names to query for. If set, only renderers that\n have a matching value for their ``name`` attribute will be used.\n \"\"\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n An explicit list of renderers to hit test again. If unset,\n defaults to all renderers on a plot.\n \"\"\")\n\n select_every_mousemove = Bool(True, help=\"\"\"\n Whether a selection computation should happen on every mouse\n event, or only once, when the selection region is completed. Default: True\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser on every selection of a lasso area.\n The cb_data parameter that is available to the Callback code will contain\n one LassoSelectTool-specific field:\n\n :geometry: object containing the coordinates of the lasso area\n \"\"\")\n\n overlay = Instance(PolyAnnotation, default=DEFAULT_POLY_OVERLAY, help=\"\"\"\n A shaded annotation drawn to indicate the selection region.\n \"\"\")\n\n\nclass PolySelectTool(Tap):\n \"\"\" *toolbar icon*: |poly_select_icon|\n\n The polygon selection tool allows users to make selections on a\n Plot by indicating a polygonal region with mouse clicks. single\n clicks (or taps) add successive points to the definition of the\n polygon, and a double click (or tap) indicates the selection\n region is ready.\n\n See :ref:`userguide_styling_selected_unselected_glyphs` for information\n on styling selected and unselected glyphs.\n\n .. note::\n Selections can be comprised of multiple regions, even those\n made by different selection tools. Hold down the <<shift>> key\n while making a selection to append the new selection to any\n previous selection that might exist.\n\n .. |poly_select_icon| image:: /_images/icons/PolygonSelect.png\n :height: 18pt\n \"\"\"\n\n names = List(String, help=\"\"\"\n A list of names to query for. 
If set, only renderers that\n have a matching value for their ``name`` attribute will be used.\n \"\"\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n An explicit list of renderers to hit test again. If unset,\n defaults to all renderers on a plot.\n \"\"\")\n\n overlay = Instance(PolyAnnotation, default=DEFAULT_POLY_OVERLAY, help=\"\"\"\n A shaded annotation drawn to indicate the selection region.\n \"\"\")\n\nclass HoverTool(Inspection):\n \"\"\" *toolbar icon*: |inspector_icon|\n\n The hover tool is a passive inspector tool. It is generally on at\n all times, but can be configured in the inspector's menu associated\n with the *toolbar icon* shown above.\n\n By default, the hover tool displays informational tooltips whenever\n the cursor is directly over a glyph. The data to show comes from the\n glyph's data source, and what is to be displayed is configurable with\n the ``tooltips`` attribute that maps display names to columns in the\n data source, or to special known variables.\n\n Here is an example of how to configure and use the hover tool::\n\n # Add tooltip (name, field) pairs to the tool. See below for a\n # description of possible field values.\n hover.tooltips = [\n (\"index\", \"$index\"),\n (\"(x,y)\", \"($x, $y)\"),\n (\"radius\", \"@radius\"),\n (\"fill color\", \"$color[hex, swatch]:fill_color\"),\n (\"foo\", \"@foo\"),\n (\"bar\", \"@bar\"),\n ]\n\n You can also supply a ``Callback`` to the HoverTool, to build custom\n interactions on hover. In this case you may want to turn the tooltips\n off by setting ``tooltips=None``.\n\n .. warning::\n\n Hover tool does not currently work with the following glyphs:\n\n .. hlist::\n :columns: 3\n\n * annulus\n * arc\n * bezier\n * gear\n * image\n * image_rgba\n * image_url\n * multi_line\n * oval\n * patch\n * quadratic\n * ray\n * segment\n * text\n\n .. |hover_icon| image:: /_images/icons/Inspector.png\n :height: 18pt\n \"\"\"\n\n names = List(String, help=\"\"\"\n A list of names to query for. If set, only renderers that\n have a matching value for their ``name`` attribute will be used.\n \"\"\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n An explicit list of renderers to hit test again. If unset,\n defaults to all renderers on a plot.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the input's value changes. The\n cb_data parameter that is available to the Callback code will contain two\n HoverTool specific fields:\n\n :index: object containing the indices of the hovered points in the data source\n :geometry: object containing the coordinates of the hover cursor\n \"\"\")\n\n tooltips = Either(String, List(Tuple(String, String)),\n default=[\n (\"index\",\"$index\"),\n (\"data (x, y)\",\"($x, $y)\"),\n (\"canvas (x, y)\",\"($sx, $sy)\"),\n ], help=\"\"\"\n The (name, field) pairs describing what the hover tool should\n display when there is a hit.\n\n Field names starting with \"@\" are interpreted as columns on the\n data source. 
For instance, \"@temp\" would look up values to display\n from the \"temp\" column of the data source.\n\n Field names starting with \"$\" are special, known fields:\n\n :$index: index of selected point in the data source\n :$x: x-coordinate under the cursor in data space\n :$y: y-coordinate under the cursor in data space\n :$sx: x-coordinate under the cursor in screen (canvas) space\n :$sy: y-coordinate under the cursor in screen (canvas) space\n :$color: color data from data source, with the syntax:\n ``$color[options]:field_name``. The available options\n are: 'hex' (to display the color as a hex value), and\n 'swatch' to also display a small color swatch.\n\n ``None`` is also a valid value for tooltips. This turns off the\n rendering of tooltips. This is mostly useful when supplying other\n actions on hover via the callback property.\n\n .. note::\n The tooltips attribute can also be configured with a mapping type,\n e.g. ``dict`` or ``OrderedDict``. However, if a ``dict`` is used,\n the visual presentation order is unspecified.\n\n \"\"\").accepts(Dict(String, String), lambda d: list(d.items()))\n\n mode = Enum(\"mouse\", \"hline\", \"vline\", help=\"\"\"\n Whether to consider hover pointer as a point (x/y values), or a\n span on h or v directions.\n \"\"\")\n\n point_policy = Enum(\"snap_to_data\", \"follow_mouse\", \"none\", help=\"\"\"\n Whether the tooltip position should snap to the \"center\" (or other anchor)\n position of the associated glyph, or always follow the current mouse cursor\n position.\n \"\"\")\n\n line_policy = Enum(\"prev\", \"next\", \"nearest\", \"interp\", \"none\", help=\"\"\"\n When showing tooltips for lines, whether the tooltip position should be\n the \"previous\" or \"next\" points on the line, the nearest point to the\n current mouse position, or interpolate along the line to the current\n mouse position.\n \"\"\")\n\n anchor = Enum(Anchor, default=\"center\", help=\"\"\"\n If point policy is set to `\"snap_to_data\"`, `anchor` defines the attachment\n point of a tooltip. The default is to attach to the center of a glyph.\n \"\"\")\n\n attachment = Enum(\"horizontal\", \"vertical\", help=\"\"\"\n Whether tooltip's arrow should appear in the horizontal or vertical dimension.\n \"\"\")\n\nDEFAULT_HELP_TIP = \"Click the question mark to learn more about Bokeh plot tools.\"\nDEFAULT_HELP_URL = \"http://bokeh.pydata.org/en/latest/docs/user_guide/tools.html\"\n\nclass HelpTool(Action):\n \"\"\"\n The help tool is a widget designed to replace the hardcoded 'Help' link.\n The hover text can be customized through the ``help_tooltip`` attribute\n and the redirect site overridden as well.\n \"\"\"\n\n help_tooltip = String(default=DEFAULT_HELP_TIP, help=\"\"\"\n Tooltip displayed when hovering over the help icon.\n \"\"\")\n\n redirect = String(default=DEFAULT_HELP_URL, help=\"\"\"\n Site to be redirected through upon click.\n \"\"\")\n\nclass UndoTool(Action):\n \"\"\" *toolbar icon*: |undo_icon|\n\n Undo tool allows to restore previous state of the plot.\n\n .. |undo_icon| image:: /_images/icons/Undo.png\n :height: 18pt\n \"\"\"\n\nclass RedoTool(Action):\n \"\"\" *toolbar icon*: |redo_icon|\n\n Redo tool reverses the last action performed by undo tool.\n\n .. |redo_icon| image:: /_images/icons/Redo.png\n :height: 18pt\n \"\"\"\n",
"path": "bokeh/models/tools.py"
}
] | diff --git a/bokeh/models/tools.py b/bokeh/models/tools.py
index 856eee41647..e8bbe7d9d16 100644
--- a/bokeh/models/tools.py
+++ b/bokeh/models/tools.py
@@ -197,6 +197,12 @@ class WheelZoomTool(Scroll):
vertically across the height of the plot.
""")
+ speed = Float(default=1/600, help="""
+ Speed at which the wheel zooms. Default is 1/600. Optimal range is between
+ 0.001 and 0.09. High values will be clipped. Speed may vary between browsers.
+ """)
+
+
class SaveTool(Action):
""" *toolbar icon*: |save_icon|
diff --git a/bokehjs/src/coffee/models/tools/gestures/wheel_zoom_tool.coffee b/bokehjs/src/coffee/models/tools/gestures/wheel_zoom_tool.coffee
index eebfac39c5a..b8e5601a46f 100644
--- a/bokehjs/src/coffee/models/tools/gestures/wheel_zoom_tool.coffee
+++ b/bokehjs/src/coffee/models/tools/gestures/wheel_zoom_tool.coffee
@@ -115,11 +115,9 @@ class WheelZoomTool extends GestureTool.Model
@define {
dimensions: [ p.Array, ["width", "height"] ]
+ speed: [ p.Number, 1/600 ]
}
- @internal {
- speed: [ p.Number, 1/600 ]
- }
module.exports =
Model: WheelZoomTool
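
For context, this diff promotes `speed` on `WheelZoomTool` from an `@internal` to a `@define`d property, making it settable from the Python side. A minimal usage sketch, assuming this change is in place (the figure and data are purely illustrative):

```python
from bokeh.models import WheelZoomTool
from bokeh.plotting import figure, show

# speed is now a public property; per the help text, the useful range is
# roughly 0.001 to 0.09 and higher values are clipped on the JS side
wheel_zoom = WheelZoomTool(speed=1 / 300)  # twice the 1/600 default

p = figure(tools=[wheel_zoom])
p.line([1, 2, 3], [4, 6, 5])
show(p)
```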
|
rotki__rotki-2307 | Force restoring a database after adding api keys doesn't work.
## Problem Definition
Reproducing this requires an empty account without premium, or a fresh account set up without premium during account creation.
After logging into this account, go to the API keys page and insert the premium keys, then try to force sync (download) a backed-up database: this fails with an error saying that the user doesn't have premium.
Performing a logout and re-login seems to resolve the issue.
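
A likely culprit, judging from `set_premium_credentials` in the file content below: it updates `self.premium` and the DB, but never hands the new `Premium` object to `PremiumSyncManager`, which holds its own `premium` reference (`deactivate_premium_status` resets that reference explicitly, yet nothing ever sets it). A minimal sketch of a fix under that assumption; only the propagation line and its comment are new, the rest is the existing method:

```python
def set_premium_credentials(self, credentials: PremiumCredentials) -> None:
    """Sets the premium credentials for Rotki

    Raises PremiumAuthenticationError if the given key is rejected by the server
    """
    log.info('Setting new premium credentials')
    if self.premium is not None:
        self.premium.set_credentials(credentials)
    else:
        self.premium = premium_create_and_verify(credentials)
    # Assumed fix: mirror deactivate_premium_status() so the sync manager
    # sees the freshly activated Premium object without requiring a re-login
    self.premium_sync_manager.premium = self.premium

    self.data.db.set_rotkehlchen_premium(credentials)
```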
| [
{
"content": "#!/usr/bin/env python\n\nimport argparse\nimport logging.config\nimport os\nimport time\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, DefaultDict, Dict, List, Optional, Tuple, Union, overload\n\nimport gevent\nfrom gevent.lock import Semaphore\nfrom typing_extensions import Literal\n\nfrom rotkehlchen.accounting.accountant import Accountant\nfrom rotkehlchen.accounting.structures import Balance\nfrom rotkehlchen.assets.asset import Asset\nfrom rotkehlchen.assets.resolver import AssetResolver\nfrom rotkehlchen.balances.manual import account_for_manually_tracked_balances\nfrom rotkehlchen.chain.ethereum.manager import (\n ETHEREUM_NODES_TO_CONNECT_AT_START,\n EthereumManager,\n NodeName,\n)\nfrom rotkehlchen.chain.ethereum.trades import AMMTrade\nfrom rotkehlchen.chain.manager import BlockchainBalancesUpdate, ChainManager\nfrom rotkehlchen.chain.substrate.manager import SubstrateManager\nfrom rotkehlchen.chain.substrate.typing import SubstrateChain\nfrom rotkehlchen.chain.substrate.utils import KUSAMA_NODES_TO_CONNECT_AT_START\nfrom rotkehlchen.config import default_data_directory\nfrom rotkehlchen.constants.misc import ZERO\nfrom rotkehlchen.data.importer import DataImporter\nfrom rotkehlchen.data_handler import DataHandler\nfrom rotkehlchen.db.settings import DBSettings, ModifiableDBSettings\nfrom rotkehlchen.errors import (\n EthSyncError,\n InputError,\n PremiumAuthenticationError,\n RemoteError,\n SystemPermissionError,\n)\nfrom rotkehlchen.exchanges.data_structures import AssetMovement, Trade\nfrom rotkehlchen.exchanges.exchange import ExchangeInterface\nfrom rotkehlchen.exchanges.manager import ExchangeManager\nfrom rotkehlchen.externalapis.beaconchain import BeaconChain\nfrom rotkehlchen.externalapis.coingecko import Coingecko\nfrom rotkehlchen.externalapis.cryptocompare import Cryptocompare\nfrom rotkehlchen.externalapis.etherscan import Etherscan\nfrom rotkehlchen.fval import FVal\nfrom rotkehlchen.greenlets import GreenletManager\nfrom rotkehlchen.history import EventsHistorian, PriceHistorian\nfrom rotkehlchen.history.events import FREE_LEDGER_ACTIONS_LIMIT\nfrom rotkehlchen.history.typing import HistoricalPriceOracle\nfrom rotkehlchen.icons import IconManager\nfrom rotkehlchen.inquirer import Inquirer\nfrom rotkehlchen.logging import (\n DEFAULT_ANONYMIZED_LOGS,\n LoggingSettings,\n RotkehlchenLogsAdapter,\n configure_logging,\n)\nfrom rotkehlchen.premium.premium import Premium, PremiumCredentials, premium_create_and_verify\nfrom rotkehlchen.premium.sync import PremiumSyncManager\nfrom rotkehlchen.serialization.deserialize import deserialize_location\nfrom rotkehlchen.tasks.manager import DEFAULT_MAX_TASKS_NUM, TaskManager\nfrom rotkehlchen.typing import (\n ApiKey,\n ApiSecret,\n BlockchainAccountData,\n ListOfBlockchainAddresses,\n Location,\n SupportedBlockchain,\n Timestamp,\n)\nfrom rotkehlchen.usage_analytics import maybe_submit_usage_analytics\nfrom rotkehlchen.user_messages import MessagesAggregator\n\nif TYPE_CHECKING:\n from rotkehlchen.chain.bitcoin.xpub import XpubData\n\nlogger = logging.getLogger(__name__)\nlog = RotkehlchenLogsAdapter(logger)\n\nMAIN_LOOP_SECS_DELAY = 10\nFREE_TRADES_LIMIT = 250\nFREE_ASSET_MOVEMENTS_LIMIT = 100\n\nLIMITS_MAPPING = {\n 'trade': FREE_TRADES_LIMIT,\n 'asset_movement': FREE_ASSET_MOVEMENTS_LIMIT,\n 'ledger_action': FREE_LEDGER_ACTIONS_LIMIT,\n}\n\nICONS_BATCH_SIZE = 5\nICONS_QUERY_SLEEP = 10\n\n\nTRADES_LIST = List[Union[Trade, AMMTrade]]\n\n\nclass Rotkehlchen():\n 
def __init__(self, args: argparse.Namespace) -> None:\n \"\"\"Initialize the Rotkehlchen object\n\n May Raise:\n - SystemPermissionError if the given data directory's permissions\n are not correct.\n \"\"\"\n self.lock = Semaphore()\n self.lock.acquire()\n\n # Can also be None after unlock if premium credentials did not\n # authenticate or premium server temporarily offline\n self.premium: Optional[Premium] = None\n self.user_is_logged_in: bool = False\n configure_logging(args)\n\n self.sleep_secs = args.sleep_secs\n if args.data_dir is None:\n self.data_dir = default_data_directory()\n else:\n self.data_dir = Path(args.data_dir)\n\n if not os.access(self.data_dir, os.W_OK | os.R_OK):\n raise SystemPermissionError(\n f'The given data directory {self.data_dir} is not readable or writable',\n )\n self.main_loop_spawned = False\n self.args = args\n self.api_task_greenlets: List[gevent.Greenlet] = []\n self.msg_aggregator = MessagesAggregator()\n self.greenlet_manager = GreenletManager(msg_aggregator=self.msg_aggregator)\n self.exchange_manager = ExchangeManager(msg_aggregator=self.msg_aggregator)\n # Initialize the AssetResolver singleton\n AssetResolver(data_directory=self.data_dir)\n self.data = DataHandler(self.data_dir, self.msg_aggregator)\n self.cryptocompare = Cryptocompare(data_directory=self.data_dir, database=None)\n self.coingecko = Coingecko(data_directory=self.data_dir)\n self.icon_manager = IconManager(data_dir=self.data_dir, coingecko=self.coingecko)\n self.greenlet_manager.spawn_and_track(\n after_seconds=None,\n task_name='periodically_query_icons_until_all_cached',\n exception_is_error=False,\n method=self.icon_manager.periodically_query_icons_until_all_cached,\n batch_size=ICONS_BATCH_SIZE,\n sleep_time_secs=ICONS_QUERY_SLEEP,\n )\n # Initialize the Inquirer singleton\n Inquirer(\n data_dir=self.data_dir,\n cryptocompare=self.cryptocompare,\n coingecko=self.coingecko,\n )\n # Keeps how many trades we have found per location. 
Used for free user limiting\n self.actions_per_location: Dict[str, Dict[Location, int]] = {\n 'trade': defaultdict(int),\n 'asset_movement': defaultdict(int),\n }\n\n self.lock.release()\n self.task_manager: Optional[TaskManager] = None\n self.shutdown_event = gevent.event.Event()\n\n def reset_after_failed_account_creation_or_login(self) -> None:\n \"\"\"If the account creation or login failed make sure that the Rotki instance is clear\n\n Tricky instances are when after either failed premium credentials or user refusal\n to sync premium databases we relogged in.\n \"\"\"\n self.cryptocompare.db = None\n\n def unlock_user(\n self,\n user: str,\n password: str,\n create_new: bool,\n sync_approval: Literal['yes', 'no', 'unknown'],\n premium_credentials: Optional[PremiumCredentials],\n initial_settings: Optional[ModifiableDBSettings] = None,\n ) -> None:\n \"\"\"Unlocks an existing user or creates a new one if `create_new` is True\n\n May raise:\n - PremiumAuthenticationError if the password can't unlock the database.\n - AuthenticationError if premium_credentials are given and are invalid\n or can't authenticate with the server\n - DBUpgradeError if the rotki DB version is newer than the software or\n there is a DB upgrade and there is an error.\n - SystemPermissionError if the directory or DB file can not be accessed\n \"\"\"\n log.info(\n 'Unlocking user',\n user=user,\n create_new=create_new,\n sync_approval=sync_approval,\n initial_settings=initial_settings,\n )\n\n # unlock or create the DB\n self.password = password\n self.user_directory = self.data.unlock(user, password, create_new, initial_settings)\n self.data_importer = DataImporter(db=self.data.db)\n self.last_data_upload_ts = self.data.db.get_last_data_upload_ts()\n self.premium_sync_manager = PremiumSyncManager(data=self.data, password=password)\n # set the DB in the external services instances that need it\n self.cryptocompare.set_database(self.data.db)\n\n # Anything that was set above here has to be cleaned in case of failure in the next step\n # by reset_after_failed_account_creation_or_login()\n try:\n self.premium = self.premium_sync_manager.try_premium_at_start(\n given_premium_credentials=premium_credentials,\n username=user,\n create_new=create_new,\n sync_approval=sync_approval,\n )\n except PremiumAuthenticationError:\n # Reraise it only if this is during the creation of a new account where\n # the premium credentials were given by the user\n if create_new:\n raise\n self.msg_aggregator.add_warning(\n 'Could not authenticate the Rotki premium API keys found in the DB.'\n ' Has your subscription expired?',\n )\n # else let's just continue. 
User signed in succesfully, but he just\n # has unauthenticable/invalid premium credentials remaining in his DB\n\n settings = self.get_settings()\n self.greenlet_manager.spawn_and_track(\n after_seconds=None,\n task_name='submit_usage_analytics',\n exception_is_error=False,\n method=maybe_submit_usage_analytics,\n should_submit=settings.submit_usage_analytics,\n )\n self.etherscan = Etherscan(database=self.data.db, msg_aggregator=self.msg_aggregator)\n self.beaconchain = BeaconChain(database=self.data.db, msg_aggregator=self.msg_aggregator)\n eth_rpc_endpoint = settings.eth_rpc_endpoint\n # Initialize the price historian singleton\n PriceHistorian(\n data_directory=self.data_dir,\n cryptocompare=self.cryptocompare,\n coingecko=self.coingecko,\n )\n PriceHistorian().set_oracles_order(settings.historical_price_oracles)\n\n self.accountant = Accountant(\n db=self.data.db,\n user_directory=self.user_directory,\n msg_aggregator=self.msg_aggregator,\n create_csv=True,\n )\n\n # Initialize the rotkehlchen logger\n LoggingSettings(anonymized_logs=settings.anonymized_logs)\n exchange_credentials = self.data.db.get_exchange_credentials()\n self.exchange_manager.initialize_exchanges(\n exchange_credentials=exchange_credentials,\n database=self.data.db,\n )\n\n # Initialize blockchain querying modules\n ethereum_manager = EthereumManager(\n ethrpc_endpoint=eth_rpc_endpoint,\n etherscan=self.etherscan,\n database=self.data.db,\n msg_aggregator=self.msg_aggregator,\n greenlet_manager=self.greenlet_manager,\n connect_at_start=ETHEREUM_NODES_TO_CONNECT_AT_START,\n )\n kusama_manager = SubstrateManager(\n chain=SubstrateChain.KUSAMA,\n msg_aggregator=self.msg_aggregator,\n greenlet_manager=self.greenlet_manager,\n connect_at_start=KUSAMA_NODES_TO_CONNECT_AT_START,\n connect_on_startup=self._connect_ksm_manager_on_startup(),\n own_rpc_endpoint=settings.ksm_rpc_endpoint,\n )\n\n Inquirer().inject_ethereum(ethereum_manager)\n Inquirer().set_oracles_order(settings.current_price_oracles)\n\n self.chain_manager = ChainManager(\n blockchain_accounts=self.data.db.get_blockchain_accounts(),\n ethereum_manager=ethereum_manager,\n kusama_manager=kusama_manager,\n msg_aggregator=self.msg_aggregator,\n database=self.data.db,\n greenlet_manager=self.greenlet_manager,\n premium=self.premium,\n eth_modules=settings.active_modules,\n data_directory=self.data_dir,\n beaconchain=self.beaconchain,\n btc_derivation_gap_limit=settings.btc_derivation_gap_limit,\n )\n self.events_historian = EventsHistorian(\n user_directory=self.user_directory,\n db=self.data.db,\n msg_aggregator=self.msg_aggregator,\n exchange_manager=self.exchange_manager,\n chain_manager=self.chain_manager,\n )\n self.task_manager = TaskManager(\n max_tasks_num=DEFAULT_MAX_TASKS_NUM,\n greenlet_manager=self.greenlet_manager,\n api_task_greenlets=self.api_task_greenlets,\n database=self.data.db,\n cryptocompare=self.cryptocompare,\n premium_sync_manager=self.premium_sync_manager,\n chain_manager=self.chain_manager,\n exchange_manager=self.exchange_manager,\n )\n self.user_is_logged_in = True\n log.debug('User unlocking complete')\n\n def logout(self) -> None:\n if not self.user_is_logged_in:\n return\n\n user = self.data.username\n log.info(\n 'Logging out user',\n user=user,\n )\n self.greenlet_manager.clear()\n del self.chain_manager\n self.exchange_manager.delete_all_exchanges()\n\n # Reset rotkehlchen logger to default\n LoggingSettings(anonymized_logs=DEFAULT_ANONYMIZED_LOGS)\n\n del self.accountant\n del self.events_historian\n del 
self.data_importer\n\n if self.premium is not None:\n del self.premium\n self.data.logout()\n self.password = ''\n self.cryptocompare.unset_database()\n\n # Make sure no messages leak to other user sessions\n self.msg_aggregator.consume_errors()\n self.msg_aggregator.consume_warnings()\n self.task_manager = None\n\n self.user_is_logged_in = False\n log.info(\n 'User successfully logged out',\n user=user,\n )\n\n def set_premium_credentials(self, credentials: PremiumCredentials) -> None:\n \"\"\"\n Sets the premium credentials for Rotki\n\n Raises PremiumAuthenticationError if the given key is rejected by the Rotkehlchen server\n \"\"\"\n log.info('Setting new premium credentials')\n if self.premium is not None:\n self.premium.set_credentials(credentials)\n else:\n self.premium = premium_create_and_verify(credentials)\n\n self.data.db.set_rotkehlchen_premium(credentials)\n\n def delete_premium_credentials(self) -> Tuple[bool, str]:\n \"\"\"Deletes the premium credentials for Rotki\"\"\"\n msg = ''\n\n success = self.data.db.del_rotkehlchen_premium()\n if success is False:\n msg = 'The database was unable to delete the Premium keys for the logged-in user'\n self.deactivate_premium_status()\n return success, msg\n\n def deactivate_premium_status(self) -> None:\n \"\"\"Deactivate premium in the current session\"\"\"\n self.premium = None\n self.premium_sync_manager.premium = None\n self.chain_manager.deactivate_premium_status()\n\n def start(self) -> gevent.Greenlet:\n assert not self.main_loop_spawned, 'Tried to spawn the main loop twice'\n greenlet = gevent.spawn(self.main_loop)\n self.main_loop_spawned = True\n return greenlet\n\n def main_loop(self) -> None:\n \"\"\"Rotki main loop that fires often and runs the task manager's scheduler\"\"\"\n while self.shutdown_event.wait(timeout=MAIN_LOOP_SECS_DELAY) is not True:\n if self.task_manager is not None:\n self.task_manager.schedule()\n\n def get_blockchain_account_data(\n self,\n blockchain: SupportedBlockchain,\n ) -> Union[List[BlockchainAccountData], Dict[str, Any]]:\n account_data = self.data.db.get_blockchain_account_data(blockchain)\n if blockchain != SupportedBlockchain.BITCOIN:\n return account_data\n\n xpub_data = self.data.db.get_bitcoin_xpub_data()\n addresses_to_account_data = {x.address: x for x in account_data}\n address_to_xpub_mappings = self.data.db.get_addresses_to_xpub_mapping(\n list(addresses_to_account_data.keys()), # type: ignore\n )\n\n xpub_mappings: Dict['XpubData', List[BlockchainAccountData]] = {}\n for address, xpub_entry in address_to_xpub_mappings.items():\n if xpub_entry not in xpub_mappings:\n xpub_mappings[xpub_entry] = []\n xpub_mappings[xpub_entry].append(addresses_to_account_data[address])\n\n data: Dict[str, Any] = {'standalone': [], 'xpubs': []}\n # Add xpub data\n for xpub_entry in xpub_data:\n data_entry = xpub_entry.serialize()\n addresses = xpub_mappings.get(xpub_entry, None)\n data_entry['addresses'] = addresses if addresses and len(addresses) != 0 else None\n data['xpubs'].append(data_entry)\n # Add standalone addresses\n for account in account_data:\n if account.address not in address_to_xpub_mappings:\n data['standalone'].append(account)\n\n return data\n\n def add_blockchain_accounts(\n self,\n blockchain: SupportedBlockchain,\n account_data: List[BlockchainAccountData],\n ) -> BlockchainBalancesUpdate:\n \"\"\"Adds new blockchain accounts\n\n Adds the accounts to the blockchain instance and queries them to get the\n updated balances. 
Also adds them in the DB\n\n May raise:\n - EthSyncError from modify_blockchain_account\n - InputError if the given accounts list is empty.\n - TagConstraintError if any of the given account data contain unknown tags.\n - RemoteError if an external service such as Etherscan is queried and\n there is a problem with its query.\n \"\"\"\n self.data.db.ensure_tags_exist(\n given_data=account_data,\n action='adding',\n data_type='blockchain accounts',\n )\n address_type = blockchain.get_address_type()\n updated_balances = self.chain_manager.add_blockchain_accounts(\n blockchain=blockchain,\n accounts=[address_type(entry.address) for entry in account_data],\n )\n self.data.db.add_blockchain_accounts(\n blockchain=blockchain,\n account_data=account_data,\n )\n\n return updated_balances\n\n def edit_blockchain_accounts(\n self,\n blockchain: SupportedBlockchain,\n account_data: List[BlockchainAccountData],\n ) -> None:\n \"\"\"Edits blockchain accounts\n\n Edits blockchain account data for the given accounts\n\n May raise:\n - InputError if the given accounts list is empty or if\n any of the accounts to edit do not exist.\n - TagConstraintError if any of the given account data contain unknown tags.\n \"\"\"\n # First check for validity of account data addresses\n if len(account_data) == 0:\n raise InputError('Empty list of blockchain account data to edit was given')\n accounts = [x.address for x in account_data]\n unknown_accounts = set(accounts).difference(self.chain_manager.accounts.get(blockchain))\n if len(unknown_accounts) != 0:\n raise InputError(\n f'Tried to edit unknown {blockchain.value} '\n f'accounts {\",\".join(unknown_accounts)}',\n )\n\n self.data.db.ensure_tags_exist(\n given_data=account_data,\n action='editing',\n data_type='blockchain accounts',\n )\n\n # Finally edit the accounts\n self.data.db.edit_blockchain_accounts(\n blockchain=blockchain,\n account_data=account_data,\n )\n\n def remove_blockchain_accounts(\n self,\n blockchain: SupportedBlockchain,\n accounts: ListOfBlockchainAddresses,\n ) -> BlockchainBalancesUpdate:\n \"\"\"Removes blockchain accounts\n\n Removes the accounts from the blockchain instance and queries them to get\n the updated balances. 
Also removes them from the DB\n\n May raise:\n - RemoteError if an external service such as Etherscan is queried and\n there is a problem with its query.\n - InputError if a non-existing account was given to remove\n \"\"\"\n balances_update = self.chain_manager.remove_blockchain_accounts(\n blockchain=blockchain,\n accounts=accounts,\n )\n self.data.db.remove_blockchain_accounts(blockchain, accounts)\n return balances_update\n\n def get_history_query_status(self) -> Dict[str, str]:\n if self.events_historian.progress < FVal('100'):\n processing_state = self.events_historian.processing_state_name\n progress = self.events_historian.progress / 2\n elif self.accountant.first_processed_timestamp == -1:\n processing_state = 'Processing all retrieved historical events'\n progress = FVal(50)\n else:\n processing_state = 'Processing all retrieved historical events'\n # start_ts is min of the query start or the first action timestamp since action\n # processing can start well before query start to calculate cost basis\n start_ts = min(\n self.accountant.events.query_start_ts,\n self.accountant.first_processed_timestamp,\n )\n diff = self.accountant.events.query_end_ts - start_ts\n progress = 50 + 100 * (\n FVal(self.accountant.currently_processing_timestamp - start_ts) /\n FVal(diff) / 2)\n\n return {'processing_state': str(processing_state), 'total_progress': str(progress)}\n\n def process_history(\n self,\n start_ts: Timestamp,\n end_ts: Timestamp,\n ) -> Tuple[Dict[str, Any], str]:\n (\n error_or_empty,\n history,\n loan_history,\n asset_movements,\n eth_transactions,\n defi_events,\n ledger_actions,\n ) = self.events_historian.get_history(\n start_ts=start_ts,\n end_ts=end_ts,\n has_premium=self.premium is not None,\n )\n result = self.accountant.process_history(\n start_ts=start_ts,\n end_ts=end_ts,\n trade_history=history,\n loan_history=loan_history,\n asset_movements=asset_movements,\n eth_transactions=eth_transactions,\n defi_events=defi_events,\n ledger_actions=ledger_actions,\n )\n return result, error_or_empty\n\n @overload\n def _apply_actions_limit(\n self,\n location: Location,\n action_type: Literal['trade'],\n location_actions: TRADES_LIST,\n all_actions: TRADES_LIST,\n ) -> TRADES_LIST:\n ...\n\n @overload\n def _apply_actions_limit(\n self,\n location: Location,\n action_type: Literal['asset_movement'],\n location_actions: List[AssetMovement],\n all_actions: List[AssetMovement],\n ) -> List[AssetMovement]:\n ...\n\n def _apply_actions_limit(\n self,\n location: Location,\n action_type: Literal['trade', 'asset_movement'],\n location_actions: Union[TRADES_LIST, List[AssetMovement]],\n all_actions: Union[TRADES_LIST, List[AssetMovement]],\n ) -> Union[TRADES_LIST, List[AssetMovement]]:\n \"\"\"Take as many actions from location actions and add them to all actions as the limit permits\n\n Returns the modified (or not) all_actions\n \"\"\"\n # If we are already at or above the limit return current actions disregarding this location\n actions_mapping = self.actions_per_location[action_type]\n current_num_actions = sum(x for _, x in actions_mapping.items())\n limit = LIMITS_MAPPING[action_type]\n if current_num_actions >= limit:\n return all_actions\n\n # Find out how many more actions can we return, and depending on that get\n # the number of actions from the location actions and add them to the total\n remaining_num_actions = limit - current_num_actions\n if remaining_num_actions < 0:\n remaining_num_actions = 0\n\n num_actions_to_take = min(len(location_actions), 
remaining_num_actions)\n\n actions_mapping[location] = num_actions_to_take\n all_actions.extend(location_actions[0:num_actions_to_take]) # type: ignore\n return all_actions\n\n def query_trades(\n self,\n from_ts: Timestamp,\n to_ts: Timestamp,\n location: Optional[Location],\n ) -> TRADES_LIST:\n \"\"\"Queries trades for the given location and time range.\n If no location is given then all external, all exchange and DEX trades are queried.\n\n DEX Trades are queried only if the user has premium\n If the user does not have premium then a trade limit is applied.\n\n May raise:\n - RemoteError: If there are problems connecting to any of the remote exchanges\n \"\"\"\n trades: TRADES_LIST\n if location is not None:\n trades = self.query_location_trades(from_ts, to_ts, location)\n else:\n trades = self.query_location_trades(from_ts, to_ts, Location.EXTERNAL)\n # crypto.com is not an API key supported exchange but user can import from CSV\n trades.extend(self.query_location_trades(from_ts, to_ts, Location.CRYPTOCOM))\n for name, exchange in self.exchange_manager.connected_exchanges.items():\n exchange_trades = exchange.query_trade_history(start_ts=from_ts, end_ts=to_ts)\n if self.premium is None:\n trades = self._apply_actions_limit(\n location=deserialize_location(name),\n action_type='trade',\n location_actions=exchange_trades,\n all_actions=trades,\n )\n else:\n trades.extend(exchange_trades)\n\n # for all trades we also need uniswap trades\n if self.premium is not None:\n uniswap = self.chain_manager.uniswap\n if uniswap is not None:\n trades.extend(\n uniswap.get_trades(\n addresses=self.chain_manager.queried_addresses_for_module('uniswap'),\n from_timestamp=from_ts,\n to_timestamp=to_ts,\n ),\n )\n\n # return trades with most recent first\n trades.sort(key=lambda x: x.timestamp, reverse=True)\n return trades\n\n def query_location_trades(\n self,\n from_ts: Timestamp,\n to_ts: Timestamp,\n location: Location,\n ) -> TRADES_LIST:\n # clear the trades queried for this location\n self.actions_per_location['trade'][location] = 0\n\n location_trades: TRADES_LIST\n if location in (Location.EXTERNAL, Location.CRYPTOCOM):\n location_trades = self.data.db.get_trades( # type: ignore # list invariance\n from_ts=from_ts,\n to_ts=to_ts,\n location=location,\n )\n elif location == Location.UNISWAP:\n if self.premium is not None:\n uniswap = self.chain_manager.uniswap\n if uniswap is not None:\n location_trades = uniswap.get_trades( # type: ignore # list invariance\n addresses=self.chain_manager.queried_addresses_for_module('uniswap'),\n from_timestamp=from_ts,\n to_timestamp=to_ts,\n )\n else:\n # should only be an exchange\n exchange = self.exchange_manager.get(str(location))\n if not exchange:\n logger.warning(\n f'Tried to query trades from {location} which is either not an '\n f'exchange or not an exchange the user has connected to',\n )\n return []\n\n location_trades = exchange.query_trade_history(start_ts=from_ts, end_ts=to_ts)\n\n trades: TRADES_LIST = []\n if self.premium is None:\n trades = self._apply_actions_limit(\n location=location,\n action_type='trade',\n location_actions=location_trades,\n all_actions=trades,\n )\n else:\n trades = location_trades\n\n return trades\n\n def query_balances(\n self,\n requested_save_data: bool = False,\n timestamp: Timestamp = None,\n ignore_cache: bool = False,\n ) -> Dict[str, Any]:\n \"\"\"Query all balances rotkehlchen can see.\n\n If requested_save_data is True then the data are always saved in the DB,\n if it is False then data are saved if 
self.data.should_save_balances()\n is True.\n If timestamp is None then the current timestamp is used.\n If a timestamp is given then that is the time that the balances are going\n to be saved in the DB\n If ignore_cache is True then all underlying calls that have a cache ignore it\n\n Returns a dictionary with the queried balances.\n \"\"\"\n log.info('query_balances called', requested_save_data=requested_save_data)\n\n balances: Dict[str, Dict[Asset, Balance]] = {}\n problem_free = True\n for _, exchange in self.exchange_manager.connected_exchanges.items():\n exchange_balances, _ = exchange.query_balances(ignore_cache=ignore_cache)\n # If we got an error, disregard that exchange but make sure we don't save data\n if not isinstance(exchange_balances, dict):\n problem_free = False\n else:\n balances[exchange.name] = exchange_balances\n\n liabilities: Dict[Asset, Balance]\n try:\n blockchain_result = self.chain_manager.query_balances(\n blockchain=None,\n force_token_detection=ignore_cache,\n ignore_cache=ignore_cache,\n )\n balances[str(Location.BLOCKCHAIN)] = blockchain_result.totals.assets\n liabilities = blockchain_result.totals.liabilities\n except (RemoteError, EthSyncError) as e:\n problem_free = False\n liabilities = {}\n log.error(f'Querying blockchain balances failed due to: {str(e)}')\n\n balances = account_for_manually_tracked_balances(db=self.data.db, balances=balances)\n\n # Calculate usd totals\n assets_total_balance: DefaultDict[Asset, Balance] = defaultdict(Balance)\n total_usd_per_location: Dict[str, FVal] = {}\n for location, asset_balance in balances.items():\n total_usd_per_location[location] = ZERO\n for asset, balance in asset_balance.items():\n assets_total_balance[asset] += balance\n total_usd_per_location[location] += balance.usd_value\n\n net_usd = sum((balance.usd_value for _, balance in assets_total_balance.items()), ZERO)\n liabilities_total_usd = sum((liability.usd_value for _, liability in liabilities.items()), ZERO) # noqa: E501\n net_usd -= liabilities_total_usd\n\n # Calculate location stats\n location_stats: Dict[str, Any] = {}\n for location, total_usd in total_usd_per_location.items():\n if location == str(Location.BLOCKCHAIN):\n total_usd -= liabilities_total_usd\n\n percentage = (total_usd / net_usd).to_percentage() if net_usd != ZERO else '0%'\n location_stats[location] = {\n 'usd_value': total_usd,\n 'percentage_of_net_value': percentage,\n }\n\n # Calculate 'percentage_of_net_value' per asset\n assets_total_balance_as_dict: Dict[Asset, Dict[str, Any]] = {\n asset: balance.to_dict() for asset, balance in assets_total_balance.items()\n }\n liabilities_as_dict: Dict[Asset, Dict[str, Any]] = {\n asset: balance.to_dict() for asset, balance in liabilities.items()\n }\n for asset, balance_dict in assets_total_balance_as_dict.items():\n percentage = (balance_dict['usd_value'] / net_usd).to_percentage() if net_usd != ZERO else '0%' # noqa: E501\n assets_total_balance_as_dict[asset]['percentage_of_net_value'] = percentage\n\n for asset, balance_dict in liabilities_as_dict.items():\n percentage = (balance_dict['usd_value'] / net_usd).to_percentage() if net_usd != ZERO else '0%' # noqa: E501\n liabilities_as_dict[asset]['percentage_of_net_value'] = percentage\n\n # Compose balances response\n result_dict = {\n 'assets': assets_total_balance_as_dict,\n 'liabilities': liabilities_as_dict,\n 'location': location_stats,\n 'net_usd': net_usd,\n }\n allowed_to_save = requested_save_data or self.data.should_save_balances()\n\n if problem_free and allowed_to_save:\n 
if not timestamp:\n timestamp = Timestamp(int(time.time()))\n self.data.db.save_balances_data(data=result_dict, timestamp=timestamp)\n log.debug('query_balances data saved')\n else:\n log.debug(\n 'query_balances data not saved',\n allowed_to_save=allowed_to_save,\n problem_free=problem_free,\n )\n\n return result_dict\n\n def _query_exchange_asset_movements(\n self,\n from_ts: Timestamp,\n to_ts: Timestamp,\n all_movements: List[AssetMovement],\n exchange: Union[ExchangeInterface, Location],\n ) -> List[AssetMovement]:\n if isinstance(exchange, ExchangeInterface):\n location = deserialize_location(exchange.name)\n # clear the asset movements queried for this exchange\n self.actions_per_location['asset_movement'][location] = 0\n location_movements = exchange.query_deposits_withdrawals(\n start_ts=from_ts,\n end_ts=to_ts,\n )\n else:\n assert isinstance(exchange, Location), 'only a location should make it here'\n assert exchange == Location.CRYPTOCOM, 'only cryptocom should make it here'\n location = exchange\n # cryptocom has no exchange integration but we may have DB entries\n self.actions_per_location['asset_movement'][location] = 0\n location_movements = self.data.db.get_asset_movements(\n from_ts=from_ts,\n to_ts=to_ts,\n location=location,\n )\n\n movements: List[AssetMovement] = []\n if self.premium is None:\n movements = self._apply_actions_limit(\n location=location,\n action_type='asset_movement',\n location_actions=location_movements,\n all_actions=all_movements,\n )\n else:\n all_movements.extend(location_movements)\n movements = all_movements\n\n return movements\n\n def query_asset_movements(\n self,\n from_ts: Timestamp,\n to_ts: Timestamp,\n location: Optional[Location],\n ) -> List[AssetMovement]:\n \"\"\"Queries AssetMovements for the given location and time range.\n\n If no location is given then all exchange asset movements are queried.\n If the user does not have premium then a limit is applied.\n May raise:\n - RemoteError: If there are problems connecting to any of the remote exchanges\n \"\"\"\n movements: List[AssetMovement] = []\n if location is not None:\n if location == Location.CRYPTOCOM:\n movements = self._query_exchange_asset_movements(\n from_ts=from_ts,\n to_ts=to_ts,\n all_movements=movements,\n exchange=Location.CRYPTOCOM,\n )\n else:\n exchange = self.exchange_manager.get(str(location))\n if not exchange:\n logger.warning(\n f'Tried to query deposits/withdrawals from {location} which is either '\n f'not at exchange or not an exchange the user has connected to',\n )\n return []\n movements = self._query_exchange_asset_movements(\n from_ts=from_ts,\n to_ts=to_ts,\n all_movements=movements,\n exchange=exchange,\n )\n else:\n # cryptocom has no exchange integration but we may have DB entries due to csv import\n movements = self._query_exchange_asset_movements(\n from_ts=from_ts,\n to_ts=to_ts,\n all_movements=movements,\n exchange=Location.CRYPTOCOM,\n )\n for _, exchange in self.exchange_manager.connected_exchanges.items():\n self._query_exchange_asset_movements(\n from_ts=from_ts,\n to_ts=to_ts,\n all_movements=movements,\n exchange=exchange,\n )\n\n # return movements with most recent first\n movements.sort(key=lambda x: x.timestamp, reverse=True)\n return movements\n\n def set_settings(self, settings: ModifiableDBSettings) -> Tuple[bool, str]:\n \"\"\"Tries to set new settings. 
Returns True in success or False with message if error\"\"\"\n with self.lock:\n if settings.eth_rpc_endpoint is not None:\n result, msg = self.chain_manager.set_eth_rpc_endpoint(settings.eth_rpc_endpoint)\n if not result:\n return False, msg\n\n if settings.ksm_rpc_endpoint is not None:\n result, msg = self.chain_manager.set_ksm_rpc_endpoint(settings.ksm_rpc_endpoint)\n if not result:\n return False, msg\n\n if settings.kraken_account_type is not None:\n kraken = self.exchange_manager.get('kraken')\n if kraken:\n kraken.set_account_type(settings.kraken_account_type) # type: ignore\n\n if settings.btc_derivation_gap_limit is not None:\n self.chain_manager.btc_derivation_gap_limit = settings.btc_derivation_gap_limit\n\n if settings.current_price_oracles is not None:\n Inquirer().set_oracles_order(settings.current_price_oracles)\n\n if settings.historical_price_oracles is not None:\n PriceHistorian().set_oracles_order(settings.historical_price_oracles)\n\n self.data.db.set_settings(settings)\n return True, ''\n\n def get_settings(self) -> DBSettings:\n \"\"\"Returns the db settings with a check whether premium is active or not\"\"\"\n db_settings = self.data.db.get_settings(have_premium=self.premium is not None)\n return db_settings\n\n def setup_exchange(\n self,\n name: str,\n api_key: ApiKey,\n api_secret: ApiSecret,\n passphrase: Optional[str] = None,\n ) -> Tuple[bool, str]:\n \"\"\"\n Setup a new exchange with an api key and an api secret and optionally a passphrase\n\n By default the api keys are always validated unless validate is False.\n \"\"\"\n is_success, msg = self.exchange_manager.setup_exchange(\n name=name,\n api_key=api_key,\n api_secret=api_secret,\n database=self.data.db,\n passphrase=passphrase,\n )\n\n if is_success:\n # Success, save the result in the DB\n self.data.db.add_exchange(name, api_key, api_secret, passphrase=passphrase)\n return is_success, msg\n\n def remove_exchange(self, name: str) -> Tuple[bool, str]:\n if not self.exchange_manager.has_exchange(name):\n return False, 'Exchange {} is not registered'.format(name)\n\n self.exchange_manager.delete_exchange(name)\n # Success, remove it also from the DB\n self.data.db.remove_exchange(name)\n self.data.db.delete_used_query_range_for_exchange(name)\n return True, ''\n\n def query_periodic_data(self) -> Dict[str, Union[bool, Timestamp]]:\n \"\"\"Query for frequently changing data\"\"\"\n result: Dict[str, Union[bool, Timestamp]] = {}\n\n if self.user_is_logged_in:\n result['last_balance_save'] = self.data.db.get_last_balance_save_time()\n result['eth_node_connection'] = self.chain_manager.ethereum.web3_mapping.get(NodeName.OWN, None) is not None # noqa : E501\n result['last_data_upload_ts'] = Timestamp(self.premium_sync_manager.last_data_upload_ts) # noqa : E501\n return result\n\n def shutdown(self) -> None:\n self.logout()\n self.shutdown_event.set()\n\n def _connect_ksm_manager_on_startup(self) -> bool:\n return bool(self.data.db.get_blockchain_accounts().ksm)\n\n def create_oracle_cache(\n self,\n oracle: HistoricalPriceOracle,\n from_asset: Asset,\n to_asset: Asset,\n purge_old: bool,\n ) -> None:\n \"\"\"Creates the cache of the given asset pair from the start of time\n until now for the given oracle.\n\n if purge_old is true then any old cache in memory and in a file is purged\n\n May raise:\n - RemoteError if there is a problem reaching the oracle\n - UnsupportedAsset if any of the two assets is not supported by the oracle\n \"\"\"\n if oracle != HistoricalPriceOracle.CRYPTOCOMPARE:\n return # only for 
cryptocompare for now\n\n self.cryptocompare.create_cache(from_asset, to_asset, purge_old)\n\n def delete_oracle_cache(\n self,\n oracle: HistoricalPriceOracle,\n from_asset: Asset,\n to_asset: Asset,\n ) -> None:\n if oracle != HistoricalPriceOracle.CRYPTOCOMPARE:\n return # only for cryptocompare for now\n\n self.cryptocompare.delete_cache(from_asset, to_asset)\n\n def get_oracle_cache(self, oracle: HistoricalPriceOracle) -> List[Dict[str, Any]]:\n if oracle != HistoricalPriceOracle.CRYPTOCOMPARE:\n return [] # only for cryptocompare for now\n\n return self.cryptocompare.get_all_cache_data()\n",
"path": "rotkehlchen/rotkehlchen.py"
}
] | [
{
"content": "#!/usr/bin/env python\n\nimport argparse\nimport logging.config\nimport os\nimport time\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, DefaultDict, Dict, List, Optional, Tuple, Union, overload\n\nimport gevent\nfrom gevent.lock import Semaphore\nfrom typing_extensions import Literal\n\nfrom rotkehlchen.accounting.accountant import Accountant\nfrom rotkehlchen.accounting.structures import Balance\nfrom rotkehlchen.assets.asset import Asset\nfrom rotkehlchen.assets.resolver import AssetResolver\nfrom rotkehlchen.balances.manual import account_for_manually_tracked_balances\nfrom rotkehlchen.chain.ethereum.manager import (\n ETHEREUM_NODES_TO_CONNECT_AT_START,\n EthereumManager,\n NodeName,\n)\nfrom rotkehlchen.chain.ethereum.trades import AMMTrade\nfrom rotkehlchen.chain.manager import BlockchainBalancesUpdate, ChainManager\nfrom rotkehlchen.chain.substrate.manager import SubstrateManager\nfrom rotkehlchen.chain.substrate.typing import SubstrateChain\nfrom rotkehlchen.chain.substrate.utils import KUSAMA_NODES_TO_CONNECT_AT_START\nfrom rotkehlchen.config import default_data_directory\nfrom rotkehlchen.constants.misc import ZERO\nfrom rotkehlchen.data.importer import DataImporter\nfrom rotkehlchen.data_handler import DataHandler\nfrom rotkehlchen.db.settings import DBSettings, ModifiableDBSettings\nfrom rotkehlchen.errors import (\n EthSyncError,\n InputError,\n PremiumAuthenticationError,\n RemoteError,\n SystemPermissionError,\n)\nfrom rotkehlchen.exchanges.data_structures import AssetMovement, Trade\nfrom rotkehlchen.exchanges.exchange import ExchangeInterface\nfrom rotkehlchen.exchanges.manager import ExchangeManager\nfrom rotkehlchen.externalapis.beaconchain import BeaconChain\nfrom rotkehlchen.externalapis.coingecko import Coingecko\nfrom rotkehlchen.externalapis.cryptocompare import Cryptocompare\nfrom rotkehlchen.externalapis.etherscan import Etherscan\nfrom rotkehlchen.fval import FVal\nfrom rotkehlchen.greenlets import GreenletManager\nfrom rotkehlchen.history import EventsHistorian, PriceHistorian\nfrom rotkehlchen.history.events import FREE_LEDGER_ACTIONS_LIMIT\nfrom rotkehlchen.history.typing import HistoricalPriceOracle\nfrom rotkehlchen.icons import IconManager\nfrom rotkehlchen.inquirer import Inquirer\nfrom rotkehlchen.logging import (\n DEFAULT_ANONYMIZED_LOGS,\n LoggingSettings,\n RotkehlchenLogsAdapter,\n configure_logging,\n)\nfrom rotkehlchen.premium.premium import Premium, PremiumCredentials, premium_create_and_verify\nfrom rotkehlchen.premium.sync import PremiumSyncManager\nfrom rotkehlchen.serialization.deserialize import deserialize_location\nfrom rotkehlchen.tasks.manager import DEFAULT_MAX_TASKS_NUM, TaskManager\nfrom rotkehlchen.typing import (\n ApiKey,\n ApiSecret,\n BlockchainAccountData,\n ListOfBlockchainAddresses,\n Location,\n SupportedBlockchain,\n Timestamp,\n)\nfrom rotkehlchen.usage_analytics import maybe_submit_usage_analytics\nfrom rotkehlchen.user_messages import MessagesAggregator\n\nif TYPE_CHECKING:\n from rotkehlchen.chain.bitcoin.xpub import XpubData\n\nlogger = logging.getLogger(__name__)\nlog = RotkehlchenLogsAdapter(logger)\n\nMAIN_LOOP_SECS_DELAY = 10\nFREE_TRADES_LIMIT = 250\nFREE_ASSET_MOVEMENTS_LIMIT = 100\n\nLIMITS_MAPPING = {\n 'trade': FREE_TRADES_LIMIT,\n 'asset_movement': FREE_ASSET_MOVEMENTS_LIMIT,\n 'ledger_action': FREE_LEDGER_ACTIONS_LIMIT,\n}\n\nICONS_BATCH_SIZE = 5\nICONS_QUERY_SLEEP = 10\n\n\nTRADES_LIST = List[Union[Trade, AMMTrade]]\n\n\nclass Rotkehlchen():\n 
def __init__(self, args: argparse.Namespace) -> None:\n \"\"\"Initialize the Rotkehlchen object\n\n May Raise:\n - SystemPermissionError if the given data directory's permissions\n are not correct.\n \"\"\"\n self.lock = Semaphore()\n self.lock.acquire()\n\n # Can also be None after unlock if premium credentials did not\n # authenticate or premium server temporarily offline\n self.premium: Optional[Premium] = None\n self.user_is_logged_in: bool = False\n configure_logging(args)\n\n self.sleep_secs = args.sleep_secs\n if args.data_dir is None:\n self.data_dir = default_data_directory()\n else:\n self.data_dir = Path(args.data_dir)\n\n if not os.access(self.data_dir, os.W_OK | os.R_OK):\n raise SystemPermissionError(\n f'The given data directory {self.data_dir} is not readable or writable',\n )\n self.main_loop_spawned = False\n self.args = args\n self.api_task_greenlets: List[gevent.Greenlet] = []\n self.msg_aggregator = MessagesAggregator()\n self.greenlet_manager = GreenletManager(msg_aggregator=self.msg_aggregator)\n self.exchange_manager = ExchangeManager(msg_aggregator=self.msg_aggregator)\n # Initialize the AssetResolver singleton\n AssetResolver(data_directory=self.data_dir)\n self.data = DataHandler(self.data_dir, self.msg_aggregator)\n self.cryptocompare = Cryptocompare(data_directory=self.data_dir, database=None)\n self.coingecko = Coingecko(data_directory=self.data_dir)\n self.icon_manager = IconManager(data_dir=self.data_dir, coingecko=self.coingecko)\n self.greenlet_manager.spawn_and_track(\n after_seconds=None,\n task_name='periodically_query_icons_until_all_cached',\n exception_is_error=False,\n method=self.icon_manager.periodically_query_icons_until_all_cached,\n batch_size=ICONS_BATCH_SIZE,\n sleep_time_secs=ICONS_QUERY_SLEEP,\n )\n # Initialize the Inquirer singleton\n Inquirer(\n data_dir=self.data_dir,\n cryptocompare=self.cryptocompare,\n coingecko=self.coingecko,\n )\n # Keeps how many trades we have found per location. 
Used for free user limiting\n self.actions_per_location: Dict[str, Dict[Location, int]] = {\n 'trade': defaultdict(int),\n 'asset_movement': defaultdict(int),\n }\n\n self.lock.release()\n self.task_manager: Optional[TaskManager] = None\n self.shutdown_event = gevent.event.Event()\n\n def reset_after_failed_account_creation_or_login(self) -> None:\n \"\"\"If the account creation or login failed make sure that the Rotki instance is clear\n\n Tricky instances are when after either failed premium credentials or user refusal\n to sync premium databases we relogged in.\n \"\"\"\n self.cryptocompare.db = None\n\n def unlock_user(\n self,\n user: str,\n password: str,\n create_new: bool,\n sync_approval: Literal['yes', 'no', 'unknown'],\n premium_credentials: Optional[PremiumCredentials],\n initial_settings: Optional[ModifiableDBSettings] = None,\n ) -> None:\n \"\"\"Unlocks an existing user or creates a new one if `create_new` is True\n\n May raise:\n - PremiumAuthenticationError if the password can't unlock the database.\n - AuthenticationError if premium_credentials are given and are invalid\n or can't authenticate with the server\n - DBUpgradeError if the rotki DB version is newer than the software or\n there is a DB upgrade and there is an error.\n - SystemPermissionError if the directory or DB file can not be accessed\n \"\"\"\n log.info(\n 'Unlocking user',\n user=user,\n create_new=create_new,\n sync_approval=sync_approval,\n initial_settings=initial_settings,\n )\n\n # unlock or create the DB\n self.password = password\n self.user_directory = self.data.unlock(user, password, create_new, initial_settings)\n self.data_importer = DataImporter(db=self.data.db)\n self.last_data_upload_ts = self.data.db.get_last_data_upload_ts()\n self.premium_sync_manager = PremiumSyncManager(data=self.data, password=password)\n # set the DB in the external services instances that need it\n self.cryptocompare.set_database(self.data.db)\n\n # Anything that was set above here has to be cleaned in case of failure in the next step\n # by reset_after_failed_account_creation_or_login()\n try:\n self.premium = self.premium_sync_manager.try_premium_at_start(\n given_premium_credentials=premium_credentials,\n username=user,\n create_new=create_new,\n sync_approval=sync_approval,\n )\n except PremiumAuthenticationError:\n # Reraise it only if this is during the creation of a new account where\n # the premium credentials were given by the user\n if create_new:\n raise\n self.msg_aggregator.add_warning(\n 'Could not authenticate the Rotki premium API keys found in the DB.'\n ' Has your subscription expired?',\n )\n # else let's just continue. 
User signed in succesfully, but he just\n # has unauthenticable/invalid premium credentials remaining in his DB\n\n settings = self.get_settings()\n self.greenlet_manager.spawn_and_track(\n after_seconds=None,\n task_name='submit_usage_analytics',\n exception_is_error=False,\n method=maybe_submit_usage_analytics,\n should_submit=settings.submit_usage_analytics,\n )\n self.etherscan = Etherscan(database=self.data.db, msg_aggregator=self.msg_aggregator)\n self.beaconchain = BeaconChain(database=self.data.db, msg_aggregator=self.msg_aggregator)\n eth_rpc_endpoint = settings.eth_rpc_endpoint\n # Initialize the price historian singleton\n PriceHistorian(\n data_directory=self.data_dir,\n cryptocompare=self.cryptocompare,\n coingecko=self.coingecko,\n )\n PriceHistorian().set_oracles_order(settings.historical_price_oracles)\n\n self.accountant = Accountant(\n db=self.data.db,\n user_directory=self.user_directory,\n msg_aggregator=self.msg_aggregator,\n create_csv=True,\n )\n\n # Initialize the rotkehlchen logger\n LoggingSettings(anonymized_logs=settings.anonymized_logs)\n exchange_credentials = self.data.db.get_exchange_credentials()\n self.exchange_manager.initialize_exchanges(\n exchange_credentials=exchange_credentials,\n database=self.data.db,\n )\n\n # Initialize blockchain querying modules\n ethereum_manager = EthereumManager(\n ethrpc_endpoint=eth_rpc_endpoint,\n etherscan=self.etherscan,\n database=self.data.db,\n msg_aggregator=self.msg_aggregator,\n greenlet_manager=self.greenlet_manager,\n connect_at_start=ETHEREUM_NODES_TO_CONNECT_AT_START,\n )\n kusama_manager = SubstrateManager(\n chain=SubstrateChain.KUSAMA,\n msg_aggregator=self.msg_aggregator,\n greenlet_manager=self.greenlet_manager,\n connect_at_start=KUSAMA_NODES_TO_CONNECT_AT_START,\n connect_on_startup=self._connect_ksm_manager_on_startup(),\n own_rpc_endpoint=settings.ksm_rpc_endpoint,\n )\n\n Inquirer().inject_ethereum(ethereum_manager)\n Inquirer().set_oracles_order(settings.current_price_oracles)\n\n self.chain_manager = ChainManager(\n blockchain_accounts=self.data.db.get_blockchain_accounts(),\n ethereum_manager=ethereum_manager,\n kusama_manager=kusama_manager,\n msg_aggregator=self.msg_aggregator,\n database=self.data.db,\n greenlet_manager=self.greenlet_manager,\n premium=self.premium,\n eth_modules=settings.active_modules,\n data_directory=self.data_dir,\n beaconchain=self.beaconchain,\n btc_derivation_gap_limit=settings.btc_derivation_gap_limit,\n )\n self.events_historian = EventsHistorian(\n user_directory=self.user_directory,\n db=self.data.db,\n msg_aggregator=self.msg_aggregator,\n exchange_manager=self.exchange_manager,\n chain_manager=self.chain_manager,\n )\n self.task_manager = TaskManager(\n max_tasks_num=DEFAULT_MAX_TASKS_NUM,\n greenlet_manager=self.greenlet_manager,\n api_task_greenlets=self.api_task_greenlets,\n database=self.data.db,\n cryptocompare=self.cryptocompare,\n premium_sync_manager=self.premium_sync_manager,\n chain_manager=self.chain_manager,\n exchange_manager=self.exchange_manager,\n )\n self.user_is_logged_in = True\n log.debug('User unlocking complete')\n\n def logout(self) -> None:\n if not self.user_is_logged_in:\n return\n\n user = self.data.username\n log.info(\n 'Logging out user',\n user=user,\n )\n self.greenlet_manager.clear()\n del self.chain_manager\n self.exchange_manager.delete_all_exchanges()\n\n # Reset rotkehlchen logger to default\n LoggingSettings(anonymized_logs=DEFAULT_ANONYMIZED_LOGS)\n\n del self.accountant\n del self.events_historian\n del 
self.data_importer\n\n if self.premium is not None:\n del self.premium\n self.data.logout()\n self.password = ''\n self.cryptocompare.unset_database()\n\n # Make sure no messages leak to other user sessions\n self.msg_aggregator.consume_errors()\n self.msg_aggregator.consume_warnings()\n self.task_manager = None\n\n self.user_is_logged_in = False\n log.info(\n 'User successfully logged out',\n user=user,\n )\n\n def set_premium_credentials(self, credentials: PremiumCredentials) -> None:\n \"\"\"\n Sets the premium credentials for Rotki\n\n Raises PremiumAuthenticationError if the given key is rejected by the Rotkehlchen server\n \"\"\"\n log.info('Setting new premium credentials')\n if self.premium is not None:\n self.premium.set_credentials(credentials)\n else:\n self.premium = premium_create_and_verify(credentials)\n self.premium_sync_manager.premium = self.premium\n\n self.data.db.set_rotkehlchen_premium(credentials)\n\n def delete_premium_credentials(self) -> Tuple[bool, str]:\n \"\"\"Deletes the premium credentials for Rotki\"\"\"\n msg = ''\n\n success = self.data.db.del_rotkehlchen_premium()\n if success is False:\n msg = 'The database was unable to delete the Premium keys for the logged-in user'\n self.deactivate_premium_status()\n return success, msg\n\n def deactivate_premium_status(self) -> None:\n \"\"\"Deactivate premium in the current session\"\"\"\n self.premium = None\n self.premium_sync_manager.premium = None\n self.chain_manager.deactivate_premium_status()\n\n def start(self) -> gevent.Greenlet:\n assert not self.main_loop_spawned, 'Tried to spawn the main loop twice'\n greenlet = gevent.spawn(self.main_loop)\n self.main_loop_spawned = True\n return greenlet\n\n def main_loop(self) -> None:\n \"\"\"Rotki main loop that fires often and runs the task manager's scheduler\"\"\"\n while self.shutdown_event.wait(timeout=MAIN_LOOP_SECS_DELAY) is not True:\n if self.task_manager is not None:\n self.task_manager.schedule()\n\n def get_blockchain_account_data(\n self,\n blockchain: SupportedBlockchain,\n ) -> Union[List[BlockchainAccountData], Dict[str, Any]]:\n account_data = self.data.db.get_blockchain_account_data(blockchain)\n if blockchain != SupportedBlockchain.BITCOIN:\n return account_data\n\n xpub_data = self.data.db.get_bitcoin_xpub_data()\n addresses_to_account_data = {x.address: x for x in account_data}\n address_to_xpub_mappings = self.data.db.get_addresses_to_xpub_mapping(\n list(addresses_to_account_data.keys()), # type: ignore\n )\n\n xpub_mappings: Dict['XpubData', List[BlockchainAccountData]] = {}\n for address, xpub_entry in address_to_xpub_mappings.items():\n if xpub_entry not in xpub_mappings:\n xpub_mappings[xpub_entry] = []\n xpub_mappings[xpub_entry].append(addresses_to_account_data[address])\n\n data: Dict[str, Any] = {'standalone': [], 'xpubs': []}\n # Add xpub data\n for xpub_entry in xpub_data:\n data_entry = xpub_entry.serialize()\n addresses = xpub_mappings.get(xpub_entry, None)\n data_entry['addresses'] = addresses if addresses and len(addresses) != 0 else None\n data['xpubs'].append(data_entry)\n # Add standalone addresses\n for account in account_data:\n if account.address not in address_to_xpub_mappings:\n data['standalone'].append(account)\n\n return data\n\n def add_blockchain_accounts(\n self,\n blockchain: SupportedBlockchain,\n account_data: List[BlockchainAccountData],\n ) -> BlockchainBalancesUpdate:\n \"\"\"Adds new blockchain accounts\n\n Adds the accounts to the blockchain instance and queries them to get the\n updated balances. 
Also adds them in the DB\n\n May raise:\n - EthSyncError from modify_blockchain_account\n - InputError if the given accounts list is empty.\n - TagConstraintError if any of the given account data contain unknown tags.\n - RemoteError if an external service such as Etherscan is queried and\n there is a problem with its query.\n \"\"\"\n self.data.db.ensure_tags_exist(\n given_data=account_data,\n action='adding',\n data_type='blockchain accounts',\n )\n address_type = blockchain.get_address_type()\n updated_balances = self.chain_manager.add_blockchain_accounts(\n blockchain=blockchain,\n accounts=[address_type(entry.address) for entry in account_data],\n )\n self.data.db.add_blockchain_accounts(\n blockchain=blockchain,\n account_data=account_data,\n )\n\n return updated_balances\n\n def edit_blockchain_accounts(\n self,\n blockchain: SupportedBlockchain,\n account_data: List[BlockchainAccountData],\n ) -> None:\n \"\"\"Edits blockchain accounts\n\n Edits blockchain account data for the given accounts\n\n May raise:\n - InputError if the given accounts list is empty or if\n any of the accounts to edit do not exist.\n - TagConstraintError if any of the given account data contain unknown tags.\n \"\"\"\n # First check for validity of account data addresses\n if len(account_data) == 0:\n raise InputError('Empty list of blockchain account data to edit was given')\n accounts = [x.address for x in account_data]\n unknown_accounts = set(accounts).difference(self.chain_manager.accounts.get(blockchain))\n if len(unknown_accounts) != 0:\n raise InputError(\n f'Tried to edit unknown {blockchain.value} '\n f'accounts {\",\".join(unknown_accounts)}',\n )\n\n self.data.db.ensure_tags_exist(\n given_data=account_data,\n action='editing',\n data_type='blockchain accounts',\n )\n\n # Finally edit the accounts\n self.data.db.edit_blockchain_accounts(\n blockchain=blockchain,\n account_data=account_data,\n )\n\n def remove_blockchain_accounts(\n self,\n blockchain: SupportedBlockchain,\n accounts: ListOfBlockchainAddresses,\n ) -> BlockchainBalancesUpdate:\n \"\"\"Removes blockchain accounts\n\n Removes the accounts from the blockchain instance and queries them to get\n the updated balances. 
Also removes them from the DB\n\n May raise:\n - RemoteError if an external service such as Etherscan is queried and\n there is a problem with its query.\n - InputError if a non-existing account was given to remove\n \"\"\"\n balances_update = self.chain_manager.remove_blockchain_accounts(\n blockchain=blockchain,\n accounts=accounts,\n )\n self.data.db.remove_blockchain_accounts(blockchain, accounts)\n return balances_update\n\n def get_history_query_status(self) -> Dict[str, str]:\n if self.events_historian.progress < FVal('100'):\n processing_state = self.events_historian.processing_state_name\n progress = self.events_historian.progress / 2\n elif self.accountant.first_processed_timestamp == -1:\n processing_state = 'Processing all retrieved historical events'\n progress = FVal(50)\n else:\n processing_state = 'Processing all retrieved historical events'\n # start_ts is min of the query start or the first action timestamp since action\n # processing can start well before query start to calculate cost basis\n start_ts = min(\n self.accountant.events.query_start_ts,\n self.accountant.first_processed_timestamp,\n )\n diff = self.accountant.events.query_end_ts - start_ts\n progress = 50 + 100 * (\n FVal(self.accountant.currently_processing_timestamp - start_ts) /\n FVal(diff) / 2)\n\n return {'processing_state': str(processing_state), 'total_progress': str(progress)}\n\n def process_history(\n self,\n start_ts: Timestamp,\n end_ts: Timestamp,\n ) -> Tuple[Dict[str, Any], str]:\n (\n error_or_empty,\n history,\n loan_history,\n asset_movements,\n eth_transactions,\n defi_events,\n ledger_actions,\n ) = self.events_historian.get_history(\n start_ts=start_ts,\n end_ts=end_ts,\n has_premium=self.premium is not None,\n )\n result = self.accountant.process_history(\n start_ts=start_ts,\n end_ts=end_ts,\n trade_history=history,\n loan_history=loan_history,\n asset_movements=asset_movements,\n eth_transactions=eth_transactions,\n defi_events=defi_events,\n ledger_actions=ledger_actions,\n )\n return result, error_or_empty\n\n @overload\n def _apply_actions_limit(\n self,\n location: Location,\n action_type: Literal['trade'],\n location_actions: TRADES_LIST,\n all_actions: TRADES_LIST,\n ) -> TRADES_LIST:\n ...\n\n @overload\n def _apply_actions_limit(\n self,\n location: Location,\n action_type: Literal['asset_movement'],\n location_actions: List[AssetMovement],\n all_actions: List[AssetMovement],\n ) -> List[AssetMovement]:\n ...\n\n def _apply_actions_limit(\n self,\n location: Location,\n action_type: Literal['trade', 'asset_movement'],\n location_actions: Union[TRADES_LIST, List[AssetMovement]],\n all_actions: Union[TRADES_LIST, List[AssetMovement]],\n ) -> Union[TRADES_LIST, List[AssetMovement]]:\n \"\"\"Take as many actions from location actions and add them to all actions as the limit permits\n\n Returns the modified (or not) all_actions\n \"\"\"\n # If we are already at or above the limit return current actions disregarding this location\n actions_mapping = self.actions_per_location[action_type]\n current_num_actions = sum(x for _, x in actions_mapping.items())\n limit = LIMITS_MAPPING[action_type]\n if current_num_actions >= limit:\n return all_actions\n\n # Find out how many more actions can we return, and depending on that get\n # the number of actions from the location actions and add them to the total\n remaining_num_actions = limit - current_num_actions\n if remaining_num_actions < 0:\n remaining_num_actions = 0\n\n num_actions_to_take = min(len(location_actions), 
remaining_num_actions)\n\n actions_mapping[location] = num_actions_to_take\n all_actions.extend(location_actions[0:num_actions_to_take]) # type: ignore\n return all_actions\n\n def query_trades(\n self,\n from_ts: Timestamp,\n to_ts: Timestamp,\n location: Optional[Location],\n ) -> TRADES_LIST:\n \"\"\"Queries trades for the given location and time range.\n If no location is given then all external, all exchange and DEX trades are queried.\n\n DEX Trades are queried only if the user has premium\n If the user does not have premium then a trade limit is applied.\n\n May raise:\n - RemoteError: If there are problems connecting to any of the remote exchanges\n \"\"\"\n trades: TRADES_LIST\n if location is not None:\n trades = self.query_location_trades(from_ts, to_ts, location)\n else:\n trades = self.query_location_trades(from_ts, to_ts, Location.EXTERNAL)\n # crypto.com is not an API key supported exchange but user can import from CSV\n trades.extend(self.query_location_trades(from_ts, to_ts, Location.CRYPTOCOM))\n for name, exchange in self.exchange_manager.connected_exchanges.items():\n exchange_trades = exchange.query_trade_history(start_ts=from_ts, end_ts=to_ts)\n if self.premium is None:\n trades = self._apply_actions_limit(\n location=deserialize_location(name),\n action_type='trade',\n location_actions=exchange_trades,\n all_actions=trades,\n )\n else:\n trades.extend(exchange_trades)\n\n # for all trades we also need uniswap trades\n if self.premium is not None:\n uniswap = self.chain_manager.uniswap\n if uniswap is not None:\n trades.extend(\n uniswap.get_trades(\n addresses=self.chain_manager.queried_addresses_for_module('uniswap'),\n from_timestamp=from_ts,\n to_timestamp=to_ts,\n ),\n )\n\n # return trades with most recent first\n trades.sort(key=lambda x: x.timestamp, reverse=True)\n return trades\n\n def query_location_trades(\n self,\n from_ts: Timestamp,\n to_ts: Timestamp,\n location: Location,\n ) -> TRADES_LIST:\n # clear the trades queried for this location\n self.actions_per_location['trade'][location] = 0\n\n location_trades: TRADES_LIST\n if location in (Location.EXTERNAL, Location.CRYPTOCOM):\n location_trades = self.data.db.get_trades( # type: ignore # list invariance\n from_ts=from_ts,\n to_ts=to_ts,\n location=location,\n )\n elif location == Location.UNISWAP:\n if self.premium is not None:\n uniswap = self.chain_manager.uniswap\n if uniswap is not None:\n location_trades = uniswap.get_trades( # type: ignore # list invariance\n addresses=self.chain_manager.queried_addresses_for_module('uniswap'),\n from_timestamp=from_ts,\n to_timestamp=to_ts,\n )\n else:\n # should only be an exchange\n exchange = self.exchange_manager.get(str(location))\n if not exchange:\n logger.warning(\n f'Tried to query trades from {location} which is either not an '\n f'exchange or not an exchange the user has connected to',\n )\n return []\n\n location_trades = exchange.query_trade_history(start_ts=from_ts, end_ts=to_ts)\n\n trades: TRADES_LIST = []\n if self.premium is None:\n trades = self._apply_actions_limit(\n location=location,\n action_type='trade',\n location_actions=location_trades,\n all_actions=trades,\n )\n else:\n trades = location_trades\n\n return trades\n\n def query_balances(\n self,\n requested_save_data: bool = False,\n timestamp: Timestamp = None,\n ignore_cache: bool = False,\n ) -> Dict[str, Any]:\n \"\"\"Query all balances rotkehlchen can see.\n\n If requested_save_data is True then the data are always saved in the DB,\n if it is False then data are saved if 
self.data.should_save_balances()\n is True.\n If timestamp is None then the current timestamp is used.\n If a timestamp is given then that is the time that the balances are going\n to be saved in the DB\n If ignore_cache is True then all underlying calls that have a cache ignore it\n\n Returns a dictionary with the queried balances.\n \"\"\"\n log.info('query_balances called', requested_save_data=requested_save_data)\n\n balances: Dict[str, Dict[Asset, Balance]] = {}\n problem_free = True\n for _, exchange in self.exchange_manager.connected_exchanges.items():\n exchange_balances, _ = exchange.query_balances(ignore_cache=ignore_cache)\n # If we got an error, disregard that exchange but make sure we don't save data\n if not isinstance(exchange_balances, dict):\n problem_free = False\n else:\n balances[exchange.name] = exchange_balances\n\n liabilities: Dict[Asset, Balance]\n try:\n blockchain_result = self.chain_manager.query_balances(\n blockchain=None,\n force_token_detection=ignore_cache,\n ignore_cache=ignore_cache,\n )\n balances[str(Location.BLOCKCHAIN)] = blockchain_result.totals.assets\n liabilities = blockchain_result.totals.liabilities\n except (RemoteError, EthSyncError) as e:\n problem_free = False\n liabilities = {}\n log.error(f'Querying blockchain balances failed due to: {str(e)}')\n\n balances = account_for_manually_tracked_balances(db=self.data.db, balances=balances)\n\n # Calculate usd totals\n assets_total_balance: DefaultDict[Asset, Balance] = defaultdict(Balance)\n total_usd_per_location: Dict[str, FVal] = {}\n for location, asset_balance in balances.items():\n total_usd_per_location[location] = ZERO\n for asset, balance in asset_balance.items():\n assets_total_balance[asset] += balance\n total_usd_per_location[location] += balance.usd_value\n\n net_usd = sum((balance.usd_value for _, balance in assets_total_balance.items()), ZERO)\n liabilities_total_usd = sum((liability.usd_value for _, liability in liabilities.items()), ZERO) # noqa: E501\n net_usd -= liabilities_total_usd\n\n # Calculate location stats\n location_stats: Dict[str, Any] = {}\n for location, total_usd in total_usd_per_location.items():\n if location == str(Location.BLOCKCHAIN):\n total_usd -= liabilities_total_usd\n\n percentage = (total_usd / net_usd).to_percentage() if net_usd != ZERO else '0%'\n location_stats[location] = {\n 'usd_value': total_usd,\n 'percentage_of_net_value': percentage,\n }\n\n # Calculate 'percentage_of_net_value' per asset\n assets_total_balance_as_dict: Dict[Asset, Dict[str, Any]] = {\n asset: balance.to_dict() for asset, balance in assets_total_balance.items()\n }\n liabilities_as_dict: Dict[Asset, Dict[str, Any]] = {\n asset: balance.to_dict() for asset, balance in liabilities.items()\n }\n for asset, balance_dict in assets_total_balance_as_dict.items():\n percentage = (balance_dict['usd_value'] / net_usd).to_percentage() if net_usd != ZERO else '0%' # noqa: E501\n assets_total_balance_as_dict[asset]['percentage_of_net_value'] = percentage\n\n for asset, balance_dict in liabilities_as_dict.items():\n percentage = (balance_dict['usd_value'] / net_usd).to_percentage() if net_usd != ZERO else '0%' # noqa: E501\n liabilities_as_dict[asset]['percentage_of_net_value'] = percentage\n\n # Compose balances response\n result_dict = {\n 'assets': assets_total_balance_as_dict,\n 'liabilities': liabilities_as_dict,\n 'location': location_stats,\n 'net_usd': net_usd,\n }\n allowed_to_save = requested_save_data or self.data.should_save_balances()\n\n if problem_free and allowed_to_save:\n 
if not timestamp:\n timestamp = Timestamp(int(time.time()))\n self.data.db.save_balances_data(data=result_dict, timestamp=timestamp)\n log.debug('query_balances data saved')\n else:\n log.debug(\n 'query_balances data not saved',\n allowed_to_save=allowed_to_save,\n problem_free=problem_free,\n )\n\n return result_dict\n\n def _query_exchange_asset_movements(\n self,\n from_ts: Timestamp,\n to_ts: Timestamp,\n all_movements: List[AssetMovement],\n exchange: Union[ExchangeInterface, Location],\n ) -> List[AssetMovement]:\n if isinstance(exchange, ExchangeInterface):\n location = deserialize_location(exchange.name)\n # clear the asset movements queried for this exchange\n self.actions_per_location['asset_movement'][location] = 0\n location_movements = exchange.query_deposits_withdrawals(\n start_ts=from_ts,\n end_ts=to_ts,\n )\n else:\n assert isinstance(exchange, Location), 'only a location should make it here'\n assert exchange == Location.CRYPTOCOM, 'only cryptocom should make it here'\n location = exchange\n # cryptocom has no exchange integration but we may have DB entries\n self.actions_per_location['asset_movement'][location] = 0\n location_movements = self.data.db.get_asset_movements(\n from_ts=from_ts,\n to_ts=to_ts,\n location=location,\n )\n\n movements: List[AssetMovement] = []\n if self.premium is None:\n movements = self._apply_actions_limit(\n location=location,\n action_type='asset_movement',\n location_actions=location_movements,\n all_actions=all_movements,\n )\n else:\n all_movements.extend(location_movements)\n movements = all_movements\n\n return movements\n\n def query_asset_movements(\n self,\n from_ts: Timestamp,\n to_ts: Timestamp,\n location: Optional[Location],\n ) -> List[AssetMovement]:\n \"\"\"Queries AssetMovements for the given location and time range.\n\n If no location is given then all exchange asset movements are queried.\n If the user does not have premium then a limit is applied.\n May raise:\n - RemoteError: If there are problems connecting to any of the remote exchanges\n \"\"\"\n movements: List[AssetMovement] = []\n if location is not None:\n if location == Location.CRYPTOCOM:\n movements = self._query_exchange_asset_movements(\n from_ts=from_ts,\n to_ts=to_ts,\n all_movements=movements,\n exchange=Location.CRYPTOCOM,\n )\n else:\n exchange = self.exchange_manager.get(str(location))\n if not exchange:\n logger.warning(\n f'Tried to query deposits/withdrawals from {location} which is either '\n f'not at exchange or not an exchange the user has connected to',\n )\n return []\n movements = self._query_exchange_asset_movements(\n from_ts=from_ts,\n to_ts=to_ts,\n all_movements=movements,\n exchange=exchange,\n )\n else:\n # cryptocom has no exchange integration but we may have DB entries due to csv import\n movements = self._query_exchange_asset_movements(\n from_ts=from_ts,\n to_ts=to_ts,\n all_movements=movements,\n exchange=Location.CRYPTOCOM,\n )\n for _, exchange in self.exchange_manager.connected_exchanges.items():\n self._query_exchange_asset_movements(\n from_ts=from_ts,\n to_ts=to_ts,\n all_movements=movements,\n exchange=exchange,\n )\n\n # return movements with most recent first\n movements.sort(key=lambda x: x.timestamp, reverse=True)\n return movements\n\n def set_settings(self, settings: ModifiableDBSettings) -> Tuple[bool, str]:\n \"\"\"Tries to set new settings. 
Returns True in success or False with message if error\"\"\"\n with self.lock:\n if settings.eth_rpc_endpoint is not None:\n result, msg = self.chain_manager.set_eth_rpc_endpoint(settings.eth_rpc_endpoint)\n if not result:\n return False, msg\n\n if settings.ksm_rpc_endpoint is not None:\n result, msg = self.chain_manager.set_ksm_rpc_endpoint(settings.ksm_rpc_endpoint)\n if not result:\n return False, msg\n\n if settings.kraken_account_type is not None:\n kraken = self.exchange_manager.get('kraken')\n if kraken:\n kraken.set_account_type(settings.kraken_account_type) # type: ignore\n\n if settings.btc_derivation_gap_limit is not None:\n self.chain_manager.btc_derivation_gap_limit = settings.btc_derivation_gap_limit\n\n if settings.current_price_oracles is not None:\n Inquirer().set_oracles_order(settings.current_price_oracles)\n\n if settings.historical_price_oracles is not None:\n PriceHistorian().set_oracles_order(settings.historical_price_oracles)\n\n self.data.db.set_settings(settings)\n return True, ''\n\n def get_settings(self) -> DBSettings:\n \"\"\"Returns the db settings with a check whether premium is active or not\"\"\"\n db_settings = self.data.db.get_settings(have_premium=self.premium is not None)\n return db_settings\n\n def setup_exchange(\n self,\n name: str,\n api_key: ApiKey,\n api_secret: ApiSecret,\n passphrase: Optional[str] = None,\n ) -> Tuple[bool, str]:\n \"\"\"\n Setup a new exchange with an api key and an api secret and optionally a passphrase\n\n By default the api keys are always validated unless validate is False.\n \"\"\"\n is_success, msg = self.exchange_manager.setup_exchange(\n name=name,\n api_key=api_key,\n api_secret=api_secret,\n database=self.data.db,\n passphrase=passphrase,\n )\n\n if is_success:\n # Success, save the result in the DB\n self.data.db.add_exchange(name, api_key, api_secret, passphrase=passphrase)\n return is_success, msg\n\n def remove_exchange(self, name: str) -> Tuple[bool, str]:\n if not self.exchange_manager.has_exchange(name):\n return False, 'Exchange {} is not registered'.format(name)\n\n self.exchange_manager.delete_exchange(name)\n # Success, remove it also from the DB\n self.data.db.remove_exchange(name)\n self.data.db.delete_used_query_range_for_exchange(name)\n return True, ''\n\n def query_periodic_data(self) -> Dict[str, Union[bool, Timestamp]]:\n \"\"\"Query for frequently changing data\"\"\"\n result: Dict[str, Union[bool, Timestamp]] = {}\n\n if self.user_is_logged_in:\n result['last_balance_save'] = self.data.db.get_last_balance_save_time()\n result['eth_node_connection'] = self.chain_manager.ethereum.web3_mapping.get(NodeName.OWN, None) is not None # noqa : E501\n result['last_data_upload_ts'] = Timestamp(self.premium_sync_manager.last_data_upload_ts) # noqa : E501\n return result\n\n def shutdown(self) -> None:\n self.logout()\n self.shutdown_event.set()\n\n def _connect_ksm_manager_on_startup(self) -> bool:\n return bool(self.data.db.get_blockchain_accounts().ksm)\n\n def create_oracle_cache(\n self,\n oracle: HistoricalPriceOracle,\n from_asset: Asset,\n to_asset: Asset,\n purge_old: bool,\n ) -> None:\n \"\"\"Creates the cache of the given asset pair from the start of time\n until now for the given oracle.\n\n if purge_old is true then any old cache in memory and in a file is purged\n\n May raise:\n - RemoteError if there is a problem reaching the oracle\n - UnsupportedAsset if any of the two assets is not supported by the oracle\n \"\"\"\n if oracle != HistoricalPriceOracle.CRYPTOCOMPARE:\n return # only for 
cryptocompare for now\n\n self.cryptocompare.create_cache(from_asset, to_asset, purge_old)\n\n def delete_oracle_cache(\n self,\n oracle: HistoricalPriceOracle,\n from_asset: Asset,\n to_asset: Asset,\n ) -> None:\n if oracle != HistoricalPriceOracle.CRYPTOCOMPARE:\n return # only for cryptocompare for now\n\n self.cryptocompare.delete_cache(from_asset, to_asset)\n\n def get_oracle_cache(self, oracle: HistoricalPriceOracle) -> List[Dict[str, Any]]:\n if oracle != HistoricalPriceOracle.CRYPTOCOMPARE:\n return [] # only for cryptocompare for now\n\n return self.cryptocompare.get_all_cache_data()\n",
"path": "rotkehlchen/rotkehlchen.py"
}
] | diff --git a/docs/changelog.rst b/docs/changelog.rst
index 30706e876f..1c6298db21 100755
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -2,6 +2,7 @@
Changelog
=========
+* :bug:`1928` rotki premium DB sync will now work after entering api keys for the first time even without a restart.
* :bug:`2294` Do not count MakerDAO Oasis proxy assets found by the DeFi SDK as it ends up double counting makerDAO vault deposits.
* :bug:`2287` Rotki encrypted DB upload for premium users should now respect the user setting.
diff --git a/rotkehlchen/rotkehlchen.py b/rotkehlchen/rotkehlchen.py
index 2b28883e13..f252820561 100755
--- a/rotkehlchen/rotkehlchen.py
+++ b/rotkehlchen/rotkehlchen.py
@@ -362,6 +362,7 @@ def set_premium_credentials(self, credentials: PremiumCredentials) -> None:
self.premium.set_credentials(credentials)
else:
self.premium = premium_create_and_verify(credentials)
+ self.premium_sync_manager.premium = self.premium
self.data.db.set_rotkehlchen_premium(credentials)
|
pymodbus-dev__pymodbus-1340 | Function code 0x15 fails because of incomplete reception of bytes
### Versions
* Python: 3.10.7
* OS: Win 10
* Pymodbus: 3.0.2
* Modbus Hardware (if used): USB to TTL (CH340)
### Pymodbus Specific
* Server: rtu - sync
* Client: rtu - sync
### Description
We were trying to send many bytes of data to the slave using function code 0x15. Eventually we found that the data received over serial is incomplete, especially when the baud rate is low. In the recv function of pymodbus/client/serial.py, when size is not None, the client does not wait for the data to arrive.
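To make the timing concrete, here is a back-of-the-envelope sketch of our own (not library code): with 8E1 framing each byte costs 11 bits on the wire, so a frame of a couple hundred bytes takes a noticeable fraction of a second to arrive at 9600 baud, while a read issued right after the request only sees whatever has been buffered so far.
```python
# Rough estimate of how long a frame takes to arrive on the wire.
# Assumes 8E1 framing: 1 start + 8 data + 1 parity + 1 stop = 11 bits per byte.
def frame_time(num_bytes: int, baudrate: int, bits_per_byte: int = 11) -> float:
    """Seconds needed to transmit num_bytes at the given baud rate."""
    return num_bytes * bits_per_byte / baudrate

print(frame_time(254, 9600))    # ~0.29 s: a large 0x15 frame arrives slowly at 9600 baud
print(frame_time(254, 115200))  # ~0.02 s: why the problem is rarely visible at high baud rates
```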
### Code and Logs
```python
from pymodbus.client import ModbusSerialClient as ModbusClient
from pymodbus.file_message import FileRecord, WriteFileRecordRequest
import os
import argparse
parser = argparse.ArgumentParser(description='Firmware update on MODBus RTU.')
parser.add_argument('-f',
action='store',
type=str,
metavar='',
dest='file',
                    help='Full path and filename of the file to be downloaded. '
                         r'Example: -f C:\git\Firmware_Download\my_file.bin')
args = parser.parse_args()
client = ModbusClient(method='rtu', port='COM8', parity='E', baudrate=9600, timeout=5)
client.connect()
file_len = os.path.getsize(args.file)
record_len = file_len // 2
record_len = min(record_len, 121)
with open(args.file, 'rb') as file:
    file_data = file.read(record_len * 2)
frame = [FileRecord(reference_type=0x06, file_number=0x01, record_number=0,
record_data=file_data, record_length=record_len)]
rq = WriteFileRecordRequest(records=frame, unit=1)
print(rq.encode())
rr = client.execute(rq)
client.close()
```
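For a rough sense of the frame size involved (our own arithmetic, assuming the standard 0x15 layout with a 7-byte sub-request header; note that a Write File Record response echoes the request, so the client must receive a frame of the same size back):
```python
# Hypothetical size estimate for the request built above (not library output).
record_len = 121              # registers, as capped in the script
data_bytes = record_len * 2   # 242 bytes of record data
pdu = 1 + 1 + 7 + data_bytes  # function code + byte count + sub-request header + data
adu = pdu + 1 + 2             # + slave address + CRC16 = 254 bytes on the wire
print(adu, adu * 11 / 9600)   # ~0.29 s at 9600 baud for the echoed response
```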
When I modify recv as shown below, the problem goes away. I don't know whether this change introduces any other problems, so I hope you can help me review it. Thank you.
```python
def recv(self, size):
"""Read data from the underlying descriptor."""
super().recv(size)
if not self.socket:
raise ConnectionException(
self.__str__() # pylint: disable=unnecessary-dunder-call
)
if size is None:
size = self._wait_for_data()
elif size > self._in_waiting():
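            # more bytes requested than are currently buffered: block until they arrive (or the timeout expires)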
self._wait_for_data()
result = self.socket.read(size)
return result
```
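For context, this relies on the existing `_wait_for_data()` helper, which polls the port until the buffered byte count stops growing or the client timeout expires; the extra branch simply delays the read until the counter settles instead of returning whatever happens to be buffered. A simplified restatement of that loop (not the library's exact code):
```python
import time

# Simplified restatement of ModbusSerialClient._wait_for_data() for context.
def wait_for_data(in_waiting, timeout: float, poll: float = 0.01) -> int:
    """Poll in_waiting() until the buffered byte count stops growing or timeout expires."""
    size, more_data = 0, False
    start = time.time()
    while time.time() - start <= timeout:
        available = in_waiting()
        if more_data and (not available or available == size):
            break
        if available and available != size:
            more_data = True
            size = available
        time.sleep(poll)
    return size
```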
| [
{
"content": "\"\"\"Modbus client async serial communication.\"\"\"\nimport asyncio\nimport time\nfrom functools import partial\n\nfrom pymodbus.client.base import ModbusBaseClient, ModbusClientProtocol\nfrom pymodbus.client.serial_asyncio import create_serial_connection\nfrom pymodbus.constants import Defaults\nfrom pymodbus.exceptions import ConnectionException\nfrom pymodbus.framer import ModbusFramer\nfrom pymodbus.framer.rtu_framer import ModbusRtuFramer\nfrom pymodbus.logging import Log\nfrom pymodbus.utilities import ModbusTransactionState\n\n\ntry:\n import serial\nexcept ImportError:\n pass\n\n\nclass AsyncModbusSerialClient(ModbusBaseClient):\n \"\"\"**AsyncModbusSerialClient**.\n\n :param port: Serial port used for communication.\n :param framer: (optional) Framer class.\n :param baudrate: (optional) Bits per second.\n :param bytesize: (optional) Number of bits per byte 7-8.\n :param parity: (optional) 'E'ven, 'O'dd or 'N'one\n :param stopbits: (optional) Number of stop bits 0-2¡.\n :param handle_local_echo: (optional) Discard local echo from dongle.\n :param kwargs: (optional) Experimental parameters\n\n The serial communication is RS-485 based, and usually used with a usb RS485 dongle.\n\n Example::\n\n from pymodbus.client import AsyncModbusSerialClient\n\n async def run():\n client = AsyncModbusSerialClient(\"dev/serial0\")\n\n await client.connect()\n ...\n await client.close()\n \"\"\"\n\n transport = None\n framer = None\n\n def __init__(\n self,\n port: str,\n framer: ModbusFramer = ModbusRtuFramer,\n baudrate: int = Defaults.Baudrate,\n bytesize: int = Defaults.Bytesize,\n parity: chr = Defaults.Parity,\n stopbits: int = Defaults.Stopbits,\n handle_local_echo: bool = Defaults.HandleLocalEcho,\n **kwargs: any,\n ) -> None:\n \"\"\"Initialize Asyncio Modbus Serial Client.\"\"\"\n self.protocol = None\n super().__init__(framer=framer, **kwargs)\n self.params.port = port\n self.params.baudrate = baudrate\n self.params.bytesize = bytesize\n self.params.parity = parity\n self.params.stopbits = stopbits\n self.params.handle_local_echo = handle_local_echo\n self.loop = None\n self._connected_event = asyncio.Event()\n self._reconnect_task = None\n\n async def close(self): # pylint: disable=invalid-overridden-method\n \"\"\"Stop connection.\"\"\"\n\n # prevent reconnect:\n self.delay_ms = 0\n if self.connected:\n if self.protocol.transport:\n self.protocol.transport.close()\n if self.protocol:\n await self.protocol.close()\n self.protocol = None\n await asyncio.sleep(0.1)\n\n # if there is an unfinished delayed reconnection attempt pending, cancel it\n if self._reconnect_task:\n self._reconnect_task.cancel()\n self._reconnect_task = None\n\n def _create_protocol(self):\n \"\"\"Create protocol.\"\"\"\n protocol = ModbusClientProtocol(\n framer=self.params.framer, xframer=self.framer, timeout=self.params.timeout\n )\n protocol.factory = self\n return protocol\n\n @property\n def connected(self):\n \"\"\"Connect internal.\"\"\"\n return self._connected_event.is_set()\n\n async def connect(self): # pylint: disable=invalid-overridden-method\n \"\"\"Connect Async client.\"\"\"\n # get current loop, if there are no loop a RuntimeError will be raised\n self.loop = asyncio.get_running_loop()\n\n Log.debug(\"Starting serial connection\")\n try:\n await create_serial_connection(\n self.loop,\n self._create_protocol,\n self.params.port,\n baudrate=self.params.baudrate,\n bytesize=self.params.bytesize,\n stopbits=self.params.stopbits,\n parity=self.params.parity,\n 
timeout=self.params.timeout,\n **self.params.kwargs,\n )\n await self._connected_event.wait()\n Log.info(\"Connected to {}\", self.params.port)\n except Exception as exc: # pylint: disable=broad-except\n Log.warning(\"Failed to connect: {}\", exc)\n if self.delay_ms > 0:\n self._launch_reconnect()\n return self.connected\n\n def protocol_made_connection(self, protocol):\n \"\"\"Notify successful connection.\"\"\"\n Log.info(\"Serial connected.\")\n if not self.connected:\n self._connected_event.set()\n self.protocol = protocol\n else:\n Log.error(\"Factory protocol connect callback called while connected.\")\n\n def protocol_lost_connection(self, protocol):\n \"\"\"Notify lost connection.\"\"\"\n Log.info(\"Serial lost connection.\")\n if protocol is not self.protocol:\n Log.error(\"Serial: protocol is not self.protocol.\")\n\n self._connected_event.clear()\n if self.protocol is not None:\n del self.protocol\n self.protocol = None\n if self.delay_ms:\n self._launch_reconnect()\n\n def _launch_reconnect(self):\n \"\"\"Launch delayed reconnection coroutine\"\"\"\n if self._reconnect_task:\n Log.warning(\n \"Ignoring launch of delayed reconnection, another is in progress\"\n )\n else:\n # store the future in a member variable so we know we have a pending reconnection attempt\n # also prevents its garbage collection\n self._reconnect_task = asyncio.create_task(self._reconnect())\n\n async def _reconnect(self):\n \"\"\"Reconnect.\"\"\"\n Log.debug(\"Waiting {} ms before next connection attempt.\", self.delay_ms)\n await asyncio.sleep(self.delay_ms / 1000)\n self.delay_ms = min(2 * self.delay_ms, self.params.reconnect_delay_max)\n\n self._reconnect_task = None\n return await self.connect()\n\n\nclass ModbusSerialClient(ModbusBaseClient):\n \"\"\"**ModbusSerialClient**.\n\n :param port: Serial port used for communication.\n :param framer: (optional) Framer class.\n :param baudrate: (optional) Bits per second.\n :param bytesize: (optional) Number of bits per byte 7-8.\n :param parity: (optional) 'E'ven, 'O'dd or 'N'one\n :param stopbits: (optional) Number of stop bits 0-2¡.\n :param handle_local_echo: (optional) Discard local echo from dongle.\n :param kwargs: (optional) Experimental parameters\n\n The serial communication is RS-485 based, and usually used with a usb RS485 dongle.\n\n Example::\n\n from pymodbus.client import ModbusSerialClient\n\n def run():\n client = ModbusSerialClient(\"dev/serial0\")\n\n client.connect()\n ...\n client.close()\n \"\"\"\n\n state = ModbusTransactionState.IDLE\n inter_char_timeout = 0\n silent_interval = 0\n\n def __init__(\n self,\n port: str,\n framer: ModbusFramer = ModbusRtuFramer,\n baudrate: int = Defaults.Baudrate,\n bytesize: int = Defaults.Bytesize,\n parity: chr = Defaults.Parity,\n stopbits: int = Defaults.Stopbits,\n handle_local_echo: bool = Defaults.HandleLocalEcho,\n **kwargs: any,\n ) -> None:\n \"\"\"Initialize Modbus Serial Client.\"\"\"\n super().__init__(framer=framer, **kwargs)\n self.params.port = port\n self.params.baudrate = baudrate\n self.params.bytesize = bytesize\n self.params.parity = parity\n self.params.stopbits = stopbits\n self.params.handle_local_echo = handle_local_echo\n self.socket = None\n\n self.last_frame_end = None\n if isinstance(self.framer, ModbusRtuFramer):\n if self.params.baudrate > 19200:\n self.silent_interval = 1.75 / 1000 # ms\n else:\n self._t0 = float((1 + 8 + 2)) / self.params.baudrate\n self.inter_char_timeout = 1.5 * self._t0\n self.silent_interval = 3.5 * self._t0\n self.silent_interval = 
round(self.silent_interval, 6)\n\n @property\n def connected(self):\n \"\"\"Connect internal.\"\"\"\n return self.connect()\n\n def connect(self):\n \"\"\"Connect to the modbus serial server.\"\"\"\n if self.socket:\n return True\n try:\n self.socket = serial.serial_for_url(\n self.params.port,\n timeout=self.params.timeout,\n bytesize=self.params.bytesize,\n stopbits=self.params.stopbits,\n baudrate=self.params.baudrate,\n parity=self.params.parity,\n )\n if isinstance(self.framer, ModbusRtuFramer):\n if self.params.strict:\n self.socket.interCharTimeout = self.inter_char_timeout\n self.last_frame_end = None\n except serial.SerialException as msg:\n Log.error(\"{}\", msg)\n self.close()\n return self.socket is not None\n\n def close(self):\n \"\"\"Close the underlying socket connection.\"\"\"\n if self.socket:\n self.socket.close()\n self.socket = None\n\n def _in_waiting(self):\n \"\"\"Return _in_waiting.\"\"\"\n in_waiting = \"in_waiting\" if hasattr(self.socket, \"in_waiting\") else \"inWaiting\"\n\n if in_waiting == \"in_waiting\":\n waitingbytes = getattr(self.socket, in_waiting)\n else:\n waitingbytes = getattr(self.socket, in_waiting)()\n return waitingbytes\n\n def send(self, request):\n \"\"\"Send data on the underlying socket.\n\n If receive buffer still holds some data then flush it.\n\n Sleep if last send finished less than 3.5 character times ago.\n \"\"\"\n super().send(request)\n if not self.socket:\n raise ConnectionException(str(self))\n if request:\n try:\n if waitingbytes := self._in_waiting():\n result = self.socket.read(waitingbytes)\n if self.state == ModbusTransactionState.RETRYING:\n Log.debug(\n \"Sending available data in recv buffer {}\", result, \":hex\"\n )\n return result\n Log.warning(\"Cleanup recv buffer before send: {}\", result, \":hex\")\n except NotImplementedError:\n pass\n if self.state != ModbusTransactionState.SENDING:\n Log.debug('New Transaction state \"SENDING\"')\n self.state = ModbusTransactionState.SENDING\n size = self.socket.write(request)\n return size\n return 0\n\n def _wait_for_data(self):\n \"\"\"Wait for data.\"\"\"\n size = 0\n more_data = False\n if self.params.timeout is not None and self.params.timeout:\n condition = partial(\n lambda start, timeout: (time.time() - start) <= timeout,\n timeout=self.params.timeout,\n )\n else:\n condition = partial(lambda dummy1, dummy2: True, dummy2=None)\n start = time.time()\n while condition(start):\n available = self._in_waiting()\n if (more_data and not available) or (more_data and available == size):\n break\n if available and available != size:\n more_data = True\n size = available\n time.sleep(0.01)\n return size\n\n def recv(self, size):\n \"\"\"Read data from the underlying descriptor.\"\"\"\n super().recv(size)\n if not self.socket:\n raise ConnectionException(\n self.__str__() # pylint: disable=unnecessary-dunder-call\n )\n if size is None:\n size = self._wait_for_data()\n elif size > self._in_waiting():\n self._wait_for_data()\n result = self.socket.read(size)\n return result\n\n def is_socket_open(self):\n \"\"\"Check if socket is open.\"\"\"\n if self.socket:\n if hasattr(self.socket, \"is_open\"):\n return self.socket.is_open\n return self.socket.isOpen()\n return False\n\n def __str__(self):\n \"\"\"Build a string representation of the connection.\"\"\"\n return f\"ModbusSerialClient({self.framer} baud[{self.params.baudrate}])\"\n\n def __repr__(self):\n \"\"\"Return string representation.\"\"\"\n return (\n f\"<{self.__class__.__name__} at {hex(id(self))} socket={self.socket}, 
\"\n f\"framer={self.framer}, timeout={self.params.timeout}>\"\n )\n",
"path": "pymodbus/client/serial.py"
}
] | [
{
"content": "\"\"\"Modbus client async serial communication.\"\"\"\nimport asyncio\nimport time\nfrom functools import partial\n\nfrom pymodbus.client.base import ModbusBaseClient, ModbusClientProtocol\nfrom pymodbus.client.serial_asyncio import create_serial_connection\nfrom pymodbus.constants import Defaults\nfrom pymodbus.exceptions import ConnectionException\nfrom pymodbus.framer import ModbusFramer\nfrom pymodbus.framer.rtu_framer import ModbusRtuFramer\nfrom pymodbus.logging import Log\nfrom pymodbus.utilities import ModbusTransactionState\n\n\ntry:\n import serial\nexcept ImportError:\n pass\n\n\nclass AsyncModbusSerialClient(ModbusBaseClient):\n \"\"\"**AsyncModbusSerialClient**.\n\n :param port: Serial port used for communication.\n :param framer: (optional) Framer class.\n :param baudrate: (optional) Bits per second.\n :param bytesize: (optional) Number of bits per byte 7-8.\n :param parity: (optional) 'E'ven, 'O'dd or 'N'one\n :param stopbits: (optional) Number of stop bits 0-2¡.\n :param handle_local_echo: (optional) Discard local echo from dongle.\n :param kwargs: (optional) Experimental parameters\n\n The serial communication is RS-485 based, and usually used with a usb RS485 dongle.\n\n Example::\n\n from pymodbus.client import AsyncModbusSerialClient\n\n async def run():\n client = AsyncModbusSerialClient(\"dev/serial0\")\n\n await client.connect()\n ...\n await client.close()\n \"\"\"\n\n transport = None\n framer = None\n\n def __init__(\n self,\n port: str,\n framer: ModbusFramer = ModbusRtuFramer,\n baudrate: int = Defaults.Baudrate,\n bytesize: int = Defaults.Bytesize,\n parity: chr = Defaults.Parity,\n stopbits: int = Defaults.Stopbits,\n handle_local_echo: bool = Defaults.HandleLocalEcho,\n **kwargs: any,\n ) -> None:\n \"\"\"Initialize Asyncio Modbus Serial Client.\"\"\"\n self.protocol = None\n super().__init__(framer=framer, **kwargs)\n self.params.port = port\n self.params.baudrate = baudrate\n self.params.bytesize = bytesize\n self.params.parity = parity\n self.params.stopbits = stopbits\n self.params.handle_local_echo = handle_local_echo\n self.loop = None\n self._connected_event = asyncio.Event()\n self._reconnect_task = None\n\n async def close(self): # pylint: disable=invalid-overridden-method\n \"\"\"Stop connection.\"\"\"\n\n # prevent reconnect:\n self.delay_ms = 0\n if self.connected:\n if self.protocol.transport:\n self.protocol.transport.close()\n if self.protocol:\n await self.protocol.close()\n self.protocol = None\n await asyncio.sleep(0.1)\n\n # if there is an unfinished delayed reconnection attempt pending, cancel it\n if self._reconnect_task:\n self._reconnect_task.cancel()\n self._reconnect_task = None\n\n def _create_protocol(self):\n \"\"\"Create protocol.\"\"\"\n protocol = ModbusClientProtocol(\n framer=self.params.framer, xframer=self.framer, timeout=self.params.timeout\n )\n protocol.factory = self\n return protocol\n\n @property\n def connected(self):\n \"\"\"Connect internal.\"\"\"\n return self._connected_event.is_set()\n\n async def connect(self): # pylint: disable=invalid-overridden-method\n \"\"\"Connect Async client.\"\"\"\n # get current loop, if there are no loop a RuntimeError will be raised\n self.loop = asyncio.get_running_loop()\n\n Log.debug(\"Starting serial connection\")\n try:\n await create_serial_connection(\n self.loop,\n self._create_protocol,\n self.params.port,\n baudrate=self.params.baudrate,\n bytesize=self.params.bytesize,\n stopbits=self.params.stopbits,\n parity=self.params.parity,\n 
timeout=self.params.timeout,\n **self.params.kwargs,\n )\n await self._connected_event.wait()\n Log.info(\"Connected to {}\", self.params.port)\n except Exception as exc: # pylint: disable=broad-except\n Log.warning(\"Failed to connect: {}\", exc)\n if self.delay_ms > 0:\n self._launch_reconnect()\n return self.connected\n\n def protocol_made_connection(self, protocol):\n \"\"\"Notify successful connection.\"\"\"\n Log.info(\"Serial connected.\")\n if not self.connected:\n self._connected_event.set()\n self.protocol = protocol\n else:\n Log.error(\"Factory protocol connect callback called while connected.\")\n\n def protocol_lost_connection(self, protocol):\n \"\"\"Notify lost connection.\"\"\"\n Log.info(\"Serial lost connection.\")\n if protocol is not self.protocol:\n Log.error(\"Serial: protocol is not self.protocol.\")\n\n self._connected_event.clear()\n if self.protocol is not None:\n del self.protocol\n self.protocol = None\n if self.delay_ms:\n self._launch_reconnect()\n\n def _launch_reconnect(self):\n \"\"\"Launch delayed reconnection coroutine\"\"\"\n if self._reconnect_task:\n Log.warning(\n \"Ignoring launch of delayed reconnection, another is in progress\"\n )\n else:\n # store the future in a member variable so we know we have a pending reconnection attempt\n # also prevents its garbage collection\n self._reconnect_task = asyncio.create_task(self._reconnect())\n\n async def _reconnect(self):\n \"\"\"Reconnect.\"\"\"\n Log.debug(\"Waiting {} ms before next connection attempt.\", self.delay_ms)\n await asyncio.sleep(self.delay_ms / 1000)\n self.delay_ms = min(2 * self.delay_ms, self.params.reconnect_delay_max)\n\n self._reconnect_task = None\n return await self.connect()\n\n\nclass ModbusSerialClient(ModbusBaseClient):\n \"\"\"**ModbusSerialClient**.\n\n :param port: Serial port used for communication.\n :param framer: (optional) Framer class.\n :param baudrate: (optional) Bits per second.\n :param bytesize: (optional) Number of bits per byte 7-8.\n :param parity: (optional) 'E'ven, 'O'dd or 'N'one\n :param stopbits: (optional) Number of stop bits 0-2¡.\n :param handle_local_echo: (optional) Discard local echo from dongle.\n :param kwargs: (optional) Experimental parameters\n\n The serial communication is RS-485 based, and usually used with a usb RS485 dongle.\n\n Example::\n\n from pymodbus.client import ModbusSerialClient\n\n def run():\n client = ModbusSerialClient(\"dev/serial0\")\n\n client.connect()\n ...\n client.close()\n \"\"\"\n\n state = ModbusTransactionState.IDLE\n inter_char_timeout = 0\n silent_interval = 0\n\n def __init__(\n self,\n port: str,\n framer: ModbusFramer = ModbusRtuFramer,\n baudrate: int = Defaults.Baudrate,\n bytesize: int = Defaults.Bytesize,\n parity: chr = Defaults.Parity,\n stopbits: int = Defaults.Stopbits,\n handle_local_echo: bool = Defaults.HandleLocalEcho,\n **kwargs: any,\n ) -> None:\n \"\"\"Initialize Modbus Serial Client.\"\"\"\n super().__init__(framer=framer, **kwargs)\n self.params.port = port\n self.params.baudrate = baudrate\n self.params.bytesize = bytesize\n self.params.parity = parity\n self.params.stopbits = stopbits\n self.params.handle_local_echo = handle_local_echo\n self.socket = None\n\n self.last_frame_end = None\n if isinstance(self.framer, ModbusRtuFramer):\n if self.params.baudrate > 19200:\n self.silent_interval = 1.75 / 1000 # ms\n else:\n self._t0 = float((1 + 8 + 2)) / self.params.baudrate\n self.inter_char_timeout = 1.5 * self._t0\n self.silent_interval = 3.5 * self._t0\n self.silent_interval = 
round(self.silent_interval, 6)\n\n @property\n def connected(self):\n \"\"\"Connect internal.\"\"\"\n return self.connect()\n\n def connect(self):\n \"\"\"Connect to the modbus serial server.\"\"\"\n if self.socket:\n return True\n try:\n self.socket = serial.serial_for_url(\n self.params.port,\n timeout=self.params.timeout,\n bytesize=self.params.bytesize,\n stopbits=self.params.stopbits,\n baudrate=self.params.baudrate,\n parity=self.params.parity,\n )\n if isinstance(self.framer, ModbusRtuFramer):\n if self.params.strict:\n self.socket.interCharTimeout = self.inter_char_timeout\n self.last_frame_end = None\n except serial.SerialException as msg:\n Log.error(\"{}\", msg)\n self.close()\n return self.socket is not None\n\n def close(self):\n \"\"\"Close the underlying socket connection.\"\"\"\n if self.socket:\n self.socket.close()\n self.socket = None\n\n def _in_waiting(self):\n \"\"\"Return _in_waiting.\"\"\"\n in_waiting = \"in_waiting\" if hasattr(self.socket, \"in_waiting\") else \"inWaiting\"\n\n if in_waiting == \"in_waiting\":\n waitingbytes = getattr(self.socket, in_waiting)\n else:\n waitingbytes = getattr(self.socket, in_waiting)()\n return waitingbytes\n\n def send(self, request):\n \"\"\"Send data on the underlying socket.\n\n If receive buffer still holds some data then flush it.\n\n Sleep if last send finished less than 3.5 character times ago.\n \"\"\"\n super().send(request)\n if not self.socket:\n raise ConnectionException(str(self))\n if request:\n try:\n if waitingbytes := self._in_waiting():\n result = self.socket.read(waitingbytes)\n if self.state == ModbusTransactionState.RETRYING:\n Log.debug(\n \"Sending available data in recv buffer {}\", result, \":hex\"\n )\n return result\n Log.warning(\"Cleanup recv buffer before send: {}\", result, \":hex\")\n except NotImplementedError:\n pass\n if self.state != ModbusTransactionState.SENDING:\n Log.debug('New Transaction state \"SENDING\"')\n self.state = ModbusTransactionState.SENDING\n size = self.socket.write(request)\n return size\n return 0\n\n def _wait_for_data(self):\n \"\"\"Wait for data.\"\"\"\n size = 0\n more_data = False\n if self.params.timeout is not None and self.params.timeout:\n condition = partial(\n lambda start, timeout: (time.time() - start) <= timeout,\n timeout=self.params.timeout,\n )\n else:\n condition = partial(lambda dummy1, dummy2: True, dummy2=None)\n start = time.time()\n while condition(start):\n available = self._in_waiting()\n if (more_data and not available) or (more_data and available == size):\n break\n if available and available != size:\n more_data = True\n size = available\n time.sleep(0.01)\n return size\n\n def recv(self, size):\n \"\"\"Read data from the underlying descriptor.\"\"\"\n super().recv(size)\n if not self.socket:\n raise ConnectionException(\n self.__str__() # pylint: disable=unnecessary-dunder-call\n )\n if size is None:\n size = self._wait_for_data()\n if size > self._in_waiting():\n size = self._wait_for_data()\n result = self.socket.read(size)\n return result\n\n def is_socket_open(self):\n \"\"\"Check if socket is open.\"\"\"\n if self.socket:\n if hasattr(self.socket, \"is_open\"):\n return self.socket.is_open\n return self.socket.isOpen()\n return False\n\n def __str__(self):\n \"\"\"Build a string representation of the connection.\"\"\"\n return f\"ModbusSerialClient({self.framer} baud[{self.params.baudrate}])\"\n\n def __repr__(self):\n \"\"\"Return string representation.\"\"\"\n return (\n f\"<{self.__class__.__name__} at {hex(id(self))} 
socket={self.socket}, \"\n f\"framer={self.framer}, timeout={self.params.timeout}>\"\n )\n",
"path": "pymodbus/client/serial.py"
}
] | diff --git a/pymodbus/client/serial.py b/pymodbus/client/serial.py
index 69c800380..4fc102615 100644
--- a/pymodbus/client/serial.py
+++ b/pymodbus/client/serial.py
@@ -336,8 +336,8 @@ def recv(self, size):
)
if size is None:
size = self._wait_for_data()
- elif size > self._in_waiting():
- self._wait_for_data()
+ if size > self._in_waiting():
+ size = self._wait_for_data()
result = self.socket.read(size)
return result
diff --git a/test/test_client_sync.py b/test/test_client_sync.py
index 3c6845212..476075f36 100755
--- a/test/test_client_sync.py
+++ b/test/test_client_sync.py
@@ -388,7 +388,7 @@ def test_basic_sync_serial_client(self, mock_serial):
self.assertEqual(0, client.send(None))
client.state = 0
self.assertEqual(1, client.send(b"\x00"))
- self.assertEqual(b"\x00", client.recv(1))
+ self.assertEqual(b"", client.recv(1))
# connect/disconnect
self.assertTrue(client.connect())
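For readers skimming the hunks above: the behavioural change in `recv()` is that an explicitly requested `size` larger than what is buffered on the port is now clamped to the count `_wait_for_data()` reports, instead of the wait result being discarded, so `socket.read()` no longer blocks for bytes that may never arrive. That is also why the test now expects `b""` from `client.recv(1)` against a mock with nothing buffered. A minimal sketch of the fixed control flow (`FakeSerial` and this free-standing `recv` are illustrative stand-ins, not the pymodbus API):

```python
class FakeSerial:
    """Pretend serial port with a fixed receive buffer."""

    def __init__(self, buffered):
        self._buf = buffered

    def in_waiting(self):
        return len(self._buf)

    def read(self, size):
        # A real port would block until `size` bytes arrive or the timeout
        # elapses; the fake just hands over whatever is buffered.
        data, self._buf = self._buf[:size], self._buf[size:]
        return data


def recv(port, size=None):
    available = port.in_waiting()  # stands in for _wait_for_data()
    if size is None:
        size = available
    if size > available:           # fixed: was `elif`, with the result discarded
        size = available           # clamp so read() returns promptly
    return port.read(size)


print(recv(FakeSerial(b"\x01\x02"), size=8))  # b'\x01\x02' -- no blocking
```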
|
bookwyrm-social__bookwyrm-1341 | Cannot make other users admin on the website
**Describe the bug**
At the moment there is no way to promote a user to admin through the website; one has to do it in the "./bw-dev shell".
**To Reproduce**
Steps to reproduce the behavior:
1. Go to 'Admin' and then the page of the user you want to promote
2. Promote the user and save
3. The "promoted" user logs in
4. Nope, not promoted
**Expected behavior**
The newly promoted user should see the admin panel after logging in.
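Judging from the diff at the end of this record, the root cause is a permission-codename mismatch: the view and templates check `bookwyrm.moderate_users` (plural), while the codename the app actually registers is apparently `bookwyrm.moderate_user` (singular), so with `raise_exception=True` the check denies even genuine moderators. A sketch of the corrected view decorator, using Django's standard `permission_required` (class body elided):

```python
from django.contrib.auth.decorators import login_required, permission_required
from django.utils.decorators import method_decorator
from django.views import View


@method_decorator(login_required, name="dispatch")
@method_decorator(
    # was "bookwyrm.moderate_users" -- a codename that is never registered
    permission_required("bookwyrm.moderate_user", raise_exception=True),
    name="dispatch",
)
class UserAdminList(View):
    """Admin view of users on this server (body elided)."""
```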
| [
{
"content": "\"\"\" manage user \"\"\"\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import get_object_or_404\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.settings import PAGE_LENGTH\n\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.moderate_users\", raise_exception=True),\n name=\"dispatch\",\n)\nclass UserAdminList(View):\n \"\"\"admin view of users on this server\"\"\"\n\n def get(self, request):\n \"\"\"list of users\"\"\"\n filters = {}\n server = request.GET.get(\"server\")\n if server:\n server = models.FederatedServer.objects.filter(server_name=server).first()\n filters[\"federated_server\"] = server\n filters[\"federated_server__isnull\"] = False\n username = request.GET.get(\"username\")\n if username:\n filters[\"username__icontains\"] = username\n scope = request.GET.get(\"scope\")\n if scope:\n filters[\"local\"] = scope == \"local\"\n\n users = models.User.objects.filter(**filters)\n\n sort = request.GET.get(\"sort\", \"-created_date\")\n sort_fields = [\n \"created_date\",\n \"last_active_date\",\n \"username\",\n \"federated_server__server_name\",\n \"is_active\",\n ]\n if sort in sort_fields + [\"-{:s}\".format(f) for f in sort_fields]:\n users = users.order_by(sort)\n\n paginated = Paginator(users, PAGE_LENGTH)\n data = {\n \"users\": paginated.get_page(request.GET.get(\"page\")),\n \"sort\": sort,\n \"server\": server,\n }\n return TemplateResponse(request, \"user_admin/user_admin.html\", data)\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.moderate_users\", raise_exception=True),\n name=\"dispatch\",\n)\nclass UserAdmin(View):\n \"\"\"moderate an individual user\"\"\"\n\n def get(self, request, user):\n \"\"\"user view\"\"\"\n user = get_object_or_404(models.User, id=user)\n data = {\"user\": user, \"group_form\": forms.UserGroupForm()}\n return TemplateResponse(request, \"user_admin/user.html\", data)\n\n def post(self, request, user):\n \"\"\"update user group\"\"\"\n user = get_object_or_404(models.User, id=user)\n form = forms.UserGroupForm(request.POST, instance=user)\n if form.is_valid():\n form.save()\n data = {\"user\": user, \"group_form\": form}\n return TemplateResponse(request, \"user_admin/user.html\", data)\n",
"path": "bookwyrm/views/user_admin.py"
}
] | [
{
"content": "\"\"\" manage user \"\"\"\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import get_object_or_404\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.settings import PAGE_LENGTH\n\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.moderate_user\", raise_exception=True),\n name=\"dispatch\",\n)\nclass UserAdminList(View):\n \"\"\"admin view of users on this server\"\"\"\n\n def get(self, request):\n \"\"\"list of users\"\"\"\n filters = {}\n server = request.GET.get(\"server\")\n if server:\n server = models.FederatedServer.objects.filter(server_name=server).first()\n filters[\"federated_server\"] = server\n filters[\"federated_server__isnull\"] = False\n username = request.GET.get(\"username\")\n if username:\n filters[\"username__icontains\"] = username\n scope = request.GET.get(\"scope\")\n if scope:\n filters[\"local\"] = scope == \"local\"\n\n users = models.User.objects.filter(**filters)\n\n sort = request.GET.get(\"sort\", \"-created_date\")\n sort_fields = [\n \"created_date\",\n \"last_active_date\",\n \"username\",\n \"federated_server__server_name\",\n \"is_active\",\n ]\n if sort in sort_fields + [\"-{:s}\".format(f) for f in sort_fields]:\n users = users.order_by(sort)\n\n paginated = Paginator(users, PAGE_LENGTH)\n data = {\n \"users\": paginated.get_page(request.GET.get(\"page\")),\n \"sort\": sort,\n \"server\": server,\n }\n return TemplateResponse(request, \"user_admin/user_admin.html\", data)\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.moderate_users\", raise_exception=True),\n name=\"dispatch\",\n)\nclass UserAdmin(View):\n \"\"\"moderate an individual user\"\"\"\n\n def get(self, request, user):\n \"\"\"user view\"\"\"\n user = get_object_or_404(models.User, id=user)\n data = {\"user\": user, \"group_form\": forms.UserGroupForm()}\n return TemplateResponse(request, \"user_admin/user.html\", data)\n\n def post(self, request, user):\n \"\"\"update user group\"\"\"\n user = get_object_or_404(models.User, id=user)\n form = forms.UserGroupForm(request.POST, instance=user)\n if form.is_valid():\n form.save()\n data = {\"user\": user, \"group_form\": form}\n return TemplateResponse(request, \"user_admin/user.html\", data)\n",
"path": "bookwyrm/views/user_admin.py"
}
] | diff --git a/bookwyrm/templates/layout.html b/bookwyrm/templates/layout.html
index 2b8364ec9f..43ca81c743 100644
--- a/bookwyrm/templates/layout.html
+++ b/bookwyrm/templates/layout.html
@@ -109,17 +109,17 @@
{% trans 'Settings' %}
</a>
</li>
- {% if perms.bookwyrm.create_invites or perms.moderate_users %}
+ {% if perms.bookwyrm.create_invites or perms.moderate_user %}
<li class="navbar-divider" role="presentation"></li>
{% endif %}
- {% if perms.bookwyrm.create_invites %}
+ {% if perms.bookwyrm.create_invites and not site.allow_registration %}
<li>
<a href="{% url 'settings-invite-requests' %}" class="navbar-item">
{% trans 'Invites' %}
</a>
</li>
{% endif %}
- {% if perms.bookwyrm.moderate_users %}
+ {% if perms.bookwyrm.moderate_user %}
<li>
<a href="{% url 'settings-users' %}" class="navbar-item">
{% trans 'Admin' %}
diff --git a/bookwyrm/templates/settings/admin_layout.html b/bookwyrm/templates/settings/admin_layout.html
index 9e57076bf6..6d65164766 100644
--- a/bookwyrm/templates/settings/admin_layout.html
+++ b/bookwyrm/templates/settings/admin_layout.html
@@ -21,23 +21,29 @@ <h1 class="title">{% block header %}{% endblock %}</h1>
{% if perms.bookwyrm.create_invites %}
<h2 class="menu-label">{% trans "Manage Users" %}</h2>
<ul class="menu-list">
+ {% if perms.bookwyrm.moderate_user %}
<li>
{% url 'settings-users' as url %}
<a href="{{ url }}"{% if url in request.path %} class="is-active" aria-selected="true"{% endif %}>{% trans "Users" %}</a>
</li>
+ {% endif %}
<li>
{% url 'settings-invite-requests' as url %}
{% url 'settings-invites' as alt_url %}
<a href="{{ url }}"{% if url in request.path or request.path in alt_url %} class="is-active" aria-selected="true"{% endif %}>{% trans "Invites" %}</a>
</li>
+ {% if perms.bookwyrm.moderate_user %}
<li>
{% url 'settings-reports' as url %}
<a href="{{ url }}"{% if url in request.path %} class="is-active" aria-selected="true"{% endif %}>{% trans "Reports" %}</a>
</li>
+ {% endif %}
+ {% if perms.bookwyrm.control_federation %}
<li>
{% url 'settings-federation' as url %}
<a href="{{ url }}"{% if url in request.path %} class="is-active" aria-selected="true"{% endif %}>{% trans "Federated Instances" %}</a>
</li>
+ {% endif %}
</ul>
{% endif %}
{% if perms.bookwyrm.edit_instance_settings %}
diff --git a/bookwyrm/views/user_admin.py b/bookwyrm/views/user_admin.py
index 7cfefb0f4a..3a9ea33922 100644
--- a/bookwyrm/views/user_admin.py
+++ b/bookwyrm/views/user_admin.py
@@ -13,7 +13,7 @@
# pylint: disable= no-self-use
@method_decorator(login_required, name="dispatch")
@method_decorator(
- permission_required("bookwyrm.moderate_users", raise_exception=True),
+ permission_required("bookwyrm.moderate_user", raise_exception=True),
name="dispatch",
)
class UserAdminList(View):
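A cheap way to catch this class of bug before hard-coding a codename in a view or template is to list what Django actually registered for the app. A hypothetical check using only the stock auth models, runnable from the `./bw-dev shell` mentioned in the report:

```python
# List every permission codename registered under the bookwyrm app, making
# typos such as "moderate_users" vs "moderate_user" obvious at a glance.
from django.contrib.auth.models import Permission

perms = Permission.objects.filter(content_type__app_label="bookwyrm")
for perm in perms.order_by("codename"):
    print(f"bookwyrm.{perm.codename}")
```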
|
nvaccess__nvda-6776 | Manual update check doesn't always respect progress bar output config
gui.IndeterminateProgressDialog.done() doesn't check the configured output method for progress bars.
There is also some slight ugliness in the hard-coded frequencies and lengths of the beeps. Maybe add beepIndeterminate() and beepDone() functions to the tones module?
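A sketch of the helpers proposed above, reusing the values currently hard-coded in `gui.IndeterminateProgressDialog` (440 Hz / 40 ms per pulse, 1760 Hz / 40 ms on completion), plus one plausible shape of a config-aware `done()` mirroring the checks `Pulse()` already performs. This is the reporter's suggestion, not NVDA's shipped API, and the merged fix may differ:

```python
# Hypothetical additions to source/tones.py; beep() is the existing
# NVDA tones function used by IndeterminateProgressDialog today.
def beepIndeterminate():
	"""Beep for one pulse of an indeterminate progress bar."""
	beep(440, 40)

def beepDone():
	"""Beep to signal that an indeterminate task has completed."""
	beep(1760, 40)

# Sketch of a config-aware done() for gui.IndeterminateProgressDialog
# (gui/__init__.py already imports config and tones, per the file below).
def done(self):
	self.timer.Stop()
	pbConf = config.conf["presentation"]["progressBarUpdates"]
	if pbConf["progressBarOutputMode"] in ("beep", "both") and (
		pbConf["reportBackgroundProgressBars"] or self.IsActive()
	):
		tones.beepDone()
	self.Hide()
	self.Destroy()
```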
| [
{
"content": "# -*- coding: UTF-8 -*-\r\n#gui/__init__.py\r\n#A part of NonVisual Desktop Access (NVDA)\r\n#Copyright (C) 2006-2015 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Mesar Hameed, Joseph Lee\r\n#This file is covered by the GNU General Public License.\r\n#See the file COPYING for more details.\r\n\r\nimport time\r\nimport os\r\nimport sys\r\nimport threading\r\nimport codecs\r\nimport ctypes\r\nimport weakref\r\nimport wx\r\nimport globalVars\r\nimport tones\r\nimport ui\r\nfrom logHandler import log\r\nimport config\r\nimport versionInfo\r\nimport speech\r\nimport queueHandler\r\nimport core\r\nfrom settingsDialogs import *\r\nimport speechDictHandler\r\nimport languageHandler\r\nimport logViewer\r\nimport speechViewer\r\nimport winUser\r\nimport api\r\nimport guiHelper\r\n\r\ntry:\r\n\timport updateCheck\r\nexcept RuntimeError:\r\n\tupdateCheck = None\r\n\r\n### Constants\r\nNVDA_PATH = os.getcwdu()\r\nICON_PATH=os.path.join(NVDA_PATH, \"images\", \"nvda.ico\")\r\nDONATE_URL = \"http://www.nvaccess.org/donate/\"\r\n\r\n### Globals\r\nmainFrame = None\r\nisInMessageBox = False\r\n\r\ndef getDocFilePath(fileName, localized=True):\r\n\tif not getDocFilePath.rootPath:\r\n\t\tif hasattr(sys, \"frozen\"):\r\n\t\t\tgetDocFilePath.rootPath = os.path.join(NVDA_PATH, \"documentation\")\r\n\t\telse:\r\n\t\t\tgetDocFilePath.rootPath = os.path.abspath(os.path.join(\"..\", \"user_docs\"))\r\n\r\n\tif localized:\r\n\t\tlang = languageHandler.getLanguage()\r\n\t\ttryLangs = [lang]\r\n\t\tif \"_\" in lang:\r\n\t\t\t# This locale has a sub-locale, but documentation might not exist for the sub-locale, so try stripping it.\r\n\t\t\ttryLangs.append(lang.split(\"_\")[0])\r\n\t\t# If all else fails, use English.\r\n\t\ttryLangs.append(\"en\")\r\n\r\n\t\tfileName, fileExt = os.path.splitext(fileName)\r\n\t\tfor tryLang in tryLangs:\r\n\t\t\ttryDir = os.path.join(getDocFilePath.rootPath, tryLang)\r\n\t\t\tif not os.path.isdir(tryDir):\r\n\t\t\t\tcontinue\r\n\r\n\t\t\t# Some out of date translations might include .txt files which are now .html files in newer translations.\r\n\t\t\t# Therefore, ignore the extension and try both .html and .txt.\r\n\t\t\tfor tryExt in (\"html\", \"txt\"):\r\n\t\t\t\ttryPath = os.path.join(tryDir, \"%s.%s\" % (fileName, tryExt))\r\n\t\t\t\tif os.path.isfile(tryPath):\r\n\t\t\t\t\treturn tryPath\r\n\r\n\telse:\r\n\t\t# Not localized.\r\n\t\tif not hasattr(sys, \"frozen\") and fileName in (\"copying.txt\", \"contributors.txt\"):\r\n\t\t\t# If running from source, these two files are in the root dir.\r\n\t\t\treturn os.path.join(NVDA_PATH, \"..\", fileName)\r\n\t\telse:\r\n\t\t\treturn os.path.join(getDocFilePath.rootPath, fileName)\r\ngetDocFilePath.rootPath = None\r\n\r\nclass MainFrame(wx.Frame):\r\n\r\n\tdef __init__(self):\r\n\t\tstyle = wx.DEFAULT_FRAME_STYLE ^ wx.MAXIMIZE_BOX ^ wx.MINIMIZE_BOX | wx.FRAME_NO_TASKBAR\r\n\t\tsuper(MainFrame, self).__init__(None, wx.ID_ANY, versionInfo.name, size=(1,1), style=style)\r\n\t\tself.Bind(wx.EVT_CLOSE, self.onExitCommand)\r\n\t\tself.sysTrayIcon = SysTrayIcon(self)\r\n\t\t#: The focus before the last popup or C{None} if unknown.\r\n\t\t#: This is only valid before L{prePopup} is called,\r\n\t\t#: so it should be used as early as possible in any popup that needs it.\r\n\t\t#: @type: L{NVDAObject}\r\n\t\tself.prevFocus = None\r\n\t\t#: The focus ancestors before the last popup or C{None} if unknown.\r\n\t\t#: @type: list of L{NVDAObject}\r\n\t\tself.prevFocusAncestors = None\r\n\t\t# If NVDA has the uiAccess privilege, it can 
always set the foreground window.\r\n\t\tif not config.hasUiAccess():\r\n\t\t\t# This makes Windows return to the previous foreground window and also seems to allow NVDA to be brought to the foreground.\r\n\t\t\tself.Show()\r\n\t\t\tself.Hide()\r\n\t\t\tif winUser.isWindowVisible(self.Handle):\r\n\t\t\t\t# HACK: Work around a wx bug where Hide() doesn't actually hide the window,\r\n\t\t\t\t# but IsShown() returns False and Hide() again doesn't fix it.\r\n\t\t\t\t# This seems to happen if the call takes too long.\r\n\t\t\t\tself.Show()\r\n\t\t\t\tself.Hide()\r\n\r\n\tdef Destroy(self):\r\n\t\tself.sysTrayIcon.Destroy()\r\n\t\tsuper(MainFrame, self).Destroy()\r\n\r\n\tdef prePopup(self):\r\n\t\t\"\"\"Prepare for a popup.\r\n\t\tThis should be called before any dialog or menu which should pop up for the user.\r\n\t\tL{postPopup} should be called after the dialog or menu has been shown.\r\n\t\t@postcondition: A dialog or menu may be shown.\r\n\t\t\"\"\"\r\n\t\tnvdaPid = os.getpid()\r\n\t\tfocus = api.getFocusObject()\r\n\t\tif focus.processID != nvdaPid:\r\n\t\t\tself.prevFocus = focus\r\n\t\t\tself.prevFocusAncestors = api.getFocusAncestors()\r\n\t\tif winUser.getWindowThreadProcessID(winUser.getForegroundWindow())[0] != nvdaPid:\r\n\t\t\t# This process is not the foreground process, so bring it to the foreground.\r\n\t\t\tself.Raise()\r\n\r\n\tdef postPopup(self):\r\n\t\t\"\"\"Clean up after a popup dialog or menu.\r\n\t\tThis should be called after a dialog or menu was popped up for the user.\r\n\t\t\"\"\"\r\n\t\tself.prevFocus = None\r\n\t\tself.prevFocusAncestors = None\r\n\t\tif not winUser.isWindowVisible(winUser.getForegroundWindow()):\r\n\t\t\t# The current foreground window is invisible, so we want to return to the previous foreground window.\r\n\t\t\t# Showing and hiding our main window seems to achieve this.\r\n\t\t\tself.Show()\r\n\t\t\tself.Hide()\r\n\r\n\tdef showGui(self):\r\n\t\t# The menu pops up at the location of the mouse, which means it pops up at an unpredictable location.\r\n\t\t# Therefore, move the mouse to the centre of the screen so that the menu will always pop up there.\r\n\t\tleft, top, width, height = api.getDesktopObject().location\r\n\t\tx = width / 2\r\n\t\ty = height / 2\r\n\t\twinUser.setCursorPos(x, y)\r\n\t\tself.sysTrayIcon.onActivate(None)\r\n\r\n\tdef onRevertToSavedConfigurationCommand(self,evt):\r\n\t\tqueueHandler.queueFunction(queueHandler.eventQueue,core.resetConfiguration)\r\n\t\t# Translators: Reported when last saved configuration has been applied by using revert to saved configuration option in NVDA menu.\r\n\t\tqueueHandler.queueFunction(queueHandler.eventQueue,ui.message,_(\"Configuration applied\"))\r\n\r\n\tdef onRevertToDefaultConfigurationCommand(self,evt):\r\n\t\tqueueHandler.queueFunction(queueHandler.eventQueue,core.resetConfiguration,factoryDefaults=True)\r\n\t\t# Translators: Reported when configuration has been restored to defaults by using restore configuration to factory defaults item in NVDA menu.\r\n\t\tqueueHandler.queueFunction(queueHandler.eventQueue,ui.message,_(\"Configuration restored to factory defaults\"))\r\n\r\n\tdef onSaveConfigurationCommand(self,evt):\r\n\t\tif globalVars.appArgs.secure:\r\n\t\t\t# Translators: Reported when current configuration cannot be saved while NVDA is running in secure mode such as in Windows login screen.\r\n\t\t\tqueueHandler.queueFunction(queueHandler.eventQueue,ui.message,_(\"Cannot save configuration - NVDA in secure 
mode\"))\r\n\t\t\treturn\r\n\t\ttry:\r\n\t\t\tconfig.conf.save()\r\n\t\t\t# Translators: Reported when current configuration has been saved.\r\n\t\t\tqueueHandler.queueFunction(queueHandler.eventQueue,ui.message,_(\"Configuration saved\"))\r\n\t\texcept:\r\n\t\t\t# Translators: Message shown when current configuration cannot be saved such as when running NVDA from a CD.\r\n\t\t\tmessageBox(_(\"Could not save configuration - probably read only file system\"),_(\"Error\"),wx.OK | wx.ICON_ERROR)\r\n\r\n\tdef _popupSettingsDialog(self, dialog, *args, **kwargs):\r\n\t\tif isInMessageBox:\r\n\t\t\treturn\r\n\t\tself.prePopup()\r\n\t\ttry:\r\n\t\t\tdialog(self, *args, **kwargs).Show()\r\n\t\texcept SettingsDialog.MultiInstanceError:\r\n\t\t\t# Translators: Message shown when attempting to open another NVDA settings dialog when one is already open (example: when trying to open keyboard settings when general settings dialog is open).\r\n\t\t\tmessageBox(_(\"An NVDA settings dialog is already open. Please close it first.\"),_(\"Error\"),style=wx.OK | wx.ICON_ERROR)\r\n\t\tself.postPopup()\r\n\r\n\tdef onDefaultDictionaryCommand(self,evt):\r\n\t\t# Translators: Title for default speech dictionary dialog.\r\n\t\tself._popupSettingsDialog(DictionaryDialog,_(\"Default dictionary\"),speechDictHandler.dictionaries[\"default\"])\r\n\r\n\tdef onVoiceDictionaryCommand(self,evt):\r\n\t\t# Translators: Title for voice dictionary for the current voice such as current eSpeak variant.\r\n\t\tself._popupSettingsDialog(DictionaryDialog,_(\"Voice dictionary (%s)\")%speechDictHandler.dictionaries[\"voice\"].fileName,speechDictHandler.dictionaries[\"voice\"])\r\n\r\n\tdef onTemporaryDictionaryCommand(self,evt):\r\n\t\t# Translators: Title for temporary speech dictionary dialog (the voice dictionary that is active as long as NvDA is running).\r\n\t\tself._popupSettingsDialog(DictionaryDialog,_(\"Temporary dictionary\"),speechDictHandler.dictionaries[\"temp\"])\r\n\r\n\tdef onExitCommand(self, evt):\r\n\t\tif config.conf[\"general\"][\"askToExit\"]:\r\n\t\t\tself.prePopup()\r\n\t\t\td = ExitDialog(self)\r\n\t\t\td.Raise()\r\n\t\t\td.Show()\r\n\t\t\tself.postPopup()\r\n\t\telse:\r\n\t\t\twx.GetApp().ExitMainLoop()\r\n\r\n\tdef onGeneralSettingsCommand(self,evt):\r\n\t\tself._popupSettingsDialog(GeneralSettingsDialog)\r\n\r\n\tdef onSynthesizerCommand(self,evt):\r\n\t\tself._popupSettingsDialog(SynthesizerDialog)\r\n\r\n\tdef onVoiceCommand(self,evt):\r\n\t\tself._popupSettingsDialog(VoiceSettingsDialog)\r\n\r\n\tdef onBrailleCommand(self,evt):\r\n\t\tself._popupSettingsDialog(BrailleSettingsDialog)\r\n\r\n\tdef onKeyboardSettingsCommand(self,evt):\r\n\t\tself._popupSettingsDialog(KeyboardSettingsDialog)\r\n\r\n\tdef onMouseSettingsCommand(self,evt):\r\n\t\tself._popupSettingsDialog(MouseSettingsDialog)\r\n\r\n\tdef onReviewCursorCommand(self,evt):\r\n\t\tself._popupSettingsDialog(ReviewCursorDialog)\r\n\r\n\tdef onInputCompositionCommand(self,evt):\r\n\t\tself._popupSettingsDialog(InputCompositionDialog)\r\n\r\n\tdef onObjectPresentationCommand(self,evt):\r\n\t\tself._popupSettingsDialog(ObjectPresentationDialog)\r\n\r\n\tdef onBrowseModeCommand(self,evt):\r\n\t\tself._popupSettingsDialog(BrowseModeDialog)\r\n\r\n\tdef onDocumentFormattingCommand(self,evt):\r\n\t\tself._popupSettingsDialog(DocumentFormattingDialog)\r\n\r\n\tdef onSpeechSymbolsCommand(self, evt):\r\n\t\tself._popupSettingsDialog(SpeechSymbolsDialog)\r\n\r\n\tdef onInputGesturesCommand(self, 
evt):\r\n\t\tself._popupSettingsDialog(InputGesturesDialog)\r\n\r\n\tdef onAboutCommand(self,evt):\r\n\t\t# Translators: The title of the dialog to show about info for NVDA.\r\n\t\tmessageBox(versionInfo.aboutMessage, _(\"About NVDA\"), wx.OK)\r\n\r\n\tdef onCheckForUpdateCommand(self, evt):\r\n\t\tupdateCheck.UpdateChecker().check()\r\n\t\t\r\n\tdef onViewLogCommand(self, evt):\r\n\t\tlogViewer.activate()\r\n\r\n\tdef onSpeechViewerEnabled(self, isEnabled):\r\n\t\t# its possible for this to be called after the sysTrayIcon is destroyed if we are exiting NVDA\r\n\t\tif self.sysTrayIcon and self.sysTrayIcon.menu_tools_toggleSpeechViewer:\r\n\t\t\tself.sysTrayIcon.menu_tools_toggleSpeechViewer.Check(isEnabled)\r\n\r\n\tdef onToggleSpeechViewerCommand(self, evt):\r\n\t\tif not speechViewer.isActive:\r\n\t\t\tspeechViewer.activate()\r\n\t\telse:\r\n\t\t\tspeechViewer.deactivate()\r\n\r\n\tdef onPythonConsoleCommand(self, evt):\r\n\t\timport pythonConsole\r\n\t\tif not pythonConsole.consoleUI:\r\n\t\t\tpythonConsole.initialize()\r\n\t\tpythonConsole.activate()\r\n\r\n\tdef onAddonsManagerCommand(self,evt):\r\n\t\tif isInMessageBox:\r\n\t\t\treturn\r\n\t\tself.prePopup()\r\n\t\tfrom addonGui import AddonsDialog\r\n\t\td=AddonsDialog(gui.mainFrame)\r\n\t\td.Show()\r\n\t\tself.postPopup()\r\n\r\n\tdef onReloadPluginsCommand(self, evt):\r\n\t\timport appModuleHandler, globalPluginHandler\r\n\t\tfrom NVDAObjects import NVDAObject\r\n\t\tappModuleHandler.reloadAppModules()\r\n\t\tglobalPluginHandler.reloadGlobalPlugins()\r\n\t\tNVDAObject.clearDynamicClassCache()\r\n\r\n\tdef onCreatePortableCopyCommand(self,evt):\r\n\t\tif isInMessageBox:\r\n\t\t\treturn\r\n\t\tself.prePopup()\r\n\t\timport gui.installerGui\r\n\t\td=gui.installerGui.PortableCreaterDialog(gui.mainFrame)\r\n\t\td.Show()\r\n\t\tself.postPopup()\r\n\r\n\tdef onInstallCommand(self, evt):\r\n\t\tif isInMessageBox:\r\n\t\t\treturn\r\n\t\tfrom gui import installerGui\r\n\t\tinstallerGui.showInstallGui()\r\n\r\n\tdef onConfigProfilesCommand(self, evt):\r\n\t\tif isInMessageBox:\r\n\t\t\treturn\r\n\t\tself.prePopup()\r\n\t\tfrom configProfiles import ProfilesDialog\r\n\t\tProfilesDialog(gui.mainFrame).Show()\r\n\t\tself.postPopup()\r\n\r\nclass SysTrayIcon(wx.TaskBarIcon):\r\n\r\n\tdef __init__(self, frame):\r\n\t\tsuper(SysTrayIcon, self).__init__()\r\n\t\ticon=wx.Icon(ICON_PATH,wx.BITMAP_TYPE_ICO)\r\n\t\tself.SetIcon(icon, versionInfo.name)\r\n\r\n\t\tself.menu=wx.Menu()\r\n\t\tmenu_preferences=self.preferencesMenu=wx.Menu()\r\n\t\t# Translators: The label for the menu item to open general Settings dialog.\r\n\t\titem = menu_preferences.Append(wx.ID_ANY,_(\"&General settings...\"),_(\"General settings\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onGeneralSettingsCommand, item)\r\n\t\t# Translators: The label for the menu item to open Synthesizer settings dialog.\r\n\t\titem = menu_preferences.Append(wx.ID_ANY,_(\"&Synthesizer...\"),_(\"Change the synthesizer to be used\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onSynthesizerCommand, item)\r\n\t\t# Translators: The label for the menu item to open Voice Settings dialog.\r\n\t\titem = menu_preferences.Append(wx.ID_ANY,_(\"&Voice settings...\"),_(\"Choose the voice, rate, pitch and volume to use\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onVoiceCommand, item)\r\n\t\t# Translators: The label for the menu item to open Braille Settings dialog.\r\n\t\titem = menu_preferences.Append(wx.ID_ANY,_(\"B&raille settings...\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onBrailleCommand, item)\r\n\t\t# Translators: The label 
for the menu item to open Keyboard Settings dialog.\r\n\t\titem = menu_preferences.Append(wx.ID_ANY,_(\"&Keyboard settings...\"),_(\"Configure keyboard layout, speaking of typed characters, words or command keys\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onKeyboardSettingsCommand, item)\r\n\t\t# Translators: The label for the menu item to open Mouse Settings dialog.\r\n\t\titem = menu_preferences.Append(wx.ID_ANY, _(\"&Mouse settings...\"),_(\"Change reporting of mouse shape and object under mouse\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onMouseSettingsCommand, item)\r\n\t\t# Translators: The label for the menu item to open Review Cursor dialog.\r\n\t\titem = menu_preferences.Append(wx.ID_ANY,_(\"Review &cursor...\"),_(\"Configure how and when the review cursor moves\")) \r\n\t\tself.Bind(wx.EVT_MENU, frame.onReviewCursorCommand, item)\r\n\t\t# Translators: The label for the menu item to open Input Composition Settings dialog.\r\n\t\titem = menu_preferences.Append(wx.ID_ANY,_(\"&Input composition settings...\"),_(\"Configure how NVDA reports input composition and candidate selection for certain languages\")) \r\n\t\tself.Bind(wx.EVT_MENU, frame.onInputCompositionCommand, item)\r\n\t\t# Translators: The label for the menu item to open Object Presentation dialog.\r\n\t\titem = menu_preferences.Append(wx.ID_ANY,_(\"&Object presentation...\"),_(\"Change reporting of objects\")) \r\n\t\tself.Bind(wx.EVT_MENU, frame.onObjectPresentationCommand, item)\r\n\t\t# Translators: The label for the menu item to open Browse Mode settings dialog.\r\n\t\titem = menu_preferences.Append(wx.ID_ANY,_(\"&Browse mode...\"),_(\"Change virtual buffers specific settings\")) \r\n\t\tself.Bind(wx.EVT_MENU, frame.onBrowseModeCommand, item)\r\n\t\t# Translators: The label for the menu item to open Document Formatting settings dialog.\r\n\t\titem = menu_preferences.Append(wx.ID_ANY,_(\"Document &formatting...\"),_(\"Change settings of document properties\")) \r\n\t\tself.Bind(wx.EVT_MENU, frame.onDocumentFormattingCommand, item)\r\n\t\tsubMenu_speechDicts = wx.Menu()\r\n\t\tif not globalVars.appArgs.secure:\r\n\t\t\t# Translators: The label for the menu item to open Default speech dictionary dialog.\r\n\t\t\titem = subMenu_speechDicts.Append(wx.ID_ANY,_(\"&Default dictionary...\"),_(\"A dialog where you can set default dictionary by adding dictionary entries to the list\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, frame.onDefaultDictionaryCommand, item)\r\n\t\t\t# Translators: The label for the menu item to open Voice specific speech dictionary dialog.\r\n\t\t\titem = subMenu_speechDicts.Append(wx.ID_ANY,_(\"&Voice dictionary...\"),_(\"A dialog where you can set voice-specific dictionary by adding dictionary entries to the list\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, frame.onVoiceDictionaryCommand, item)\r\n\t\t# Translators: The label for the menu item to open Temporary speech dictionary dialog.\r\n\t\titem = subMenu_speechDicts.Append(wx.ID_ANY,_(\"&Temporary dictionary...\"),_(\"A dialog where you can set temporary dictionary by adding dictionary entries to the edit box\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onTemporaryDictionaryCommand, item)\r\n\t\t# Translators: The label for a submenu under NvDA Preferences menu to select speech dictionaries.\r\n\t\tmenu_preferences.AppendMenu(wx.ID_ANY,_(\"Speech &dictionaries\"),subMenu_speechDicts)\r\n\t\tif not globalVars.appArgs.secure:\r\n\t\t\t# Translators: The label for the menu item to open Punctuation/symbol pronunciation dialog.\r\n\t\t\titem = menu_preferences.Append(wx.ID_ANY, 
_(\"&Punctuation/symbol pronunciation...\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, frame.onSpeechSymbolsCommand, item)\r\n\t\t\t# Translators: The label for the menu item to open the Input Gestures dialog.\r\n\t\t\titem = menu_preferences.Append(wx.ID_ANY, _(\"I&nput gestures...\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, frame.onInputGesturesCommand, item)\r\n\t\t# Translators: The label for Preferences submenu in NVDA menu.\r\n\t\tself.menu.AppendMenu(wx.ID_ANY,_(\"&Preferences\"),menu_preferences)\r\n\r\n\t\tmenu_tools = self.toolsMenu = wx.Menu()\r\n\t\tif not globalVars.appArgs.secure:\r\n\t\t\t# Translators: The label for the menu item to open NVDA Log Viewer.\r\n\t\t\titem = menu_tools.Append(wx.ID_ANY, _(\"View log\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, frame.onViewLogCommand, item)\r\n\t\t# Translators: The label for the menu item to toggle Speech Viewer.\r\n\t\titem=self.menu_tools_toggleSpeechViewer = menu_tools.AppendCheckItem(wx.ID_ANY, _(\"Speech viewer\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onToggleSpeechViewerCommand, item)\r\n\t\tif not globalVars.appArgs.secure:\r\n\t\t\t# Translators: The label for the menu item to open NVDA Python Console.\r\n\t\t\titem = menu_tools.Append(wx.ID_ANY, _(\"Python console\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, frame.onPythonConsoleCommand, item)\r\n\t\t\t# Translators: The label of a menu item to open the Add-ons Manager.\r\n\t\t\titem = menu_tools.Append(wx.ID_ANY, _(\"Manage &add-ons...\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, frame.onAddonsManagerCommand, item)\r\n\t\tif not globalVars.appArgs.secure and getattr(sys,'frozen',None):\r\n\t\t\t# Translators: The label for the menu item to create a portable copy of NVDA from an installed or another portable version.\r\n\t\t\titem = menu_tools.Append(wx.ID_ANY, _(\"Create portable copy...\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, frame.onCreatePortableCopyCommand, item)\r\n\t\t\tif not config.isInstalledCopy():\r\n\t\t\t\t# Translators: The label for the menu item to install NVDA on the computer.\r\n\t\t\t\titem = menu_tools.Append(wx.ID_ANY, _(\"&Install NVDA...\"))\r\n\t\t\t\tself.Bind(wx.EVT_MENU, frame.onInstallCommand, item)\r\n\t\t# Translators: The label for the menu item to reload plugins.\r\n\t\titem = menu_tools.Append(wx.ID_ANY, _(\"Reload plugins\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onReloadPluginsCommand, item)\r\n\t\t# Translators: The label for the Tools submenu in NVDA menu.\r\n\t\tself.menu.AppendMenu(wx.ID_ANY, _(\"Tools\"), menu_tools)\r\n\r\n\t\tmenu_help = self.helpMenu = wx.Menu()\r\n\t\tif not globalVars.appArgs.secure:\r\n\t\t\t# Translators: The label of a menu item to open NVDA user guide.\r\n\t\t\titem = menu_help.Append(wx.ID_ANY, _(\"&User Guide\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath(\"userGuide.html\")), item)\r\n\t\t\t# Translators: The label of a menu item to open the Commands Quick Reference document.\r\n\t\t\titem = menu_help.Append(wx.ID_ANY, _(\"Commands &Quick Reference\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath(\"keyCommands.html\")), item)\r\n\t\t\t# Translators: The label for the menu item to open What's New document.\r\n\t\t\titem = menu_help.Append(wx.ID_ANY, _(\"What's &new\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath(\"changes.html\")), item)\r\n\t\t\titem = menu_help.Append(wx.ID_ANY, _(\"NVDA &web site\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, lambda evt: os.startfile(\"http://www.nvda-project.org/\"), item)\r\n\t\t\t# Translators: The label for the menu item to view NVDA License 
document.\r\n\t\t\titem = menu_help.Append(wx.ID_ANY, _(\"L&icense\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath(\"copying.txt\", False)), item)\r\n\t\t\t# Translators: The label for the menu item to view NVDA Contributors list document.\r\n\t\t\titem = menu_help.Append(wx.ID_ANY, _(\"C&ontributors\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath(\"contributors.txt\", False)), item)\r\n\t\t# Translators: The label for the menu item to open NVDA Welcome Dialog.\r\n\t\titem = menu_help.Append(wx.ID_ANY, _(\"We&lcome dialog...\"))\r\n\t\tself.Bind(wx.EVT_MENU, lambda evt: WelcomeDialog.run(), item)\r\n\t\tmenu_help.AppendSeparator()\r\n\t\tif updateCheck:\r\n\t\t\t# Translators: The label of a menu item to manually check for an updated version of NVDA.\r\n\t\t\titem = menu_help.Append(wx.ID_ANY, _(\"&Check for update...\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, frame.onCheckForUpdateCommand, item)\r\n\t\t# Translators: The label for the menu item to open About dialog to get information about NVDA.\r\n\t\titem = menu_help.Append(wx.ID_ABOUT, _(\"About...\"), _(\"About NVDA\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onAboutCommand, item)\r\n\t\t# Translators: The label for the Help submenu in NVDA menu.\r\n\t\tself.menu.AppendMenu(wx.ID_ANY,_(\"&Help\"),menu_help)\r\n\t\tself.menu.AppendSeparator()\r\n\t\t# Translators: The label for the menu item to open the Configuration Profiles dialog.\r\n\t\titem = self.menu.Append(wx.ID_ANY, _(\"&Configuration profiles...\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onConfigProfilesCommand, item)\r\n\t\t# Translators: The label for the menu item to revert to saved configuration.\r\n\t\titem = self.menu.Append(wx.ID_ANY, _(\"&Revert to saved configuration\"),_(\"Reset all settings to saved state\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onRevertToSavedConfigurationCommand, item)\r\n\t\tif not globalVars.appArgs.secure:\r\n\t\t\t# Translators: The label for the menu item to reset settings to default settings.\r\n\t\t\t# Here, default settings means settings that were there when the user first used NVDA.\r\n\t\t\titem = self.menu.Append(wx.ID_ANY, _(\"&Reset configuration to factory defaults\"),_(\"Reset all settings to default state\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, frame.onRevertToDefaultConfigurationCommand, item)\r\n\t\t\t# Translators: The label for the menu item to save current settings.\r\n\t\t\titem = self.menu.Append(wx.ID_SAVE, _(\"&Save configuration\"), _(\"Write the current configuration to nvda.ini\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, frame.onSaveConfigurationCommand, item)\r\n\t\tif not globalVars.appArgs.secure:\r\n\t\t\tself.menu.AppendSeparator()\r\n\t\t\t# Translators: The label for the menu item to open donate page.\r\n\t\t\titem = self.menu.Append(wx.ID_ANY, _(\"Donate\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, lambda evt: os.startfile(DONATE_URL), item)\r\n\t\tself.menu.AppendSeparator()\r\n\t\titem = self.menu.Append(wx.ID_EXIT, _(\"E&xit\"),_(\"Exit NVDA\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onExitCommand, item)\r\n\r\n\t\tself.Bind(wx.EVT_TASKBAR_LEFT_DOWN, self.onActivate)\r\n\t\tself.Bind(wx.EVT_TASKBAR_RIGHT_DOWN, self.onActivate)\r\n\r\n\tdef Destroy(self):\r\n\t\tself.menu.Destroy()\r\n\t\tsuper(SysTrayIcon, self).Destroy()\r\n\r\n\tdef onActivate(self, evt):\r\n\t\tmainFrame.prePopup()\r\n\t\timport appModules.nvda\r\n\t\tif not appModules.nvda.nvdaMenuIaIdentity:\r\n\t\t\t# The NVDA app module doesn't know how to identify the NVDA menu yet.\r\n\t\t\t# Signal that the NVDA menu has just been 
opened.\r\n\t\t\tappModules.nvda.nvdaMenuIaIdentity = True\r\n\t\tself.PopupMenu(self.menu)\r\n\t\tif appModules.nvda.nvdaMenuIaIdentity is True:\r\n\t\t\t# The NVDA menu didn't actually appear for some reason.\r\n\t\t\tappModules.nvda.nvdaMenuIaIdentity = None\r\n\t\tmainFrame.postPopup()\r\n\r\ndef initialize():\r\n\tglobal mainFrame\r\n\tif mainFrame:\r\n\t\traise RuntimeError(\"GUI already initialized\")\r\n\tmainFrame = MainFrame()\r\n\twx.GetApp().SetTopWindow(mainFrame)\r\n\r\ndef terminate():\r\n\tglobal mainFrame\r\n\t# This is called after the main loop exits because WM_QUIT exits the main loop\r\n\t# without destroying all objects correctly and we need to support WM_QUIT.\r\n\t# Therefore, any request to exit should exit the main loop.\r\n\twx.CallAfter(mainFrame.Destroy)\r\n\t# #4460: We need another iteration of the main loop\r\n\t# so that everything (especially the TaskBarIcon) is cleaned up properly.\r\n\t# ProcessPendingEvents doesn't seem to work, but MainLoop does.\r\n\t# Because the top window gets destroyed,\r\n\t# MainLoop thankfully returns pretty quickly.\r\n\twx.GetApp().MainLoop()\r\n\tmainFrame = None\r\n\r\ndef showGui():\r\n \twx.CallAfter(mainFrame.showGui)\r\n\r\ndef quit():\r\n\twx.CallAfter(mainFrame.onExitCommand, None)\r\n\r\ndef messageBox(message, caption=wx.MessageBoxCaptionStr, style=wx.OK | wx.CENTER, parent=None):\r\n\t\"\"\"Display a message dialog.\r\n\tThis should be used for all message dialogs\r\n\trather than using C{wx.MessageDialog} and C{wx.MessageBox} directly.\r\n\t@param message: The message text.\r\n\t@type message: str\r\n\t@param caption: The caption (title) of the dialog.\r\n\t@type caption: str\r\n\t@param style: Same as for wx.MessageBox.\r\n\t@type style: int\r\n\t@param parent: The parent window (optional).\r\n\t@type parent: C{wx.Window}\r\n\t@return: Same as for wx.MessageBox.\r\n\t@rtype: int\r\n\t\"\"\"\r\n\tglobal isInMessageBox\r\n\twasAlready = isInMessageBox\r\n\tisInMessageBox = True\r\n\tif not parent:\r\n\t\tmainFrame.prePopup()\r\n\tres = wx.MessageBox(message, caption, style, parent or mainFrame)\r\n\tif not parent:\r\n\t\tmainFrame.postPopup()\r\n\tif not wasAlready:\r\n\t\tisInMessageBox = False\r\n\treturn res\r\n\r\ndef runScriptModalDialog(dialog, callback=None):\r\n\t\"\"\"Run a modal dialog from a script.\r\n\tThis will not block the caller,\r\n\tbut will instead call C{callback} (if provided) with the result from the dialog.\r\n\tThe dialog will be destroyed once the callback has returned.\r\n\t@param dialog: The dialog to show.\r\n\t@type dialog: C{wx.Dialog}\r\n\t@param callback: The optional callable to call with the result from the dialog.\r\n\t@type callback: callable\r\n\t\"\"\"\r\n\tdef run():\r\n\t\tmainFrame.prePopup()\r\n\t\tres = dialog.ShowModal()\r\n\t\tmainFrame.postPopup()\r\n\t\tif callback:\r\n\t\t\tcallback(res)\r\n\t\tdialog.Destroy()\r\n\twx.CallAfter(run)\r\n\r\nclass WelcomeDialog(wx.Dialog):\r\n\t\"\"\"The NVDA welcome dialog.\r\n\tThis provides essential information for new users, such as a description of the NVDA key and instructions on how to activate the NVDA menu.\r\n\tIt also provides quick access to some important configuration options.\r\n\tThis dialog is displayed the first time NVDA is started with a new configuration.\r\n\t\"\"\"\r\n\r\n\t# Translators: The main message for the Welcome dialog when the user starts NVDA for the first time.\r\n\tWELCOME_MESSAGE_DETAIL = _(\r\n\t\t\"Most commands for controlling NVDA require you to hold down the NVDA key while pressing other 
keys.\\n\"\r\n\t\t\"By default, the numpad insert and main insert keys may both be used as the NVDA key.\\n\"\r\n\t\t\"You can also configure NVDA to use the CapsLock as the NVDA key.\\n\"\r\n\t\t\"Press NVDA+n at any time to activate the NVDA menu.\\n\"\r\n\t\t\"From this menu, you can configure NVDA, get help and access other NVDA functions.\\n\"\r\n\t)\r\n\r\n\tdef __init__(self, parent):\r\n\t\t# Translators: The title of the Welcome dialog when user starts NVDA for the first time.\r\n\t\tsuper(WelcomeDialog, self).__init__(parent, wx.ID_ANY, _(\"Welcome to NVDA\"))\r\n\t\tmainSizer=wx.BoxSizer(wx.VERTICAL)\r\n\t\t# Translators: The header for the Welcome dialog when user starts NVDA for the first time. This is in larger,\r\n\t\t# bold lettering \r\n\t\twelcomeTextHeader = wx.StaticText(self, label=_(\"Welcome to NVDA!\"))\r\n\t\twelcomeTextHeader.SetFont(wx.Font(18, wx.NORMAL, wx.NORMAL, wx.BOLD))\r\n\t\tmainSizer.AddSpacer(10)\r\n\t\tmainSizer.Add(welcomeTextHeader,border=20,flag=wx.EXPAND|wx.LEFT|wx.RIGHT)\r\n\t\tmainSizer.AddSpacer(10)\r\n\t\twelcomeTextDetail = wx.StaticText(self, wx.ID_ANY, self.WELCOME_MESSAGE_DETAIL)\r\n\t\tmainSizer.Add(welcomeTextDetail,border=20,flag=wx.EXPAND|wx.LEFT|wx.RIGHT)\r\n\t\toptionsSizer = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, _(\"Options\")), wx.VERTICAL)\r\n\t\tself.capsAsNVDAModifierCheckBox = wx.CheckBox(self, wx.ID_ANY, _(\"Use CapsLock as an NVDA modifier key\"))\r\n\t\tself.capsAsNVDAModifierCheckBox.SetValue(config.conf[\"keyboard\"][\"useCapsLockAsNVDAModifierKey\"])\r\n\t\toptionsSizer.Add(self.capsAsNVDAModifierCheckBox,flag=wx.TOP|wx.LEFT,border=10)\r\n\t\t# Translators: The label of a check box in the Welcome dialog.\r\n\t\tself.startAfterLogonCheckBox = wx.CheckBox(self, label=_(\"&Automatically start NVDA after I log on to Windows\"))\r\n\t\tself.startAfterLogonCheckBox.Value = config.getStartAfterLogon()\r\n\t\tif globalVars.appArgs.secure or not config.isInstalledCopy():\r\n\t\t\tself.startAfterLogonCheckBox.Disable()\r\n\t\toptionsSizer.Add(self.startAfterLogonCheckBox,flag=wx.TOP|wx.LEFT,border=10)\r\n\t\t# Translators: This is a label for a checkbox in welcome dialog to show welcome dialog at startup.\r\n\t\tself.showWelcomeDialogAtStartupCheckBox = wx.CheckBox(self, wx.ID_ANY, _(\"Show this dialog when NVDA starts\"))\r\n\t\tself.showWelcomeDialogAtStartupCheckBox.SetValue(config.conf[\"general\"][\"showWelcomeDialogAtStartup\"])\r\n\t\toptionsSizer.Add(self.showWelcomeDialogAtStartupCheckBox,flag=wx.TOP|wx.LEFT,border=10)\r\n\t\tmainSizer.Add(optionsSizer,flag=wx.LEFT|wx.TOP|wx.RIGHT|wx.EXPAND,border=20)\r\n\t\tmainSizer.Add(self.CreateButtonSizer(wx.OK),flag=wx.TOP|wx.BOTTOM|wx.ALIGN_CENTER_HORIZONTAL,border=20)\r\n\t\tself.Bind(wx.EVT_BUTTON, self.onOk, id=wx.ID_OK)\r\n\r\n\t\tself.SetSizer(mainSizer)\r\n\t\tmainSizer.Fit(self)\r\n\t\tself.capsAsNVDAModifierCheckBox.SetFocus()\r\n\t\tself.Center(wx.BOTH | wx.CENTER_ON_SCREEN)\r\n\r\n\tdef onOk(self, evt):\r\n\t\tconfig.conf[\"keyboard\"][\"useCapsLockAsNVDAModifierKey\"] = self.capsAsNVDAModifierCheckBox.IsChecked()\r\n\t\tif self.startAfterLogonCheckBox.Enabled:\r\n\t\t\tconfig.setStartAfterLogon(self.startAfterLogonCheckBox.Value)\r\n\t\tconfig.conf[\"general\"][\"showWelcomeDialogAtStartup\"] = self.showWelcomeDialogAtStartupCheckBox.IsChecked()\r\n\t\ttry:\r\n\t\t\tconfig.conf.save()\r\n\t\texcept:\r\n\t\t\tlog.debugWarning(\"could not save\",exc_info=True)\r\n\t\tself.EndModal(wx.ID_OK)\r\n\r\n\t@classmethod\r\n\tdef run(cls):\r\n\t\t\"\"\"Prepare and display 
an instance of this dialog.\r\n\t\tThis does not require the dialog to be instantiated.\r\n\t\t\"\"\"\r\n\t\tmainFrame.prePopup()\r\n\t\td = cls(mainFrame)\r\n\t\td.ShowModal()\r\n\t\td.Destroy()\r\n\t\tmainFrame.postPopup()\r\n\r\nclass LauncherDialog(wx.Dialog):\r\n\t\"\"\"The dialog that is displayed when NVDA is started from the launcher.\r\n\tThis displays the license and allows the user to install or create a portable copy of NVDA.\r\n\t\"\"\"\r\n\r\n\tdef __init__(self, parent):\r\n\t\tsuper(LauncherDialog, self).__init__(parent, title=versionInfo.name)\r\n\t\tmainSizer = wx.BoxSizer(wx.VERTICAL)\r\n\t\tsHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)\r\n\r\n\t\t# Translators: The label of the license text which will be shown when NVDA installation program starts.\r\n\t\tgroupLabel = _(\"License Agreement\")\r\n\t\tsizer = sHelper.addItem(wx.StaticBoxSizer(wx.StaticBox(self, label=groupLabel), wx.VERTICAL))\r\n\t\tlicenseTextCtrl = wx.TextCtrl(self, size=(500, 400), style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_RICH)\r\n\t\tlicenseTextCtrl.Value = codecs.open(getDocFilePath(\"copying.txt\", False), \"r\", encoding=\"UTF-8\").read()\r\n\t\tsizer.Add(licenseTextCtrl)\r\n\r\n\t\t# Translators: The label for a checkbox in NvDA installation program to agree to the license agreement.\r\n\t\tagreeText = _(\"I &agree\")\r\n\t\tself.licenseAgreeCheckbox = sHelper.addItem(wx.CheckBox(self, label=agreeText))\r\n\t\tself.licenseAgreeCheckbox.Value = False\r\n\t\tself.licenseAgreeCheckbox.Bind(wx.EVT_CHECKBOX, self.onLicenseAgree)\r\n\r\n\t\tsizer = sHelper.addItem(wx.GridSizer(rows=2, cols=2))\r\n\t\tself.actionButtons = []\r\n\t\t# Translators: The label of the button in NVDA installation program to install NvDA on the user's computer.\r\n\t\tctrl = wx.Button(self, label=_(\"&Install NVDA on this computer\"))\r\n\t\tsizer.Add(ctrl)\r\n\t\tctrl.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(evt, mainFrame.onInstallCommand))\r\n\t\tself.actionButtons.append(ctrl)\r\n\t\t# Translators: The label of the button in NVDA installation program to create a portable version of NVDA.\r\n\t\tctrl = wx.Button(self, label=_(\"Create &portable copy\"))\r\n\t\tsizer.Add(ctrl)\r\n\t\tctrl.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(evt, mainFrame.onCreatePortableCopyCommand))\r\n\t\tself.actionButtons.append(ctrl)\r\n\t\t# Translators: The label of the button in NVDA installation program to continue using the installation program as a temporary copy of NVDA.\r\n\t\tctrl = wx.Button(self, label=_(\"&Continue running\"))\r\n\t\tsizer.Add(ctrl)\r\n\t\tctrl.Bind(wx.EVT_BUTTON, self.onContinueRunning)\r\n\t\tself.actionButtons.append(ctrl)\r\n\t\tsizer.Add(wx.Button(self, label=_(\"E&xit\"), id=wx.ID_CANCEL))\r\n\t\t# If we bind this on the button, it fails to trigger when the dialog is closed.\r\n\t\tself.Bind(wx.EVT_BUTTON, self.onExit, id=wx.ID_CANCEL)\r\n\r\n\t\tfor ctrl in self.actionButtons:\r\n\t\t\tctrl.Disable()\r\n\r\n\t\tmainSizer.Add(sHelper.sizer, border = guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)\r\n\t\tself.Sizer = mainSizer\r\n\t\tmainSizer.Fit(self)\r\n\t\tself.Center(wx.BOTH | wx.CENTER_ON_SCREEN)\r\n\r\n\tdef onLicenseAgree(self, evt):\r\n\t\tfor ctrl in self.actionButtons:\r\n\t\t\tctrl.Enable(evt.IsChecked())\r\n\r\n\tdef onAction(self, evt, func):\r\n\t\tself.Destroy()\r\n\t\tfunc(evt)\r\n\r\n\tdef onContinueRunning(self, evt):\r\n\t\tself.Destroy()\r\n\t\tcore.doStartupDialogs()\r\n\r\n\tdef onExit(self, evt):\r\n\t\twx.GetApp().ExitMainLoop()\r\n\r\n\t@classmethod\r\n\tdef 
run(cls):\r\n\t\t\"\"\"Prepare and display an instance of this dialog.\r\n\t\tThis does not require the dialog to be instantiated.\r\n\t\t\"\"\"\r\n\t\tmainFrame.prePopup()\r\n\t\td = cls(mainFrame)\r\n\t\td.Show()\r\n\t\tmainFrame.postPopup()\r\n\r\nclass ExitDialog(wx.Dialog):\r\n\t_instance = None\r\n\r\n\tdef __new__(cls, parent):\r\n\t\t# Make this a singleton.\r\n\t\tinst = cls._instance() if cls._instance else None\r\n\t\tif not inst:\r\n\t\t\treturn super(cls, cls).__new__(cls, parent)\r\n\t\treturn inst\r\n\r\n\tdef __init__(self, parent):\r\n\t\tinst = ExitDialog._instance() if ExitDialog._instance else None\r\n\t\tif inst:\r\n\t\t\treturn\r\n\t\t# Use a weakref so the instance can die.\r\n\t\tExitDialog._instance = weakref.ref(self)\r\n\t\t# Translators: The title of the dialog to exit NVDA\r\n\t\tsuper(ExitDialog, self).__init__(parent, title=_(\"Exit NVDA\"))\r\n\t\tdialog = self\r\n\t\tmainSizer = wx.BoxSizer(wx.VERTICAL)\r\n\r\n\t\tcontentSizerHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)\r\n\r\n\t\tif globalVars.appArgs.disableAddons:\r\n\t\t\t# Translators: A message in the exit Dialog shown when all add-ons are disabled.\r\n\t\t\taddonsDisabledText = _(\"All add-ons are now disabled. They will be re-enabled on the next restart unless you choose to disable them again.\")\r\n\t\t\tcontentSizerHelper.addItem(wx.StaticText(self, wx.ID_ANY, label=addonsDisabledText))\r\n\r\n\t\t# Translators: The label for actions list in the Exit dialog.\r\n\t\tlabelText=_(\"What would you like to &do?\")\r\n\t\tself.actions = [\r\n\t\t# Translators: An option in the combo box to choose exit action.\r\n\t\t_(\"Exit\"),\r\n\t\t# Translators: An option in the combo box to choose exit action.\r\n\t\t_(\"Restart\"),\r\n\t\t# Translators: An option in the combo box to choose exit action.\r\n\t\t_(\"Restart with add-ons disabled\"),\r\n\t\t# Translators: An option in the combo box to choose exit action.\r\n\t\t_(\"Restart with debug logging enabled\")]\r\n\t\tself.actionsList = contentSizerHelper.addLabeledControl(labelText, wx.Choice, choices=self.actions)\r\n\t\tself.actionsList.SetSelection(0)\r\n\r\n\t\tcontentSizerHelper.addItem( self.CreateButtonSizer(wx.OK | wx.CANCEL))\r\n\r\n\t\tself.Bind(wx.EVT_BUTTON, self.onOk, id=wx.ID_OK)\r\n\t\tself.Bind(wx.EVT_BUTTON, self.onCancel, id=wx.ID_CANCEL)\r\n\r\n\t\tmainSizer.Add(contentSizerHelper.sizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)\r\n\t\tmainSizer.Fit(self)\r\n\t\tself.Sizer = mainSizer\r\n\t\tself.actionsList.SetFocus()\r\n\t\tself.Center(wx.BOTH | wx.CENTER_ON_SCREEN)\r\n\r\n\tdef onOk(self, evt):\r\n\t\taction=self.actionsList.GetSelection()\r\n\t\tif action == 0:\r\n\t\t\twx.GetApp().ExitMainLoop()\r\n\t\telif action == 1:\r\n\t\t\tqueueHandler.queueFunction(queueHandler.eventQueue,core.restart)\r\n\t\telif action == 2:\r\n\t\t\tqueueHandler.queueFunction(queueHandler.eventQueue,core.restart,disableAddons=True)\r\n\t\telif action == 3:\r\n\t\t\tqueueHandler.queueFunction(queueHandler.eventQueue,core.restart,debugLogging=True)\r\n\t\tself.Destroy()\r\n\r\n\tdef onCancel(self, evt):\r\n\t\tself.Destroy()\r\n\r\nclass ExecAndPump(threading.Thread):\r\n\t\"\"\"Executes the given function with given args and kwargs in a background thread while blocking and pumping in the current thread.\"\"\"\r\n\r\n\tdef 
__init__(self,func,*args,**kwargs):\r\n\t\tself.func=func\r\n\t\tself.args=args\r\n\t\tself.kwargs=kwargs\r\n\t\tsuper(ExecAndPump,self).__init__()\r\n\t\tself.threadExc=None\r\n\t\tself.start()\r\n\t\ttime.sleep(0.1)\r\n\t\tthreadHandle=ctypes.c_int()\r\n\t\tthreadHandle.value=ctypes.windll.kernel32.OpenThread(0x100000,False,self.ident)\r\n\t\tmsg=ctypes.wintypes.MSG()\r\n\t\twhile ctypes.windll.user32.MsgWaitForMultipleObjects(1,ctypes.byref(threadHandle),False,-1,255)==1:\r\n\t\t\twhile ctypes.windll.user32.PeekMessageW(ctypes.byref(msg),None,0,0,1):\r\n\t\t\t\tctypes.windll.user32.TranslateMessage(ctypes.byref(msg))\r\n\t\t\t\tctypes.windll.user32.DispatchMessageW(ctypes.byref(msg))\r\n\t\tif self.threadExc:\r\n\t\t\traise self.threadExc\r\n\r\n\tdef run(self):\r\n\t\ttry:\r\n\t\t\tself.func(*self.args,**self.kwargs)\r\n\t\texcept Exception as e:\r\n\t\t\tself.threadExc=e\r\n\t\t\tlog.debugWarning(\"task had errors\",exc_info=True)\r\n\r\nclass IndeterminateProgressDialog(wx.ProgressDialog):\r\n\r\n\tdef __init__(self, parent, title, message):\r\n\t\tsuper(IndeterminateProgressDialog, self).__init__(title, message, parent=parent)\r\n\t\tself._speechCounter = -1\r\n\t\tself.timer = wx.PyTimer(self.Pulse)\r\n\t\tself.timer.Start(1000)\r\n\t\tself.Raise()\r\n\t\tself.Center(wx.BOTH | wx.CENTER_ON_SCREEN)\r\n\r\n\tdef Pulse(self):\r\n\t\tsuper(IndeterminateProgressDialog, self).Pulse()\r\n\t\t# We want progress to be spoken on the first pulse and every 10 pulses thereafter.\r\n\t\t# Therefore, cycle from 0 to 9 inclusive.\r\n\t\tself._speechCounter = (self._speechCounter + 1) % 10\r\n\t\tpbConf = config.conf[\"presentation\"][\"progressBarUpdates\"]\r\n\t\tif pbConf[\"progressBarOutputMode\"] == \"off\":\r\n\t\t\treturn\r\n\t\tif not pbConf[\"reportBackgroundProgressBars\"] and not self.IsActive():\r\n\t\t\treturn\r\n\t\tif pbConf[\"progressBarOutputMode\"] in (\"beep\", \"both\"):\r\n\t\t\ttones.beep(440, 40)\r\n\t\tif pbConf[\"progressBarOutputMode\"] in (\"speak\", \"both\") and self._speechCounter == 0:\r\n\t\t\t# Translators: Announced periodically to indicate progress for an indeterminate progress bar.\r\n\t\t\tspeech.speakMessage(_(\"Please wait\"))\r\n\r\n\tdef IsActive(self):\r\n\t\t#4714: In wxPython 3, ProgressDialog.IsActive always seems to return False.\r\n\t\treturn winUser.isDescendantWindow(winUser.getForegroundWindow(), self.Handle)\r\n\r\n\tdef done(self):\r\n\t\tself.timer.Stop()\r\n\t\tif self.IsActive():\r\n\t\t\ttones.beep(1760, 40)\r\n\t\tself.Hide()\r\n\t\tself.Destroy()\r\n\r\ndef shouldConfigProfileTriggersBeSuspended():\r\n\t\"\"\"Determine whether configuration profile triggers should be suspended in relation to NVDA's GUI.\r\n\tFor NVDA configuration dialogs, the configuration should remain the same as it was before the GUI was popped up\r\n\tso the user can change settings in the correct profile.\r\n\tTop-level windows that require this behavior should have a C{shouldSuspendConfigProfileTriggers} attribute set to C{True}.\r\n\tBecause these dialogs are often opened via the NVDA menu, this applies to the NVDA menu as well.\r\n\t\"\"\"\r\n\tif winUser.getGUIThreadInfo(ctypes.windll.kernel32.GetCurrentThreadId()).flags & 0x00000010:\r\n\t\t# The NVDA menu is active.\r\n\t\treturn True\r\n\tfor window in wx.GetTopLevelWindows():\r\n\t\tif window.IsShown() and getattr(window, \"shouldSuspendConfigProfileTriggers\", False):\r\n\t\t\treturn True\r\n\treturn False\r\n",
"path": "source/gui/__init__.py"
}
] | [
{
"content": "# -*- coding: UTF-8 -*-\r\n#gui/__init__.py\r\n#A part of NonVisual Desktop Access (NVDA)\r\n#Copyright (C) 2006-2015 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Mesar Hameed, Joseph Lee\r\n#This file is covered by the GNU General Public License.\r\n#See the file COPYING for more details.\r\n\r\nimport time\r\nimport os\r\nimport sys\r\nimport threading\r\nimport codecs\r\nimport ctypes\r\nimport weakref\r\nimport wx\r\nimport globalVars\r\nimport tones\r\nimport ui\r\nfrom logHandler import log\r\nimport config\r\nimport versionInfo\r\nimport speech\r\nimport queueHandler\r\nimport core\r\nfrom settingsDialogs import *\r\nimport speechDictHandler\r\nimport languageHandler\r\nimport logViewer\r\nimport speechViewer\r\nimport winUser\r\nimport api\r\nimport guiHelper\r\n\r\ntry:\r\n\timport updateCheck\r\nexcept RuntimeError:\r\n\tupdateCheck = None\r\n\r\n### Constants\r\nNVDA_PATH = os.getcwdu()\r\nICON_PATH=os.path.join(NVDA_PATH, \"images\", \"nvda.ico\")\r\nDONATE_URL = \"http://www.nvaccess.org/donate/\"\r\n\r\n### Globals\r\nmainFrame = None\r\nisInMessageBox = False\r\n\r\ndef getDocFilePath(fileName, localized=True):\r\n\tif not getDocFilePath.rootPath:\r\n\t\tif hasattr(sys, \"frozen\"):\r\n\t\t\tgetDocFilePath.rootPath = os.path.join(NVDA_PATH, \"documentation\")\r\n\t\telse:\r\n\t\t\tgetDocFilePath.rootPath = os.path.abspath(os.path.join(\"..\", \"user_docs\"))\r\n\r\n\tif localized:\r\n\t\tlang = languageHandler.getLanguage()\r\n\t\ttryLangs = [lang]\r\n\t\tif \"_\" in lang:\r\n\t\t\t# This locale has a sub-locale, but documentation might not exist for the sub-locale, so try stripping it.\r\n\t\t\ttryLangs.append(lang.split(\"_\")[0])\r\n\t\t# If all else fails, use English.\r\n\t\ttryLangs.append(\"en\")\r\n\r\n\t\tfileName, fileExt = os.path.splitext(fileName)\r\n\t\tfor tryLang in tryLangs:\r\n\t\t\ttryDir = os.path.join(getDocFilePath.rootPath, tryLang)\r\n\t\t\tif not os.path.isdir(tryDir):\r\n\t\t\t\tcontinue\r\n\r\n\t\t\t# Some out of date translations might include .txt files which are now .html files in newer translations.\r\n\t\t\t# Therefore, ignore the extension and try both .html and .txt.\r\n\t\t\tfor tryExt in (\"html\", \"txt\"):\r\n\t\t\t\ttryPath = os.path.join(tryDir, \"%s.%s\" % (fileName, tryExt))\r\n\t\t\t\tif os.path.isfile(tryPath):\r\n\t\t\t\t\treturn tryPath\r\n\r\n\telse:\r\n\t\t# Not localized.\r\n\t\tif not hasattr(sys, \"frozen\") and fileName in (\"copying.txt\", \"contributors.txt\"):\r\n\t\t\t# If running from source, these two files are in the root dir.\r\n\t\t\treturn os.path.join(NVDA_PATH, \"..\", fileName)\r\n\t\telse:\r\n\t\t\treturn os.path.join(getDocFilePath.rootPath, fileName)\r\ngetDocFilePath.rootPath = None\r\n\r\nclass MainFrame(wx.Frame):\r\n\r\n\tdef __init__(self):\r\n\t\tstyle = wx.DEFAULT_FRAME_STYLE ^ wx.MAXIMIZE_BOX ^ wx.MINIMIZE_BOX | wx.FRAME_NO_TASKBAR\r\n\t\tsuper(MainFrame, self).__init__(None, wx.ID_ANY, versionInfo.name, size=(1,1), style=style)\r\n\t\tself.Bind(wx.EVT_CLOSE, self.onExitCommand)\r\n\t\tself.sysTrayIcon = SysTrayIcon(self)\r\n\t\t#: The focus before the last popup or C{None} if unknown.\r\n\t\t#: This is only valid before L{prePopup} is called,\r\n\t\t#: so it should be used as early as possible in any popup that needs it.\r\n\t\t#: @type: L{NVDAObject}\r\n\t\tself.prevFocus = None\r\n\t\t#: The focus ancestors before the last popup or C{None} if unknown.\r\n\t\t#: @type: list of L{NVDAObject}\r\n\t\tself.prevFocusAncestors = None\r\n\t\t# If NVDA has the uiAccess privilege, it can 
always set the foreground window.\r\n\t\tif not config.hasUiAccess():\r\n\t\t\t# This makes Windows return to the previous foreground window and also seems to allow NVDA to be brought to the foreground.\r\n\t\t\tself.Show()\r\n\t\t\tself.Hide()\r\n\t\t\tif winUser.isWindowVisible(self.Handle):\r\n\t\t\t\t# HACK: Work around a wx bug where Hide() doesn't actually hide the window,\r\n\t\t\t\t# but IsShown() returns False and Hide() again doesn't fix it.\r\n\t\t\t\t# This seems to happen if the call takes too long.\r\n\t\t\t\tself.Show()\r\n\t\t\t\tself.Hide()\r\n\r\n\tdef Destroy(self):\r\n\t\tself.sysTrayIcon.Destroy()\r\n\t\tsuper(MainFrame, self).Destroy()\r\n\r\n\tdef prePopup(self):\r\n\t\t\"\"\"Prepare for a popup.\r\n\t\tThis should be called before any dialog or menu which should pop up for the user.\r\n\t\tL{postPopup} should be called after the dialog or menu has been shown.\r\n\t\t@postcondition: A dialog or menu may be shown.\r\n\t\t\"\"\"\r\n\t\tnvdaPid = os.getpid()\r\n\t\tfocus = api.getFocusObject()\r\n\t\tif focus.processID != nvdaPid:\r\n\t\t\tself.prevFocus = focus\r\n\t\t\tself.prevFocusAncestors = api.getFocusAncestors()\r\n\t\tif winUser.getWindowThreadProcessID(winUser.getForegroundWindow())[0] != nvdaPid:\r\n\t\t\t# This process is not the foreground process, so bring it to the foreground.\r\n\t\t\tself.Raise()\r\n\r\n\tdef postPopup(self):\r\n\t\t\"\"\"Clean up after a popup dialog or menu.\r\n\t\tThis should be called after a dialog or menu was popped up for the user.\r\n\t\t\"\"\"\r\n\t\tself.prevFocus = None\r\n\t\tself.prevFocusAncestors = None\r\n\t\tif not winUser.isWindowVisible(winUser.getForegroundWindow()):\r\n\t\t\t# The current foreground window is invisible, so we want to return to the previous foreground window.\r\n\t\t\t# Showing and hiding our main window seems to achieve this.\r\n\t\t\tself.Show()\r\n\t\t\tself.Hide()\r\n\r\n\tdef showGui(self):\r\n\t\t# The menu pops up at the location of the mouse, which means it pops up at an unpredictable location.\r\n\t\t# Therefore, move the mouse to the centre of the screen so that the menu will always pop up there.\r\n\t\tleft, top, width, height = api.getDesktopObject().location\r\n\t\tx = width / 2\r\n\t\ty = height / 2\r\n\t\twinUser.setCursorPos(x, y)\r\n\t\tself.sysTrayIcon.onActivate(None)\r\n\r\n\tdef onRevertToSavedConfigurationCommand(self,evt):\r\n\t\tqueueHandler.queueFunction(queueHandler.eventQueue,core.resetConfiguration)\r\n\t\t# Translators: Reported when last saved configuration has been applied by using revert to saved configuration option in NVDA menu.\r\n\t\tqueueHandler.queueFunction(queueHandler.eventQueue,ui.message,_(\"Configuration applied\"))\r\n\r\n\tdef onRevertToDefaultConfigurationCommand(self,evt):\r\n\t\tqueueHandler.queueFunction(queueHandler.eventQueue,core.resetConfiguration,factoryDefaults=True)\r\n\t\t# Translators: Reported when configuration has been restored to defaults by using restore configuration to factory defaults item in NVDA menu.\r\n\t\tqueueHandler.queueFunction(queueHandler.eventQueue,ui.message,_(\"Configuration restored to factory defaults\"))\r\n\r\n\tdef onSaveConfigurationCommand(self,evt):\r\n\t\tif globalVars.appArgs.secure:\r\n\t\t\t# Translators: Reported when current configuration cannot be saved while NVDA is running in secure mode such as in Windows login screen.\r\n\t\t\tqueueHandler.queueFunction(queueHandler.eventQueue,ui.message,_(\"Cannot save configuration - NVDA in secure 
mode\"))\r\n\t\t\treturn\r\n\t\ttry:\r\n\t\t\tconfig.conf.save()\r\n\t\t\t# Translators: Reported when current configuration has been saved.\r\n\t\t\tqueueHandler.queueFunction(queueHandler.eventQueue,ui.message,_(\"Configuration saved\"))\r\n\t\texcept:\r\n\t\t\t# Translators: Message shown when current configuration cannot be saved such as when running NVDA from a CD.\r\n\t\t\tmessageBox(_(\"Could not save configuration - probably read only file system\"),_(\"Error\"),wx.OK | wx.ICON_ERROR)\r\n\r\n\tdef _popupSettingsDialog(self, dialog, *args, **kwargs):\r\n\t\tif isInMessageBox:\r\n\t\t\treturn\r\n\t\tself.prePopup()\r\n\t\ttry:\r\n\t\t\tdialog(self, *args, **kwargs).Show()\r\n\t\texcept SettingsDialog.MultiInstanceError:\r\n\t\t\t# Translators: Message shown when attempting to open another NVDA settings dialog when one is already open (example: when trying to open keyboard settings when general settings dialog is open).\r\n\t\t\tmessageBox(_(\"An NVDA settings dialog is already open. Please close it first.\"),_(\"Error\"),style=wx.OK | wx.ICON_ERROR)\r\n\t\tself.postPopup()\r\n\r\n\tdef onDefaultDictionaryCommand(self,evt):\r\n\t\t# Translators: Title for default speech dictionary dialog.\r\n\t\tself._popupSettingsDialog(DictionaryDialog,_(\"Default dictionary\"),speechDictHandler.dictionaries[\"default\"])\r\n\r\n\tdef onVoiceDictionaryCommand(self,evt):\r\n\t\t# Translators: Title for voice dictionary for the current voice such as current eSpeak variant.\r\n\t\tself._popupSettingsDialog(DictionaryDialog,_(\"Voice dictionary (%s)\")%speechDictHandler.dictionaries[\"voice\"].fileName,speechDictHandler.dictionaries[\"voice\"])\r\n\r\n\tdef onTemporaryDictionaryCommand(self,evt):\r\n\t\t# Translators: Title for temporary speech dictionary dialog (the voice dictionary that is active as long as NvDA is running).\r\n\t\tself._popupSettingsDialog(DictionaryDialog,_(\"Temporary dictionary\"),speechDictHandler.dictionaries[\"temp\"])\r\n\r\n\tdef onExitCommand(self, evt):\r\n\t\tif config.conf[\"general\"][\"askToExit\"]:\r\n\t\t\tself.prePopup()\r\n\t\t\td = ExitDialog(self)\r\n\t\t\td.Raise()\r\n\t\t\td.Show()\r\n\t\t\tself.postPopup()\r\n\t\telse:\r\n\t\t\twx.GetApp().ExitMainLoop()\r\n\r\n\tdef onGeneralSettingsCommand(self,evt):\r\n\t\tself._popupSettingsDialog(GeneralSettingsDialog)\r\n\r\n\tdef onSynthesizerCommand(self,evt):\r\n\t\tself._popupSettingsDialog(SynthesizerDialog)\r\n\r\n\tdef onVoiceCommand(self,evt):\r\n\t\tself._popupSettingsDialog(VoiceSettingsDialog)\r\n\r\n\tdef onBrailleCommand(self,evt):\r\n\t\tself._popupSettingsDialog(BrailleSettingsDialog)\r\n\r\n\tdef onKeyboardSettingsCommand(self,evt):\r\n\t\tself._popupSettingsDialog(KeyboardSettingsDialog)\r\n\r\n\tdef onMouseSettingsCommand(self,evt):\r\n\t\tself._popupSettingsDialog(MouseSettingsDialog)\r\n\r\n\tdef onReviewCursorCommand(self,evt):\r\n\t\tself._popupSettingsDialog(ReviewCursorDialog)\r\n\r\n\tdef onInputCompositionCommand(self,evt):\r\n\t\tself._popupSettingsDialog(InputCompositionDialog)\r\n\r\n\tdef onObjectPresentationCommand(self,evt):\r\n\t\tself._popupSettingsDialog(ObjectPresentationDialog)\r\n\r\n\tdef onBrowseModeCommand(self,evt):\r\n\t\tself._popupSettingsDialog(BrowseModeDialog)\r\n\r\n\tdef onDocumentFormattingCommand(self,evt):\r\n\t\tself._popupSettingsDialog(DocumentFormattingDialog)\r\n\r\n\tdef onSpeechSymbolsCommand(self, evt):\r\n\t\tself._popupSettingsDialog(SpeechSymbolsDialog)\r\n\r\n\tdef onInputGesturesCommand(self, 
evt):\r\n\t\tself._popupSettingsDialog(InputGesturesDialog)\r\n\r\n\tdef onAboutCommand(self,evt):\r\n\t\t# Translators: The title of the dialog to show about info for NVDA.\r\n\t\tmessageBox(versionInfo.aboutMessage, _(\"About NVDA\"), wx.OK)\r\n\r\n\tdef onCheckForUpdateCommand(self, evt):\r\n\t\tupdateCheck.UpdateChecker().check()\r\n\t\t\r\n\tdef onViewLogCommand(self, evt):\r\n\t\tlogViewer.activate()\r\n\r\n\tdef onSpeechViewerEnabled(self, isEnabled):\r\n\t\t# its possible for this to be called after the sysTrayIcon is destroyed if we are exiting NVDA\r\n\t\tif self.sysTrayIcon and self.sysTrayIcon.menu_tools_toggleSpeechViewer:\r\n\t\t\tself.sysTrayIcon.menu_tools_toggleSpeechViewer.Check(isEnabled)\r\n\r\n\tdef onToggleSpeechViewerCommand(self, evt):\r\n\t\tif not speechViewer.isActive:\r\n\t\t\tspeechViewer.activate()\r\n\t\telse:\r\n\t\t\tspeechViewer.deactivate()\r\n\r\n\tdef onPythonConsoleCommand(self, evt):\r\n\t\timport pythonConsole\r\n\t\tif not pythonConsole.consoleUI:\r\n\t\t\tpythonConsole.initialize()\r\n\t\tpythonConsole.activate()\r\n\r\n\tdef onAddonsManagerCommand(self,evt):\r\n\t\tif isInMessageBox:\r\n\t\t\treturn\r\n\t\tself.prePopup()\r\n\t\tfrom addonGui import AddonsDialog\r\n\t\td=AddonsDialog(gui.mainFrame)\r\n\t\td.Show()\r\n\t\tself.postPopup()\r\n\r\n\tdef onReloadPluginsCommand(self, evt):\r\n\t\timport appModuleHandler, globalPluginHandler\r\n\t\tfrom NVDAObjects import NVDAObject\r\n\t\tappModuleHandler.reloadAppModules()\r\n\t\tglobalPluginHandler.reloadGlobalPlugins()\r\n\t\tNVDAObject.clearDynamicClassCache()\r\n\r\n\tdef onCreatePortableCopyCommand(self,evt):\r\n\t\tif isInMessageBox:\r\n\t\t\treturn\r\n\t\tself.prePopup()\r\n\t\timport gui.installerGui\r\n\t\td=gui.installerGui.PortableCreaterDialog(gui.mainFrame)\r\n\t\td.Show()\r\n\t\tself.postPopup()\r\n\r\n\tdef onInstallCommand(self, evt):\r\n\t\tif isInMessageBox:\r\n\t\t\treturn\r\n\t\tfrom gui import installerGui\r\n\t\tinstallerGui.showInstallGui()\r\n\r\n\tdef onConfigProfilesCommand(self, evt):\r\n\t\tif isInMessageBox:\r\n\t\t\treturn\r\n\t\tself.prePopup()\r\n\t\tfrom configProfiles import ProfilesDialog\r\n\t\tProfilesDialog(gui.mainFrame).Show()\r\n\t\tself.postPopup()\r\n\r\nclass SysTrayIcon(wx.TaskBarIcon):\r\n\r\n\tdef __init__(self, frame):\r\n\t\tsuper(SysTrayIcon, self).__init__()\r\n\t\ticon=wx.Icon(ICON_PATH,wx.BITMAP_TYPE_ICO)\r\n\t\tself.SetIcon(icon, versionInfo.name)\r\n\r\n\t\tself.menu=wx.Menu()\r\n\t\tmenu_preferences=self.preferencesMenu=wx.Menu()\r\n\t\t# Translators: The label for the menu item to open general Settings dialog.\r\n\t\titem = menu_preferences.Append(wx.ID_ANY,_(\"&General settings...\"),_(\"General settings\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onGeneralSettingsCommand, item)\r\n\t\t# Translators: The label for the menu item to open Synthesizer settings dialog.\r\n\t\titem = menu_preferences.Append(wx.ID_ANY,_(\"&Synthesizer...\"),_(\"Change the synthesizer to be used\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onSynthesizerCommand, item)\r\n\t\t# Translators: The label for the menu item to open Voice Settings dialog.\r\n\t\titem = menu_preferences.Append(wx.ID_ANY,_(\"&Voice settings...\"),_(\"Choose the voice, rate, pitch and volume to use\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onVoiceCommand, item)\r\n\t\t# Translators: The label for the menu item to open Braille Settings dialog.\r\n\t\titem = menu_preferences.Append(wx.ID_ANY,_(\"B&raille settings...\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onBrailleCommand, item)\r\n\t\t# Translators: The label 
for the menu item to open Keyboard Settings dialog.\r\n\t\titem = menu_preferences.Append(wx.ID_ANY,_(\"&Keyboard settings...\"),_(\"Configure keyboard layout, speaking of typed characters, words or command keys\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onKeyboardSettingsCommand, item)\r\n\t\t# Translators: The label for the menu item to open Mouse Settings dialog.\r\n\t\titem = menu_preferences.Append(wx.ID_ANY, _(\"&Mouse settings...\"),_(\"Change reporting of mouse shape and object under mouse\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onMouseSettingsCommand, item)\r\n\t\t# Translators: The label for the menu item to open Review Cursor dialog.\r\n\t\titem = menu_preferences.Append(wx.ID_ANY,_(\"Review &cursor...\"),_(\"Configure how and when the review cursor moves\")) \r\n\t\tself.Bind(wx.EVT_MENU, frame.onReviewCursorCommand, item)\r\n\t\t# Translators: The label for the menu item to open Input Composition Settings dialog.\r\n\t\titem = menu_preferences.Append(wx.ID_ANY,_(\"&Input composition settings...\"),_(\"Configure how NVDA reports input composition and candidate selection for certain languages\")) \r\n\t\tself.Bind(wx.EVT_MENU, frame.onInputCompositionCommand, item)\r\n\t\t# Translators: The label for the menu item to open Object Presentation dialog.\r\n\t\titem = menu_preferences.Append(wx.ID_ANY,_(\"&Object presentation...\"),_(\"Change reporting of objects\")) \r\n\t\tself.Bind(wx.EVT_MENU, frame.onObjectPresentationCommand, item)\r\n\t\t# Translators: The label for the menu item to open Browse Mode settings dialog.\r\n\t\titem = menu_preferences.Append(wx.ID_ANY,_(\"&Browse mode...\"),_(\"Change virtual buffers specific settings\")) \r\n\t\tself.Bind(wx.EVT_MENU, frame.onBrowseModeCommand, item)\r\n\t\t# Translators: The label for the menu item to open Document Formatting settings dialog.\r\n\t\titem = menu_preferences.Append(wx.ID_ANY,_(\"Document &formatting...\"),_(\"Change settings of document properties\")) \r\n\t\tself.Bind(wx.EVT_MENU, frame.onDocumentFormattingCommand, item)\r\n\t\tsubMenu_speechDicts = wx.Menu()\r\n\t\tif not globalVars.appArgs.secure:\r\n\t\t\t# Translators: The label for the menu item to open Default speech dictionary dialog.\r\n\t\t\titem = subMenu_speechDicts.Append(wx.ID_ANY,_(\"&Default dictionary...\"),_(\"A dialog where you can set default dictionary by adding dictionary entries to the list\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, frame.onDefaultDictionaryCommand, item)\r\n\t\t\t# Translators: The label for the menu item to open Voice specific speech dictionary dialog.\r\n\t\t\titem = subMenu_speechDicts.Append(wx.ID_ANY,_(\"&Voice dictionary...\"),_(\"A dialog where you can set voice-specific dictionary by adding dictionary entries to the list\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, frame.onVoiceDictionaryCommand, item)\r\n\t\t# Translators: The label for the menu item to open Temporary speech dictionary dialog.\r\n\t\titem = subMenu_speechDicts.Append(wx.ID_ANY,_(\"&Temporary dictionary...\"),_(\"A dialog where you can set temporary dictionary by adding dictionary entries to the edit box\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onTemporaryDictionaryCommand, item)\r\n\t\t# Translators: The label for a submenu under NvDA Preferences menu to select speech dictionaries.\r\n\t\tmenu_preferences.AppendMenu(wx.ID_ANY,_(\"Speech &dictionaries\"),subMenu_speechDicts)\r\n\t\tif not globalVars.appArgs.secure:\r\n\t\t\t# Translators: The label for the menu item to open Punctuation/symbol pronunciation dialog.\r\n\t\t\titem = menu_preferences.Append(wx.ID_ANY, 
_(\"&Punctuation/symbol pronunciation...\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, frame.onSpeechSymbolsCommand, item)\r\n\t\t\t# Translators: The label for the menu item to open the Input Gestures dialog.\r\n\t\t\titem = menu_preferences.Append(wx.ID_ANY, _(\"I&nput gestures...\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, frame.onInputGesturesCommand, item)\r\n\t\t# Translators: The label for Preferences submenu in NVDA menu.\r\n\t\tself.menu.AppendMenu(wx.ID_ANY,_(\"&Preferences\"),menu_preferences)\r\n\r\n\t\tmenu_tools = self.toolsMenu = wx.Menu()\r\n\t\tif not globalVars.appArgs.secure:\r\n\t\t\t# Translators: The label for the menu item to open NVDA Log Viewer.\r\n\t\t\titem = menu_tools.Append(wx.ID_ANY, _(\"View log\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, frame.onViewLogCommand, item)\r\n\t\t# Translators: The label for the menu item to toggle Speech Viewer.\r\n\t\titem=self.menu_tools_toggleSpeechViewer = menu_tools.AppendCheckItem(wx.ID_ANY, _(\"Speech viewer\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onToggleSpeechViewerCommand, item)\r\n\t\tif not globalVars.appArgs.secure:\r\n\t\t\t# Translators: The label for the menu item to open NVDA Python Console.\r\n\t\t\titem = menu_tools.Append(wx.ID_ANY, _(\"Python console\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, frame.onPythonConsoleCommand, item)\r\n\t\t\t# Translators: The label of a menu item to open the Add-ons Manager.\r\n\t\t\titem = menu_tools.Append(wx.ID_ANY, _(\"Manage &add-ons...\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, frame.onAddonsManagerCommand, item)\r\n\t\tif not globalVars.appArgs.secure and getattr(sys,'frozen',None):\r\n\t\t\t# Translators: The label for the menu item to create a portable copy of NVDA from an installed or another portable version.\r\n\t\t\titem = menu_tools.Append(wx.ID_ANY, _(\"Create portable copy...\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, frame.onCreatePortableCopyCommand, item)\r\n\t\t\tif not config.isInstalledCopy():\r\n\t\t\t\t# Translators: The label for the menu item to install NVDA on the computer.\r\n\t\t\t\titem = menu_tools.Append(wx.ID_ANY, _(\"&Install NVDA...\"))\r\n\t\t\t\tself.Bind(wx.EVT_MENU, frame.onInstallCommand, item)\r\n\t\t# Translators: The label for the menu item to reload plugins.\r\n\t\titem = menu_tools.Append(wx.ID_ANY, _(\"Reload plugins\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onReloadPluginsCommand, item)\r\n\t\t# Translators: The label for the Tools submenu in NVDA menu.\r\n\t\tself.menu.AppendMenu(wx.ID_ANY, _(\"Tools\"), menu_tools)\r\n\r\n\t\tmenu_help = self.helpMenu = wx.Menu()\r\n\t\tif not globalVars.appArgs.secure:\r\n\t\t\t# Translators: The label of a menu item to open NVDA user guide.\r\n\t\t\titem = menu_help.Append(wx.ID_ANY, _(\"&User Guide\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath(\"userGuide.html\")), item)\r\n\t\t\t# Translators: The label of a menu item to open the Commands Quick Reference document.\r\n\t\t\titem = menu_help.Append(wx.ID_ANY, _(\"Commands &Quick Reference\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath(\"keyCommands.html\")), item)\r\n\t\t\t# Translators: The label for the menu item to open What's New document.\r\n\t\t\titem = menu_help.Append(wx.ID_ANY, _(\"What's &new\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath(\"changes.html\")), item)\r\n\t\t\titem = menu_help.Append(wx.ID_ANY, _(\"NVDA &web site\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, lambda evt: os.startfile(\"http://www.nvda-project.org/\"), item)\r\n\t\t\t# Translators: The label for the menu item to view NVDA License 
document.\r\n\t\t\titem = menu_help.Append(wx.ID_ANY, _(\"L&icense\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath(\"copying.txt\", False)), item)\r\n\t\t\t# Translators: The label for the menu item to view NVDA Contributors list document.\r\n\t\t\titem = menu_help.Append(wx.ID_ANY, _(\"C&ontributors\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, lambda evt: os.startfile(getDocFilePath(\"contributors.txt\", False)), item)\r\n\t\t# Translators: The label for the menu item to open NVDA Welcome Dialog.\r\n\t\titem = menu_help.Append(wx.ID_ANY, _(\"We&lcome dialog...\"))\r\n\t\tself.Bind(wx.EVT_MENU, lambda evt: WelcomeDialog.run(), item)\r\n\t\tmenu_help.AppendSeparator()\r\n\t\tif updateCheck:\r\n\t\t\t# Translators: The label of a menu item to manually check for an updated version of NVDA.\r\n\t\t\titem = menu_help.Append(wx.ID_ANY, _(\"&Check for update...\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, frame.onCheckForUpdateCommand, item)\r\n\t\t# Translators: The label for the menu item to open About dialog to get information about NVDA.\r\n\t\titem = menu_help.Append(wx.ID_ABOUT, _(\"About...\"), _(\"About NVDA\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onAboutCommand, item)\r\n\t\t# Translators: The label for the Help submenu in NVDA menu.\r\n\t\tself.menu.AppendMenu(wx.ID_ANY,_(\"&Help\"),menu_help)\r\n\t\tself.menu.AppendSeparator()\r\n\t\t# Translators: The label for the menu item to open the Configuration Profiles dialog.\r\n\t\titem = self.menu.Append(wx.ID_ANY, _(\"&Configuration profiles...\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onConfigProfilesCommand, item)\r\n\t\t# Translators: The label for the menu item to revert to saved configuration.\r\n\t\titem = self.menu.Append(wx.ID_ANY, _(\"&Revert to saved configuration\"),_(\"Reset all settings to saved state\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onRevertToSavedConfigurationCommand, item)\r\n\t\tif not globalVars.appArgs.secure:\r\n\t\t\t# Translators: The label for the menu item to reset settings to default settings.\r\n\t\t\t# Here, default settings means settings that were there when the user first used NVDA.\r\n\t\t\titem = self.menu.Append(wx.ID_ANY, _(\"&Reset configuration to factory defaults\"),_(\"Reset all settings to default state\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, frame.onRevertToDefaultConfigurationCommand, item)\r\n\t\t\t# Translators: The label for the menu item to save current settings.\r\n\t\t\titem = self.menu.Append(wx.ID_SAVE, _(\"&Save configuration\"), _(\"Write the current configuration to nvda.ini\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, frame.onSaveConfigurationCommand, item)\r\n\t\tif not globalVars.appArgs.secure:\r\n\t\t\tself.menu.AppendSeparator()\r\n\t\t\t# Translators: The label for the menu item to open donate page.\r\n\t\t\titem = self.menu.Append(wx.ID_ANY, _(\"Donate\"))\r\n\t\t\tself.Bind(wx.EVT_MENU, lambda evt: os.startfile(DONATE_URL), item)\r\n\t\tself.menu.AppendSeparator()\r\n\t\titem = self.menu.Append(wx.ID_EXIT, _(\"E&xit\"),_(\"Exit NVDA\"))\r\n\t\tself.Bind(wx.EVT_MENU, frame.onExitCommand, item)\r\n\r\n\t\tself.Bind(wx.EVT_TASKBAR_LEFT_DOWN, self.onActivate)\r\n\t\tself.Bind(wx.EVT_TASKBAR_RIGHT_DOWN, self.onActivate)\r\n\r\n\tdef Destroy(self):\r\n\t\tself.menu.Destroy()\r\n\t\tsuper(SysTrayIcon, self).Destroy()\r\n\r\n\tdef onActivate(self, evt):\r\n\t\tmainFrame.prePopup()\r\n\t\timport appModules.nvda\r\n\t\tif not appModules.nvda.nvdaMenuIaIdentity:\r\n\t\t\t# The NVDA app module doesn't know how to identify the NVDA menu yet.\r\n\t\t\t# Signal that the NVDA menu has just been 
opened.\r\n\t\t\tappModules.nvda.nvdaMenuIaIdentity = True\r\n\t\tself.PopupMenu(self.menu)\r\n\t\tif appModules.nvda.nvdaMenuIaIdentity is True:\r\n\t\t\t# The NVDA menu didn't actually appear for some reason.\r\n\t\t\tappModules.nvda.nvdaMenuIaIdentity = None\r\n\t\tmainFrame.postPopup()\r\n\r\ndef initialize():\r\n\tglobal mainFrame\r\n\tif mainFrame:\r\n\t\traise RuntimeError(\"GUI already initialized\")\r\n\tmainFrame = MainFrame()\r\n\twx.GetApp().SetTopWindow(mainFrame)\r\n\r\ndef terminate():\r\n\tglobal mainFrame\r\n\t# This is called after the main loop exits because WM_QUIT exits the main loop\r\n\t# without destroying all objects correctly and we need to support WM_QUIT.\r\n\t# Therefore, any request to exit should exit the main loop.\r\n\twx.CallAfter(mainFrame.Destroy)\r\n\t# #4460: We need another iteration of the main loop\r\n\t# so that everything (especially the TaskBarIcon) is cleaned up properly.\r\n\t# ProcessPendingEvents doesn't seem to work, but MainLoop does.\r\n\t# Because the top window gets destroyed,\r\n\t# MainLoop thankfully returns pretty quickly.\r\n\twx.GetApp().MainLoop()\r\n\tmainFrame = None\r\n\r\ndef showGui():\r\n \twx.CallAfter(mainFrame.showGui)\r\n\r\ndef quit():\r\n\twx.CallAfter(mainFrame.onExitCommand, None)\r\n\r\ndef messageBox(message, caption=wx.MessageBoxCaptionStr, style=wx.OK | wx.CENTER, parent=None):\r\n\t\"\"\"Display a message dialog.\r\n\tThis should be used for all message dialogs\r\n\trather than using C{wx.MessageDialog} and C{wx.MessageBox} directly.\r\n\t@param message: The message text.\r\n\t@type message: str\r\n\t@param caption: The caption (title) of the dialog.\r\n\t@type caption: str\r\n\t@param style: Same as for wx.MessageBox.\r\n\t@type style: int\r\n\t@param parent: The parent window (optional).\r\n\t@type parent: C{wx.Window}\r\n\t@return: Same as for wx.MessageBox.\r\n\t@rtype: int\r\n\t\"\"\"\r\n\tglobal isInMessageBox\r\n\twasAlready = isInMessageBox\r\n\tisInMessageBox = True\r\n\tif not parent:\r\n\t\tmainFrame.prePopup()\r\n\tres = wx.MessageBox(message, caption, style, parent or mainFrame)\r\n\tif not parent:\r\n\t\tmainFrame.postPopup()\r\n\tif not wasAlready:\r\n\t\tisInMessageBox = False\r\n\treturn res\r\n\r\ndef runScriptModalDialog(dialog, callback=None):\r\n\t\"\"\"Run a modal dialog from a script.\r\n\tThis will not block the caller,\r\n\tbut will instead call C{callback} (if provided) with the result from the dialog.\r\n\tThe dialog will be destroyed once the callback has returned.\r\n\t@param dialog: The dialog to show.\r\n\t@type dialog: C{wx.Dialog}\r\n\t@param callback: The optional callable to call with the result from the dialog.\r\n\t@type callback: callable\r\n\t\"\"\"\r\n\tdef run():\r\n\t\tmainFrame.prePopup()\r\n\t\tres = dialog.ShowModal()\r\n\t\tmainFrame.postPopup()\r\n\t\tif callback:\r\n\t\t\tcallback(res)\r\n\t\tdialog.Destroy()\r\n\twx.CallAfter(run)\r\n\r\nclass WelcomeDialog(wx.Dialog):\r\n\t\"\"\"The NVDA welcome dialog.\r\n\tThis provides essential information for new users, such as a description of the NVDA key and instructions on how to activate the NVDA menu.\r\n\tIt also provides quick access to some important configuration options.\r\n\tThis dialog is displayed the first time NVDA is started with a new configuration.\r\n\t\"\"\"\r\n\r\n\t# Translators: The main message for the Welcome dialog when the user starts NVDA for the first time.\r\n\tWELCOME_MESSAGE_DETAIL = _(\r\n\t\t\"Most commands for controlling NVDA require you to hold down the NVDA key while pressing other 
keys.\\n\"\r\n\t\t\"By default, the numpad insert and main insert keys may both be used as the NVDA key.\\n\"\r\n\t\t\"You can also configure NVDA to use the CapsLock as the NVDA key.\\n\"\r\n\t\t\"Press NVDA+n at any time to activate the NVDA menu.\\n\"\r\n\t\t\"From this menu, you can configure NVDA, get help and access other NVDA functions.\\n\"\r\n\t)\r\n\r\n\tdef __init__(self, parent):\r\n\t\t# Translators: The title of the Welcome dialog when user starts NVDA for the first time.\r\n\t\tsuper(WelcomeDialog, self).__init__(parent, wx.ID_ANY, _(\"Welcome to NVDA\"))\r\n\t\tmainSizer=wx.BoxSizer(wx.VERTICAL)\r\n\t\t# Translators: The header for the Welcome dialog when user starts NVDA for the first time. This is in larger,\r\n\t\t# bold lettering \r\n\t\twelcomeTextHeader = wx.StaticText(self, label=_(\"Welcome to NVDA!\"))\r\n\t\twelcomeTextHeader.SetFont(wx.Font(18, wx.NORMAL, wx.NORMAL, wx.BOLD))\r\n\t\tmainSizer.AddSpacer(10)\r\n\t\tmainSizer.Add(welcomeTextHeader,border=20,flag=wx.EXPAND|wx.LEFT|wx.RIGHT)\r\n\t\tmainSizer.AddSpacer(10)\r\n\t\twelcomeTextDetail = wx.StaticText(self, wx.ID_ANY, self.WELCOME_MESSAGE_DETAIL)\r\n\t\tmainSizer.Add(welcomeTextDetail,border=20,flag=wx.EXPAND|wx.LEFT|wx.RIGHT)\r\n\t\toptionsSizer = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, _(\"Options\")), wx.VERTICAL)\r\n\t\tself.capsAsNVDAModifierCheckBox = wx.CheckBox(self, wx.ID_ANY, _(\"Use CapsLock as an NVDA modifier key\"))\r\n\t\tself.capsAsNVDAModifierCheckBox.SetValue(config.conf[\"keyboard\"][\"useCapsLockAsNVDAModifierKey\"])\r\n\t\toptionsSizer.Add(self.capsAsNVDAModifierCheckBox,flag=wx.TOP|wx.LEFT,border=10)\r\n\t\t# Translators: The label of a check box in the Welcome dialog.\r\n\t\tself.startAfterLogonCheckBox = wx.CheckBox(self, label=_(\"&Automatically start NVDA after I log on to Windows\"))\r\n\t\tself.startAfterLogonCheckBox.Value = config.getStartAfterLogon()\r\n\t\tif globalVars.appArgs.secure or not config.isInstalledCopy():\r\n\t\t\tself.startAfterLogonCheckBox.Disable()\r\n\t\toptionsSizer.Add(self.startAfterLogonCheckBox,flag=wx.TOP|wx.LEFT,border=10)\r\n\t\t# Translators: This is a label for a checkbox in welcome dialog to show welcome dialog at startup.\r\n\t\tself.showWelcomeDialogAtStartupCheckBox = wx.CheckBox(self, wx.ID_ANY, _(\"Show this dialog when NVDA starts\"))\r\n\t\tself.showWelcomeDialogAtStartupCheckBox.SetValue(config.conf[\"general\"][\"showWelcomeDialogAtStartup\"])\r\n\t\toptionsSizer.Add(self.showWelcomeDialogAtStartupCheckBox,flag=wx.TOP|wx.LEFT,border=10)\r\n\t\tmainSizer.Add(optionsSizer,flag=wx.LEFT|wx.TOP|wx.RIGHT|wx.EXPAND,border=20)\r\n\t\tmainSizer.Add(self.CreateButtonSizer(wx.OK),flag=wx.TOP|wx.BOTTOM|wx.ALIGN_CENTER_HORIZONTAL,border=20)\r\n\t\tself.Bind(wx.EVT_BUTTON, self.onOk, id=wx.ID_OK)\r\n\r\n\t\tself.SetSizer(mainSizer)\r\n\t\tmainSizer.Fit(self)\r\n\t\tself.capsAsNVDAModifierCheckBox.SetFocus()\r\n\t\tself.Center(wx.BOTH | wx.CENTER_ON_SCREEN)\r\n\r\n\tdef onOk(self, evt):\r\n\t\tconfig.conf[\"keyboard\"][\"useCapsLockAsNVDAModifierKey\"] = self.capsAsNVDAModifierCheckBox.IsChecked()\r\n\t\tif self.startAfterLogonCheckBox.Enabled:\r\n\t\t\tconfig.setStartAfterLogon(self.startAfterLogonCheckBox.Value)\r\n\t\tconfig.conf[\"general\"][\"showWelcomeDialogAtStartup\"] = self.showWelcomeDialogAtStartupCheckBox.IsChecked()\r\n\t\ttry:\r\n\t\t\tconfig.conf.save()\r\n\t\texcept:\r\n\t\t\tlog.debugWarning(\"could not save\",exc_info=True)\r\n\t\tself.EndModal(wx.ID_OK)\r\n\r\n\t@classmethod\r\n\tdef run(cls):\r\n\t\t\"\"\"Prepare and display 
an instance of this dialog.\r\n\t\tThis does not require the dialog to be instantiated.\r\n\t\t\"\"\"\r\n\t\tmainFrame.prePopup()\r\n\t\td = cls(mainFrame)\r\n\t\td.ShowModal()\r\n\t\td.Destroy()\r\n\t\tmainFrame.postPopup()\r\n\r\nclass LauncherDialog(wx.Dialog):\r\n\t\"\"\"The dialog that is displayed when NVDA is started from the launcher.\r\n\tThis displays the license and allows the user to install or create a portable copy of NVDA.\r\n\t\"\"\"\r\n\r\n\tdef __init__(self, parent):\r\n\t\tsuper(LauncherDialog, self).__init__(parent, title=versionInfo.name)\r\n\t\tmainSizer = wx.BoxSizer(wx.VERTICAL)\r\n\t\tsHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)\r\n\r\n\t\t# Translators: The label of the license text which will be shown when NVDA installation program starts.\r\n\t\tgroupLabel = _(\"License Agreement\")\r\n\t\tsizer = sHelper.addItem(wx.StaticBoxSizer(wx.StaticBox(self, label=groupLabel), wx.VERTICAL))\r\n\t\tlicenseTextCtrl = wx.TextCtrl(self, size=(500, 400), style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_RICH)\r\n\t\tlicenseTextCtrl.Value = codecs.open(getDocFilePath(\"copying.txt\", False), \"r\", encoding=\"UTF-8\").read()\r\n\t\tsizer.Add(licenseTextCtrl)\r\n\r\n\t\t# Translators: The label for a checkbox in NvDA installation program to agree to the license agreement.\r\n\t\tagreeText = _(\"I &agree\")\r\n\t\tself.licenseAgreeCheckbox = sHelper.addItem(wx.CheckBox(self, label=agreeText))\r\n\t\tself.licenseAgreeCheckbox.Value = False\r\n\t\tself.licenseAgreeCheckbox.Bind(wx.EVT_CHECKBOX, self.onLicenseAgree)\r\n\r\n\t\tsizer = sHelper.addItem(wx.GridSizer(rows=2, cols=2))\r\n\t\tself.actionButtons = []\r\n\t\t# Translators: The label of the button in NVDA installation program to install NvDA on the user's computer.\r\n\t\tctrl = wx.Button(self, label=_(\"&Install NVDA on this computer\"))\r\n\t\tsizer.Add(ctrl)\r\n\t\tctrl.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(evt, mainFrame.onInstallCommand))\r\n\t\tself.actionButtons.append(ctrl)\r\n\t\t# Translators: The label of the button in NVDA installation program to create a portable version of NVDA.\r\n\t\tctrl = wx.Button(self, label=_(\"Create &portable copy\"))\r\n\t\tsizer.Add(ctrl)\r\n\t\tctrl.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(evt, mainFrame.onCreatePortableCopyCommand))\r\n\t\tself.actionButtons.append(ctrl)\r\n\t\t# Translators: The label of the button in NVDA installation program to continue using the installation program as a temporary copy of NVDA.\r\n\t\tctrl = wx.Button(self, label=_(\"&Continue running\"))\r\n\t\tsizer.Add(ctrl)\r\n\t\tctrl.Bind(wx.EVT_BUTTON, self.onContinueRunning)\r\n\t\tself.actionButtons.append(ctrl)\r\n\t\tsizer.Add(wx.Button(self, label=_(\"E&xit\"), id=wx.ID_CANCEL))\r\n\t\t# If we bind this on the button, it fails to trigger when the dialog is closed.\r\n\t\tself.Bind(wx.EVT_BUTTON, self.onExit, id=wx.ID_CANCEL)\r\n\r\n\t\tfor ctrl in self.actionButtons:\r\n\t\t\tctrl.Disable()\r\n\r\n\t\tmainSizer.Add(sHelper.sizer, border = guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)\r\n\t\tself.Sizer = mainSizer\r\n\t\tmainSizer.Fit(self)\r\n\t\tself.Center(wx.BOTH | wx.CENTER_ON_SCREEN)\r\n\r\n\tdef onLicenseAgree(self, evt):\r\n\t\tfor ctrl in self.actionButtons:\r\n\t\t\tctrl.Enable(evt.IsChecked())\r\n\r\n\tdef onAction(self, evt, func):\r\n\t\tself.Destroy()\r\n\t\tfunc(evt)\r\n\r\n\tdef onContinueRunning(self, evt):\r\n\t\tself.Destroy()\r\n\t\tcore.doStartupDialogs()\r\n\r\n\tdef onExit(self, evt):\r\n\t\twx.GetApp().ExitMainLoop()\r\n\r\n\t@classmethod\r\n\tdef 
run(cls):\r\n\t\t\"\"\"Prepare and display an instance of this dialog.\r\n\t\tThis does not require the dialog to be instantiated.\r\n\t\t\"\"\"\r\n\t\tmainFrame.prePopup()\r\n\t\td = cls(mainFrame)\r\n\t\td.Show()\r\n\t\tmainFrame.postPopup()\r\n\r\nclass ExitDialog(wx.Dialog):\r\n\t_instance = None\r\n\r\n\tdef __new__(cls, parent):\r\n\t\t# Make this a singleton.\r\n\t\tinst = cls._instance() if cls._instance else None\r\n\t\tif not inst:\r\n\t\t\treturn super(cls, cls).__new__(cls, parent)\r\n\t\treturn inst\r\n\r\n\tdef __init__(self, parent):\r\n\t\tinst = ExitDialog._instance() if ExitDialog._instance else None\r\n\t\tif inst:\r\n\t\t\treturn\r\n\t\t# Use a weakref so the instance can die.\r\n\t\tExitDialog._instance = weakref.ref(self)\r\n\t\t# Translators: The title of the dialog to exit NVDA\r\n\t\tsuper(ExitDialog, self).__init__(parent, title=_(\"Exit NVDA\"))\r\n\t\tdialog = self\r\n\t\tmainSizer = wx.BoxSizer(wx.VERTICAL)\r\n\r\n\t\tcontentSizerHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)\r\n\r\n\t\tif globalVars.appArgs.disableAddons:\r\n\t\t\t# Translators: A message in the exit Dialog shown when all add-ons are disabled.\r\n\t\t\taddonsDisabledText = _(\"All add-ons are now disabled. They will be re-enabled on the next restart unless you choose to disable them again.\")\r\n\t\t\tcontentSizerHelper.addItem(wx.StaticText(self, wx.ID_ANY, label=addonsDisabledText))\r\n\r\n\t\t# Translators: The label for actions list in the Exit dialog.\r\n\t\tlabelText=_(\"What would you like to &do?\")\r\n\t\tself.actions = [\r\n\t\t# Translators: An option in the combo box to choose exit action.\r\n\t\t_(\"Exit\"),\r\n\t\t# Translators: An option in the combo box to choose exit action.\r\n\t\t_(\"Restart\"),\r\n\t\t# Translators: An option in the combo box to choose exit action.\r\n\t\t_(\"Restart with add-ons disabled\")]\r\n\t\tself.actionsList = contentSizerHelper.addLabeledControl(labelText, wx.Choice, choices=self.actions)\r\n\t\tself.actionsList.SetSelection(0)\r\n\r\n\t\tcontentSizerHelper.addItem( self.CreateButtonSizer(wx.OK | wx.CANCEL))\r\n\r\n\t\tself.Bind(wx.EVT_BUTTON, self.onOk, id=wx.ID_OK)\r\n\t\tself.Bind(wx.EVT_BUTTON, self.onCancel, id=wx.ID_CANCEL)\r\n\r\n\t\tmainSizer.Add(contentSizerHelper.sizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)\r\n\t\tmainSizer.Fit(self)\r\n\t\tself.Sizer = mainSizer\r\n\t\tself.actionsList.SetFocus()\r\n\t\tself.Center(wx.BOTH | wx.CENTER_ON_SCREEN)\r\n\r\n\tdef onOk(self, evt):\r\n\t\taction=self.actionsList.GetSelection()\r\n\t\tif action == 0:\r\n\t\t\twx.GetApp().ExitMainLoop()\r\n\t\telif action == 1:\r\n\t\t\tqueueHandler.queueFunction(queueHandler.eventQueue,core.restart)\r\n\t\telif action == 2:\r\n\t\t\tqueueHandler.queueFunction(queueHandler.eventQueue,core.restart,True)\r\n\t\tself.Destroy()\r\n\r\n\tdef onCancel(self, evt):\r\n\t\tself.Destroy()\r\n\r\nclass ExecAndPump(threading.Thread):\r\n\t\"\"\"Executes the given function with given args and kwargs in a background thread while blocking and pumping in the current thread.\"\"\"\r\n\r\n\tdef __init__(self,func,*args,**kwargs):\r\n\t\tself.func=func\r\n\t\tself.args=args\r\n\t\tself.kwargs=kwargs\r\n\t\tsuper(ExecAndPump,self).__init__()\r\n\t\tself.threadExc=None\r\n\t\tself.start()\r\n\t\ttime.sleep(0.1)\r\n\t\tthreadHandle=ctypes.c_int()\r\n\t\tthreadHandle.value=ctypes.windll.kernel32.OpenThread(0x100000,False,self.ident)\r\n\t\tmsg=ctypes.wintypes.MSG()\r\n\t\twhile 
ctypes.windll.user32.MsgWaitForMultipleObjects(1,ctypes.byref(threadHandle),False,-1,255)==1:\r\n\t\t\twhile ctypes.windll.user32.PeekMessageW(ctypes.byref(msg),None,0,0,1):\r\n\t\t\t\tctypes.windll.user32.TranslateMessage(ctypes.byref(msg))\r\n\t\t\t\tctypes.windll.user32.DispatchMessageW(ctypes.byref(msg))\r\n\t\tif self.threadExc:\r\n\t\t\traise self.threadExc\r\n\r\n\tdef run(self):\r\n\t\ttry:\r\n\t\t\tself.func(*self.args,**self.kwargs)\r\n\t\texcept Exception as e:\r\n\t\t\tself.threadExc=e\r\n\t\t\tlog.debugWarning(\"task had errors\",exc_info=True)\r\n\r\nclass IndeterminateProgressDialog(wx.ProgressDialog):\r\n\r\n\tdef __init__(self, parent, title, message):\r\n\t\tsuper(IndeterminateProgressDialog, self).__init__(title, message, parent=parent)\r\n\t\tself._speechCounter = -1\r\n\t\tself.timer = wx.PyTimer(self.Pulse)\r\n\t\tself.timer.Start(1000)\r\n\t\tself.Raise()\r\n\t\tself.Center(wx.BOTH | wx.CENTER_ON_SCREEN)\r\n\r\n\tdef Pulse(self):\r\n\t\tsuper(IndeterminateProgressDialog, self).Pulse()\r\n\t\t# We want progress to be spoken on the first pulse and every 10 pulses thereafter.\r\n\t\t# Therefore, cycle from 0 to 9 inclusive.\r\n\t\tself._speechCounter = (self._speechCounter + 1) % 10\r\n\t\tpbConf = config.conf[\"presentation\"][\"progressBarUpdates\"]\r\n\t\tif pbConf[\"progressBarOutputMode\"] == \"off\":\r\n\t\t\treturn\r\n\t\tif not pbConf[\"reportBackgroundProgressBars\"] and not self.IsActive():\r\n\t\t\treturn\r\n\t\tif pbConf[\"progressBarOutputMode\"] in (\"beep\", \"both\"):\r\n\t\t\ttones.beep(440, 40)\r\n\t\tif pbConf[\"progressBarOutputMode\"] in (\"speak\", \"both\") and self._speechCounter == 0:\r\n\t\t\t# Translators: Announced periodically to indicate progress for an indeterminate progress bar.\r\n\t\t\tspeech.speakMessage(_(\"Please wait\"))\r\n\r\n\tdef IsActive(self):\r\n\t\t#4714: In wxPython 3, ProgressDialog.IsActive always seems to return False.\r\n\t\treturn winUser.isDescendantWindow(winUser.getForegroundWindow(), self.Handle)\r\n\r\n\tdef done(self):\r\n\t\tself.timer.Stop()\r\n\t\tpbConf = config.conf[\"presentation\"][\"progressBarUpdates\"]\r\n\t\tif pbConf[\"progressBarOutputMode\"] in (\"beep\", \"both\") and (pbConf[\"reportBackgroundProgressBars\"] or self.IsActive()):\r\n\t\t\ttones.beep(1760, 40)\r\n\t\tself.Hide()\r\n\t\tself.Destroy()\r\n\r\ndef shouldConfigProfileTriggersBeSuspended():\r\n\t\"\"\"Determine whether configuration profile triggers should be suspended in relation to NVDA's GUI.\r\n\tFor NVDA configuration dialogs, the configuration should remain the same as it was before the GUI was popped up\r\n\tso the user can change settings in the correct profile.\r\n\tTop-level windows that require this behavior should have a C{shouldSuspendConfigProfileTriggers} attribute set to C{True}.\r\n\tBecause these dialogs are often opened via the NVDA menu, this applies to the NVDA menu as well.\r\n\t\"\"\"\r\n\tif winUser.getGUIThreadInfo(ctypes.windll.kernel32.GetCurrentThreadId()).flags & 0x00000010:\r\n\t\t# The NVDA menu is active.\r\n\t\treturn True\r\n\tfor window in wx.GetTopLevelWindows():\r\n\t\tif window.IsShown() and getattr(window, \"shouldSuspendConfigProfileTriggers\", False):\r\n\t\t\treturn True\r\n\treturn False\r\n",
"path": "source/gui/__init__.py"
}
] | diff --git a/source/gui/__init__.py b/source/gui/__init__.py
index 941d96250f9..7d547351425 100644
--- a/source/gui/__init__.py
+++ b/source/gui/__init__.py
@@ -829,7 +829,8 @@ def IsActive(self):
def done(self):
self.timer.Stop()
- if self.IsActive():
+ pbConf = config.conf["presentation"]["progressBarUpdates"]
+ if pbConf["progressBarOutputMode"] in ("beep", "both") and (pbConf["reportBackgroundProgressBars"] or self.IsActive()):
tones.beep(1760, 40)
self.Hide()
self.Destroy()
|
huggingface__optimum-334 | Support for GPU optimization of xlm-roberta model type
### System Info
```shell
optimum==1.3.0
python==3.8.13
Ubuntu 18.04
```
### Who can help?
@philschmid
### Information
- [X] The official example scripts
- [x] My own modified scripts
### Tasks
- [ ] An officially supported task in the `examples` folder (such as GLUE/SQuAD, ...)
- [ ] My own task or dataset (give details below)
### Reproduction
Hi,
Thanks for this very nice tool for simple and rapid ONNX optimization of Transformer models.
I would like to GPU-optimize this multilingual cross-encoder model: `cross-encoder/mmarco-mMiniLMv2-L12-H384-v1`, using the code provided in a blog post by @philschmid:
```python
from optimum.onnxruntime import ORTOptimizer
from optimum.onnxruntime.configuration import OptimizationConfig
# create ORTOptimizer and define optimization configuration
optimizer = ORTOptimizer.from_pretrained(model_id, feature=model.pipeline_task)
optimization_config = OptimizationConfig(optimization_level=99, # enable all optimizations
optimize_for_gpu=True,
fp16=True
)
# apply the optimization configuration to the model
optimizer.export(
onnx_model_path=onnx_path / "model.onnx",
onnx_optimized_model_output_path=onnx_path / "model-optimized.onnx",
optimization_config=optimization_config,
)
```
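(For reference: the snippet above assumes `model_id`, `model`, and `onnx_path` are defined earlier in the blog post. Below is a minimal sketch of that setup; the variable names and the `onnx/` output directory are assumptions for illustration, not part of the failing code.)
```python
from pathlib import Path

from optimum.onnxruntime import ORTModelForSequenceClassification

# Assumed setup mirroring the blog post (names are hypothetical):
model_id = "cross-encoder/mmarco-mMiniLMv2-L12-H384-v1"
onnx_path = Path("onnx/")

# Export the vanilla ONNX model that the optimizer will later read.
model = ORTModelForSequenceClassification.from_pretrained(model_id, from_transformers=True)
model.save_pretrained(onnx_path)
```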
Running this, I got the following error:
```
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/home/matthieu/Code/Python/ONNX-Export-Optimum-mmarco-mMiniLMv2-L12-H384-v1.ipynb Cell 15 in <cell line: 12>()
      6 optimization_config = OptimizationConfig(optimization_level=99, # enable all optimizations
      7 optimize_for_gpu=True,
      8 fp16=True
      9 )
     11 # apply the optimization configuration to the model
---> 12 optimizer.export(
     13 onnx_model_path=onnx_path / "model.onnx",
     14 onnx_optimized_model_output_path=onnx_path / "model-optimized.onnx",
     15 optimization_config=optimization_config,
     16 )
File ~/anaconda3/envs/haystack-gpu-fresh/lib/python3.8/site-packages/optimum/onnxruntime/optimization.py:125, in ORTOptimizer.export(self, onnx_model_path, onnx_optimized_model_output_path, optimization_config, use_external_data_format)
122 if not onnx_model_path.exists():
123 export(self.preprocessor, self.model, self._onnx_config, self.opset, onnx_model_path)
--> 125 ORTConfigManager.check_supported_model_or_raise(self._model_type)
126 num_heads = getattr(self.model.config, ORTConfigManager.get_num_heads_name(self._model_type))
127 hidden_size = getattr(self.model.config, ORTConfigManager.get_hidden_size_name(self._model_type))
File ~/anaconda3/envs/haystack-gpu-fresh/lib/python3.8/site-packages/optimum/onnxruntime/utils.py:110, in ORTConfigManager.check_supported_model_or_raise(cls, model_type)
107 @classmethod
108 def check_supported_model_or_raise(cls, model_type: str) -> bool:
109 if model_type not in cls._conf:
...
111 f"{model_type} model type is not supported yet. Only {list(cls._conf.keys())} are supported. "
112 f"If you want to support {model_type} please propose a PR or open up an issue."
113 )
KeyError: "xlm-roberta model type is not supported yet. Only ['bert', 'albert', 'camembert', 'codegen', 'distilbert', 'deberta', 'deberta-v2', 'electra', 'roberta', 'bart', 'gpt2', 'gpt_neo'] are supported. If you want to support xlm-roberta please propose a PR or open up an issue."
```
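The traceback points at `ORTConfigManager._conf` in `optimum/onnxruntime/utils.py` (reproduced further below): there is simply no entry for the `xlm-roberta` model type. Since XLM-RoBERTa shares RoBERTa's config attribute names, registering it against the "bert" ONNX Runtime model type should be enough. A sketch of a temporary monkey-patch workaround, assuming only the `_conf` layout shown in the traceback:
```python
from optimum.onnxruntime.utils import ORTConfigManager

# Hypothetical stopgap until xlm-roberta support ships upstream:
# XLM-RoBERTa reuses RoBERTa's config attribute names, so it can be
# mapped onto the existing "bert" ONNX Runtime optimization path.
ORTConfigManager._conf["xlm-roberta"] = ("num_attention_heads", "hidden_size", "bert")

ORTConfigManager.check_supported_model_or_raise("xlm-roberta")  # no longer raises
```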
Would it be possible to add support for this model type?
Thanks!
### Expected behavior
Would it be possible to add support for `xlm-roberta` model type?
| [
{
"content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union\n\nimport torch\nfrom transformers.onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast\nfrom transformers.utils import logging\n\nimport onnx\nimport onnxruntime as ort\n\nfrom ..onnx import OnnxConfigWithLoss, OnnxConfigWithPastAndLoss, OnnxSeq2SeqConfigWithPastAndLoss\n\n\nlogger = logging.get_logger(__name__)\n\nONNX_WEIGHTS_NAME = \"model.onnx\"\nOPTIMIZED_ONNX_WEIGHTS_NAME = \"optimized_model.onnx\"\nQUANTIZED_ONNX_WEIGHTS_NAME = \"q8_model.onnx\"\n\nONNX_ENCODER_NAME = \"encoder_model.onnx\"\nONNX_DECODER_NAME = \"decoder_model.onnx\"\nONNX_DECODER_WITH_PAST_NAME = \"decoder_with_past_model.onnx\"\n\n\ndef _is_gpu_available():\n \"\"\"\n checks if a gpu is available.\n \"\"\"\n available_providers = ort.get_available_providers()\n if \"CUDAExecutionProvider\" in available_providers and torch.cuda.is_available():\n return True\n else:\n return False\n\n\nclass ORTConfigManager:\n \"\"\"\n A class that contains all the information needed by ONNX Runtime optimization for a given model type.\n\n Attributes:\n _conf (`Dict[str, tuple]`):\n A dictionary mapping each supported model type to a tuple containing the number of attention heads\n and the hidden size model config attribute names as well as the corresponding ONNX Runtime model type.\n \"\"\"\n\n _conf = {\n \"bert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"albert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"camembert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"codegen\": (\"n_head\", \"n_embd\", \"gpt2\"),\n \"distilbert\": (\"n_heads\", \"dim\", \"bert\"),\n \"deberta\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"deberta-v2\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"electra\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"roberta\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"bart\": (\"encoder_attention_heads\", \"d_model\", \"bart\"),\n \"gpt2\": (\"n_head\", \"n_embd\", \"gpt2\"),\n \"gpt_neo\": (\"num_heads\", \"hidden_size\", \"gpt2\"),\n }\n\n @classmethod\n def get_num_heads_name(cls, model_type: str) -> str:\n num_heads = \"num_attention_heads\"\n try:\n num_heads = cls._conf[model_type][0]\n except KeyError:\n logger.warning(\n f\"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported. The default value to \"\n f\"access the number of heads defined in the config is set to `{num_heads}`.\"\n )\n return num_heads\n\n @classmethod\n def get_hidden_size_name(cls, model_type: str) -> str:\n hidden_size = \"hidden_size\"\n try:\n hidden_size = cls._conf[model_type][1]\n except KeyError:\n logger.warning(\n f\"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported. 
The default value to \"\n f\"access the hidden size defined in the config is set to `{hidden_size}`.\"\n )\n return hidden_size\n\n @classmethod\n def get_model_ort_type(cls, model_type: str) -> str:\n try:\n model_type = cls._conf[model_type][2]\n except KeyError:\n logger.warning(f\"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported.\")\n return model_type\n\n @classmethod\n def check_supported_model_or_raise(cls, model_type: str) -> bool:\n if model_type not in cls._conf:\n raise KeyError(\n f\"{model_type} model type is not supported yet. Only {list(cls._conf.keys())} are supported. \"\n f\"If you want to support {model_type} please propose a PR or open up an issue.\"\n )\n\n\ndef generate_identified_filename(filename, identifier):\n return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix)\n\n\ndef fix_atenops_to_gather(model_path):\n # Fix broken ATenOp nodes back to Gather nodes.\n model = onnx.load(model_path)\n onnx.checker.check_model(model)\n\n nodes = model.graph.node\n\n for node in nodes:\n if node.op_type in [\"ATenOp\", \"ATen\"]:\n logger.info(f\"----Start fixing node: {node.name}----\")\n op_num = node.name.split(\"_\")[-1]\n new_node = onnx.helper.make_node(\n \"Gather\",\n name=\"Gather_\" + op_num,\n inputs=[node.input[0], node.input[1]],\n outputs=node.output,\n )\n\n model.graph.node.remove(node)\n model.graph.node.insert(int(op_num), new_node)\n\n onnx.checker.check_model(model)\n onnx.save(model, model_path)\n\n\ndef wrap_onnx_config_for_loss(onnx_config: OnnxConfig) -> OnnxConfig:\n if isinstance(onnx_config, OnnxSeq2SeqConfigWithPast):\n return OnnxSeq2SeqConfigWithPastAndLoss(onnx_config)\n elif isinstance(onnx_config, OnnxConfigWithPast):\n return OnnxConfigWithPastAndLoss(onnx_config)\n else:\n return OnnxConfigWithLoss(onnx_config)\n\n\ndef get_device_for_provider(provider: str) -> torch.device:\n \"\"\"\n Gets the PyTorch device (CPU/CUDA) associated with an ONNX Runtime provider.\n \"\"\"\n return torch.device(\"cuda\") if provider == \"CUDAExecutionProvider\" else torch.device(\"cpu\")\n\n\ndef get_provider_for_device(device: torch.device) -> str:\n \"\"\"\n Gets the ONNX Runtime provider associated with the PyTorch device (CPU/CUDA).\n \"\"\"\n return \"CUDAExecutionProvider\" if device.type.lower() == \"cuda\" else \"CPUExecutionProvider\"\n\n\nclass ORTQuantizableOperator(Enum):\n # Common ops\n Gather = \"Gather\"\n Transpose = \"Transpose\"\n EmbedLayerNormalizationQuant = \"EmbedLayerNormalization\"\n\n # QLinearOps\n Conv = \"Conv\"\n MatMul = \"MatMul\"\n Add = \"Add\"\n Mul = \"Mul\"\n Relu = \"Relu\"\n Clip = \"Clip\"\n LeakyRelu = \"LeakyRelu\"\n Sigmoid = \"Sigmoid\"\n MaxPool = \"MaxPool\"\n GlobalAveragePool = \"GlobalAveragePool\"\n Split = \"Split\"\n Pad = \"Pad\"\n Reshape = \"Reshape\"\n Squeeze = \"Squeeze\"\n Unsqueeze = \"Unsqueeze\"\n Resize = \"Resize\"\n AveragePool = \"AveragePool\"\n Concat = \"Concat\"\n",
"path": "optimum/onnxruntime/utils.py"
}
] | [
{
"content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union\n\nimport torch\nfrom transformers.onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast\nfrom transformers.utils import logging\n\nimport onnx\nimport onnxruntime as ort\n\nfrom ..onnx import OnnxConfigWithLoss, OnnxConfigWithPastAndLoss, OnnxSeq2SeqConfigWithPastAndLoss\n\n\nlogger = logging.get_logger(__name__)\n\nONNX_WEIGHTS_NAME = \"model.onnx\"\nOPTIMIZED_ONNX_WEIGHTS_NAME = \"optimized_model.onnx\"\nQUANTIZED_ONNX_WEIGHTS_NAME = \"q8_model.onnx\"\n\nONNX_ENCODER_NAME = \"encoder_model.onnx\"\nONNX_DECODER_NAME = \"decoder_model.onnx\"\nONNX_DECODER_WITH_PAST_NAME = \"decoder_with_past_model.onnx\"\n\n\ndef _is_gpu_available():\n \"\"\"\n checks if a gpu is available.\n \"\"\"\n available_providers = ort.get_available_providers()\n if \"CUDAExecutionProvider\" in available_providers and torch.cuda.is_available():\n return True\n else:\n return False\n\n\nclass ORTConfigManager:\n \"\"\"\n A class that contains all the information needed by ONNX Runtime optimization for a given model type.\n\n Attributes:\n _conf (`Dict[str, tuple]`):\n A dictionary mapping each supported model type to a tuple containing the number of attention heads\n and the hidden size model config attribute names as well as the corresponding ONNX Runtime model type.\n \"\"\"\n\n _conf = {\n \"bert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"albert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"camembert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"codegen\": (\"n_head\", \"n_embd\", \"gpt2\"),\n \"distilbert\": (\"n_heads\", \"dim\", \"bert\"),\n \"deberta\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"deberta-v2\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"electra\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"roberta\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"bart\": (\"encoder_attention_heads\", \"d_model\", \"bart\"),\n \"gpt2\": (\"n_head\", \"n_embd\", \"gpt2\"),\n \"gpt_neo\": (\"num_heads\", \"hidden_size\", \"gpt2\"),\n \"xlm-roberta\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n }\n\n @classmethod\n def get_num_heads_name(cls, model_type: str) -> str:\n num_heads = \"num_attention_heads\"\n try:\n num_heads = cls._conf[model_type][0]\n except KeyError:\n logger.warning(\n f\"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported. The default value to \"\n f\"access the number of heads defined in the config is set to `{num_heads}`.\"\n )\n return num_heads\n\n @classmethod\n def get_hidden_size_name(cls, model_type: str) -> str:\n hidden_size = \"hidden_size\"\n try:\n hidden_size = cls._conf[model_type][1]\n except KeyError:\n logger.warning(\n f\"{model_type} is not supported yet. 
Only {list(cls._conf.keys())} are supported. The default value to \"\n f\"access the hidden size defined in the config is set to `{hidden_size}`.\"\n )\n return hidden_size\n\n @classmethod\n def get_model_ort_type(cls, model_type: str) -> str:\n try:\n model_type = cls._conf[model_type][2]\n except KeyError:\n logger.warning(f\"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported.\")\n return model_type\n\n @classmethod\n def check_supported_model_or_raise(cls, model_type: str) -> bool:\n if model_type not in cls._conf:\n raise KeyError(\n f\"{model_type} model type is not supported yet. Only {list(cls._conf.keys())} are supported. \"\n f\"If you want to support {model_type} please propose a PR or open up an issue.\"\n )\n\n\ndef generate_identified_filename(filename, identifier):\n return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix)\n\n\ndef fix_atenops_to_gather(model_path):\n # Fix broken ATenOp nodes back to Gather nodes.\n model = onnx.load(model_path)\n onnx.checker.check_model(model)\n\n nodes = model.graph.node\n\n for node in nodes:\n if node.op_type in [\"ATenOp\", \"ATen\"]:\n logger.info(f\"----Start fixing node: {node.name}----\")\n op_num = node.name.split(\"_\")[-1]\n new_node = onnx.helper.make_node(\n \"Gather\",\n name=\"Gather_\" + op_num,\n inputs=[node.input[0], node.input[1]],\n outputs=node.output,\n )\n\n model.graph.node.remove(node)\n model.graph.node.insert(int(op_num), new_node)\n\n onnx.checker.check_model(model)\n onnx.save(model, model_path)\n\n\ndef wrap_onnx_config_for_loss(onnx_config: OnnxConfig) -> OnnxConfig:\n if isinstance(onnx_config, OnnxSeq2SeqConfigWithPast):\n return OnnxSeq2SeqConfigWithPastAndLoss(onnx_config)\n elif isinstance(onnx_config, OnnxConfigWithPast):\n return OnnxConfigWithPastAndLoss(onnx_config)\n else:\n return OnnxConfigWithLoss(onnx_config)\n\n\ndef get_device_for_provider(provider: str) -> torch.device:\n \"\"\"\n Gets the PyTorch device (CPU/CUDA) associated with an ONNX Runtime provider.\n \"\"\"\n return torch.device(\"cuda\") if provider == \"CUDAExecutionProvider\" else torch.device(\"cpu\")\n\n\ndef get_provider_for_device(device: torch.device) -> str:\n \"\"\"\n Gets the ONNX Runtime provider associated with the PyTorch device (CPU/CUDA).\n \"\"\"\n return \"CUDAExecutionProvider\" if device.type.lower() == \"cuda\" else \"CPUExecutionProvider\"\n\n\nclass ORTQuantizableOperator(Enum):\n # Common ops\n Gather = \"Gather\"\n Transpose = \"Transpose\"\n EmbedLayerNormalizationQuant = \"EmbedLayerNormalization\"\n\n # QLinearOps\n Conv = \"Conv\"\n MatMul = \"MatMul\"\n Add = \"Add\"\n Mul = \"Mul\"\n Relu = \"Relu\"\n Clip = \"Clip\"\n LeakyRelu = \"LeakyRelu\"\n Sigmoid = \"Sigmoid\"\n MaxPool = \"MaxPool\"\n GlobalAveragePool = \"GlobalAveragePool\"\n Split = \"Split\"\n Pad = \"Pad\"\n Reshape = \"Reshape\"\n Squeeze = \"Squeeze\"\n Unsqueeze = \"Unsqueeze\"\n Resize = \"Resize\"\n AveragePool = \"AveragePool\"\n Concat = \"Concat\"\n",
"path": "optimum/onnxruntime/utils.py"
}
] | diff --git a/optimum/onnxruntime/utils.py b/optimum/onnxruntime/utils.py
index 75617c7335..e422571bbc 100644
--- a/optimum/onnxruntime/utils.py
+++ b/optimum/onnxruntime/utils.py
@@ -70,6 +70,7 @@ class ORTConfigManager:
"bart": ("encoder_attention_heads", "d_model", "bart"),
"gpt2": ("n_head", "n_embd", "gpt2"),
"gpt_neo": ("num_heads", "hidden_size", "gpt2"),
+ "xlm-roberta": ("num_attention_heads", "hidden_size", "bert"),
}
@classmethod
diff --git a/tests/onnxruntime/test_optimization.py b/tests/onnxruntime/test_optimization.py
index 1af509b0ef..3007b678dc 100644
--- a/tests/onnxruntime/test_optimization.py
+++ b/tests/onnxruntime/test_optimization.py
@@ -30,6 +30,7 @@
ElectraForSequenceClassification,
GPT2ForSequenceClassification,
RobertaForSequenceClassification,
+ XLMRobertaForSequenceClassification,
)
import onnx
@@ -67,6 +68,7 @@ class ORTOptimizerTest(unittest.TestCase):
(GPT2ForSequenceClassification, "hf-internal-testing/tiny-random-gpt2"),
(RobertaForSequenceClassification, "hf-internal-testing/tiny-random-roberta"),
(ElectraForSequenceClassification, "hf-internal-testing/tiny-random-electra"),
+ (XLMRobertaForSequenceClassification, "hf-internal-testing/tiny-xlm-roberta"),
)
@parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_MODEL_ID)
|
SeldonIO__MLServer-1168 | Expected XGBoost model file "model.bst" extension is undocumented?
On https://github.com/SeldonIO/MLServer/blob/master/runtimes/xgboost/mlserver_xgboost/xgboost.py#L21 you can see that MLServer is looking for an XGBoost model file called "model.bst". However, I cannot find any reference to that file extension in the XGBoost documentation. As far as I can see, XGBoost's documented file extensions are:
- ".json" added in 1.0.0, an "open format that can be easily reused"
- ".ubj" for Universal Binary JSON format, available in 1.6.0
- ".model" for the "old binary internal format" prior to 1.0.0, as shown in examples
Where does MLServer get the ".bst" extension from, and what model format does it use? Shouldn't it use one of the extensions mentioned in the XGBoost documentation instead, to avoid ambiguity?
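For reference, a minimal sketch (assuming an XGBoost 1.x release; the filenames are illustrative) of how `Booster.save_model()` picks the serialisation format from the file extension:

```python
import numpy as np
import xgboost as xgb

# Train a throwaway booster purely to demonstrate the documented formats.
dtrain = xgb.DMatrix(np.random.rand(8, 3), label=np.random.rand(8))
booster = xgb.train(params={}, dtrain=dtrain)

booster.save_model("model.json")  # open JSON format (>= 1.0.0)
booster.save_model("model.ubj")   # Universal Binary JSON (>= 1.6.0)
booster.save_model("model.bst")   # unrecognised extension: old binary internal format
```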
| [
{
"content": "import xgboost as xgb\n\nfrom typing import List\nfrom xgboost.sklearn import XGBModel\n\nfrom mlserver.errors import InferenceError\nfrom mlserver.model import MLModel\nfrom mlserver.utils import get_model_uri\nfrom mlserver.codecs import NumpyRequestCodec, NumpyCodec\nfrom mlserver.types import (\n InferenceRequest,\n InferenceResponse,\n RequestOutput,\n ResponseOutput,\n)\n\nPREDICT_OUTPUT = \"predict\"\nPREDICT_PROBA_OUTPUT = \"predict_proba\"\nVALID_OUTPUTS = [PREDICT_OUTPUT, PREDICT_PROBA_OUTPUT]\n\nWELLKNOWN_MODEL_FILENAMES = [\"model.bst\", \"model.json\"]\n\n\ndef _load_sklearn_interface(model_uri: str) -> XGBModel:\n try:\n regressor = xgb.XGBRegressor()\n regressor.load_model(model_uri)\n return regressor\n except TypeError:\n # If there was an error, it's likely due to the model being a\n # classifier\n classifier = xgb.XGBClassifier()\n classifier.load_model(model_uri)\n return classifier\n\n\nclass XGBoostModel(MLModel):\n \"\"\"\n Implementationof the MLModel interface to load and serve `xgboost` models.\n \"\"\"\n\n async def load(self) -> bool:\n model_uri = await get_model_uri(\n self._settings, wellknown_filenames=WELLKNOWN_MODEL_FILENAMES\n )\n\n self._model = _load_sklearn_interface(model_uri)\n\n return True\n\n def _check_request(self, payload: InferenceRequest) -> InferenceRequest:\n if not payload.outputs:\n # By default, only return the result of `predict()`\n payload.outputs = [RequestOutput(name=PREDICT_OUTPUT)]\n else:\n for request_output in payload.outputs:\n if request_output.name not in VALID_OUTPUTS:\n raise InferenceError(\n f\"XGBoostModel only supports '{PREDICT_OUTPUT}' and \"\n f\"'{PREDICT_PROBA_OUTPUT}' as outputs \"\n f\"({request_output.name} was received)\"\n )\n\n # Regression models do not support `predict_proba`\n if PREDICT_PROBA_OUTPUT in [o.name for o in payload.outputs]:\n if isinstance(self._model, xgb.XGBRegressor):\n raise InferenceError(\n f\"XGBRegressor models do not support '{PREDICT_PROBA_OUTPUT}\"\n )\n\n return payload\n\n def _get_model_outputs(self, payload: InferenceRequest) -> List[ResponseOutput]:\n decoded_request = self.decode_request(payload, default_codec=NumpyRequestCodec)\n\n outputs = []\n for request_output in payload.outputs: # type: ignore\n predict_fn = getattr(self._model, request_output.name)\n y = predict_fn(decoded_request)\n\n output = self.encode(y, request_output, default_codec=NumpyCodec)\n outputs.append(output)\n\n return outputs\n\n async def predict(self, payload: InferenceRequest) -> InferenceResponse:\n payload = self._check_request(payload)\n outputs = self._get_model_outputs(payload)\n\n return InferenceResponse(\n model_name=self.name,\n model_version=self.version,\n outputs=outputs,\n )\n",
"path": "runtimes/xgboost/mlserver_xgboost/xgboost.py"
}
] | [
{
"content": "import xgboost as xgb\n\nfrom typing import List\nfrom xgboost.sklearn import XGBModel\n\nfrom mlserver.errors import InferenceError\nfrom mlserver.model import MLModel\nfrom mlserver.utils import get_model_uri\nfrom mlserver.codecs import NumpyRequestCodec, NumpyCodec\nfrom mlserver.types import (\n InferenceRequest,\n InferenceResponse,\n RequestOutput,\n ResponseOutput,\n)\n\nPREDICT_OUTPUT = \"predict\"\nPREDICT_PROBA_OUTPUT = \"predict_proba\"\nVALID_OUTPUTS = [PREDICT_OUTPUT, PREDICT_PROBA_OUTPUT]\n\nWELLKNOWN_MODEL_FILENAMES = [\"model.bst\", \"model.json\", \"model.ubj\"]\n\n\ndef _load_sklearn_interface(model_uri: str) -> XGBModel:\n try:\n regressor = xgb.XGBRegressor()\n regressor.load_model(model_uri)\n return regressor\n except TypeError:\n # If there was an error, it's likely due to the model being a\n # classifier\n classifier = xgb.XGBClassifier()\n classifier.load_model(model_uri)\n return classifier\n\n\nclass XGBoostModel(MLModel):\n \"\"\"\n Implementationof the MLModel interface to load and serve `xgboost` models.\n \"\"\"\n\n async def load(self) -> bool:\n model_uri = await get_model_uri(\n self._settings, wellknown_filenames=WELLKNOWN_MODEL_FILENAMES\n )\n\n self._model = _load_sklearn_interface(model_uri)\n\n return True\n\n def _check_request(self, payload: InferenceRequest) -> InferenceRequest:\n if not payload.outputs:\n # By default, only return the result of `predict()`\n payload.outputs = [RequestOutput(name=PREDICT_OUTPUT)]\n else:\n for request_output in payload.outputs:\n if request_output.name not in VALID_OUTPUTS:\n raise InferenceError(\n f\"XGBoostModel only supports '{PREDICT_OUTPUT}' and \"\n f\"'{PREDICT_PROBA_OUTPUT}' as outputs \"\n f\"({request_output.name} was received)\"\n )\n\n # Regression models do not support `predict_proba`\n if PREDICT_PROBA_OUTPUT in [o.name for o in payload.outputs]:\n if isinstance(self._model, xgb.XGBRegressor):\n raise InferenceError(\n f\"XGBRegressor models do not support '{PREDICT_PROBA_OUTPUT}\"\n )\n\n return payload\n\n def _get_model_outputs(self, payload: InferenceRequest) -> List[ResponseOutput]:\n decoded_request = self.decode_request(payload, default_codec=NumpyRequestCodec)\n\n outputs = []\n for request_output in payload.outputs: # type: ignore\n predict_fn = getattr(self._model, request_output.name)\n y = predict_fn(decoded_request)\n\n output = self.encode(y, request_output, default_codec=NumpyCodec)\n outputs.append(output)\n\n return outputs\n\n async def predict(self, payload: InferenceRequest) -> InferenceResponse:\n payload = self._check_request(payload)\n outputs = self._get_model_outputs(payload)\n\n return InferenceResponse(\n model_name=self.name,\n model_version=self.version,\n outputs=outputs,\n )\n",
"path": "runtimes/xgboost/mlserver_xgboost/xgboost.py"
}
] | diff --git a/runtimes/xgboost/README.md b/runtimes/xgboost/README.md
index a00630fbd..cff6b104b 100644
--- a/runtimes/xgboost/README.md
+++ b/runtimes/xgboost/README.md
@@ -13,6 +13,37 @@ pip install mlserver mlserver-xgboost
For further information on how to use MLServer with XGBoost, you can check out
this [worked out example](../../docs/examples/xgboost/README.md).
+## XGBoost Artifact Type
+
+The XGBoost inference runtime will expect that your model is serialised via one
+of the following methods:
+
+| Extension | Docs | Example |
+| --------- | -------------------------------------------------------------------------------------------------------------------- | ---------------------------------- |
+| `*.json` | [JSON Format](https://xgboost.readthedocs.io/en/stable/tutorials/saving_model.html#introduction-to-model-io) | `booster.save_model("model.json")` |
+| `*.ubj` | [Binary JSON Format](https://xgboost.readthedocs.io/en/stable/tutorials/saving_model.html#introduction-to-model-io) | `booster.save_model("model.ubj")` |
+| `*.bst` | [(Old) Binary Format](https://xgboost.readthedocs.io/en/stable/tutorials/saving_model.html#introduction-to-model-io) | `booster.save_model("model.bst")` |
+
+````{note}
+By default, the runtime will look for a file called `model.[json | ubj | bst]`.
+However, this can be modified through the `parameters.uri` field of your
+{class}`ModelSettings <mlserver.settings.ModelSettings>` config (see the
+section on [Model Settings](../../docs/reference/model-settings.md) for more
+details).
+
+```{code-block} json
+---
+emphasize-lines: 3-5
+---
+{
+ "name": "foo",
+ "parameters": {
+ "uri": "./my-own-model-filename.json"
+ }
+}
+```
+````
+
## Content Types
If no [content type](../../docs/user-guide/content-type) is present on the
@@ -21,3 +52,42 @@ request or metadata, the XGBoost runtime will try to decode the payload as a
To avoid this, either send a different content type explicitly, or define the
correct one as part of your [model's
metadata](../../docs/reference/model-settings).
+
+## Model Outputs
+
+The XGBoost inference runtime exposes a number of outputs depending on the
+model type.
+These outputs match to the `predict` and `predict_proba` methods of the XGBoost
+model.
+
+| Output | Returned By Default | Availability |
+| --------------- | ------------------- | --------------------------------------------------------------------- |
+| `predict` | ✅ | Available on all XGBoost models. |
+| `predict_proba` | ❌ | Only available on non-regressor models (i.e. `XGBClassifier` models). |
+
+By default, the runtime will only return the output of `predict`.
+However, you are able to control which outputs you want back through the
+`outputs` field of your {class}`InferenceRequest
+<mlserver.types.InferenceRequest>` payload.
+
+For example, to only return the model's `predict_proba` output, you could
+define a payload such as:
+
+```{code-block} json
+---
+emphasize-lines: 10-12
+---
+{
+ "inputs": [
+ {
+ "name": "my-input",
+ "datatype": "INT32",
+ "shape": [2, 2],
+ "data": [1, 2, 3, 4]
+ }
+ ],
+ "outputs": [
+ { "name": "predict_proba" }
+ ]
+}
+```
diff --git a/runtimes/xgboost/mlserver_xgboost/xgboost.py b/runtimes/xgboost/mlserver_xgboost/xgboost.py
index ba4f0f44d..9e97fe132 100644
--- a/runtimes/xgboost/mlserver_xgboost/xgboost.py
+++ b/runtimes/xgboost/mlserver_xgboost/xgboost.py
@@ -18,7 +18,7 @@
PREDICT_PROBA_OUTPUT = "predict_proba"
VALID_OUTPUTS = [PREDICT_OUTPUT, PREDICT_PROBA_OUTPUT]
-WELLKNOWN_MODEL_FILENAMES = ["model.bst", "model.json"]
+WELLKNOWN_MODEL_FILENAMES = ["model.bst", "model.json", "model.ubj"]
def _load_sklearn_interface(model_uri: str) -> XGBModel:
diff --git a/runtimes/xgboost/tests/conftest.py b/runtimes/xgboost/tests/conftest.py
index e7525332c..13272290c 100644
--- a/runtimes/xgboost/tests/conftest.py
+++ b/runtimes/xgboost/tests/conftest.py
@@ -9,6 +9,7 @@
from mlserver.utils import install_uvloop_event_loop
from mlserver_xgboost import XGBoostModel
+from mlserver_xgboost.xgboost import WELLKNOWN_MODEL_FILENAMES
TESTS_PATH = os.path.dirname(__file__)
TESTDATA_PATH = os.path.join(TESTS_PATH, "testdata")
@@ -23,15 +24,16 @@ def event_loop():
loop.close()
[email protected]
-def model_uri(tmp_path) -> str:
[email protected](params=WELLKNOWN_MODEL_FILENAMES)
+def model_uri(request, tmp_path) -> str:
n = 4
d = 3
dtrain = xgb.DMatrix(data=np.random.rand(n, d), label=np.random.rand(n))
bst = xgb.train(params={}, dtrain=dtrain)
- model_uri = os.path.join(tmp_path, "xgboost-model.json")
+ _, ext = os.path.splitext(request.param)
+ model_uri = os.path.join(tmp_path, f"xgboost-model{ext}")
bst.save_model(model_uri)
return model_uri
diff --git a/runtimes/xgboost/tests/test_xgboost.py b/runtimes/xgboost/tests/test_xgboost.py
index 39847d08d..748cb74a7 100644
--- a/runtimes/xgboost/tests/test_xgboost.py
+++ b/runtimes/xgboost/tests/test_xgboost.py
@@ -11,7 +11,6 @@
from mlserver_xgboost import XGBoostModel
from mlserver_xgboost.xgboost import (
- WELLKNOWN_MODEL_FILENAMES,
PREDICT_OUTPUT,
PREDICT_PROBA_OUTPUT,
)
@@ -27,13 +26,15 @@ def test_load_classifier(classifier: XGBoostModel):
assert type(classifier._model) == xgb.XGBClassifier
[email protected]("fname", WELLKNOWN_MODEL_FILENAMES)
-async def test_load_folder(fname, model_uri: str, model_settings: ModelSettings):
+async def test_load_folder(model_uri: str, model_settings: ModelSettings):
+ # Rename `xgboost-model.[ext]` to `model.[ext]`
+ _, ext = os.path.splitext(model_uri)
+ fname = f"model{ext}"
model_folder = os.path.dirname(model_uri)
model_path = os.path.join(model_folder, fname)
os.rename(model_uri, model_path)
- model_settings.parameters.uri = model_path # type: ignore
+ model_settings.parameters.uri = model_folder # type: ignore
model = XGBoostModel(model_settings)
model.ready = await model.load()
|
django-oscar__django-oscar-2346 | oscar.apps.offer.custom dependency on models prevents customization
The module `oscar.apps.offer.custom` imports the `Benefit` and `Condition` models directly instead of via `get_model()`. If either of these models is overridden by a forked version, this module cannot be imported.
The module should be updated to call `get_model()`.
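A minimal sketch of the suggested change, mirroring how the module already resolves `Range`:

```python
from oscar.core.loading import get_model

# Resolve the models dynamically so forked/overridden versions are picked
# up, instead of importing the concrete classes from oscar.apps.offer.models.
Benefit = get_model('offer', 'Benefit')
Condition = get_model('offer', 'Condition')
Range = get_model('offer', 'Range')
```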
| [
{
"content": "from django.core import exceptions\nfrom django.db import IntegrityError\n\nfrom oscar.apps.offer.models import Benefit, Condition\nfrom oscar.core.loading import get_model\n\nRange = get_model('offer', 'Range')\n\n\ndef _class_path(klass):\n return '%s.%s' % (klass.__module__, klass.__name__)\n\n\ndef create_range(range_class):\n \"\"\"\n Create a custom range instance from the passed range class\n\n This function creates the appropriate database record for this custom\n range, including setting the class path for the custom proxy class.\n \"\"\"\n if not hasattr(range_class, 'name'):\n raise exceptions.ValidationError(\n \"A custom range must have a name attribute\")\n\n # Ensure range name is text (not ugettext wrapper)\n if range_class.name.__class__.__name__ == '__proxy__':\n raise exceptions.ValidationError(\n \"Custom ranges must have text names (not ugettext proxies)\")\n\n try:\n return Range.objects.create(\n name=range_class.name, proxy_class=_class_path(range_class))\n except IntegrityError:\n raise ValueError(\"The passed range already exists in the database.\")\n\n\ndef create_condition(condition_class, **kwargs):\n \"\"\"\n Create a custom condition instance\n \"\"\"\n return Condition.objects.create(\n proxy_class=_class_path(condition_class), **kwargs)\n\n\ndef create_benefit(benefit_class, **kwargs):\n \"\"\"\n Create a custom benefit instance\n \"\"\"\n # The custom benefit_class must override __str__ and description to\n # avoid a recursion error\n if benefit_class.description is Benefit.description:\n raise RuntimeError(\"Your custom benefit must implement its own \"\n \"'description' property\")\n return Benefit.objects.create(\n proxy_class=_class_path(benefit_class), **kwargs)\n",
"path": "src/oscar/apps/offer/custom.py"
}
] | [
{
"content": "from django.core import exceptions\nfrom django.db import IntegrityError\n\nfrom oscar.core.loading import get_model\n\nBenefit = get_model('offer', 'Benefit')\nCondition = get_model('offer', 'Condition')\nRange = get_model('offer', 'Range')\n\n\ndef _class_path(klass):\n return '%s.%s' % (klass.__module__, klass.__name__)\n\n\ndef create_range(range_class):\n \"\"\"\n Create a custom range instance from the passed range class\n\n This function creates the appropriate database record for this custom\n range, including setting the class path for the custom proxy class.\n \"\"\"\n if not hasattr(range_class, 'name'):\n raise exceptions.ValidationError(\n \"A custom range must have a name attribute\")\n\n # Ensure range name is text (not ugettext wrapper)\n if range_class.name.__class__.__name__ == '__proxy__':\n raise exceptions.ValidationError(\n \"Custom ranges must have text names (not ugettext proxies)\")\n\n try:\n return Range.objects.create(\n name=range_class.name, proxy_class=_class_path(range_class))\n except IntegrityError:\n raise ValueError(\"The passed range already exists in the database.\")\n\n\ndef create_condition(condition_class, **kwargs):\n \"\"\"\n Create a custom condition instance\n \"\"\"\n return Condition.objects.create(\n proxy_class=_class_path(condition_class), **kwargs)\n\n\ndef create_benefit(benefit_class, **kwargs):\n \"\"\"\n Create a custom benefit instance\n \"\"\"\n # The custom benefit_class must override __str__ and description to\n # avoid a recursion error\n if benefit_class.description is Benefit.description:\n raise RuntimeError(\"Your custom benefit must implement its own \"\n \"'description' property\")\n return Benefit.objects.create(\n proxy_class=_class_path(benefit_class), **kwargs)\n",
"path": "src/oscar/apps/offer/custom.py"
}
] | diff --git a/src/oscar/apps/offer/custom.py b/src/oscar/apps/offer/custom.py
index df16a5ee4ed..e23e3e07982 100644
--- a/src/oscar/apps/offer/custom.py
+++ b/src/oscar/apps/offer/custom.py
@@ -1,9 +1,10 @@
from django.core import exceptions
from django.db import IntegrityError
-from oscar.apps.offer.models import Benefit, Condition
from oscar.core.loading import get_model
+Benefit = get_model('offer', 'Benefit')
+Condition = get_model('offer', 'Condition')
Range = get_model('offer', 'Range')
|
holoviz__panel-4155 | _tkinter.TclError: invalid command name ".!canvas"
```bash
pip install panel==0.13.1 hvplot==0.8.0 holoviews==1.14.9 matplotlib==3.5.2
```
--------
I'm trying to create a demo app with Panel, hvplot, and HoloViews using the matplotlib backend.
```python
import pandas as pd
import hvplot.pandas
import panel as pn
import holoviews as hv
import holoviews.plotting.mpl
pn.extension("plotly", sizing_mode="stretch_width")
hv.extension("matplotlib")
data = pd.DataFrame(
{
"x": [1, 2, 3, 4, 5, 6],
"y": [2, 4, 8, 16, 32, 64],
}
)
plot = data.hvplot(x="x", y="y", responsive=True)
pn.pane.HoloViews(plot, backend="matplotlib", sizing_mode="stretch_both", min_height=400).servable()
```
```
panel serve script2.py
```

```bash
TclError: invalid command name ".!canvas"
Traceback (most recent call last):
File "c:\repos\private\awesome-panel-lightning\.venv\lib\site-packages\bokeh\application\handlers\code_runner.py", line 231, in run
exec(self._code, module.__dict__)
File "C:\repos\private\awesome-panel-lightning\script2.py", line 19, in <module>
pn.pane.HoloViews(plot, backend="matplotlib", sizing_mode="stretch_both", min_height=400).servable()
File "c:\repos\private\awesome-panel-lightning\.venv\lib\site-packages\panel\viewable.py", line 374, in servable
self.server_doc(title=title, location=location) # type: ignore
File "c:\repos\private\awesome-panel-lightning\.venv\lib\site-packages\panel\viewable.py", line 853, in server_doc
model = self.get_root(doc)
File "c:\repos\private\awesome-panel-lightning\.venv\lib\site-packages\panel\pane\base.py", line 314, in get_root
root = self.layout._get_model(doc, comm=comm)
File "c:\repos\private\awesome-panel-lightning\.venv\lib\site-packages\panel\layout\base.py", line 146, in _get_model
objects = self._get_objects(model, [], doc, root, comm)
File "c:\repos\private\awesome-panel-lightning\.venv\lib\site-packages\panel\layout\base.py", line 131, in _get_objects
child = pane._get_model(doc, root, model, comm)
File "c:\repos\private\awesome-panel-lightning\.venv\lib\site-packages\panel\pane\holoviews.py", line 273, in _get_model
model = child_pane._get_model(doc, root, parent, comm)
File "c:\repos\private\awesome-panel-lightning\.venv\lib\site-packages\panel\pane\plot.py", line 210, in _get_model
return PNG._get_model(self, doc, root, parent, comm)
File "c:\repos\private\awesome-panel-lightning\.venv\lib\site-packages\panel\pane\markup.py", line 39, in _get_model
model = self._bokeh_model(**self._get_properties())
File "c:\repos\private\awesome-panel-lightning\.venv\lib\site-packages\panel\pane\image.py", line 125, in _get_properties
data = self._data()
File "c:\repos\private\awesome-panel-lightning\.venv\lib\site-packages\panel\pane\plot.py", line 244, in _data
self.object.set_dpi(self.dpi)
File "c:\repos\private\awesome-panel-lightning\.venv\lib\site-packages\matplotlib\figure.py", line 2767, in set_dpi
self.dpi = val
File "c:\repos\private\awesome-panel-lightning\.venv\lib\site-packages\matplotlib\figure.py", line 2457, in _set_dpi
self.set_size_inches(w, h, forward=forward)
File "c:\repos\private\awesome-panel-lightning\.venv\lib\site-packages\matplotlib\figure.py", line 2723, in set_size_inches
manager.resize(*(size * self.dpi).astype(int))
File "c:\repos\private\awesome-panel-lightning\.venv\lib\site-packages\matplotlib\backends\_backend_tk.py", line 472, in resize
self.canvas._tkcanvas.configure(width=width, height=height)
File "C:\Program Files\Python39\lib\tkinter\__init__.py", line 1646, in configure
return self._configure('configure', cnf, kw)
File "C:\Program Files\Python39\lib\tkinter\__init__.py", line 1636, in _configure
self.tk.call(_flatten((self._w, cmd)) + self._options(cnf))
_tkinter.TclError: invalid command name ".!canvas"
```
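As the docs change in the accompanying patch notes, the workaround in a server context is to select a non-GUI matplotlib backend explicitly before any figure is created:

```python
import matplotlib

# Pick a non-interactive backend before any figure is rendered; otherwise
# matplotlib may fall back to TkAgg in the server process and fail as above.
matplotlib.use('agg')
```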
| [
{
"content": "\"\"\"\nPane class which render plots from different libraries\n\"\"\"\nfrom __future__ import annotations\n\nimport sys\n\nfrom contextlib import contextmanager\nfrom io import BytesIO\nfrom typing import (\n TYPE_CHECKING, Any, ClassVar, Mapping, Optional,\n)\n\nimport param\n\nfrom bokeh.models import (\n CustomJS, LayoutDOM, Model, Spacer as BkSpacer,\n)\nfrom bokeh.themes import Theme\n\nfrom ..io import remove_root\nfrom ..io.notebook import push\nfrom ..util import escape\nfrom ..viewable import Layoutable\nfrom .base import PaneBase\nfrom .image import PNG\nfrom .ipywidget import IPyWidget\nfrom .markup import HTML\n\nif TYPE_CHECKING:\n from bokeh.document import Document\n from pyviz_comms import Comm\n\nFOLIUM_BEFORE = '<div style=\"width:100%;\"><div style=\"position:relative;width:100%;height:0;padding-bottom:60%;\">'\nFOLIUM_AFTER = '<div style=\"width:100%;height:100%\"><div style=\"position:relative;width:100%;height:100%;padding-bottom:0%;\">'\n\n@contextmanager\ndef _wrap_callback(cb, wrapped, doc, comm, callbacks):\n \"\"\"\n Wraps a bokeh callback ensuring that any events triggered by it\n appropriately dispatch events in the notebook. Also temporarily\n replaces the wrapped callback with the real one while the callback\n is exectuted to ensure the callback can be removed as usual.\n \"\"\"\n hold = doc.callbacks.hold_value\n doc.hold('combine')\n if wrapped in callbacks:\n index = callbacks.index(wrapped)\n callbacks[index] = cb\n yield\n if cb in callbacks:\n index = callbacks.index(cb)\n callbacks[index] = wrapped\n push(doc, comm)\n doc.hold(hold)\n\n\nclass Bokeh(PaneBase):\n \"\"\"\n The Bokeh pane allows displaying any displayable Bokeh model inside a\n Panel app.\n\n Reference: https://panel.holoviz.org/reference/panes/Bokeh.html\n\n :Example:\n\n >>> Bokeh(some_bokeh_figure)\n \"\"\"\n\n autodispatch = param.Boolean(default=True, doc=\"\"\"\n Whether to automatically dispatch events inside bokeh on_change\n and on_event callbacks in the notebook.\"\"\")\n\n theme = param.ClassSelector(default=None, class_=(Theme, str), doc=\"\"\"\n Bokeh theme to apply to the plot.\"\"\")\n\n priority: ClassVar[float | bool | None] = 0.8\n\n _rename: ClassVar[Mapping[str, str | None]] = {'autodispatch': None, 'theme': None}\n\n @classmethod\n def applies(cls, obj: Any) -> float | bool | None:\n return isinstance(obj, LayoutDOM)\n\n @classmethod\n def _property_callback_wrapper(cls, cb, doc, comm, callbacks):\n def wrapped_callback(attr, old, new):\n with _wrap_callback(cb, wrapped_callback, doc, comm, callbacks):\n cb(attr, old, new)\n return wrapped_callback\n\n @classmethod\n def _event_callback_wrapper(cls, cb, doc, comm, callbacks):\n def wrapped_callback(event):\n with _wrap_callback(cb, wrapped_callback, doc, comm, callbacks):\n cb(event)\n return wrapped_callback\n\n @classmethod\n def _wrap_bokeh_callbacks(cls, root, bokeh_model, doc, comm):\n for model in bokeh_model.select({'type': Model}):\n for key, cbs in model._callbacks.items():\n callbacks = model._callbacks[key]\n callbacks[:] = [\n cls._property_callback_wrapper(cb, doc, comm, callbacks)\n for cb in cbs\n ]\n for key, cbs in model._event_callbacks.items():\n callbacks = model._event_callbacks[key]\n callbacks[:] = [\n cls._event_callback_wrapper(cb, doc, comm, callbacks)\n for cb in cbs\n ]\n\n def _get_model(\n self, doc: Document, root: Optional[Model] = None,\n parent: Optional[Model] = None, comm: Optional[Comm] = None\n ) -> Model:\n if root is None:\n return self.get_root(doc, comm)\n\n if 
self.object is None:\n model = BkSpacer()\n else:\n model = self.object\n\n properties = {}\n for p, value in self.param.values().items():\n if (p not in Layoutable.param or p == 'name' or\n value is self.param[p].default):\n continue\n properties[p] = value\n model.update(**properties)\n if comm and self.autodispatch:\n self._wrap_bokeh_callbacks(root, model, doc, comm)\n\n ref = root.ref['id']\n for js in model.select({'type': CustomJS}):\n js.code = js.code.replace(model.ref['id'], ref)\n\n if model._document and doc is not model._document:\n remove_root(model, doc)\n\n self._models[ref] = (model, parent)\n\n if self.theme:\n doc.theme = self.theme\n\n return model\n\n\nclass Matplotlib(PNG, IPyWidget):\n \"\"\"\n The `Matplotlib` pane allows displaying any displayable Matplotlib figure\n inside a Panel app.\n\n - It will render the plot to PNG at the declared DPI and then embed it.\n - If you find the figure to be clipped on the edges, you can set `tight=True`\n to automatically resize objects to fit within the pane.\n - If you have installed `ipympl` you will also be able to use the\n interactive backend.\n\n Reference: https://panel.holoviz.org/reference/panes/Matplotlib.html\n\n :Example:\n\n >>> Matplotlib(some_matplotlib_figure, dpi=144)\n \"\"\"\n\n dpi = param.Integer(default=144, bounds=(1, None), doc=\"\"\"\n Scales the dpi of the matplotlib figure.\"\"\")\n\n high_dpi = param.Boolean(default=True, doc=\"\"\"\n Whether to optimize output for high-dpi displays.\"\"\")\n\n interactive = param.Boolean(default=False, constant=True, doc=\"\"\"\n Whether to render interactive matplotlib plot with ipympl.\"\"\")\n\n tight = param.Boolean(default=False, doc=\"\"\"\n Automatically adjust the figure size to fit the\n subplots and other artist elements.\"\"\")\n\n _rename: ClassVar[Mapping[str, str | None]] = {\n 'object': 'text', 'interactive': None, 'dpi': None, 'tight': None, 'high_dpi': None\n }\n\n _rerender_params = PNG._rerender_params + ['object', 'dpi', 'tight']\n\n @classmethod\n def applies(cls, obj: Any) -> float | bool | None:\n if 'matplotlib' not in sys.modules:\n return False\n from matplotlib.figure import Figure\n is_fig = isinstance(obj, Figure)\n if is_fig and obj.canvas is None:\n raise ValueError('Matplotlib figure has no canvas and '\n 'cannot be rendered.')\n return is_fig\n\n def __init__(self, object=None, **params):\n super().__init__(object, **params)\n self._managers = {}\n self._explicit_width = params.get('width') is not None\n self._explicit_height = params.get('height') is not None\n\n def _get_widget(self, fig):\n import matplotlib.backends\n old_backend = getattr(matplotlib.backends, 'backend', 'agg')\n\n from ipympl.backend_nbagg import Canvas, FigureManager, is_interactive\n from matplotlib._pylab_helpers import Gcf\n\n matplotlib.use(old_backend)\n\n def closer(event):\n Gcf.destroy(0)\n\n canvas = Canvas(fig)\n fig.patch.set_alpha(0)\n manager = FigureManager(canvas, 0)\n\n if is_interactive():\n fig.canvas.draw_idle()\n\n canvas.mpl_connect('close_event', closer)\n return manager\n\n @param.depends('width', watch=True)\n def _set_explicict_width(self):\n self._explicit_width = self.width is not None\n\n @param.depends('height', watch=True)\n def _set_explicict_height(self):\n self._explicit_height = self.height is not None\n\n def _update_dimensions(self):\n w, h = self.object.get_size_inches()\n dpi = self.dpi / 2. 
if self.high_dpi else self.dpi\n with param.discard_events(self):\n if not self._explicit_width:\n if self._explicit_height:\n self.width = int(self.height * (w/h))\n else:\n self.width = int(dpi * w)\n self._explicit_width = False\n if not self._explicit_height:\n if self._explicit_width:\n self.height = int(self.width * (w/h))\n else:\n self.height = self.height or int(dpi * h)\n self._explicit_height = False\n\n def _get_model(\n self, doc: Document, root: Optional[Model] = None,\n parent: Optional[Model] = None, comm: Optional[Comm] = None\n ) -> Model:\n self._update_dimensions()\n if not self.interactive:\n model = PNG._get_model(self, doc, root, parent, comm)\n return model\n self.object.set_dpi(self.dpi)\n manager = self._get_widget(self.object)\n props = self._process_param_change(self._init_params())\n kwargs = {k: v for k, v in props.items()\n if k not in self._rerender_params+['interactive']}\n kwargs['width'] = self.width\n kwargs['height'] = self.height\n kwargs['sizing_mode'] = self.sizing_mode\n model = self._get_ipywidget(\n manager.canvas, doc, root, comm, **kwargs\n )\n if root is None:\n root = model\n self._models[root.ref['id']] = (model, parent)\n self._managers[root.ref['id']] = manager\n return model\n\n def _update(self, ref: str, model: Model) -> None:\n if not self.interactive:\n self._update_dimensions()\n model.update(**self._get_properties())\n return\n manager = self._managers[ref]\n if self.object is not manager.canvas.figure:\n self.object.set_dpi(self.dpi)\n self.object.patch.set_alpha(0)\n manager.canvas.figure = self.object\n self.object.set_canvas(manager.canvas)\n event = {'width': manager.canvas._width,\n 'height': manager.canvas._height}\n manager.canvas.handle_resize(event)\n manager.canvas.draw_idle()\n\n def _data(self):\n self.object.set_dpi(self.dpi)\n b = BytesIO()\n\n if self.tight:\n bbox_inches = 'tight'\n else:\n bbox_inches = None\n\n self.object.canvas.print_figure(b, bbox_inches=bbox_inches)\n return b.getvalue()\n\n\nclass RGGPlot(PNG):\n \"\"\"\n An RGGPlot pane renders an r2py-based ggplot2 figure to png\n and wraps the base64-encoded data in a bokeh Div model.\n \"\"\"\n\n height = param.Integer(default=400)\n\n width = param.Integer(default=400)\n\n dpi = param.Integer(default=144, bounds=(1, None))\n\n _rerender_params = PNG._rerender_params + ['object', 'dpi', 'width', 'height']\n\n @classmethod\n def applies(cls, obj: Any) -> float | bool | None:\n return type(obj).__name__ == 'GGPlot' and hasattr(obj, 'r_repr')\n\n def _img(self):\n from rpy2 import robjects\n from rpy2.robjects.lib import grdevices\n with grdevices.render_to_bytesio(grdevices.png,\n type=\"cairo-png\", width=self.width, height=self.height,\n res=self.dpi, antialias=\"subpixel\") as b:\n robjects.r(\"print\")(self.object)\n return b.getvalue()\n\n\nclass YT(HTML):\n \"\"\"\n YT panes wrap plottable objects from the YT library.\n By default, the height and width are calculated by summing all\n contained plots, but can optionally be specified explicitly to\n provide additional space.\n \"\"\"\n\n priority: ClassVar[float | bool | None] = 0.5\n\n @classmethod\n def applies(cls, obj: bool) -> float | bool | None:\n return (getattr(obj, '__module__', '').startswith('yt.') and\n hasattr(obj, \"plots\") and\n hasattr(obj, \"_repr_html_\"))\n\n def _get_properties(self):\n p = super()._get_properties()\n if self.object is None:\n return p\n\n width = height = 0\n if self.width is None or self.height is None:\n for k,v in self.object.plots.items():\n if hasattr(v, 
\"_repr_png_\"):\n img = v._repr_png_()\n w,h = PNG._imgshape(img)\n height += h\n width = max(w, width)\n\n if self.width is None: p[\"width\"] = width\n if self.height is None: p[\"height\"] = height\n\n return p\n\n\nclass Folium(HTML):\n \"\"\"\n The Folium pane wraps Folium map components.\n \"\"\"\n\n sizing_mode = param.ObjectSelector(default='stretch_width', objects=[\n 'fixed', 'stretch_width', 'stretch_height', 'stretch_both',\n 'scale_width', 'scale_height', 'scale_both', None])\n\n priority: ClassVar[float | bool | None] = 0.6\n\n @classmethod\n def applies(cls, obj: Any) -> float | bool | None:\n return (getattr(obj, '__module__', '').startswith('folium.') and\n hasattr(obj, \"_repr_html_\"))\n\n def _get_properties(self):\n properties = super()._get_properties()\n text = '' if self.object is None else self.object\n if hasattr(text, '_repr_html_'):\n text = text._repr_html_().replace(FOLIUM_BEFORE, FOLIUM_AFTER)\n return dict(properties, text=escape(text))\n",
"path": "panel/pane/plot.py"
}
] | [
{
"content": "\"\"\"\nPane class which render plots from different libraries\n\"\"\"\nfrom __future__ import annotations\n\nimport sys\n\nfrom contextlib import contextmanager\nfrom io import BytesIO\nfrom typing import (\n TYPE_CHECKING, Any, ClassVar, Mapping, Optional,\n)\n\nimport param\n\nfrom bokeh.models import (\n CustomJS, LayoutDOM, Model, Spacer as BkSpacer,\n)\nfrom bokeh.themes import Theme\n\nfrom ..io import remove_root\nfrom ..io.notebook import push\nfrom ..util import escape\nfrom ..viewable import Layoutable\nfrom .base import PaneBase\nfrom .image import PNG\nfrom .ipywidget import IPyWidget\nfrom .markup import HTML\n\nif TYPE_CHECKING:\n from bokeh.document import Document\n from pyviz_comms import Comm\n\nFOLIUM_BEFORE = '<div style=\"width:100%;\"><div style=\"position:relative;width:100%;height:0;padding-bottom:60%;\">'\nFOLIUM_AFTER = '<div style=\"width:100%;height:100%\"><div style=\"position:relative;width:100%;height:100%;padding-bottom:0%;\">'\n\n@contextmanager\ndef _wrap_callback(cb, wrapped, doc, comm, callbacks):\n \"\"\"\n Wraps a bokeh callback ensuring that any events triggered by it\n appropriately dispatch events in the notebook. Also temporarily\n replaces the wrapped callback with the real one while the callback\n is exectuted to ensure the callback can be removed as usual.\n \"\"\"\n hold = doc.callbacks.hold_value\n doc.hold('combine')\n if wrapped in callbacks:\n index = callbacks.index(wrapped)\n callbacks[index] = cb\n yield\n if cb in callbacks:\n index = callbacks.index(cb)\n callbacks[index] = wrapped\n push(doc, comm)\n doc.hold(hold)\n\n\nclass Bokeh(PaneBase):\n \"\"\"\n The Bokeh pane allows displaying any displayable Bokeh model inside a\n Panel app.\n\n Reference: https://panel.holoviz.org/reference/panes/Bokeh.html\n\n :Example:\n\n >>> Bokeh(some_bokeh_figure)\n \"\"\"\n\n autodispatch = param.Boolean(default=True, doc=\"\"\"\n Whether to automatically dispatch events inside bokeh on_change\n and on_event callbacks in the notebook.\"\"\")\n\n theme = param.ClassSelector(default=None, class_=(Theme, str), doc=\"\"\"\n Bokeh theme to apply to the plot.\"\"\")\n\n priority: ClassVar[float | bool | None] = 0.8\n\n _rename: ClassVar[Mapping[str, str | None]] = {'autodispatch': None, 'theme': None}\n\n @classmethod\n def applies(cls, obj: Any) -> float | bool | None:\n return isinstance(obj, LayoutDOM)\n\n @classmethod\n def _property_callback_wrapper(cls, cb, doc, comm, callbacks):\n def wrapped_callback(attr, old, new):\n with _wrap_callback(cb, wrapped_callback, doc, comm, callbacks):\n cb(attr, old, new)\n return wrapped_callback\n\n @classmethod\n def _event_callback_wrapper(cls, cb, doc, comm, callbacks):\n def wrapped_callback(event):\n with _wrap_callback(cb, wrapped_callback, doc, comm, callbacks):\n cb(event)\n return wrapped_callback\n\n @classmethod\n def _wrap_bokeh_callbacks(cls, root, bokeh_model, doc, comm):\n for model in bokeh_model.select({'type': Model}):\n for key, cbs in model._callbacks.items():\n callbacks = model._callbacks[key]\n callbacks[:] = [\n cls._property_callback_wrapper(cb, doc, comm, callbacks)\n for cb in cbs\n ]\n for key, cbs in model._event_callbacks.items():\n callbacks = model._event_callbacks[key]\n callbacks[:] = [\n cls._event_callback_wrapper(cb, doc, comm, callbacks)\n for cb in cbs\n ]\n\n def _get_model(\n self, doc: Document, root: Optional[Model] = None,\n parent: Optional[Model] = None, comm: Optional[Comm] = None\n ) -> Model:\n if root is None:\n return self.get_root(doc, comm)\n\n if 
self.object is None:\n model = BkSpacer()\n else:\n model = self.object\n\n properties = {}\n for p, value in self.param.values().items():\n if (p not in Layoutable.param or p == 'name' or\n value is self.param[p].default):\n continue\n properties[p] = value\n model.update(**properties)\n if comm and self.autodispatch:\n self._wrap_bokeh_callbacks(root, model, doc, comm)\n\n ref = root.ref['id']\n for js in model.select({'type': CustomJS}):\n js.code = js.code.replace(model.ref['id'], ref)\n\n if model._document and doc is not model._document:\n remove_root(model, doc)\n\n self._models[ref] = (model, parent)\n\n if self.theme:\n doc.theme = self.theme\n\n return model\n\n\nclass Matplotlib(PNG, IPyWidget):\n \"\"\"\n The `Matplotlib` pane allows displaying any displayable Matplotlib figure\n inside a Panel app.\n\n - It will render the plot to PNG at the declared DPI and then embed it.\n - If you find the figure to be clipped on the edges, you can set `tight=True`\n to automatically resize objects to fit within the pane.\n - If you have installed `ipympl` you will also be able to use the\n interactive backend.\n\n Reference: https://panel.holoviz.org/reference/panes/Matplotlib.html\n\n :Example:\n\n >>> Matplotlib(some_matplotlib_figure, dpi=144)\n \"\"\"\n\n dpi = param.Integer(default=144, bounds=(1, None), doc=\"\"\"\n Scales the dpi of the matplotlib figure.\"\"\")\n\n high_dpi = param.Boolean(default=True, doc=\"\"\"\n Whether to optimize output for high-dpi displays.\"\"\")\n\n interactive = param.Boolean(default=False, constant=True, doc=\"\"\"\n Whether to render interactive matplotlib plot with ipympl.\"\"\")\n\n tight = param.Boolean(default=False, doc=\"\"\"\n Automatically adjust the figure size to fit the\n subplots and other artist elements.\"\"\")\n\n _rename: ClassVar[Mapping[str, str | None]] = {\n 'object': 'text', 'interactive': None, 'dpi': None, 'tight': None, 'high_dpi': None\n }\n\n _rerender_params = PNG._rerender_params + ['object', 'dpi', 'tight']\n\n @classmethod\n def applies(cls, obj: Any) -> float | bool | None:\n if 'matplotlib' not in sys.modules:\n return False\n from matplotlib.figure import Figure\n is_fig = isinstance(obj, Figure)\n if is_fig and obj.canvas is None:\n raise ValueError('Matplotlib figure has no canvas and '\n 'cannot be rendered.')\n return is_fig\n\n def __init__(self, object=None, **params):\n super().__init__(object, **params)\n self._managers = {}\n self._explicit_width = params.get('width') is not None\n self._explicit_height = params.get('height') is not None\n\n def _get_widget(self, fig):\n import matplotlib.backends\n old_backend = getattr(matplotlib.backends, 'backend', 'agg')\n\n from ipympl.backend_nbagg import Canvas, FigureManager, is_interactive\n from matplotlib._pylab_helpers import Gcf\n\n matplotlib.use(old_backend)\n\n def closer(event):\n Gcf.destroy(0)\n\n canvas = Canvas(fig)\n fig.patch.set_alpha(0)\n manager = FigureManager(canvas, 0)\n\n if is_interactive():\n fig.canvas.draw_idle()\n\n canvas.mpl_connect('close_event', closer)\n return manager\n\n @param.depends('width', watch=True)\n def _set_explicict_width(self):\n self._explicit_width = self.width is not None\n\n @param.depends('height', watch=True)\n def _set_explicict_height(self):\n self._explicit_height = self.height is not None\n\n def _update_dimensions(self):\n w, h = self.object.get_size_inches()\n dpi = self.dpi / 2. 
if self.high_dpi else self.dpi\n with param.discard_events(self):\n if not self._explicit_width:\n if self._explicit_height:\n self.width = int(self.height * (w/h))\n else:\n self.width = int(dpi * w)\n self._explicit_width = False\n if not self._explicit_height:\n if self._explicit_width:\n self.height = int(self.width * (w/h))\n else:\n self.height = self.height or int(dpi * h)\n self._explicit_height = False\n\n def _get_model(\n self, doc: Document, root: Optional[Model] = None,\n parent: Optional[Model] = None, comm: Optional[Comm] = None\n ) -> Model:\n self._update_dimensions()\n if not self.interactive:\n model = PNG._get_model(self, doc, root, parent, comm)\n return model\n self.object.set_dpi(self.dpi)\n manager = self._get_widget(self.object)\n props = self._process_param_change(self._init_params())\n kwargs = {k: v for k, v in props.items()\n if k not in self._rerender_params+['interactive']}\n kwargs['width'] = self.width\n kwargs['height'] = self.height\n kwargs['sizing_mode'] = self.sizing_mode\n model = self._get_ipywidget(\n manager.canvas, doc, root, comm, **kwargs\n )\n if root is None:\n root = model\n self._models[root.ref['id']] = (model, parent)\n self._managers[root.ref['id']] = manager\n return model\n\n def _update(self, ref: str, model: Model) -> None:\n if not self.interactive:\n self._update_dimensions()\n model.update(**self._get_properties())\n return\n manager = self._managers[ref]\n if self.object is not manager.canvas.figure:\n self.object.set_dpi(self.dpi)\n self.object.patch.set_alpha(0)\n manager.canvas.figure = self.object\n self.object.set_canvas(manager.canvas)\n event = {'width': manager.canvas._width,\n 'height': manager.canvas._height}\n manager.canvas.handle_resize(event)\n manager.canvas.draw_idle()\n\n def _data(self):\n try:\n self.object.set_dpi(self.dpi)\n except Exception as ex:\n raise Exception(\"The Matplotlib backend is not configured. 
Try adding `matplotlib.use('agg')`\") from ex\n b = BytesIO()\n\n if self.tight:\n bbox_inches = 'tight'\n else:\n bbox_inches = None\n\n self.object.canvas.print_figure(b, bbox_inches=bbox_inches)\n return b.getvalue()\n\n\nclass RGGPlot(PNG):\n \"\"\"\n An RGGPlot pane renders an r2py-based ggplot2 figure to png\n and wraps the base64-encoded data in a bokeh Div model.\n \"\"\"\n\n height = param.Integer(default=400)\n\n width = param.Integer(default=400)\n\n dpi = param.Integer(default=144, bounds=(1, None))\n\n _rerender_params = PNG._rerender_params + ['object', 'dpi', 'width', 'height']\n\n @classmethod\n def applies(cls, obj: Any) -> float | bool | None:\n return type(obj).__name__ == 'GGPlot' and hasattr(obj, 'r_repr')\n\n def _img(self):\n from rpy2 import robjects\n from rpy2.robjects.lib import grdevices\n with grdevices.render_to_bytesio(grdevices.png,\n type=\"cairo-png\", width=self.width, height=self.height,\n res=self.dpi, antialias=\"subpixel\") as b:\n robjects.r(\"print\")(self.object)\n return b.getvalue()\n\n\nclass YT(HTML):\n \"\"\"\n YT panes wrap plottable objects from the YT library.\n By default, the height and width are calculated by summing all\n contained plots, but can optionally be specified explicitly to\n provide additional space.\n \"\"\"\n\n priority: ClassVar[float | bool | None] = 0.5\n\n @classmethod\n def applies(cls, obj: bool) -> float | bool | None:\n return (getattr(obj, '__module__', '').startswith('yt.') and\n hasattr(obj, \"plots\") and\n hasattr(obj, \"_repr_html_\"))\n\n def _get_properties(self):\n p = super()._get_properties()\n if self.object is None:\n return p\n\n width = height = 0\n if self.width is None or self.height is None:\n for k,v in self.object.plots.items():\n if hasattr(v, \"_repr_png_\"):\n img = v._repr_png_()\n w,h = PNG._imgshape(img)\n height += h\n width = max(w, width)\n\n if self.width is None: p[\"width\"] = width\n if self.height is None: p[\"height\"] = height\n\n return p\n\n\nclass Folium(HTML):\n \"\"\"\n The Folium pane wraps Folium map components.\n \"\"\"\n\n sizing_mode = param.ObjectSelector(default='stretch_width', objects=[\n 'fixed', 'stretch_width', 'stretch_height', 'stretch_both',\n 'scale_width', 'scale_height', 'scale_both', None])\n\n priority: ClassVar[float | bool | None] = 0.6\n\n @classmethod\n def applies(cls, obj: Any) -> float | bool | None:\n return (getattr(obj, '__module__', '').startswith('folium.') and\n hasattr(obj, \"_repr_html_\"))\n\n def _get_properties(self):\n properties = super()._get_properties()\n text = '' if self.object is None else self.object\n if hasattr(text, '_repr_html_'):\n text = text._repr_html_().replace(FOLIUM_BEFORE, FOLIUM_AFTER)\n return dict(properties, text=escape(text))\n",
"path": "panel/pane/plot.py"
}
] | diff --git a/examples/reference/panes/HoloViews.ipynb b/examples/reference/panes/HoloViews.ipynb
index 18cd9f42d1..7e819f2754 100644
--- a/examples/reference/panes/HoloViews.ipynb
+++ b/examples/reference/panes/HoloViews.ipynb
@@ -208,6 +208,13 @@
"cell_type": "markdown",
"metadata": {},
"source": [
+ "Please note that in a *server context* you will also have to set the matplotlib backend like below\n",
+ "\n",
+ "```python\n",
+ "import matplotlib\n",
+ "matplotlib.use('agg')\n",
+ "```\n",
+ "\n",
"The ``backend``, like all other parameters, can be modified after the fact. To demonstrate, we can set up a select widget to toggle between backends for the above plot:"
]
},
@@ -224,9 +231,22 @@
}
],
"metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
"language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
"name": "python",
- "pygments_lexer": "ipython3"
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.7"
}
},
"nbformat": 4,
diff --git a/panel/pane/plot.py b/panel/pane/plot.py
index 852756900d..6fce60e8fd 100644
--- a/panel/pane/plot.py
+++ b/panel/pane/plot.py
@@ -292,7 +292,10 @@ def _update(self, ref: str, model: Model) -> None:
manager.canvas.draw_idle()
def _data(self):
- self.object.set_dpi(self.dpi)
+ try:
+ self.object.set_dpi(self.dpi)
+ except Exception as ex:
+ raise Exception("The Matplotlib backend is not configured. Try adding `matplotlib.use('agg')`") from ex
b = BytesIO()
if self.tight:
|
localstack__localstack-1217 | Docker image 0.8.7: S3 not working anymore?
We have been using `localstack/localstack:0.8.7` for the last few months, but suddenly the tests started failing. I am able to reproduce the issue.
**Docker compose:**
```
localstack:
image: localstack/localstack:0.8.7
ports:
- "8081:8080"
- "80:80"
networks:
skynet:
aliases:
- localstack
environment:
SERVICES: s3:80
DATA_DIR: /tmp/localstack/data
DEBUG: 1
```
Using curl I am able to create, get, and delete buckets, e.g. `curl -XPUT localhost:80/bigbrownfox`, and the corresponding request logs appear in the localstack Docker container.
However, the following test code using the S3 client (`software.amazon.awssdk.services.s3.S3Client`) fails:
**Kotlin code**
```
private val region = Region.US_EAST_1
private val s3 = S3Client.builder()
.endpointOverride(URI("http://localhost:80"))
.region(region)
.credentialsProvider(StaticCredentialsProvider.create(AwsCredentials.create("foo", "bar")))
.build()
val BUCKET_NAME = "bigbrownfox"
private fun bucketExists(): Boolean {
return s3.listBuckets().buckets().count { bucket -> bucket.name() == BUCKET_NAME } == 1
}
fun createBucket() {
val createBucketRequest = CreateBucketRequest.builder()
.bucket(BUCKET_NAME)
.createBucketConfiguration(CreateBucketConfiguration.builder()
.locationConstraint(region.value())
.build())
.build()
//EXCEPTION HERE!!
s3.createBucket(createBucketRequest)
}
fun main(args: Array<String>) {
println("EXIST-> ${bucketExists()}")
createBucket()
}
```
1. `bucketExists()` works, but `createBucket()` throws an exception:
```
Exception in thread "main" software.amazon.awssdk.core.exception.SdkClientException: Unable to execute HTTP request: The target server failed to respond
```
2. No `PUT` request appears in the Docker logs, so either localstack crashed before it could log the request, or the S3 client never sent it. Given that the S3 client is left waiting for a reply, the first case seems likely.
3. The issue correlates with the latest update pushed to the `0.8.7` tag.
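For comparison, a hypothetical boto3 equivalent of the Kotlin repro (not part of the original report; the endpoint, region, and dummy credentials mirror the docker-compose setup above), which exercises the same `PUT` path:

```python
import boto3

# Hypothetical boto3 version of the Kotlin repro above.
s3 = boto3.client(
    's3',
    endpoint_url='http://localhost:80',
    region_name='us-east-1',
    aws_access_key_id='foo',
    aws_secret_access_key='bar',
)

print([b['Name'] for b in s3.list_buckets()['Buckets']])  # works, like bucketExists()
s3.create_bucket(Bucket='bigbrownfox')                    # the PUT that never gets a response
```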
| [
{
"content": "import os\nimport sys\nimport ssl\nimport socket\nimport inspect\nimport logging\nimport traceback\nimport requests\nfrom flask_cors import CORS\nfrom requests.structures import CaseInsensitiveDict\nfrom requests.models import Response, Request\nfrom six import iteritems\nfrom six.moves.socketserver import ThreadingMixIn\nfrom six.moves.urllib.parse import urlparse\nfrom six.moves.BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\nfrom localstack.config import TMP_FOLDER, USE_SSL\nfrom localstack.constants import ENV_INTERNAL_TEST_RUN\nfrom localstack.utils.common import FuncThread, generate_ssl_cert, to_bytes\n\nQUIET = False\n\n# path for test certificate\nSERVER_CERT_PEM_FILE = '%s/server.test.pem' % (TMP_FOLDER)\n\n# CORS settings\nCORS_ALLOWED_HEADERS = ('authorization', 'content-type', 'content-md5', 'cache-control',\n 'x-amz-content-sha256', 'x-amz-date', 'x-amz-security-token', 'x-amz-user-agent')\nCORS_ALLOWED_METHODS = ('HEAD', 'GET', 'PUT', 'POST', 'DELETE', 'OPTIONS', 'PATCH')\n\n# set up logger\nLOGGER = logging.getLogger(__name__)\n\n\nclass ThreadedHTTPServer(ThreadingMixIn, HTTPServer):\n \"\"\"Handle each request in a separate thread.\"\"\"\n daemon_threads = True\n\n\nclass ProxyListener(object):\n\n def forward_request(self, method, path, data, headers):\n \"\"\" This interceptor method is called by the proxy when receiving a new request\n (*before* forwarding the request to the backend service). It receives details\n of the incoming request, and returns either of the following results:\n\n * True if the request should be forwarded to the backend service as-is (default).\n * An integer (e.g., 200) status code to return directly to the client without\n calling the backend service.\n * An instance of requests.models.Response to return directly to the client without\n calling the backend service.\n * An instance of requests.models.Request which represents a new/modified request\n that will be forwarded to the backend service.\n * Any other value, in which case a 503 Bad Gateway is returned to the client\n without calling the backend service.\n \"\"\"\n return True\n\n def return_response(self, method, path, data, headers, response):\n \"\"\" This interceptor method is called by the proxy when returning a response\n (*after* having forwarded the request and received a response from the backend\n service). 
It receives details of the incoming request as well as the response\n from the backend service, and returns either of the following results:\n\n * An instance of requests.models.Response to return to the client instead of the\n actual response returned from the backend service.\n * Any other value, in which case the response from the backend service is\n returned to the client.\n \"\"\"\n return None\n\n\nclass GenericProxyHandler(BaseHTTPRequestHandler):\n\n # List of `ProxyListener` instances that are enabled by default for all requests\n DEFAULT_LISTENERS = []\n\n def __init__(self, request, client_address, server):\n self.request = request\n self.client_address = client_address\n self.server = server\n self.proxy = server.my_object\n self.data_bytes = None\n self.protocol_version = self.proxy.protocol_version\n BaseHTTPRequestHandler.__init__(self, request, client_address, server)\n\n def parse_request(self):\n result = BaseHTTPRequestHandler.parse_request(self)\n if not result:\n return result\n if sys.version_info[0] >= 3:\n return result\n # Required fix for Python 2 (otherwise S3 uploads are hanging), based on the Python 3 code:\n # https://sourcecodebrowser.com/python3.2/3.2.3/http_2server_8py_source.html#l00332\n expect = self.headers.get('Expect', '')\n if (expect.lower() == '100-continue' and\n self.protocol_version >= 'HTTP/1.1' and\n self.request_version >= 'HTTP/1.1'):\n if self.request_version != 'HTTP/0.9':\n self.wfile.write(('%s %d %s\\r\\n' %\n (self.protocol_version, 100, 'Continue')).encode('latin1', 'strict'))\n self.end_headers()\n return result\n\n def do_GET(self):\n self.method = requests.get\n self.read_content()\n self.forward('GET')\n\n def do_PUT(self):\n self.method = requests.put\n self.read_content()\n self.forward('PUT')\n\n def do_POST(self):\n self.method = requests.post\n self.read_content()\n self.forward('POST')\n\n def do_DELETE(self):\n self.data_bytes = None\n self.method = requests.delete\n self.forward('DELETE')\n\n def do_HEAD(self):\n self.data_bytes = None\n self.method = requests.head\n self.forward('HEAD')\n\n def do_PATCH(self):\n self.method = requests.patch\n self.read_content()\n self.forward('PATCH')\n\n def do_OPTIONS(self):\n self.data_bytes = None\n self.method = requests.options\n self.forward('OPTIONS')\n\n def read_content(self):\n content_length = self.headers.get('Content-Length')\n if content_length:\n self.data_bytes = self.rfile.read(int(content_length))\n else:\n self.data_bytes = None\n if self.method in (requests.post, requests.put):\n # If the Content-Length header is missing, try to read\n # content from the socket using a socket timeout.\n socket_timeout_secs = 0.5\n self.request.settimeout(socket_timeout_secs)\n while True:\n try:\n # TODO find a more efficient way to do this!\n tmp = self.rfile.read(1)\n if self.data_bytes is None:\n self.data_bytes = tmp\n else:\n self.data_bytes += tmp\n except socket.timeout:\n break\n\n def build_x_forwarded_for(self, headers):\n x_forwarded_for = headers.get('X-Forwarded-For')\n\n client_address = self.client_address[0]\n server_address = ':'.join(map(str, self.server.server_address))\n\n if x_forwarded_for:\n x_forwarded_for_list = (x_forwarded_for, client_address, server_address)\n else:\n x_forwarded_for_list = (client_address, server_address)\n\n return ', '.join(x_forwarded_for_list)\n\n def forward(self, method):\n path = self.path\n if '://' in path:\n path = '/' + path.split('://', 1)[1].split('/', 1)[1]\n proxy_url = '%s%s' % (self.proxy.forward_url, path)\n target_url 
= self.path\n if '://' not in target_url:\n target_url = '%s%s' % (self.proxy.forward_url, target_url)\n data = self.data_bytes\n\n forward_headers = CaseInsensitiveDict(self.headers)\n # update original \"Host\" header (moto s3 relies on this behavior)\n if not forward_headers.get('Host'):\n forward_headers['host'] = urlparse(target_url).netloc\n if 'localhost.atlassian.io' in forward_headers.get('Host'):\n forward_headers['host'] = 'localhost'\n\n forward_headers['X-Forwarded-For'] = self.build_x_forwarded_for(forward_headers)\n\n try:\n response = None\n modified_request = None\n # update listener (pre-invocation)\n for listener in self.DEFAULT_LISTENERS + [self.proxy.update_listener]:\n if not listener:\n continue\n listener_result = listener.forward_request(method=method,\n path=path, data=data, headers=forward_headers)\n if isinstance(listener_result, Response):\n response = listener_result\n break\n elif isinstance(listener_result, Request):\n modified_request = listener_result\n data = modified_request.data\n forward_headers = modified_request.headers\n break\n elif listener_result is not True:\n # get status code from response, or use Bad Gateway status code\n code = listener_result if isinstance(listener_result, int) else 503\n self.send_response(code)\n self.end_headers()\n return\n # perform the actual invocation of the backend service\n if response is None:\n if modified_request:\n response = self.method(proxy_url, data=modified_request.data,\n headers=modified_request.headers, stream=True)\n else:\n response = self.method(proxy_url, data=self.data_bytes,\n headers=forward_headers, stream=True)\n # prevent requests from processing response body\n if not response._content_consumed and response.raw:\n response._content = response.raw.read()\n # update listener (post-invocation)\n if self.proxy.update_listener:\n kwargs = {\n 'method': method,\n 'path': path,\n 'data': data,\n 'headers': forward_headers,\n 'response': response\n }\n if 'request_handler' in inspect.getargspec(self.proxy.update_listener.return_response)[0]:\n # some listeners (e.g., sqs_listener.py) require additional details like the original\n # request port, hence we pass in a reference to this request handler as well.\n kwargs['request_handler'] = self\n updated_response = self.proxy.update_listener.return_response(**kwargs)\n if isinstance(updated_response, Response):\n response = updated_response\n\n # copy headers and return response\n self.send_response(response.status_code)\n\n content_length_sent = False\n for header_key, header_value in iteritems(response.headers):\n # filter out certain headers that we don't want to transmit\n if header_key.lower() not in ('transfer-encoding', 'date', 'server'):\n self.send_header(header_key, header_value)\n content_length_sent = content_length_sent or header_key.lower() == 'content-length'\n if not content_length_sent:\n self.send_header('Content-Length', '%s' % len(response.content) if response.content else 0)\n\n # allow pre-flight CORS headers by default\n if 'Access-Control-Allow-Origin' not in response.headers:\n self.send_header('Access-Control-Allow-Origin', '*')\n if 'Access-Control-Allow-Methods' not in response.headers:\n self.send_header('Access-Control-Allow-Methods', ','.join(CORS_ALLOWED_METHODS))\n if 'Access-Control-Allow-Headers' not in response.headers:\n self.send_header('Access-Control-Allow-Headers', ','.join(CORS_ALLOWED_HEADERS))\n\n self.end_headers()\n if response.content and len(response.content):\n 
self.wfile.write(to_bytes(response.content))\n self.wfile.flush()\n except Exception as e:\n trace = str(traceback.format_exc())\n conn_errors = ('ConnectionRefusedError', 'NewConnectionError')\n conn_error = any(e in trace for e in conn_errors)\n error_msg = 'Error forwarding request: %s %s' % (e, trace)\n if 'Broken pipe' in trace:\n LOGGER.warn('Connection prematurely closed by client (broken pipe).')\n elif not self.proxy.quiet or not conn_error:\n LOGGER.error(error_msg)\n if os.environ.get(ENV_INTERNAL_TEST_RUN):\n # During a test run, we also want to print error messages, because\n # log messages are delayed until the entire test run is over, and\n # hence we are missing messages if the test hangs for some reason.\n print('ERROR: %s' % error_msg)\n self.send_response(502) # bad gateway\n self.end_headers()\n finally:\n # force close connection\n self.close_connection = 1\n\n def log_message(self, format, *args):\n return\n\n\nclass GenericProxy(FuncThread):\n def __init__(self, port, forward_url=None, ssl=False, host=None, update_listener=None, quiet=False, params={}):\n FuncThread.__init__(self, self.run_cmd, params, quiet=quiet)\n self.httpd = None\n self.port = port\n self.ssl = ssl\n self.quiet = quiet\n if forward_url:\n if '://' not in forward_url:\n forward_url = 'http://%s' % forward_url\n forward_url = forward_url.rstrip('/')\n self.forward_url = forward_url\n self.update_listener = update_listener\n self.server_stopped = False\n # Required to enable 'Connection: keep-alive' for S3 uploads\n self.protocol_version = params.get('protocol_version') or 'HTTP/1.1'\n self.listen_host = host or ''\n\n def run_cmd(self, params):\n try:\n self.httpd = ThreadedHTTPServer((self.listen_host, self.port), GenericProxyHandler)\n if self.ssl:\n # make sure we have a cert generated\n combined_file, cert_file_name, key_file_name = GenericProxy.create_ssl_cert()\n self.httpd.socket = ssl.wrap_socket(self.httpd.socket,\n server_side=True, certfile=combined_file)\n self.httpd.my_object = self\n self.httpd.serve_forever()\n except Exception as e:\n if not self.quiet or not self.server_stopped:\n LOGGER.error('Exception running proxy on port %s: %s %s' % (self.port, e, traceback.format_exc()))\n\n def stop(self, quiet=False):\n self.quiet = quiet\n if self.httpd:\n self.httpd.server_close()\n self.server_stopped = True\n\n @classmethod\n def create_ssl_cert(cls, random=True):\n return generate_ssl_cert(SERVER_CERT_PEM_FILE, random=random)\n\n @classmethod\n def get_flask_ssl_context(cls):\n if USE_SSL:\n combined_file, cert_file_name, key_file_name = cls.create_ssl_cert()\n return (cert_file_name, key_file_name)\n return None\n\n\ndef serve_flask_app(app, port, quiet=True, host=None, cors=True):\n if cors:\n CORS(app)\n if quiet:\n log = logging.getLogger('werkzeug')\n log.setLevel(logging.ERROR)\n if not host:\n host = '0.0.0.0'\n ssl_context = GenericProxy.get_flask_ssl_context()\n app.run(port=int(port), threaded=True, host=host, ssl_context=ssl_context)\n return app\n",
"path": "localstack/services/generic_proxy.py"
}
] | [
{
"content": "import os\nimport sys\nimport ssl\nimport socket\nimport inspect\nimport logging\nimport traceback\nimport requests\nfrom flask_cors import CORS\nfrom requests.structures import CaseInsensitiveDict\nfrom requests.models import Response, Request\nfrom six import iteritems\nfrom six.moves.socketserver import ThreadingMixIn\nfrom six.moves.urllib.parse import urlparse\nfrom six.moves.BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\nfrom localstack.config import TMP_FOLDER, USE_SSL\nfrom localstack.constants import ENV_INTERNAL_TEST_RUN\nfrom localstack.utils.common import FuncThread, generate_ssl_cert, to_bytes\n\nQUIET = False\n\n# path for test certificate\nSERVER_CERT_PEM_FILE = '%s/server.test.pem' % (TMP_FOLDER)\n\n# CORS settings\nCORS_ALLOWED_HEADERS = ('authorization', 'content-type', 'content-md5', 'cache-control',\n 'x-amz-content-sha256', 'x-amz-date', 'x-amz-security-token', 'x-amz-user-agent')\nCORS_ALLOWED_METHODS = ('HEAD', 'GET', 'PUT', 'POST', 'DELETE', 'OPTIONS', 'PATCH')\n\n# set up logger\nLOGGER = logging.getLogger(__name__)\n\n\nclass ThreadedHTTPServer(ThreadingMixIn, HTTPServer):\n \"\"\"Handle each request in a separate thread.\"\"\"\n daemon_threads = True\n\n\nclass ProxyListener(object):\n\n def forward_request(self, method, path, data, headers):\n \"\"\" This interceptor method is called by the proxy when receiving a new request\n (*before* forwarding the request to the backend service). It receives details\n of the incoming request, and returns either of the following results:\n\n * True if the request should be forwarded to the backend service as-is (default).\n * An integer (e.g., 200) status code to return directly to the client without\n calling the backend service.\n * An instance of requests.models.Response to return directly to the client without\n calling the backend service.\n * An instance of requests.models.Request which represents a new/modified request\n that will be forwarded to the backend service.\n * Any other value, in which case a 503 Bad Gateway is returned to the client\n without calling the backend service.\n \"\"\"\n return True\n\n def return_response(self, method, path, data, headers, response):\n \"\"\" This interceptor method is called by the proxy when returning a response\n (*after* having forwarded the request and received a response from the backend\n service). 
It receives details of the incoming request as well as the response\n from the backend service, and returns either of the following results:\n\n * An instance of requests.models.Response to return to the client instead of the\n actual response returned from the backend service.\n * Any other value, in which case the response from the backend service is\n returned to the client.\n \"\"\"\n return None\n\n\nclass GenericProxyHandler(BaseHTTPRequestHandler):\n\n # List of `ProxyListener` instances that are enabled by default for all requests\n DEFAULT_LISTENERS = []\n\n def __init__(self, request, client_address, server):\n self.request = request\n self.client_address = client_address\n self.server = server\n self.proxy = server.my_object\n self.data_bytes = None\n self.protocol_version = self.proxy.protocol_version\n BaseHTTPRequestHandler.__init__(self, request, client_address, server)\n\n def parse_request(self):\n result = BaseHTTPRequestHandler.parse_request(self)\n if not result:\n return result\n if sys.version_info[0] >= 3:\n return result\n # Required fix for Python 2 (otherwise S3 uploads are hanging), based on the Python 3 code:\n # https://sourcecodebrowser.com/python3.2/3.2.3/http_2server_8py_source.html#l00332\n expect = self.headers.get('Expect', '')\n if (expect.lower() == '100-continue' and\n self.protocol_version >= 'HTTP/1.1' and\n self.request_version >= 'HTTP/1.1'):\n if self.request_version != 'HTTP/0.9':\n self.wfile.write(('%s %d %s\\r\\n' %\n (self.protocol_version, 100, 'Continue')).encode('latin1', 'strict'))\n self.end_headers()\n return result\n\n def do_GET(self):\n self.method = requests.get\n self.read_content()\n self.forward('GET')\n\n def do_PUT(self):\n self.method = requests.put\n self.read_content()\n self.forward('PUT')\n\n def do_POST(self):\n self.method = requests.post\n self.read_content()\n self.forward('POST')\n\n def do_DELETE(self):\n self.data_bytes = None\n self.method = requests.delete\n self.forward('DELETE')\n\n def do_HEAD(self):\n self.data_bytes = None\n self.method = requests.head\n self.forward('HEAD')\n\n def do_PATCH(self):\n self.method = requests.patch\n self.read_content()\n self.forward('PATCH')\n\n def do_OPTIONS(self):\n self.data_bytes = None\n self.method = requests.options\n self.forward('OPTIONS')\n\n def read_content(self):\n content_length = self.headers.get('Content-Length')\n if content_length:\n self.data_bytes = self.rfile.read(int(content_length))\n else:\n self.data_bytes = None\n if self.method in (requests.post, requests.put):\n # If the Content-Length header is missing, try to read\n # content from the socket using a socket timeout.\n socket_timeout_secs = 0.5\n self.request.settimeout(socket_timeout_secs)\n while True:\n try:\n # TODO find a more efficient way to do this!\n tmp = self.rfile.read(1)\n if self.data_bytes is None:\n self.data_bytes = tmp\n else:\n self.data_bytes += tmp\n except socket.timeout:\n break\n\n def build_x_forwarded_for(self, headers):\n x_forwarded_for = headers.get('X-Forwarded-For')\n\n client_address = self.client_address[0]\n server_address = ':'.join(map(str, self.server.server_address))\n\n if x_forwarded_for:\n x_forwarded_for_list = (x_forwarded_for, client_address, server_address)\n else:\n x_forwarded_for_list = (client_address, server_address)\n\n return ', '.join(x_forwarded_for_list)\n\n def forward(self, method):\n path = self.path\n if '://' in path:\n path = '/' + path.split('://', 1)[1].split('/', 1)[1]\n proxy_url = '%s%s' % (self.proxy.forward_url, path)\n target_url 
= self.path\n if '://' not in target_url:\n target_url = '%s%s' % (self.proxy.forward_url, target_url)\n data = self.data_bytes\n\n forward_headers = CaseInsensitiveDict(self.headers)\n # update original \"Host\" header (moto s3 relies on this behavior)\n if not forward_headers.get('Host'):\n forward_headers['host'] = urlparse(target_url).netloc\n if 'localhost.atlassian.io' in forward_headers.get('Host'):\n forward_headers['host'] = 'localhost'\n\n forward_headers['X-Forwarded-For'] = self.build_x_forwarded_for(forward_headers)\n\n try:\n response = None\n modified_request = None\n # update listener (pre-invocation)\n for listener in self.DEFAULT_LISTENERS + [self.proxy.update_listener]:\n if not listener:\n continue\n listener_result = listener.forward_request(method=method,\n path=path, data=data, headers=forward_headers)\n if isinstance(listener_result, Response):\n response = listener_result\n break\n elif isinstance(listener_result, Request):\n modified_request = listener_result\n data = modified_request.data\n forward_headers = modified_request.headers\n break\n elif listener_result is not True:\n # get status code from response, or use Bad Gateway status code\n code = listener_result if isinstance(listener_result, int) else 503\n self.send_response(code)\n self.end_headers()\n return\n # perform the actual invocation of the backend service\n if response is None:\n if modified_request:\n response = self.method(proxy_url, data=modified_request.data,\n headers=modified_request.headers, stream=True)\n else:\n response = self.method(proxy_url, data=self.data_bytes,\n headers=forward_headers, stream=True)\n # prevent requests from processing response body\n if not response._content_consumed and response.raw:\n response._content = response.raw.read()\n # update listener (post-invocation)\n if self.proxy.update_listener:\n kwargs = {\n 'method': method,\n 'path': path,\n 'data': data,\n 'headers': forward_headers,\n 'response': response\n }\n if 'request_handler' in inspect.getargspec(self.proxy.update_listener.return_response)[0]:\n # some listeners (e.g., sqs_listener.py) require additional details like the original\n # request port, hence we pass in a reference to this request handler as well.\n kwargs['request_handler'] = self\n updated_response = self.proxy.update_listener.return_response(**kwargs)\n if isinstance(updated_response, Response):\n response = updated_response\n\n # copy headers and return response\n self.send_response(response.status_code)\n\n content_length_sent = False\n for header_key, header_value in iteritems(response.headers):\n # filter out certain headers that we don't want to transmit\n if header_key.lower() not in ('transfer-encoding', 'date', 'server'):\n self.send_header(header_key, header_value)\n content_length_sent = content_length_sent or header_key.lower() == 'content-length'\n if not content_length_sent:\n self.send_header('Content-Length', '%s' % len(response.content) if response.content else 0)\n\n # allow pre-flight CORS headers by default\n if 'Access-Control-Allow-Origin' not in response.headers:\n self.send_header('Access-Control-Allow-Origin', '*')\n if 'Access-Control-Allow-Methods' not in response.headers:\n self.send_header('Access-Control-Allow-Methods', ','.join(CORS_ALLOWED_METHODS))\n if 'Access-Control-Allow-Headers' not in response.headers:\n self.send_header('Access-Control-Allow-Headers', ','.join(CORS_ALLOWED_HEADERS))\n\n self.end_headers()\n if response.content and len(response.content):\n 
self.wfile.write(to_bytes(response.content))\n self.wfile.flush()\n except Exception as e:\n trace = str(traceback.format_exc())\n conn_errors = ('ConnectionRefusedError', 'NewConnectionError')\n conn_error = any(e in trace for e in conn_errors)\n error_msg = 'Error forwarding request: %s %s' % (e, trace)\n if 'Broken pipe' in trace:\n LOGGER.warn('Connection prematurely closed by client (broken pipe).')\n elif not self.proxy.quiet or not conn_error:\n LOGGER.error(error_msg)\n if os.environ.get(ENV_INTERNAL_TEST_RUN):\n # During a test run, we also want to print error messages, because\n # log messages are delayed until the entire test run is over, and\n # hence we are missing messages if the test hangs for some reason.\n print('ERROR: %s' % error_msg)\n self.send_response(502) # bad gateway\n self.end_headers()\n # force close connection\n self.close_connection = 1\n\n def log_message(self, format, *args):\n return\n\n\nclass GenericProxy(FuncThread):\n def __init__(self, port, forward_url=None, ssl=False, host=None, update_listener=None, quiet=False, params={}):\n FuncThread.__init__(self, self.run_cmd, params, quiet=quiet)\n self.httpd = None\n self.port = port\n self.ssl = ssl\n self.quiet = quiet\n if forward_url:\n if '://' not in forward_url:\n forward_url = 'http://%s' % forward_url\n forward_url = forward_url.rstrip('/')\n self.forward_url = forward_url\n self.update_listener = update_listener\n self.server_stopped = False\n # Required to enable 'Connection: keep-alive' for S3 uploads\n self.protocol_version = params.get('protocol_version') or 'HTTP/1.1'\n self.listen_host = host or ''\n\n def run_cmd(self, params):\n try:\n self.httpd = ThreadedHTTPServer((self.listen_host, self.port), GenericProxyHandler)\n if self.ssl:\n # make sure we have a cert generated\n combined_file, cert_file_name, key_file_name = GenericProxy.create_ssl_cert()\n self.httpd.socket = ssl.wrap_socket(self.httpd.socket,\n server_side=True, certfile=combined_file)\n self.httpd.my_object = self\n self.httpd.serve_forever()\n except Exception as e:\n if not self.quiet or not self.server_stopped:\n LOGGER.error('Exception running proxy on port %s: %s %s' % (self.port, e, traceback.format_exc()))\n\n def stop(self, quiet=False):\n self.quiet = quiet\n if self.httpd:\n self.httpd.server_close()\n self.server_stopped = True\n\n @classmethod\n def create_ssl_cert(cls, random=True):\n return generate_ssl_cert(SERVER_CERT_PEM_FILE, random=random)\n\n @classmethod\n def get_flask_ssl_context(cls):\n if USE_SSL:\n combined_file, cert_file_name, key_file_name = cls.create_ssl_cert()\n return (cert_file_name, key_file_name)\n return None\n\n\ndef serve_flask_app(app, port, quiet=True, host=None, cors=True):\n if cors:\n CORS(app)\n if quiet:\n log = logging.getLogger('werkzeug')\n log.setLevel(logging.ERROR)\n if not host:\n host = '0.0.0.0'\n ssl_context = GenericProxy.get_flask_ssl_context()\n app.run(port=int(port), threaded=True, host=host, ssl_context=ssl_context)\n return app\n",
"path": "localstack/services/generic_proxy.py"
}
] | diff --git a/localstack/services/generic_proxy.py b/localstack/services/generic_proxy.py
index 9b15094a2f77a..cbe9904d1ae0b 100644
--- a/localstack/services/generic_proxy.py
+++ b/localstack/services/generic_proxy.py
@@ -281,7 +281,6 @@ def forward(self, method):
print('ERROR: %s' % error_msg)
self.send_response(502) # bad gateway
self.end_headers()
- finally:
# force close connection
self.close_connection = 1
diff --git a/tests/integration/test_lambda.py b/tests/integration/test_lambda.py
index 02ba12358aa9d..7156bc5874392 100644
--- a/tests/integration/test_lambda.py
+++ b/tests/integration/test_lambda.py
@@ -4,6 +4,7 @@
import time
import unittest
from io import BytesIO
+from requests.models import Response
from localstack import config
from localstack.constants import LOCALSTACK_ROOT_FOLDER, LOCALSTACK_MAVEN_VERSION
from localstack.utils import testutil
@@ -85,6 +86,10 @@ def test_forward_to_fallback_url_http(self):
class MyUpdateListener(ProxyListener):
def forward_request(self, method, path, data, headers):
records.append(data)
+ response = Response()
+ response.status_code = 200
+ response._content = ''
+ return response
records = []
local_port = get_free_tcp_port()
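For context, a standalone sketch of the listener contract the new test exercises: returning a `requests.models.Response` from `forward_request` answers the client directly instead of forwarding to the backend (the class name here is hypothetical):
```
from requests.models import Response
from localstack.services.generic_proxy import ProxyListener

class RecordingListener(ProxyListener):
    """Records request bodies and short-circuits the proxy."""

    def __init__(self):
        self.records = []

    def forward_request(self, method, path, data, headers):
        # Returning a Response instance makes the proxy send it to the
        # client without invoking the backend service.
        self.records.append(data)
        response = Response()
        response.status_code = 200
        response._content = ''
        return response
```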
|
google__turbinia-1099 | plaso VSS option incorrect
https://github.com/log2timeline/plaso/blob/9cc50c972b257d6cbbea38fa8b39f0bf027e0960/plaso/cli/storage_media_tool.py#L581
^ the option should be `--no_vss` at the location below:
https://github.com/google/turbinia/blob/86158a95a0b134978628c1680d0997667ec7c935/turbinia/workers/plaso.py#L43
Please check how this will work if recipes pass in the `--vss_stores` option.
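For reference, a minimal sketch of the corrected flag pair, mirroring the `binary_extractor.py` fix recorded in the diff below (the log path here is hypothetical, and flag support depends on the deployed plaso version):
```
# Per the fix below, image_export.py is invoked with '--vss_stores none'
# rather than '--no_vss' to skip Volume Shadow Snapshots.
image_export_log = '/tmp/binary_extraction.log'  # hypothetical path
cmd = [
    'image_export.py', '--partitions', 'all', '--volumes', 'all',
    '--vss_stores', 'none', '--unattended', '--logfile', image_export_log,
]
```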
| [
{
"content": "# -*- coding: utf-8 -*-\n# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Task to extract binary files from an evidence object provided.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport json\nimport os\nimport textwrap\n\nfrom turbinia import TurbiniaException\nfrom turbinia import config\nfrom turbinia.evidence import EvidenceState as state\nfrom turbinia.workers import TurbiniaTask\nfrom turbinia.evidence import BinaryExtraction\n\n\nclass BinaryExtractorTask(TurbiniaTask):\n \"\"\"Extract binaries out of evidence and provide JSON file with hashes.\n\n Attributes:\n json_path(str): path to output JSON file.\n binary_extraction_dir(str): path to extraction directory.\n \"\"\"\n\n REQUIRED_STATES = [state.ATTACHED]\n\n TASK_CONFIG = {\n # This is an arbitrary path that will be put into a custom artifact\n # definition so that the files at this path are extracted. See the path\n # specification format in the ForensicArtifacts documentation:\n # https://artifacts.readthedocs.io/en/latest/sources/Format-specification.html\n 'binary_extraction_path': None\n }\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initializes BinaryExtractorTask.\"\"\"\n super(BinaryExtractorTask, self).__init__(*args, **kwargs)\n self.json_path = None\n self.binary_extraction_dir = None\n\n def check_extraction(self):\n \"\"\"Checks counts for extracted binaries and hashes.\n\n Returns:\n Tuple(\n binary_cnt(int): Number of extracted binaries.\n hash_cnt(int): Number of extracted hashes.\n )\n \"\"\"\n\n # Check if hashes.json file was generated.\n if not os.path.exists(self.json_path):\n raise TurbiniaException(\n 'The file {0:s} was not found. 
Please ensure you '\n 'have Plaso version 20191203 or greater deployed'.format(\n self.json_path))\n\n with open(self.json_path) as json_file:\n hashes = json.load(json_file)\n\n binary_cnt = sum(\n len(files) for _, _, files in os.walk(self.binary_extraction_dir)) - 1\n hash_cnt = len(hashes)\n\n return (binary_cnt, hash_cnt)\n\n def run(self, evidence, result):\n \"\"\"Task that extracts binaries with image_export.py.\n\n Args:\n evidence (Evidence object): The evidence we will process.\n result (TurbiniaTaskResult): The object to place task results into.\n\n Returns:\n TurbiniaTaskResult object.\n \"\"\"\n\n config.LoadConfig()\n binary_extraction_evidence = BinaryExtraction()\n\n binary_extraction_evidence.local_path = self.output_dir\n binary_extraction_evidence.uncompressed_directory = self.output_dir\n image_export_log = os.path.join(self.output_dir, 'binary_extraction.log')\n self.binary_extraction_dir = os.path.join(\n self.output_dir, 'extracted_binaries')\n self.json_path = os.path.join(self.binary_extraction_dir, 'hashes.json')\n\n cmd = [\n 'image_export.py', '--partitions', 'all', '--volumes', 'all',\n '--no_vss', '--unattended', '--logfile', image_export_log\n ]\n\n if self.task_config.get('binary_extraction_path'):\n artifact_dir = os.path.join(self.tmp_dir, 'artifacts')\n artifact_file = os.path.join(artifact_dir, 'artifacts.yaml')\n os.mkdir(artifact_dir)\n binary_extraction_path = self.task_config.get('binary_extraction_path')\n result.log(\n 'Using custom artifact path {0:s}'.format(binary_extraction_path))\n\n artifact_text = textwrap.dedent(\n \"\"\"\n name: TurbiniaCustomArtifact\n doc: Ad hoc artifact created for file extraction.\n sources:\n - type: FILE\n attributes:\n paths: ['{0:s}']\n \"\"\")\n artifact_text = artifact_text.format(binary_extraction_path)\n\n with open(artifact_file, 'wb') as artifact:\n artifact.write(artifact_text.encode('utf-8'))\n cmd.extend([\n '--custom_artifact_definitions', artifact_file, '--artifact_filters',\n 'TurbiniaCustomArtifact'\n ])\n else:\n cmd.extend(['--signatures', 'elf,exe_mz'])\n\n if evidence.credentials:\n for credential_type, credential_data in evidence.credentials:\n cmd.extend([\n '--credential', '{0:s}:{1:s}'.format(\n credential_type, credential_data)\n ])\n\n if config.DEBUG_TASKS or self.task_config.get('debug_tasks'):\n cmd.append('-d')\n cmd.extend(['-w', self.binary_extraction_dir, evidence.local_path])\n\n result.log('Running image_export as [{0:s}]'.format(' '.join(cmd)))\n self.execute(\n cmd, result, log_files=[image_export_log, self.json_path],\n new_evidence=[binary_extraction_evidence])\n\n try:\n binary_cnt, hash_cnt = self.check_extraction()\n except TurbiniaException as exception:\n message = 'File extraction failed: {0!s}'.format(exception)\n result.close(self, success=False, status=message)\n return result\n\n status = (\n 'Extracted {0:d} hashes and {1:d} files from the '\n 'evidence.'.format(hash_cnt, binary_cnt))\n\n if hash_cnt != binary_cnt:\n result.log(\n 'Number of extracted binaries is not equal to the number '\n 'of extracted hashes. This might indicate issues with '\n 'image_export.py. Check binary_extraction.log for more '\n 'details.', logging.WARNING)\n\n binary_extraction_evidence.compress()\n result.close(self, success=True, status=status)\n\n return result\n",
"path": "turbinia/workers/binary_extractor.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Task to extract binary files from an evidence object provided.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport json\nimport os\nimport textwrap\n\nfrom turbinia import TurbiniaException\nfrom turbinia import config\nfrom turbinia.evidence import EvidenceState as state\nfrom turbinia.workers import TurbiniaTask\nfrom turbinia.evidence import BinaryExtraction\n\n\nclass BinaryExtractorTask(TurbiniaTask):\n \"\"\"Extract binaries out of evidence and provide JSON file with hashes.\n\n Attributes:\n json_path(str): path to output JSON file.\n binary_extraction_dir(str): path to extraction directory.\n \"\"\"\n\n REQUIRED_STATES = [state.ATTACHED]\n\n TASK_CONFIG = {\n # This is an arbitrary path that will be put into a custom artifact\n # definition so that the files at this path are extracted. See the path\n # specification format in the ForensicArtifacts documentation:\n # https://artifacts.readthedocs.io/en/latest/sources/Format-specification.html\n 'binary_extraction_path': None\n }\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initializes BinaryExtractorTask.\"\"\"\n super(BinaryExtractorTask, self).__init__(*args, **kwargs)\n self.json_path = None\n self.binary_extraction_dir = None\n\n def check_extraction(self):\n \"\"\"Checks counts for extracted binaries and hashes.\n\n Returns:\n Tuple(\n binary_cnt(int): Number of extracted binaries.\n hash_cnt(int): Number of extracted hashes.\n )\n \"\"\"\n\n # Check if hashes.json file was generated.\n if not os.path.exists(self.json_path):\n raise TurbiniaException(\n 'The file {0:s} was not found. 
Please ensure you '\n 'have Plaso version 20191203 or greater deployed'.format(\n self.json_path))\n\n with open(self.json_path) as json_file:\n hashes = json.load(json_file)\n\n binary_cnt = sum(\n len(files) for _, _, files in os.walk(self.binary_extraction_dir)) - 1\n hash_cnt = len(hashes)\n\n return (binary_cnt, hash_cnt)\n\n def run(self, evidence, result):\n \"\"\"Task that extracts binaries with image_export.py.\n\n Args:\n evidence (Evidence object): The evidence we will process.\n result (TurbiniaTaskResult): The object to place task results into.\n\n Returns:\n TurbiniaTaskResult object.\n \"\"\"\n\n config.LoadConfig()\n binary_extraction_evidence = BinaryExtraction()\n\n binary_extraction_evidence.local_path = self.output_dir\n binary_extraction_evidence.uncompressed_directory = self.output_dir\n image_export_log = os.path.join(self.output_dir, 'binary_extraction.log')\n self.binary_extraction_dir = os.path.join(\n self.output_dir, 'extracted_binaries')\n self.json_path = os.path.join(self.binary_extraction_dir, 'hashes.json')\n\n cmd = [\n 'image_export.py', '--partitions', 'all', '--volumes', 'all',\n '--vss_stores', 'none', '--unattended', '--logfile', image_export_log\n ]\n\n if self.task_config.get('binary_extraction_path'):\n artifact_dir = os.path.join(self.tmp_dir, 'artifacts')\n artifact_file = os.path.join(artifact_dir, 'artifacts.yaml')\n os.mkdir(artifact_dir)\n binary_extraction_path = self.task_config.get('binary_extraction_path')\n result.log(\n 'Using custom artifact path {0:s}'.format(binary_extraction_path))\n\n artifact_text = textwrap.dedent(\n \"\"\"\n name: TurbiniaCustomArtifact\n doc: Ad hoc artifact created for file extraction.\n sources:\n - type: FILE\n attributes:\n paths: ['{0:s}']\n \"\"\")\n artifact_text = artifact_text.format(binary_extraction_path)\n\n with open(artifact_file, 'wb') as artifact:\n artifact.write(artifact_text.encode('utf-8'))\n cmd.extend([\n '--custom_artifact_definitions', artifact_file, '--artifact_filters',\n 'TurbiniaCustomArtifact'\n ])\n else:\n cmd.extend(['--signatures', 'elf,exe_mz'])\n\n if evidence.credentials:\n for credential_type, credential_data in evidence.credentials:\n cmd.extend([\n '--credential', '{0:s}:{1:s}'.format(\n credential_type, credential_data)\n ])\n\n if config.DEBUG_TASKS or self.task_config.get('debug_tasks'):\n cmd.append('-d')\n cmd.extend(['-w', self.binary_extraction_dir, evidence.local_path])\n\n result.log('Running image_export as [{0:s}]'.format(' '.join(cmd)))\n self.execute(\n cmd, result, log_files=[image_export_log, self.json_path],\n new_evidence=[binary_extraction_evidence])\n\n try:\n binary_cnt, hash_cnt = self.check_extraction()\n except TurbiniaException as exception:\n message = 'File extraction failed: {0!s}'.format(exception)\n result.close(self, success=False, status=message)\n return result\n\n status = (\n 'Extracted {0:d} hashes and {1:d} files from the '\n 'evidence.'.format(hash_cnt, binary_cnt))\n\n if hash_cnt != binary_cnt:\n result.log(\n 'Number of extracted binaries is not equal to the number '\n 'of extracted hashes. This might indicate issues with '\n 'image_export.py. Check binary_extraction.log for more '\n 'details.', logging.WARNING)\n\n binary_extraction_evidence.compress()\n result.close(self, success=True, status=status)\n\n return result\n",
"path": "turbinia/workers/binary_extractor.py"
}
] | diff --git a/requirements.txt b/requirements.txt
index a05f592c0..46491ba1e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -20,7 +20,7 @@ psq
pyparsing<3
pyyaml>=5.4.1
pyOpenSSL<=21.0.0
-redis
+redis<4.2
six>=1.15.0
urllib3[secure]
vine>=5.0.0
diff --git a/turbinia/e2e/e2e-local.sh b/turbinia/e2e/e2e-local.sh
index 5208b641d..e740f3232 100755
--- a/turbinia/e2e/e2e-local.sh
+++ b/turbinia/e2e/e2e-local.sh
@@ -58,5 +58,10 @@ docker logs turbinia-worker
echo "==> Show evidence volume contents in worker"
docker exec -t turbinia-worker ls -al /evidence/
+docker exec -t turbinia-worker find /evidence -ls
+
+echo "==> Show PlasoTask logs"
+for i in cat `docker exec turbinia-server turbiniactl -a status -r 123456789|grep -Eo '*/evidence/123456789/.*PlasoTask.*txt'`; do docker exec turbinia-worker cat $i; done
+
exit $RET
diff --git a/turbinia/workers/binary_extractor.py b/turbinia/workers/binary_extractor.py
index 619cd52b4..d631f1071 100644
--- a/turbinia/workers/binary_extractor.py
+++ b/turbinia/workers/binary_extractor.py
@@ -101,7 +101,7 @@ def run(self, evidence, result):
cmd = [
'image_export.py', '--partitions', 'all', '--volumes', 'all',
- '--no_vss', '--unattended', '--logfile', image_export_log
+ '--vss_stores', 'none', '--unattended', '--logfile', image_export_log
]
if self.task_config.get('binary_extraction_path'):
|
interactions-py__interactions.py-174 | Editing SlashMessage should support multiple embeds
Calling `discord_slash.model.SlashMessage.edit` with an `embeds` kwarg does not work. However, the official Discord API documentation says that it should be possible [when editing interaction messages](https://discord.com/developers/docs/interactions/slash-commands#edit-original-interaction-response).
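A minimal reproduction sketch (the `ctx` object and command wiring are assumed; `ctx.send` returns the `SlashMessage` whose `edit` is at issue):
```
import discord

# Hypothetical reproduction: editing an interaction response with more
# than one embed. The Discord API allows this, but SlashMessage.edit
# only routed the request through the interaction endpoint for
# 'file'/'files' kwargs, so 'embeds' fell through to discord.Message.edit.
async def repro(ctx):
    msg = await ctx.send(content="original")
    await msg.edit(embeds=[
        discord.Embed(title="first"),
        discord.Embed(title="second"),
    ])
```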
| [
{
"content": "import asyncio\nimport discord\nfrom enum import IntEnum\nfrom contextlib import suppress\nfrom inspect import iscoroutinefunction\nfrom . import http\nfrom . import error\n\n\nclass ChoiceData:\n \"\"\"\n Command choice data object\n\n :ivar name: Name of the choice, this is what the user will see\n :ivar value: Values of the choice, this is what discord will return to you\n \"\"\"\n\n def __init__(self, name, value):\n self.name = name\n self.value = value\n\n def __eq__(self, other):\n return isinstance(other, ChoiceData) and self.__dict__ == other.__dict__\n\n\nclass OptionData:\n \"\"\"\n Command option data object\n\n :ivar name: Name of the option.\n :ivar description: Description of the option.\n :ivar required: If the option is required.\n :ivar choices: A list of :class:`ChoiceData`, cannot be present on subcommand groups\n :ivar options: List of :class:`OptionData`, this will be present if it's a subcommand group\n \"\"\"\n\n def __init__(\n self, name, description, required=False, choices=None, options=None, **kwargs\n ):\n self.name = name\n self.description = description\n self.type = kwargs.get(\"type\")\n if self.type is None:\n raise error.IncorrectCommandData(\"type is required for options\")\n self.required = required\n if choices is not None:\n self.choices = []\n for choice in choices:\n self.choices.append(ChoiceData(**choice))\n else:\n self.choices = None\n\n if self.type in (1, 2):\n self.options = []\n if options is not None:\n for option in options:\n self.options.append(OptionData(**option))\n elif self.type == 2:\n raise error.IncorrectCommandData(\n \"Options are required for subcommands / subcommand groups\"\n )\n\n def __eq__(self, other):\n return isinstance(other, OptionData) and self.__dict__ == other.__dict__\n\n\nclass CommandData:\n \"\"\"\n Slash command data object\n\n :ivar name: Name of the command.\n :ivar description: Description of the command.\n :ivar options: List of :class:`OptionData`.\n :ivar id: Command id, this is received from discord so may not be present\n \"\"\"\n\n def __init__(\n self, name, description, options=None, id=None, application_id=None, version=None, **kwargs\n ):\n self.name = name\n self.description = description\n self.id = id\n self.application_id = application_id\n self.version = version\n if options is not None:\n self.options = []\n for option in options:\n self.options.append(OptionData(**option))\n else:\n self.options = None\n\n def __eq__(self, other):\n if isinstance(other, CommandData):\n return (\n self.name == other.name\n and self.description == other.description\n and self.options == other.options\n )\n else:\n return False\n\n\nclass CommandObject:\n \"\"\"\n Slash command object of this extension.\n\n .. warning::\n Do not manually init this model.\n\n :ivar name: Name of the command.\n :ivar func: The coroutine of the command.\n :ivar description: Description of the command.\n :ivar allowed_guild_ids: List of the allowed guild id.\n :ivar options: List of the option of the command. 
Used for `auto_register`.\n :ivar connector: Kwargs connector of the command.\n :ivar __commands_checks__: Check of the command.\n \"\"\"\n\n def __init__(self, name, cmd): # Let's reuse old command formatting.\n self.name = name.lower()\n self.func = cmd[\"func\"]\n self.description = cmd[\"description\"]\n self.allowed_guild_ids = cmd[\"guild_ids\"] or []\n self.options = cmd[\"api_options\"] or []\n self.connector = cmd[\"connector\"] or {}\n self.has_subcommands = cmd[\"has_subcommands\"]\n # Ref https://github.com/Rapptz/discord.py/blob/master/discord/ext/commands/core.py#L1447\n # Since this isn't inherited from `discord.ext.commands.Command`, discord.py's check decorator will\n # add checks at this var.\n self.__commands_checks__ = []\n if hasattr(self.func, '__commands_checks__'):\n self.__commands_checks__ = self.func.__commands_checks__\n\n async def invoke(self, *args, **kwargs):\n \"\"\"\n Invokes the command.\n\n :param args: Args for the command.\n :raises: .error.CheckFailure\n \"\"\"\n can_run = await self.can_run(args[0])\n if not can_run:\n raise error.CheckFailure\n\n return await self.func(*args, **kwargs)\n\n def add_check(self, func):\n \"\"\"\n Adds check to the command.\n\n :param func: Any callable. Coroutines are supported.\n \"\"\"\n self.__commands_checks__.append(func)\n\n def remove_check(self, func):\n \"\"\"\n Removes check to the command.\n\n .. note::\n If the function is not found at the command check, it will ignore.\n\n :param func: Any callable. Coroutines are supported.\n \"\"\"\n with suppress(ValueError):\n self.__commands_checks__.remove(func)\n\n async def can_run(self, ctx) -> bool:\n \"\"\"\n Whether the command can be run.\n\n :param ctx: SlashContext for the check running.\n :type ctx: .context.SlashContext\n :return: bool\n \"\"\"\n res = [bool(x(ctx)) if not iscoroutinefunction(x) else bool(await x(ctx)) for x in self.__commands_checks__]\n return False not in res\n\n\nclass SubcommandObject(CommandObject):\n \"\"\"\n Subcommand object of this extension.\n\n .. note::\n This model inherits :class:`.model.CommandObject`, so this has every variables from that.\n\n .. warning::\n Do not manually init this model.\n\n :ivar base: Name of the base slash command.\n :ivar subcommand_group: Name of the subcommand group. ``None`` if not exist.\n :ivar base_description: Description of the base command.\n :ivar subcommand_group_description: Description of the subcommand_group.\n \"\"\"\n\n def __init__(self, sub, base, name, sub_group=None):\n sub[\"has_subcommands\"] = True # For the inherited class.\n super().__init__(name, sub)\n self.base = base.lower()\n self.subcommand_group = sub_group.lower() if sub_group else sub_group\n self.base_description = sub[\"base_desc\"]\n self.subcommand_group_description = sub[\"sub_group_desc\"]\n\n\nclass CogCommandObject(CommandObject):\n \"\"\"\n Slash command object but for Cog.\n\n .. warning::\n Do not manually init this model.\n \"\"\"\n\n def __init__(self, *args):\n super().__init__(*args)\n self.cog = None # Manually set this later.\n\n async def invoke(self, *args, **kwargs):\n \"\"\"\n Invokes the command.\n\n :param args: Args for the command.\n :raises: .error.CheckFailure\n \"\"\"\n can_run = await self.can_run(args[0])\n if not can_run:\n raise error.CheckFailure\n\n return await self.func(self.cog, *args, **kwargs)\n\n\nclass CogSubcommandObject(SubcommandObject):\n \"\"\"\n Subcommand object but for Cog.\n\n .. 
warning::\n Do not manually init this model.\n \"\"\"\n\n def __init__(self, *args):\n super().__init__(*args)\n self.cog = None # Manually set this later.\n\n async def invoke(self, *args, **kwargs):\n \"\"\"\n Invokes the command.\n\n :param args: Args for the command.\n :raises: .error.CheckFailure\n \"\"\"\n can_run = await self.can_run(args[0])\n if not can_run:\n raise error.CheckFailure\n\n return await self.func(self.cog, *args, **kwargs)\n\n\nclass SlashCommandOptionType(IntEnum):\n \"\"\"\n Equivalent of `ApplicationCommandOptionType <https://discord.com/developers/docs/interactions/slash-commands#applicationcommandoptiontype>`_ in the Discord API.\n \"\"\"\n SUB_COMMAND = 1\n SUB_COMMAND_GROUP = 2\n STRING = 3\n INTEGER = 4\n BOOLEAN = 5\n USER = 6\n CHANNEL = 7\n ROLE = 8\n\n @classmethod\n def from_type(cls, t: type):\n \"\"\"\n Get a specific SlashCommandOptionType from a type (or object).\n\n :param t: The type or object to get a SlashCommandOptionType for.\n :return: :class:`.model.SlashCommandOptionType` or ``None``\n \"\"\"\n if issubclass(t, str): return cls.STRING\n if issubclass(t, bool): return cls.BOOLEAN\n # The check for bool MUST be above the check for integers as booleans subclass integers\n if issubclass(t, int): return cls.INTEGER\n if issubclass(t, discord.abc.User): return cls.USER\n if issubclass(t, discord.abc.GuildChannel): return cls.CHANNEL\n if issubclass(t, discord.abc.Role): return cls.ROLE\n\n\nclass SlashMessage(discord.Message):\n \"\"\"discord.py's :class:`discord.Message` but overridden ``edit`` and ``delete`` to work for slash command.\"\"\"\n\n def __init__(self, *, state, channel, data, _http: http.SlashCommandRequest, interaction_token):\n # Yes I know it isn't the best way but this makes implementation simple.\n super().__init__(state=state, channel=channel, data=data)\n self._http = _http\n self.__interaction_token = interaction_token\n\n async def _slash_edit(self, **fields):\n \"\"\"\n An internal function\n \"\"\"\n _resp = {}\n\n content = fields.get(\"content\")\n if content:\n _resp[\"content\"] = str(content)\n\n embed = fields.get(\"embed\")\n embeds = fields.get(\"embeds\")\n file = fields.get(\"file\")\n files = fields.get(\"files\")\n\n if embed and embeds:\n raise error.IncorrectFormat(\"You can't use both `embed` and `embeds`!\")\n if file and files:\n raise error.IncorrectFormat(\"You can't use both `file` and `files`!\")\n if file:\n files = [file]\n if embed:\n embeds = [embed]\n if embeds:\n if not isinstance(embeds, list):\n raise error.IncorrectFormat(\"Provide a list of embeds.\")\n elif len(embeds) > 10:\n raise error.IncorrectFormat(\"Do not provide more than 10 embeds.\")\n _resp[\"embeds\"] = [x.to_dict() for x in embeds]\n\n allowed_mentions = fields.get(\"allowed_mentions\")\n _resp[\"allowed_mentions\"] = allowed_mentions.to_dict() if allowed_mentions else \\\n self._state.allowed_mentions.to_dict() if self._state.allowed_mentions else {}\n\n await self._http.edit(_resp, self.__interaction_token, self.id, files=files)\n\n delete_after = fields.get(\"delete_after\")\n if delete_after:\n await self.delete(delay=delete_after)\n if files:\n [x.close() for x in files]\n\n async def edit(self, **fields):\n \"\"\"Refer :meth:`discord.Message.edit`.\"\"\"\n if \"file\" in fields or \"files\" in fields:\n await self._slash_edit(**fields)\n else:\n try:\n await super().edit(**fields)\n except discord.Forbidden:\n await self._slash_edit(**fields)\n\n async def delete(self, *, delay=None):\n \"\"\"Refer 
:meth:`discord.Message.delete`.\"\"\"\n try:\n await super().delete(delay=delay)\n except discord.Forbidden:\n if not delay:\n return await self._http.delete(self.__interaction_token, self.id)\n\n async def wrap():\n with suppress(discord.HTTPException):\n await asyncio.sleep(delay)\n await self._http.delete(self.__interaction_token, self.id)\n\n self._state.loop.create_task(wrap())\n",
"path": "discord_slash/model.py"
}
] | [
{
"content": "import asyncio\nimport discord\nfrom enum import IntEnum\nfrom contextlib import suppress\nfrom inspect import iscoroutinefunction\nfrom . import http\nfrom . import error\n\n\nclass ChoiceData:\n \"\"\"\n Command choice data object\n\n :ivar name: Name of the choice, this is what the user will see\n :ivar value: Values of the choice, this is what discord will return to you\n \"\"\"\n\n def __init__(self, name, value):\n self.name = name\n self.value = value\n\n def __eq__(self, other):\n return isinstance(other, ChoiceData) and self.__dict__ == other.__dict__\n\n\nclass OptionData:\n \"\"\"\n Command option data object\n\n :ivar name: Name of the option.\n :ivar description: Description of the option.\n :ivar required: If the option is required.\n :ivar choices: A list of :class:`ChoiceData`, cannot be present on subcommand groups\n :ivar options: List of :class:`OptionData`, this will be present if it's a subcommand group\n \"\"\"\n\n def __init__(\n self, name, description, required=False, choices=None, options=None, **kwargs\n ):\n self.name = name\n self.description = description\n self.type = kwargs.get(\"type\")\n if self.type is None:\n raise error.IncorrectCommandData(\"type is required for options\")\n self.required = required\n if choices is not None:\n self.choices = []\n for choice in choices:\n self.choices.append(ChoiceData(**choice))\n else:\n self.choices = None\n\n if self.type in (1, 2):\n self.options = []\n if options is not None:\n for option in options:\n self.options.append(OptionData(**option))\n elif self.type == 2:\n raise error.IncorrectCommandData(\n \"Options are required for subcommands / subcommand groups\"\n )\n\n def __eq__(self, other):\n return isinstance(other, OptionData) and self.__dict__ == other.__dict__\n\n\nclass CommandData:\n \"\"\"\n Slash command data object\n\n :ivar name: Name of the command.\n :ivar description: Description of the command.\n :ivar options: List of :class:`OptionData`.\n :ivar id: Command id, this is received from discord so may not be present\n \"\"\"\n\n def __init__(\n self, name, description, options=None, id=None, application_id=None, version=None, **kwargs\n ):\n self.name = name\n self.description = description\n self.id = id\n self.application_id = application_id\n self.version = version\n if options is not None:\n self.options = []\n for option in options:\n self.options.append(OptionData(**option))\n else:\n self.options = None\n\n def __eq__(self, other):\n if isinstance(other, CommandData):\n return (\n self.name == other.name\n and self.description == other.description\n and self.options == other.options\n )\n else:\n return False\n\n\nclass CommandObject:\n \"\"\"\n Slash command object of this extension.\n\n .. warning::\n Do not manually init this model.\n\n :ivar name: Name of the command.\n :ivar func: The coroutine of the command.\n :ivar description: Description of the command.\n :ivar allowed_guild_ids: List of the allowed guild id.\n :ivar options: List of the option of the command. 
Used for `auto_register`.\n :ivar connector: Kwargs connector of the command.\n :ivar __commands_checks__: Check of the command.\n \"\"\"\n\n def __init__(self, name, cmd): # Let's reuse old command formatting.\n self.name = name.lower()\n self.func = cmd[\"func\"]\n self.description = cmd[\"description\"]\n self.allowed_guild_ids = cmd[\"guild_ids\"] or []\n self.options = cmd[\"api_options\"] or []\n self.connector = cmd[\"connector\"] or {}\n self.has_subcommands = cmd[\"has_subcommands\"]\n # Ref https://github.com/Rapptz/discord.py/blob/master/discord/ext/commands/core.py#L1447\n # Since this isn't inherited from `discord.ext.commands.Command`, discord.py's check decorator will\n # add checks at this var.\n self.__commands_checks__ = []\n if hasattr(self.func, '__commands_checks__'):\n self.__commands_checks__ = self.func.__commands_checks__\n\n async def invoke(self, *args, **kwargs):\n \"\"\"\n Invokes the command.\n\n :param args: Args for the command.\n :raises: .error.CheckFailure\n \"\"\"\n can_run = await self.can_run(args[0])\n if not can_run:\n raise error.CheckFailure\n\n return await self.func(*args, **kwargs)\n\n def add_check(self, func):\n \"\"\"\n Adds check to the command.\n\n :param func: Any callable. Coroutines are supported.\n \"\"\"\n self.__commands_checks__.append(func)\n\n def remove_check(self, func):\n \"\"\"\n Removes check to the command.\n\n .. note::\n If the function is not found at the command check, it will ignore.\n\n :param func: Any callable. Coroutines are supported.\n \"\"\"\n with suppress(ValueError):\n self.__commands_checks__.remove(func)\n\n async def can_run(self, ctx) -> bool:\n \"\"\"\n Whether the command can be run.\n\n :param ctx: SlashContext for the check running.\n :type ctx: .context.SlashContext\n :return: bool\n \"\"\"\n res = [bool(x(ctx)) if not iscoroutinefunction(x) else bool(await x(ctx)) for x in self.__commands_checks__]\n return False not in res\n\n\nclass SubcommandObject(CommandObject):\n \"\"\"\n Subcommand object of this extension.\n\n .. note::\n This model inherits :class:`.model.CommandObject`, so this has every variables from that.\n\n .. warning::\n Do not manually init this model.\n\n :ivar base: Name of the base slash command.\n :ivar subcommand_group: Name of the subcommand group. ``None`` if not exist.\n :ivar base_description: Description of the base command.\n :ivar subcommand_group_description: Description of the subcommand_group.\n \"\"\"\n\n def __init__(self, sub, base, name, sub_group=None):\n sub[\"has_subcommands\"] = True # For the inherited class.\n super().__init__(name, sub)\n self.base = base.lower()\n self.subcommand_group = sub_group.lower() if sub_group else sub_group\n self.base_description = sub[\"base_desc\"]\n self.subcommand_group_description = sub[\"sub_group_desc\"]\n\n\nclass CogCommandObject(CommandObject):\n \"\"\"\n Slash command object but for Cog.\n\n .. warning::\n Do not manually init this model.\n \"\"\"\n\n def __init__(self, *args):\n super().__init__(*args)\n self.cog = None # Manually set this later.\n\n async def invoke(self, *args, **kwargs):\n \"\"\"\n Invokes the command.\n\n :param args: Args for the command.\n :raises: .error.CheckFailure\n \"\"\"\n can_run = await self.can_run(args[0])\n if not can_run:\n raise error.CheckFailure\n\n return await self.func(self.cog, *args, **kwargs)\n\n\nclass CogSubcommandObject(SubcommandObject):\n \"\"\"\n Subcommand object but for Cog.\n\n .. 
warning::\n Do not manually init this model.\n \"\"\"\n\n def __init__(self, *args):\n super().__init__(*args)\n self.cog = None # Manually set this later.\n\n async def invoke(self, *args, **kwargs):\n \"\"\"\n Invokes the command.\n\n :param args: Args for the command.\n :raises: .error.CheckFailure\n \"\"\"\n can_run = await self.can_run(args[0])\n if not can_run:\n raise error.CheckFailure\n\n return await self.func(self.cog, *args, **kwargs)\n\n\nclass SlashCommandOptionType(IntEnum):\n \"\"\"\n Equivalent of `ApplicationCommandOptionType <https://discord.com/developers/docs/interactions/slash-commands#applicationcommandoptiontype>`_ in the Discord API.\n \"\"\"\n SUB_COMMAND = 1\n SUB_COMMAND_GROUP = 2\n STRING = 3\n INTEGER = 4\n BOOLEAN = 5\n USER = 6\n CHANNEL = 7\n ROLE = 8\n\n @classmethod\n def from_type(cls, t: type):\n \"\"\"\n Get a specific SlashCommandOptionType from a type (or object).\n\n :param t: The type or object to get a SlashCommandOptionType for.\n :return: :class:`.model.SlashCommandOptionType` or ``None``\n \"\"\"\n if issubclass(t, str): return cls.STRING\n if issubclass(t, bool): return cls.BOOLEAN\n # The check for bool MUST be above the check for integers as booleans subclass integers\n if issubclass(t, int): return cls.INTEGER\n if issubclass(t, discord.abc.User): return cls.USER\n if issubclass(t, discord.abc.GuildChannel): return cls.CHANNEL\n if issubclass(t, discord.abc.Role): return cls.ROLE\n\n\nclass SlashMessage(discord.Message):\n \"\"\"discord.py's :class:`discord.Message` but overridden ``edit`` and ``delete`` to work for slash command.\"\"\"\n\n def __init__(self, *, state, channel, data, _http: http.SlashCommandRequest, interaction_token):\n # Yes I know it isn't the best way but this makes implementation simple.\n super().__init__(state=state, channel=channel, data=data)\n self._http = _http\n self.__interaction_token = interaction_token\n\n async def _slash_edit(self, **fields):\n \"\"\"\n An internal function\n \"\"\"\n _resp = {}\n\n content = fields.get(\"content\")\n if content:\n _resp[\"content\"] = str(content)\n\n embed = fields.get(\"embed\")\n embeds = fields.get(\"embeds\")\n file = fields.get(\"file\")\n files = fields.get(\"files\")\n\n if embed and embeds:\n raise error.IncorrectFormat(\"You can't use both `embed` and `embeds`!\")\n if file and files:\n raise error.IncorrectFormat(\"You can't use both `file` and `files`!\")\n if file:\n files = [file]\n if embed:\n embeds = [embed]\n if embeds:\n if not isinstance(embeds, list):\n raise error.IncorrectFormat(\"Provide a list of embeds.\")\n elif len(embeds) > 10:\n raise error.IncorrectFormat(\"Do not provide more than 10 embeds.\")\n _resp[\"embeds\"] = [x.to_dict() for x in embeds]\n\n allowed_mentions = fields.get(\"allowed_mentions\")\n _resp[\"allowed_mentions\"] = allowed_mentions.to_dict() if allowed_mentions else \\\n self._state.allowed_mentions.to_dict() if self._state.allowed_mentions else {}\n\n await self._http.edit(_resp, self.__interaction_token, self.id, files=files)\n\n delete_after = fields.get(\"delete_after\")\n if delete_after:\n await self.delete(delay=delete_after)\n if files:\n [x.close() for x in files]\n\n async def edit(self, **fields):\n \"\"\"Refer :meth:`discord.Message.edit`.\"\"\"\n if \"file\" in fields or \"files\" in fields or \"embeds\" in fields:\n await self._slash_edit(**fields)\n else:\n try:\n await super().edit(**fields)\n except discord.Forbidden:\n await self._slash_edit(**fields)\n\n async def delete(self, *, delay=None):\n 
\"\"\"Refer :meth:`discord.Message.delete`.\"\"\"\n try:\n await super().delete(delay=delay)\n except discord.Forbidden:\n if not delay:\n return await self._http.delete(self.__interaction_token, self.id)\n\n async def wrap():\n with suppress(discord.HTTPException):\n await asyncio.sleep(delay)\n await self._http.delete(self.__interaction_token, self.id)\n\n self._state.loop.create_task(wrap())\n",
"path": "discord_slash/model.py"
}
] | diff --git a/discord_slash/model.py b/discord_slash/model.py
index 6fe252644..58a3a2cfe 100644
--- a/discord_slash/model.py
+++ b/discord_slash/model.py
@@ -336,7 +336,7 @@ async def _slash_edit(self, **fields):
async def edit(self, **fields):
"""Refer :meth:`discord.Message.edit`."""
- if "file" in fields or "files" in fields:
+ if "file" in fields or "files" in fields or "embeds" in fields:
await self._slash_edit(**fields)
else:
try:
|
Pyomo__pyomo-1385 | Error with TransformationFactory('core.relax_integers')
When I use `TransformationFactory('core.relax_integrality').apply_to(m)`, a warning came up.
`WARNING: DEPRECATED: core.relax_integrality is deprecated. Use core.relax_integers (deprecated in TBD)`
When I changed the code to `TransformationFactory('core.relax_integers').apply_to(m)`, an error came up:
```
TransformationFactory('core.relax_integers').apply_to(m)
AttributeError: 'NoneType' object has no attribute 'apply_to'
```
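The traceback happens because `TransformationFactory` returns `None` for an unregistered name; the deprecation message pointed at a name that was never registered. A minimal sketch of the working call (per the fix below, the registered name is `core.relax_integer_vars`; the model here is hypothetical):
```
from pyomo.environ import ConcreteModel, Var, Integers, TransformationFactory

m = ConcreteModel()
m.x = Var(domain=Integers, bounds=(0, 10))

# 'core.relax_integers' was never registered, so TransformationFactory
# returned None; the registered relaxation is 'core.relax_integer_vars'.
TransformationFactory('core.relax_integer_vars').apply_to(m)
```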
| [
{
"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nfrom pyomo.common import deprecated\nfrom pyomo.core.base import TransformationFactory\nfrom pyomo.core.plugins.transform.discrete_vars import RelaxIntegerVars\n\n\[email protected](\n 'core.relax_integrality',\n doc=\"[DEPRECATED] Create a model where integer variables are replaced with \"\n \"real variables.\")\nclass RelaxIntegrality(RelaxIntegerVars):\n \"\"\"\n This plugin relaxes integrality in a Pyomo model.\n \"\"\"\n\n @deprecated(\n \"core.relax_integrality is deprecated. Use core.relax_integers\",\n version='TBD')\n def __init__(self, **kwds):\n super(RelaxIntegrality, self).__init__(**kwds)\n",
"path": "pyomo/core/plugins/transform/relax_integrality.py"
}
] | [
{
"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nfrom pyomo.common import deprecated\nfrom pyomo.core.base import TransformationFactory\nfrom pyomo.core.plugins.transform.discrete_vars import RelaxIntegerVars\n\n\[email protected](\n 'core.relax_integrality',\n doc=\"[DEPRECATED] Create a model where integer variables are replaced with \"\n \"real variables.\")\nclass RelaxIntegrality(RelaxIntegerVars):\n \"\"\"\n This plugin relaxes integrality in a Pyomo model.\n \"\"\"\n\n @deprecated(\n \"core.relax_integrality is deprecated. Use core.relax_integer_vars\",\n version='TBD')\n def __init__(self, **kwds):\n super(RelaxIntegrality, self).__init__(**kwds)\n",
"path": "pyomo/core/plugins/transform/relax_integrality.py"
}
] | diff --git a/pyomo/core/plugins/transform/relax_integrality.py b/pyomo/core/plugins/transform/relax_integrality.py
index 43dca23f19b..5e0776182da 100644
--- a/pyomo/core/plugins/transform/relax_integrality.py
+++ b/pyomo/core/plugins/transform/relax_integrality.py
@@ -23,7 +23,7 @@ class RelaxIntegrality(RelaxIntegerVars):
"""
@deprecated(
- "core.relax_integrality is deprecated. Use core.relax_integers",
+ "core.relax_integrality is deprecated. Use core.relax_integer_vars",
version='TBD')
def __init__(self, **kwds):
super(RelaxIntegrality, self).__init__(**kwds)
|
zigpy__zha-device-handlers-462 | [Device Support Request] Philips Hue Power Cycle Attribute
Hi
It would be great to have the Philips Hue Power Cycle attribute added. It controls whether the bulb comes back on, stays off, or returns to its previous state when power is restored.
The vendor attribute is `0x4003` and is part of the `0x0006` (on_off) cluster. The possible values (enum8) are
```
Off = 0x00
On = 0x01
Previous = 0xff
```
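For reference, a minimal sketch of a zigpy quirk cluster exposing this attribute; the names mirror the fix shown further below, so treat it as illustrative rather than the final implementation:

```python
import zigpy.types as t
from zigpy.quirks import CustomCluster
from zigpy.zcl.clusters.general import OnOff

class PowerOnState(t.enum8):
    """Philips power-on behaviour (attribute 0x4003)."""
    Off = 0x00
    On = 0x01
    LastState = 0xFF  # "Previous" in the Hue documentation

class PhilipsOnOffCluster(CustomCluster, OnOff):
    """OnOff cluster extended with the Hue power-on state attribute."""
    attributes = OnOff.attributes.copy()
    attributes.update({0x4003: ("power_on_state", PowerOnState)})
```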
The device signature for the ambient color bulb is as follows:
```
{
"node_descriptor": "<NodeDescriptor byte1=1 byte2=64 mac_capability_flags=142 manufacturer_code=4107 maximum_buffer_size=71 maximum_incoming_transfer_size=45 server_mask=0 maximum_outgoing_transfer_size=45 descriptor_capability_field=0>",
"endpoints": {
"11": {
"profile_id": 49246,
"device_type": "0x0210",
"in_clusters": [
"0x0000",
"0x0003",
"0x0004",
"0x0005",
"0x0006",
"0x0008",
"0x0300",
"0x1000",
"0xfc01"
],
"out_clusters": [
"0x0019"
]
},
"242": {
"profile_id": 41440,
"device_type": "0x0061",
"in_clusters": [
"0x0021"
],
"out_clusters": [
"0x0021"
]
}
},
"manufacturer": "Philips",
"model": "LCT015",
"class": "zigpy.device.Device"
}
```
The device signature for the dimmable white bulb is as follows:
```
{
"node_descriptor": "<NodeDescriptor byte1=1 byte2=64 mac_capability_flags=142 manufacturer_code=4107 maximum_buffer_size=71 maximum_incoming_transfer_size=45 server_mask=0 maximum_outgoing_transfer_size=45 descriptor_capability_field=0>",
"endpoints": {
"11": {
"profile_id": 49246,
"device_type": "0x0100",
"in_clusters": [
"0x0000",
"0x0003",
"0x0004",
"0x0005",
"0x0006",
"0x0008",
"0x1000"
],
"out_clusters": [
"0x0019"
]
},
"242": {
"profile_id": 41440,
"device_type": "0x0061",
"in_clusters": [
"0x0021"
],
"out_clusters": [
"0x0021"
]
}
},
"manufacturer": "Philips",
"model": "LWB010",
"class": "zigpy.device.Device"
}
```
I hope this is all that is needed to implement this. Please let me know if there is anything else you need.
Thanks.
Kurt
| [
{
"content": "\"\"\"Module for Philips quirks implementations.\"\"\"\nimport logging\n\nfrom zigpy.quirks import CustomCluster\nimport zigpy.types as t\nfrom zigpy.zcl.clusters.general import Basic, OnOff\n\nfrom ..const import (\n ARGS,\n BUTTON,\n COMMAND,\n COMMAND_ID,\n DIM_DOWN,\n DIM_UP,\n LONG_PRESS,\n LONG_RELEASE,\n PRESS_TYPE,\n SHORT_PRESS,\n SHORT_RELEASE,\n TURN_OFF,\n TURN_ON,\n ZHA_SEND_EVENT,\n)\n\nDIAGNOSTICS_CLUSTER_ID = 0x0B05 # decimal = 2821\nPHILIPS = \"Philips\"\n_LOGGER = logging.getLogger(__name__)\n\nHUE_REMOTE_DEVICE_TRIGGERS = {\n (SHORT_PRESS, TURN_ON): {COMMAND: \"on_press\"},\n (SHORT_PRESS, TURN_OFF): {COMMAND: \"off_press\"},\n (SHORT_PRESS, DIM_UP): {COMMAND: \"up_press\"},\n (SHORT_PRESS, DIM_DOWN): {COMMAND: \"down_press\"},\n (LONG_PRESS, TURN_ON): {COMMAND: \"on_hold\"},\n (LONG_PRESS, TURN_OFF): {COMMAND: \"off_hold\"},\n (LONG_PRESS, DIM_UP): {COMMAND: \"up_hold\"},\n (LONG_PRESS, DIM_DOWN): {COMMAND: \"down_hold\"},\n (SHORT_RELEASE, TURN_ON): {COMMAND: \"on_short_release\"},\n (SHORT_RELEASE, TURN_OFF): {COMMAND: \"off_short_release\"},\n (SHORT_RELEASE, DIM_UP): {COMMAND: \"up_short_release\"},\n (SHORT_RELEASE, DIM_DOWN): {COMMAND: \"down_short_release\"},\n (LONG_RELEASE, TURN_ON): {COMMAND: \"on_long_release\"},\n (LONG_RELEASE, TURN_OFF): {COMMAND: \"off_long_release\"},\n (LONG_RELEASE, DIM_UP): {COMMAND: \"up_long_release\"},\n (LONG_RELEASE, DIM_DOWN): {COMMAND: \"down_long_release\"},\n}\n\n\nclass PowerOnState(t.enum8):\n \"\"\"Philips power on state enum.\"\"\"\n\n Off = 0x00\n On = 0x01\n LastState = 0xFF\n\n\nclass PhilipsOnOffCluster(CustomCluster, OnOff):\n \"\"\"Philips OnOff cluster.\"\"\"\n\n manufacturer_attributes = {0x4003: (\"power_on_state\", PowerOnState)}\n\n\nclass PhilipsBasicCluster(CustomCluster, Basic):\n \"\"\"Philips Basic cluster.\"\"\"\n\n manufacturer_attributes = {0x0031: (\"philips\", t.bitmap16)}\n\n attr_config = {0x0031: 0x000B}\n\n async def bind(self):\n \"\"\"Bind cluster.\"\"\"\n result = await super().bind()\n await self.write_attributes(self.attr_config, manufacturer=0x100B)\n return result\n\n\nclass PhilipsRemoteCluster(CustomCluster):\n \"\"\"Philips remote cluster.\"\"\"\n\n cluster_id = 64512\n name = \"PhilipsRemoteCluster\"\n ep_attribute = \"philips_remote_cluster\"\n manufacturer_client_commands = {\n 0x0000: (\n \"notification\",\n (t.uint8_t, t.uint24_t, t.uint8_t, t.uint8_t, t.uint8_t, t.uint8_t),\n False,\n )\n }\n BUTTONS = {1: \"on\", 2: \"up\", 3: \"down\", 4: \"off\"}\n PRESS_TYPES = {0: \"press\", 1: \"hold\", 2: \"short_release\", 3: \"long_release\"}\n\n def handle_cluster_request(self, tsn, command_id, args):\n \"\"\"Handle the cluster command.\"\"\"\n _LOGGER.debug(\n \"PhilipsRemoteCluster - handle_cluster_request tsn: [%s] command id: %s - args: [%s]\",\n tsn,\n command_id,\n args,\n )\n button = self.BUTTONS.get(args[0], args[0])\n press_type = self.PRESS_TYPES.get(args[2], args[2])\n\n event_args = {\n BUTTON: button,\n PRESS_TYPE: press_type,\n COMMAND_ID: command_id,\n ARGS: args,\n }\n action = \"{}_{}\".format(button, press_type)\n self.listener_event(ZHA_SEND_EVENT, action, event_args)\n",
"path": "zhaquirks/philips/__init__.py"
}
] | [
{
"content": "\"\"\"Module for Philips quirks implementations.\"\"\"\nimport logging\n\nfrom zigpy.quirks import CustomCluster\nimport zigpy.types as t\nfrom zigpy.zcl.clusters.general import Basic, OnOff\n\nfrom ..const import (\n ARGS,\n BUTTON,\n COMMAND,\n COMMAND_ID,\n DIM_DOWN,\n DIM_UP,\n LONG_PRESS,\n LONG_RELEASE,\n PRESS_TYPE,\n SHORT_PRESS,\n SHORT_RELEASE,\n TURN_OFF,\n TURN_ON,\n ZHA_SEND_EVENT,\n)\n\nDIAGNOSTICS_CLUSTER_ID = 0x0B05 # decimal = 2821\nPHILIPS = \"Philips\"\n_LOGGER = logging.getLogger(__name__)\n\nHUE_REMOTE_DEVICE_TRIGGERS = {\n (SHORT_PRESS, TURN_ON): {COMMAND: \"on_press\"},\n (SHORT_PRESS, TURN_OFF): {COMMAND: \"off_press\"},\n (SHORT_PRESS, DIM_UP): {COMMAND: \"up_press\"},\n (SHORT_PRESS, DIM_DOWN): {COMMAND: \"down_press\"},\n (LONG_PRESS, TURN_ON): {COMMAND: \"on_hold\"},\n (LONG_PRESS, TURN_OFF): {COMMAND: \"off_hold\"},\n (LONG_PRESS, DIM_UP): {COMMAND: \"up_hold\"},\n (LONG_PRESS, DIM_DOWN): {COMMAND: \"down_hold\"},\n (SHORT_RELEASE, TURN_ON): {COMMAND: \"on_short_release\"},\n (SHORT_RELEASE, TURN_OFF): {COMMAND: \"off_short_release\"},\n (SHORT_RELEASE, DIM_UP): {COMMAND: \"up_short_release\"},\n (SHORT_RELEASE, DIM_DOWN): {COMMAND: \"down_short_release\"},\n (LONG_RELEASE, TURN_ON): {COMMAND: \"on_long_release\"},\n (LONG_RELEASE, TURN_OFF): {COMMAND: \"off_long_release\"},\n (LONG_RELEASE, DIM_UP): {COMMAND: \"up_long_release\"},\n (LONG_RELEASE, DIM_DOWN): {COMMAND: \"down_long_release\"},\n}\n\n\nclass PowerOnState(t.enum8):\n \"\"\"Philips power on state enum.\"\"\"\n\n Off = 0x00\n On = 0x01\n LastState = 0xFF\n\n\nclass PhilipsOnOffCluster(CustomCluster, OnOff):\n \"\"\"Philips OnOff cluster.\"\"\"\n\n attributes = OnOff.attributes.copy()\n attributes.update({0x4003: (\"power_on_state\", PowerOnState)})\n\n\nclass PhilipsBasicCluster(CustomCluster, Basic):\n \"\"\"Philips Basic cluster.\"\"\"\n\n manufacturer_attributes = {0x0031: (\"philips\", t.bitmap16)}\n\n attr_config = {0x0031: 0x000B}\n\n async def bind(self):\n \"\"\"Bind cluster.\"\"\"\n result = await super().bind()\n await self.write_attributes(self.attr_config, manufacturer=0x100B)\n return result\n\n\nclass PhilipsRemoteCluster(CustomCluster):\n \"\"\"Philips remote cluster.\"\"\"\n\n cluster_id = 64512\n name = \"PhilipsRemoteCluster\"\n ep_attribute = \"philips_remote_cluster\"\n manufacturer_client_commands = {\n 0x0000: (\n \"notification\",\n (t.uint8_t, t.uint24_t, t.uint8_t, t.uint8_t, t.uint8_t, t.uint8_t),\n False,\n )\n }\n BUTTONS = {1: \"on\", 2: \"up\", 3: \"down\", 4: \"off\"}\n PRESS_TYPES = {0: \"press\", 1: \"hold\", 2: \"short_release\", 3: \"long_release\"}\n\n def handle_cluster_request(self, tsn, command_id, args):\n \"\"\"Handle the cluster command.\"\"\"\n _LOGGER.debug(\n \"PhilipsRemoteCluster - handle_cluster_request tsn: [%s] command id: %s - args: [%s]\",\n tsn,\n command_id,\n args,\n )\n button = self.BUTTONS.get(args[0], args[0])\n press_type = self.PRESS_TYPES.get(args[2], args[2])\n\n event_args = {\n BUTTON: button,\n PRESS_TYPE: press_type,\n COMMAND_ID: command_id,\n ARGS: args,\n }\n action = \"{}_{}\".format(button, press_type)\n self.listener_event(ZHA_SEND_EVENT, action, event_args)\n",
"path": "zhaquirks/philips/__init__.py"
}
] | diff --git a/zhaquirks/philips/__init__.py b/zhaquirks/philips/__init__.py
index 0e366f516f..dbe714e50f 100644
--- a/zhaquirks/philips/__init__.py
+++ b/zhaquirks/philips/__init__.py
@@ -57,7 +57,8 @@ class PowerOnState(t.enum8):
class PhilipsOnOffCluster(CustomCluster, OnOff):
"""Philips OnOff cluster."""
- manufacturer_attributes = {0x4003: ("power_on_state", PowerOnState)}
+ attributes = OnOff.attributes.copy()
+ attributes.update({0x4003: ("power_on_state", PowerOnState)})
class PhilipsBasicCluster(CustomCluster, Basic):
|
coala__coala-bears-2837 | PySafetyBear: Add asciinema
The content of the file on which the bear performs its action should also be shown in an asciinema recording.
difficulty/newcomer
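For newcomers: coala bears advertise a demo recording through the `ASCIINEMA_URL` class attribute, so the change amounts to one line of metadata. A condensed sketch (the URL is the one used in the fix; the other attributes are abbreviated from the bear's real metadata):

```python
from coalib.bears.LocalBear import LocalBear

class PySafetyBear(LocalBear):
    """Checks if any of your Python dependencies have known security issues."""

    LICENSE = 'AGPL'
    CAN_DETECT = {'Security'}
    # The one-line addition: a recording of the bear acting on a file.
    ASCIINEMA_URL = 'https://asciinema.org/a/221386'
```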
| [
{
"content": "import os\nfrom collections import namedtuple\nimport pkg_resources\nimport re\n\nfrom safety import safety\n\nfrom coalib.bears.LocalBear import LocalBear\nfrom dependency_management.requirements.PipRequirement import PipRequirement\nfrom coalib.results.Result import Result\nfrom coalib.settings.Setting import path\nfrom coalib.results.SourceRange import SourceRange\nfrom coalib.settings.Setting import typed_list\n\n\n# It was for old versions of safety and those versions will be allow in future.\ndef cve_key_checker(vulnerability):\n if 'cve' in vulnerability.data:\n if vulnerability.data['cve'] is None:\n return None\n else:\n return True\n else:\n return None\n\n\n# the safety module expects an object that looks like this\n# (not importing it from there because it's in a private-ish location)\nPackage = namedtuple('Package', ('key', 'version'))\n\nsafety_get_vulnerabilities = safety.get_vulnerabilities\n_insecure_full_json_url = ('https://raw.githubusercontent.com/pyupio/'\n 'safety-db/master/data/insecure_full.json')\n\n_insecure_json_url = ('https://raw.githubusercontent.com/'\n 'pyupio/safety-db/master/data/insecure.json')\n\n\ndef _get_vulnerabilities(pkg, spec, db):\n for entry in safety_get_vulnerabilities(pkg, spec, db):\n entry['cve'] = entry['id'] if entry['cve'] is None else entry['cve']\n entry['id'] = entry['cve']\n yield entry\n\n\nsafety.get_vulnerabilities = _get_vulnerabilities\n\n\nclass PySafetyBear(LocalBear):\n \"\"\"\n Checks if any of your Python dependencies have known security issues.\n\n Data is taken from pyup.io's vulnerability database hosted at\n https://github.com/pyupio/safety.\n \"\"\"\n\n LANGUAGES = {\n 'Python Requirements',\n 'Python 2 Requirements',\n 'Python 3 Requirements',\n }\n AUTHORS = {'Bence Nagy'}\n REQUIREMENTS = {PipRequirement('safety', '1.8.2')}\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL'\n CAN_DETECT = {'Security'}\n\n def setup_dependencies(self):\n file = self.download_cached_file(_insecure_full_json_url,\n 'insecure_full.json')\n self.download_cached_file(_insecure_json_url,\n 'insecure.json')\n type(self).db_path = os.path.dirname(file)\n\n def run(self, filename, file,\n db_path: path = '',\n cve_ignore: typed_list(str) = []):\n \"\"\"\n Checks for vulnerable package versions in requirements files.\n\n :param db_path: Path to a local vulnerability database.\n :param cve_ignore: A list of CVE number to be ignore.\n \"\"\"\n db_path = self.db_path if not db_path else db_path\n packages = list(\n Package(key=req.key, version=req.specs[0][1])\n for req in self.try_parse_requirements(file)\n if len(req.specs) == 1 and req.specs[0][0] == '=='\n )\n\n if not packages:\n return\n\n for vulnerability in safety.check(packages, key=None,\n db_mirror=db_path, cached=False,\n ignore_ids=cve_ignore):\n if 'cve' in vulnerability.vuln_id.strip().lower():\n message_template = (\n '{vuln.name}{vuln.spec} is vulnerable to {vuln.vuln_id} '\n 'and your project is using {vuln.version}.'\n )\n else:\n message_template = (\n '{vuln.name}{vuln.spec} is vulnerable to '\n 'pyup.io-{vuln.vuln_id} and your project is using '\n '{vuln.version}.'\n )\n\n # StopIteration should not ever happen so skipping its branch\n line_number, line = next( # pragma: no branch\n (index, line) for index, line in enumerate(file, start=1)\n if vulnerability.name in line\n )\n version_spec_match = re.search(r'[=<>]+(\\S+?)(?:$|\\s|#)', line)\n source_range = SourceRange.from_values(\n filename,\n line_number,\n version_spec_match.start(1) + 1,\n 
line_number,\n version_spec_match.end(1) + 1,\n )\n\n yield Result(\n self,\n message_template.format(vuln=vulnerability),\n additional_info=vulnerability.advisory,\n affected_code=(source_range, ),\n )\n\n @staticmethod\n def try_parse_requirements(lines: typed_list(str)):\n \"\"\"\n Yields all package requirements parsable from the given lines.\n\n :param lines: An iterable of lines from a requirements file.\n \"\"\"\n for line in lines:\n try:\n yield from pkg_resources.parse_requirements(line)\n except pkg_resources.RequirementParseError:\n # unsupported requirement specification\n pass\n",
"path": "bears/python/requirements/PySafetyBear.py"
}
] | [
{
"content": "import os\nfrom collections import namedtuple\nimport pkg_resources\nimport re\n\nfrom safety import safety\n\nfrom coalib.bears.LocalBear import LocalBear\nfrom dependency_management.requirements.PipRequirement import PipRequirement\nfrom coalib.results.Result import Result\nfrom coalib.settings.Setting import path\nfrom coalib.results.SourceRange import SourceRange\nfrom coalib.settings.Setting import typed_list\n\n\n# It was for old versions of safety and those versions will be allow in future.\ndef cve_key_checker(vulnerability):\n if 'cve' in vulnerability.data:\n if vulnerability.data['cve'] is None:\n return None\n else:\n return True\n else:\n return None\n\n\n# the safety module expects an object that looks like this\n# (not importing it from there because it's in a private-ish location)\nPackage = namedtuple('Package', ('key', 'version'))\n\nsafety_get_vulnerabilities = safety.get_vulnerabilities\n_insecure_full_json_url = ('https://raw.githubusercontent.com/pyupio/'\n 'safety-db/master/data/insecure_full.json')\n\n_insecure_json_url = ('https://raw.githubusercontent.com/'\n 'pyupio/safety-db/master/data/insecure.json')\n\n\ndef _get_vulnerabilities(pkg, spec, db):\n for entry in safety_get_vulnerabilities(pkg, spec, db):\n entry['cve'] = entry['id'] if entry['cve'] is None else entry['cve']\n entry['id'] = entry['cve']\n yield entry\n\n\nsafety.get_vulnerabilities = _get_vulnerabilities\n\n\nclass PySafetyBear(LocalBear):\n \"\"\"\n Checks if any of your Python dependencies have known security issues.\n\n Data is taken from pyup.io's vulnerability database hosted at\n https://github.com/pyupio/safety.\n \"\"\"\n\n LANGUAGES = {\n 'Python Requirements',\n 'Python 2 Requirements',\n 'Python 3 Requirements',\n }\n AUTHORS = {'Bence Nagy'}\n REQUIREMENTS = {PipRequirement('safety', '1.8.2')}\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL'\n CAN_DETECT = {'Security'}\n ASCIINEMA_URL = 'https://asciinema.org/a/221386'\n\n def setup_dependencies(self):\n file = self.download_cached_file(_insecure_full_json_url,\n 'insecure_full.json')\n self.download_cached_file(_insecure_json_url,\n 'insecure.json')\n type(self).db_path = os.path.dirname(file)\n\n def run(self, filename, file,\n db_path: path = '',\n cve_ignore: typed_list(str) = []):\n \"\"\"\n Checks for vulnerable package versions in requirements files.\n\n :param db_path: Path to a local vulnerability database.\n :param cve_ignore: A list of CVE number to be ignore.\n \"\"\"\n db_path = self.db_path if not db_path else db_path\n packages = list(\n Package(key=req.key, version=req.specs[0][1])\n for req in self.try_parse_requirements(file)\n if len(req.specs) == 1 and req.specs[0][0] == '=='\n )\n\n if not packages:\n return\n\n for vulnerability in safety.check(packages, key=None,\n db_mirror=db_path, cached=False,\n ignore_ids=cve_ignore):\n if 'cve' in vulnerability.vuln_id.strip().lower():\n message_template = (\n '{vuln.name}{vuln.spec} is vulnerable to {vuln.vuln_id} '\n 'and your project is using {vuln.version}.'\n )\n else:\n message_template = (\n '{vuln.name}{vuln.spec} is vulnerable to '\n 'pyup.io-{vuln.vuln_id} and your project is using '\n '{vuln.version}.'\n )\n\n # StopIteration should not ever happen so skipping its branch\n line_number, line = next( # pragma: no branch\n (index, line) for index, line in enumerate(file, start=1)\n if vulnerability.name in line\n )\n version_spec_match = re.search(r'[=<>]+(\\S+?)(?:$|\\s|#)', line)\n source_range = SourceRange.from_values(\n filename,\n 
line_number,\n version_spec_match.start(1) + 1,\n line_number,\n version_spec_match.end(1) + 1,\n )\n\n yield Result(\n self,\n message_template.format(vuln=vulnerability),\n additional_info=vulnerability.advisory,\n affected_code=(source_range, ),\n )\n\n @staticmethod\n def try_parse_requirements(lines: typed_list(str)):\n \"\"\"\n Yields all package requirements parsable from the given lines.\n\n :param lines: An iterable of lines from a requirements file.\n \"\"\"\n for line in lines:\n try:\n yield from pkg_resources.parse_requirements(line)\n except pkg_resources.RequirementParseError:\n # unsupported requirement specification\n pass\n",
"path": "bears/python/requirements/PySafetyBear.py"
}
] | diff --git a/bears/python/requirements/PySafetyBear.py b/bears/python/requirements/PySafetyBear.py
index 88bd09c94b..240e7dddf1 100644
--- a/bears/python/requirements/PySafetyBear.py
+++ b/bears/python/requirements/PySafetyBear.py
@@ -64,6 +64,7 @@ class PySafetyBear(LocalBear):
AUTHORS_EMAILS = {'[email protected]'}
LICENSE = 'AGPL'
CAN_DETECT = {'Security'}
+ ASCIINEMA_URL = 'https://asciinema.org/a/221386'
def setup_dependencies(self):
file = self.download_cached_file(_insecure_full_json_url,
|
WordPress__openverse-api-599 | Some waveform peaks have negative values
## Description
The frontend expects peaks to be in the range [0..1]; however, some peaks returned from the API can have negative values.
## Reproduction
1. Go to https://api.openverse.engineering/v1/audio/727b9979-98f1-4604-9097-0a8b6fa68f2e/
2. Look at the last values of the long array.
3. See error: `...-0.00036,-0.00099,-0.00162,0.00012,0.0]}`
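A quick way to confirm the report (a sketch assuming the `requests` package is installed and that the response field is named `peaks`):

```python
import requests

url = ("https://api.openverse.engineering/v1/audio/"
       "727b9979-98f1-4604-9097-0a8b6fa68f2e/")
peaks = requests.get(url, timeout=30).json()["peaks"]
out_of_range = [p for p in peaks if not 0 <= p <= 1]
print(len(out_of_range), "out-of-range peaks, e.g.", out_of_range[:3])
```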
## Expectation
All the peaks should be in [0..1] range.
## Additional context
We could use a hack to fix this on the frontend by setting negative values to 0 in the meantime, but the real solution should be on the API side.
Could this possibly be some leftovers from the mock waveform data?
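The fix below clamps the stray negative samples on the API side. A condensed sketch of the transformation (the zero-`max_val` guard is my defensive addition for the all-silence edge case, not part of the patch):

```python
def process_peaks(data):
    """Keep odd-indexed (positive) samples, clamp negatives, scale to [0, 1]."""
    transformed, max_val = [], 0
    for idx, val in enumerate(data):
        if idx % 2 == 0:  # even entries are the negative half of each pair
            continue
        if val < 0:       # the occasional negative "positive" sample -> clamp
            val = 0
        transformed.append(val)
        max_val = max(max_val, val)
    if max_val == 0:      # defensive guard, not in the original patch
        return transformed
    return [round(v / max_val, 5) for v in transformed]
```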
## Resolution
- [ ] 🙋 I would be interested in resolving this bug.
| [
{
"content": "import json\nimport logging\nimport math\nimport mimetypes\nimport os\nimport pathlib\nimport shutil\nimport subprocess\nfrom typing import List\n\nimport requests\n\n\nlog = logging.getLogger(__name__)\n\nTMP_DIR = pathlib.Path(\"/tmp\").resolve()\nUA_STRING = \"OpenverseWaveform/0.0 (https://wordpress.org/openverse)\"\n\n\ndef ext_from_url(url):\n \"\"\"\n Get the file extension from the given URL. Looks at the last part of the URL\n path, and returns the string after the last dot.\n\n :param url: the URL to the file whose extension is being determined\n :returns: the file extension or ``None``\n \"\"\"\n file_name = url.split(\"/\")[-1]\n if \".\" in file_name:\n ext = file_name.split(\".\")[-1]\n return f\".{ext}\"\n else:\n return None\n\n\ndef download_audio(url, identifier):\n \"\"\"\n Download the audio from the given URL to a location on the disk.\n\n :param url: the URL to the file being downloaded\n :param identifier: the identifier of the media object to name the file\n :returns: the name of the file on the disk\n \"\"\"\n log.info(f\"Downloading file at {url}\")\n\n headers = {\"User-Agent\": UA_STRING}\n with requests.get(url, stream=True, headers=headers) as res:\n log.debug(f\"Response code: {res.status_code}\")\n mimetype = res.headers[\"content-type\"]\n log.debug(f\"MIME type: {mimetype}\")\n ext = ext_from_url(url) or mimetypes.guess_extension(mimetype)\n if ext is None:\n raise ValueError(\"Could not identify media extension\")\n file_name = f\"audio-{identifier}{ext}\"\n log.debug(f\"File name: {file_name}\")\n with open(TMP_DIR.joinpath(file_name), \"wb\") as file:\n shutil.copyfileobj(res.raw, file)\n return file_name\n\n\ndef generate_waveform(file_name, duration):\n \"\"\"\n Generate the waveform for the file by invoking the ``audiowaveform`` binary.\n The Python module ``subprocess`` is used to execute the binary and get the\n results that it emits to STDOUT.\n\n :param file_name: the name of the downloaded audio file\n :param duration: the duration of the audio to determine pixels per second\n \"\"\"\n log.info(\"Invoking audiowaveform\")\n\n pps = math.ceil(1e6 / duration) # approx 1000 points in total\n args = [\n \"audiowaveform\",\n \"--input-filename\",\n file_name,\n \"--output-format\",\n \"json\",\n \"--pixels-per-second\",\n str(pps),\n ]\n log.debug(f'Command: {\" \".join(args)}')\n proc = subprocess.run(args, cwd=TMP_DIR, check=True, capture_output=True)\n log.debug(f\"Subprocess exit code: {proc.returncode}\")\n return proc.stdout\n\n\ndef process_waveform_output(json_out):\n \"\"\"\n Parse the waveform output generated by the ``audiowaveform`` binary. The\n output consists of alternating positive and negative values, that are almost\n equal in amplitude. We discard the negative values. 
We also scale down the\n amplitudes by the largest value so that they lie in the range [0, 1].\n\n :param json_out: the JSON output generated by ``audiowaveform``\n :returns: the list of peaks\n \"\"\"\n log.info(\"Transforming points\")\n\n output = json.loads(json_out)\n data = output[\"data\"]\n log.debug(f\"Original umber of points: {len(data)}\")\n\n transformed_data = []\n max_val = 0\n for idx, val in enumerate(data):\n if idx % 2 == 0:\n continue\n transformed_data.append(val)\n if val > max_val:\n max_val = val\n transformed_data = [round(val / max_val, 5) for val in transformed_data]\n log.debug(f\"Transformed number of points: {len(transformed_data)}\")\n return transformed_data\n\n\ndef cleanup(file_name):\n \"\"\"\n Delete the audio file after it has been processed.\n\n :param file_name: the name of the file to delete\n \"\"\"\n log.info(f\"Deleting {file_name}\")\n\n file_path = TMP_DIR.joinpath(file_name)\n log.debug(f\"File path: {file_path}\")\n if file_path.exists():\n log.info(f\"Deleting file {file_path}\")\n os.remove(file_path)\n else:\n log.info(\"File not found, nothing deleted\")\n\n\ndef generate_peaks(audio) -> List[float]:\n file_name = None\n try:\n file_name = download_audio(audio.url, audio.identifier)\n awf_out = generate_waveform(file_name, audio.duration)\n return process_waveform_output(awf_out)\n finally:\n if file_name is not None:\n cleanup(file_name)\n",
"path": "api/catalog/api/utils/waveform.py"
}
] | [
{
"content": "import json\nimport logging\nimport math\nimport mimetypes\nimport os\nimport pathlib\nimport shutil\nimport subprocess\nfrom typing import List\n\nimport requests\n\n\nlog = logging.getLogger(__name__)\n\nTMP_DIR = pathlib.Path(\"/tmp\").resolve()\nUA_STRING = \"OpenverseWaveform/0.0 (https://wordpress.org/openverse)\"\n\n\ndef ext_from_url(url):\n \"\"\"\n Get the file extension from the given URL. Looks at the last part of the URL\n path, and returns the string after the last dot.\n\n :param url: the URL to the file whose extension is being determined\n :returns: the file extension or ``None``\n \"\"\"\n file_name = url.split(\"/\")[-1]\n if \".\" in file_name:\n ext = file_name.split(\".\")[-1]\n return f\".{ext}\"\n else:\n return None\n\n\ndef download_audio(url, identifier):\n \"\"\"\n Download the audio from the given URL to a location on the disk.\n\n :param url: the URL to the file being downloaded\n :param identifier: the identifier of the media object to name the file\n :returns: the name of the file on the disk\n \"\"\"\n log.info(f\"Downloading file at {url}\")\n\n headers = {\"User-Agent\": UA_STRING}\n with requests.get(url, stream=True, headers=headers) as res:\n log.debug(f\"Response code: {res.status_code}\")\n mimetype = res.headers[\"content-type\"]\n log.debug(f\"MIME type: {mimetype}\")\n ext = ext_from_url(url) or mimetypes.guess_extension(mimetype)\n if ext is None:\n raise ValueError(\"Could not identify media extension\")\n file_name = f\"audio-{identifier}{ext}\"\n log.debug(f\"File name: {file_name}\")\n with open(TMP_DIR.joinpath(file_name), \"wb\") as file:\n shutil.copyfileobj(res.raw, file)\n return file_name\n\n\ndef generate_waveform(file_name, duration):\n \"\"\"\n Generate the waveform for the file by invoking the ``audiowaveform`` binary.\n The Python module ``subprocess`` is used to execute the binary and get the\n results that it emits to STDOUT.\n\n :param file_name: the name of the downloaded audio file\n :param duration: the duration of the audio to determine pixels per second\n \"\"\"\n log.info(\"Invoking audiowaveform\")\n\n pps = math.ceil(1e6 / duration) # approx 1000 points in total\n args = [\n \"audiowaveform\",\n \"--input-filename\",\n file_name,\n \"--output-format\",\n \"json\",\n \"--pixels-per-second\",\n str(pps),\n ]\n log.debug(f'Command: {\" \".join(args)}')\n proc = subprocess.run(args, cwd=TMP_DIR, check=True, capture_output=True)\n log.debug(f\"Subprocess exit code: {proc.returncode}\")\n return proc.stdout\n\n\ndef process_waveform_output(json_out):\n \"\"\"\n Parse the waveform output generated by the ``audiowaveform`` binary. The\n output consists of alternating positive and negative values, that are almost\n equal in amplitude. We discard the negative values. 
We also scale down the\n amplitudes by the largest value so that they lie in the range [0, 1].\n\n :param json_out: the JSON output generated by ``audiowaveform``\n :returns: the list of peaks\n \"\"\"\n log.info(\"Transforming points\")\n\n output = json.loads(json_out)\n data = output[\"data\"]\n log.debug(f\"Original umber of points: {len(data)}\")\n\n transformed_data = []\n max_val = 0\n for idx, val in enumerate(data):\n if idx % 2 == 0:\n continue\n if val < 0: # Any other odd values are negligible and can be ignored\n val = 0\n transformed_data.append(val)\n if val > max_val:\n max_val = val\n transformed_data = [round(val / max_val, 5) for val in transformed_data]\n log.debug(f\"Transformed number of points: {len(transformed_data)}\")\n return transformed_data\n\n\ndef cleanup(file_name):\n \"\"\"\n Delete the audio file after it has been processed.\n\n :param file_name: the name of the file to delete\n \"\"\"\n log.info(f\"Deleting {file_name}\")\n\n file_path = TMP_DIR.joinpath(file_name)\n log.debug(f\"File path: {file_path}\")\n if file_path.exists():\n log.info(f\"Deleting file {file_path}\")\n os.remove(file_path)\n else:\n log.info(\"File not found, nothing deleted\")\n\n\ndef generate_peaks(audio) -> List[float]:\n file_name = None\n try:\n file_name = download_audio(audio.url, audio.identifier)\n awf_out = generate_waveform(file_name, audio.duration)\n return process_waveform_output(awf_out)\n finally:\n if file_name is not None:\n cleanup(file_name)\n",
"path": "api/catalog/api/utils/waveform.py"
}
] | diff --git a/api/catalog/api/utils/waveform.py b/api/catalog/api/utils/waveform.py
index 821fd3c99..6a5478e7f 100644
--- a/api/catalog/api/utils/waveform.py
+++ b/api/catalog/api/utils/waveform.py
@@ -106,6 +106,8 @@ def process_waveform_output(json_out):
for idx, val in enumerate(data):
if idx % 2 == 0:
continue
+ if val < 0: # Any other odd values are negligible and can be ignored
+ val = 0
transformed_data.append(val)
if val > max_val:
max_val = val
|
ESMCI__cime-2265 | PR #2260 has caused some cime_developer tests to fail on anlworkstation during phase SETUP
The errors can be reproduced on anlworkstation by running a single cime_developer test:
`./create_test SMS.T42_T42.S --machine=anlworkstation --test-root=****`
Errors were:
```
Traceback (most recent call last):
File "./case.setup", line 58, in <module>
_main_func(__doc__)
File "./case.setup", line 55, in _main_func
case_setup(case, clean=clean, test_mode=test_mode, reset=reset)
File "scripts/lib/CIME/case_setup.py", line 233, in case_setup
run_and_log_case_status(functor, phase, caseroot=caseroot)
File "scripts/lib/CIME/utils.py", line 1447, in run_and_log_case_status
rv = func()
File "scripts/lib/CIME/case_setup.py", line 227, in <lambda>
functor = lambda: _case_setup_impl(case, caseroot, clean, test_mode, reset)
File "scripts/lib/CIME/case_setup.py", line 97, in _case_setup_impl
case.load_env()
File "scripts/lib/CIME/case.py", line 1300, in load_env
env_module.load_env(self)
File "scripts/lib/CIME/XML/env_mach_specific.py", line 78, in load_env
self._load_modules(modules_to_load, force_method=force_method)
File "scripts/lib/CIME/XML/env_mach_specific.py", line 102, in _load_modules
self._load_modules_generic(modules_to_load)
File "scripts/lib/CIME/XML/env_mach_specific.py", line 295, in _load_modules_generic
newenv[lastkey] += "\n" + line
KeyError: None
```
Do we need some settings on anlworkstation to make it work?
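The traceback points at the env-parsing loop in `_load_modules_generic`: any output line that appears before the first `KEY=VALUE` pair (for example, a banner printed while sourcing the soft/generic init script on anlworkstation) is treated as a continuation of the previous variable while `lastkey` is still `None`. A minimal reproduction with the guard from the fix:

```python
from collections import OrderedDict

def parse_env(output):
    newenv, lastkey = OrderedDict(), None
    for line in output.splitlines():
        if "=" in line:
            key, val = line.split("=", 1)
            newenv[key] = val
            lastkey = key
        elif lastkey is not None:  # previously unconditional -> KeyError: None
            newenv[lastkey] += "\n" + line
    return newenv

# A stray banner line before the first assignment no longer crashes:
print(parse_env("Loading soft environment...\nPATH=/usr/bin\nFOO=multi\nline"))
```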
| [
{
"content": "\"\"\"\nInterface to the env_mach_specific.xml file. This class inherits from EnvBase\n\"\"\"\nfrom CIME.XML.standard_module_setup import *\n\nfrom CIME.XML.env_base import EnvBase\nfrom CIME.utils import transform_vars, get_cime_root\nimport string, resource\nfrom collections import OrderedDict\n\nlogger = logging.getLogger(__name__)\n\n# Is not of type EntryID but can use functions from EntryID (e.g\n# get_type) otherwise need to implement own functions and make GenericXML parent class\nclass EnvMachSpecific(EnvBase):\n # pylint: disable=unused-argument\n def __init__(self, caseroot=None, infile=\"env_mach_specific.xml\",\n components=None, unit_testing=False):\n \"\"\"\n initialize an object interface to file env_mach_specific.xml in the case directory\n \"\"\"\n schema = os.path.join(get_cime_root(), \"config\", \"xml_schemas\", \"env_mach_specific.xsd\")\n EnvBase.__init__(self, caseroot, infile, schema=schema)\n self._allowed_mpi_attributes = (\"compiler\", \"mpilib\", \"threaded\", \"unit_testing\")\n self._unit_testing = unit_testing\n\n def populate(self, machobj):\n \"\"\"Add entries to the file using information from a Machines object.\"\"\"\n items = (\"module_system\", \"environment_variables\", \"resource_limits\", \"mpirun\", \"run_exe\",\"run_misc_suffix\")\n default_run_suffix = machobj.get_child(\"default_run_suffix\", root=machobj.root)\n default_run_exe_node = machobj.get_child(\"default_run_exe\", root=default_run_suffix)\n default_run_misc_suffix_node = machobj.get_child(\"default_run_misc_suffix\", root=default_run_suffix)\n\n group_node = self.make_child(\"group\", {\"id\":\"compliant_values\"})\n\n for item in items:\n nodes = machobj.get_first_child_nodes(item)\n if item == \"run_exe\" or item == \"run_misc_suffix\":\n if len(nodes) == 0:\n value = self.text(default_run_exe_node) if item == \"run_exe\" else self.text(default_run_misc_suffix_node)\n else:\n value = nodes[0].text\n\n entity_node = self.make_child(\"entry\", {\"id\":item, \"value\":value}, root=group_node)\n\n self.make_child(\"type\", root=entity_node, text=\"char\")\n self.make_child(\"desc\", root=entity_node, text=(\"executable name\" if item == \"run_exe\" else \"redirect for job output\"))\n\n else:\n for node in nodes:\n self.add_child(node)\n\n def _get_modules_for_case(self, case):\n module_nodes = self.get_children(\"modules\", root=self.get_child(\"module_system\"))\n modules_to_load = None\n if module_nodes is not None:\n modules_to_load = self._compute_module_actions(module_nodes, case)\n\n return modules_to_load\n\n def _get_envs_for_case(self, case):\n env_nodes = self.get_children(\"environment_variables\")\n\n envs_to_set = None\n if env_nodes is not None:\n envs_to_set = self._compute_env_actions(env_nodes, case)\n\n return envs_to_set\n\n def load_env(self, case, force_method=None):\n \"\"\"\n Should only be called by case.load_env\n \"\"\"\n # Do the modules so we can refer to env vars set by the modules\n # in the environment_variables block\n modules_to_load = self._get_modules_for_case(case)\n if (modules_to_load is not None):\n self._load_modules(modules_to_load, force_method=force_method)\n\n envs_to_set = self._get_envs_for_case(case)\n if (envs_to_set is not None):\n self._load_envs(envs_to_set)\n\n self._get_resources_for_case(case)\n\n def _get_resources_for_case(self, case):\n resource_nodes = self.get_children(\"resource_limits\")\n if resource_nodes is not None:\n nodes = self._compute_resource_actions(resource_nodes, case)\n for name, val in nodes:\n attr 
= getattr(resource, name)\n limits = resource.getrlimit(attr)\n logger.info(\"Setting resource.{} to {} from {}\".format(name, val, limits))\n limits = (int(val), limits[1])\n resource.setrlimit(attr, limits)\n\n def _load_modules(self, modules_to_load, force_method=None):\n module_system = self.get_module_system_type() if force_method is None else force_method\n if (module_system == \"module\"):\n self._load_module_modules(modules_to_load)\n elif (module_system == \"soft\"):\n self._load_modules_generic(modules_to_load)\n elif (module_system == \"generic\"):\n self._load_modules_generic(modules_to_load)\n elif (module_system == \"none\"):\n self._load_none_modules(modules_to_load)\n else:\n expect(False, \"Unhandled module system '{}'\".format(module_system))\n\n def list_modules(self):\n module_system = self.get_module_system_type()\n\n # If the user's login shell is not sh, it's possible that modules\n # won't be configured so we need to be sure to source the module\n # setup script if it exists.\n init_path = self.get_module_system_init_path(\"sh\")\n if init_path:\n source_cmd = \"source {} && \".format(init_path)\n else:\n source_cmd = \"\"\n\n if (module_system in [\"module\"]):\n return run_cmd_no_fail(\"{}module list\".format(source_cmd), combine_output=True)\n elif (module_system == \"soft\"):\n # Does soft really not provide this capability?\n return \"\"\n elif (module_system == \"generic\"):\n return run_cmd_no_fail(\"{}use -lv\".format(source_cmd))\n elif (module_system == \"none\"):\n return \"\"\n else:\n expect(False, \"Unhandled module system '{}'\".format(module_system))\n\n def save_all_env_info(self, filename):\n \"\"\"\n Get a string representation of all current environment info and\n save it to file.\n \"\"\"\n with open(filename, \"w\") as f:\n f.write(self.list_modules())\n run_cmd_no_fail(\"echo -e '\\n' && env\", arg_stdout=filename)\n\n def make_env_mach_specific_file(self, shell, case):\n modules_to_load = self._get_modules_for_case(case)\n envs_to_set = self._get_envs_for_case(case)\n filename = \".env_mach_specific.{}\".format(shell)\n lines = []\n if modules_to_load is not None:\n lines.extend(self._get_module_commands(modules_to_load, shell))\n\n if envs_to_set is not None:\n for env_name, env_value in envs_to_set:\n if shell == \"sh\":\n lines.append(\"export {}={}\".format(env_name, env_value))\n elif shell == \"csh\":\n lines.append(\"setenv {} {}\".format(env_name, env_value))\n else:\n expect(False, \"Unknown shell type: '{}'\".format(shell))\n\n with open(filename, \"w\") as fd:\n fd.write(\"\\n\".join(lines))\n\n def _load_envs(self, envs_to_set):\n for env_name, env_value in envs_to_set:\n os.environ[env_name] = \"\" if env_value is None else env_value\n\n # Private API\n\n def _compute_module_actions(self, module_nodes, case):\n return self._compute_actions(module_nodes, \"command\", case)\n\n def _compute_env_actions(self, env_nodes, case):\n return self._compute_actions(env_nodes, \"env\", case)\n\n def _compute_resource_actions(self, resource_nodes, case):\n return self._compute_actions(resource_nodes, \"resource\", case)\n\n def _compute_actions(self, nodes, child_tag, case):\n result = [] # list of tuples (\"name\", \"argument\")\n compiler, mpilib = case.get_value(\"COMPILER\"), case.get_value(\"MPILIB\")\n\n for node in nodes:\n if (self._match_attribs(self.attrib(node), case)):\n for child in self.get_children(root=node):\n expect(self.name(child) == child_tag, \"Expected {} element\".format(child_tag))\n if 
(self._match_attribs(self.attrib(child), case)):\n val = self.text(child)\n if val is not None:\n # We allow a couple special substitutions for these fields\n for repl_this, repl_with in [(\"$COMPILER\", compiler), (\"$MPILIB\", mpilib)]:\n val = val.replace(repl_this, repl_with)\n\n val = self.get_resolved_value(val)\n expect(\"$\" not in val, \"Not safe to leave unresolved items in env var value: '{}'\".format(val))\n\n # intentional unindent, result is appended even if val is None\n result.append( (self.get(child, \"name\"), val) )\n\n return result\n\n def _match_attribs(self, attribs, case):\n # check for matches with case-vars\n for attrib in attribs:\n if attrib == \"unit_testing\": # special case\n if not self._match(self._unit_testing, attribs[\"unit_testing\"].upper()):\n return False\n elif attrib == \"name\":\n pass\n else:\n val = case.get_value(attrib.upper())\n expect(val is not None, \"Cannot match attrib '%s', case has no value for it\" % attrib.upper())\n if not self._match(val, attribs[attrib]):\n return False\n\n return True\n\n def _match(self, my_value, xml_value):\n if xml_value.startswith(\"!\"):\n result = re.match(xml_value[1:],str(my_value)) is None\n elif isinstance(my_value, bool):\n if my_value: result = xml_value == \"TRUE\"\n else: result = xml_value == \"FALSE\"\n else:\n result = re.match(xml_value,str(my_value)) is not None\n\n logger.debug(\"(env_mach_specific) _match {} {} {}\".format(my_value, xml_value, result))\n return result\n\n def _get_module_commands(self, modules_to_load, shell):\n # Note this is independent of module system type\n mod_cmd = self.get_module_system_cmd_path(shell)\n cmds = []\n last_action = None\n last_cmd = None\n\n for action, argument in modules_to_load:\n if argument is None:\n argument = \"\"\n\n if action == last_action:\n last_cmd = \"{} {}\".format(last_cmd, argument)\n else:\n if last_cmd is not None:\n cmds.append(last_cmd)\n\n last_cmd = \"{} {} {}\".format(mod_cmd, action, \"\" if argument is None else argument)\n last_action = action\n\n if last_cmd:\n cmds.append(last_cmd)\n\n return cmds\n\n def _load_module_modules(self, modules_to_load):\n for cmd in self._get_module_commands(modules_to_load, \"python\"):\n logger.debug(\"module command is {}\".format(cmd))\n stat, py_module_code, errout = run_cmd(cmd)\n expect(stat==0 and len(errout) == 0,\n \"module command {} failed with message:\\n{}\".format(cmd, errout))\n exec(py_module_code)\n\n def _load_modules_generic(self, modules_to_load):\n sh_init_cmd = self.get_module_system_init_path(\"sh\")\n sh_mod_cmd = self.get_module_system_cmd_path(\"sh\")\n\n # Purpose is for environment management system that does not have\n # a python interface and therefore can only determine what they\n # do by running shell command and looking at the changes\n # in the environment.\n\n cmd = \"source {}\".format(sh_init_cmd)\n\n if \"SOFTENV_ALIASES\" in os.environ:\n cmd += \" && source $SOFTENV_ALIASES\"\n if \"SOFTENV_LOAD\" in os.environ:\n cmd += \" && source $SOFTENV_LOAD\"\n\n for action,argument in modules_to_load:\n cmd += \" && {} {} {}\".format(sh_mod_cmd, action, \"\" if argument is None else argument)\n\n cmd += \" && env\"\n output = run_cmd_no_fail(cmd)\n\n ###################################################\n # Parse the output to set the os.environ dictionary\n ###################################################\n newenv = OrderedDict()\n lastkey = None\n for line in output.splitlines():\n if \"=\" in line:\n key, val = line.split(\"=\", 1)\n newenv[key] = val\n 
lastkey = key\n else:\n newenv[lastkey] += \"\\n\" + line\n\n # resolve variables\n for key, val in newenv.items():\n newenv[key] = string.Template(val).safe_substitute(newenv)\n\n # Set environment with new or updated values\n for key in newenv:\n if key in os.environ and os.environ[key] == newenv[key]:\n pass\n else:\n os.environ[key] = newenv[key]\n\n for oldkey in list(os.environ.keys()):\n if oldkey not in newenv:\n del os.environ[oldkey]\n\n def _load_none_modules(self, modules_to_load):\n \"\"\"\n No Action required\n \"\"\"\n expect(not modules_to_load,\n \"Module system was specified as 'none' yet there are modules that need to be loaded?\")\n\n def _mach_specific_header(self, shell):\n '''\n write a shell module file for this case.\n '''\n header = '''\n#!/usr/bin/env {}\n#===============================================================================\n# Automatically generated module settings for $self->{{machine}}\n# DO NOT EDIT THIS FILE DIRECTLY! Please edit env_mach_specific.xml\n# in your CASEROOT. This file is overwritten every time modules are loaded!\n#===============================================================================\n'''.format(shell)\n header += \"source {}\".format(self.get_module_system_init_path(shell))\n return header\n\n def get_module_system_type(self):\n \"\"\"\n Return the module system used on this machine\n \"\"\"\n module_system = self.get_child(\"module_system\")\n return self.get(module_system, \"type\")\n\n def get_module_system_init_path(self, lang):\n init_nodes = self.get_optional_child(\"init_path\", attributes={\"lang\":lang}, root=self.get_child(\"module_system\"))\n return self.text(init_nodes) if init_nodes is not None else None\n\n def get_module_system_cmd_path(self, lang):\n cmd_nodes = self.get_optional_child(\"cmd_path\", attributes={\"lang\":lang}, root=self.get_child(\"module_system\"))\n return self.text(cmd_nodes) if cmd_nodes is not None else None\n\n def get_mpirun(self, case, attribs, job=\"case.run\", exe_only=False):\n \"\"\"\n Find best match, return (executable, {arg_name : text})\n \"\"\"\n mpirun_nodes = self.get_children(\"mpirun\")\n best_match = None\n best_num_matched = -1\n default_match = None\n best_num_matched_default = -1\n args = []\n for mpirun_node in mpirun_nodes:\n xml_attribs = self.attrib(mpirun_node)\n all_match = True\n matches = 0\n is_default = False\n\n for key, value in attribs.items():\n expect(key in self._allowed_mpi_attributes, \"Unexpected key {} in mpirun attributes\".format(key))\n if key in xml_attribs:\n if xml_attribs[key].lower() == \"false\":\n xml_attrib = False\n elif xml_attribs[key].lower() == \"true\":\n xml_attrib = True\n else:\n xml_attrib = xml_attribs[key]\n\n if xml_attrib == value:\n matches += 1\n elif key == \"mpilib\" and value != \"mpi-serial\" and xml_attrib == \"default\":\n is_default = True\n else:\n all_match = False\n break\n\n if all_match:\n if is_default:\n if matches > best_num_matched_default:\n default_match = mpirun_node\n best_num_matched_default = matches\n else:\n if matches > best_num_matched:\n best_match = mpirun_node\n best_num_matched = matches\n\n # if there are no special arguments required for mpi-serial it need not have an entry in config_machines.xml\n if \"mpilib\" in attribs and attribs[\"mpilib\"] == \"mpi-serial\" and best_match is None:\n return \"\",[]\n\n expect(best_match is not None or default_match is not None,\n \"Could not find a matching MPI for attributes: {}\".format(attribs))\n\n the_match = best_match if best_match is not 
None else default_match\n\n # Now that we know the best match, compute the arguments\n if not exe_only:\n arg_node = self.get_optional_child(\"arguments\", root=the_match)\n if arg_node is not None:\n arg_nodes = self.get_children(\"arg\", root=arg_node)\n for arg_node in arg_nodes:\n arg_value = transform_vars(self.text(arg_node),\n case=case,\n subgroup=job,\n default=self.get(arg_node, \"default\"))\n args.append(arg_value)\n\n exec_node = self.get_child(\"executable\", root=the_match)\n expect(exec_node is not None,\"No executable found\")\n executable = self.text(exec_node)\n\n return executable, args\n",
"path": "scripts/lib/CIME/XML/env_mach_specific.py"
}
] | [
{
"content": "\"\"\"\nInterface to the env_mach_specific.xml file. This class inherits from EnvBase\n\"\"\"\nfrom CIME.XML.standard_module_setup import *\n\nfrom CIME.XML.env_base import EnvBase\nfrom CIME.utils import transform_vars, get_cime_root\nimport string, resource\nfrom collections import OrderedDict\n\nlogger = logging.getLogger(__name__)\n\n# Is not of type EntryID but can use functions from EntryID (e.g\n# get_type) otherwise need to implement own functions and make GenericXML parent class\nclass EnvMachSpecific(EnvBase):\n # pylint: disable=unused-argument\n def __init__(self, caseroot=None, infile=\"env_mach_specific.xml\",\n components=None, unit_testing=False):\n \"\"\"\n initialize an object interface to file env_mach_specific.xml in the case directory\n \"\"\"\n schema = os.path.join(get_cime_root(), \"config\", \"xml_schemas\", \"env_mach_specific.xsd\")\n EnvBase.__init__(self, caseroot, infile, schema=schema)\n self._allowed_mpi_attributes = (\"compiler\", \"mpilib\", \"threaded\", \"unit_testing\")\n self._unit_testing = unit_testing\n\n def populate(self, machobj):\n \"\"\"Add entries to the file using information from a Machines object.\"\"\"\n items = (\"module_system\", \"environment_variables\", \"resource_limits\", \"mpirun\", \"run_exe\",\"run_misc_suffix\")\n default_run_suffix = machobj.get_child(\"default_run_suffix\", root=machobj.root)\n default_run_exe_node = machobj.get_child(\"default_run_exe\", root=default_run_suffix)\n default_run_misc_suffix_node = machobj.get_child(\"default_run_misc_suffix\", root=default_run_suffix)\n\n group_node = self.make_child(\"group\", {\"id\":\"compliant_values\"})\n\n for item in items:\n nodes = machobj.get_first_child_nodes(item)\n if item == \"run_exe\" or item == \"run_misc_suffix\":\n if len(nodes) == 0:\n value = self.text(default_run_exe_node) if item == \"run_exe\" else self.text(default_run_misc_suffix_node)\n else:\n value = nodes[0].text\n\n entity_node = self.make_child(\"entry\", {\"id\":item, \"value\":value}, root=group_node)\n\n self.make_child(\"type\", root=entity_node, text=\"char\")\n self.make_child(\"desc\", root=entity_node, text=(\"executable name\" if item == \"run_exe\" else \"redirect for job output\"))\n\n else:\n for node in nodes:\n self.add_child(node)\n\n def _get_modules_for_case(self, case):\n module_nodes = self.get_children(\"modules\", root=self.get_child(\"module_system\"))\n modules_to_load = None\n if module_nodes is not None:\n modules_to_load = self._compute_module_actions(module_nodes, case)\n\n return modules_to_load\n\n def _get_envs_for_case(self, case):\n env_nodes = self.get_children(\"environment_variables\")\n\n envs_to_set = None\n if env_nodes is not None:\n envs_to_set = self._compute_env_actions(env_nodes, case)\n\n return envs_to_set\n\n def load_env(self, case, force_method=None):\n \"\"\"\n Should only be called by case.load_env\n \"\"\"\n # Do the modules so we can refer to env vars set by the modules\n # in the environment_variables block\n modules_to_load = self._get_modules_for_case(case)\n if (modules_to_load is not None):\n self._load_modules(modules_to_load, force_method=force_method)\n\n envs_to_set = self._get_envs_for_case(case)\n if (envs_to_set is not None):\n self._load_envs(envs_to_set)\n\n self._get_resources_for_case(case)\n\n def _get_resources_for_case(self, case):\n resource_nodes = self.get_children(\"resource_limits\")\n if resource_nodes is not None:\n nodes = self._compute_resource_actions(resource_nodes, case)\n for name, val in nodes:\n attr 
= getattr(resource, name)\n limits = resource.getrlimit(attr)\n logger.info(\"Setting resource.{} to {} from {}\".format(name, val, limits))\n limits = (int(val), limits[1])\n resource.setrlimit(attr, limits)\n\n def _load_modules(self, modules_to_load, force_method=None):\n module_system = self.get_module_system_type() if force_method is None else force_method\n if (module_system == \"module\"):\n self._load_module_modules(modules_to_load)\n elif (module_system == \"soft\"):\n self._load_modules_generic(modules_to_load)\n elif (module_system == \"generic\"):\n self._load_modules_generic(modules_to_load)\n elif (module_system == \"none\"):\n self._load_none_modules(modules_to_load)\n else:\n expect(False, \"Unhandled module system '{}'\".format(module_system))\n\n def list_modules(self):\n module_system = self.get_module_system_type()\n\n # If the user's login shell is not sh, it's possible that modules\n # won't be configured so we need to be sure to source the module\n # setup script if it exists.\n init_path = self.get_module_system_init_path(\"sh\")\n if init_path:\n source_cmd = \"source {} && \".format(init_path)\n else:\n source_cmd = \"\"\n\n if (module_system in [\"module\"]):\n return run_cmd_no_fail(\"{}module list\".format(source_cmd), combine_output=True)\n elif (module_system == \"soft\"):\n # Does soft really not provide this capability?\n return \"\"\n elif (module_system == \"generic\"):\n return run_cmd_no_fail(\"{}use -lv\".format(source_cmd))\n elif (module_system == \"none\"):\n return \"\"\n else:\n expect(False, \"Unhandled module system '{}'\".format(module_system))\n\n def save_all_env_info(self, filename):\n \"\"\"\n Get a string representation of all current environment info and\n save it to file.\n \"\"\"\n with open(filename, \"w\") as f:\n f.write(self.list_modules())\n run_cmd_no_fail(\"echo -e '\\n' && env\", arg_stdout=filename)\n\n def make_env_mach_specific_file(self, shell, case):\n modules_to_load = self._get_modules_for_case(case)\n envs_to_set = self._get_envs_for_case(case)\n filename = \".env_mach_specific.{}\".format(shell)\n lines = []\n if modules_to_load is not None:\n lines.extend(self._get_module_commands(modules_to_load, shell))\n\n if envs_to_set is not None:\n for env_name, env_value in envs_to_set:\n if shell == \"sh\":\n lines.append(\"export {}={}\".format(env_name, env_value))\n elif shell == \"csh\":\n lines.append(\"setenv {} {}\".format(env_name, env_value))\n else:\n expect(False, \"Unknown shell type: '{}'\".format(shell))\n\n with open(filename, \"w\") as fd:\n fd.write(\"\\n\".join(lines))\n\n def _load_envs(self, envs_to_set):\n for env_name, env_value in envs_to_set:\n os.environ[env_name] = \"\" if env_value is None else env_value\n\n # Private API\n\n def _compute_module_actions(self, module_nodes, case):\n return self._compute_actions(module_nodes, \"command\", case)\n\n def _compute_env_actions(self, env_nodes, case):\n return self._compute_actions(env_nodes, \"env\", case)\n\n def _compute_resource_actions(self, resource_nodes, case):\n return self._compute_actions(resource_nodes, \"resource\", case)\n\n def _compute_actions(self, nodes, child_tag, case):\n result = [] # list of tuples (\"name\", \"argument\")\n compiler, mpilib = case.get_value(\"COMPILER\"), case.get_value(\"MPILIB\")\n\n for node in nodes:\n if (self._match_attribs(self.attrib(node), case)):\n for child in self.get_children(root=node):\n expect(self.name(child) == child_tag, \"Expected {} element\".format(child_tag))\n if 
(self._match_attribs(self.attrib(child), case)):\n val = self.text(child)\n if val is not None:\n # We allow a couple special substitutions for these fields\n for repl_this, repl_with in [(\"$COMPILER\", compiler), (\"$MPILIB\", mpilib)]:\n val = val.replace(repl_this, repl_with)\n\n val = self.get_resolved_value(val)\n expect(\"$\" not in val, \"Not safe to leave unresolved items in env var value: '{}'\".format(val))\n\n # intentional unindent, result is appended even if val is None\n result.append( (self.get(child, \"name\"), val) )\n\n return result\n\n def _match_attribs(self, attribs, case):\n # check for matches with case-vars\n for attrib in attribs:\n if attrib == \"unit_testing\": # special case\n if not self._match(self._unit_testing, attribs[\"unit_testing\"].upper()):\n return False\n elif attrib == \"name\":\n pass\n else:\n val = case.get_value(attrib.upper())\n expect(val is not None, \"Cannot match attrib '%s', case has no value for it\" % attrib.upper())\n if not self._match(val, attribs[attrib]):\n return False\n\n return True\n\n def _match(self, my_value, xml_value):\n if xml_value.startswith(\"!\"):\n result = re.match(xml_value[1:],str(my_value)) is None\n elif isinstance(my_value, bool):\n if my_value: result = xml_value == \"TRUE\"\n else: result = xml_value == \"FALSE\"\n else:\n result = re.match(xml_value,str(my_value)) is not None\n\n logger.debug(\"(env_mach_specific) _match {} {} {}\".format(my_value, xml_value, result))\n return result\n\n def _get_module_commands(self, modules_to_load, shell):\n # Note this is independent of module system type\n mod_cmd = self.get_module_system_cmd_path(shell)\n cmds = []\n last_action = None\n last_cmd = None\n\n for action, argument in modules_to_load:\n if argument is None:\n argument = \"\"\n\n if action == last_action:\n last_cmd = \"{} {}\".format(last_cmd, argument)\n else:\n if last_cmd is not None:\n cmds.append(last_cmd)\n\n last_cmd = \"{} {} {}\".format(mod_cmd, action, \"\" if argument is None else argument)\n last_action = action\n\n if last_cmd:\n cmds.append(last_cmd)\n\n return cmds\n\n def _load_module_modules(self, modules_to_load):\n for cmd in self._get_module_commands(modules_to_load, \"python\"):\n logger.debug(\"module command is {}\".format(cmd))\n stat, py_module_code, errout = run_cmd(cmd)\n expect(stat==0 and len(errout) == 0,\n \"module command {} failed with message:\\n{}\".format(cmd, errout))\n exec(py_module_code)\n\n def _load_modules_generic(self, modules_to_load):\n sh_init_cmd = self.get_module_system_init_path(\"sh\")\n sh_mod_cmd = self.get_module_system_cmd_path(\"sh\")\n\n # Purpose is for environment management system that does not have\n # a python interface and therefore can only determine what they\n # do by running shell command and looking at the changes\n # in the environment.\n\n cmd = \"source {}\".format(sh_init_cmd)\n\n if \"SOFTENV_ALIASES\" in os.environ:\n cmd += \" && source $SOFTENV_ALIASES\"\n if \"SOFTENV_LOAD\" in os.environ:\n cmd += \" && source $SOFTENV_LOAD\"\n\n for action,argument in modules_to_load:\n cmd += \" && {} {} {}\".format(sh_mod_cmd, action, \"\" if argument is None else argument)\n\n cmd += \" && env\"\n output = run_cmd_no_fail(cmd)\n\n ###################################################\n # Parse the output to set the os.environ dictionary\n ###################################################\n newenv = OrderedDict()\n lastkey = None\n for line in output.splitlines():\n if \"=\" in line:\n key, val = line.split(\"=\", 1)\n newenv[key] = val\n 
lastkey = key\n elif lastkey is not None:\n newenv[lastkey] += \"\\n\" + line\n\n # resolve variables\n for key, val in newenv.items():\n newenv[key] = string.Template(val).safe_substitute(newenv)\n\n # Set environment with new or updated values\n for key in newenv:\n if key in os.environ and os.environ[key] == newenv[key]:\n pass\n else:\n os.environ[key] = newenv[key]\n\n for oldkey in list(os.environ.keys()):\n if oldkey not in newenv:\n del os.environ[oldkey]\n\n def _load_none_modules(self, modules_to_load):\n \"\"\"\n No Action required\n \"\"\"\n expect(not modules_to_load,\n \"Module system was specified as 'none' yet there are modules that need to be loaded?\")\n\n def _mach_specific_header(self, shell):\n '''\n write a shell module file for this case.\n '''\n header = '''\n#!/usr/bin/env {}\n#===============================================================================\n# Automatically generated module settings for $self->{{machine}}\n# DO NOT EDIT THIS FILE DIRECTLY! Please edit env_mach_specific.xml\n# in your CASEROOT. This file is overwritten every time modules are loaded!\n#===============================================================================\n'''.format(shell)\n header += \"source {}\".format(self.get_module_system_init_path(shell))\n return header\n\n def get_module_system_type(self):\n \"\"\"\n Return the module system used on this machine\n \"\"\"\n module_system = self.get_child(\"module_system\")\n return self.get(module_system, \"type\")\n\n def get_module_system_init_path(self, lang):\n init_nodes = self.get_optional_child(\"init_path\", attributes={\"lang\":lang}, root=self.get_child(\"module_system\"))\n return self.text(init_nodes) if init_nodes is not None else None\n\n def get_module_system_cmd_path(self, lang):\n cmd_nodes = self.get_optional_child(\"cmd_path\", attributes={\"lang\":lang}, root=self.get_child(\"module_system\"))\n return self.text(cmd_nodes) if cmd_nodes is not None else None\n\n def get_mpirun(self, case, attribs, job=\"case.run\", exe_only=False):\n \"\"\"\n Find best match, return (executable, {arg_name : text})\n \"\"\"\n mpirun_nodes = self.get_children(\"mpirun\")\n best_match = None\n best_num_matched = -1\n default_match = None\n best_num_matched_default = -1\n args = []\n for mpirun_node in mpirun_nodes:\n xml_attribs = self.attrib(mpirun_node)\n all_match = True\n matches = 0\n is_default = False\n\n for key, value in attribs.items():\n expect(key in self._allowed_mpi_attributes, \"Unexpected key {} in mpirun attributes\".format(key))\n if key in xml_attribs:\n if xml_attribs[key].lower() == \"false\":\n xml_attrib = False\n elif xml_attribs[key].lower() == \"true\":\n xml_attrib = True\n else:\n xml_attrib = xml_attribs[key]\n\n if xml_attrib == value:\n matches += 1\n elif key == \"mpilib\" and value != \"mpi-serial\" and xml_attrib == \"default\":\n is_default = True\n else:\n all_match = False\n break\n\n if all_match:\n if is_default:\n if matches > best_num_matched_default:\n default_match = mpirun_node\n best_num_matched_default = matches\n else:\n if matches > best_num_matched:\n best_match = mpirun_node\n best_num_matched = matches\n\n # if there are no special arguments required for mpi-serial it need not have an entry in config_machines.xml\n if \"mpilib\" in attribs and attribs[\"mpilib\"] == \"mpi-serial\" and best_match is None:\n return \"\",[]\n\n expect(best_match is not None or default_match is not None,\n \"Could not find a matching MPI for attributes: {}\".format(attribs))\n\n the_match = best_match if 
best_match is not None else default_match\n\n # Now that we know the best match, compute the arguments\n if not exe_only:\n arg_node = self.get_optional_child(\"arguments\", root=the_match)\n if arg_node is not None:\n arg_nodes = self.get_children(\"arg\", root=arg_node)\n for arg_node in arg_nodes:\n arg_value = transform_vars(self.text(arg_node),\n case=case,\n subgroup=job,\n default=self.get(arg_node, \"default\"))\n args.append(arg_value)\n\n exec_node = self.get_child(\"executable\", root=the_match)\n expect(exec_node is not None,\"No executable found\")\n executable = self.text(exec_node)\n\n return executable, args\n",
"path": "scripts/lib/CIME/XML/env_mach_specific.py"
}
] | diff --git a/scripts/lib/CIME/XML/env_mach_specific.py b/scripts/lib/CIME/XML/env_mach_specific.py
index 990f0322ddf..cb5beec95c9 100644
--- a/scripts/lib/CIME/XML/env_mach_specific.py
+++ b/scripts/lib/CIME/XML/env_mach_specific.py
@@ -291,7 +291,7 @@ def _load_modules_generic(self, modules_to_load):
key, val = line.split("=", 1)
newenv[key] = val
lastkey = key
- else:
+ elif lastkey is not None:
newenv[lastkey] += "\n" + line
# resolve variables
|
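For context on the one-line fix above: if the first lines of the captured `env` output contain no `=` (a login banner, for instance), `lastkey` is still `None` when the continuation branch runs, so the old bare `else:` attempted `newenv[None] += ...` and raised a `KeyError`. A self-contained sketch of the parsing loop, using a hypothetical sample of `env` output:

```python
from collections import OrderedDict

# Hypothetical sample of `env` output: a banner line with no "=",
# a plain variable, and a variable whose value spans two lines.
output = "Welcome to the login node\nPATH=/usr/bin\nMULTI=first\nsecond"

newenv = OrderedDict()
lastkey = None
for line in output.splitlines():
    if "=" in line:
        key, val = line.split("=", 1)
        newenv[key] = val
        lastkey = key
    elif lastkey is not None:  # the old code used a bare `else:` here
        # Continuation of a multi-line value; only safe once a key exists.
        newenv[lastkey] += "\n" + line

assert newenv["MULTI"] == "first\nsecond"
assert "Welcome to the login node" not in newenv  # banner line is dropped
```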
svthalia__concrexit-3485 | Increase profile retention period
### What?
Currently, Profiles are data-minimised one month after the member's last membership ends.
We should increase that period to 3 months. I have discussed this with @JeeVee11.
### Why?
It happens quite often that people forget to renew their membership and only get around to it after the first of October. In those cases, their profile will currently already have been wiped. To make it easier for those people to still renew, we should extend the retention period.
### How?
Change the `deletion_period` constant in `execute_data_minimisation` in `members/services.py` (see the sketch below), and update the privacy policy.
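For reference, a minimal sketch of the intended change, based on the `deletion_period` line in `execute_data_minimisation` (`members/services.py`, included below); 90 days is one reasonable reading of "3 months":

```python
from django.utils import timezone

# website/members/services.py, inside execute_data_minimisation():
# current behaviour -- profiles are wiped 31 days after the last membership:
deletion_period = timezone.now().date() - timezone.timedelta(days=31)
# proposed -- keep profile data for roughly three months instead:
deletion_period = timezone.now().date() - timezone.timedelta(days=90)
```

The wording in the privacy policy template would need the matching update.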
| [
{
"content": "\"\"\"Services defined in the members package.\"\"\"\nfrom collections.abc import Callable\nfrom datetime import date\nfrom typing import Any\n\nfrom django.conf import settings\nfrom django.db.models import Count, Q\nfrom django.utils import timezone\n\nfrom members import emails\nfrom members.models import Member, Membership\nfrom utils.snippets import datetime_to_lectureyear\n\n\ndef _member_group_memberships(\n member: Member, condition: Callable[[Membership], bool]\n) -> dict[str, dict[str, Any]]:\n \"\"\"Determine the group membership of a user based on a condition.\n\n :return: Object with group memberships\n \"\"\"\n memberships = member.membergroupmembership_set.all()\n data = {}\n\n for membership in memberships:\n if not condition(membership):\n continue\n period = {\n \"since\": membership.since,\n \"until\": membership.until,\n \"chair\": membership.chair,\n }\n\n if hasattr(membership.group, \"board\"):\n period[\"role\"] = membership.role\n\n if membership.until is None and hasattr(membership.group, \"board\"):\n period[\"until\"] = membership.group.board.until\n\n name = membership.group.name\n if data.get(name):\n data[name][\"periods\"].append(period)\n if data[name][\"earliest\"] > period[\"since\"]:\n data[name][\"earliest\"] = period[\"since\"]\n if period[\"until\"] is None or (\n data[name][\"latest\"] is not None\n and data[name][\"latest\"] < period[\"until\"]\n ):\n data[name][\"latest\"] = period[\"until\"]\n data[name][\"periods\"].sort(key=lambda x: x[\"since\"])\n else:\n data[name] = {\n \"pk\": membership.group.pk,\n \"active\": membership.group.active,\n \"name\": name,\n \"periods\": [period],\n \"url\": settings.BASE_URL + membership.group.get_absolute_url(),\n \"earliest\": period[\"since\"],\n \"latest\": period[\"until\"],\n }\n return data\n\n\ndef member_achievements(member) -> list:\n \"\"\"Derive a list of achievements of a member.\n\n Committee and board memberships + mentorships\n \"\"\"\n achievements = _member_group_memberships(\n member,\n lambda membership: (\n hasattr(membership.group, \"board\") or hasattr(membership.group, \"committee\")\n ),\n )\n\n mentor_years = member.mentorship_set.all()\n for mentor_year in mentor_years:\n name = f\"Mentor in {mentor_year.year}\"\n # Ensure mentorships appear last but are sorted\n earliest = date.today()\n earliest = earliest.replace(year=earliest.year + mentor_year.year)\n # Making sure it does not crash in leap years\n if earliest.month == 2 and earliest.day == 29:\n earliest = earliest.replace(day=28)\n if not achievements.get(name):\n achievements[name] = {\n \"name\": name,\n \"earliest\": earliest,\n }\n return sorted(achievements.values(), key=lambda x: x[\"earliest\"])\n\n\ndef member_societies(member) -> list:\n \"\"\"Derive a list of societies a member was part of.\"\"\"\n societies = _member_group_memberships(\n member, lambda membership: (hasattr(membership.group, \"society\"))\n )\n return sorted(societies.values(), key=lambda x: x[\"earliest\"])\n\n\ndef gen_stats_member_type() -> dict[str, list]:\n \"\"\"Generate statistics about membership types.\"\"\"\n data = {\n \"labels\": [],\n \"datasets\": [\n {\"data\": []},\n ],\n }\n\n for key, display in Membership.MEMBERSHIP_TYPES:\n data[\"labels\"].append(str(display))\n data[\"datasets\"][0][\"data\"].append(\n Membership.objects.filter(since__lte=date.today())\n .filter(Q(until__isnull=True) | Q(until__gt=date.today()))\n .filter(type=key)\n .count()\n )\n\n return data\n\n\ndef gen_stats_year() -> dict[str, list]:\n 
\"\"\"Generate statistics on how many members (and other membership types) there were in each cohort.\"\"\"\n years = range(2015, datetime_to_lectureyear(date.today()))\n\n data = {\n \"labels\": list(years),\n \"datasets\": [\n {\"label\": str(display), \"data\": []}\n for _, display in Membership.MEMBERSHIP_TYPES\n ],\n }\n\n for index, (key, _) in enumerate(Membership.MEMBERSHIP_TYPES):\n for year in years:\n data[\"datasets\"][index][\"data\"].append(\n Membership.objects.filter(since__lte=date(year=year, month=9, day=1))\n .filter(\n Q(until__isnull=True) | Q(until__gt=date(year=year, month=9, day=1))\n )\n .filter(type=key)\n .count()\n )\n\n return data\n\n\ndef gen_stats_active_members() -> dict[str, list]:\n \"\"\"Generate statistics about active members.\"\"\"\n return {\n \"labels\": [\"Active Members\", \"Non-active Members\"],\n \"datasets\": [\n {\n \"data\": [\n Member.active_members.count(),\n Member.current_members.count() - Member.active_members.count(),\n ]\n }\n ],\n }\n\n\ndef verify_email_change(change_request) -> None:\n \"\"\"Mark the email change request as verified.\n\n :param change_request: the email change request\n \"\"\"\n change_request.verified = True\n change_request.save()\n\n process_email_change(change_request)\n\n\ndef confirm_email_change(change_request) -> None:\n \"\"\"Mark the email change request as verified.\n\n :param change_request: the email change request\n \"\"\"\n change_request.confirmed = True\n change_request.save()\n\n process_email_change(change_request)\n\n\ndef process_email_change(change_request) -> None:\n \"\"\"Change the user's email address if the request was completed and send the completion email.\n\n :param change_request: the email change request\n \"\"\"\n if not change_request.completed:\n return\n\n member = change_request.member\n member.email = change_request.email\n member.save()\n\n emails.send_email_change_completion_message(change_request)\n\n\ndef execute_data_minimisation(dry_run=False, members=None) -> list[Member]:\n \"\"\"Clean the profiles of members/users of whom the last membership ended at least 31 days ago.\n\n :param dry_run: does not really remove data if True\n :param members: queryset of members to process, optional\n :return: list of processed members\n \"\"\"\n if not members:\n members = Member.objects\n members = (\n members.annotate(membership_count=Count(\"membership\"))\n .exclude(\n (\n Q(membership__until__isnull=True)\n | Q(membership__until__gt=timezone.now().date())\n )\n & Q(membership_count__gt=0)\n )\n .distinct()\n .prefetch_related(\"membership_set\", \"profile\")\n )\n deletion_period = timezone.now().date() - timezone.timedelta(days=31)\n processed_members = []\n for member in members:\n if (\n member.latest_membership is None\n or member.latest_membership.until <= deletion_period\n ):\n processed_members.append(member)\n profile = member.profile\n profile.student_number = None\n profile.phone_number = None\n profile.address_street = None\n profile.address_street2 = None\n profile.address_postal_code = None\n profile.address_city = None\n profile.address_country = None\n profile.birthday = None\n profile.emergency_contact_phone_number = None\n profile.emergency_contact = None\n profile.is_minimized = True\n if not dry_run:\n profile.save()\n\n return processed_members\n",
"path": "website/members/services.py"
}
] | [
{
"content": "\"\"\"Services defined in the members package.\"\"\"\nfrom collections.abc import Callable\nfrom datetime import date\nfrom typing import Any\n\nfrom django.conf import settings\nfrom django.db.models import Count, Q\nfrom django.utils import timezone\n\nfrom members import emails\nfrom members.models import Member, Membership\nfrom utils.snippets import datetime_to_lectureyear\n\n\ndef _member_group_memberships(\n member: Member, condition: Callable[[Membership], bool]\n) -> dict[str, dict[str, Any]]:\n \"\"\"Determine the group membership of a user based on a condition.\n\n :return: Object with group memberships\n \"\"\"\n memberships = member.membergroupmembership_set.all()\n data = {}\n\n for membership in memberships:\n if not condition(membership):\n continue\n period = {\n \"since\": membership.since,\n \"until\": membership.until,\n \"chair\": membership.chair,\n }\n\n if hasattr(membership.group, \"board\"):\n period[\"role\"] = membership.role\n\n if membership.until is None and hasattr(membership.group, \"board\"):\n period[\"until\"] = membership.group.board.until\n\n name = membership.group.name\n if data.get(name):\n data[name][\"periods\"].append(period)\n if data[name][\"earliest\"] > period[\"since\"]:\n data[name][\"earliest\"] = period[\"since\"]\n if period[\"until\"] is None or (\n data[name][\"latest\"] is not None\n and data[name][\"latest\"] < period[\"until\"]\n ):\n data[name][\"latest\"] = period[\"until\"]\n data[name][\"periods\"].sort(key=lambda x: x[\"since\"])\n else:\n data[name] = {\n \"pk\": membership.group.pk,\n \"active\": membership.group.active,\n \"name\": name,\n \"periods\": [period],\n \"url\": settings.BASE_URL + membership.group.get_absolute_url(),\n \"earliest\": period[\"since\"],\n \"latest\": period[\"until\"],\n }\n return data\n\n\ndef member_achievements(member) -> list:\n \"\"\"Derive a list of achievements of a member.\n\n Committee and board memberships + mentorships\n \"\"\"\n achievements = _member_group_memberships(\n member,\n lambda membership: (\n hasattr(membership.group, \"board\") or hasattr(membership.group, \"committee\")\n ),\n )\n\n mentor_years = member.mentorship_set.all()\n for mentor_year in mentor_years:\n name = f\"Mentor in {mentor_year.year}\"\n # Ensure mentorships appear last but are sorted\n earliest = date.today()\n earliest = earliest.replace(year=earliest.year + mentor_year.year)\n # Making sure it does not crash in leap years\n if earliest.month == 2 and earliest.day == 29:\n earliest = earliest.replace(day=28)\n if not achievements.get(name):\n achievements[name] = {\n \"name\": name,\n \"earliest\": earliest,\n }\n return sorted(achievements.values(), key=lambda x: x[\"earliest\"])\n\n\ndef member_societies(member) -> list:\n \"\"\"Derive a list of societies a member was part of.\"\"\"\n societies = _member_group_memberships(\n member, lambda membership: (hasattr(membership.group, \"society\"))\n )\n return sorted(societies.values(), key=lambda x: x[\"earliest\"])\n\n\ndef gen_stats_member_type() -> dict[str, list]:\n \"\"\"Generate statistics about membership types.\"\"\"\n data = {\n \"labels\": [],\n \"datasets\": [\n {\"data\": []},\n ],\n }\n\n for key, display in Membership.MEMBERSHIP_TYPES:\n data[\"labels\"].append(str(display))\n data[\"datasets\"][0][\"data\"].append(\n Membership.objects.filter(since__lte=date.today())\n .filter(Q(until__isnull=True) | Q(until__gt=date.today()))\n .filter(type=key)\n .count()\n )\n\n return data\n\n\ndef gen_stats_year() -> dict[str, list]:\n 
\"\"\"Generate statistics on how many members (and other membership types) there were in each cohort.\"\"\"\n years = range(2015, datetime_to_lectureyear(date.today()))\n\n data = {\n \"labels\": list(years),\n \"datasets\": [\n {\"label\": str(display), \"data\": []}\n for _, display in Membership.MEMBERSHIP_TYPES\n ],\n }\n\n for index, (key, _) in enumerate(Membership.MEMBERSHIP_TYPES):\n for year in years:\n data[\"datasets\"][index][\"data\"].append(\n Membership.objects.filter(since__lte=date(year=year, month=9, day=1))\n .filter(\n Q(until__isnull=True) | Q(until__gt=date(year=year, month=9, day=1))\n )\n .filter(type=key)\n .count()\n )\n\n return data\n\n\ndef gen_stats_active_members() -> dict[str, list]:\n \"\"\"Generate statistics about active members.\"\"\"\n return {\n \"labels\": [\"Active Members\", \"Non-active Members\"],\n \"datasets\": [\n {\n \"data\": [\n Member.active_members.count(),\n Member.current_members.count() - Member.active_members.count(),\n ]\n }\n ],\n }\n\n\ndef verify_email_change(change_request) -> None:\n \"\"\"Mark the email change request as verified.\n\n :param change_request: the email change request\n \"\"\"\n change_request.verified = True\n change_request.save()\n\n process_email_change(change_request)\n\n\ndef confirm_email_change(change_request) -> None:\n \"\"\"Mark the email change request as verified.\n\n :param change_request: the email change request\n \"\"\"\n change_request.confirmed = True\n change_request.save()\n\n process_email_change(change_request)\n\n\ndef process_email_change(change_request) -> None:\n \"\"\"Change the user's email address if the request was completed and send the completion email.\n\n :param change_request: the email change request\n \"\"\"\n if not change_request.completed:\n return\n\n member = change_request.member\n member.email = change_request.email\n member.save()\n\n emails.send_email_change_completion_message(change_request)\n\n\ndef execute_data_minimisation(dry_run=False, members=None) -> list[Member]:\n \"\"\"Clean the profiles of members/users of whom the last membership ended at least 31 days ago.\n\n :param dry_run: does not really remove data if True\n :param members: queryset of members to process, optional\n :return: list of processed members\n \"\"\"\n if not members:\n members = Member.objects\n members = (\n members.annotate(membership_count=Count(\"membership\"))\n .exclude(\n (\n Q(membership__until__isnull=True)\n | Q(membership__until__gt=timezone.now().date())\n )\n & Q(membership_count__gt=0)\n )\n .distinct()\n .prefetch_related(\"membership_set\", \"profile\")\n )\n deletion_period = timezone.now().date() - timezone.timedelta(days=90)\n processed_members = []\n for member in members:\n if (\n member.latest_membership is None\n or member.latest_membership.until <= deletion_period\n ):\n processed_members.append(member)\n profile = member.profile\n profile.student_number = None\n profile.phone_number = None\n profile.address_street = None\n profile.address_street2 = None\n profile.address_postal_code = None\n profile.address_city = None\n profile.address_country = None\n profile.birthday = None\n profile.emergency_contact_phone_number = None\n profile.emergency_contact = None\n profile.is_minimized = True\n if not dry_run:\n profile.save()\n\n return processed_members\n",
"path": "website/members/services.py"
}
] | diff --git a/website/members/services.py b/website/members/services.py
index 8896a47b8..fdb2a0f72 100644
--- a/website/members/services.py
+++ b/website/members/services.py
@@ -218,7 +218,7 @@ def execute_data_minimisation(dry_run=False, members=None) -> list[Member]:
.distinct()
.prefetch_related("membership_set", "profile")
)
- deletion_period = timezone.now().date() - timezone.timedelta(days=31)
+ deletion_period = timezone.now().date() - timezone.timedelta(days=90)
processed_members = []
for member in members:
if (
diff --git a/website/members/tests/test_services.py b/website/members/tests/test_services.py
index a030b8294..85e276d06 100644
--- a/website/members/tests/test_services.py
+++ b/website/members/tests/test_services.py
@@ -81,7 +81,7 @@ def test_process_email_change(self, send_message_mock):
send_message_mock.assert_called_once_with(change_request)
-@freeze_time("2018-10-2")
+@freeze_time("2018-12-2")
@override_settings(SUSPEND_SIGNALS=True)
class DataMinimisationTest(TestCase):
@classmethod
@@ -114,12 +114,12 @@ def setUpTestData(cls):
)
def test_removes_after_31_days_or_no_membership(self):
- with self.subTest("Deletes after 31 days"):
+ with self.subTest("Deletes after 90 days"):
processed = services.execute_data_minimisation(True)
self.assertEqual(len(processed), 2)
self.assertEqual(processed[0], self.m1)
- with self.subTest("Deletes after 31 days"):
+ with self.subTest("Deletes after 90 days"):
self.s1.until = timezone.now().replace(year=2018, month=11, day=1)
self.s1.save()
processed = services.execute_data_minimisation(True)
diff --git a/website/singlepages/templates/singlepages/privacy_policy.html b/website/singlepages/templates/singlepages/privacy_policy.html
index cce839c22..a035497a5 100644
--- a/website/singlepages/templates/singlepages/privacy_policy.html
+++ b/website/singlepages/templates/singlepages/privacy_policy.html
@@ -7,86 +7,73 @@
{% block page_title %}{% trans "privacy policy"|capfirst %}{% endblock %}
{% block page_content %}
- <h3>{% trans "Version" %} 2.5, 24-05-2021</h3>
+ <h3>Version 2.7, 2023-12-14</h3>
<p>
- {% blocktrans trimmed %}
- This document contains the privacy conditions of Study Association Thalia.
- The conditions are applicable on all members, benefactors, honorary members of Thalia and
- people who have started the registration process.
- Where there are differences in the applicability of the conditions on the mentioned groups this will
- be stated.
- {% endblocktrans %}
+ This document contains the privacy conditions of Study Association Thalia.
+ The conditions are applicable on all members, benefactors, honorary members of Thalia and
+ people who have started the registration process. Where there are differences in the
+ applicability of the conditions on the mentioned groups this will be stated.
</p>
- <h4>1. {% trans "Categories of personal data" %}</h4>
+ <h4>1. Categories of personal data</h4>
<p>
- {% blocktrans trimmed %}
- All data are stored at least for the length of the membership, unless stated otherwise.
- At the end of the membership the address information will be deleted, the name, email address and
- history with Thalia will be collected in the alumni database.
- The account on the Thalia-website will be operational even after the end of the membership or
- benefactorship to allow easy renewals. The deletion of data will happen on a yearly basis
- (on March 1st), so on average the data will be saved for six months, at most a year after the
- end of the membership. Data is not deleted immediately since the data is needed in case the
- membership gets extended. A person can request immediate deletion of their data by sending an
- email to <a href="mailto:[email protected]">[email protected]</a>.
- {% endblocktrans %}
+ All data are stored at least for the length of the membership, unless stated otherwise.
+ The account on the Thalia-website will be operational even after the end of the membership or
+ benefactorship to allow easy renewals.
+
+ The address information, phone number, profile picture, emergency contact, date of birth and
+ student number are deleted 90 days after the end of a person's last membership. These data
+ are not deleted immediately after the end of the membership to allow for easy renewals.
+
+ The user's username, email address, and membership history are kept indefinitely for the
+ sake of keeping in contact with alumni, and to keep the association's history.
+
+ A person can request immediate deletion of their data by sending an
+ email to <a href="mailto:[email protected]">[email protected]</a>.
</p>
- <h5>{% trans "Applicable to members, benefactors and honorary members:" %}</h5>
+ <h5>Applicable to members, benefactors and honorary members:</h5>
<p>
- <strong>{% trans "Full name" %}</strong><br/>
- {% blocktrans trimmed %}
- Thalia uses the name of members, benefactors and honorary members for its administration and for
- personalising its communication.
- Processing of these data happens on the basis of it being necessary to fulfill the membership
- agreement.
- {% endblocktrans %}
+ <strong>Full name</strong><br/>
+ Thalia uses the name of members, benefactors and honorary members for its administration and for
+ personalising its communication.
+ Processing of these data happens on the basis of it being necessary to fulfill the membership
+ agreement.
</p>
<p>
- <strong>{% trans "Address" %}</strong><br/>
- {% blocktrans trimmed %}
- Thalia uses the address of the members, benefactors and honorary members for its administration and
- for sending the association magazine.
- Processing of these data happens on the basis of it being necessary to fulfill the membership
- agreement.
- {% endblocktrans %}
+ <strong>Address</strong><br/>
+ Thalia uses the address of the members, benefactors and honorary members for its administration and
+ for sending the association magazine.
+ Processing of these data happens on the basis of it being necessary to fulfill the membership
+ agreement.
</p>
<p>
- <strong>{% trans "Email address" %}</strong><br/>
- {% blocktrans trimmed %}
- Thalia uses the email addresses of members, benefactors and honorary members to communicate about
- policy and financial matters and
- for sending the weekly newsletter as well as member specific communication (about an event for
- example). The email addresses are also used to enable functionality provided by the
- website (such as to reset passwords). Processing of these data happens on the basis of it being
- necessary to fulfill the membership agreement.
- {% endblocktrans %}
+ <strong>Email address</strong><br/>
+ Thalia uses the email addresses of members, benefactors and honorary members to communicate about
+ policy and financial matters and
+ for sending the weekly newsletter as well as member specific communication (about an event for
+ example). The email addresses are also used to enable functionality provided by the
+ website (such as to reset passwords). Processing of these data happens on the basis of it being
+ necessary to fulfill the membership agreement.
</p>
<p>
- <strong>{% trans "Phone number" %}</strong><br/>
- {% blocktrans trimmed %}
- Thalia uses the phone number of members, benefactors and honorary members to communicate with members
- about activities of Thalia.
- Think about calling a participant who is too late for an activity or communicating a last-minute
- change. The processing of phone numbers by Thalia is optional.
- Processing of these data happens based on consent, which is given implicitly when the phone number
- is entered during registration or on the user profile on the website.
- {% endblocktrans %}
+ <strong>Phone number</strong><br/>
+ Thalia uses the phone number of members, benefactors and honorary members to communicate with members
+ about activities of Thalia.
+ Think about calling a participant who is too late for an activity or communicating a last-minute
+ change. The processing of phone numbers by Thalia is optional.
+ Processing of these data happens based on consent, which is given implicitly when the phone number
+ is entered during registration or on the user profile on the website.
</p>
<p>
- {% blocktrans trimmed %}
- NB: For some events, processing of the phone number is needed because of the nature of the event.
- Processing will happen per activity and will be explained where applicable.
- {% endblocktrans %}
+ NB: For some events, processing of the phone number is needed because of the nature of the event.
+ Processing will happen per activity and will be explained where applicable.
</p>
<p>
- <strong>{% trans "Date of birth" %}</strong><br/>
- {% blocktrans trimmed %}
- Thalia uses the date of birth of members, benefactors and honorary members to determine
- whether they are of age.
- Besides this the date of birth is processed to display the birthday in the calendar if the
- member, benefactor or honorary member explicitly chose to allow this in their profile.
- Processing of these data happens on the basis of it being a legitimate interest of Thalia.
- {% endblocktrans %}
+ <strong>Date of birth</strong><br/>
+ Thalia uses the date of birth of members, benefactors and honorary members to determine
+ whether they are of age. Besides this the date of birth is processed to display the birthday
+ in the calendar if the member, benefactor or honorary member explicitly chose to allow this
+ in their profile.
+ Processing of these data happens on the basis of it being a legitimate interest of Thalia.
</p>
<p>
<strong>{% trans "Profile" %}</strong><br/>
@@ -100,252 +87,208 @@ <h5>{% trans "Applicable to members, benefactors and honorary members:" %}</h5>
{% endblocktrans %}
</p>
<p>
- <strong>{% trans "Bank account number" %}</strong><br/>
- {% blocktrans trimmed %}
- Thalia uses the bank account number of members, benefactors and honorary members when they pay via
- bank transfer or when they declare costs with Thalia.
- These data are saved for as long is required by law for the financial administration of Thalia.
- Processing of these data happens on the basis of it being necessary to fulfill the membership
- agreement.
- {% endblocktrans %}
+ <strong>Bank account number</strong><br/>
+ Thalia uses the bank account number of members, benefactors and honorary members when they pay via
+ bank transfer or when they declare costs with Thalia.
+ These data are saved for as long as required by law for the financial administration of Thalia.
+ Processing of these data happens on the basis of it being necessary to fulfill the membership agreement.
</p>
<p>
- <strong>{% trans "Emergency contact" %}</strong><br/>
- {% blocktrans trimmed %}
- There is a possibility to add an emergency contact to the user profile on the website, so this
- person can be contacted if it’s needed.
- Adding an emergency contact is optional.
- {% endblocktrans %}
+ <strong>Emergency contact</strong><br/>
+ There is a possibility to add an emergency contact to the user profile on the website,
+ so this person can be contacted if it’s needed. Adding an emergency contact is optional.
</p>
<p>
- <strong>{% trans "Data collected on the website" %}</strong><br/>
- {% blocktrans trimmed %}
- Certain actions on the website may cause data to be collected (such as ordering a pizza on the
- website or registering to attend an event). Processing of these data happens on the basis of
- it being a legitimate interest of Thalia.
- {% endblocktrans %}<br/>
+ <strong>Data collected on the website</strong><br/>
+ Certain actions on the website may cause data to be collected (such as ordering food on the
+ website or registering to attend an event). Processing of these data happens on the basis of
+ it being a legitimate interest of Thalia.
- {% blocktrans trimmed %}
- We additionally may collect data (logs) on anything happening on the site and app to ensure
- the correct functioning of the services provided, the username of a authenticated user is sent along
- with the logs. See the <a href="#sentry">section below on Functional Software, Inc.</a> for
- information on where this data is sent. Thalia tries to only collect data when errors occur.
- The logs are used to fix bugs in the website and app. Processing of these data
- happens on the basis of it being a legitimate interest of Thalia.
- {% endblocktrans %}
+ <br/>
+
+ We additionally may collect data (logs) on anything happening on the site and app to ensure
+ the correct functioning of the services provided, the username of a authenticated user is
+ sent along with the logs.
+ See the <a href="#sentry">section below on Functional Software, Inc.</a> for information on
+ where this data is sent. Thalia tries to only collect data when errors occur. The logs are
+ used to fix bugs in the website and app.
+ Processing of these data happens on the basis of it being a legitimate interest of Thalia.
</p>
<p>
<strong>{% trans "Sales" %}</strong><br/>
- {% blocktrans trimmed %}
- When making a purchase at (an event of) Thalia, this can be recorded. This happens on the basis of
- it being a legal requirement of Thalia to keep their financial administration up-to-date.
- {% endblocktrans %}
+ When making a purchase at (an event of) Thalia, this can be recorded. This happens on the
+ basis of it being a legal requirement of Thalia to keep their financial administration up-to-date.
</p>
<h5>{% trans "Applicable to members:" %}</h5>
<p>
<strong>{% trans "Student number" %}</strong><br/>
- {% blocktrans trimmed %}
- Thalia uses the student number of members to check if they are still studying and to receive
- grants and subsidies.
- Processing of these data happens on the basis of it being a legitimate interest of Thalia.
- Student numbers are only shared with the Radboud University or organisations which operate on
- behalf of the Radboud University.
- {% endblocktrans %}
+ Thalia uses the student number of members to check if they are still studying and to receive
+ grants and subsidies. Student numbers are only shared with the Radboud University or
+ organisations which operate on behalf of the Radboud University.
+ Processing of these data happens on the basis of it being a legitimate interest of Thalia.
</p>
<h5>{% trans "Applicable to everyone:" %}</h5>
<p>
<strong>{% trans "Photos" %}</strong><br/>
- {% blocktrans trimmed %}
- Thalia uses photo’s which are made during her events for the promotion of events and the association.
- These photo's can be made available on the website of Thalia for members or they can be posted on social media accounts of Thalia.
- If such image is posted on a social media platform other than the website of Thalia, permission will be asked to all recognisable persons in the picture if this is feasible.
- It is possible to ask for the removal of a certain photo from social media when someone on that photo desires so, by sending a mail to <a href="mailto:[email protected]">[email protected]</a>.
- Processing of this data happens on the basis of it being a legitimate interest of Thalia.
+ Thalia uses photo’s which are made during her events for the promotion of events and the
+ association. These photo's can be made available on the website of Thalia for members, or
+ they can be posted on social media accounts of Thalia. If such image is posted on a social
+ media platform other than the website of Thalia, permission will be asked to all
+ recognisable persons in the picture if this is feasible.
- When someone wishes not to be photographed, they can indicate this to the photographer.
- A request of deletion of a certain photo can also be made after it has been taken, by sending a mail to <a href="mailto:[email protected]">[email protected]</a>.
- {% endblocktrans %}
+ It is possible to ask for the removal of a certain photo from the website or social media
+ when someone on that photo desires so, by sending a mail to
+ <a href="mailto:[email protected]">[email protected]</a>.
+
+ Processing of this data happens on the basis of it being a legitimate interest of Thalia.
+
+ When someone wishes not to be photographed, they can indicate this to the photographer.
+ A request of deletion of a certain photo can also be made after it has been taken, by
+ sending a mail to <a href="mailto:[email protected]">[email protected]</a>.
</p>
<h5>{% trans "Applicable to upcoming members:" %}</h5>
<p>
- {% blocktrans trimmed %}
- Registrations which have been completed or rejected will be removed after 31 days.
- {% endblocktrans %}
+ Registrations which have been completed or rejected will be removed after 31 days.
</p>
<p>
- {% blocktrans trimmed %}
- A registration that is still in the process of collecting references, reviewing by the board,
- or waiting for payment is not removed automatically. A person can request immediate deletion
- of their data by sending an email to <a href="mailto:[email protected]">[email protected]</a>.
- {% endblocktrans %}
+ A registration that is still in the process of collecting references, reviewing by the board,
+ or waiting for payment is not removed automatically. A person can request immediate deletion
+ of their data by sending an email to <a href="mailto:[email protected]">[email protected]</a>.
</p>
<h5>{% trans "Face recognition" %}</h5>
<p>
- {% blocktrans trimmed %}
- Photos uploaded to Thalia's website are scanned on faces. This is done on our own servers, using open source
- models. No third parties will receive these photos for this purpose and the photos are not used to train
- models. Face recognition is only used to allow members to easily search for photos they appear on, and not
- for any other purpose. Members are not allowed to use face recognition to search for photos of other
- members. Measures are implemented to prevent this as much as possible.
- {% endblocktrans %}
+ Photos uploaded to Thalia's website are scanned on faces. This is done on our own servers,
+ using open source models. No third parties will receive these photos for this purpose and
+ the photos are not used to train models. Face recognition is only used to allow members to
+ easily search for photos they appear on, and not for any other purpose.
+ Members are not allowed to use face recognition to search for photos of other members.
+ Measures are implemented to prevent this as much as possible.
</p>
<p>
- {% blocktrans trimmed %}
- In order to use face recognition, members need to upload a photo of their own face, a so-called reference
- face. This photo is stored on our servers and is used to compare faces in photos uploaded to the website.
- This reference face is only used for face recognition and is not used for any other purpose. It will never
- be made public. Members can delete their reference face at any time. After deletion, however, the reference
- face will not be immediately deleted. This allows us to monitor if people actually searched for photos of
- others.
- {% endblocktrans %}
+ In order to use face recognition, members need to upload a photo of their own face, a
+ so-called reference face. This photo is stored on our servers and is used to compare faces
+ in photos uploaded to the website. This reference face is only used for face recognition and
+ is not used for any other purpose. It will never be made public.
+ Members can delete their reference face at any time. After deletion, however, the reference
+ face will not be immediately deleted. This allows us to monitor if people actually searched
+ for photos of others.
</p>
<h5>{% trans "Other processing of personal data" %}</h5>
<p>
- {% blocktrans trimmed %}
- For activities of Thalia, additional personal data may be needed.
- Think about data like allergy information or dietary preferences when there is an event with food,
- or shirt size when clothing is given to participants of an event.
- These data will be processed and explained per event.
- {% endblocktrans %}
+ For activities of Thalia, additional personal data may be needed. Examples include allergy
+ information or dietary preferences when there is an event with food, or shirt size when
+ clothing is given to participants of an event.
+
+ These data will be processed and explained per event.
</p>
<h4>
2. {% trans "Rights of members, benefactors and honorary members concerning processing of personal data" %}</h4>
<p>
- {% blocktrans trimmed %}
- The relevant rights of members, benefactors and honorary members concerning the processing of
- personal data is as follows:
- {% endblocktrans %}
+ The relevant rights of members, benefactors and honorary members concerning the processing
+ of personal data are as follows:
</p>
<ul>
<li>
- {% trans "Right of access. The person concerned to view their data." %}
+ Right of access. The person concerned may request to view their data.
</li>
<li>
- {% trans "Right to rectification. The person concerned may correct or supplement their data." %}
+ Right to rectification. The person concerned may correct or supplement their data.
</li>
<li>
- {% trans "Right to erasure. The person concerned may let their data be deleted." %}
+ Right to erasure. The person concerned may request that their data be deleted.
</li>
<li>
- {% trans "Right to data portability. The person concerned may let their data be transferred to another party." %}
+ Right to data portability. The person concerned may let their data be transferred to another party.
</li>
<li>
- {% trans "Right to restriction of processing. The person concerned may restrict the processing of their personal data." %}
+ Right to restriction of processing. The person concerned may restrict the processing of their personal data.
</li>
<li>
- {% trans "Right to object. The person concerned may object to the processing of their data." %}
+ Right to object. The person concerned may object to the processing of their data.
</li>
</ul>
<p>
- {% blocktrans trimmed %}
- More information on the applicability of these rights can be found on the website of
- <em>Autoriteit Persoonsgegevens</em> (<a href="https://autoriteitpersoonsgegevens.nl/en">https://autoriteitpersoonsgegevens.nl/en</a>).
- {% endblocktrans %}
+ More information on the applicability of these rights can be found on the website of
+ <em>Autoriteit Persoonsgegevens</em> (<a href="https://autoriteitpersoonsgegevens.nl/en">https://autoriteitpersoonsgegevens.nl/en</a>).
</p>
<h5>{% trans "Contact details for processing of personal data" %}</h5>
<p>
- {% blocktrans trimmed %}
- Within Thalia, the secretary is the contact person for all matters concerning the processing of
- personal data.
- Requests and notifications of leaked data can be sent by email to
- <a href="mailto:[email protected]">[email protected]</a> or by post to
- <em>Studievereniging Thalia, Toernooiveld 212, 6525EC Nijmegen</em>.
- {% endblocktrans %}
+ Within Thalia, the secretary is the contact person for all matters concerning the processing
+ of personal data. Requests and notifications of leaked data can be sent by email to
+ <a href="mailto:[email protected]">[email protected]</a> or by post to
+ <em>Studievereniging Thalia, Toernooiveld 212, 6525EC Nijmegen</em>.
</p>
<h4>3. {% trans "Processing of personal data by third parties" %}</h4>
<p>
- {% trans "Thalia can share personal data with the following parties:" %}
+ Thalia can share personal data with the following parties:
</p>
<ul>
<li>
<strong id="radboud-university">{% trans "Radboud University Nijmegen" %}</strong><br/>
- {% blocktrans trimmed %}
- Thalia shares names and student numbers with the university (and organizations working on
- behalf of the Radboud University) to request grants and subsidies and
- to check the administration.
- {% endblocktrans %}
+ Thalia shares names and student numbers with the university (and organizations working
+ on behalf of the Radboud University) to request grants and subsidies and to check the
+ administration.
</li>
<li>
<strong id="banks">{% trans "Banks and other financial parties" %}</strong><br/>
- {% blocktrans trimmed %}
- For making payments, data is shared with banks and other financial parties.
- Data will only be shared as far as they are necessary to make or receive payments.
- {% endblocktrans %}
+ For making payments, data is shared with banks and other financial parties. Data will
+ only be shared as far as they are necessary to make or receive payments.
</li>
<li>
<strong id="amazon">{% trans "Amazon Web Services EMEA SARL ('AWS Europe')" %}</strong><br/>
- {% blocktrans trimmed %}
- Thalia's servers are hosted on Amazon Web Services. Therefore, all (personal) data Thalia keeps,
- is processed by Amazon.
- {% endblocktrans %}
+ Thalia's servers are hosted on Amazon Web Services. Therefore, all (personal) data
+ Thalia keeps, is processed by Amazon.
</li>
<li>
<strong id="google">{% trans "Google Ireland Limited" %}</strong><br/>
- {% blocktrans trimmed %}
- When a member joins a committee or society, a Google Workspace account will be created for that user
- to allow them to share files with their committee for example. The website will automatically create
- Google Workspace accounts for these members, and shares the name and email address of the member with
- Google to facilitate this. If a member stops being a member of committees and societies,
- their Google Workspace account will automatically be deleted after one month.
- {% endblocktrans %}<br />
- {% blocktrans trimmed %}
- Additionally, Thalia can post videos, captured during events, on
- <a href="https://www.youtube.com/channel/UCEjYSaa_iSEUDCzKPESdPFg" target="_blank">its YouTube channel</a>
- for promotional purposes.
- {% endblocktrans %}
+ When a member joins a committee or society, a Google Workspace account will be created
+ for that user to allow them to share files with their committee for example. The website
+ will automatically create Google Workspace accounts for these members, and shares the
+ name and email address of the member with Google to facilitate this. If a member stops
+ being a member of committees and societies, their Google Workspace account will
+ automatically be deleted after one month.
+ <br/>
+ Additionally, Thalia can post videos, captured during events, on
+ <a href="https://www.youtube.com/channel/UCEjYSaa_iSEUDCzKPESdPFg" target="_blank">its YouTube channel</a>
+ for promotional purposes.
</li>
<li>
<strong id="sentry">{% trans "Functional Software, Inc." %}</strong>
- {% blocktrans trimmed %}
- Thalia uses <a href="https://sentry.io">Sentry</a>, a platform by Functional Software, Inc.,
- for error monitoring. This means that when server errors occur on the website
- or crashes in the app some data is sent to the Sentry platform. For authenticated users,
- the username is sent along with an error log. We always try to filter out any personal
- information from the error logs.
- {% endblocktrans %}
+ Thalia uses <a href="https://sentry.io">Sentry</a>, a platform by Functional Software, Inc.,
+ for error monitoring. This means that when server errors occur on the website or crashes
+ in the app some data is sent to the Sentry platform. For authenticated users, the
+ username is sent along with an error log. We always try to filter out any other personal
+ information from the error logs.
</li>
<li>
<strong id="facebook">{% trans "Facebook, Inc." %}</strong>
- {% blocktrans trimmed %}
- For promotional purposes, Thalia can share photos or videos of members, that are taken during activities,
- on the <a href="https://www.instagram.com/thalia_nijmegen/" target="_blank">Instagram account</a> or
- <a href="https://www.facebook.com/svThalia/" target="_blank">Facebook page</a> of Thalia.
- {% endblocktrans %}
+ For promotional purposes, Thalia can share photos or videos of members, that are taken during activities,
+ on the <a href="https://www.instagram.com/thalia_nijmegen/" target="_blank">Instagram account</a> or
+ <a href="https://www.facebook.com/svThalia/" target="_blank">Facebook page</a> of Thalia.
</li>
<li>
<strong id="snap">{% trans "Snap, Inc." %}</strong>
- {% blocktrans trimmed %}
- For promotional purposes, Thalia can share photos or videos of members, that are taken during activities,
- on the <a href="https://www.snapchat.com/add/svthalia" target="_blank">Snapchat account</a> of Thalia.
- Read also the provisions about
- {% endblocktrans %}
+ For promotional purposes, Thalia can share photos or videos of members, that are taken during activities,
+ on the <a href="https://www.snapchat.com/add/svthalia" target="_blank">Snapchat account</a> of Thalia.
</li>
<li>
<strong id="discord">{% trans "Discord, Inc." %}</strong>
- {% blocktrans trimmed %}
- Users that choose to join the Thalia Discord and connect their Discord account with their Thalia account,
- will share their Thalia profile with Discord.
- {% endblocktrans %}
+ Users that choose to join the Thalia Discord and connect their Discord account with their Thalia account,
+ will share their Thalia profile with Discord.
</li>
</ul>
<p>
- {% blocktrans trimmed %}
- Thalia will not share personal data with partners, unless a member, benefactor or an honorary member
- registers for a partner event for which sharing personal data is required.
- {% endblocktrans %}
+ Thalia will not share personal data with partners, unless a member, benefactor or an
+ honorary member registers for a partner event for which sharing personal data is required.
</p>
<p>
- {% blocktrans trimmed %}
- Email addresses will not be shared with partners. There is an opt-in mailing list to receive
- messages from partners, but the partners don’t have access to the email addresses in the list.
- {% endblocktrans %}
+ Email addresses will not be shared with partners. There is an opt-in mailing list to receive
+ messages from partners, but the partners don’t have access to the email addresses in the list.
</p>
<h4>4. {% trans "Updating the privacy conditions" %}</h4>
<p>
- {% blocktrans trimmed %}
- Thalia reserves the right to change the privacy conditions.
- The new conditions will be shared as soon as possible with members, benefactors and honorary members.
- When changes require consent to be given anew, this will be done accordingly.
- {% endblocktrans %}
+ Thalia reserves the right to change the privacy conditions. The new conditions will
+ be shared as soon as possible with members, benefactors and honorary members.
+ When changes require consent to be given anew, this will be done accordingly.
</p>
{% endblock %}
|
certbot__certbot-9070 | Incorrect copyright date range on API Documentation
The copyright line for the [API Documentation](https://acme-python.readthedocs.io/en/stable/api.html) presently reads:
> © Copyright 2015-2015, Let's Encrypt Project Revision 93f61887.

This should be 2015-2021.
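The line at fault is in the Sphinx configuration, `acme/docs/conf.py` (included below). The change applied below settles on a plain `2015`; a sketch of the alternative the report suggests, computing the end of the range at build time so it never goes stale, could look like:

```python
# acme/docs/conf.py (Sphinx configuration) -- a sketch, not the merged fix
import datetime

# Before: copyright = u'2015-2015, Let\'s Encrypt Project'
copyright = f"2015-{datetime.date.today().year}, Let's Encrypt Project"
```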
| [
{
"content": "# -*- coding: utf-8 -*-\n#\n# acme-python documentation build configuration file, created by\n# sphinx-quickstart on Sun Oct 18 13:38:06 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\nimport sys\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(os.path.join(here, '..')))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.todo',\n 'sphinx.ext.coverage',\n 'sphinx.ext.viewcode',\n]\n\nautodoc_member_order = 'bysource'\nautodoc_default_flags = ['show-inheritance']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'acme-python'\ncopyright = u'2015-2015, Let\\'s Encrypt Project'\nauthor = u'Let\\'s Encrypt Project'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '0'\n# The full version, including alpha/beta/rc tags.\nrelease = '0'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = 'en'\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\n '_build',\n]\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\ndefault_role = 'py:obj'\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. 
They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n\n# https://docs.readthedocs.io/en/stable/faq.html#i-want-to-use-the-read-the-docs-theme-locally\n# on_rtd is whether we are on readthedocs.org\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\nif not on_rtd:  # only import and set the theme if we're building docs locally\n    import sphinx_rtd_theme\n    html_theme = 'sphinx_rtd_theme'\n    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n# otherwise, readthedocs.org uses their theme by default, so no need to specify it\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'acme-pythondoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n    # The paper size ('letterpaper' or 'a4paper').\n    #'papersize': 'letterpaper',\n\n    # The font size ('10pt', '11pt' or '12pt').\n    #'pointsize': '10pt',\n\n    # Additional stuff for the LaTeX preamble.\n    #'preamble': '',\n\n    # Latex figure (float) alignment\n    #'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n#  author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n    (master_doc, 'acme-python.tex', u'acme-python Documentation',\n     u'Let\\'s Encrypt Project', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n    (master_doc, 'acme-python', u'acme-python Documentation',\n     [author], 1),\n    ('man/jws', 'jws', u'jws script documentation', [project], 1),\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n#  dir menu entry, description, category)\ntexinfo_documents = [\n    (master_doc, 'acme-python', u'acme-python Documentation',\n     author, 'acme-python', 'One line description of project.',\n     'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n\nintersphinx_mapping = {\n    'python': ('https://docs.python.org/', None),\n    'josepy': ('https://josepy.readthedocs.io/en/latest/', None),\n}\n",
"path": "acme/docs/conf.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n#\n# acme-python documentation build configuration file, created by\n# sphinx-quickstart on Sun Oct 18 13:38:06 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\nimport sys\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(os.path.join(here, '..')))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.todo',\n 'sphinx.ext.coverage',\n 'sphinx.ext.viewcode',\n]\n\nautodoc_member_order = 'bysource'\nautodoc_default_flags = ['show-inheritance']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'acme-python'\ncopyright = u'2015, Let\\'s Encrypt Project'\nauthor = u'Let\\'s Encrypt Project'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '0'\n# The full version, including alpha/beta/rc tags.\nrelease = '0'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = 'en'\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\n '_build',\n]\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\ndefault_role = 'py:obj'\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. 
They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n\n# https://docs.readthedocs.io/en/stable/faq.html#i-want-to-use-the-read-the-docs-theme-locally\n# on_rtd is whether we are on readthedocs.org\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n# otherwise, readthedocs.org uses their theme by default, so no need to specify it\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. 
Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'acme-pythondoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #'preamble': '',\n\n # Latex figure (float) alignment\n #'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'acme-python.tex', u'acme-python Documentation',\n u'Let\\'s Encrypt Project', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'acme-python', u'acme-python Documentation',\n [author], 1),\n ('man/jws', 'jws', u'jws script documentation', [project], 1),\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'acme-python', u'acme-python Documentation',\n author, 'acme-python', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/', None),\n 'josepy': ('https://josepy.readthedocs.io/en/latest/', None),\n}\n",
"path": "acme/docs/conf.py"
}
] | diff --git a/acme/docs/conf.py b/acme/docs/conf.py
index d419326df92..facb88a0ac7 100644
--- a/acme/docs/conf.py
+++ b/acme/docs/conf.py
@@ -58,7 +58,7 @@
# General information about the project.
project = u'acme-python'
-copyright = u'2015-2015, Let\'s Encrypt Project'
+copyright = u'2015, Let\'s Encrypt Project'
author = u'Let\'s Encrypt Project'
# The version info for the project you're documenting, acts as replacement for
|
DataDog__dd-trace-py-1563 | Django failure after upgrading
### Which version of dd-trace-py are you using?
0.39.0
### Which version of the libraries are you using?
We use a LOT of dependencies; I've tried to extract the ones you're going to care about here, but I can always provide a full list:
```
django==1.8.17
django-18-fast-migrations==0.1.1
django-admin-sortable2==0.5.1
django-adminplus==0.2.1
django-cors-headers==2.1.0
django-csp==3.5
django-debug-toolbar==1.9.1
django-discover-runner==1.0
django-extensions==1.6.7
django-filter==1.1.0
django-formtools==2.1
django-jinja==2.2.1
django-mssql==1.8
django-orderable==6.0.1
django-otp==0.4.3
django-otp-twilio==0.4.2
django-password-validation==0.1.1
django-pyodbc-azure==1.8.17.0
django-ratelimit==2.0.0
django-redis==4.8.0
django-rest-swagger==2.1.2
django-role-permissions==2.2.0
django-sqlserver==1.11
django-sslserver==0.15
django-suit==0.2.13
django-supervisor==0.3.4
django-webpack-loader==0.6.0
djangorestframework==3.6.3
```
```
ddtrace==0.39.0
datadog==0.37.1
```
### How can we reproduce your problem?
This one might be hard to reproduce. We're upgrading:
```
-datadog==0.32.0
+datadog==0.37.1
datetime-truncate==1.0.1
datrie==0.7.1
-ddtrace==0.31.0
+ddtrace==0.39.0
```
And on py2, after upgrading, I see the following (but not on py3; we do have different dependencies between py2 and py3 as we transition fully over to py3):
```
#48 1.476 new_class._meta.apps.register_model(new_class._meta.app_label, new_class)
#48 1.476
#48 1.476 2020-07-02 19:58:26,607 [WARNING:140088484815680:8] py.warnings: /root/.local/lib/python2.7/site-packages/django/db/models/base.py:309: RuntimeWarning: Model 'simon.asynctask' was already registered. Reloading models is not advised as it can lead to inconsistencies, most notably with related models.
#48 1.476 new_class._meta.apps.register_model(new_class._meta.app_label, new_class)
#48 1.476
#48 2.127 Traceback (most recent call last):
#48 2.127 File "/root/.local/bin/django-admin", line 8, in <module>
#48 2.127 sys.exit(execute_from_command_line())
#48 2.127 File "/root/.local/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute_from_command_line
#48 2.127 utility.execute()
#48 2.127 File "/root/.local/lib/python2.7/site-packages/django/core/management/__init__.py", line 328, in execute
#48 2.127 django.setup()
#48 2.127 File "/root/.local/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
#48 2.127 apps.populate(settings.INSTALLED_APPS)
#48 2.127 File "/root/.local/lib/python2.7/site-packages/django/apps/registry.py", line 115, in populate
#48 2.127 app_config.ready()
#48 2.127 File "/root/.local/lib/python2.7/site-packages/rolepermissions/apps.py", line 12, in ready
#48 2.127 load_roles_and_permissions()
#48 2.127 File "/root/.local/lib/python2.7/site-packages/rolepermissions/loader.py", line 28, in load_roles_and_permissions
#48 2.127 app_name = get_app_name(app_name)
#48 2.127 File "/root/.local/lib/python2.7/site-packages/rolepermissions/loader.py", line 16, in get_app_name
#48 2.127 type_ = locate(app_name)
#48 2.127 File "/usr/local/lib/python2.7/pydoc.py", line 1518, in locate
#48 2.127 nextmodule = safeimport(join(parts[:n+1], '.'), forceload)
#48 2.127 File "/usr/local/lib/python2.7/pydoc.py", line 342, in safeimport
#48 2.127 raise ErrorDuringImport(path, sys.exc_info())
#48 2.127 pydoc.ErrorDuringImport: problem in django.contrib.admin.apps.SimpleAdminConfig - <type 'exceptions.ImportError'>: No module named SimpleAdminConfig
```
This occurs after I change settings/base/__init__.py to include these lines:
```
from ddtrace import config
config.django['service_name'] = 'django'
```
It dies in:
```
apps.populate(settings.INSTALLED_APPS)
```
Which looks like this (after removing the old ddtrace app)
```
INSTALLED_APPS = (
'identity',
'suit',
'debug_toolbar',
'django_extensions',
'sslserver',
) + COMMON_INSTALLED_APPS
```
COMMON_INSTALLED_APPS is:
```
COMMON_INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.admindocs',
# For 2FA
'django_otp',
'django_otp.plugins.otp_totp',
'otp_twilio',
'rolepermissions',
'adminplus',
'django_jinja',
'orderable',
'rest_framework',
'corsheaders',
'rest_framework_swagger',
'accounts',
'auth3',
'api',
'billing',
'costs',
'journeys_advancement',
'journeys_core',
'simon',
'singerio',
'saml',
'results',
'chef',
'webpack_loader',
'django_filters',
'featurestore',
'journeys_app',
```
Ideally, I'm hoping for help figuring out what to troubleshoot next. I know we're on an ancient version of Django, but I can't do anything about that in the short term.
Simply having 'from ddtrace import config' and nothing else in my settings is enough to break things, and removing this always fixes it.
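In case it helps triage, here is my best guess at a minimal repro of the mechanism, pieced together from the traceback above. The interaction between the import hooks and `pydoc.locate` is an assumption on my part, not something I've isolated:
```python
# Sketch only -- assumes that merely importing ddtrace 0.39.0 installs its
# import hooks as a module-level side effect on Python 2.
from ddtrace import config  # our settings/base/__init__.py does exactly this

config.django['service_name'] = 'django'

# rolepermissions resolves dotted app paths with pydoc.locate(); with the
# hooks active, this lookup surfaced as pydoc.ErrorDuringImport for us
# instead of succeeding.
from pydoc import locate
locate('django.contrib.admin.apps.SimpleAdminConfig')
```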
### What is the result that you get?
The backtrace above.
### What is the result that you expected?
Beautiful tracing.
| [
{
"content": "import pkg_resources\n\n# Always import and patch import hooks before loading anything else\nfrom .internal.import_hooks import patch as patch_import_hooks\n\npatch_import_hooks() # noqa: E402\n\nfrom .monkey import patch, patch_all # noqa: E402\nfrom .pin import Pin # noqa: E402\nfrom .span import Span # noqa: E402\nfrom .tracer import Tracer # noqa: E402\nfrom .settings import config # noqa: E402\nfrom .utils.deprecation import deprecated # noqa: E402\n\ntry:\n __version__ = pkg_resources.get_distribution(__name__).version\nexcept pkg_resources.DistributionNotFound:\n # package is not installed\n __version__ = \"dev\"\n\n\n# a global tracer instance with integration settings\ntracer = Tracer()\n\n__all__ = [\n \"patch\",\n \"patch_all\",\n \"Pin\",\n \"Span\",\n \"tracer\",\n \"Tracer\",\n \"config\",\n]\n\n\n@deprecated(\"This method will be removed altogether\", \"1.0.0\")\ndef install_excepthook():\n \"\"\"Install a hook that intercepts unhandled exception and send metrics about them.\"\"\"\n\n\n@deprecated(\"This method will be removed altogether\", \"1.0.0\")\ndef uninstall_excepthook():\n \"\"\"Uninstall the global tracer except hook.\"\"\"\n",
"path": "ddtrace/__init__.py"
}
] | [
{
"content": "import pkg_resources\n\nfrom .monkey import patch, patch_all # noqa: E402\nfrom .pin import Pin # noqa: E402\nfrom .span import Span # noqa: E402\nfrom .tracer import Tracer # noqa: E402\nfrom .settings import config # noqa: E402\nfrom .utils.deprecation import deprecated # noqa: E402\n\ntry:\n __version__ = pkg_resources.get_distribution(__name__).version\nexcept pkg_resources.DistributionNotFound:\n # package is not installed\n __version__ = \"dev\"\n\n\n# a global tracer instance with integration settings\ntracer = Tracer()\n\n__all__ = [\n \"patch\",\n \"patch_all\",\n \"Pin\",\n \"Span\",\n \"tracer\",\n \"Tracer\",\n \"config\",\n]\n\n\n@deprecated(\"This method will be removed altogether\", \"1.0.0\")\ndef install_excepthook():\n \"\"\"Install a hook that intercepts unhandled exception and send metrics about them.\"\"\"\n\n\n@deprecated(\"This method will be removed altogether\", \"1.0.0\")\ndef uninstall_excepthook():\n \"\"\"Uninstall the global tracer except hook.\"\"\"\n",
"path": "ddtrace/__init__.py"
}
] | diff --git a/ddtrace/__init__.py b/ddtrace/__init__.py
index 4ac6a3c3758..d9dd3c94d2a 100644
--- a/ddtrace/__init__.py
+++ b/ddtrace/__init__.py
@@ -1,10 +1,5 @@
import pkg_resources
-# Always import and patch import hooks before loading anything else
-from .internal.import_hooks import patch as patch_import_hooks
-
-patch_import_hooks() # noqa: E402
-
from .monkey import patch, patch_all # noqa: E402
from .pin import Pin # noqa: E402
from .span import Span # noqa: E402
diff --git a/tests/internal/import_hooks/test_integration.py b/tests/internal/import_hooks/test_integration.py
index cb412a555dc..38abf5f80e3 100644
--- a/tests/internal/import_hooks/test_integration.py
+++ b/tests/internal/import_hooks/test_integration.py
@@ -9,6 +9,9 @@
from tests.subprocesstest import run_in_subprocess, SubprocessTestCase
+import_hooks.patch()
+
+
@pytest.fixture
def hooks():
import_hooks.hooks.reset()
|
docker__docker-py-1671 | Issue with port option in 2.4.0 version
Hi,
I updated to 2.4 today and I hit an issue with docker-compose when I tried to add the following lines to my configuration file (docker-compose.yml):
```yaml
ports:
  - "127.0.0.1:9292:9090"
```
I got the following error:
```
ERROR: for ContainerName expected string or buffer
Traceback (most recent call last):
File "/usr/local/bin/docker-compose", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python2.7/dist-packages/compose/cli/main.py", line 68, in main
command()
File "/usr/local/lib/python2.7/dist-packages/compose/cli/main.py", line 118, in perform_command
handler(command, command_options)
File "/usr/local/lib/python2.7/dist-packages/compose/cli/main.py", line 926, in up
scale_override=parse_scale_args(options['--scale']),
File "/usr/local/lib/python2.7/dist-packages/compose/project.py", line 424, in up
get_deps
File "/usr/local/lib/python2.7/dist-packages/compose/parallel.py", line 69, in parallel_execute
raise error_to_reraise
TypeError: expected string or buffer
```
I have no issue when i downgrade again to the 2.3 version of the package
To reproduce the issue, I use the following configuration (it doesn't seem to depend on the image):
```
version: '2'
services :
ContainerName:
image: bae2d441e03a
ports:
- "127.0.0.1:9292:9090"
```
I run on Ubuntu 14.04.5 LTS with the following packages:
```
docker==2.4.0
docker-compose==1.14.0
docker-pycreds==0.2.1
dockerpty==0.4.1
Python 2.7.6
Client:
Version: 17.05.0-ce
API version: 1.29
Go version: go1.7.5
Git commit: 89658be
Built: Thu May 4 22:06:06 2017
OS/Arch: linux/amd64
Server:
Version: 17.05.0-ce
API version: 1.29 (minimum version 1.12)
Go version: go1.7.5
Git commit: 89658be
Built: Thu May 4 22:06:06 2017
OS/Arch: linux/amd64
Experimental: false
```
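For reference, here is what the parser in `docker/utils/ports.py` (shown below) does with that port string when it is given a plain string. The expected outputs are my reading of that code, not captured output:
```python
# Sketch: parsing the compose port string directly with docker-py's helpers.
from docker.utils.ports import build_port_bindings, split_port

print(split_port("127.0.0.1:9292:9090"))
# expected: (['9090'], [('127.0.0.1', '9292')])

print(build_port_bindings(["127.0.0.1:9292:9090"]))
# expected: {'9090': [('127.0.0.1', '9292')]}
```
So the string itself parses fine; my guess is that docker-compose 1.14.0 now hands over a port object rather than a plain string, which is where the "expected string or buffer" comes from.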
| [
{
"content": "import re\n\nPORT_SPEC = re.compile(\n \"^\" # Match full string\n \"(\" # External part\n \"((?P<host>[a-fA-F\\d.:]+):)?\" # Address\n \"(?P<ext>[\\d]*)(-(?P<ext_end>[\\d]+))?:\" # External range\n \")?\"\n \"(?P<int>[\\d]+)(-(?P<int_end>[\\d]+))?\" # Internal range\n \"(?P<proto>/(udp|tcp))?\" # Protocol\n \"$\" # Match full string\n)\n\n\ndef add_port_mapping(port_bindings, internal_port, external):\n if internal_port in port_bindings:\n port_bindings[internal_port].append(external)\n else:\n port_bindings[internal_port] = [external]\n\n\ndef add_port(port_bindings, internal_port_range, external_range):\n if external_range is None:\n for internal_port in internal_port_range:\n add_port_mapping(port_bindings, internal_port, None)\n else:\n ports = zip(internal_port_range, external_range)\n for internal_port, external_port in ports:\n add_port_mapping(port_bindings, internal_port, external_port)\n\n\ndef build_port_bindings(ports):\n port_bindings = {}\n for port in ports:\n internal_port_range, external_range = split_port(port)\n add_port(port_bindings, internal_port_range, external_range)\n return port_bindings\n\n\ndef _raise_invalid_port(port):\n raise ValueError('Invalid port \"%s\", should be '\n '[[remote_ip:]remote_port[-remote_port]:]'\n 'port[/protocol]' % port)\n\n\ndef port_range(start, end, proto, randomly_available_port=False):\n if not start:\n return start\n if not end:\n return [start + proto]\n if randomly_available_port:\n return ['{}-{}'.format(start, end) + proto]\n return [str(port) + proto for port in range(int(start), int(end) + 1)]\n\n\ndef split_port(port):\n port = str(port)\n match = PORT_SPEC.match(port)\n if match is None:\n _raise_invalid_port(port)\n parts = match.groupdict()\n\n host = parts['host']\n proto = parts['proto'] or ''\n internal = port_range(parts['int'], parts['int_end'], proto)\n external = port_range(\n parts['ext'], parts['ext_end'], '', len(internal) == 1)\n\n if host is None:\n if external is not None and len(internal) != len(external):\n raise ValueError('Port ranges don\\'t match in length')\n return internal, external\n else:\n if not external:\n external = [None] * len(internal)\n elif len(internal) != len(external):\n raise ValueError('Port ranges don\\'t match in length')\n return internal, [(host, ext_port) for ext_port in external]\n",
"path": "docker/utils/ports.py"
}
] | [
{
"content": "import re\n\nPORT_SPEC = re.compile(\n \"^\" # Match full string\n \"(\" # External part\n \"((?P<host>[a-fA-F\\d.:]+):)?\" # Address\n \"(?P<ext>[\\d]*)(-(?P<ext_end>[\\d]+))?:\" # External range\n \")?\"\n \"(?P<int>[\\d]+)(-(?P<int_end>[\\d]+))?\" # Internal range\n \"(?P<proto>/(udp|tcp))?\" # Protocol\n \"$\" # Match full string\n)\n\n\ndef add_port_mapping(port_bindings, internal_port, external):\n if internal_port in port_bindings:\n port_bindings[internal_port].append(external)\n else:\n port_bindings[internal_port] = [external]\n\n\ndef add_port(port_bindings, internal_port_range, external_range):\n if external_range is None:\n for internal_port in internal_port_range:\n add_port_mapping(port_bindings, internal_port, None)\n else:\n ports = zip(internal_port_range, external_range)\n for internal_port, external_port in ports:\n add_port_mapping(port_bindings, internal_port, external_port)\n\n\ndef build_port_bindings(ports):\n port_bindings = {}\n for port in ports:\n internal_port_range, external_range = split_port(port)\n add_port(port_bindings, internal_port_range, external_range)\n return port_bindings\n\n\ndef _raise_invalid_port(port):\n raise ValueError('Invalid port \"%s\", should be '\n '[[remote_ip:]remote_port[-remote_port]:]'\n 'port[/protocol]' % port)\n\n\ndef port_range(start, end, proto, randomly_available_port=False):\n if not start:\n return start\n if not end:\n return [start + proto]\n if randomly_available_port:\n return ['{}-{}'.format(start, end) + proto]\n return [str(port) + proto for port in range(int(start), int(end) + 1)]\n\n\ndef split_port(port):\n if hasattr(port, 'legacy_repr'):\n # This is the worst hack, but it prevents a bug in Compose 1.14.0\n # https://github.com/docker/docker-py/issues/1668\n # TODO: remove once fixed in Compose stable\n port = port.legacy_repr()\n port = str(port)\n match = PORT_SPEC.match(port)\n if match is None:\n _raise_invalid_port(port)\n parts = match.groupdict()\n\n host = parts['host']\n proto = parts['proto'] or ''\n internal = port_range(parts['int'], parts['int_end'], proto)\n external = port_range(\n parts['ext'], parts['ext_end'], '', len(internal) == 1)\n\n if host is None:\n if external is not None and len(internal) != len(external):\n raise ValueError('Port ranges don\\'t match in length')\n return internal, external\n else:\n if not external:\n external = [None] * len(internal)\n elif len(internal) != len(external):\n raise ValueError('Port ranges don\\'t match in length')\n return internal, [(host, ext_port) for ext_port in external]\n",
"path": "docker/utils/ports.py"
}
] | diff --git a/docker/utils/ports.py b/docker/utils/ports.py
index 8f713c720..bf7d69727 100644
--- a/docker/utils/ports.py
+++ b/docker/utils/ports.py
@@ -54,6 +54,11 @@ def port_range(start, end, proto, randomly_available_port=False):
def split_port(port):
+ if hasattr(port, 'legacy_repr'):
+ # This is the worst hack, but it prevents a bug in Compose 1.14.0
+ # https://github.com/docker/docker-py/issues/1668
+ # TODO: remove once fixed in Compose stable
+ port = port.legacy_repr()
port = str(port)
match = PORT_SPEC.match(port)
if match is None:
|
tensorflow__addons-700 | Lifted Struct Loss Error
**System information**
- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): Google Colab (Linux)
- TensorFlow version and how it was installed (source or binary): binary 2.0
- TensorFlow-Addons version and how it was installed (source or binary): binary 0.6.0
- Python version: 3
- Is GPU used? (yes/no): Yes
**Describe the bug**
I get an assertion error when I try to initialize the LiftedStructLoss object:
`assert lshape.shape == 1` raises `AssertionError`. However, it can easily be resolved by removing the assert line. I have one more question: why does the docstring say that the vectors should not be l2-normalized, while for the semi-hard triplet loss it says that they should be l2-normalized?
**Code to reproduce the issue**
```python
import tensorflow_addons as tfa
tfa.losses.LiftedStructLoss()
```
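For what it's worth, my reading of why the assert is fragile (an assumption, not verified against the library internals): `tf.shape()` returns a rank-1 tensor, so its static shape only compares equal to 1 when `labels` is exactly 1-D, and Keras commonly feeds `y_true` with shape `(batch, 1)`:
```python
import tensorflow as tf

labels_1d = tf.constant([0, 1, 1, 0])
print(tf.shape(labels_1d).shape)  # (1,) -> the assert would pass

labels_2d = tf.reshape(labels_1d, [-1, 1])  # how Keras often passes y_true
print(tf.shape(labels_2d).shape)  # (2,) -> `assert lshape.shape == 1` fails
```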
| [
{
"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements lifted_struct_loss.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow_addons.losses import metric_learning\n\n\[email protected]_keras_serializable(package='Addons')\[email protected]\ndef lifted_struct_loss(labels, embeddings, margin=1.0):\n \"\"\"Computes the lifted structured loss.\n\n Args:\n labels: 1-D tf.int32 `Tensor` with shape [batch_size] of\n multiclass integer labels.\n embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should\n not be l2 normalized.\n margin: Float, margin term in the loss definition.\n\n Returns:\n lifted_loss: tf.float32 scalar.\n \"\"\"\n # Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.\n lshape = tf.shape(labels)\n assert lshape.shape == 1\n labels = tf.reshape(labels, [lshape[0], 1])\n\n # Build pairwise squared distance matrix.\n pairwise_distances = metric_learning.pairwise_distance(embeddings)\n\n # Build pairwise binary adjacency matrix.\n adjacency = tf.math.equal(labels, tf.transpose(labels))\n # Invert so we can select negatives only.\n adjacency_not = tf.math.logical_not(adjacency)\n\n batch_size = tf.size(labels)\n\n diff = margin - pairwise_distances\n mask = tf.cast(adjacency_not, dtype=tf.dtypes.float32)\n # Safe maximum: Temporarily shift negative distances\n # above zero before taking max.\n # this is to take the max only among negatives.\n row_minimums = tf.math.reduce_min(diff, 1, keepdims=True)\n row_negative_maximums = tf.math.reduce_max(\n tf.math.multiply(diff - row_minimums, mask), 1,\n keepdims=True) + row_minimums\n\n # Compute the loss.\n # Keep track of matrix of maximums where M_ij = max(m_i, m_j)\n # where m_i is the max of alpha - negative D_i's.\n # This matches the Caffe loss layer implementation at:\n # https://github.com/rksltnl/Caffe-Deep-Metric-Learning-CVPR16/blob/0efd7544a9846f58df923c8b992198ba5c355454/src/caffe/layers/lifted_struct_similarity_softmax_layer.cpp # pylint: disable=line-too-long\n\n max_elements = tf.math.maximum(row_negative_maximums,\n tf.transpose(row_negative_maximums))\n diff_tiled = tf.tile(diff, [batch_size, 1])\n mask_tiled = tf.tile(mask, [batch_size, 1])\n max_elements_vect = tf.reshape(tf.transpose(max_elements), [-1, 1])\n\n loss_exp_left = tf.reshape(\n tf.math.reduce_sum(\n tf.math.multiply(\n tf.math.exp(diff_tiled - max_elements_vect), mask_tiled),\n 1,\n keepdims=True), [batch_size, batch_size])\n\n loss_mat = max_elements + tf.math.log(loss_exp_left +\n tf.transpose(loss_exp_left))\n # Add the positive distance.\n loss_mat += pairwise_distances\n\n mask_positives = tf.cast(\n adjacency, dtype=tf.dtypes.float32) - tf.linalg.diag(\n tf.ones([batch_size]))\n\n # *0.5 for upper triangular, and another *0.5 for 1/2 factor for 
loss^2.\n num_positives = tf.math.reduce_sum(mask_positives) / 2.0\n\n lifted_loss = tf.math.truediv(\n 0.25 * tf.math.reduce_sum(\n tf.math.square(\n tf.math.maximum(\n tf.math.multiply(loss_mat, mask_positives), 0.0))),\n num_positives)\n return lifted_loss\n\n\[email protected]_keras_serializable(package='Addons')\nclass LiftedStructLoss(tf.keras.losses.Loss):\n \"\"\"Computes the lifted structured loss.\n\n The loss encourages the positive distances (between a pair of embeddings\n with the same labels) to be smaller than any negative distances (between\n a pair of embeddings with different labels) in the mini-batch in a way\n that is differentiable with respect to the embedding vectors.\n See: https://arxiv.org/abs/1511.06452.\n\n Args:\n margin: Float, margin term in the loss definition.\n name: Optional name for the op.\n \"\"\"\n\n def __init__(self, margin=1.0, name=None):\n super(LiftedStructLoss, self).__init__(\n name=name, reduction=tf.keras.losses.Reduction.NONE)\n self.margin = margin\n\n def call(self, y_true, y_pred):\n return lifted_struct_loss(y_true, y_pred, self.margin)\n\n def get_config(self):\n config = {\n \"margin\": self.margin,\n }\n base_config = super(LiftedStructLoss, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n",
"path": "tensorflow_addons/losses/lifted.py"
}
] | [
{
"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements lifted_struct_loss.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow_addons.losses import metric_learning\n\n\[email protected]_keras_serializable(package='Addons')\[email protected]\ndef lifted_struct_loss(labels, embeddings, margin=1.0):\n \"\"\"Computes the lifted structured loss.\n\n Args:\n labels: 1-D tf.int32 `Tensor` with shape [batch_size] of\n multiclass integer labels.\n embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should\n not be l2 normalized.\n margin: Float, margin term in the loss definition.\n\n Returns:\n lifted_loss: tf.float32 scalar.\n \"\"\"\n # Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.\n lshape = tf.shape(labels)\n labels = tf.reshape(labels, [lshape[0], 1])\n\n # Build pairwise squared distance matrix.\n pairwise_distances = metric_learning.pairwise_distance(embeddings)\n\n # Build pairwise binary adjacency matrix.\n adjacency = tf.math.equal(labels, tf.transpose(labels))\n # Invert so we can select negatives only.\n adjacency_not = tf.math.logical_not(adjacency)\n\n batch_size = tf.size(labels)\n\n diff = margin - pairwise_distances\n mask = tf.cast(adjacency_not, dtype=tf.dtypes.float32)\n # Safe maximum: Temporarily shift negative distances\n # above zero before taking max.\n # this is to take the max only among negatives.\n row_minimums = tf.math.reduce_min(diff, 1, keepdims=True)\n row_negative_maximums = tf.math.reduce_max(\n tf.math.multiply(diff - row_minimums, mask), 1,\n keepdims=True) + row_minimums\n\n # Compute the loss.\n # Keep track of matrix of maximums where M_ij = max(m_i, m_j)\n # where m_i is the max of alpha - negative D_i's.\n # This matches the Caffe loss layer implementation at:\n # https://github.com/rksltnl/Caffe-Deep-Metric-Learning-CVPR16/blob/0efd7544a9846f58df923c8b992198ba5c355454/src/caffe/layers/lifted_struct_similarity_softmax_layer.cpp # pylint: disable=line-too-long\n\n max_elements = tf.math.maximum(row_negative_maximums,\n tf.transpose(row_negative_maximums))\n diff_tiled = tf.tile(diff, [batch_size, 1])\n mask_tiled = tf.tile(mask, [batch_size, 1])\n max_elements_vect = tf.reshape(tf.transpose(max_elements), [-1, 1])\n\n loss_exp_left = tf.reshape(\n tf.math.reduce_sum(\n tf.math.multiply(\n tf.math.exp(diff_tiled - max_elements_vect), mask_tiled),\n 1,\n keepdims=True), [batch_size, batch_size])\n\n loss_mat = max_elements + tf.math.log(loss_exp_left +\n tf.transpose(loss_exp_left))\n # Add the positive distance.\n loss_mat += pairwise_distances\n\n mask_positives = tf.cast(\n adjacency, dtype=tf.dtypes.float32) - tf.linalg.diag(\n tf.ones([batch_size]))\n\n # *0.5 for upper triangular, and another *0.5 for 1/2 factor for loss^2.\n num_positives = 
tf.math.reduce_sum(mask_positives) / 2.0\n\n lifted_loss = tf.math.truediv(\n 0.25 * tf.math.reduce_sum(\n tf.math.square(\n tf.math.maximum(\n tf.math.multiply(loss_mat, mask_positives), 0.0))),\n num_positives)\n return lifted_loss\n\n\[email protected]_keras_serializable(package='Addons')\nclass LiftedStructLoss(tf.keras.losses.Loss):\n \"\"\"Computes the lifted structured loss.\n\n The loss encourages the positive distances (between a pair of embeddings\n with the same labels) to be smaller than any negative distances (between\n a pair of embeddings with different labels) in the mini-batch in a way\n that is differentiable with respect to the embedding vectors.\n See: https://arxiv.org/abs/1511.06452.\n\n Args:\n margin: Float, margin term in the loss definition.\n name: Optional name for the op.\n \"\"\"\n\n def __init__(self, margin=1.0, name=None):\n super(LiftedStructLoss, self).__init__(\n name=name, reduction=tf.keras.losses.Reduction.NONE)\n self.margin = margin\n\n def call(self, y_true, y_pred):\n return lifted_struct_loss(y_true, y_pred, self.margin)\n\n def get_config(self):\n config = {\n \"margin\": self.margin,\n }\n base_config = super(LiftedStructLoss, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n",
"path": "tensorflow_addons/losses/lifted.py"
}
] | diff --git a/tensorflow_addons/losses/lifted.py b/tensorflow_addons/losses/lifted.py
index de2fb07de3..ef69f5cab7 100644
--- a/tensorflow_addons/losses/lifted.py
+++ b/tensorflow_addons/losses/lifted.py
@@ -39,7 +39,6 @@ def lifted_struct_loss(labels, embeddings, margin=1.0):
"""
# Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
lshape = tf.shape(labels)
- assert lshape.shape == 1
labels = tf.reshape(labels, [lshape[0], 1])
# Build pairwise squared distance matrix.
diff --git a/tensorflow_addons/losses/lifted_test.py b/tensorflow_addons/losses/lifted_test.py
index 0f2b9fda88..2191639eea 100644
--- a/tensorflow_addons/losses/lifted_test.py
+++ b/tensorflow_addons/losses/lifted_test.py
@@ -64,10 +64,14 @@ def testLiftedStruct(self):
labels = np.random.randint(
0, num_classes, size=(num_data)).astype(np.float32)
# Reshape labels to compute adjacency matrix.
+ # pylint: disable=E1136
labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
+ # pylint: enable=E1136
# Compute the loss in NP
+ # pylint: disable=E1111
adjacency = np.equal(labels_reshaped, labels_reshaped.T)
+ # pylint: enable=E1111
pdist_matrix = pairwise_distance_np(embedding)
loss_np = 0.0
num_constraints = 0.0
@@ -102,6 +106,13 @@ def testLiftedStruct(self):
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), loss_np, 3)
+ def test_keras_model_compile(self):
+ model = tf.keras.models.Sequential([
+ tf.keras.layers.Input(shape=(784,)),
+ tf.keras.layers.Dense(10),
+ ])
+ model.compile(loss="Addons>lifted_struct_loss", optimizer="adam")
+
if __name__ == '__main__':
tf.test.main()
|
liqd__a4-meinberlin-1653 | add "gesamtstädtisch" as null-option to projects
| [
{
"content": "from django.contrib import admin\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.projects import models\n\n\nclass ProjectAdmin(admin.ModelAdmin):\n list_display = (\n '__str__', 'organisation', 'is_draft', 'is_archived', 'created'\n )\n list_filter = ('is_draft', 'is_archived', 'organisation')\n search_fields = ('name',)\n raw_id_fields = ('moderators', 'participants')\n date_hierarchy = 'created'\n\n fieldsets = (\n (None, {\n 'fields': ('name', 'organisation')\n }),\n (_('Topic and location'), {\n 'fields': ('topic', 'point', 'administrative_district'),\n }),\n (_('Information and result'), {\n 'fields': ('description', 'information', 'result'),\n }),\n (_('Settings'), {\n 'classes': ('collapse',),\n 'fields': ('is_public', 'is_draft', 'is_archived',\n 'moderators', 'participants')\n }),\n (_('Images'), {\n 'classes': ('collapse',),\n 'fields': ('image', 'image_copyright', 'tile_image',\n 'tile_image_copyright')\n }),\n (_('Contact'), {\n 'classes': ('collapse',),\n 'fields': ('contact_name', 'contact_address_text',\n 'contact_phone', 'contact_email', 'contact_url'),\n }),\n )\n\n\n# Overwrite adhocracy4.projects.admin\nadmin.site.unregister(models.Project)\nadmin.site.register(models.Project, ProjectAdmin)\n",
"path": "meinberlin/apps/projects/admin.py"
}
] | [
{
"content": "from django.contrib import admin\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.projects import models\n\n\nclass ProjectAdmin(admin.ModelAdmin):\n list_display = (\n '__str__', 'organisation', 'is_draft', 'is_archived', 'created'\n )\n list_filter = ('is_draft', 'is_archived', 'organisation')\n search_fields = ('name',)\n raw_id_fields = ('moderators', 'participants')\n date_hierarchy = 'created'\n\n fieldsets = (\n (None, {\n 'fields': ('name', 'organisation')\n }),\n (_('Topic and location'), {\n 'fields': ('topic', 'point', 'administrative_district'),\n }),\n (_('Information and result'), {\n 'fields': ('description', 'information', 'result'),\n }),\n (_('Settings'), {\n 'classes': ('collapse',),\n 'fields': ('is_public', 'is_draft', 'is_archived',\n 'moderators', 'participants')\n }),\n (_('Images'), {\n 'classes': ('collapse',),\n 'fields': ('image', 'image_copyright', 'tile_image',\n 'tile_image_copyright')\n }),\n (_('Contact'), {\n 'classes': ('collapse',),\n 'fields': ('contact_name', 'contact_address_text',\n 'contact_phone', 'contact_email', 'contact_url'),\n }),\n )\n\n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == 'administrative_district':\n kwargs['empty_label'] = _('City wide')\n return super().formfield_for_foreignkey(db_field, request, **kwargs)\n\n\n# Overwrite adhocracy4.projects.admin\nadmin.site.unregister(models.Project)\nadmin.site.register(models.Project, ProjectAdmin)\n",
"path": "meinberlin/apps/projects/admin.py"
}
] | diff --git a/meinberlin/apps/projects/admin.py b/meinberlin/apps/projects/admin.py
index 28797ae473..8abc1d60c0 100644
--- a/meinberlin/apps/projects/admin.py
+++ b/meinberlin/apps/projects/admin.py
@@ -40,6 +40,11 @@ class ProjectAdmin(admin.ModelAdmin):
}),
)
+ def formfield_for_foreignkey(self, db_field, request, **kwargs):
+ if db_field.name == 'administrative_district':
+ kwargs['empty_label'] = _('City wide')
+ return super().formfield_for_foreignkey(db_field, request, **kwargs)
+
# Overwrite adhocracy4.projects.admin
admin.site.unregister(models.Project)
|
googleapis__python-bigquery-135 | A new release of rsa dependency breaks Python 2.7 tests
Recent `rsa` releases are not compatible with Python 2.7 anymore; the last compatible version is 4.0. We need to put an upper bound on its version in order to preserve Python 2.7 compatibility.
> Major changes in 4.1
Version 4.0 was the last version to support Python 2 and 3.4. Version 4.1 is compatible with Python 3.5+ only.
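The standard way to express this in `setup.py` is a pair of PEP 508 environment markers; a sketch with the bounds used in the fix below:
```python
install_requires = [
    # rsa >= 4.1 dropped Python 2 support, so cap it there on 2.7
    'rsa <4.1; python_version < "3"',
    'rsa >=3.1.4, <5; python_version >= "3"',
]
```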
| [
{
"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\nversion = \"1.25.0\"\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n 'enum34; python_version < \"3.4\"',\n \"google-auth >= 1.9.0, < 2.0dev\",\n \"google-api-core >= 1.15.0, < 2.0dev\",\n \"google-cloud-core >= 1.1.0, < 2.0dev\",\n \"google-resumable-media >= 0.5.0, < 0.6dev\",\n \"protobuf >= 3.6.0\",\n \"six >=1.13.0,< 2.0.0dev\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 1.0.0, <2.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83\n \"grpcio >= 1.8.2, < 2.0dev\",\n \"pyarrow>=0.16.0, < 2.0dev\",\n ],\n \"pandas\": [\"pandas>=0.17.1\"],\n # Exclude PyArrow dependency from Windows Python 2.7.\n 'pyarrow: platform_system != \"Windows\" or python_version >= \"3.4\"': [\n # Bad Linux release for 0.14.0.\n # https://issues.apache.org/jira/browse/ARROW-5868\n \"pyarrow>=0.4.1, != 0.14.0\"\n ],\n \"tqdm\": [\"tqdm >= 4.0.0, <5.0.0dev\"],\n \"fastparquet\": [\n \"fastparquet\",\n \"python-snappy\",\n # llvmlite >= 0.32.0 cannot be installed on Python 3.5 and below\n # (building the wheel fails), thus needs to be restricted.\n # See: https://github.com/googleapis/python-bigquery/issues/78\n \"llvmlite <= 0.31.0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n if extra == \"fastparquet\":\n # Skip fastparquet from \"all\" because it is redundant with pyarrow and\n # creates a dependency on pre-release versions of numpy. See:\n # https://github.com/googleapis/google-cloud-python/issues/8549\n continue\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. 
Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages() if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n include_package_data=True,\n zip_safe=False,\n)\n",
"path": "setup.py"
}
] | [
{
"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\nversion = \"1.25.0\"\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n 'enum34; python_version < \"3.4\"',\n \"google-auth >= 1.9.0, < 2.0dev\",\n \"google-api-core >= 1.15.0, < 2.0dev\",\n \"google-cloud-core >= 1.1.0, < 2.0dev\",\n \"google-resumable-media >= 0.5.0, < 0.6dev\",\n \"protobuf >= 3.6.0\",\n \"six >=1.13.0,< 2.0.0dev\",\n # rsa >= 4.1 is not compatible with Python 2\n # https://github.com/sybrenstuvel/python-rsa/issues/152\n 'rsa <4.1; python_version < \"3\"',\n 'rsa >=3.1.4, <5; python_version >= \"3\"',\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 1.0.0, <2.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83\n \"grpcio >= 1.8.2, < 2.0dev\",\n \"pyarrow>=0.16.0, < 2.0dev\",\n ],\n \"pandas\": [\"pandas>=0.17.1\"],\n # Exclude PyArrow dependency from Windows Python 2.7.\n 'pyarrow: platform_system != \"Windows\" or python_version >= \"3.4\"': [\n # Bad Linux release for 0.14.0.\n # https://issues.apache.org/jira/browse/ARROW-5868\n \"pyarrow>=0.4.1, != 0.14.0\"\n ],\n \"tqdm\": [\"tqdm >= 4.0.0, <5.0.0dev\"],\n \"fastparquet\": [\n \"fastparquet\",\n \"python-snappy\",\n # llvmlite >= 0.32.0 cannot be installed on Python 3.5 and below\n # (building the wheel fails), thus needs to be restricted.\n # See: https://github.com/googleapis/python-bigquery/issues/78\n \"llvmlite <= 0.31.0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n if extra == \"fastparquet\":\n # Skip fastparquet from \"all\" because it is redundant with pyarrow and\n # creates a dependency on pre-release versions of numpy. See:\n # https://github.com/googleapis/google-cloud-python/issues/8549\n continue\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. 
Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages() if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n include_package_data=True,\n zip_safe=False,\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 422584d12..ad2bcf68d 100644
--- a/setup.py
+++ b/setup.py
@@ -36,6 +36,10 @@
"google-resumable-media >= 0.5.0, < 0.6dev",
"protobuf >= 3.6.0",
"six >=1.13.0,< 2.0.0dev",
+ # rsa >= 4.1 is not compatible with Python 2
+ # https://github.com/sybrenstuvel/python-rsa/issues/152
+ 'rsa <4.1; python_version < "3"',
+ 'rsa >=3.1.4, <5; python_version >= "3"',
]
extras = {
"bqstorage": [
|
streamlit__streamlit-2611 | st.expander cuts off sliders
# Summary
Expander in sidebar cuts off sliders and potentially other stuff
# Steps to reproduce
Run the following code:
```
import streamlit as st
sidebar_expander = st.sidebar.beta_expander("Bad sliders underneath!")
with sidebar_expander:
st.slider("Bad layout slider 1", 0, 100, value=0)
st.slider("Bad layout slider 2", 0, 100, value=(0,100))
st.sidebar.slider("Good layout slider")
```
## Expected behavior:
Sliders appear nicely under the expander
## Actual behavior:
Sliders ends are cut off under the expander (see picture)

## Is this a regression?
No
# Debug info
- Streamlit version: 0.69.1
- Python version: 3.8.2
- Using PipEnv
- OS version: Windows 10
- Browser version: Firefox and Chrome, recent versions
# Additional information
This may also happen in containers outside the sidebar; I have not checked, though.
| [
{
"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\ncontainer = st.beta_container()\ncontainer.write(\"I cannot collapse\")\n\nexpander = st.beta_expander(\"Collapse me!\", expanded=True)\nexpander.write(\"I can collapse\")\n\ncollapsed = st.beta_expander(\"Expand me!\")\ncollapsed.write(\"I am already collapsed\")\n\nsidebar = st.sidebar.beta_expander(\"Expand me!\")\nsidebar.write(\"I am in the sidebar\")\n",
"path": "e2e/scripts/st_expander.py"
}
] | [
{
"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\ncontainer = st.beta_container()\ncontainer.write(\"I cannot collapse\")\n\nexpander = st.beta_expander(\"Collapse me!\", expanded=True)\nexpander.write(\"I can collapse\")\nexpander.slider(\"I don't get cut off\")\nexpander.button(\"I'm also not cut off (while focused)\")\n\ncollapsed = st.beta_expander(\"Expand me!\")\ncollapsed.write(\"I am already collapsed\")\n\nsidebar = st.sidebar.beta_expander(\"Expand me!\")\nsidebar.write(\"I am in the sidebar\")\n",
"path": "e2e/scripts/st_expander.py"
}
] | diff --git a/e2e/scripts/st_expander.py b/e2e/scripts/st_expander.py
index f0752baf2bd4..b36439148699 100644
--- a/e2e/scripts/st_expander.py
+++ b/e2e/scripts/st_expander.py
@@ -19,6 +19,8 @@
expander = st.beta_expander("Collapse me!", expanded=True)
expander.write("I can collapse")
+expander.slider("I don't get cut off")
+expander.button("I'm also not cut off (while focused)")
collapsed = st.beta_expander("Expand me!")
collapsed.write("I am already collapsed")
diff --git a/e2e/specs/st_expander.spec.js b/e2e/specs/st_expander.spec.js
index 9464996662d1..b7af55f85e9d 100644
--- a/e2e/specs/st_expander.spec.js
+++ b/e2e/specs/st_expander.spec.js
@@ -47,6 +47,9 @@ describe("st.expander", () => {
});
it("displays correctly", () => {
+ // Focus the button, then ensure it's not cut off
+ // See https://github.com/streamlit/streamlit/issues/2437
+ cy.get(".stButton button").focus();
cy.get(".main").matchImageSnapshot("expanders-in-main");
cy.get("[data-testid='stSidebar']").matchImageSnapshot(
"expanders-in-sidebar"
diff --git a/frontend/cypress/snapshots/linux/2x/st_expander.spec.js/expanders-in-main.snap.png b/frontend/cypress/snapshots/linux/2x/st_expander.spec.js/expanders-in-main.snap.png
index ceb76dbe3f77..092cf5f9f6bd 100644
Binary files a/frontend/cypress/snapshots/linux/2x/st_expander.spec.js/expanders-in-main.snap.png and b/frontend/cypress/snapshots/linux/2x/st_expander.spec.js/expanders-in-main.snap.png differ
diff --git a/frontend/src/hocs/withExpandable/withExpandable.tsx b/frontend/src/hocs/withExpandable/withExpandable.tsx
index 8fa41f5918ea..34ae5e4e7ebb 100644
--- a/frontend/src/hocs/withExpandable/withExpandable.tsx
+++ b/frontend/src/hocs/withExpandable/withExpandable.tsx
@@ -71,6 +71,7 @@ function withExpandable(
marginRight: spacing.none,
marginTop: spacing.none,
marginBottom: spacing.none,
+ overflow: "visible",
paddingLeft: spacing.none,
paddingRight: spacing.none,
paddingTop: $expanded ? "1em" : 0,
|
django-extensions__django-extensions-335 | Auto-populating a UUID field on an existing object throws an exception
Reproduction Steps:
1. Create model without UUID field
2. Create instance of that model
3. Add automatic UUID field and sync DB
4. Modify existing instance
Error:
TypeError: coercing to Unicode: need string or buffer, UUID found
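The error appears to come from how `six.u()` is implemented (assuming six 1.x, where `u()` amounts to `unicode(s, "unicode_escape")` on Python 2): calling `unicode()` with an explicit encoding requires a string or buffer, so passing a `uuid.UUID` instance raises the `TypeError` above. A Python 2 sketch of the failure, and of the coercion that does work:
```
import uuid
from django.utils.encoding import force_text  # force_unicode on older Django

value = uuid.uuid1()

try:
    unicode(value, "unicode_escape")  # roughly what six.u(value) boils down to
except TypeError as exc:
    print(exc)  # coercing to Unicode: need string or buffer, UUID found

# force_text() coerces arbitrary objects through their text
# representation, so a UUID instance becomes its 36-character string.
print(force_text(value))
```
This matches the one-line fix in the diff below: the non-add branch of `UUIDField.pre_save` switches from `six.u(...)` to `force_unicode(...)`.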
| [
{
"content": "\"\"\"\nDjango Extensions additional model fields\n\"\"\"\nimport re\nimport six\ntry:\n import uuid\n HAS_UUID = True\nexcept ImportError:\n HAS_UUID = False\n\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.template.defaultfilters import slugify\nfrom django.db.models import DateTimeField, CharField, SlugField\n\ntry:\n from django.utils.timezone import now as datetime_now\n assert datetime_now\nexcept ImportError:\n import datetime\n datetime_now = datetime.datetime.now\n\ntry:\n from django.utils.encoding import force_unicode # NOQA\nexcept ImportError:\n from django.utils.encoding import force_text as force_unicode # NOQA\n\n\nclass AutoSlugField(SlugField):\n \"\"\" AutoSlugField\n\n By default, sets editable=False, blank=True.\n\n Required arguments:\n\n populate_from\n Specifies which field or list of fields the slug is populated from.\n\n Optional arguments:\n\n separator\n Defines the used separator (default: '-')\n\n overwrite\n If set to True, overwrites the slug on every save (default: False)\n\n Inspired by SmileyChris' Unique Slugify snippet:\n http://www.djangosnippets.org/snippets/690/\n \"\"\"\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('blank', True)\n kwargs.setdefault('editable', False)\n\n populate_from = kwargs.pop('populate_from', None)\n if populate_from is None:\n raise ValueError(\"missing 'populate_from' argument\")\n else:\n self._populate_from = populate_from\n self.separator = kwargs.pop('separator', six.u('-'))\n self.overwrite = kwargs.pop('overwrite', False)\n self.allow_duplicates = kwargs.pop('allow_duplicates', False)\n super(AutoSlugField, self).__init__(*args, **kwargs)\n\n def _slug_strip(self, value):\n \"\"\"\n Cleans up a slug by removing slug separator characters that occur at\n the beginning or end of a slug.\n\n If an alternate separator is used, it will also replace any instances\n of the default '-' separator with the new separator.\n \"\"\"\n re_sep = '(?:-|%s)' % re.escape(self.separator)\n value = re.sub('%s+' % re_sep, self.separator, value)\n return re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value)\n\n def get_queryset(self, model_cls, slug_field):\n for field, model in model_cls._meta.get_fields_with_model():\n if model and field == slug_field:\n return model._default_manager.all()\n return model_cls._default_manager.all()\n\n def slugify_func(self, content):\n if content:\n return slugify(content)\n return ''\n\n def create_slug(self, model_instance, add):\n # get fields to populate from and slug field to set\n if not isinstance(self._populate_from, (list, tuple)):\n self._populate_from = (self._populate_from, )\n slug_field = model_instance._meta.get_field(self.attname)\n\n if add or self.overwrite:\n # slugify the original field content and set next step to 2\n slug_for_field = lambda field: self.slugify_func(getattr(model_instance, field))\n slug = self.separator.join(map(slug_for_field, self._populate_from))\n next = 2\n else:\n # get slug from the current model instance\n slug = getattr(model_instance, self.attname)\n # model_instance is being modified, and overwrite is False,\n # so instead of doing anything, just return the current slug\n return slug\n\n # strip slug depending on max_length attribute of the slug field\n # and clean-up\n slug_len = slug_field.max_length\n if slug_len:\n slug = slug[:slug_len]\n slug = self._slug_strip(slug)\n original_slug = slug\n\n if self.allow_duplicates:\n return slug\n\n # exclude the current model instance from the queryset used in 
finding\n # the next valid slug\n queryset = self.get_queryset(model_instance.__class__, slug_field)\n if model_instance.pk:\n queryset = queryset.exclude(pk=model_instance.pk)\n\n # form a kwarg dict used to impliment any unique_together contraints\n kwargs = {}\n for params in model_instance._meta.unique_together:\n if self.attname in params:\n for param in params:\n kwargs[param] = getattr(model_instance, param, None)\n kwargs[self.attname] = slug\n\n # increases the number while searching for the next valid slug\n # depending on the given slug, clean-up\n while not slug or queryset.filter(**kwargs):\n slug = original_slug\n end = '%s%s' % (self.separator, next)\n end_len = len(end)\n if slug_len and len(slug) + end_len > slug_len:\n slug = slug[:slug_len - end_len]\n slug = self._slug_strip(slug)\n slug = '%s%s' % (slug, end)\n kwargs[self.attname] = slug\n next += 1\n return slug\n\n def pre_save(self, model_instance, add):\n value = force_unicode(self.create_slug(model_instance, add))\n setattr(model_instance, self.attname, value)\n return value\n\n def get_internal_type(self):\n return \"SlugField\"\n\n def south_field_triple(self):\n \"Returns a suitable description of this field for South.\"\n # We'll just introspect the _actual_ field.\n from south.modelsinspector import introspector\n field_class = '%s.AutoSlugField' % self.__module__\n args, kwargs = introspector(self)\n kwargs.update({\n 'populate_from': repr(self._populate_from),\n 'separator': repr(self.separator),\n 'overwrite': repr(self.overwrite),\n 'allow_duplicates': repr(self.allow_duplicates),\n })\n # That's our definition!\n return (field_class, args, kwargs)\n\n\nclass CreationDateTimeField(DateTimeField):\n \"\"\" CreationDateTimeField\n\n By default, sets editable=False, blank=True, default=datetime.now\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('editable', False)\n kwargs.setdefault('blank', True)\n kwargs.setdefault('default', datetime_now)\n DateTimeField.__init__(self, *args, **kwargs)\n\n def get_internal_type(self):\n return \"DateTimeField\"\n\n def south_field_triple(self):\n \"Returns a suitable description of this field for South.\"\n # We'll just introspect ourselves, since we inherit.\n from south.modelsinspector import introspector\n field_class = \"django.db.models.fields.DateTimeField\"\n args, kwargs = introspector(self)\n return (field_class, args, kwargs)\n\n\nclass ModificationDateTimeField(CreationDateTimeField):\n \"\"\" ModificationDateTimeField\n\n By default, sets editable=False, blank=True, default=datetime.now\n\n Sets value to datetime.now() on each save of the model.\n \"\"\"\n\n def pre_save(self, model, add):\n value = datetime_now()\n setattr(model, self.attname, value)\n return value\n\n def get_internal_type(self):\n return \"DateTimeField\"\n\n def south_field_triple(self):\n \"Returns a suitable description of this field for South.\"\n # We'll just introspect ourselves, since we inherit.\n from south.modelsinspector import introspector\n field_class = \"django.db.models.fields.DateTimeField\"\n args, kwargs = introspector(self)\n return (field_class, args, kwargs)\n\n\nclass UUIDVersionError(Exception):\n pass\n\n\nclass UUIDField(CharField):\n \"\"\" UUIDField\n\n By default uses UUID version 1 (generate from host ID, sequence number and current time)\n\n The field support all uuid versions which are natively supported by the uuid python module.\n For more information see: http://docs.python.org/lib/module-uuid.html\n \"\"\"\n\n def __init__(self, 
verbose_name=None, name=None, auto=True, version=1, node=None, clock_seq=None, namespace=None, **kwargs):\n if not HAS_UUID:\n raise ImproperlyConfigured(\"'uuid' module is required for UUIDField. (Do you have Python 2.5 or higher installed ?)\")\n kwargs.setdefault('max_length', 36)\n if auto:\n self.empty_strings_allowed = False\n kwargs['blank'] = True\n kwargs.setdefault('editable', False)\n self.auto = auto\n self.version = version\n if version == 1:\n self.node, self.clock_seq = node, clock_seq\n elif version == 3 or version == 5:\n self.namespace, self.name = namespace, name\n CharField.__init__(self, verbose_name, name, **kwargs)\n\n def get_internal_type(self):\n return CharField.__name__\n\n def create_uuid(self):\n if not self.version or self.version == 4:\n return uuid.uuid4()\n elif self.version == 1:\n return uuid.uuid1(self.node, self.clock_seq)\n elif self.version == 2:\n raise UUIDVersionError(\"UUID version 2 is not supported.\")\n elif self.version == 3:\n return uuid.uuid3(self.namespace, self.name)\n elif self.version == 5:\n return uuid.uuid5(self.namespace, self.name)\n else:\n raise UUIDVersionError(\"UUID version %s is not valid.\" % self.version)\n\n def pre_save(self, model_instance, add):\n value = super(UUIDField, self).pre_save(model_instance, add)\n if self.auto and add and value is None:\n value = force_unicode(self.create_uuid())\n setattr(model_instance, self.attname, value)\n return value\n else:\n if self.auto and not value:\n value = six.u(self.create_uuid())\n setattr(model_instance, self.attname, value)\n return value\n\n def formfield(self, **kwargs):\n if self.auto:\n return None\n return super(UUIDField, self).formfield(**kwargs)\n\n def south_field_triple(self):\n \"Returns a suitable description of this field for South.\"\n # We'll just introspect the _actual_ field.\n from south.modelsinspector import introspector\n field_class = \"django.db.models.fields.CharField\"\n args, kwargs = introspector(self)\n # That's our definition!\n return (field_class, args, kwargs)\n",
"path": "django_extensions/db/fields/__init__.py"
}
] | [
{
"content": "\"\"\"\nDjango Extensions additional model fields\n\"\"\"\nimport re\nimport six\ntry:\n import uuid\n HAS_UUID = True\nexcept ImportError:\n HAS_UUID = False\n\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.template.defaultfilters import slugify\nfrom django.db.models import DateTimeField, CharField, SlugField\n\ntry:\n from django.utils.timezone import now as datetime_now\n assert datetime_now\nexcept ImportError:\n import datetime\n datetime_now = datetime.datetime.now\n\ntry:\n from django.utils.encoding import force_unicode # NOQA\nexcept ImportError:\n from django.utils.encoding import force_text as force_unicode # NOQA\n\n\nclass AutoSlugField(SlugField):\n \"\"\" AutoSlugField\n\n By default, sets editable=False, blank=True.\n\n Required arguments:\n\n populate_from\n Specifies which field or list of fields the slug is populated from.\n\n Optional arguments:\n\n separator\n Defines the used separator (default: '-')\n\n overwrite\n If set to True, overwrites the slug on every save (default: False)\n\n Inspired by SmileyChris' Unique Slugify snippet:\n http://www.djangosnippets.org/snippets/690/\n \"\"\"\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('blank', True)\n kwargs.setdefault('editable', False)\n\n populate_from = kwargs.pop('populate_from', None)\n if populate_from is None:\n raise ValueError(\"missing 'populate_from' argument\")\n else:\n self._populate_from = populate_from\n self.separator = kwargs.pop('separator', six.u('-'))\n self.overwrite = kwargs.pop('overwrite', False)\n self.allow_duplicates = kwargs.pop('allow_duplicates', False)\n super(AutoSlugField, self).__init__(*args, **kwargs)\n\n def _slug_strip(self, value):\n \"\"\"\n Cleans up a slug by removing slug separator characters that occur at\n the beginning or end of a slug.\n\n If an alternate separator is used, it will also replace any instances\n of the default '-' separator with the new separator.\n \"\"\"\n re_sep = '(?:-|%s)' % re.escape(self.separator)\n value = re.sub('%s+' % re_sep, self.separator, value)\n return re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value)\n\n def get_queryset(self, model_cls, slug_field):\n for field, model in model_cls._meta.get_fields_with_model():\n if model and field == slug_field:\n return model._default_manager.all()\n return model_cls._default_manager.all()\n\n def slugify_func(self, content):\n if content:\n return slugify(content)\n return ''\n\n def create_slug(self, model_instance, add):\n # get fields to populate from and slug field to set\n if not isinstance(self._populate_from, (list, tuple)):\n self._populate_from = (self._populate_from, )\n slug_field = model_instance._meta.get_field(self.attname)\n\n if add or self.overwrite:\n # slugify the original field content and set next step to 2\n slug_for_field = lambda field: self.slugify_func(getattr(model_instance, field))\n slug = self.separator.join(map(slug_for_field, self._populate_from))\n next = 2\n else:\n # get slug from the current model instance\n slug = getattr(model_instance, self.attname)\n # model_instance is being modified, and overwrite is False,\n # so instead of doing anything, just return the current slug\n return slug\n\n # strip slug depending on max_length attribute of the slug field\n # and clean-up\n slug_len = slug_field.max_length\n if slug_len:\n slug = slug[:slug_len]\n slug = self._slug_strip(slug)\n original_slug = slug\n\n if self.allow_duplicates:\n return slug\n\n # exclude the current model instance from the queryset used in 
finding\n # the next valid slug\n queryset = self.get_queryset(model_instance.__class__, slug_field)\n if model_instance.pk:\n queryset = queryset.exclude(pk=model_instance.pk)\n\n # form a kwarg dict used to impliment any unique_together contraints\n kwargs = {}\n for params in model_instance._meta.unique_together:\n if self.attname in params:\n for param in params:\n kwargs[param] = getattr(model_instance, param, None)\n kwargs[self.attname] = slug\n\n # increases the number while searching for the next valid slug\n # depending on the given slug, clean-up\n while not slug or queryset.filter(**kwargs):\n slug = original_slug\n end = '%s%s' % (self.separator, next)\n end_len = len(end)\n if slug_len and len(slug) + end_len > slug_len:\n slug = slug[:slug_len - end_len]\n slug = self._slug_strip(slug)\n slug = '%s%s' % (slug, end)\n kwargs[self.attname] = slug\n next += 1\n return slug\n\n def pre_save(self, model_instance, add):\n value = force_unicode(self.create_slug(model_instance, add))\n setattr(model_instance, self.attname, value)\n return value\n\n def get_internal_type(self):\n return \"SlugField\"\n\n def south_field_triple(self):\n \"Returns a suitable description of this field for South.\"\n # We'll just introspect the _actual_ field.\n from south.modelsinspector import introspector\n field_class = '%s.AutoSlugField' % self.__module__\n args, kwargs = introspector(self)\n kwargs.update({\n 'populate_from': repr(self._populate_from),\n 'separator': repr(self.separator),\n 'overwrite': repr(self.overwrite),\n 'allow_duplicates': repr(self.allow_duplicates),\n })\n # That's our definition!\n return (field_class, args, kwargs)\n\n\nclass CreationDateTimeField(DateTimeField):\n \"\"\" CreationDateTimeField\n\n By default, sets editable=False, blank=True, default=datetime.now\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('editable', False)\n kwargs.setdefault('blank', True)\n kwargs.setdefault('default', datetime_now)\n DateTimeField.__init__(self, *args, **kwargs)\n\n def get_internal_type(self):\n return \"DateTimeField\"\n\n def south_field_triple(self):\n \"Returns a suitable description of this field for South.\"\n # We'll just introspect ourselves, since we inherit.\n from south.modelsinspector import introspector\n field_class = \"django.db.models.fields.DateTimeField\"\n args, kwargs = introspector(self)\n return (field_class, args, kwargs)\n\n\nclass ModificationDateTimeField(CreationDateTimeField):\n \"\"\" ModificationDateTimeField\n\n By default, sets editable=False, blank=True, default=datetime.now\n\n Sets value to datetime.now() on each save of the model.\n \"\"\"\n\n def pre_save(self, model, add):\n value = datetime_now()\n setattr(model, self.attname, value)\n return value\n\n def get_internal_type(self):\n return \"DateTimeField\"\n\n def south_field_triple(self):\n \"Returns a suitable description of this field for South.\"\n # We'll just introspect ourselves, since we inherit.\n from south.modelsinspector import introspector\n field_class = \"django.db.models.fields.DateTimeField\"\n args, kwargs = introspector(self)\n return (field_class, args, kwargs)\n\n\nclass UUIDVersionError(Exception):\n pass\n\n\nclass UUIDField(CharField):\n \"\"\" UUIDField\n\n By default uses UUID version 1 (generate from host ID, sequence number and current time)\n\n The field support all uuid versions which are natively supported by the uuid python module.\n For more information see: http://docs.python.org/lib/module-uuid.html\n \"\"\"\n\n def __init__(self, 
verbose_name=None, name=None, auto=True, version=1, node=None, clock_seq=None, namespace=None, **kwargs):\n if not HAS_UUID:\n raise ImproperlyConfigured(\"'uuid' module is required for UUIDField. (Do you have Python 2.5 or higher installed ?)\")\n kwargs.setdefault('max_length', 36)\n if auto:\n self.empty_strings_allowed = False\n kwargs['blank'] = True\n kwargs.setdefault('editable', False)\n self.auto = auto\n self.version = version\n if version == 1:\n self.node, self.clock_seq = node, clock_seq\n elif version == 3 or version == 5:\n self.namespace, self.name = namespace, name\n CharField.__init__(self, verbose_name, name, **kwargs)\n\n def get_internal_type(self):\n return CharField.__name__\n\n def create_uuid(self):\n if not self.version or self.version == 4:\n return uuid.uuid4()\n elif self.version == 1:\n return uuid.uuid1(self.node, self.clock_seq)\n elif self.version == 2:\n raise UUIDVersionError(\"UUID version 2 is not supported.\")\n elif self.version == 3:\n return uuid.uuid3(self.namespace, self.name)\n elif self.version == 5:\n return uuid.uuid5(self.namespace, self.name)\n else:\n raise UUIDVersionError(\"UUID version %s is not valid.\" % self.version)\n\n def pre_save(self, model_instance, add):\n value = super(UUIDField, self).pre_save(model_instance, add)\n if self.auto and add and value is None:\n value = force_unicode(self.create_uuid())\n setattr(model_instance, self.attname, value)\n return value\n else:\n if self.auto and not value:\n value = force_unicode(self.create_uuid())\n setattr(model_instance, self.attname, value)\n return value\n\n def formfield(self, **kwargs):\n if self.auto:\n return None\n return super(UUIDField, self).formfield(**kwargs)\n\n def south_field_triple(self):\n \"Returns a suitable description of this field for South.\"\n # We'll just introspect the _actual_ field.\n from south.modelsinspector import introspector\n field_class = \"django.db.models.fields.CharField\"\n args, kwargs = introspector(self)\n # That's our definition!\n return (field_class, args, kwargs)\n",
"path": "django_extensions/db/fields/__init__.py"
}
] | diff --git a/django_extensions/db/fields/__init__.py b/django_extensions/db/fields/__init__.py
index 9cde3d0fd..2553819d5 100644
--- a/django_extensions/db/fields/__init__.py
+++ b/django_extensions/db/fields/__init__.py
@@ -268,7 +268,7 @@ def pre_save(self, model_instance, add):
return value
else:
if self.auto and not value:
- value = six.u(self.create_uuid())
+ value = force_unicode(self.create_uuid())
setattr(model_instance, self.attname, value)
return value
|
buildbot__buildbot-3859 | Authentication problem in Buildbot 0.9.14
I created the class below
```
class MyAuth(CustomAuth):
def check_credentials(user, password):
if user == 'snow' and password == 'white':
return True
else:
return False
```
and set it as my auth class.
```
c['www']['auth']=MyAuth()
```
But it throws the following exception.
```
web.Server Traceback (most recent call last):
exceptions.AttributeError: 'str' object has no attribute 'providedBy'
/home/buildbot/virtualenv.buildbot/local/lib/python2.7/site-packages/twisted/web/server.py:195 in process
194 self._encoder = encoder
195 self.render(resrc)
196 except:
/home/buildbot/virtualenv.buildbot/local/lib/python2.7/site-packages/twisted/web/server.py:255 in render
254 try:
255 body = resrc.render(self)
256 except UnsupportedMethod as e:
/home/buildbot/virtualenv.buildbot/local/lib/python2.7/site-packages/twisted/web/_auth/wrapper.py:138 in render
137 """
138 return self._authorizedResource(request).render(request)
139
/home/buildbot/virtualenv.buildbot/local/lib/python2.7/site-packages/twisted/web/_auth/wrapper.py:116 in _authorizedResource
115 if not authheader:
116 return util.DeferredResource(self._login(Anonymous()))
117
/home/buildbot/virtualenv.buildbot/local/lib/python2.7/site-packages/twisted/web/_auth/wrapper.py:162 in _login
161 """
162 d = self._portal.login(credentials, None, IResource)
163 d.addCallbacks(self._loginSucceeded, self._loginFailed)
/home/buildbot/virtualenv.buildbot/local/lib/python2.7/site-packages/twisted/cred/portal.py:118 in login
117 for i in self.checkers:
118 if i.providedBy(credentials):
119 return maybeDeferred(self.checkers[i].requestAvatarId, credentials
exceptions.AttributeError: 'str' object has no attribute 'providedBy'
```
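As far as I can tell, the string keys in the traceback come from `credentialInterfaces` being set to the bare interface: zope.interface `Interface` objects are iterable and yield their attribute *names*, so `Portal.registerChecker` ends up registering plain strings as checker keys, and `login()` then calls `.providedBy` on them. Besides wrapping it in a list on the Buildbot side (`credentialInterfaces = [IUsernamePassword]`), note that `check_credentials` is invoked as a bound method, so the subclass needs `self` in its signature. A working sketch, matching the docs example:
```
from buildbot.plugins import util

class MyAuth(util.CustomAuth):
    # called as a bound method: the first parameter must be self
    def check_credentials(self, user, password):
        return user == 'snow' and password == 'white'

c['www']['auth'] = MyAuth()
```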
| [
{
"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport re\nfrom abc import ABCMeta\nfrom abc import abstractmethod\n\nfrom twisted.cred.checkers import FilePasswordDB\nfrom twisted.cred.checkers import ICredentialsChecker\nfrom twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse\nfrom twisted.cred.credentials import IUsernamePassword\nfrom twisted.cred.error import UnauthorizedLogin\nfrom twisted.cred.portal import IRealm\nfrom twisted.cred.portal import Portal\nfrom twisted.internet import defer\nfrom twisted.web.error import Error\nfrom twisted.web.guard import BasicCredentialFactory\nfrom twisted.web.guard import DigestCredentialFactory\nfrom twisted.web.guard import HTTPAuthSessionWrapper\nfrom twisted.web.resource import IResource\nfrom zope.interface import implementer\n\nfrom buildbot.util import bytes2unicode\nfrom buildbot.util import config\nfrom buildbot.util import unicode2bytes\nfrom buildbot.www import resource\n\n\nclass AuthRootResource(resource.Resource):\n\n def getChild(self, path, request):\n # return dynamically generated resources\n if path == b'login':\n return self.master.www.auth.getLoginResource()\n elif path == b'logout':\n return self.master.www.auth.getLogoutResource()\n return resource.Resource.getChild(self, path, request)\n\n\nclass AuthBase(config.ConfiguredMixin):\n\n def __init__(self, userInfoProvider=None):\n self.userInfoProvider = userInfoProvider\n\n def reconfigAuth(self, master, new_config):\n self.master = master\n\n def maybeAutoLogin(self, request):\n return defer.succeed(None)\n\n def getLoginResource(self):\n raise Error(501, b\"not implemented\")\n\n def getLogoutResource(self):\n return LogoutResource(self.master)\n\n @defer.inlineCallbacks\n def updateUserInfo(self, request):\n session = request.getSession()\n if self.userInfoProvider is not None:\n infos = yield self.userInfoProvider.getUserInfo(session.user_info['username'])\n session.user_info.update(infos)\n session.updateSession(request)\n\n def getConfigDict(self):\n return {'name': type(self).__name__}\n\n\nclass UserInfoProviderBase(config.ConfiguredMixin):\n name = \"noinfo\"\n\n def getUserInfo(self, username):\n return defer.succeed({'email': username})\n\n\nclass LoginResource(resource.Resource):\n\n def render_GET(self, request):\n return self.asyncRenderHelper(request, self.renderLogin)\n\n @defer.inlineCallbacks\n def renderLogin(self, request):\n raise NotImplementedError\n\n\nclass NoAuth(AuthBase):\n pass\n\n\nclass RemoteUserAuth(AuthBase):\n header = b\"REMOTE_USER\"\n headerRegex = re.compile(br\"(?P<username>[^ @]+)@(?P<realm>[^ @]+)\")\n\n def __init__(self, header=None, headerRegex=None, **kwargs):\n AuthBase.__init__(self, **kwargs)\n if self.userInfoProvider is None:\n self.userInfoProvider = 
UserInfoProviderBase()\n if header is not None:\n self.header = header\n if headerRegex is not None:\n self.headerRegex = re.compile(headerRegex)\n\n @defer.inlineCallbacks\n def maybeAutoLogin(self, request):\n header = request.getHeader(self.header)\n if header is None:\n raise Error(403, b\"missing http header \" + self.header + b\". Check your reverse proxy config!\")\n res = self.headerRegex.match(header)\n if res is None:\n raise Error(\n 403, b'http header does not match regex! \"' + header + b'\" not matching ' + self.headerRegex.pattern)\n session = request.getSession()\n if session.user_info != dict(res.groupdict()):\n session.user_info = dict(res.groupdict())\n yield self.updateUserInfo(request)\n\n\n@implementer(IRealm)\nclass AuthRealm(object):\n\n def __init__(self, master, auth):\n self.auth = auth\n self.master = master\n\n def requestAvatar(self, avatarId, mind, *interfaces):\n if IResource in interfaces:\n return (IResource,\n PreAuthenticatedLoginResource(self.master, avatarId),\n lambda: None)\n raise NotImplementedError()\n\n\nclass TwistedICredAuthBase(AuthBase):\n\n def __init__(self, credentialFactories, checkers, **kwargs):\n AuthBase.__init__(self, **kwargs)\n if self.userInfoProvider is None:\n self.userInfoProvider = UserInfoProviderBase()\n self.credentialFactories = credentialFactories\n self.checkers = checkers\n\n def getLoginResource(self):\n return HTTPAuthSessionWrapper(\n Portal(AuthRealm(self.master, self), self.checkers),\n self.credentialFactories)\n\n\nclass HTPasswdAuth(TwistedICredAuthBase):\n\n def __init__(self, passwdFile, **kwargs):\n TwistedICredAuthBase.__init__(\n self,\n [DigestCredentialFactory(b\"md5\", b\"buildbot\"),\n BasicCredentialFactory(b\"buildbot\")],\n [FilePasswordDB(passwdFile)],\n **kwargs)\n\n\nclass UserPasswordAuth(TwistedICredAuthBase):\n\n def __init__(self, users, **kwargs):\n if isinstance(users, dict):\n users = {user: unicode2bytes(pw) for user, pw in users.items()}\n elif isinstance(users, list):\n users = [(user, unicode2bytes(pw)) for user, pw in users]\n TwistedICredAuthBase.__init__(\n self,\n [DigestCredentialFactory(b\"md5\", b\"buildbot\"),\n BasicCredentialFactory(b\"buildbot\")],\n [InMemoryUsernamePasswordDatabaseDontUse(**dict(users))],\n **kwargs)\n\n\n@implementer(ICredentialsChecker)\nclass CustomAuth(TwistedICredAuthBase):\n __metaclass__ = ABCMeta\n credentialInterfaces = IUsernamePassword\n\n def __init__(self, **kwargs):\n TwistedICredAuthBase.__init__(\n self,\n [BasicCredentialFactory(b\"buildbot\")],\n [self],\n **kwargs)\n\n def requestAvatarId(self, cred):\n if self.check_credentials(cred.username, cred.password):\n return defer.succeed(cred.username)\n return defer.fail(UnauthorizedLogin())\n\n @abstractmethod\n def check_credentials(username, password):\n return False\n\n\ndef _redirect(master, request):\n url = request.args.get(b\"redirect\", [b\"/\"])[0]\n url = bytes2unicode(url)\n return resource.Redirect(master.config.buildbotURL + \"#\" + url)\n\n\nclass PreAuthenticatedLoginResource(LoginResource):\n # a LoginResource which is already authenticated via a\n # HTTPAuthSessionWrapper\n\n def __init__(self, master, username):\n LoginResource.__init__(self, master)\n self.username = username\n\n @defer.inlineCallbacks\n def renderLogin(self, request):\n session = request.getSession()\n session.user_info = dict(username=bytes2unicode(self.username))\n yield self.master.www.auth.updateUserInfo(request)\n raise _redirect(self.master, request)\n\n\nclass LogoutResource(resource.Resource):\n\n 
def render_GET(self, request):\n session = request.getSession()\n session.expire()\n session.updateSession(request)\n request.redirect(_redirect(self.master, request).url)\n return b''\n",
"path": "master/buildbot/www/auth.py"
}
] | [
{
"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport re\nfrom abc import ABCMeta\nfrom abc import abstractmethod\n\nfrom twisted.cred.checkers import FilePasswordDB\nfrom twisted.cred.checkers import ICredentialsChecker\nfrom twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse\nfrom twisted.cred.credentials import IUsernamePassword\nfrom twisted.cred.error import UnauthorizedLogin\nfrom twisted.cred.portal import IRealm\nfrom twisted.cred.portal import Portal\nfrom twisted.internet import defer\nfrom twisted.web.error import Error\nfrom twisted.web.guard import BasicCredentialFactory\nfrom twisted.web.guard import DigestCredentialFactory\nfrom twisted.web.guard import HTTPAuthSessionWrapper\nfrom twisted.web.resource import IResource\nfrom zope.interface import implementer\n\nfrom buildbot.util import bytes2unicode\nfrom buildbot.util import config\nfrom buildbot.util import unicode2bytes\nfrom buildbot.www import resource\n\n\nclass AuthRootResource(resource.Resource):\n\n def getChild(self, path, request):\n # return dynamically generated resources\n if path == b'login':\n return self.master.www.auth.getLoginResource()\n elif path == b'logout':\n return self.master.www.auth.getLogoutResource()\n return resource.Resource.getChild(self, path, request)\n\n\nclass AuthBase(config.ConfiguredMixin):\n\n def __init__(self, userInfoProvider=None):\n self.userInfoProvider = userInfoProvider\n\n def reconfigAuth(self, master, new_config):\n self.master = master\n\n def maybeAutoLogin(self, request):\n return defer.succeed(None)\n\n def getLoginResource(self):\n raise Error(501, b\"not implemented\")\n\n def getLogoutResource(self):\n return LogoutResource(self.master)\n\n @defer.inlineCallbacks\n def updateUserInfo(self, request):\n session = request.getSession()\n if self.userInfoProvider is not None:\n infos = yield self.userInfoProvider.getUserInfo(session.user_info['username'])\n session.user_info.update(infos)\n session.updateSession(request)\n\n def getConfigDict(self):\n return {'name': type(self).__name__}\n\n\nclass UserInfoProviderBase(config.ConfiguredMixin):\n name = \"noinfo\"\n\n def getUserInfo(self, username):\n return defer.succeed({'email': username})\n\n\nclass LoginResource(resource.Resource):\n\n def render_GET(self, request):\n return self.asyncRenderHelper(request, self.renderLogin)\n\n @defer.inlineCallbacks\n def renderLogin(self, request):\n raise NotImplementedError\n\n\nclass NoAuth(AuthBase):\n pass\n\n\nclass RemoteUserAuth(AuthBase):\n header = b\"REMOTE_USER\"\n headerRegex = re.compile(br\"(?P<username>[^ @]+)@(?P<realm>[^ @]+)\")\n\n def __init__(self, header=None, headerRegex=None, **kwargs):\n AuthBase.__init__(self, **kwargs)\n if self.userInfoProvider is None:\n self.userInfoProvider = 
UserInfoProviderBase()\n if header is not None:\n self.header = header\n if headerRegex is not None:\n self.headerRegex = re.compile(headerRegex)\n\n @defer.inlineCallbacks\n def maybeAutoLogin(self, request):\n header = request.getHeader(self.header)\n if header is None:\n raise Error(403, b\"missing http header \" + self.header + b\". Check your reverse proxy config!\")\n res = self.headerRegex.match(header)\n if res is None:\n raise Error(\n 403, b'http header does not match regex! \"' + header + b'\" not matching ' + self.headerRegex.pattern)\n session = request.getSession()\n if session.user_info != dict(res.groupdict()):\n session.user_info = dict(res.groupdict())\n yield self.updateUserInfo(request)\n\n\n@implementer(IRealm)\nclass AuthRealm(object):\n\n def __init__(self, master, auth):\n self.auth = auth\n self.master = master\n\n def requestAvatar(self, avatarId, mind, *interfaces):\n if IResource in interfaces:\n return (IResource,\n PreAuthenticatedLoginResource(self.master, avatarId),\n lambda: None)\n raise NotImplementedError()\n\n\nclass TwistedICredAuthBase(AuthBase):\n\n def __init__(self, credentialFactories, checkers, **kwargs):\n AuthBase.__init__(self, **kwargs)\n if self.userInfoProvider is None:\n self.userInfoProvider = UserInfoProviderBase()\n self.credentialFactories = credentialFactories\n self.checkers = checkers\n\n def getLoginResource(self):\n return HTTPAuthSessionWrapper(\n Portal(AuthRealm(self.master, self), self.checkers),\n self.credentialFactories)\n\n\nclass HTPasswdAuth(TwistedICredAuthBase):\n\n def __init__(self, passwdFile, **kwargs):\n TwistedICredAuthBase.__init__(\n self,\n [DigestCredentialFactory(b\"md5\", b\"buildbot\"),\n BasicCredentialFactory(b\"buildbot\")],\n [FilePasswordDB(passwdFile)],\n **kwargs)\n\n\nclass UserPasswordAuth(TwistedICredAuthBase):\n\n def __init__(self, users, **kwargs):\n if isinstance(users, dict):\n users = {user: unicode2bytes(pw) for user, pw in users.items()}\n elif isinstance(users, list):\n users = [(user, unicode2bytes(pw)) for user, pw in users]\n TwistedICredAuthBase.__init__(\n self,\n [DigestCredentialFactory(b\"md5\", b\"buildbot\"),\n BasicCredentialFactory(b\"buildbot\")],\n [InMemoryUsernamePasswordDatabaseDontUse(**dict(users))],\n **kwargs)\n\n\n@implementer(ICredentialsChecker)\nclass CustomAuth(TwistedICredAuthBase):\n __metaclass__ = ABCMeta\n credentialInterfaces = [IUsernamePassword]\n\n def __init__(self, **kwargs):\n TwistedICredAuthBase.__init__(\n self,\n [BasicCredentialFactory(b\"buildbot\")],\n [self],\n **kwargs)\n\n def requestAvatarId(self, cred):\n if self.check_credentials(cred.username, cred.password):\n return defer.succeed(cred.username)\n return defer.fail(UnauthorizedLogin())\n\n @abstractmethod\n def check_credentials(username, password):\n return False\n\n\ndef _redirect(master, request):\n url = request.args.get(b\"redirect\", [b\"/\"])[0]\n url = bytes2unicode(url)\n return resource.Redirect(master.config.buildbotURL + \"#\" + url)\n\n\nclass PreAuthenticatedLoginResource(LoginResource):\n # a LoginResource which is already authenticated via a\n # HTTPAuthSessionWrapper\n\n def __init__(self, master, username):\n LoginResource.__init__(self, master)\n self.username = username\n\n @defer.inlineCallbacks\n def renderLogin(self, request):\n session = request.getSession()\n session.user_info = dict(username=bytes2unicode(self.username))\n yield self.master.www.auth.updateUserInfo(request)\n raise _redirect(self.master, request)\n\n\nclass 
LogoutResource(resource.Resource):\n\n def render_GET(self, request):\n session = request.getSession()\n session.expire()\n session.updateSession(request)\n request.redirect(_redirect(self.master, request).url)\n return b''\n",
"path": "master/buildbot/www/auth.py"
}
] | diff --git a/master/buildbot/www/auth.py b/master/buildbot/www/auth.py
index 3df7c32ad228..061e09850bf6 100644
--- a/master/buildbot/www/auth.py
+++ b/master/buildbot/www/auth.py
@@ -189,7 +189,7 @@ def __init__(self, users, **kwargs):
@implementer(ICredentialsChecker)
class CustomAuth(TwistedICredAuthBase):
__metaclass__ = ABCMeta
- credentialInterfaces = IUsernamePassword
+ credentialInterfaces = [IUsernamePassword]
def __init__(self, **kwargs):
TwistedICredAuthBase.__init__(
diff --git a/master/docs/manual/cfg-www.rst b/master/docs/manual/cfg-www.rst
index 31cbfe743774..615706641a88 100644
--- a/master/docs/manual/cfg-www.rst
+++ b/master/docs/manual/cfg-www.rst
@@ -331,7 +331,7 @@ The available classes are described here:
from buildbot.plugins import util
class MyAuth(util.CustomAuth):
- def check_credentials(user, password):
+ def check_credentials(self, user, password):
if user == 'snow' and password == 'white':
return True
else:
|
wemake-services__wemake-python-styleguide-834 | Bump mypy from 0.720 to 0.730
Bumps [mypy](https://github.com/python/mypy) from 0.720 to 0.730.
<details>
<summary>Commits</summary>
- [`7ad7f8b`](https://github.com/python/mypy/commit/7ad7f8bbe61e5e67aa7fd6f2efe280931dd2f620) Bump version to 0.730
- [`90776b8`](https://github.com/python/mypy/commit/90776b8b49dee8b5c84a7e90c1b563f2fd88f4f1) Document error codes ([#7451](https://github-redirect.dependabot.com/python/mypy/issues/7451))
- [`99475b2`](https://github.com/python/mypy/commit/99475b21705816a48a5f2cc0380907d21a93442f) Don't serialize redefined symbol nodes ([#7499](https://github-redirect.dependabot.com/python/mypy/issues/7499))
- [`8c17dd8`](https://github.com/python/mypy/commit/8c17dd863121138b20d92184786ed3777d4c574c) Don't compile mypyc/lib-rt/setup.py ([#7497](https://github-redirect.dependabot.com/python/mypy/issues/7497))
- [`41db9a0`](https://github.com/python/mypy/commit/41db9a0c570a3e190f3749cf0b681a31823dc0f7) Pass is_classmethod to bind_self() also for superype ([#7491](https://github-redirect.dependabot.com/python/mypy/issues/7491))
- [`2bdbacf`](https://github.com/python/mypy/commit/2bdbacf32a2b5201200dc2ed8ef5c7175b8de739) Attempt to fix travis on Python 3.8 beta ([#7492](https://github-redirect.dependabot.com/python/mypy/issues/7492))
- [`09c243d`](https://github.com/python/mypy/commit/09c243dcc12935b989367f31d1d25d7fd0ec634c) Point error to incompatible argument instead of call expression ([#7470](https://github-redirect.dependabot.com/python/mypy/issues/7470))
- [`88e2b67`](https://github.com/python/mypy/commit/88e2b67c4c2e8590dbee4aec272b3727b9566f0b) Support pickling of extension classes ([#7481](https://github-redirect.dependabot.com/python/mypy/issues/7481))
- [`9f1b8e9`](https://github.com/python/mypy/commit/9f1b8e930b812385fc866b3145785f7bb59361ef) Fix missing quotes in sample python snippet ([#7487](https://github-redirect.dependabot.com/python/mypy/issues/7487))
- [`37e5be1`](https://github.com/python/mypy/commit/37e5be10c845be3c036721c9462ef9cd90469236) Add http:// in front of the docs url for strict-optional ([#7485](https://github-redirect.dependabot.com/python/mypy/issues/7485))
- Additional commits viewable in [compare view](https://github.com/python/mypy/compare/v0.720...v0.730)
</details>
<br />
[Dependabot compatibility score](https://dependabot.com/compatibility-score.html?dependency-name=mypy&package-manager=pip&previous-version=0.720&new-version=0.730)
Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
---
<details>
<summary>Dependabot commands and options</summary>
<br />
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language
- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language
- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language
- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language
- `@dependabot badge me` will comment on this PR with code to add a "Dependabot enabled" badge to your readme
Additionally, you can set the following in your Dependabot [dashboard](https://app.dependabot.com):
- Update frequency (including time of day and day of week)
- Pull request limits (per update run and/or open at any time)
- Automerge options (never/patch/minor, and dev/runtime dependencies)
- Out-of-range updates (receive only lockfile updates, if desired)
- Security updates (receive only security updates, if desired)
Finally, you can contact us by mentioning @dependabot.
</details>
| [
{
"content": "# -*- coding: utf-8 -*-\n\nimport ast\n\ntry: # pragma: no cover\n from ast import Constant as Constant # type: ignore # noqa: WPS433, WPS113\nexcept ImportError: # pragma: no cover\n class Constant(ast.AST): # type: ignore # noqa: WPS440\n \"\"\"\n Fallback for pythons that do not have ``ast.Constant``.\n\n In this case ``Constant`` is replaced with:\n\n - ``ast.Num``\n - ``ast.Str`` and ``ast.Bytes``\n - ``ast.NameConstant``\n\n Only ``python3.8+`` has this node.\n \"\"\"\n",
"path": "wemake_python_styleguide/compat/nodes.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\nimport ast\n\ntry: # pragma: no cover\n from ast import Constant as Constant # noqa: WPS433, WPS113\nexcept ImportError: # pragma: no cover\n class Constant(ast.AST): # type: ignore # noqa: WPS440\n \"\"\"\n Fallback for pythons that do not have ``ast.Constant``.\n\n In this case ``Constant`` is replaced with:\n\n - ``ast.Num``\n - ``ast.Str`` and ``ast.Bytes``\n - ``ast.NameConstant``\n\n Only ``python3.8+`` has this node.\n \"\"\"\n",
"path": "wemake_python_styleguide/compat/nodes.py"
}
] | diff --git a/poetry.lock b/poetry.lock
index ccd30200d..8c4e98bfc 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -104,7 +104,7 @@ description = "Python package for providing Mozilla's CA Bundle."
name = "certifi"
optional = false
python-versions = "*"
-version = "2019.6.16"
+version = "2019.9.11"
[[package]]
category = "dev"
@@ -499,7 +499,7 @@ description = "Git Object Database"
name = "gitdb2"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-version = "2.0.5"
+version = "2.0.6"
[package.dependencies]
smmap2 = ">=2.0.0"
@@ -557,10 +557,11 @@ grimp = "1.0b12"
[[package]]
category = "dev"
description = "Read metadata from Python packages"
+marker = "python_version < \"3.8\""
name = "importlib-metadata"
optional = false
python-versions = ">=2.7,!=3.0,!=3.1,!=3.2,!=3.3"
-version = "0.19"
+version = "0.23"
[package.dependencies]
zipp = ">=0.5"
@@ -684,7 +685,7 @@ description = "A lightweight library for converting complex datatypes to and fro
name = "marshmallow"
optional = false
python-versions = ">=3.5"
-version = "3.0.1"
+version = "3.2.1"
[[package]]
category = "dev"
@@ -727,8 +728,8 @@ category = "dev"
description = "Optional static typing for Python"
name = "mypy"
optional = false
-python-versions = "*"
-version = "0.720"
+python-versions = ">=3.5"
+version = "0.730"
[package.dependencies]
mypy-extensions = ">=0.4.0,<0.5.0"
@@ -790,10 +791,9 @@ description = "Core utilities for Python packages"
name = "packaging"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-version = "19.1"
+version = "19.2"
[package.dependencies]
-attrs = "*"
pyparsing = ">=2.0.2"
six = "*"
@@ -811,7 +811,7 @@ description = "Python Build Reasonableness"
name = "pbr"
optional = false
python-versions = "*"
-version = "5.4.2"
+version = "5.4.3"
[[package]]
category = "main"
@@ -850,10 +850,12 @@ description = "plugin and hook calling mechanisms for python"
name = "pluggy"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-version = "0.12.0"
+version = "0.13.0"
[package.dependencies]
-importlib-metadata = ">=0.12"
+[package.dependencies.importlib-metadata]
+python = "<3.8"
+version = ">=0.12"
[[package]]
category = "dev"
@@ -979,10 +981,10 @@ description = "A Python Slugify application that handles Unicode"
name = "python-slugify"
optional = false
python-versions = "*"
-version = "3.0.3"
+version = "3.0.4"
[package.dependencies]
-text-unidecode = "1.2"
+text-unidecode = ">=1.3"
[[package]]
category = "dev"
@@ -1058,7 +1060,7 @@ marker = "platform_python_implementation == \"CPython\" and python_version < \"3
name = "ruamel.yaml.clib"
optional = false
python-versions = "*"
-version = "0.1.2"
+version = "0.2.0"
[[package]]
category = "dev"
@@ -1106,11 +1108,11 @@ termcolor = "*"
[[package]]
category = "main"
-description = "This package provides 23 stemmers for 22 languages generated from Snowball algorithms."
+description = "This package provides 26 stemmers for 25 languages generated from Snowball algorithms."
name = "snowballstemmer"
optional = false
python-versions = "*"
-version = "1.9.0"
+version = "1.9.1"
[[package]]
category = "dev"
@@ -1220,7 +1222,7 @@ description = "Manage dynamic plugins for Python applications"
name = "stevedore"
optional = false
python-versions = "*"
-version = "1.30.1"
+version = "1.31.0"
[package.dependencies]
pbr = ">=2.0.0,<2.1.0 || >2.1.0"
@@ -1248,7 +1250,7 @@ description = "The most basic Text::Unidecode port"
name = "text-unidecode"
optional = false
python-versions = "*"
-version = "1.2"
+version = "1.3"
[[package]]
category = "dev"
@@ -1312,7 +1314,7 @@ description = "HTTP library with thread-safe connection pooling, file post, and
name = "urllib3"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4"
-version = "1.25.3"
+version = "1.25.6"
[[package]]
category = "dev"
@@ -1347,6 +1349,7 @@ requests = ">=2.0,<3.0"
[[package]]
category = "dev"
description = "Backport of pathlib-compatible object wrapper for zip files"
+marker = "python_version < \"3.8\""
name = "zipp"
optional = false
python-versions = ">=2.7"
@@ -1356,7 +1359,7 @@ version = "0.6.0"
more-itertools = "*"
[metadata]
-content-hash = "40564d538e04d8995ffe31d0d2572a76117ff955e82305a5c46b662166096839"
+content-hash = "81fbdf2ed9bb8048a473c2ce4deaf6079da44a9a278e0594419a95a0abd3cf58"
python-versions = "^3.6"
[metadata.hashes]
@@ -1370,7 +1373,7 @@ autopep8 = ["4d8eec30cc81bc5617dbf1218201d770dc35629363547f17577c61683ccfb3ee"]
babel = ["af92e6106cb7c55286b25b38ad7695f8b4efb36a90ba483d7f7a6628c46158ab", "e86135ae101e31e2c8ec20a4e0c5220f4eed12487d5cf3f78be7e98d3a57fc28"]
backcall = ["38ecd85be2c1e78f77fd91700c76e14667dc21e2713b63876c0eb901196e01e4", "bbbf4b1e5cd2bdb08f915895b51081c041bac22394fdfcfdfbe9f14b77c08bf2"]
bandit = ["336620e220cf2d3115877685e264477ff9d9abaeb0afe3dc7264f55fa17a3952", "41e75315853507aa145d62a78a2a6c5e3240fe14ee7c601459d0df9418196065"]
-certifi = ["046832c04d4e752f37383b628bc601a7ea7211496b4638f6514d0e5b9acc4939", "945e3ba63a0b9f577b1395204e13c3a231f9bc0223888be653286534e5873695"]
+certifi = ["e4f3620cfea4f83eedc95b24abd9cd56f3c4b146dd0177e83a21b4eb49e21e50", "fd7c7c74727ddcf00e9acd26bba8da604ffec95bf1c2144e67aff7a8b50e6cef"]
chardet = ["84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae", "fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691"]
click = ["2335065e6395b9e67ca716de5f7526736bfa6ceead690adf616d925bdc622b13", "5b94b49521f6456670fdb30cd82a4eca9412788a93fa6dd6df72c94d5a8ff2d7"]
colorama = ["463f8483208e921368c9f306094eb6f725c6ca42b0f97e313cb5d5512459feda", "48eb22f4f8461b1df5734a074b57042430fb06e1d61bd1e11b078c0fe6d7a1f1"]
@@ -1407,13 +1410,13 @@ flake8-pytest-style = ["1c2303998c509cd65c3fb047cd536787ddf953e8113bc7f086c0cd74
flake8-quotes = ["5dbaf668887873f28346fb87943d6da2e4b9f77ce9f2169cff21764a0a4934ed"]
flake8-rst-docstrings = ["a2fa35c6ef978422234afae8c345f23ff721571d43f2895e29817e94be92dd6c"]
flake8-string-format = ["68ea72a1a5b75e7018cae44d14f32473c798cf73d75cbaed86c6a9a907b770b2", "774d56103d9242ed968897455ef49b7d6de272000cfa83de5814273a868832f1"]
-gitdb2 = ["83361131a1836661a155172932a13c08bda2db3674e4caa32368aa6eb02f38c2", "e3a0141c5f2a3f635c7209d56c496ebe1ad35da82fe4d3ec4aaa36278d70648a"]
+gitdb2 = ["1b6df1433567a51a4a9c1a5a0de977aa351a405cc56d7d35f3388bad1f630350", "96bbb507d765a7f51eb802554a9cfe194a174582f772e0d89f4e87288c288b7b"]
gitpython = ["947cc75913e7b6da108458136607e2ee0e40c20be1e12d4284e7c6c12956c276", "d2f4945f8260f6981d724f5957bc076398ada55cb5d25aaee10108bcdc894100"]
grimp = ["e8a5e004362b70d37a154e9daba5b1232ed58201e04630fe5d3e8839d72acefd"]
idna = ["c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407", "ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c"]
imagesize = ["3f349de3eb99145973fefb7dbe38554414e5c30abd0c8e4b970a7c9d09f3a1d8", "f3832918bc3c66617f92e35f5d70729187676313caa60c187eb0f28b8fe5e3b5"]
import-linter = ["1dadd9ca39414f3e76ea0ff9cced10b15bf294451a6ab0aba02aa87ec712d5b9"]
-importlib-metadata = ["23d3d873e008a513952355379d93cbcab874c58f4f034ff657c7a87422fa64e8", "80d2de76188eabfbfcf27e6a37342c2827801e59c4cc14b0371c56fed43820e3"]
+importlib-metadata = ["aa18d7378b00b40847790e7c27e11673d7fed219354109d0e7b9e5b25dc3ad26", "d5f18a79777f3aa179c145737780282e27b508fc8fd688cb17c7a813e8bd39af"]
ipdb = ["473fdd798a099765f093231a8b1fabfa95b0b682fce12de0c74b61a4b4d8ee57"]
ipython = ["c4ab005921641e40a68e405e286e7a1fcc464497e14d81b6914b4fd95e5dee9b", "dd76831f065f17bddd7eaa5c781f5ea32de5ef217592cf019e34043b56895aa1"]
ipython-genutils = ["72dd37233799e619666c9f639a9da83c34013a73e8bbc79a7a6348d93c61fab8", "eb2e116e75ecef9d4d228fdc66af54269afa26ab4463042e33785b887c628ba8"]
@@ -1424,23 +1427,23 @@ jmespath = ["3720a4b1bd659dd2eecad0666459b9788813e032b83e7ba58578e48254e0a0e6",
m2r = ["bf90bad66cda1164b17e5ba4a037806d2443f2a4d5ddc9f6a5554a0322aaed99"]
mando = ["4ce09faec7e5192ffc3c57830e26acba0fd6cd11e1ee81af0d4df0657463bd1c", "79feb19dc0f097daa64a1243db578e7674909b75f88ac2220f1c065c10a0d960"]
markupsafe = ["00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473", "09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161", "09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235", "1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5", "24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff", "29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b", "43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1", "46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e", "500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183", "535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66", "62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1", "6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1", "717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e", "79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b", "7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905", "88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735", "8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d", "98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e", "9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d", "9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c", "ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21", "b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2", "b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5", "b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b", "ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6", "c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f", "cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f", "e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7"]
-marshmallow = ["23f684b54b1955ebd5bdfbdda4062e438ef86218f14f1a356f570cdf0c016ab3", "fcfc9ffd75a883da06f30f604a4e81dd0b56eb9438f4d0a8de6bbaa163ce9ec3"]
+marshmallow = ["077b4612f5d3b9333b736fdc6b963d2b46d409070f44ff3e6c4109645c673e83", "9a2f3e8ea5f530a9664e882d7d04b58650f46190178b2264c72b7d20399d28f0"]
marshmallow-polyfield = ["963a01e80bca5cb4da42b8d2f7e6e90946257ae22d22ff2ed104a8a863eeb0c6"]
mccabe = ["ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42", "dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"]
mistune = ["59a3429db53c50b5c6bcc8a07f8848cb00d7dc8bdb431a4ab41920d201d4756e", "88a1051873018da288eee8538d476dffe1262495144b33ecb586c4ab266bb8d4"]
more-itertools = ["409cd48d4db7052af495b09dec721011634af3753ae1ef92d2b32f73a745f832", "92b8c4b06dac4f0611c0729b2f2ede52b2e1bac1ab48f089c7ddc12e26bb60c4"]
-mypy = ["0107bff4f46a289f0e4081d59b77cef1c48ea43da5a0dbf0005d54748b26df2a", "07957f5471b3bb768c61f08690c96d8a09be0912185a27a68700f3ede99184e4", "10af62f87b6921eac50271e667cc234162a194e742d8e02fc4ddc121e129a5b0", "11fd60d2f69f0cefbe53ce551acf5b1cec1a89e7ce2d47b4e95a84eefb2899ae", "15e43d3b1546813669bd1a6ec7e6a11d2888db938e0607f7b5eef6b976671339", "352c24ba054a89bb9a35dd064ee95ab9b12903b56c72a8d3863d882e2632dc76", "437020a39417e85e22ea8edcb709612903a9924209e10b3ec6d8c9f05b79f498", "49925f9da7cee47eebf3420d7c0e00ec662ec6abb2780eb0a16260a7ba25f9c4", "6724fcd5777aa6cebfa7e644c526888c9d639bd22edd26b2a8038c674a7c34bd", "7a17613f7ea374ab64f39f03257f22b5755335b73251d0d253687a69029701ba", "cdc1151ced496ca1496272da7fc356580e95f2682be1d32377c22ddebdf73c91"]
+mypy = ["1d98fd818ad3128a5408148c9e4a5edce6ed6b58cc314283e631dd5d9216527b", "22ee018e8fc212fe601aba65d3699689dd29a26410ef0d2cc1943de7bec7e3ac", "3a24f80776edc706ec8d05329e854d5b9e464cd332e25cde10c8da2da0a0db6c", "42a78944e80770f21609f504ca6c8173f7768043205b5ac51c9144e057dcf879", "4b2b20106973548975f0c0b1112eceb4d77ed0cafe0a231a1318f3b3a22fc795", "591a9625b4d285f3ba69f541c84c0ad9e7bffa7794da3fa0585ef13cf95cb021", "5b4b70da3d8bae73b908a90bb2c387b977e59d484d22c604a2131f6f4397c1a3", "84edda1ffeda0941b2ab38ecf49302326df79947fa33d98cdcfbf8ca9cf0bb23", "b2b83d29babd61b876ae375786960a5374bba0e4aba3c293328ca6ca5dc448dd", "cc4502f84c37223a1a5ab700649b5ab1b5e4d2bf2d426907161f20672a21930b", "e29e24dd6e7f39f200a5bb55dcaa645d38a397dd5a6674f6042ef02df5795046"]
mypy-extensions = ["37e0e956f41369209a3d5f34580150bcacfabaa57b33a15c0b25f4b5725e0812", "b16cabe759f55e3409a7d231ebd2841378fb0c27a5d1994719e340e4f429ac3e"]
natsort = ["83a8c36b1b2321705d4d7814a7aaf91d0e1bcb7bff119a6ebfe5c9ce3b332d0e", "ff3effb5618232866de8d26e5af4081a4daa9bb0dfed49ac65170e28e45f2776"]
networkx = ["8311ddef63cf5c5c5e7c1d0212dd141d9a1fe3f474915281b73597ed5f1d4e3d"]
nitpick = ["fb2f5f6e5c1ae8f351b0f9c01bafe68c35d87480e9bac7064cf7d15491d70db9", "ff451b41fbfe50ecda0522756a5628d6861c8c4f25c183d65bcd4efede0c8c95"]
-packaging = ["a7ac867b97fdc07ee80a8058fe4435ccd274ecc3b0ed61d852d7d53055528cf9", "c491ca87294da7cc01902edbe30a5bc6c4c28172b5138ab4e4aa1b9d7bfaeafe"]
+packaging = ["28b924174df7a2fa32c1953825ff29c61e2f5e082343165438812f00d3a7fc47", "d9551545c6d761f3def1677baf08ab2a3ca17c56879e70fecba2fc4dde4ed108"]
parso = ["63854233e1fadb5da97f2744b6b24346d2750b85965e7e399bec1620232797dc", "666b0ee4a7a1220f65d367617f2cd3ffddff3e205f3f16a0284df30e774c2a9c"]
-pbr = ["56e52299170b9492513c64be44736d27a512fa7e606f21942160b68ce510b4bc", "9b321c204a88d8ab5082699469f52cc94c5da45c51f114113d01b3d993c24cdf"]
+pbr = ["2c8e420cd4ed4cec4e7999ee47409e876af575d4c35a45840d59e8b5f3155ab8", "b32c8ccaac7b1a20c0ce00ce317642e6cf231cf038f9875e0280e28af5bf7ac9"]
pep8-naming = ["01cb1dab2f3ce9045133d08449f1b6b93531dceacb9ef04f67087c11c723cea9", "0ec891e59eea766efd3059c3d81f1da304d858220678bdc351aab73c533f2fbb"]
pexpect = ["2094eefdfcf37a1fdbfb9aa090862c1a4878e5c7e0e7e7088bdb511c558e5cd1", "9e2c1fd0e6ee3a49b28f95d4b33bc389c89b20af6a1255906e90ff1262ce62eb"]
pickleshare = ["87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca", "9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56"]
-pluggy = ["0825a152ac059776623854c1543d65a4ad408eb3d33ee114dff91e57ec6ae6fc", "b9817417e95936bf75d85d3f8767f7df6cdde751fc40aed3bb3074cbcb77757c"]
+pluggy = ["0db4b7601aae1d35b4a033282da476845aa19185c1e6964b25cf324b5e4ec3e6", "fa5fa1622fa6dd5c030e9cad086fa19ef6a0cf6d7a2d12318e10cb49d6d68f34"]
prompt-toolkit = ["11adf3389a996a6d45cc277580d0d53e8a5afd281d0c9ec71b28e6f121463780", "2519ad1d8038fd5fc8e770362237ad0364d16a7650fb5724af6997ed5515e3c1", "977c6583ae813a37dc1c2e1b715892461fcbdaa57f6fc62f33a528c4886c8f55"]
ptyprocess = ["923f299cc5ad920c68f2bc0bc98b75b9f838b93b599941a6b63ddbc2476394c0", "d7cc528d76e76342423ca640335bd3633420dc1366f258cb31d05e865ef5ca1f"]
py = ["64f65755aee5b381cea27766a3a147c3f15b9b6b9ac88676de66ba2ae36793fa", "dc639b046a6e2cff5bbe40194ad65936d6ba360b52b3c3fe1d08a82dd50b5e53"]
@@ -1452,19 +1455,19 @@ pyparsing = ["6f98a7b9397e206d78cc01df10131398f1c8b8510a2f4d97d9abd82e1aacdd80",
pytest = ["13c1c9b22127a77fc684eee24791efafcef343335d855e3573791c68588fe1a5", "d8ba7be9466f55ef96ba203fc0f90d0cf212f2f927e69186e1353e30bc7f62e5"]
pytest-cov = ["2b097cde81a302e1047331b48cadacf23577e431b61e9c6f49a1170bbe3d3da6", "e00ea4fdde970725482f1f35630d12f074e121a23801aabf2ae154ec6bdd343a"]
pytest-randomly = ["5facc2b5ac56e36b9c4bf14f49cc7b4c95427835bbf4a3c739b71a5f5f82d58a", "9256c9ff88466f7bf9794d2eeeea8fbf1cd1bc8df1b3575df59e89b8813bffaa"]
-python-slugify = ["a9f468227cb11e20e251670d78e1b5f6b0b15dd37bbd5c9814a25a904e44ff66"]
+python-slugify = ["575d03256a132fc1efb4c52966c6eb11c57a13b071618f0b26076057a23f6937"]
pytz = ["26c0b32e437e54a18161324a2fca3c4b9846b74a8dccddd843113109e1116b32", "c894d57500a4cd2d5c71114aaab77dbab5eabd9022308ce5ac9bb93a60a6f0c7"]
pyyaml = ["1cbc199009e78f92d9edf554be4fe40fb7b0bef71ba688602a00e97a51909110", "254bf6fda2b7c651837acb2c718e213df29d531eebf00edb54743d10bcb694eb", "3108529b78577327d15eec243f0ff348a0640b0c3478d67ad7f5648f93bac3e2", "3c17fb92c8ba2f525e4b5f7941d850e7a48c3a59b32d331e2502a3cdc6648e76", "6f89b5c95e93945b597776163403d47af72d243f366bf4622ff08bdfd1c950b7", "8d6d96001aa7f0a6a4a95e8143225b5d06e41b1131044913fecb8f85a125714b", "be622cc81696e24d0836ba71f6272a2b5767669b0d79fdcf0295d51ac2e156c8", "c8a88edd93ee29ede719080b2be6cb2333dfee1dccba213b422a9c8e97f2967b", "f39411e380e2182ad33be039e8ee5770a5d9efe01a2bfb7ae58d9ba31c4a2a9d"]
radon = ["38e495a4aa4c1d7293d3c1733393961fb52209c9bc2d75163c3ba8124d8bbbaa", "f893f2faa632a060f6d0f01843d10a0395515bde865c759c0dd3f15239caf11b"]
requests = ["11e007a8a2aa0323f5a921e9e6a2d7e4e67d9877e85773fba9ba6419025cbeb4", "9cf5292fcd0f598c671cfc1e0d7d1a7f13bb8085e9a590f48c010551dc6c4b31"]
restructuredtext-lint = ["97b3da356d5b3a8514d8f1f9098febd8b41463bed6a1d9f126cf0a048b6fd908"]
"ruamel.yaml" = ["0db639b1b2742dae666c6fc009b8d1931ef15c9276ef31c0673cc6dcf766cf40", "412a6f5cfdc0525dee6a27c08f5415c7fd832a7afcb7a0ed7319628aed23d408"]
-"ruamel.yaml.clib" = ["0bbe19d3e099f8ba384e1846e6b54f245f58aeec8700edbbf9abb87afa54fd82", "2f38024592613f3a8772bbc2904be027d9abf463518ba145f2d0c8e6da27009f", "44449b3764a3f75815eea8ae5930b98e8326be64a90b0f782747318f861abfe0", "5710be9a357801c31c1eaa37b9bc92d38176d785af5b2f0c9751385c5dc9659a", "5a089acb6833ed5f412e24cbe3e665683064c1429824d2819137b5ade54435c3", "6143386ddd61599ea081c012a69a16e5bdd7b3c6c231bd039534365a48940f30", "6726aaf851f5f9e4cbdd3e1e414bc700bdd39220e8bc386415fd41c87b1b53c2", "68fbc3b5d94d145a391452f886ae5fca240cb7e3ab6bd66e1a721507cdaac28a", "75ebddf99ba9e0b48f32b5bdcf9e5a2b84c017da9e0db7bf11995fa414aa09cd", "79948a6712baa686773a43906728e20932c923f7b2a91be7347993be2d745e55", "8a2dd8e8b08d369558cade05731172c4b5e2f4c5097762c6b352bd28fd9f9dc4", "c747acdb5e8c242ab2280df6f0c239e62838af4bee647031d96b3db2f9cefc04", "cadc8eecd27414dca30366b2535cb5e3f3b47b4e2d6be7a0b13e4e52e459ff9f", "cee86ecc893a6a8ecaa7c6a9c2d06f75f614176210d78a5f155f8e78d6989509", "e59af39e895aff28ee5f55515983cab3466d1a029c91c04db29da1c0f09cf333", "eee7ecd2eee648884fae6c51ae50c814acdcc5d6340dc96c970158aebcd25ac6", "ef8d4522d231cb9b29f6cdd0edc8faac9d9715c60dc7becbd6eb82c915a98e5b", "f504d45230cc9abf2810623b924ae048b224a90adb01f97db4e766cfdda8e6eb"]
+"ruamel.yaml.clib" = ["1e77424825caba5553bbade750cec2277ef130647d685c2b38f68bc03453bac6", "392b7c371312abf27fb549ec2d5e0092f7ef6e6c9f767bfb13e83cb903aca0fd", "4d55386129291b96483edcb93b381470f7cd69f97585829b048a3d758d31210a", "550168c02d8de52ee58c3d8a8193d5a8a9491a5e7b2462d27ac5bf63717574c9", "57933a6986a3036257ad7bf283529e7c19c2810ff24c86f4a0cfeb49d2099919", "615b0396a7fad02d1f9a0dcf9f01202bf9caefee6265198f252c865f4227fcc6", "77556a7aa190be9a2bd83b7ee075d3df5f3c5016d395613671487e79b082d784", "7aee724e1ff424757b5bd8f6c5bbdb033a570b2b4683b17ace4dbe61a99a657b", "8073c8b92b06b572e4057b583c3d01674ceaf32167801fe545a087d7a1e8bf52", "9c6d040d0396c28d3eaaa6cb20152cb3b2f15adf35a0304f4f40a3cf9f1d2448", "a0ff786d2a7dbe55f9544b3f6ebbcc495d7e730df92a08434604f6f470b899c5", "b1b7fcee6aedcdc7e62c3a73f238b3d080c7ba6650cd808bce8d7761ec484070", "b66832ea8077d9b3f6e311c4a53d06273db5dc2db6e8a908550f3c14d67e718c", "d0d3ac228c9bbab08134b4004d748cf9f8743504875b3603b3afbb97e3472947", "d10e9dd744cf85c219bf747c75194b624cc7a94f0c80ead624b06bfa9f61d3bc", "ea4362548ee0cbc266949d8a441238d9ad3600ca9910c3fe4e82ee3a50706973", "ed5b3698a2bb241b7f5cbbe277eaa7fe48b07a58784fba4f75224fd066d253ad", "f9dcc1ae73f36e8059589b601e8e4776b9976effd76c21ad6a855a74318efd6e"]
safety = ["0a3a8a178a9c96242b224f033ee8d1d130c0448b0e6622d12deaf37f6c3b4e59", "5059f3ffab3648330548ea9c7403405bbfaf085b11235770825d14c58f24cb78"]
six = ["3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c", "d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73"]
smmap2 = ["0555a7bf4df71d1ef4218e4807bbf9b201f910174e6e08af2e138d4e517b4dde", "29a9ffa0497e7f2be94ca0ed1ca1aa3cd4cf25a1f6b4f5f87f74b46ed91d609a"]
snapshottest = ["2cc7157e77674ea8ebeb2351466ff50cd4b5ad8e213adc06df9c16a75ab5bafc"]
-snowballstemmer = ["9f3b9ffe0809d174f7047e121431acf99c89a7040f0ca84f94ba53a498e6d0c9"]
+snowballstemmer = ["713e53b79cbcf97bc5245a06080a33d54a77e7cce2f789c835a143bcdb5c033e"]
sortedcontainers = ["974e9a32f56b17c1bac2aebd9dcf197f3eb9cd30553c5852a3187ad162e1a03a", "d9e96492dd51fae31e60837736b38fe42a187b5404c16606ff7ee7cd582d4c60"]
sphinx = ["0d586b0f8c2fc3cc6559c5e8fd6124628110514fda0e5d7c82e682d749d2e845", "839a3ed6f6b092bb60f492024489cc9e6991360fb9f52ed6361acd510d261069"]
sphinx-autodoc-typehints = ["0d968ec3ee4f7fe7695ab6facf5cd2d74d3cea67584277458ad9b2788ebbcc3b", "8edca714fd3de8e43467d7e51dd3812fe999f8874408a639f7c38a9e1a5a4eb3"]
@@ -1475,17 +1478,17 @@ sphinxcontrib-jsmath = ["2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c
sphinxcontrib-mermaid = ["d2e33529c63c12724193b210dcbd4285ca6cff17b8f91f9dbcb8b4b7d07595e7"]
sphinxcontrib-qthelp = ["513049b93031beb1f57d4daea74068a4feb77aa5630f856fcff2e50de14e9a20", "79465ce11ae5694ff165becda529a600c754f4bc459778778c7017374d4d406f"]
sphinxcontrib-serializinghtml = ["c0efb33f8052c04fd7a26c0a07f1678e8512e0faec19f4aa8f2473a8b81d5227", "db6615af393650bf1151a6cd39120c29abaf93cc60db8c48eb2dddbfdc3a9768"]
-stevedore = ["7be098ff53d87f23d798a7ce7ae5c31f094f3deb92ba18059b1aeb1ca9fec0a0", "7d1ce610a87d26f53c087da61f06f9b7f7e552efad2a7f6d2322632b5f932ea2"]
+stevedore = ["01d9f4beecf0fbd070ddb18e5efb10567801ba7ef3ddab0074f54e3cd4e91730", "e0739f9739a681c7a1fda76a102b65295e96a144ccdb552f2ae03c5f0abe8a14"]
termcolor = ["1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b"]
testfixtures = ["665a298976c8d77f311b65c46f16b7cda7229a47dff5ad7c822e5b3371a439e2", "9d230c5c80746f9f86a16a1f751a5cf5d8e317d4cc48243a19fb180d22303bce"]
-text-unidecode = ["5a1375bb2ba7968740508ae38d92e1f889a0832913cb1c447d5e2046061a396d", "801e38bd550b943563660a91de8d4b6fa5df60a542be9093f7abf819f86050cc"]
+text-unidecode = ["1311f10e8b895935241623731c2ba64f4c455287888b18189350b67134a822e8", "bad6603bb14d279193107714b288be206cac565dfa49aa5b105294dd5c4aab93"]
toml = ["229f81c57791a41d65e399fc06bf0848bab550a9dfd5ed66df18ce5f05e73d5c", "235682dd292d5899d361a811df37e04a8828a5b1da3115886b73cf81ebc9100e", "f1db651f9657708513243e61e6cc67d101a39bad662eaa9b5546f789338e07a3"]
tomlkit = ["a8d806f3a453c2d292afe97918398354e405b93919e2e68771a3fd0a90e89576", "c6b0c11b85e888c12330c7605d43c1446aa148cd421163f90ca46ea813f2c336"]
traitlets = ["9c4bd2d267b7153df9152698efb1050a5d84982d3384a37b2c1f7723ba3e7835", "c6cb5e6f57c5a9bdaa40fa71ce7b4af30298fbab9ece9815b5d995ab6217c7d9"]
typed-ast = ["18511a0b3e7922276346bcb47e2ef9f38fb90fd31cb9223eed42c85d1312344e", "262c247a82d005e43b5b7f69aff746370538e176131c32dda9cb0f324d27141e", "2b907eb046d049bcd9892e3076c7a6456c93a25bebfe554e931620c90e6a25b0", "354c16e5babd09f5cb0ee000d54cfa38401d8b8891eefa878ac772f827181a3c", "4e0b70c6fc4d010f8107726af5fd37921b666f5b31d9331f0bd24ad9a088e631", "630968c5cdee51a11c05a30453f8cd65e0cc1d2ad0d9192819df9978984529f4", "66480f95b8167c9c5c5c87f32cf437d585937970f3fc24386f313a4c97b44e34", "71211d26ffd12d63a83e079ff258ac9d56a1376a25bc80b1cdcdf601b855b90b", "95bd11af7eafc16e829af2d3df510cecfd4387f6453355188342c3e79a2ec87a", "bc6c7d3fa1325a0c6613512a093bc2a2a15aeec350451cbdf9e1d4bffe3e3233", "cc34a6f5b426748a507dd5d1de4c1978f2eb5626d51326e43280941206c209e1", "d755f03c1e4a51e9b24d899561fec4ccaf51f210d52abdf8c07ee2849b212a36", "d7c45933b1bdfaf9f36c579671fec15d25b06c8398f113dab64c18ed1adda01d", "d896919306dd0aa22d0132f62a1b78d11aaf4c9fc5b3410d3c666b818191630a", "ffde2fbfad571af120fcbfbbc61c72469e72f550d676c3342492a9dfdefb8f12"]
typing = ["91dfe6f3f706ee8cc32d38edbbf304e9b7583fb37108fef38229617f8b3eba23", "c8cabb5ab8945cd2f54917be357d134db9cc1eb039e59d1606dc1e60cb1d9d36", "f38d83c5a7a7086543a0f649564d661859c5146a85775ab90c0d2f93ffaa9714"]
typing-extensions = ["2ed632b30bb54fc3941c382decfd0ee4148f5c591651c9272473fea2c6397d95", "b1edbbf0652660e32ae780ac9433f4231e7339c7f9a8057d0f042fcbcea49b87", "d8179012ec2c620d3791ca6fe2bf7979d979acdbef1fca0bc56b37411db682ed"]
-urllib3 = ["b246607a25ac80bedac05c6f282e3cdaf3afb65420fd024ac94435cabe6e18d1", "dbe59173209418ae49d485b87d1681aefa36252ee85884c31346debd19463232"]
+urllib3 = ["3de946ffbed6e6746608990594d08faac602528ac7015ac28d33cee6a45b7398", "9a107b99a5393caf59c7aa3c1249c16e6879447533d0887f4336dde834c7be86"]
wasmer = ["1d2c337425721fd9ac6c6b17698ef8a9795b236a38b0e3c85872a5845ffb0d90", "2edb87608daa3b46bd2520e0b5b90580fde9c805be4d92eeb98c22b29a21abc6", "4fe592b764fc09d535757682d0ced6da1037976a7eb97986fce3523779a89682", "75d854cb5acdc32f289ceb310a72d66190fa531dd126eac970ed6788939a5d40", "7b5235dd4ee1cf48d054e7216a1fefe15b8b1a48ffe5e9bb2655724cf84d7a31", "e547b1074e52c10f0581de415b509aa61e577f5248340a68b356938393d773c8", "fcfe2c7a9fbf323f3520ef9766b82e80cd433d7f8c87ff084b18bcde716923af"]
wcwidth = ["3df37372226d6e63e1b1e1eda15c594bca98a22d33a23832a90998faa96bc65e", "f4ebe71925af7b40a864553f761ed559b43544f8f71746c2d756c7fe788ade7c"]
xenon = ["26f65adb5d411ba3efc361dc5ab6cd341a243a33b5a526c89350240c765899b9", "ff4bbecf0da99a7f60033e5e9616e28eb2a52d78dc154d90736f8c0124ec3e76"]
diff --git a/pyproject.toml b/pyproject.toml
index 312570c34..f6573bbbb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -89,7 +89,7 @@ pytest-cov = "^2.7"
pytest-randomly = "^3.1"
pytest = "^5.2"
snapshottest = "^0.5.1"
-mypy = "^0.720"
+mypy = "^0.730"
docutils = "^0.13"
sphinx = "^2.1"
sphinx-autodoc-typehints = "<1.9"
diff --git a/wemake_python_styleguide/compat/nodes.py b/wemake_python_styleguide/compat/nodes.py
index 0555980e7..45165a360 100644
--- a/wemake_python_styleguide/compat/nodes.py
+++ b/wemake_python_styleguide/compat/nodes.py
@@ -3,7 +3,7 @@
import ast
try: # pragma: no cover
- from ast import Constant as Constant # type: ignore # noqa: WPS433, WPS113
+ from ast import Constant as Constant # noqa: WPS433, WPS113
except ImportError: # pragma: no cover
class Constant(ast.AST): # type: ignore # noqa: WPS440
"""
|
pfnet__pytorch-pfn-extras-12 | Failed to rename a file in writing.py
The following error happens when the writer renames the snapshot file (the error screenshot from the original report is omitted here).

I fixed it.
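
For reference, the `save` path touched by the diff below follows the usual write-then-rename pattern. A minimal self-contained sketch (the helper name `atomic_save` is hypothetical, and plain `os` calls stand in for the writer's filesystem abstraction):

```python
import os


def atomic_save(out_dir, filename, target, savefun):
    """Write to a temp file first, then rename it over the destination."""
    dest = os.path.join(out_dir, filename)
    tmppath = os.path.join(out_dir, 'tmp_{}'.format(filename))
    with open(tmppath, 'wb') as f:
        savefun(target, f)  # e.g. torch.save
    # Renaming only after the file is closed avoids failures on
    # platforms and filesystems that refuse to rename an open file.
    os.replace(tmppath, dest)
```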

| [
{
"content": "import multiprocessing\nimport io\nimport os\nimport queue\nimport shutil\nimport sys\nimport threading\n\nimport torch\n\n\ndef open_wrapper(func):\n def wrapper(self, file_path, mode='rb',\n buffering=-1, encoding=None,\n errors=None, newline=None,\n closefd=True,\n opener=None):\n file_obj = func(self, file_path, mode, buffering, encoding,\n errors, newline, closefd, opener)\n return self._wrap_fileobject(\n file_obj, file_path, mode, buffering, encoding,\n errors, newline, closefd, opener)\n return wrapper\n\n\nclass _PosixFileStat:\n def __init__(self, _stat, filename):\n self.filename = filename\n self.last_modified = _stat.st_mtime\n self.last_accessed = _stat.st_atime\n self.created = _stat.st_ctime\n self.mode = _stat.st_mode\n self.size = _stat.st_size\n\n\nclass _PosixFileSystem(object):\n \"\"\"Class to abstract the calls to the FileSystem\n\n This class obeys the same interface as PFIO's POSIX\n Filesystems declarations. When using HDFS, PFIO\n handler can be used instead (requires PFIO>1.0).\n\n This class currently abstracts POSIX\n \"\"\"\n def __init__(self):\n pass\n\n def get_actual_path(self, path):\n return os.path.join(self.root, path)\n\n def _wrap_fileobject(self, file_obj, file_path, *args, **kwargs):\n return file_obj\n\n @property\n def root(self):\n return self._root\n\n @root.setter\n def root(self, root):\n self._root = root\n\n @open_wrapper\n def open(self, file_path, mode='r',\n buffering=-1, encoding=None, errors=None,\n newline=None, closefd=True, opener=None):\n\n return io.open(file_path, mode,\n buffering, encoding, errors,\n newline, closefd, opener)\n\n def list(self, path_or_prefix: str = None, recursive=False):\n if recursive:\n path_or_prefix = path_or_prefix.rstrip(\"/\")\n # plus 1 to include the trailing slash\n prefix_end_index = len(path_or_prefix) + 1\n yield from self._recursive_list(prefix_end_index, path_or_prefix)\n else:\n for file in os.scandir(path_or_prefix):\n yield file.name\n\n def _recursive_list(self, prefix_end_index: int, path: str):\n for file in os.scandir(path):\n yield file.path[prefix_end_index:]\n\n if file.is_dir():\n yield from self._recursive_list(prefix_end_index,\n file.path)\n\n def stat(self, path):\n return _PosixFileStat(os.stat(path), path)\n\n def close(self):\n pass\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n pass\n\n def isdir(self, file_path):\n return os.path.isdir(file_path)\n\n def mkdir(self, file_path, mode=0o777, *args, dir_fd=None):\n return os.mkdir(file_path, mode, *args, dir_fd=None)\n\n def makedirs(self, file_path, mode=0o777, exist_ok=False):\n return os.makedirs(file_path, mode, exist_ok)\n\n def exists(self, file_path):\n return os.path.exists(file_path)\n\n def rename(self, src, dst):\n try:\n return os.replace(src, dst)\n except OSError:\n print('Destination {} is a directory '\n 'but source is not'.format(src),\n file=sys.stderr)\n raise\n\n def remove(self, file_path, recursive=False):\n if recursive:\n return shutil.rmtree(file_path)\n if os.path.isdir(file_path):\n return os.rmdir(file_path)\n\n return os.remove(file_path)\n\n\nclass Writer:\n\n \"\"\"Base class of snapshot writers.\n\n :class:`~pytorch_pfn_extras.training.extensions.Snapshot`\n invokes ``__call__`` of this class every time when taking a snapshot.\n This class determines how the actual saving function will be invoked.\n\n .. note::\n This extension first writes the serialized object to a temporary file\n and then rename it to the target file name. 
Thus, if the program stops\n right before the renaming, the temporary file might be left in the\n output directory.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, fs=None, out_dir=None):\n self._post_save_hooks = []\n self.fs = fs\n self.out_dir = out_dir\n if fs is None:\n self.fs = _PosixFileSystem()\n\n self._initialized = False\n\n def __call__(self, filename, out_dir, target):\n \"\"\"Invokes the actual snapshot function.\n\n This method is invoked by a\n :class:`~pytorch_pfn_extras.training.extensions.Snapshot` object\n every time it takes a snapshot.\n\n Args:\n filename (str): Name of the file into which the serialized target\n is saved. It is a concrete file name, i.e. not a pre-formatted\n template string.\n out_dir (str): Output directory. Corresponds to\n :py:attr:`ExtensionsManager.out\n <pytorch_pfn_extras.training.ExtensionsManager.out>`.\n target (dict): Serialized object which will be saved.\n \"\"\"\n raise NotImplementedError\n\n def initialize(self, out_dir):\n if not self.fs.exists(out_dir):\n self.fs.makedirs(out_dir)\n self._initialized = True\n\n def __del__(self):\n self.finalize()\n\n def finalize(self):\n \"\"\"Finalizes the writer.\n\n this method is invoked at the end of the training in\n :class:`~pytorch_pfn_extras.training.ExtensionsManager`,\n\n \"\"\"\n pass\n\n def save(self, filename, out_dir, target, savefun, **kwds):\n if self.out_dir is not None:\n out_dir = self.out_dir\n if not self._initialized:\n self.initialize(out_dir)\n # Some filesystems are not compatible with temp folders, etc\n # so we rely on raw temp files\n prefix = 'tmp_{}'.format(filename)\n dest = os.path.join(out_dir, filename)\n tmppath = os.path.join(out_dir, prefix)\n make_backup = self.fs.exists(dest)\n if make_backup:\n bak = '{}.bak'.format(dest)\n self.fs.rename(dest, bak)\n with self.fs.open(tmppath, 'wb') as f:\n # HDFS does not support overwrite\n savefun(target, f)\n self.fs.rename(tmppath, dest)\n if make_backup:\n self.fs.remove(bak)\n\n self._post_save()\n\n def _add_cleanup_hook(self, hook_fun):\n \"\"\"Adds cleanup hook function.\n\n Technically, arbitrary user-defined hook can be called, but\n this is intended for cleaning up stale snapshots.\n\n Args:\n hook_fun (callable): callable function to be called\n right after save is done. It takes no arguments.\n\n \"\"\"\n self._post_save_hooks.append(hook_fun)\n\n def _post_save(self):\n for hook in self._post_save_hooks:\n hook()\n\n\nclass SimpleWriter(Writer):\n \"\"\"The most simple snapshot writer.\n\n This class just passes the arguments to the actual saving function.\n\n Args:\n savefun: Callable object. It takes three arguments: the output file\n path, the serialized dictionary object, and the optional keyword\n arguments.\n fs: FileSystem abstracting interface to implement all the operations.\n optional, defaults to None\n out_dir: str. Specifies the directory this writer will use.\n It takes precedence over the one specified in `__call__`\n optional, defaults to None\n kwds: Keyword arguments for the ``savefun``.\n\n .. 
seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, **kwds):\n super().__init__(fs=fs, out_dir=out_dir)\n self._savefun = savefun\n self._kwds = kwds\n\n def __call__(self, filename, out_dir, target, *, savefun=None):\n if savefun is None:\n savefun = self._savefun\n self.save(filename, out_dir, target, savefun, **self._kwds)\n\n\nclass StandardWriter(Writer):\n \"\"\"Base class of snapshot writers which use thread or process.\n\n This class creates a new thread or a process every time when ``__call__``\n is invoked.\n\n Args:\n savefun: Callable object. It takes three arguments: the output file\n path, the serialized dictionary object, and the optional keyword\n arguments.\n fs: FileSystem abstracting interface to implement all the operations.\n optional, defaults to None\n out_dir: str. Specifies the directory this writer will use.\n It takes precedence over the one specified in `__call__`\n optional, defaults to None\n kwds: Keyword arguments for the ``savefun``.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n _started = False\n _finalized = False\n _worker = None\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, **kwds):\n super().__init__(fs=fs, out_dir=out_dir)\n self._savefun = savefun\n self._kwds = kwds\n self._started = False\n self._finalized = False\n\n def __call__(self, filename, out_dir, target, *, savefun=None):\n if savefun is None:\n savefun = self._savefun\n if self._started:\n self._worker.join()\n self._started = False\n self._filename = filename\n self._worker = self.create_worker(filename, out_dir, target,\n savefun, **self._kwds)\n self._worker.start()\n self._started = True\n\n def create_worker(self, filename, out_dir, target, savefun, **kwds):\n \"\"\"Creates a worker for the snapshot.\n\n This method creates a thread or a process to take a snapshot. The\n created worker must have :meth:`start` and :meth:`join` methods.\n\n Args:\n filename (str): Name of the file into which the serialized target\n is saved. It is already formated string.\n out_dir (str): Output directory. Passed by `manager.out`.\n target (dict): Serialized object which will be saved.\n kwds: Keyword arguments for the ``savefun``.\n\n \"\"\"\n raise NotImplementedError\n\n def finalize(self):\n if self._started:\n if not self._finalized:\n self._worker.join()\n self._started = False\n self._finalized = True\n\n\nclass ThreadWriter(StandardWriter):\n \"\"\"Snapshot writer that uses a separate thread.\n\n This class creates a new thread that invokes the actual saving function.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, **kwds):\n super().__init__(savefun=savefun, fs=fs, out_dir=out_dir, **kwds)\n\n def create_worker(self, filename, out_dir, target, **kwds):\n return threading.Thread(\n target=self.save,\n args=(filename, out_dir, target, self._savefun),\n kwargs=self._kwds)\n\n\nclass ProcessWriter(StandardWriter):\n \"\"\"Snapshot writer that uses a separate process.\n\n This class creates a new process that invokes the actual saving function.\n\n .. note::\n Forking a new process from a MPI process might be danger. Consider\n using :class:`ThreadWriter` instead of ``ProcessWriter`` if you are\n using MPI.\n\n .. 
seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, **kwds):\n super().__init__(savefun=savefun, fs=fs, out_dir=out_dir, **kwds)\n\n def create_worker(self, filename, out_dir, target, **kwds):\n return multiprocessing.Process(\n target=self.save,\n args=(filename, out_dir, target, self._savefun),\n kwargs=self._kwds)\n\n\nclass QueueWriter(Writer):\n \"\"\"Base class of queue snapshot writers.\n\n This class is a base class of snapshot writers that use a queue.\n A Queue is created when this class is constructed, and every time when\n ``__call__`` is invoked, a snapshot task is put into the queue.\n\n Args:\n savefun: Callable object which is passed to the :meth:`create_task`\n if the task is ``None``. It takes three arguments: the output file\n path, the serialized dictionary object, and the optional keyword\n arguments.\n fs: FileSystem abstracting interface to implement all the operations.\n optional, defaults to None\n out_dir: str. Specifies the directory this writer will use.\n It takes precedence over the one specified in `__call__`\n optional, defaults to None\n task: Callable object. Its ``__call__`` must have a same interface to\n ``Writer.__call__``. This object is directly put into the queue.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n _started = False\n _finalized = False\n _queue = None\n _consumer = None\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, task=None):\n super().__init__(fs=fs, out_dir=out_dir)\n if task is None:\n self._task = self.create_task(savefun)\n else:\n self._task = task\n self._queue = self.create_queue()\n self._consumer = self.create_consumer(self._queue)\n self._consumer.start()\n self._started = True\n self._finalized = False\n\n def __call__(self, filename, out_dir, target, *, savefun=None):\n self._queue.put([self._task, filename, out_dir, target, savefun])\n\n def create_task(self, savefun):\n return SimpleWriter(savefun=savefun)\n\n def create_queue(self):\n raise NotImplementedError\n\n def create_consumer(self, q):\n raise NotImplementedError\n\n def consume(self, q):\n while True:\n task = q.get()\n if task is None:\n q.task_done()\n return\n else:\n task[0](task[1], task[2], task[3], savefun=task[4])\n q.task_done()\n\n def finalize(self):\n if self._started:\n if not self._finalized:\n self._queue.put(None)\n self._queue.join()\n self._consumer.join()\n self._started = False\n self._finalized = True\n\n\nclass ThreadQueueWriter(QueueWriter):\n \"\"\"Snapshot writer that uses a thread queue.\n\n This class creates a thread and a queue by :mod:`threading` and\n :mod:`queue` modules\n respectively. The thread will be a consumer of the queue, and the main\n thread will be a producer of the queue.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, task=None):\n super().__init__(savefun=savefun, fs=fs, task=task, out_dir=out_dir)\n\n def create_queue(self):\n return queue.Queue()\n\n def create_consumer(self, q):\n return threading.Thread(target=self.consume, args=(q,))\n\n\nclass ProcessQueueWriter(QueueWriter):\n \"\"\"Snapshot writer that uses process queue.\n\n This class creates a process and a queue by :mod:`multiprocessing` module.\n The process will be a consumer of this queue, and the main process will be\n a producer of this queue.\n\n .. 
note::\n Forking a new process from MPI process might be danger. Consider using\n :class:`ThreadQueueWriter` instead of ``ProcessQueueWriter`` if you are\n using MPI.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, task=None):\n super().__init__(savefun=savefun, fs=fs, out_dir=out_dir, task=task)\n\n def create_queue(self):\n return multiprocessing.JoinableQueue()\n\n def create_consumer(self, q):\n return multiprocessing.Process(target=self.consume, args=(q,))\n",
"path": "pytorch_pfn_extras/writing.py"
}
] | [
{
"content": "import multiprocessing\nimport io\nimport os\nimport queue\nimport shutil\nimport sys\nimport threading\n\nimport torch\n\n\ndef open_wrapper(func):\n def wrapper(self, file_path, mode='rb',\n buffering=-1, encoding=None,\n errors=None, newline=None,\n closefd=True,\n opener=None):\n file_obj = func(self, file_path, mode, buffering, encoding,\n errors, newline, closefd, opener)\n return self._wrap_fileobject(\n file_obj, file_path, mode, buffering, encoding,\n errors, newline, closefd, opener)\n return wrapper\n\n\nclass _PosixFileStat:\n def __init__(self, _stat, filename):\n self.filename = filename\n self.last_modified = _stat.st_mtime\n self.last_accessed = _stat.st_atime\n self.created = _stat.st_ctime\n self.mode = _stat.st_mode\n self.size = _stat.st_size\n\n\nclass _PosixFileSystem(object):\n \"\"\"Class to abstract the calls to the FileSystem\n\n This class obeys the same interface as PFIO's POSIX\n Filesystems declarations. When using HDFS, PFIO\n handler can be used instead (requires PFIO>1.0).\n\n This class currently abstracts POSIX\n \"\"\"\n def __init__(self):\n pass\n\n def get_actual_path(self, path):\n return os.path.join(self.root, path)\n\n def _wrap_fileobject(self, file_obj, file_path, *args, **kwargs):\n return file_obj\n\n @property\n def root(self):\n return self._root\n\n @root.setter\n def root(self, root):\n self._root = root\n\n @open_wrapper\n def open(self, file_path, mode='r',\n buffering=-1, encoding=None, errors=None,\n newline=None, closefd=True, opener=None):\n\n return io.open(file_path, mode,\n buffering, encoding, errors,\n newline, closefd, opener)\n\n def list(self, path_or_prefix: str = None, recursive=False):\n if recursive:\n path_or_prefix = path_or_prefix.rstrip(\"/\")\n # plus 1 to include the trailing slash\n prefix_end_index = len(path_or_prefix) + 1\n yield from self._recursive_list(prefix_end_index, path_or_prefix)\n else:\n for file in os.scandir(path_or_prefix):\n yield file.name\n\n def _recursive_list(self, prefix_end_index: int, path: str):\n for file in os.scandir(path):\n yield file.path[prefix_end_index:]\n\n if file.is_dir():\n yield from self._recursive_list(prefix_end_index,\n file.path)\n\n def stat(self, path):\n return _PosixFileStat(os.stat(path), path)\n\n def close(self):\n pass\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n pass\n\n def isdir(self, file_path):\n return os.path.isdir(file_path)\n\n def mkdir(self, file_path, mode=0o777, *args, dir_fd=None):\n return os.mkdir(file_path, mode, *args, dir_fd=None)\n\n def makedirs(self, file_path, mode=0o777, exist_ok=False):\n return os.makedirs(file_path, mode, exist_ok)\n\n def exists(self, file_path):\n return os.path.exists(file_path)\n\n def rename(self, src, dst):\n try:\n return os.replace(src, dst)\n except OSError:\n print('Destination {} is a directory '\n 'but source is not'.format(src),\n file=sys.stderr)\n raise\n\n def remove(self, file_path, recursive=False):\n if recursive:\n return shutil.rmtree(file_path)\n if os.path.isdir(file_path):\n return os.rmdir(file_path)\n\n return os.remove(file_path)\n\n\nclass Writer:\n\n \"\"\"Base class of snapshot writers.\n\n :class:`~pytorch_pfn_extras.training.extensions.Snapshot`\n invokes ``__call__`` of this class every time when taking a snapshot.\n This class determines how the actual saving function will be invoked.\n\n .. note::\n This extension first writes the serialized object to a temporary file\n and then rename it to the target file name. 
Thus, if the program stops\n right before the renaming, the temporary file might be left in the\n output directory.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, fs=None, out_dir=None):\n self._post_save_hooks = []\n self.fs = fs\n self.out_dir = out_dir\n if fs is None:\n self.fs = _PosixFileSystem()\n\n self._initialized = False\n\n def __call__(self, filename, out_dir, target):\n \"\"\"Invokes the actual snapshot function.\n\n This method is invoked by a\n :class:`~pytorch_pfn_extras.training.extensions.Snapshot` object\n every time it takes a snapshot.\n\n Args:\n filename (str): Name of the file into which the serialized target\n is saved. It is a concrete file name, i.e. not a pre-formatted\n template string.\n out_dir (str): Output directory. Corresponds to\n :py:attr:`ExtensionsManager.out\n <pytorch_pfn_extras.training.ExtensionsManager.out>`.\n target (dict): Serialized object which will be saved.\n \"\"\"\n raise NotImplementedError\n\n def initialize(self, out_dir):\n if not self.fs.exists(out_dir):\n self.fs.makedirs(out_dir)\n self._initialized = True\n\n def __del__(self):\n self.finalize()\n\n def finalize(self):\n \"\"\"Finalizes the writer.\n\n this method is invoked at the end of the training in\n :class:`~pytorch_pfn_extras.training.ExtensionsManager`,\n\n \"\"\"\n pass\n\n def save(self, filename, out_dir, target, savefun, **kwds):\n if self.out_dir is not None:\n out_dir = self.out_dir\n if not self._initialized:\n self.initialize(out_dir)\n # Some filesystems are not compatible with temp folders, etc\n # so we rely on raw temp files\n prefix = 'tmp_{}'.format(filename)\n dest = os.path.join(out_dir, filename)\n tmppath = os.path.join(out_dir, prefix)\n make_backup = self.fs.exists(dest)\n if make_backup:\n bak = '{}.bak'.format(dest)\n self.fs.rename(dest, bak)\n with self.fs.open(tmppath, 'wb') as f:\n # HDFS does not support overwrite\n savefun(target, f)\n self.fs.rename(tmppath, dest)\n if make_backup:\n self.fs.remove(bak)\n\n self._post_save()\n\n def _add_cleanup_hook(self, hook_fun):\n \"\"\"Adds cleanup hook function.\n\n Technically, arbitrary user-defined hook can be called, but\n this is intended for cleaning up stale snapshots.\n\n Args:\n hook_fun (callable): callable function to be called\n right after save is done. It takes no arguments.\n\n \"\"\"\n self._post_save_hooks.append(hook_fun)\n\n def _post_save(self):\n for hook in self._post_save_hooks:\n hook()\n\n\nclass SimpleWriter(Writer):\n \"\"\"The most simple snapshot writer.\n\n This class just passes the arguments to the actual saving function.\n\n Args:\n savefun: Callable object. It takes three arguments: the output file\n path, the serialized dictionary object, and the optional keyword\n arguments.\n fs: FileSystem abstracting interface to implement all the operations.\n optional, defaults to None\n out_dir: str. Specifies the directory this writer will use.\n It takes precedence over the one specified in `__call__`\n optional, defaults to None\n kwds: Keyword arguments for the ``savefun``.\n\n .. 
seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, **kwds):\n super().__init__(fs=fs, out_dir=out_dir)\n self._savefun = savefun\n self._kwds = kwds\n\n def __call__(self, filename, out_dir, target, *, savefun=None):\n if savefun is None:\n savefun = self._savefun\n self.save(filename, out_dir, target, savefun, **self._kwds)\n\n\nclass StandardWriter(Writer):\n \"\"\"Base class of snapshot writers which use thread or process.\n\n This class creates a new thread or a process every time when ``__call__``\n is invoked.\n\n Args:\n savefun: Callable object. It takes three arguments: the output file\n path, the serialized dictionary object, and the optional keyword\n arguments.\n fs: FileSystem abstracting interface to implement all the operations.\n optional, defaults to None\n out_dir: str. Specifies the directory this writer will use.\n It takes precedence over the one specified in `__call__`\n optional, defaults to None\n kwds: Keyword arguments for the ``savefun``.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n _started = False\n _finalized = False\n _worker = None\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, **kwds):\n super().__init__(fs=fs, out_dir=out_dir)\n self._savefun = savefun\n self._kwds = kwds\n self._started = False\n self._finalized = False\n\n def __call__(self, filename, out_dir, target, *, savefun=None):\n if savefun is None:\n savefun = self._savefun\n if self._started:\n self._worker.join()\n self._started = False\n self._filename = filename\n self._worker = self.create_worker(filename, out_dir, target,\n savefun, **self._kwds)\n self._worker.start()\n self._started = True\n\n def create_worker(self, filename, out_dir, target, savefun, **kwds):\n \"\"\"Creates a worker for the snapshot.\n\n This method creates a thread or a process to take a snapshot. The\n created worker must have :meth:`start` and :meth:`join` methods.\n\n Args:\n filename (str): Name of the file into which the serialized target\n is saved. It is already formated string.\n out_dir (str): Output directory. Passed by `manager.out`.\n target (dict): Serialized object which will be saved.\n kwds: Keyword arguments for the ``savefun``.\n\n \"\"\"\n raise NotImplementedError\n\n def finalize(self):\n if self._started:\n if not self._finalized:\n self._worker.join()\n self._started = False\n self._finalized = True\n\n\nclass ThreadWriter(StandardWriter):\n \"\"\"Snapshot writer that uses a separate thread.\n\n This class creates a new thread that invokes the actual saving function.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, **kwds):\n super().__init__(savefun=savefun, fs=fs, out_dir=out_dir, **kwds)\n\n def create_worker(self, filename, out_dir, target, **kwds):\n return threading.Thread(\n target=self.save,\n args=(filename, out_dir, target, self._savefun),\n kwargs=self._kwds)\n\n\nclass ProcessWriter(StandardWriter):\n \"\"\"Snapshot writer that uses a separate process.\n\n This class creates a new process that invokes the actual saving function.\n\n .. note::\n Forking a new process from a MPI process might be danger. Consider\n using :class:`ThreadWriter` instead of ``ProcessWriter`` if you are\n using MPI.\n\n .. 
seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, **kwds):\n super().__init__(savefun=savefun, fs=fs, out_dir=out_dir, **kwds)\n\n def create_worker(self, filename, out_dir, target, **kwds):\n return multiprocessing.Process(\n target=self.save,\n args=(filename, out_dir, target, self._savefun),\n kwargs=self._kwds)\n\n\nclass QueueWriter(Writer):\n \"\"\"Base class of queue snapshot writers.\n\n This class is a base class of snapshot writers that use a queue.\n A Queue is created when this class is constructed, and every time when\n ``__call__`` is invoked, a snapshot task is put into the queue.\n\n Args:\n savefun: Callable object which is passed to the :meth:`create_task`\n if the task is ``None``. It takes three arguments: the output file\n path, the serialized dictionary object, and the optional keyword\n arguments.\n fs: FileSystem abstracting interface to implement all the operations.\n optional, defaults to None\n out_dir: str. Specifies the directory this writer will use.\n It takes precedence over the one specified in `__call__`\n optional, defaults to None\n task: Callable object. Its ``__call__`` must have a same interface to\n ``Writer.__call__``. This object is directly put into the queue.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n _started = False\n _finalized = False\n _queue = None\n _consumer = None\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, task=None):\n super().__init__(fs=fs, out_dir=out_dir)\n if task is None:\n self._task = self.create_task(savefun)\n else:\n self._task = task\n self._queue = self.create_queue()\n self._consumer = self.create_consumer(self._queue)\n self._consumer.start()\n self._started = True\n self._finalized = False\n\n def __call__(self, filename, out_dir, target, *, savefun=None):\n self._queue.put([self._task, filename, out_dir, target, savefun])\n\n def create_task(self, savefun):\n return SimpleWriter(savefun=savefun)\n\n def create_queue(self):\n raise NotImplementedError\n\n def create_consumer(self, q):\n raise NotImplementedError\n\n def consume(self, q):\n while True:\n task = q.get()\n if task is None:\n q.task_done()\n return\n else:\n task[0](task[1], task[2], task[3], savefun=task[4])\n q.task_done()\n\n def finalize(self):\n if self._started:\n if not self._finalized:\n self._queue.put(None)\n self._queue.join()\n self._consumer.join()\n self._started = False\n self._finalized = True\n\n\nclass ThreadQueueWriter(QueueWriter):\n \"\"\"Snapshot writer that uses a thread queue.\n\n This class creates a thread and a queue by :mod:`threading` and\n :mod:`queue` modules\n respectively. The thread will be a consumer of the queue, and the main\n thread will be a producer of the queue.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, task=None):\n super().__init__(savefun=savefun, fs=fs, task=task, out_dir=out_dir)\n\n def create_queue(self):\n return queue.Queue()\n\n def create_consumer(self, q):\n return threading.Thread(target=self.consume, args=(q,))\n\n\nclass ProcessQueueWriter(QueueWriter):\n \"\"\"Snapshot writer that uses process queue.\n\n This class creates a process and a queue by :mod:`multiprocessing` module.\n The process will be a consumer of this queue, and the main process will be\n a producer of this queue.\n\n .. 
note::\n Forking a new process from MPI process might be danger. Consider using\n :class:`ThreadQueueWriter` instead of ``ProcessQueueWriter`` if you are\n using MPI.\n\n .. seealso::\n\n - :meth:`pytorch_pfn_extras.training.extensions.snapshot`\n \"\"\"\n\n def __init__(self, savefun=torch.save, fs=None, out_dir=None, task=None):\n super().__init__(savefun=savefun, fs=fs, out_dir=out_dir, task=task)\n\n def create_queue(self):\n return multiprocessing.JoinableQueue()\n\n def create_consumer(self, q):\n return multiprocessing.Process(target=self.consume, args=(q,))\n",
"path": "pytorch_pfn_extras/writing.py"
}
] | diff --git a/pytorch_pfn_extras/writing.py b/pytorch_pfn_extras/writing.py
index 57c804a7e..37adadb13 100644
--- a/pytorch_pfn_extras/writing.py
+++ b/pytorch_pfn_extras/writing.py
@@ -208,7 +208,7 @@ def save(self, filename, out_dir, target, savefun, **kwds):
with self.fs.open(tmppath, 'wb') as f:
# HDFS does not support overwrite
savefun(target, f)
- self.fs.rename(tmppath, dest)
+ self.fs.rename(tmppath, dest)
if make_backup:
self.fs.remove(bak)
|
mkdocs__mkdocs-2069 | Install of mkdocs fails with nltk 3.5
Hi,
First of all, thank you for mkdocs. We're using this in our company and are very happy to have it.
I noticed that the installation of mkdocs started failing for us yesterday. After some investigation I found out that the [nltk](https://www.nltk.org/install.html) lib had a version bump (3.5) yesterday, and that's where it goes wrong.
I was able to use the previous nltk version (3.4.5) by adding it directly to our CI file:
```
pages:
image: python:3.7-alpine
stage: 🥾 Bootstrap
script:
- pip install nltk==3.4.5
- pip install mkdocs
- pip install mkdocs-material
- mkdocs build
```
This solves the issue for us.
I am not really used to Python and its ecosystem, so perhaps I am overlooking an actual "fix".
But if this really is an issue, I guess the `3.5` nltk version should be _blacklisted_ from the install?
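
If the requirement lived directly in MkDocs' `setup.py`, one hypothetical way to blacklist a single release would be a PEP 440 exclusion specifier; note that the fix the maintainers actually shipped (see the diff below) was to bump `lunr[languages]` to 0.5.8 instead:

```python
# Hypothetical sketch, not the actual MkDocs setup.py (nltk is only a
# transitive dependency pulled in via lunr[languages]):
install_requires = [
    'nltk>=3.2.5,!=3.5',  # allow nltk, but skip the broken 3.5 release
]
```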
<details>
<summary>Output of the error</summary>
```
$ pip install mkdocs
18 Collecting mkdocs
19 Downloading mkdocs-1.1-py2.py3-none-any.whl (6.4 MB)
20 Collecting click>=3.3
21 Downloading click-7.1.1-py2.py3-none-any.whl (82 kB)
22 Collecting Jinja2>=2.10.1
23 Downloading Jinja2-2.11.1-py2.py3-none-any.whl (126 kB)
24 Collecting Markdown>=3.2.1
25 Downloading Markdown-3.2.1-py2.py3-none-any.whl (88 kB)
26 Collecting PyYAML>=3.10
27 Downloading PyYAML-5.3.1.tar.gz (269 kB)
28 Collecting tornado>=5.0
29 Downloading tornado-6.0.4.tar.gz (496 kB)
30 Collecting livereload>=2.5.1
31 Downloading livereload-2.6.1-py2.py3-none-any.whl (23 kB)
32 Collecting lunr[languages]==0.5.6
33 Downloading lunr-0.5.6-py2.py3-none-any.whl (36 kB)
34 Collecting MarkupSafe>=0.23
35 Downloading MarkupSafe-1.1.1.tar.gz (19 kB)
36 Requirement already satisfied: setuptools>=36 in /usr/local/lib/python3.8/site-packages (from Markdown>=3.2.1->mkdocs) (46.1.1)
37 Collecting six
38 Downloading six-1.14.0-py2.py3-none-any.whl (10 kB)
39 Collecting future>=0.16.0
40 Downloading future-0.18.2.tar.gz (829 kB)
41 Collecting nltk>=3.2.5; extra == "languages"
42 Downloading nltk-3.5.zip (1.4 MB)
43 Collecting joblib
44 Downloading joblib-0.14.1-py2.py3-none-any.whl (294 kB)
45 Collecting regex
46 Downloading regex-2020.4.4.tar.gz (695 kB)
47 Collecting tqdm
48 Downloading tqdm-4.45.0-py2.py3-none-any.whl (60 kB)
49 Building wheels for collected packages: PyYAML, tornado, MarkupSafe, future, nltk, regex
50 Building wheel for PyYAML (setup.py): started
51 Building wheel for PyYAML (setup.py): finished with status 'done'
52 Created wheel for PyYAML: filename=PyYAML-5.3.1-cp38-cp38-linux_x86_64.whl size=44617 sha256=e51f74894350a12a20b5ed28d79e6bf74a837020d884ed06a1e0a0ca03ec28fb
53 Stored in directory: /root/.cache/pip/wheels/13/90/db/290ab3a34f2ef0b5a0f89235dc2d40fea83e77de84ed2dc05c
54 Building wheel for tornado (setup.py): started
55 Building wheel for tornado (setup.py): finished with status 'done'
56 Created wheel for tornado: filename=tornado-6.0.4-cp38-cp38-linux_x86_64.whl size=415149 sha256=5b834e80e999d01b7fb8a67d6ccbf419fae7fb0920b9a8ddcf2d0fade7357613
57 Stored in directory: /root/.cache/pip/wheels/88/79/e5/598ba17e85eccf2626eab62e4ee8452895636cd542650d450d
58 Building wheel for MarkupSafe (setup.py): started
59 Building wheel for MarkupSafe (setup.py): finished with status 'done'
60 Created wheel for MarkupSafe: filename=MarkupSafe-1.1.1-py3-none-any.whl size=12629 sha256=a03aedf2140733fd4e1c3d47f1ffa26335ddd2a349b60e108ceb84e48612998b
61 Stored in directory: /root/.cache/pip/wheels/0c/61/d6/4db4f4c28254856e82305fdb1f752ed7f8482e54c384d8cb0e
62 Building wheel for future (setup.py): started
63 Building wheel for future (setup.py): finished with status 'done'
64 Created wheel for future: filename=future-0.18.2-py3-none-any.whl size=491058 sha256=6d781ad44d7230964ab437b2036b2b382186d0083039960587eeb76d1eb3f93b
65 Stored in directory: /root/.cache/pip/wheels/8e/70/28/3d6ccd6e315f65f245da085482a2e1c7d14b90b30f239e2cf4
66 Building wheel for nltk (setup.py): started
67 Building wheel for nltk (setup.py): finished with status 'done'
68 Created wheel for nltk: filename=nltk-3.5-py3-none-any.whl size=1434676 sha256=1f4703ad06f227692f94fed6c4564f56f36b982f86a502fab5af0a2e290f57b6
69 Stored in directory: /root/.cache/pip/wheels/ff/d5/7b/f1fb4e1e1603b2f01c2424dd60fbcc50c12ef918bafc44b155
70 Building wheel for regex (setup.py): started
71 Building wheel for regex (setup.py): finished with status 'error'
72 ERROR: Command errored out with exit status 1:
73 command: /usr/local/bin/python -u -c 'import sys, setuptools, tokenize; sys.argv[0] = '"'"'/tmp/pip-install-7xy4ih14/regex/setup.py'"'"'; __file__='"'"'/tmp/pip-install-7xy4ih14/regex/setup.py'"'"';f=getattr(tokenize, '"'"'open'"'"', open)(__file__);code=f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' bdist_wheel -d /tmp/pip-wheel-crjf4cl9
74 cwd: /tmp/pip-install-7xy4ih14/regex/
75 Complete output (17 lines):
76 running bdist_wheel
77 running build
78 running build_py
79 creating build
80 creating build/lib.linux-x86_64-3.8
81 creating build/lib.linux-x86_64-3.8/regex
82 copying regex_3/__init__.py -> build/lib.linux-x86_64-3.8/regex
83 copying regex_3/regex.py -> build/lib.linux-x86_64-3.8/regex
84 copying regex_3/_regex_core.py -> build/lib.linux-x86_64-3.8/regex
85 copying regex_3/test_regex.py -> build/lib.linux-x86_64-3.8/regex
86 running build_ext
87 building 'regex._regex' extension
88 creating build/temp.linux-x86_64-3.8
89 creating build/temp.linux-x86_64-3.8/regex_3
90 gcc -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -DTHREAD_STACK_SIZE=0x100000 -fPIC -I/usr/local/include/python3.8 -c regex_3/_regex.c -o build/temp.linux-x86_64-3.8/regex_3/_regex.o
91 unable to execute 'gcc': No such file or directory
92 error: command 'gcc' failed with exit status 1
93 ----------------------------------------
94 ERROR: Failed building wheel for regex
95 Running setup.py clean for regex
96 Successfully built PyYAML tornado MarkupSafe future nltk
97 Failed to build regex
98 Installing collected packages: click, MarkupSafe, Jinja2, Markdown, PyYAML, tornado, six, livereload, future, joblib, regex, tqdm, nltk, lunr, mkdocs
99 Running setup.py install for regex: started
100 Running setup.py install for regex: finished with status 'error'
101 ERROR: Command errored out with exit status 1:
102 command: /usr/local/bin/python -u -c 'import sys, setuptools, tokenize; sys.argv[0] = '"'"'/tmp/pip-install-7xy4ih14/regex/setup.py'"'"'; __file__='"'"'/tmp/pip-install-7xy4ih14/regex/setup.py'"'"';f=getattr(tokenize, '"'"'open'"'"', open)(__file__);code=f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' install --record /tmp/pip-record-begr9ft9/install-record.txt --single-version-externally-managed --compile --install-headers /usr/local/include/python3.8/regex
103 cwd: /tmp/pip-install-7xy4ih14/regex/
104 Complete output (17 lines):
105 running install
106 running build
107 running build_py
108 creating build
109 creating build/lib.linux-x86_64-3.8
110 creating build/lib.linux-x86_64-3.8/regex
111 copying regex_3/__init__.py -> build/lib.linux-x86_64-3.8/regex
112 copying regex_3/regex.py -> build/lib.linux-x86_64-3.8/regex
113 copying regex_3/_regex_core.py -> build/lib.linux-x86_64-3.8/regex
114 copying regex_3/test_regex.py -> build/lib.linux-x86_64-3.8/regex
115 running build_ext
116 building 'regex._regex' extension
117 creating build/temp.linux-x86_64-3.8
118 creating build/temp.linux-x86_64-3.8/regex_3
119 gcc -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -DTHREAD_STACK_SIZE=0x100000 -fPIC -I/usr/local/include/python3.8 -c regex_3/_regex.c -o build/temp.linux-x86_64-3.8/regex_3/_regex.o
120 unable to execute 'gcc': No such file or directory
121 error: command 'gcc' failed with exit status 1
122 ----------------------------------------
123 ERROR: Command errored out with exit status 1: /usr/local/bin/python -u -c 'import sys, setuptools, tokenize; sys.argv[0] = '"'"'/tmp/pip-install-7xy4ih14/regex/setup.py'"'"'; __file__='"'"'/tmp/pip-install-7xy4ih14/regex/setup.py'"'"';f=getattr(tokenize, '"'"'open'"'"', open)(__file__);code=f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' install --record /tmp/pip-record-begr9ft9/install-record.txt --single-version-externally-managed --compile --install-headers /usr/local/include/python3.8/regex Check the logs for full command output.
127 ERROR: Job failed: exit code 1
```
</details>
| [
{
"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\nimport re\nimport os\nimport sys\n\n\nlong_description = (\n \"MkDocs is a fast, simple and downright gorgeous static site generator \"\n \"that's geared towards building project documentation. Documentation \"\n \"source files are written in Markdown, and configured with a single YAML \"\n \"configuration file.\"\n)\n\n\ndef get_version(package):\n \"\"\"Return package version as listed in `__version__` in `init.py`.\"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_packages(package):\n \"\"\"Return root package and all sub-packages.\"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\nif sys.argv[-1] == 'publish':\n if os.system(\"pip freeze | grep wheel\"):\n print(\"wheel not installed.\\nUse `pip install wheel`.\\nExiting.\")\n sys.exit()\n if os.system(\"pip freeze | grep twine\"):\n print(\"twine not installed.\\nUse `pip install twine`.\\nExiting.\")\n sys.exit()\n os.system(\"python setup.py sdist bdist_wheel\")\n os.system(\"twine upload dist/*\")\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a {0} -m 'version {0}'\".format(get_version(\"mkdocs\")))\n print(\" git push --tags\")\n sys.exit()\n\n\nsetup(\n name=\"mkdocs\",\n version=get_version(\"mkdocs\"),\n url='https://www.mkdocs.org',\n license='BSD',\n description='Project documentation with Markdown.',\n long_description=long_description,\n author='Tom Christie',\n author_email='[email protected]', # SEE NOTE BELOW (*)\n packages=get_packages(\"mkdocs\"),\n include_package_data=True,\n install_requires=[\n 'click>=3.3',\n 'Jinja2>=2.10.1',\n 'livereload>=2.5.1',\n 'lunr[languages]==0.5.6', # must match lunr.js version included in search\n 'Markdown>=3.2.1',\n 'PyYAML>=3.10',\n 'tornado>=5.0'\n ],\n python_requires='>=3.5',\n entry_points={\n 'console_scripts': [\n 'mkdocs = mkdocs.__main__:cli',\n ],\n 'mkdocs.themes': [\n 'mkdocs = mkdocs.themes.mkdocs',\n 'readthedocs = mkdocs.themes.readthedocs',\n ],\n 'mkdocs.plugins': [\n 'search = mkdocs.contrib.search:SearchPlugin',\n ],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n 'Topic :: Documentation',\n 'Topic :: Text Processing',\n ],\n zip_safe=False,\n)\n\n# (*) Please direct queries to the discussion group:\n# https://groups.google.com/forum/#!forum/mkdocs\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\nimport re\nimport os\nimport sys\n\n\nlong_description = (\n \"MkDocs is a fast, simple and downright gorgeous static site generator \"\n \"that's geared towards building project documentation. Documentation \"\n \"source files are written in Markdown, and configured with a single YAML \"\n \"configuration file.\"\n)\n\n\ndef get_version(package):\n \"\"\"Return package version as listed in `__version__` in `init.py`.\"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_packages(package):\n \"\"\"Return root package and all sub-packages.\"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\nif sys.argv[-1] == 'publish':\n if os.system(\"pip freeze | grep wheel\"):\n print(\"wheel not installed.\\nUse `pip install wheel`.\\nExiting.\")\n sys.exit()\n if os.system(\"pip freeze | grep twine\"):\n print(\"twine not installed.\\nUse `pip install twine`.\\nExiting.\")\n sys.exit()\n os.system(\"python setup.py sdist bdist_wheel\")\n os.system(\"twine upload dist/*\")\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a {0} -m 'version {0}'\".format(get_version(\"mkdocs\")))\n print(\" git push --tags\")\n sys.exit()\n\n\nsetup(\n name=\"mkdocs\",\n version=get_version(\"mkdocs\"),\n url='https://www.mkdocs.org',\n license='BSD',\n description='Project documentation with Markdown.',\n long_description=long_description,\n author='Tom Christie',\n author_email='[email protected]', # SEE NOTE BELOW (*)\n packages=get_packages(\"mkdocs\"),\n include_package_data=True,\n install_requires=[\n 'click>=3.3',\n 'Jinja2>=2.10.1',\n 'livereload>=2.5.1',\n 'lunr[languages]==0.5.8', # must support lunr.js version included in search\n 'Markdown>=3.2.1',\n 'PyYAML>=3.10',\n 'tornado>=5.0'\n ],\n python_requires='>=3.5',\n entry_points={\n 'console_scripts': [\n 'mkdocs = mkdocs.__main__:cli',\n ],\n 'mkdocs.themes': [\n 'mkdocs = mkdocs.themes.mkdocs',\n 'readthedocs = mkdocs.themes.readthedocs',\n ],\n 'mkdocs.plugins': [\n 'search = mkdocs.contrib.search:SearchPlugin',\n ],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n 'Topic :: Documentation',\n 'Topic :: Text Processing',\n ],\n zip_safe=False,\n)\n\n# (*) Please direct queries to the discussion group:\n# https://groups.google.com/forum/#!forum/mkdocs\n",
"path": "setup.py"
}
] | diff --git a/docs/about/release-notes.md b/docs/about/release-notes.md
index 5e7ef6777f..2b81e42552 100644
--- a/docs/about/release-notes.md
+++ b/docs/about/release-notes.md
@@ -23,10 +23,11 @@ The current and past members of the MkDocs team.
## Version 1.1.1 (in development)
-Bugfix: Ensure wheel is Python 3 only.
-Bugfix: Clean up `dev_addr` validation and disallow `0.0.0.0`.
-Add support for `min_search_length` parameter for search plugin (#2014).
-Bugfix: `readthedocs` theme `code` colors (#2027).
+* Bugfix: Use `lunr[languages]==0.5.8` to avoid `nltk` incompatibilities (#2062).
+* Bugfix: Ensure wheel is Python 3 only (#2021).
+* Bugfix: Clean up `dev_addr` validation and disallow `0.0.0.0` (#2022).
+* Add support for `min_search_length` parameter for search plugin (#2014).
+* Bugfix: `readthedocs` theme `code` colors (#2027).
## Version 1.1 (2020-02-22)
diff --git a/setup.py b/setup.py
index f5db42dd24..2784814ec3 100755
--- a/setup.py
+++ b/setup.py
@@ -57,7 +57,7 @@ def get_packages(package):
'click>=3.3',
'Jinja2>=2.10.1',
'livereload>=2.5.1',
- 'lunr[languages]==0.5.6', # must match lunr.js version included in search
+ 'lunr[languages]==0.5.8', # must support lunr.js version included in search
'Markdown>=3.2.1',
'PyYAML>=3.10',
'tornado>=5.0'
|
MongoEngine__mongoengine-2431 | Version restriction on pillow
Do we still need to restrict pillow to less than 7.0.0? This restriction appears to have been added for Python 2 support, which mongoengine dropped in version 0.20.0.
https://github.com/MongoEngine/mongoengine/blob/277b827d4dab4630145bc747fdab0df48a045273/setup.py#L118
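For context, a minimal sketch of what relaxing the pin would look like in practice (the class below is illustrative; size tuples are `(width, height, force)` per mongoengine's `ImageField`, which is why the test changes in the diff at the end of this record pass `True` as a third element):
```python
# Sketch only: verify the environment the loosened pin targets.
# Pillow 7+ is Python 3 only, which matches python_requires=">=3.5".
import sys
import PIL

assert sys.version_info >= (3, 5)
print(PIL.__version__)  # expect >= 7.0.0 once the pin is relaxed

from mongoengine import Document, ImageField

class TestImage(Document):
    # (width, height, force): force=True resizes to the exact dimensions.
    image = ImageField(size=(185, 37, True), thumbnail_size=(92, 18, True))
```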
| [
{
"content": "import os\nimport sys\n\nfrom pkg_resources import normalize_path\nfrom setuptools import find_packages, setup\nfrom setuptools.command.test import test as TestCommand\n\n# Hack to silence atexit traceback in newer python versions\ntry:\n import multiprocessing\nexcept ImportError:\n pass\n\nDESCRIPTION = \"MongoEngine is a Python Object-Document Mapper for working with MongoDB.\"\n\ntry:\n with open(\"README.rst\") as fin:\n LONG_DESCRIPTION = fin.read()\nexcept Exception:\n LONG_DESCRIPTION = None\n\n\ndef get_version(version_tuple):\n \"\"\"Return the version tuple as a string, e.g. for (0, 10, 7),\n return '0.10.7'.\n \"\"\"\n return \".\".join(map(str, version_tuple))\n\n\nclass PyTest(TestCommand):\n \"\"\"Will force pytest to search for tests inside the build directory\n for 2to3 converted code (used by tox), instead of the current directory.\n Required as long as we need 2to3\n\n Known Limitation: https://tox.readthedocs.io/en/latest/example/pytest.html#known-issues-and-limitations\n Source: https://www.hackzine.org/python-testing-with-pytest-and-2to3-plus-tox-and-travis-ci.html\n \"\"\"\n\n # https://pytest.readthedocs.io/en/2.7.3/goodpractises.html#integration-with-setuptools-test-commands\n # Allows to provide pytest command argument through the test runner command `python setup.py test`\n # e.g: `python setup.py test -a \"-k=test\"`\n # This only works for 1 argument though\n user_options = [(\"pytest-args=\", \"a\", \"Arguments to pass to py.test\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = \"\"\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = [\"tests\"]\n self.test_suite = True\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n from pkg_resources import _namespace_packages\n import pytest\n\n # Purge modules under test from sys.modules. The test loader will\n # re-import them from the build location. 
Required when 2to3 is used\n # with namespace packages.\n if sys.version_info >= (3,) and getattr(self.distribution, \"use_2to3\", False):\n module = self.test_args[-1].split(\".\")[0]\n if module in _namespace_packages:\n del_modules = []\n if module in sys.modules:\n del_modules.append(module)\n module += \".\"\n for name in sys.modules:\n if name.startswith(module):\n del_modules.append(name)\n map(sys.modules.__delitem__, del_modules)\n\n # Run on the build directory for 2to3-built code\n # This will prevent the old 2.x code from being found\n # by py.test discovery mechanism, that apparently\n # ignores sys.path..\n ei_cmd = self.get_finalized_command(\"egg_info\")\n self.test_args = [normalize_path(ei_cmd.egg_base)]\n\n cmd_args = self.test_args + ([self.pytest_args] if self.pytest_args else [])\n errno = pytest.main(cmd_args)\n sys.exit(errno)\n\n\n# Dirty hack to get version number from monogengine/__init__.py - we can't\n# import it as it depends on PyMongo and PyMongo isn't installed until this\n# file is read\ninit = os.path.join(os.path.dirname(__file__), \"mongoengine\", \"__init__.py\")\nversion_line = list(filter(lambda l: l.startswith(\"VERSION\"), open(init)))[0]\n\nVERSION = get_version(eval(version_line.split(\"=\")[-1]))\n\nCLASSIFIERS = [\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Database\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nextra_opts = {\n \"packages\": find_packages(exclude=[\"tests\", \"tests.*\"]),\n \"tests_require\": [\n \"pytest<5.0\",\n \"pytest-cov\",\n \"coverage<5.0\", # recent coverage switched to sqlite format for the .coverage file which isn't handled properly by coveralls\n \"blinker\",\n \"Pillow>=2.0.0, <7.0.0\", # 7.0.0 dropped Python2 support\n ],\n}\n\nif \"test\" in sys.argv:\n extra_opts[\"packages\"] = find_packages()\n extra_opts[\"package_data\"] = {\n \"tests\": [\"fields/mongoengine.png\", \"fields/mongodb_leaf.png\"]\n }\n\nsetup(\n name=\"mongoengine\",\n version=VERSION,\n author=\"Harry Marr\",\n author_email=\"[email protected]\",\n maintainer=\"Stefan Wojcik\",\n maintainer_email=\"[email protected]\",\n url=\"http://mongoengine.org/\",\n download_url=\"https://github.com/MongoEngine/mongoengine/tarball/master\",\n license=\"MIT\",\n include_package_data=True,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n platforms=[\"any\"],\n classifiers=CLASSIFIERS,\n python_requires=\">=3.5\",\n install_requires=[\"pymongo>=3.4, <4.0\"],\n cmdclass={\"test\": PyTest},\n **extra_opts\n)\n",
"path": "setup.py"
}
] | [
{
"content": "import os\nimport sys\n\nfrom pkg_resources import normalize_path\nfrom setuptools import find_packages, setup\nfrom setuptools.command.test import test as TestCommand\n\n# Hack to silence atexit traceback in newer python versions\ntry:\n import multiprocessing\nexcept ImportError:\n pass\n\nDESCRIPTION = \"MongoEngine is a Python Object-Document Mapper for working with MongoDB.\"\n\ntry:\n with open(\"README.rst\") as fin:\n LONG_DESCRIPTION = fin.read()\nexcept Exception:\n LONG_DESCRIPTION = None\n\n\ndef get_version(version_tuple):\n \"\"\"Return the version tuple as a string, e.g. for (0, 10, 7),\n return '0.10.7'.\n \"\"\"\n return \".\".join(map(str, version_tuple))\n\n\nclass PyTest(TestCommand):\n \"\"\"Will force pytest to search for tests inside the build directory\n for 2to3 converted code (used by tox), instead of the current directory.\n Required as long as we need 2to3\n\n Known Limitation: https://tox.readthedocs.io/en/latest/example/pytest.html#known-issues-and-limitations\n Source: https://www.hackzine.org/python-testing-with-pytest-and-2to3-plus-tox-and-travis-ci.html\n \"\"\"\n\n # https://pytest.readthedocs.io/en/2.7.3/goodpractises.html#integration-with-setuptools-test-commands\n # Allows to provide pytest command argument through the test runner command `python setup.py test`\n # e.g: `python setup.py test -a \"-k=test\"`\n # This only works for 1 argument though\n user_options = [(\"pytest-args=\", \"a\", \"Arguments to pass to py.test\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = \"\"\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = [\"tests\"]\n self.test_suite = True\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n from pkg_resources import _namespace_packages\n import pytest\n\n # Purge modules under test from sys.modules. The test loader will\n # re-import them from the build location. 
Required when 2to3 is used\n # with namespace packages.\n if sys.version_info >= (3,) and getattr(self.distribution, \"use_2to3\", False):\n module = self.test_args[-1].split(\".\")[0]\n if module in _namespace_packages:\n del_modules = []\n if module in sys.modules:\n del_modules.append(module)\n module += \".\"\n for name in sys.modules:\n if name.startswith(module):\n del_modules.append(name)\n map(sys.modules.__delitem__, del_modules)\n\n # Run on the build directory for 2to3-built code\n # This will prevent the old 2.x code from being found\n # by py.test discovery mechanism, that apparently\n # ignores sys.path..\n ei_cmd = self.get_finalized_command(\"egg_info\")\n self.test_args = [normalize_path(ei_cmd.egg_base)]\n\n cmd_args = self.test_args + ([self.pytest_args] if self.pytest_args else [])\n errno = pytest.main(cmd_args)\n sys.exit(errno)\n\n\n# Dirty hack to get version number from monogengine/__init__.py - we can't\n# import it as it depends on PyMongo and PyMongo isn't installed until this\n# file is read\ninit = os.path.join(os.path.dirname(__file__), \"mongoengine\", \"__init__.py\")\nversion_line = list(filter(lambda l: l.startswith(\"VERSION\"), open(init)))[0]\n\nVERSION = get_version(eval(version_line.split(\"=\")[-1]))\n\nCLASSIFIERS = [\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Database\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nextra_opts = {\n \"packages\": find_packages(exclude=[\"tests\", \"tests.*\"]),\n \"tests_require\": [\n \"pytest<5.0\",\n \"pytest-cov\",\n \"coverage<5.0\", # recent coverage switched to sqlite format for the .coverage file which isn't handled properly by coveralls\n \"blinker\",\n \"Pillow>=7.0.0\",\n ],\n}\n\nif \"test\" in sys.argv:\n extra_opts[\"packages\"] = find_packages()\n extra_opts[\"package_data\"] = {\n \"tests\": [\"fields/mongoengine.png\", \"fields/mongodb_leaf.png\"]\n }\n\nsetup(\n name=\"mongoengine\",\n version=VERSION,\n author=\"Harry Marr\",\n author_email=\"[email protected]\",\n maintainer=\"Stefan Wojcik\",\n maintainer_email=\"[email protected]\",\n url=\"http://mongoengine.org/\",\n download_url=\"https://github.com/MongoEngine/mongoengine/tarball/master\",\n license=\"MIT\",\n include_package_data=True,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n platforms=[\"any\"],\n classifiers=CLASSIFIERS,\n python_requires=\">=3.5\",\n install_requires=[\"pymongo>=3.4, <4.0\"],\n cmdclass={\"test\": PyTest},\n **extra_opts\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 393de9c72..80819b130 100644
--- a/setup.py
+++ b/setup.py
@@ -115,7 +115,7 @@ def run_tests(self):
"pytest-cov",
"coverage<5.0", # recent coverage switched to sqlite format for the .coverage file which isn't handled properly by coveralls
"blinker",
- "Pillow>=2.0.0, <7.0.0", # 7.0.0 dropped Python2 support
+ "Pillow>=7.0.0",
],
}
diff --git a/tests/fields/test_file_field.py b/tests/fields/test_file_field.py
index de10c9870..4f3f1d45d 100644
--- a/tests/fields/test_file_field.py
+++ b/tests/fields/test_file_field.py
@@ -429,7 +429,7 @@ class TestFile(Document):
@require_pil
def test_image_field_resize(self):
class TestImage(Document):
- image = ImageField(size=(185, 37))
+ image = ImageField(size=(185, 37, True))
TestImage.drop_collection()
@@ -471,7 +471,7 @@ class TestImage(Document):
@require_pil
def test_image_field_thumbnail(self):
class TestImage(Document):
- image = ImageField(thumbnail_size=(92, 18))
+ image = ImageField(thumbnail_size=(92, 18, True))
TestImage.drop_collection()
|
pallets__werkzeug-1415 | ProxyFix may trust wrong number of values for X-Forwarded-Prefix
Due to a typo, `werkzeug.contrib.fixers.ProxyFix` gets the number of values to trust for `X-Forwarded-Prefix` from `x_for`, rather than from `x_prefix`.
```diff
diff --git a/werkzeug/contrib/fixers.py b/werkzeug/contrib/fixers.py
index 9a304405..af229cef 100644
--- a/werkzeug/contrib/fixers.py
+++ b/werkzeug/contrib/fixers.py
@@ -269,7 +269,7 @@ class ProxyFix(object):
environ['SERVER_PORT'] = x_port
x_prefix = self._get_trusted_comma(
- self.x_for, environ_get('HTTP_X_FORWARDED_PREFIX'))
+ self.x_prefix, environ_get('HTTP_X_FORWARDED_PREFIX'))
if x_prefix:
environ['SCRIPT_NAME'] = x_prefix
```
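To see the practical impact, here is a minimal sketch (mirroring the regression test added with the fix) that drives the middleware with a hand-built WSGI environ; the host and prefix values are illustrative:
```python
from werkzeug.contrib.fixers import ProxyFix

def app(environ, start_response):
    # Echo the script name ProxyFix settled on.
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [environ["SCRIPT_NAME"].encode()]

fixed = ProxyFix(app, x_for=2, x_prefix=1)

environ = {
    "REQUEST_METHOD": "GET",
    "PATH_INFO": "/",
    "SCRIPT_NAME": "",
    "SERVER_NAME": "spam",
    "SERVER_PORT": "80",
    "HTTP_HOST": "spam",
    "REMOTE_ADDR": "192.168.0.2",
    "wsgi.url_scheme": "http",
    "HTTP_X_FORWARDED_FOR": "192.168.0.1, 192.168.0.3",
    "HTTP_X_FORWARDED_PREFIX": "/ham, /eggs",
}

body = b"".join(fixed(environ, lambda status, headers: None))
# Before the fix, x_for=2 is applied to X-Forwarded-Prefix, so SCRIPT_NAME
# becomes "/ham"; with the fix, x_prefix=1 correctly yields "/eggs".
print(body)
```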
| [
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\n werkzeug.contrib.fixers\n ~~~~~~~~~~~~~~~~~~~~~~~\n\n .. versionadded:: 0.5\n\n This module includes various helpers that fix bugs in web servers. They may\n be necessary for some versions of a buggy web server but not others. We try\n to stay updated with the status of the bugs as good as possible but you have\n to make sure whether they fix the problem you encounter.\n\n If you notice bugs in webservers not fixed in this module consider\n contributing a patch.\n\n :copyright: Copyright 2009 by the Werkzeug Team, see AUTHORS for more details.\n :license: BSD, see LICENSE for more details.\n\"\"\"\nimport warnings\n\nfrom werkzeug.contrib import WerkzeugContribDeprecationWarning\n\ntry:\n from urllib import unquote\nexcept ImportError:\n from urllib.parse import unquote\n\nfrom werkzeug.http import parse_options_header, parse_cache_control_header, \\\n parse_set_header\nfrom werkzeug.useragents import UserAgent\nfrom werkzeug.datastructures import Headers, ResponseCacheControl\n\n\nclass CGIRootFix(object):\n \"\"\"Wrap the application in this middleware if you are using FastCGI\n or CGI and you have problems with your app root being set to the CGI\n script's path instead of the path users are going to visit.\n\n .. versionchanged:: 0.9\n Added `app_root` parameter and renamed from\n ``LighttpdCGIRootFix``.\n\n :param app: the WSGI application\n :param app_root: Defaulting to ``'/'``, you can set this to\n something else if your app is mounted somewhere else.\n \"\"\"\n\n def __init__(self, app, app_root='/'):\n self.app = app\n self.app_root = app_root.strip(\"/\")\n\n def __call__(self, environ, start_response):\n environ['SCRIPT_NAME'] = self.app_root\n return self.app(environ, start_response)\n\n\nclass LighttpdCGIRootFix(CGIRootFix):\n def __init__(self, *args, **kwargs):\n warnings.warn(\n \"LighttpdCGIRootFix is renamed CGIRootFix and will be\"\n \" removed in 1.0.\",\n WerkzeugContribDeprecationWarning,\n stacklevel=3,\n )\n super(LighttpdCGIRootFix, self).__init__(*args, **kwargs)\n\n\nclass PathInfoFromRequestUriFix(object):\n\n \"\"\"On windows environment variables are limited to the system charset\n which makes it impossible to store the `PATH_INFO` variable in the\n environment without loss of information on some systems.\n\n This is for example a problem for CGI scripts on a Windows Apache.\n\n This fixer works by recreating the `PATH_INFO` from `REQUEST_URI`,\n `REQUEST_URL`, or `UNENCODED_URL` (whatever is available). Thus the\n fix can only be applied if the webserver supports either of these\n variables.\n\n :param app: the WSGI application\n \"\"\"\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n for key in 'REQUEST_URL', 'REQUEST_URI', 'UNENCODED_URL':\n if key not in environ:\n continue\n request_uri = unquote(environ[key])\n script_name = unquote(environ.get('SCRIPT_NAME', ''))\n if request_uri.startswith(script_name):\n environ['PATH_INFO'] = request_uri[len(script_name):] \\\n .split('?', 1)[0]\n break\n return self.app(environ, start_response)\n\n\nclass ProxyFix(object):\n \"\"\"Adjust the WSGI environ based on ``Forwarded`` headers that\n proxies in front of the application may set.\n\n When the application is running behind a server like Nginx (or\n another server or proxy), WSGI will see the request as coming from\n that server rather than the real client. 
Proxies set various headers\n to track where the request actually came from.\n\n This middleware should only be applied if the application is\n actually behind such a proxy, and should be configured with the\n number of proxies that are chained in front of it. Not all proxies\n set all the headers. Since incoming headers can be faked, you must\n set how many proxies are setting each header so the middleware knows\n what to trust.\n\n The original values of the headers are stored in the WSGI\n environ as ``werkzeug.proxy_fix.orig``, a dict.\n\n :param app: The WSGI application.\n :param x_for: Number of values to trust for ``X-Forwarded-For``.\n :param x_proto: Number of values to trust for ``X-Forwarded-Proto``.\n :param x_host: Number of values to trust for ``X-Forwarded-Host``.\n :param x_port: Number of values to trust for ``X-Forwarded-Port``.\n :param x_prefix: Number of values to trust for\n ``X-Forwarded-Prefix``.\n :param num_proxies: Deprecated, use ``x_for`` instead.\n\n .. versionchanged:: 0.15\n Support ``X-Forwarded-Port`` and ``X-Forwarded-Prefix``.\n\n .. versionchanged:: 0.15\n All headers support multiple values. The ``num_proxies``\n argument is deprecated. Each header is configured with a\n separate number of trusted proxies.\n\n .. versionchanged:: 0.15\n Original WSGI environ values are stored in the\n ``werkzeug.proxy_fix.orig`` dict. ``orig_remote_addr``,\n ``orig_wsgi_url_scheme``, and ``orig_http_host`` are deprecated.\n\n .. versionchanged:: 0.15\n ``X-Fowarded-Host`` and ``X-Forwarded-Port`` modify\n ``SERVER_NAME`` and ``SERVER_PORT``.\n \"\"\"\n\n def __init__(\n self, app, num_proxies=None,\n x_for=1, x_proto=0, x_host=0, x_port=0, x_prefix=0\n ):\n self.app = app\n self.x_for = x_for\n self.x_proto = x_proto\n self.x_host = x_host\n self.x_port = x_port\n self.x_prefix = x_prefix\n self.num_proxies = num_proxies\n\n @property\n def num_proxies(self):\n \"\"\"The number of proxies setting ``X-Forwarded-For`` in front\n of the application.\n\n .. deprecated:: 0.15\n A separate number of trusted proxies is configured for each\n header. ``num_proxies`` maps to ``x_for``.\n\n :internal:\n \"\"\"\n warnings.warn(DeprecationWarning(\n \"num_proxies is deprecated. Use x_for instead.\"))\n return self.x_for\n\n @num_proxies.setter\n def num_proxies(self, value):\n if value is not None:\n warnings.warn(DeprecationWarning(\n 'num_proxies is deprecated. Use x_for instead.'))\n self.x_for = value\n\n def get_remote_addr(self, forwarded_for):\n \"\"\"Get the real ``remote_addr`` by looking backwards ``x_for``\n number of values in the ``X-Forwarded-For`` header.\n\n :param forwarded_for: List of values parsed from the\n ``X-Forwarded-For`` header.\n :return: The real ``remote_addr``, or ``None`` if there were not\n at least ``x_for`` values.\n\n .. deprecated:: 0.15\n This is handled internally for each header.\n\n .. versionchanged:: 0.9\n Use ``num_proxies`` instead of always picking the first\n value.\n\n .. versionadded:: 0.8\n \"\"\"\n warnings.warn(DeprecationWarning(\"get_remote_addr is deprecated.\"))\n return self._get_trusted_comma(self.x_for, ','.join(forwarded_for))\n\n def _get_trusted_comma(self, trusted, value):\n \"\"\"Get the real value from a comma-separated header based on the\n configured number of trusted proxies.\n\n :param trusted: Number of values to trust in the header.\n :param value: Header value to parse.\n :return: The real value, or ``None`` if there are fewer values\n than the number of trusted proxies.\n\n .. 
versionadded:: 0.15\n \"\"\"\n if not (trusted and value):\n return\n values = [x.strip() for x in value.split(',')]\n if len(values) >= trusted:\n return values[-trusted]\n\n def __call__(self, environ, start_response):\n \"\"\"Modify the WSGI environ based on the various ``Forwarded``\n headers before calling the wrapped application. Store the\n original environ values in ``werkzeug.proxy_fix.orig_{key}``.\n \"\"\"\n environ_get = environ.get\n orig_remote_addr = environ_get('REMOTE_ADDR')\n orig_wsgi_url_scheme = environ_get('wsgi.url_scheme')\n orig_http_host = environ_get('HTTP_HOST')\n environ.update({\n 'werkzeug.proxy_fix.orig': {\n 'REMOTE_ADDR': orig_remote_addr,\n 'wsgi.url_scheme': orig_wsgi_url_scheme,\n 'HTTP_HOST': orig_http_host,\n 'SERVER_NAME': environ_get('SERVER_NAME'),\n 'SERVER_PORT': environ_get('SERVER_PORT'),\n 'SCRIPT_NAME': environ_get('SCRIPT_NAME'),\n },\n # todo: remove deprecated keys\n 'werkzeug.proxy_fix.orig_remote_addr': orig_remote_addr,\n 'werkzeug.proxy_fix.orig_wsgi_url_scheme': orig_wsgi_url_scheme,\n 'werkzeug.proxy_fix.orig_http_host': orig_http_host,\n })\n\n x_for = self._get_trusted_comma(\n self.x_for, environ_get('HTTP_X_FORWARDED_FOR'))\n if x_for:\n environ['REMOTE_ADDR'] = x_for\n\n x_proto = self._get_trusted_comma(\n self.x_proto, environ_get('HTTP_X_FORWARDED_PROTO'))\n if x_proto:\n environ['wsgi.url_scheme'] = x_proto\n\n x_host = self._get_trusted_comma(\n self.x_host, environ_get('HTTP_X_FORWARDED_HOST'))\n if x_host:\n environ['HTTP_HOST'] = x_host\n parts = x_host.split(':', 1)\n environ['SERVER_NAME'] = parts[0]\n if len(parts) == 2:\n environ['SERVER_PORT'] = parts[1]\n\n x_port = self._get_trusted_comma(\n self.x_port, environ_get('HTTP_X_FORWARDED_PORT'))\n if x_port:\n host = environ.get('HTTP_HOST')\n if host:\n parts = host.split(':', 1)\n host = parts[0] if len(parts) == 2 else host\n environ['HTTP_HOST'] = '%s:%s' % (host, x_port)\n environ['SERVER_PORT'] = x_port\n\n x_prefix = self._get_trusted_comma(\n self.x_for, environ_get('HTTP_X_FORWARDED_PREFIX'))\n if x_prefix:\n environ['SCRIPT_NAME'] = x_prefix\n\n return self.app(environ, start_response)\n\n\nclass HeaderRewriterFix(object):\n\n \"\"\"This middleware can remove response headers and add others. This\n is for example useful to remove the `Date` header from responses if you\n are using a server that adds that header, no matter if it's present or\n not or to add `X-Powered-By` headers::\n\n app = HeaderRewriterFix(app, remove_headers=['Date'],\n add_headers=[('X-Powered-By', 'WSGI')])\n\n :param app: the WSGI application\n :param remove_headers: a sequence of header keys that should be\n removed.\n :param add_headers: a sequence of ``(key, value)`` tuples that should\n be added.\n \"\"\"\n\n def __init__(self, app, remove_headers=None, add_headers=None):\n self.app = app\n self.remove_headers = set(x.lower() for x in (remove_headers or ()))\n self.add_headers = list(add_headers or ())\n\n def __call__(self, environ, start_response):\n def rewriting_start_response(status, headers, exc_info=None):\n new_headers = []\n for key, value in headers:\n if key.lower() not in self.remove_headers:\n new_headers.append((key, value))\n new_headers += self.add_headers\n return start_response(status, new_headers, exc_info)\n return self.app(environ, rewriting_start_response)\n\n\nclass InternetExplorerFix(object):\n\n \"\"\"This middleware fixes a couple of bugs with Microsoft Internet\n Explorer. 
Currently the following fixes are applied:\n\n - removing of `Vary` headers for unsupported mimetypes which\n causes troubles with caching. Can be disabled by passing\n ``fix_vary=False`` to the constructor.\n see: http://support.microsoft.com/kb/824847/en-us\n\n - removes offending headers to work around caching bugs in\n Internet Explorer if `Content-Disposition` is set. Can be\n disabled by passing ``fix_attach=False`` to the constructor.\n\n If it does not detect affected Internet Explorer versions it won't touch\n the request / response.\n \"\"\"\n\n # This code was inspired by Django fixers for the same bugs. The\n # fix_vary and fix_attach fixers were originally implemented in Django\n # by Michael Axiak and is available as part of the Django project:\n # https://code.djangoproject.com/ticket/4148\n\n def __init__(self, app, fix_vary=True, fix_attach=True):\n self.app = app\n self.fix_vary = fix_vary\n self.fix_attach = fix_attach\n\n def fix_headers(self, environ, headers, status=None):\n if self.fix_vary:\n header = headers.get('content-type', '')\n mimetype, options = parse_options_header(header)\n if mimetype not in ('text/html', 'text/plain', 'text/sgml'):\n headers.pop('vary', None)\n\n if self.fix_attach and 'content-disposition' in headers:\n pragma = parse_set_header(headers.get('pragma', ''))\n pragma.discard('no-cache')\n header = pragma.to_header()\n if not header:\n headers.pop('pragma', '')\n else:\n headers['Pragma'] = header\n header = headers.get('cache-control', '')\n if header:\n cc = parse_cache_control_header(header,\n cls=ResponseCacheControl)\n cc.no_cache = None\n cc.no_store = False\n header = cc.to_header()\n if not header:\n headers.pop('cache-control', '')\n else:\n headers['Cache-Control'] = header\n\n def run_fixed(self, environ, start_response):\n def fixing_start_response(status, headers, exc_info=None):\n headers = Headers(headers)\n self.fix_headers(environ, headers, status)\n return start_response(status, headers.to_wsgi_list(), exc_info)\n return self.app(environ, fixing_start_response)\n\n def __call__(self, environ, start_response):\n ua = UserAgent(environ)\n if ua.browser != 'msie':\n return self.app(environ, start_response)\n return self.run_fixed(environ, start_response)\n",
"path": "werkzeug/contrib/fixers.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\n werkzeug.contrib.fixers\n ~~~~~~~~~~~~~~~~~~~~~~~\n\n .. versionadded:: 0.5\n\n This module includes various helpers that fix bugs in web servers. They may\n be necessary for some versions of a buggy web server but not others. We try\n to stay updated with the status of the bugs as good as possible but you have\n to make sure whether they fix the problem you encounter.\n\n If you notice bugs in webservers not fixed in this module consider\n contributing a patch.\n\n :copyright: Copyright 2009 by the Werkzeug Team, see AUTHORS for more details.\n :license: BSD, see LICENSE for more details.\n\"\"\"\nimport warnings\n\nfrom werkzeug.contrib import WerkzeugContribDeprecationWarning\n\ntry:\n from urllib import unquote\nexcept ImportError:\n from urllib.parse import unquote\n\nfrom werkzeug.http import parse_options_header, parse_cache_control_header, \\\n parse_set_header\nfrom werkzeug.useragents import UserAgent\nfrom werkzeug.datastructures import Headers, ResponseCacheControl\n\n\nclass CGIRootFix(object):\n \"\"\"Wrap the application in this middleware if you are using FastCGI\n or CGI and you have problems with your app root being set to the CGI\n script's path instead of the path users are going to visit.\n\n .. versionchanged:: 0.9\n Added `app_root` parameter and renamed from\n ``LighttpdCGIRootFix``.\n\n :param app: the WSGI application\n :param app_root: Defaulting to ``'/'``, you can set this to\n something else if your app is mounted somewhere else.\n \"\"\"\n\n def __init__(self, app, app_root='/'):\n self.app = app\n self.app_root = app_root.strip(\"/\")\n\n def __call__(self, environ, start_response):\n environ['SCRIPT_NAME'] = self.app_root\n return self.app(environ, start_response)\n\n\nclass LighttpdCGIRootFix(CGIRootFix):\n def __init__(self, *args, **kwargs):\n warnings.warn(\n \"LighttpdCGIRootFix is renamed CGIRootFix and will be\"\n \" removed in 1.0.\",\n WerkzeugContribDeprecationWarning,\n stacklevel=3,\n )\n super(LighttpdCGIRootFix, self).__init__(*args, **kwargs)\n\n\nclass PathInfoFromRequestUriFix(object):\n\n \"\"\"On windows environment variables are limited to the system charset\n which makes it impossible to store the `PATH_INFO` variable in the\n environment without loss of information on some systems.\n\n This is for example a problem for CGI scripts on a Windows Apache.\n\n This fixer works by recreating the `PATH_INFO` from `REQUEST_URI`,\n `REQUEST_URL`, or `UNENCODED_URL` (whatever is available). Thus the\n fix can only be applied if the webserver supports either of these\n variables.\n\n :param app: the WSGI application\n \"\"\"\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n for key in 'REQUEST_URL', 'REQUEST_URI', 'UNENCODED_URL':\n if key not in environ:\n continue\n request_uri = unquote(environ[key])\n script_name = unquote(environ.get('SCRIPT_NAME', ''))\n if request_uri.startswith(script_name):\n environ['PATH_INFO'] = request_uri[len(script_name):] \\\n .split('?', 1)[0]\n break\n return self.app(environ, start_response)\n\n\nclass ProxyFix(object):\n \"\"\"Adjust the WSGI environ based on ``Forwarded`` headers that\n proxies in front of the application may set.\n\n When the application is running behind a server like Nginx (or\n another server or proxy), WSGI will see the request as coming from\n that server rather than the real client. 
Proxies set various headers\n to track where the request actually came from.\n\n This middleware should only be applied if the application is\n actually behind such a proxy, and should be configured with the\n number of proxies that are chained in front of it. Not all proxies\n set all the headers. Since incoming headers can be faked, you must\n set how many proxies are setting each header so the middleware knows\n what to trust.\n\n The original values of the headers are stored in the WSGI\n environ as ``werkzeug.proxy_fix.orig``, a dict.\n\n :param app: The WSGI application.\n :param x_for: Number of values to trust for ``X-Forwarded-For``.\n :param x_proto: Number of values to trust for ``X-Forwarded-Proto``.\n :param x_host: Number of values to trust for ``X-Forwarded-Host``.\n :param x_port: Number of values to trust for ``X-Forwarded-Port``.\n :param x_prefix: Number of values to trust for\n ``X-Forwarded-Prefix``.\n :param num_proxies: Deprecated, use ``x_for`` instead.\n\n .. versionchanged:: 0.15\n Support ``X-Forwarded-Port`` and ``X-Forwarded-Prefix``.\n\n .. versionchanged:: 0.15\n All headers support multiple values. The ``num_proxies``\n argument is deprecated. Each header is configured with a\n separate number of trusted proxies.\n\n .. versionchanged:: 0.15\n Original WSGI environ values are stored in the\n ``werkzeug.proxy_fix.orig`` dict. ``orig_remote_addr``,\n ``orig_wsgi_url_scheme``, and ``orig_http_host`` are deprecated.\n\n .. versionchanged:: 0.15\n ``X-Fowarded-Host`` and ``X-Forwarded-Port`` modify\n ``SERVER_NAME`` and ``SERVER_PORT``.\n \"\"\"\n\n def __init__(\n self, app, num_proxies=None,\n x_for=1, x_proto=0, x_host=0, x_port=0, x_prefix=0\n ):\n self.app = app\n self.x_for = x_for\n self.x_proto = x_proto\n self.x_host = x_host\n self.x_port = x_port\n self.x_prefix = x_prefix\n self.num_proxies = num_proxies\n\n @property\n def num_proxies(self):\n \"\"\"The number of proxies setting ``X-Forwarded-For`` in front\n of the application.\n\n .. deprecated:: 0.15\n A separate number of trusted proxies is configured for each\n header. ``num_proxies`` maps to ``x_for``.\n\n :internal:\n \"\"\"\n warnings.warn(DeprecationWarning(\n \"num_proxies is deprecated. Use x_for instead.\"))\n return self.x_for\n\n @num_proxies.setter\n def num_proxies(self, value):\n if value is not None:\n warnings.warn(DeprecationWarning(\n 'num_proxies is deprecated. Use x_for instead.'))\n self.x_for = value\n\n def get_remote_addr(self, forwarded_for):\n \"\"\"Get the real ``remote_addr`` by looking backwards ``x_for``\n number of values in the ``X-Forwarded-For`` header.\n\n :param forwarded_for: List of values parsed from the\n ``X-Forwarded-For`` header.\n :return: The real ``remote_addr``, or ``None`` if there were not\n at least ``x_for`` values.\n\n .. deprecated:: 0.15\n This is handled internally for each header.\n\n .. versionchanged:: 0.9\n Use ``num_proxies`` instead of always picking the first\n value.\n\n .. versionadded:: 0.8\n \"\"\"\n warnings.warn(DeprecationWarning(\"get_remote_addr is deprecated.\"))\n return self._get_trusted_comma(self.x_for, ','.join(forwarded_for))\n\n def _get_trusted_comma(self, trusted, value):\n \"\"\"Get the real value from a comma-separated header based on the\n configured number of trusted proxies.\n\n :param trusted: Number of values to trust in the header.\n :param value: Header value to parse.\n :return: The real value, or ``None`` if there are fewer values\n than the number of trusted proxies.\n\n .. 
versionadded:: 0.15\n \"\"\"\n if not (trusted and value):\n return\n values = [x.strip() for x in value.split(',')]\n if len(values) >= trusted:\n return values[-trusted]\n\n def __call__(self, environ, start_response):\n \"\"\"Modify the WSGI environ based on the various ``Forwarded``\n headers before calling the wrapped application. Store the\n original environ values in ``werkzeug.proxy_fix.orig_{key}``.\n \"\"\"\n environ_get = environ.get\n orig_remote_addr = environ_get('REMOTE_ADDR')\n orig_wsgi_url_scheme = environ_get('wsgi.url_scheme')\n orig_http_host = environ_get('HTTP_HOST')\n environ.update({\n 'werkzeug.proxy_fix.orig': {\n 'REMOTE_ADDR': orig_remote_addr,\n 'wsgi.url_scheme': orig_wsgi_url_scheme,\n 'HTTP_HOST': orig_http_host,\n 'SERVER_NAME': environ_get('SERVER_NAME'),\n 'SERVER_PORT': environ_get('SERVER_PORT'),\n 'SCRIPT_NAME': environ_get('SCRIPT_NAME'),\n },\n # todo: remove deprecated keys\n 'werkzeug.proxy_fix.orig_remote_addr': orig_remote_addr,\n 'werkzeug.proxy_fix.orig_wsgi_url_scheme': orig_wsgi_url_scheme,\n 'werkzeug.proxy_fix.orig_http_host': orig_http_host,\n })\n\n x_for = self._get_trusted_comma(\n self.x_for, environ_get('HTTP_X_FORWARDED_FOR'))\n if x_for:\n environ['REMOTE_ADDR'] = x_for\n\n x_proto = self._get_trusted_comma(\n self.x_proto, environ_get('HTTP_X_FORWARDED_PROTO'))\n if x_proto:\n environ['wsgi.url_scheme'] = x_proto\n\n x_host = self._get_trusted_comma(\n self.x_host, environ_get('HTTP_X_FORWARDED_HOST'))\n if x_host:\n environ['HTTP_HOST'] = x_host\n parts = x_host.split(':', 1)\n environ['SERVER_NAME'] = parts[0]\n if len(parts) == 2:\n environ['SERVER_PORT'] = parts[1]\n\n x_port = self._get_trusted_comma(\n self.x_port, environ_get('HTTP_X_FORWARDED_PORT'))\n if x_port:\n host = environ.get('HTTP_HOST')\n if host:\n parts = host.split(':', 1)\n host = parts[0] if len(parts) == 2 else host\n environ['HTTP_HOST'] = '%s:%s' % (host, x_port)\n environ['SERVER_PORT'] = x_port\n\n x_prefix = self._get_trusted_comma(\n self.x_prefix, environ_get('HTTP_X_FORWARDED_PREFIX'))\n if x_prefix:\n environ['SCRIPT_NAME'] = x_prefix\n\n return self.app(environ, start_response)\n\n\nclass HeaderRewriterFix(object):\n\n \"\"\"This middleware can remove response headers and add others. This\n is for example useful to remove the `Date` header from responses if you\n are using a server that adds that header, no matter if it's present or\n not or to add `X-Powered-By` headers::\n\n app = HeaderRewriterFix(app, remove_headers=['Date'],\n add_headers=[('X-Powered-By', 'WSGI')])\n\n :param app: the WSGI application\n :param remove_headers: a sequence of header keys that should be\n removed.\n :param add_headers: a sequence of ``(key, value)`` tuples that should\n be added.\n \"\"\"\n\n def __init__(self, app, remove_headers=None, add_headers=None):\n self.app = app\n self.remove_headers = set(x.lower() for x in (remove_headers or ()))\n self.add_headers = list(add_headers or ())\n\n def __call__(self, environ, start_response):\n def rewriting_start_response(status, headers, exc_info=None):\n new_headers = []\n for key, value in headers:\n if key.lower() not in self.remove_headers:\n new_headers.append((key, value))\n new_headers += self.add_headers\n return start_response(status, new_headers, exc_info)\n return self.app(environ, rewriting_start_response)\n\n\nclass InternetExplorerFix(object):\n\n \"\"\"This middleware fixes a couple of bugs with Microsoft Internet\n Explorer. 
Currently the following fixes are applied:\n\n - removing of `Vary` headers for unsupported mimetypes which\n causes troubles with caching. Can be disabled by passing\n ``fix_vary=False`` to the constructor.\n see: http://support.microsoft.com/kb/824847/en-us\n\n - removes offending headers to work around caching bugs in\n Internet Explorer if `Content-Disposition` is set. Can be\n disabled by passing ``fix_attach=False`` to the constructor.\n\n If it does not detect affected Internet Explorer versions it won't touch\n the request / response.\n \"\"\"\n\n # This code was inspired by Django fixers for the same bugs. The\n # fix_vary and fix_attach fixers were originally implemented in Django\n # by Michael Axiak and is available as part of the Django project:\n # https://code.djangoproject.com/ticket/4148\n\n def __init__(self, app, fix_vary=True, fix_attach=True):\n self.app = app\n self.fix_vary = fix_vary\n self.fix_attach = fix_attach\n\n def fix_headers(self, environ, headers, status=None):\n if self.fix_vary:\n header = headers.get('content-type', '')\n mimetype, options = parse_options_header(header)\n if mimetype not in ('text/html', 'text/plain', 'text/sgml'):\n headers.pop('vary', None)\n\n if self.fix_attach and 'content-disposition' in headers:\n pragma = parse_set_header(headers.get('pragma', ''))\n pragma.discard('no-cache')\n header = pragma.to_header()\n if not header:\n headers.pop('pragma', '')\n else:\n headers['Pragma'] = header\n header = headers.get('cache-control', '')\n if header:\n cc = parse_cache_control_header(header,\n cls=ResponseCacheControl)\n cc.no_cache = None\n cc.no_store = False\n header = cc.to_header()\n if not header:\n headers.pop('cache-control', '')\n else:\n headers['Cache-Control'] = header\n\n def run_fixed(self, environ, start_response):\n def fixing_start_response(status, headers, exc_info=None):\n headers = Headers(headers)\n self.fix_headers(environ, headers, status)\n return start_response(status, headers.to_wsgi_list(), exc_info)\n return self.app(environ, fixing_start_response)\n\n def __call__(self, environ, start_response):\n ua = UserAgent(environ)\n if ua.browser != 'msie':\n return self.app(environ, start_response)\n return self.run_fixed(environ, start_response)\n",
"path": "werkzeug/contrib/fixers.py"
}
] | diff --git a/tests/contrib/test_fixers.py b/tests/contrib/test_fixers.py
index 70f0ff306..f722a90d0 100644
--- a/tests/contrib/test_fixers.py
+++ b/tests/contrib/test_fixers.py
@@ -121,7 +121,13 @@ def test_path_info_from_request_uri_fix(self):
'REMOTE_ADDR': '192.168.0.1',
'HTTP_HOST': 'spam',
'HTTP_X_FORWARDED_FOR': ', 192.168.0.3'
- }, 'http://spam/', id='ignore empty')
+ }, 'http://spam/', id='ignore empty'),
+ pytest.param({'x_for': 2, 'x_prefix': 1}, {
+ 'REMOTE_ADDR': '192.168.0.2',
+ 'HTTP_HOST': 'spam',
+ 'HTTP_X_FORWARDED_FOR': '192.168.0.1, 192.168.0.3',
+ 'HTTP_X_FORWARDED_PREFIX': '/ham, /eggs',
+ }, 'http://spam/eggs/', id='prefix < for')
))
def test_proxy_fix_new(self, kwargs, base, url_root):
@Request.application
diff --git a/werkzeug/contrib/fixers.py b/werkzeug/contrib/fixers.py
index 9a3044057..af229cef5 100644
--- a/werkzeug/contrib/fixers.py
+++ b/werkzeug/contrib/fixers.py
@@ -269,7 +269,7 @@ def __call__(self, environ, start_response):
environ['SERVER_PORT'] = x_port
x_prefix = self._get_trusted_comma(
- self.x_for, environ_get('HTTP_X_FORWARDED_PREFIX'))
+ self.x_prefix, environ_get('HTTP_X_FORWARDED_PREFIX'))
if x_prefix:
environ['SCRIPT_NAME'] = x_prefix
|
secdev__scapy-2094 | TLS dissection error
Occurred during a sniff on master.
```
Z:\Coding\github\scapy\scapy\layers\tls\cert.py in __call__(cls, cert_path)
574 cert = X509_Cert(obj.der)
575 except Exception:
--> 576 raise Exception("Unable to import certificate")
577 obj.import_from_asn1pkt(cert)
578 return obj
Exception: Unable to import certificate
```
To reproduce, from `debug.crashed_on`:
```
pkt = Ether(b'\xd0P\x99V\xdd\xf9\x14\x0cv\x8f\xfe(\x86\xdd`\x0f\xb1\xbe\x04\xd8\x06{*\x00\x14P@\x07\x08\x16\x00\x00\x00\x00\x00\x00 \x04*\x01\x0e5/\x17\xfe`\x18\xb5\x9b\xfc \xc8\xc5\xa9\x01\xbb\xd7o{\xc0\x89\n\x91\xec\xf2\x9eP\x10\x00\x8d\xb3\x1b\x00\x00\xc00\x04J<L\xe3\xe7la\x8f\xf8\xb8\xaeC\xaa\xfe\x98\xd9\xb3b\xa3\xc3\x04\x10\xa0\xb3\x19U\xfaX\xd3\x86\x92D\xd1Ti\x07\x00N\x9e\x0b8~\x1d\xfa\xdc\xfc\x8dA\xaf\x12b\xbe\x19\x8a\xd8,\xe4nb2x\xca\x0bk\xd7~\x07\x11\x19\xe1\xc5x\xbf\xa9C\xa7[\x10\xd2\xbe\xd7\xdf\x18xh\xcc6\x06V\xfb\x0e\xf3\xdd\xb3\x11\xde,\xfd\xc6\x16\x04\xf0MB\xac\x05\xb4f\xa39*eD\xdd\xaa\t\x8f\x99\x16h\xa33\xc71\xc8\xeb\xf7W\xd1\xec\xe6\x13\x96XHq~\x8f\xf3\xfcR\xfc\x06\xb9\r\xe9\xe6\xfa=J\xb1\x81\xfc\xd2\xc8\xec\xdc\x93\x9b%\x86\xa7\\\xe3G\x8d\x0f\xab\xf5b\x97\xac\xe6\xa6\xcb\xdf.e\x00\x0cQ\xf7\x9c\xd8e\xc6\x06\x8b\x9e\xdbm\xd8N\x00\xc4\x1f\x1f\xeb@\x12\xbb`z\xbcB\xf5\x1e\xf0rJQ\xd7?\x9e#_\t\x15\x0c\n\xc0\xc5l\x14\xcat\x8a\xb7\x8c\x9b\xf49\x85=\x0c\x9e\xa2\x87\x8f\xb8\xa5H\x8e\xc4;]\x8e\xe3\xaf\x9e\xbf(\xc3\xed}\xc2\x96\x01\xf7$\x04g#\x93\xa6\xbb\xd5\xbb0\xe9\xabW\xd6\xc0,\x14\xa0\xe9\xb6\xa5\xdb\x8d^\xdeU}\n\xea\xbe\xe0~\xe4\xd6\x85\xa2,x\x8c\x02\xb5\x84D\x8c\xa9\xb7\xff\x14\xe9$\xa9n\xcb\x95KmTe\x9a$\x9a\xbbH\xcb\x80(W\x8c\xc4\x0cw1\xf4\xde\x84\x8d\'\xdf\x8c\x83zF\xf7\xefl\x9e\xa6=\x85\x02h\x16\x8c\xe7Z\x8a\xc7v\x84"v\x81\x17.\xe3\xa1\xd9]\x8as\xc6\\\x05>\xa5w\x9b\xd2\xbd`\xfex\\j1J\\\xe8\xf5\x90K\xf8\x15\xaa\xb5D\x19\x8c\xfdR~\xef\xde\r\xbe\x93_\x99 U\xb6\xcf\xf76\xa0\xd1\x9069.\xe8g|\x83V\xf9zsb\xcd\x91\xf8\x80\xcfV\n\xd6\x93\xc1\xea\x84\xe7RC\x85G\xf5\x16@\x99\x8d\xd8\x04qf;R\xad\xe3\xdc\xf9\x9a\xe7Uii\xef\x084\xfd\x9e\xe1\xe0\x14\xf9\xe5\xe77\x9c5\xa3)\xe3\xe8\x8e\xb2\xe5\xdb:R"\x99\xb3\xfb\xc9\xa2\x9d+\x0c\x83\x13\x81-%f\xbfa\x8b2\xeeX\xfe\xfb\xd7d|\xdfN\x04\xe2\xc3\x96\xab\xebV\xe2*\x8aT\x86\xea"?H=\x9d\xe5V\xc1\x97\xb1\xd6P\xe6m\xf4\x088*\x93B\xf8\xf8s\x90,\xa7PHh\x13w\x97\x84\xfdO\x9a\x16\xed\x883\x11\x81\xcb\nA\x0by\xa7\xd3a2&*\xd4\x97\r\xb5^n\x82b\xe1\\U\xe7L?\xbc\xee\xd9\x08\x9dt\x18$\xe5O\xab\x87Z\xe8R%\xde\xe1\x82\xf8\x92Z\x0c\x83K\x84\t\n\xb4~HU\x02_\xe2\xb8\x80\xdaj\tg\x04l\xf2\xf6\xac\xa2\x9c\x0fg\x1b\xe1\x99\xf6\x9e\xdd:\xde\x17\x03\x03\x04\xbf\x00\x00\x00\x00\x00\x00\x00h+\x069D\x81\x9a\xa6\x93\x1ffn\xa9\x87\xe3d\x96\x92\xd8*\x15\xf6G\xf0\xf951\xc9\xa1\xc7R\xe8c\xa2\xae\x9ei$\x86\x8f\xc6\xbc\x9f\xa2!~\x8dn\x18\xb1\xc5\x1d\x01#\x98 ?\xa2c{\xa5XD\xa1No.Q\x0c\xb0\x14\x01q\xba\xe3\x97E\n\xbdH\xcb\x91a\xf23\x82P\x05\xcaY\xe0\x03\x95\x85]\xaf#\xd5_\x8e\x13\x83ht\n}\x934\xd3\xe3l\xf3o$$\x96}\x00m\x08|Il[\x1f\x89\xf1\x03\x19?#\xe3\xcfJ\xb6\x83\xd9\xd9\xdc\x1a\xbbd\xc0;\x0e*\x11O\xa6O\x9c)\x10\r\x0e\x89\xfc\xb7\xfa\\\xb4M\xc5\xba\x8d\xee\x8f\x80\x1f{\xe9L\xda\x95\xe4%\xff\xe73$\xb47\xd4\x97\x8a\xec\x11\xee\xa8-<\xac\xe8\xacR\x84\xf2\xcb\x02\x02mI\x94Hy\xaaFlK\x11\xbaf\xb5CI1\x9er\xf7\x7f\xa5\x8bkf@O\xdd7[\xed\x04i\x93\xc3\xde\xfb\xdaCf\xa1\x8ft\xf2\xa4U^[\xe7\xff\x13?\x90e\xa1\xae9\xd8\xc0y\x0f\xf1\xf6u\ro^t\xc8[*\xcd\x00\xc6\x87\xac%4\xb6\x98\xc8\xc2-\x97qC3&\x82\xdd\xe2\x0f\xd19\xb9\xa8\x90\x96\xa3\xb2\xe6\xc8#\x86\xdaT\xa2\xd6n\x8f\xbf\x91\x9f\xeb\xbd\xce:J\xd9aP\xd4<8p\x92\xc2\x13Eo&\x85\xcfu\xe3\'D\t\xfaM`\x01x\xe06\x18T\xa4\xd9\x858\xbe\xa0w\xa2wq\x1c\xf6\xd43a\x00\xbd\xa0o\xfd\x06\xd90U\x8e\xb2V\x14\x08 <\xff\xa9y:\xde\xd1p\xc9}#\x0e\xa5\xfbA"7\x89\xd5\xd1\xaf\xd6\x82\xbe 
\x8a\x92E\x01\xb0T\xea:U\xa4t\xbcm><j\xc3\xb0\'\x19g\x95S\x83\x93\x12\x8b\x1eF;TCJ\x95n\x01\xfdt\xe8\x84CM\xc5\x01\x9a\x8c\xdf\x84{\x96e\xee\x83\xd3>7\xb3\x9af\xc6aQN]\xf3\x93b\x95%q\xf9\xe653\x96I\xe9FO\x92\xa7\xec\xb1w\xbd\x01.v\xdb\x03\xad\xa2\xba\xb3\xea\xfa\xaf\xd7L\xaa\xb7\x0eZ\xba\x97\xb8s\x922\xf2\xd8[B\x8e\xd5\xdd$\xa2\xac\xec\xf4\xca\xe0\xf3\x18\xff\x8c\x17\xb4\xe2K\xb08)\xc3\xcfW\xc1te\xb31\x05,\xc8\x03i%{')
```
Another one, triggered by an [AppVeyor build](https://ci.appveyor.com/project/secdev/scapy/builds/23837282/job/t0k08m71ledk5e32) and isolated thanks to UTscapy analysis:
```
Ether(hex_bytes('00155dfb587a00155dfb58430800450005dc54d3400070065564400410d40a00000d01bb044e8b86744e16063ac45010faf06ba9000016030317c30200005503035cb336a067d53a5d2cedbdfec666ac740afbd0637ddd13eddeab768c3c63abee20981a0000d245f1c905b329323ad67127cd4b907a49f775c331d0794149aca7cdc02800000d0005000000170000ff010001000b000ec6000ec300090530820901308206e9a00302010202132000036e72aded906765595fae000000036e72300d06092a864886f70d01010b050030818b310b3009060355040613025553311330110603550408130a57617368696e67746f6e3110300e060355040713075265646d6f6e64311e301c060355040a13154d6963726f736f667420436f72706f726174696f6e31153013060355040b130c4d6963726f736f6674204954311e301c060355040313154d6963726f736f667420495420544c532043412032301e170d3138303632393139303635355a170d3230303632393139303635355a301b3119301706035504030c102a2e6c6f67696e2e6c6976652e636f6d30820122300d06092a864886f70d01010105000382010f003082010a0282010100bd1d4afe8064b0b1296b6de1b1983ed1a9468770757eea26ff0a2ecde5c789a745becfdc9d07318c113cf8840f57c9379be097563dfbda7d124a4b3c2aea139ab5d9c281e19f74288aa0b33a078bc67ae70fa02083addf4d0a49e36764238d396b718d227cc5965d5f5e22aa40e0d8c6a45595f0ac0b173af5559fc83abf31d16433f29b91b61e5dabad5c8c92110a80209796937cf2836c35815d9d6b5527972a94608e03a24faf2dacc9f17a177e34061e3ef321602bf92809e473bf08ae5a97faae517f7f78e5908aa4c0a278b2ebfb3e15c544d8aa7e12d5e655f9d9bdc02e0b2adcdb3c641d94d04c32ffe10f569bf5ca48cda8b7da14bd7dea95089a210203010001a38204cb308204c7308201f3060a2b06010401d679020402048201e3048201df01dd007500a4b90990b418581487bb13a2cc67700a3c359804f91bdfb8e377cd0ec80ddc10000001644cfa2b4f00000403004630440220638bcbfa1bf0c762296edac11109c8fc53c08b04c5c7e33065467b09dbb1ffb602207c9f0ea8006081f217d8fd1d81ea62589f5a464aa04c9b0bb0c2d67706d3a57d007500ddeb1d2b7a0d4fa6208b81ad8168707e2e8e9d01d55c888d3d11c4cdb6ecbecc000001644cfa2d91000004030046304402200c24fb3b1f4ac6776924568e15d8f57492f7822af4a6cb1a2fce3361634c946802202a24cf48ec44b189700713d95a9237b7b64f067306df29e686eb0e14b285451c0075005ea773f9df56c0e7b536487dd049e0327a919a0c84a112128418759681714558000001644cfa30c90000040300463044022036b8efe060ff0e40d343953e2dcfc5d78126cbc8fa30c6e7c4a5bf3335da29c002207d6002e4f618df32c583af2669da4e16e05b590b622bf89a0a3d3dfdf615e50e007600bc78e1dfc5f63c684649334da10fa15f0979692009c081b4f3f6917f3ed9b8a5000001644cfa2c630000040300473045022027f9b69d95d18a378830b5c2a1b1df0e84d89bf7da52fffe9f7157d624733638022100d65fba5f0e74d63579be1cd5c7a2bce6b4195400539297329911384d7918c72b302706092b060104018237150a041a3018300a06082b06010505070302300a06082b06010505070301303e06092b06010401823715070431302f06272b060104018237150887da867583eed90182c9851b81b59e6185f4eb60815d84d2df4282e7937a02016402011d30818506082b0601050507010104793077305106082b060105050730028645687474703a2f2f7777772e6d6963726f736f66742e636f6d2f706b692f6d73636f72702f4d6963726f736f66742532304954253230544c532532304341253230322e637274302206082b060105050730018616687474703a2f2f6f6373702e6d736f6373702e636f6d301d0603551d0e041604141154417dbc650ff6a11bb27d015e559bce915a6b300b0603551d0f0404030204b030730603551d11046c306a822070'))
```
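To replay either capture outside of `sniff()`, a minimal sketch (assumptions: the TLS layer is loaded and `conf.debug_dissector` is enabled so dissection errors propagate instead of being swallowed into a `Raw` layer):
```python
from scapy.config import conf
from scapy.layers.l2 import Ether
from scapy.main import load_layer

load_layer("tls")            # register the TLS dissectors (TCP port 443)
conf.debug_dissector = True  # re-raise dissection exceptions

raw_bytes = b"..."           # placeholder: paste either byte string above
pkt = Ether(raw_bytes)       # pre-fix, raises "Unable to import certificate"
```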
| [
{
"content": "# This file is part of Scapy\n# Copyright (C) 2017 Maxence Tury\n# This program is published under a GPLv2 license\n\n\"\"\"\nTLS handshake extensions.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport struct\n\nfrom scapy.fields import ByteEnumField, ByteField, EnumField, FieldLenField, \\\n FieldListField, IntField, PacketField, PacketListField, ShortEnumField, \\\n ShortField, StrFixedLenField, StrLenField, XStrLenField\nfrom scapy.packet import Packet, Raw, Padding\nfrom scapy.layers.x509 import X509_Extensions\nfrom scapy.layers.tls.basefields import _tls_version\nfrom scapy.layers.tls.keyexchange import (SigAndHashAlgsLenField,\n SigAndHashAlgsField, _tls_hash_sig)\nfrom scapy.layers.tls.session import _GenericTLSSessionInheritance\nfrom scapy.layers.tls.crypto.groups import _tls_named_groups\nfrom scapy.themes import AnsiColorTheme\nfrom scapy.compat import raw\nfrom scapy.config import conf\n\n\n_tls_ext = {0: \"server_name\", # RFC 4366\n 1: \"max_fragment_length\", # RFC 4366\n 2: \"client_certificate_url\", # RFC 4366\n 3: \"trusted_ca_keys\", # RFC 4366\n 4: \"truncated_hmac\", # RFC 4366\n 5: \"status_request\", # RFC 4366\n 6: \"user_mapping\", # RFC 4681\n 7: \"client_authz\", # RFC 5878\n 8: \"server_authz\", # RFC 5878\n 9: \"cert_type\", # RFC 6091\n # 10: \"elliptic_curves\", # RFC 4492\n 10: \"supported_groups\",\n 11: \"ec_point_formats\", # RFC 4492\n 13: \"signature_algorithms\", # RFC 5246\n 0x0f: \"heartbeat\", # RFC 6520\n 0x10: \"alpn\", # RFC 7301\n 0x12: \"signed_certificate_timestamp\", # RFC 6962\n 0x15: \"padding\", # RFC 7685\n 0x16: \"encrypt_then_mac\", # RFC 7366\n 0x17: \"extended_master_secret\", # RFC 7627\n 0x23: \"session_ticket\", # RFC 5077\n 0x28: \"key_share\",\n 0x29: \"pre_shared_key\",\n 0x2a: \"early_data\",\n 0x2b: \"supported_versions\",\n 0x2c: \"cookie\",\n 0x2d: \"psk_key_exchange_modes\",\n 0x2e: \"ticket_early_data_info\",\n 0x2f: \"certificate_authorities\",\n 0x30: \"oid_filters\",\n 0x3374: \"next_protocol_negotiation\",\n # RFC-draft-agl-tls-nextprotoneg-03\n 0xff01: \"renegotiation_info\" # RFC 5746\n }\n\n\nclass TLS_Ext_Unknown(_GenericTLSSessionInheritance):\n \"\"\"\n We put this here rather than in extensions.py in order to avoid\n circular imports...\n \"\"\"\n name = \"TLS Extension - Scapy Unknown\"\n fields_desc = [ShortEnumField(\"type\", None, _tls_ext),\n FieldLenField(\"len\", None, fmt=\"!H\", length_of=\"val\"),\n StrLenField(\"val\", \"\",\n length_from=lambda pkt: pkt.len)]\n\n def post_build(self, p, pay):\n if self.len is None:\n tmp_len = len(p) - 4\n p = p[:2] + struct.pack(\"!H\", tmp_len) + p[4:]\n return p + pay\n\n\n###############################################################################\n# ClientHello/ServerHello extensions #\n###############################################################################\n\n# We provide these extensions mostly for packet manipulation purposes.\n# For now, most of them are not considered by our automaton.\n\nclass TLS_Ext_PrettyPacketList(TLS_Ext_Unknown):\n \"\"\"\n Dummy extension used for server_name/ALPN/NPN for a lighter representation:\n the final field is showed as a 1-line list rather than as lots of packets.\n XXX Define a new condition for packet lists in Packet._show_or_dump?\n \"\"\"\n\n def _show_or_dump(self, dump=False, indent=3,\n lvl=\"\", label_lvl=\"\", first_call=True):\n \"\"\" Reproduced from packet.py \"\"\"\n ct = AnsiColorTheme() if dump else conf.color_theme\n s = \"%s%s %s %s \\n\" % (label_lvl, ct.punct(\"###[\"),\n 
ct.layer_name(self.name), ct.punct(\"]###\"))\n for f in self.fields_desc[:-1]:\n ncol = ct.field_name\n vcol = ct.field_value\n fvalue = self.getfieldval(f.name)\n begn = \"%s %-10s%s \" % (label_lvl + lvl, ncol(f.name),\n ct.punct(\"=\"),)\n reprval = f.i2repr(self, fvalue)\n if isinstance(reprval, str):\n reprval = reprval.replace(\"\\n\", \"\\n\" + \" \" * (len(label_lvl) +\n len(lvl) +\n len(f.name) +\n 4))\n s += \"%s%s\\n\" % (begn, vcol(reprval))\n f = self.fields_desc[-1]\n ncol = ct.field_name\n vcol = ct.field_value\n fvalue = self.getfieldval(f.name)\n begn = \"%s %-10s%s \" % (label_lvl + lvl, ncol(f.name), ct.punct(\"=\"),)\n reprval = f.i2repr(self, fvalue)\n if isinstance(reprval, str):\n reprval = reprval.replace(\"\\n\", \"\\n\" + \" \" * (len(label_lvl) +\n len(lvl) +\n len(f.name) +\n 4))\n s += \"%s%s\\n\" % (begn, vcol(reprval))\n if self.payload:\n s += self.payload._show_or_dump(dump=dump, indent=indent,\n lvl=lvl + (\" \" * indent * self.show_indent), # noqa: E501\n label_lvl=label_lvl, first_call=False) # noqa: E501\n\n if first_call and not dump:\n print(s)\n else:\n return s\n\n\n_tls_server_name_types = {0: \"host_name\"}\n\n\nclass ServerName(Packet):\n name = \"HostName\"\n fields_desc = [ByteEnumField(\"nametype\", 0, _tls_server_name_types),\n FieldLenField(\"namelen\", None, length_of=\"servername\"),\n StrLenField(\"servername\", \"\",\n length_from=lambda pkt: pkt.namelen)]\n\n def guess_payload_class(self, p):\n return Padding\n\n\nclass ServerListField(PacketListField):\n def i2repr(self, pkt, x):\n res = [p.servername for p in x]\n return \"[%s]\" % b\", \".join(res)\n\n\nclass ServerLenField(FieldLenField):\n \"\"\"\n There is no length when there are no servernames (as in a ServerHello).\n \"\"\"\n\n def addfield(self, pkt, s, val):\n if not val:\n if not pkt.servernames:\n return s\n return super(ServerLenField, self).addfield(pkt, s, val)\n\n\nclass TLS_Ext_ServerName(TLS_Ext_PrettyPacketList): # RFC 4366\n name = \"TLS Extension - Server Name\"\n fields_desc = [ShortEnumField(\"type\", 0, _tls_ext),\n FieldLenField(\"len\", None, length_of=\"servernames\",\n adjust=lambda pkt, x: x + 2),\n ServerLenField(\"servernameslen\", None,\n length_of=\"servernames\"),\n ServerListField(\"servernames\", [], ServerName,\n length_from=lambda pkt: pkt.servernameslen)]\n\n\nclass TLS_Ext_MaxFragLen(TLS_Ext_Unknown): # RFC 4366\n name = \"TLS Extension - Max Fragment Length\"\n fields_desc = [ShortEnumField(\"type\", 1, _tls_ext),\n ShortField(\"len\", None),\n ByteEnumField(\"maxfraglen\", 4, {1: \"2^9\",\n 2: \"2^10\",\n 3: \"2^11\",\n 4: \"2^12\"})]\n\n\nclass TLS_Ext_ClientCertURL(TLS_Ext_Unknown): # RFC 4366\n name = \"TLS Extension - Client Certificate URL\"\n fields_desc = [ShortEnumField(\"type\", 2, _tls_ext),\n ShortField(\"len\", None)]\n\n\n_tls_trusted_authority_types = {0: \"pre_agreed\",\n 1: \"key_sha1_hash\",\n 2: \"x509_name\",\n 3: \"cert_sha1_hash\"}\n\n\nclass TAPreAgreed(Packet):\n name = \"Trusted authority - pre_agreed\"\n fields_desc = [ByteEnumField(\"idtype\", 0, _tls_trusted_authority_types)]\n\n def guess_payload_class(self, p):\n return Padding\n\n\nclass TAKeySHA1Hash(Packet):\n name = \"Trusted authority - key_sha1_hash\"\n fields_desc = [ByteEnumField(\"idtype\", 1, _tls_trusted_authority_types),\n StrFixedLenField(\"id\", None, 20)]\n\n def guess_payload_class(self, p):\n return Padding\n\n\nclass TAX509Name(Packet):\n \"\"\"\n XXX Section 3.4 of RFC 4366. 
Implement a more specific DNField\n rather than current StrLenField.\n \"\"\"\n name = \"Trusted authority - x509_name\"\n fields_desc = [ByteEnumField(\"idtype\", 2, _tls_trusted_authority_types),\n FieldLenField(\"dnlen\", None, length_of=\"dn\"),\n StrLenField(\"dn\", \"\", length_from=lambda pkt: pkt.dnlen)]\n\n def guess_payload_class(self, p):\n return Padding\n\n\nclass TACertSHA1Hash(Packet):\n name = \"Trusted authority - cert_sha1_hash\"\n fields_desc = [ByteEnumField(\"idtype\", 3, _tls_trusted_authority_types),\n StrFixedLenField(\"id\", None, 20)]\n\n def guess_payload_class(self, p):\n return Padding\n\n\n_tls_trusted_authority_cls = {0: TAPreAgreed,\n 1: TAKeySHA1Hash,\n 2: TAX509Name,\n 3: TACertSHA1Hash}\n\n\nclass _TAListField(PacketListField):\n \"\"\"\n Specific version that selects the right Trusted Authority (previous TA*)\n class to be used for dissection based on idtype.\n \"\"\"\n\n def m2i(self, pkt, m):\n idtype = ord(m[0])\n cls = self.cls\n if idtype in _tls_trusted_authority_cls:\n cls = _tls_trusted_authority_cls[idtype]\n return cls(m)\n\n\nclass TLS_Ext_TrustedCAInd(TLS_Ext_Unknown): # RFC 4366\n name = \"TLS Extension - Trusted CA Indication\"\n fields_desc = [ShortEnumField(\"type\", 3, _tls_ext),\n ShortField(\"len\", None),\n FieldLenField(\"talen\", None, length_of=\"ta\"),\n _TAListField(\"ta\", [], Raw,\n length_from=lambda pkt: pkt.talen)]\n\n\nclass TLS_Ext_TruncatedHMAC(TLS_Ext_Unknown): # RFC 4366\n name = \"TLS Extension - Truncated HMAC\"\n fields_desc = [ShortEnumField(\"type\", 4, _tls_ext),\n ShortField(\"len\", None)]\n\n\nclass ResponderID(Packet):\n name = \"Responder ID structure\"\n fields_desc = [FieldLenField(\"respidlen\", None, length_of=\"respid\"),\n StrLenField(\"respid\", \"\",\n length_from=lambda pkt: pkt.respidlen)]\n\n def guess_payload_class(self, p):\n return Padding\n\n\nclass OCSPStatusRequest(Packet):\n \"\"\"\n This is the structure defined in RFC 6066, not in RFC 6960!\n \"\"\"\n name = \"OCSPStatusRequest structure\"\n fields_desc = [FieldLenField(\"respidlen\", None, length_of=\"respid\"),\n PacketListField(\"respid\", [], ResponderID,\n length_from=lambda pkt: pkt.respidlen),\n FieldLenField(\"reqextlen\", None, length_of=\"reqext\"),\n PacketField(\"reqext\", \"\", X509_Extensions)]\n\n def guess_payload_class(self, p):\n return Padding\n\n\n_cert_status_type = {1: \"ocsp\"}\n_cert_status_req_cls = {1: OCSPStatusRequest}\n\n\nclass _StatusReqField(PacketListField):\n def m2i(self, pkt, m):\n idtype = pkt.stype\n cls = self.cls\n if idtype in _cert_status_req_cls:\n cls = _cert_status_req_cls[idtype]\n return cls(m)\n\n\nclass TLS_Ext_CSR(TLS_Ext_Unknown): # RFC 4366\n name = \"TLS Extension - Certificate Status Request\"\n fields_desc = [ShortEnumField(\"type\", 5, _tls_ext),\n ShortField(\"len\", None),\n ByteEnumField(\"stype\", None, _cert_status_type),\n _StatusReqField(\"req\", [], Raw,\n length_from=lambda pkt: pkt.len - 1)]\n\n\nclass TLS_Ext_UserMapping(TLS_Ext_Unknown): # RFC 4681\n name = \"TLS Extension - User Mapping\"\n fields_desc = [ShortEnumField(\"type\", 6, _tls_ext),\n ShortField(\"len\", None),\n FieldLenField(\"umlen\", None, fmt=\"B\", length_of=\"um\"),\n FieldListField(\"um\", [],\n ByteField(\"umtype\", 0),\n length_from=lambda pkt: pkt.umlen)]\n\n\nclass TLS_Ext_ClientAuthz(TLS_Ext_Unknown): # RFC 5878\n \"\"\" XXX Unsupported \"\"\"\n name = \"TLS Extension - Client Authz\"\n fields_desc = [ShortEnumField(\"type\", 7, _tls_ext),\n ShortField(\"len\", None),\n ]\n\n\nclass 
TLS_Ext_ServerAuthz(TLS_Ext_Unknown): # RFC 5878\n \"\"\" XXX Unsupported \"\"\"\n name = \"TLS Extension - Server Authz\"\n fields_desc = [ShortEnumField(\"type\", 8, _tls_ext),\n ShortField(\"len\", None),\n ]\n\n\n_tls_cert_types = {0: \"X.509\", 1: \"OpenPGP\"}\n\n\nclass TLS_Ext_ClientCertType(TLS_Ext_Unknown): # RFC 5081\n name = \"TLS Extension - Certificate Type (client version)\"\n fields_desc = [ShortEnumField(\"type\", 9, _tls_ext),\n ShortField(\"len\", None),\n FieldLenField(\"ctypeslen\", None, length_of=\"ctypes\"),\n FieldListField(\"ctypes\", [0, 1],\n ByteEnumField(\"certtypes\", None,\n _tls_cert_types),\n length_from=lambda pkt: pkt.ctypeslen)]\n\n\nclass TLS_Ext_ServerCertType(TLS_Ext_Unknown): # RFC 5081\n name = \"TLS Extension - Certificate Type (server version)\"\n fields_desc = [ShortEnumField(\"type\", 9, _tls_ext),\n ShortField(\"len\", None),\n ByteEnumField(\"ctype\", None, _tls_cert_types)]\n\n\ndef _TLS_Ext_CertTypeDispatcher(m, *args, **kargs):\n \"\"\"\n We need to select the correct one on dissection. We use the length for\n that, as 1 for client version would emply an empty list.\n \"\"\"\n tmp_len = struct.unpack(\"!H\", m[2:4])[0]\n if tmp_len == 1:\n cls = TLS_Ext_ServerCertType\n else:\n cls = TLS_Ext_ClientCertType\n return cls(m, *args, **kargs)\n\n\nclass TLS_Ext_SupportedGroups(TLS_Ext_Unknown):\n \"\"\"\n This extension was known as 'Supported Elliptic Curves' before TLS 1.3\n merged both group selection mechanisms for ECDH and FFDH.\n \"\"\"\n name = \"TLS Extension - Supported Groups\"\n fields_desc = [ShortEnumField(\"type\", 10, _tls_ext),\n ShortField(\"len\", None),\n FieldLenField(\"groupslen\", None, length_of=\"groups\"),\n FieldListField(\"groups\", [],\n ShortEnumField(\"ng\", None,\n _tls_named_groups),\n length_from=lambda pkt: pkt.groupslen)]\n\n\nclass TLS_Ext_SupportedEllipticCurves(TLS_Ext_SupportedGroups): # RFC 4492\n pass\n\n\n_tls_ecpoint_format = {0: \"uncompressed\",\n 1: \"ansiX962_compressed_prime\",\n 2: \"ansiX962_compressed_char2\"}\n\n\nclass TLS_Ext_SupportedPointFormat(TLS_Ext_Unknown): # RFC 4492\n name = \"TLS Extension - Supported Point Format\"\n fields_desc = [ShortEnumField(\"type\", 11, _tls_ext),\n ShortField(\"len\", None),\n FieldLenField(\"ecpllen\", None, fmt=\"B\", length_of=\"ecpl\"),\n FieldListField(\"ecpl\", [0],\n ByteEnumField(\"nc\", None,\n _tls_ecpoint_format),\n length_from=lambda pkt: pkt.ecpllen)]\n\n\nclass TLS_Ext_SignatureAlgorithms(TLS_Ext_Unknown): # RFC 5246\n name = \"TLS Extension - Signature Algorithms\"\n fields_desc = [ShortEnumField(\"type\", 13, _tls_ext),\n ShortField(\"len\", None),\n SigAndHashAlgsLenField(\"sig_algs_len\", None,\n length_of=\"sig_algs\"),\n SigAndHashAlgsField(\"sig_algs\", [],\n EnumField(\"hash_sig\", None,\n _tls_hash_sig),\n length_from=lambda pkt: pkt.sig_algs_len)] # noqa: E501\n\n\nclass TLS_Ext_Heartbeat(TLS_Ext_Unknown): # RFC 6520\n name = \"TLS Extension - Heartbeat\"\n fields_desc = [ShortEnumField(\"type\", 0x0f, _tls_ext),\n ShortField(\"len\", None),\n ByteEnumField(\"heartbeat_mode\", 2,\n {1: \"peer_allowed_to_send\",\n 2: \"peer_not_allowed_to_send\"})]\n\n\nclass ProtocolName(Packet):\n name = \"Protocol Name\"\n fields_desc = [FieldLenField(\"len\", None, fmt='B', length_of=\"protocol\"),\n StrLenField(\"protocol\", \"\",\n length_from=lambda pkt: pkt.len)]\n\n def guess_payload_class(self, p):\n return Padding\n\n\nclass ProtocolListField(PacketListField):\n def i2repr(self, pkt, x):\n res = [p.protocol for p in x]\n return \"[%s]\" % 
b\", \".join(res)\n\n\nclass TLS_Ext_ALPN(TLS_Ext_PrettyPacketList): # RFC 7301\n name = \"TLS Extension - Application Layer Protocol Negotiation\"\n fields_desc = [ShortEnumField(\"type\", 0x10, _tls_ext),\n ShortField(\"len\", None),\n FieldLenField(\"protocolslen\", None, length_of=\"protocols\"),\n ProtocolListField(\"protocols\", [], ProtocolName,\n length_from=lambda pkt:pkt.protocolslen)]\n\n\nclass TLS_Ext_Padding(TLS_Ext_Unknown): # RFC 7685\n name = \"TLS Extension - Padding\"\n fields_desc = [ShortEnumField(\"type\", 0x15, _tls_ext),\n FieldLenField(\"len\", None, length_of=\"padding\"),\n StrLenField(\"padding\", \"\",\n length_from=lambda pkt: pkt.len)]\n\n\nclass TLS_Ext_EncryptThenMAC(TLS_Ext_Unknown): # RFC 7366\n name = \"TLS Extension - Encrypt-then-MAC\"\n fields_desc = [ShortEnumField(\"type\", 0x16, _tls_ext),\n ShortField(\"len\", None)]\n\n\nclass TLS_Ext_ExtendedMasterSecret(TLS_Ext_Unknown): # RFC 7627\n name = \"TLS Extension - Extended Master Secret\"\n fields_desc = [ShortEnumField(\"type\", 0x17, _tls_ext),\n ShortField(\"len\", None)]\n\n\nclass TLS_Ext_SessionTicket(TLS_Ext_Unknown): # RFC 5077\n \"\"\"\n RFC 5077 updates RFC 4507 according to most implementations, which do not\n use another (useless) 'ticketlen' field after the global 'len' field.\n \"\"\"\n name = \"TLS Extension - Session Ticket\"\n fields_desc = [ShortEnumField(\"type\", 0x23, _tls_ext),\n FieldLenField(\"len\", None, length_of=\"ticket\"),\n StrLenField(\"ticket\", \"\",\n length_from=lambda pkt: pkt.len)]\n\n\nclass TLS_Ext_KeyShare(TLS_Ext_Unknown):\n name = \"TLS Extension - Key Share (dummy class)\"\n fields_desc = [ShortEnumField(\"type\", 0x28, _tls_ext),\n ShortField(\"len\", None)]\n\n\nclass TLS_Ext_PreSharedKey(TLS_Ext_Unknown):\n name = \"TLS Extension - Pre Shared Key (dummy class)\"\n fields_desc = [ShortEnumField(\"type\", 0x29, _tls_ext),\n ShortField(\"len\", None)]\n\n\nclass TLS_Ext_EarlyData(TLS_Ext_Unknown):\n name = \"TLS Extension - Early Data\"\n fields_desc = [ShortEnumField(\"type\", 0x2a, _tls_ext),\n ShortField(\"len\", None)]\n\n\nclass TLS_Ext_SupportedVersions(TLS_Ext_Unknown):\n name = \"TLS Extension - Supported Versions\"\n fields_desc = [ShortEnumField(\"type\", 0x2b, _tls_ext),\n ShortField(\"len\", None),\n FieldLenField(\"versionslen\", None, fmt='B',\n length_of=\"versions\"),\n FieldListField(\"versions\", [],\n ShortEnumField(\"version\", None,\n _tls_version),\n length_from=lambda pkt: pkt.versionslen)]\n\n\nclass TLS_Ext_Cookie(TLS_Ext_Unknown):\n name = \"TLS Extension - Cookie\"\n fields_desc = [ShortEnumField(\"type\", 0x2c, _tls_ext),\n ShortField(\"len\", None),\n FieldLenField(\"cookielen\", None, length_of=\"cookie\"),\n XStrLenField(\"cookie\", \"\",\n length_from=lambda pkt: pkt.cookielen)]\n\n\n_tls_psk_kx_modes = {0: \"psk_ke\", 1: \"psk_dhe_ke\"}\n\n\nclass TLS_Ext_PSKKeyExchangeModes(TLS_Ext_Unknown):\n name = \"TLS Extension - PSK Key Exchange Modes\"\n fields_desc = [ShortEnumField(\"type\", 0x2d, _tls_ext),\n ShortField(\"len\", None),\n FieldLenField(\"kxmodeslen\", None, fmt='B',\n length_of=\"kxmodes\"),\n FieldListField(\"kxmodes\", [],\n ByteEnumField(\"kxmode\", None,\n _tls_psk_kx_modes),\n length_from=lambda pkt: pkt.kxmodeslen)]\n\n\nclass TLS_Ext_TicketEarlyDataInfo(TLS_Ext_Unknown):\n name = \"TLS Extension - Ticket Early Data Info\"\n fields_desc = [ShortEnumField(\"type\", 0x2e, _tls_ext),\n ShortField(\"len\", None),\n IntField(\"max_early_data_size\", 0)]\n\n\nclass TLS_Ext_NPN(TLS_Ext_PrettyPacketList):\n \"\"\"\n 
Defined in RFC-draft-agl-tls-nextprotoneg-03. Deprecated in favour of ALPN.\n \"\"\"\n name = \"TLS Extension - Next Protocol Negotiation\"\n fields_desc = [ShortEnumField(\"type\", 0x3374, _tls_ext),\n FieldLenField(\"len\", None, length_of=\"protocols\"),\n ProtocolListField(\"protocols\", [], ProtocolName,\n length_from=lambda pkt:pkt.len)]\n\n\nclass TLS_Ext_RenegotiationInfo(TLS_Ext_Unknown): # RFC 5746\n name = \"TLS Extension - Renegotiation Indication\"\n fields_desc = [ShortEnumField(\"type\", 0xff01, _tls_ext),\n ShortField(\"len\", None),\n FieldLenField(\"reneg_conn_len\", None, fmt='B',\n length_of=\"renegotiated_connection\"),\n StrLenField(\"renegotiated_connection\", \"\",\n length_from=lambda pkt: pkt.reneg_conn_len)]\n\n\n_tls_ext_cls = {0: TLS_Ext_ServerName,\n 1: TLS_Ext_MaxFragLen,\n 2: TLS_Ext_ClientCertURL,\n 3: TLS_Ext_TrustedCAInd,\n 4: TLS_Ext_TruncatedHMAC,\n 5: TLS_Ext_CSR,\n 6: TLS_Ext_UserMapping,\n 7: TLS_Ext_ClientAuthz,\n 8: TLS_Ext_ServerAuthz,\n 9: _TLS_Ext_CertTypeDispatcher,\n # 10: TLS_Ext_SupportedEllipticCurves,\n 10: TLS_Ext_SupportedGroups,\n 11: TLS_Ext_SupportedPointFormat,\n 13: TLS_Ext_SignatureAlgorithms,\n 0x0f: TLS_Ext_Heartbeat,\n 0x10: TLS_Ext_ALPN,\n 0x15: TLS_Ext_Padding,\n 0x16: TLS_Ext_EncryptThenMAC,\n 0x17: TLS_Ext_ExtendedMasterSecret,\n 0x23: TLS_Ext_SessionTicket,\n 0x28: TLS_Ext_KeyShare,\n 0x29: TLS_Ext_PreSharedKey,\n 0x2a: TLS_Ext_EarlyData,\n 0x2b: TLS_Ext_SupportedVersions,\n 0x2c: TLS_Ext_Cookie,\n 0x2d: TLS_Ext_PSKKeyExchangeModes,\n 0x2e: TLS_Ext_TicketEarlyDataInfo,\n # 0x2f: TLS_Ext_CertificateAuthorities, #XXX\n # 0x30: TLS_Ext_OIDFilters, #XXX\n 0x3374: TLS_Ext_NPN,\n 0xff01: TLS_Ext_RenegotiationInfo\n }\n\n\nclass _ExtensionsLenField(FieldLenField):\n def getfield(self, pkt, s):\n \"\"\"\n We try to compute a length, usually from a msglen parsed earlier.\n If this length is 0, we consider 'selection_present' (from RFC 5246)\n to be False. This means that there should not be any length field.\n However, with TLS 1.3, zero lengths are always explicit.\n \"\"\"\n ext = pkt.get_field(self.length_of)\n tmp_len = ext.length_from(pkt)\n if tmp_len is None or tmp_len <= 0:\n v = pkt.tls_session.tls_version\n if v is None or v < 0x0304:\n return s, None\n return super(_ExtensionsLenField, self).getfield(pkt, s)\n\n def addfield(self, pkt, s, i):\n \"\"\"\n There is a hack with the _ExtensionsField.i2len. It works only because\n we expect _ExtensionsField.i2m to return a string of the same size (if\n not of the same value) upon successive calls (e.g. 
through i2len here,\n then i2m when directly building the _ExtensionsField).\n\n XXX A proper way to do this would be to keep the extensions built from\n the i2len call here, instead of rebuilding them later on.\n \"\"\"\n if i is None:\n if self.length_of is not None:\n fld, fval = pkt.getfield_and_val(self.length_of)\n\n tmp = pkt.tls_session.frozen\n pkt.tls_session.frozen = True\n f = fld.i2len(pkt, fval)\n pkt.tls_session.frozen = tmp\n\n i = self.adjust(pkt, f)\n if i == 0: # for correct build if no ext and not explicitly 0\n return s\n return s + struct.pack(self.fmt, i)\n\n\nclass _ExtensionsField(StrLenField):\n islist = 1\n holds_packets = 1\n\n def i2len(self, pkt, i):\n if i is None:\n return 0\n return len(self.i2m(pkt, i))\n\n def getfield(self, pkt, s):\n tmp_len = self.length_from(pkt)\n if tmp_len is None:\n return s, []\n return s[tmp_len:], self.m2i(pkt, s[:tmp_len])\n\n def i2m(self, pkt, i):\n if i is None:\n return b\"\"\n if isinstance(pkt, _GenericTLSSessionInheritance):\n if not pkt.tls_session.frozen:\n s = b\"\"\n for ext in i:\n if isinstance(ext, _GenericTLSSessionInheritance):\n ext.tls_session = pkt.tls_session\n s += ext.raw_stateful()\n else:\n s += raw(ext)\n return s\n return b\"\".join(map(raw, i))\n\n def m2i(self, pkt, m):\n res = []\n while m:\n t = struct.unpack(\"!H\", m[:2])[0]\n tmp_len = struct.unpack(\"!H\", m[2:4])[0]\n cls = _tls_ext_cls.get(t, TLS_Ext_Unknown)\n if cls is TLS_Ext_KeyShare:\n from scapy.layers.tls.keyexchange_tls13 import _tls_ext_keyshare_cls # noqa: E501\n cls = _tls_ext_keyshare_cls.get(pkt.msgtype, TLS_Ext_Unknown)\n elif cls is TLS_Ext_PreSharedKey:\n from scapy.layers.tls.keyexchange_tls13 import _tls_ext_presharedkey_cls # noqa: E501\n cls = _tls_ext_presharedkey_cls.get(pkt.msgtype, TLS_Ext_Unknown) # noqa: E501\n res.append(cls(m[:tmp_len + 4], tls_session=pkt.tls_session))\n m = m[tmp_len + 4:]\n return res\n",
"path": "scapy/layers/tls/extensions.py"
}
] | [
{
"content": "# This file is part of Scapy\n# Copyright (C) 2017 Maxence Tury\n# This program is published under a GPLv2 license\n\n\"\"\"\nTLS handshake extensions.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport struct\n\nfrom scapy.fields import ByteEnumField, ByteField, EnumField, FieldLenField, \\\n FieldListField, IntField, PacketField, PacketListField, ShortEnumField, \\\n ShortField, StrFixedLenField, StrLenField, XStrLenField\nfrom scapy.packet import Packet, Raw, Padding\nfrom scapy.layers.x509 import X509_Extensions\nfrom scapy.layers.tls.basefields import _tls_version\nfrom scapy.layers.tls.keyexchange import (SigAndHashAlgsLenField,\n SigAndHashAlgsField, _tls_hash_sig)\nfrom scapy.layers.tls.session import _GenericTLSSessionInheritance\nfrom scapy.layers.tls.crypto.groups import _tls_named_groups\nfrom scapy.themes import AnsiColorTheme\nfrom scapy.compat import raw\nfrom scapy.config import conf\n\n\n_tls_ext = {0: \"server_name\", # RFC 4366\n 1: \"max_fragment_length\", # RFC 4366\n 2: \"client_certificate_url\", # RFC 4366\n 3: \"trusted_ca_keys\", # RFC 4366\n 4: \"truncated_hmac\", # RFC 4366\n 5: \"status_request\", # RFC 4366\n 6: \"user_mapping\", # RFC 4681\n 7: \"client_authz\", # RFC 5878\n 8: \"server_authz\", # RFC 5878\n 9: \"cert_type\", # RFC 6091\n # 10: \"elliptic_curves\", # RFC 4492\n 10: \"supported_groups\",\n 11: \"ec_point_formats\", # RFC 4492\n 13: \"signature_algorithms\", # RFC 5246\n 0x0f: \"heartbeat\", # RFC 6520\n 0x10: \"alpn\", # RFC 7301\n 0x12: \"signed_certificate_timestamp\", # RFC 6962\n 0x15: \"padding\", # RFC 7685\n 0x16: \"encrypt_then_mac\", # RFC 7366\n 0x17: \"extended_master_secret\", # RFC 7627\n 0x23: \"session_ticket\", # RFC 5077\n 0x28: \"key_share\",\n 0x29: \"pre_shared_key\",\n 0x2a: \"early_data\",\n 0x2b: \"supported_versions\",\n 0x2c: \"cookie\",\n 0x2d: \"psk_key_exchange_modes\",\n 0x2e: \"ticket_early_data_info\",\n 0x2f: \"certificate_authorities\",\n 0x30: \"oid_filters\",\n 0x3374: \"next_protocol_negotiation\",\n # RFC-draft-agl-tls-nextprotoneg-03\n 0xff01: \"renegotiation_info\" # RFC 5746\n }\n\n\nclass TLS_Ext_Unknown(_GenericTLSSessionInheritance):\n \"\"\"\n We put this here rather than in extensions.py in order to avoid\n circular imports...\n \"\"\"\n name = \"TLS Extension - Scapy Unknown\"\n fields_desc = [ShortEnumField(\"type\", None, _tls_ext),\n FieldLenField(\"len\", None, fmt=\"!H\", length_of=\"val\"),\n StrLenField(\"val\", \"\",\n length_from=lambda pkt: pkt.len)]\n\n def post_build(self, p, pay):\n if self.len is None:\n tmp_len = len(p) - 4\n p = p[:2] + struct.pack(\"!H\", tmp_len) + p[4:]\n return p + pay\n\n\n###############################################################################\n# ClientHello/ServerHello extensions #\n###############################################################################\n\n# We provide these extensions mostly for packet manipulation purposes.\n# For now, most of them are not considered by our automaton.\n\nclass TLS_Ext_PrettyPacketList(TLS_Ext_Unknown):\n \"\"\"\n Dummy extension used for server_name/ALPN/NPN for a lighter representation:\n the final field is showed as a 1-line list rather than as lots of packets.\n XXX Define a new condition for packet lists in Packet._show_or_dump?\n \"\"\"\n\n def _show_or_dump(self, dump=False, indent=3,\n lvl=\"\", label_lvl=\"\", first_call=True):\n \"\"\" Reproduced from packet.py \"\"\"\n ct = AnsiColorTheme() if dump else conf.color_theme\n s = \"%s%s %s %s \\n\" % (label_lvl, ct.punct(\"###[\"),\n 
ct.layer_name(self.name), ct.punct(\"]###\"))\n for f in self.fields_desc[:-1]:\n ncol = ct.field_name\n vcol = ct.field_value\n fvalue = self.getfieldval(f.name)\n begn = \"%s %-10s%s \" % (label_lvl + lvl, ncol(f.name),\n ct.punct(\"=\"),)\n reprval = f.i2repr(self, fvalue)\n if isinstance(reprval, str):\n reprval = reprval.replace(\"\\n\", \"\\n\" + \" \" * (len(label_lvl) +\n len(lvl) +\n len(f.name) +\n 4))\n s += \"%s%s\\n\" % (begn, vcol(reprval))\n f = self.fields_desc[-1]\n ncol = ct.field_name\n vcol = ct.field_value\n fvalue = self.getfieldval(f.name)\n begn = \"%s %-10s%s \" % (label_lvl + lvl, ncol(f.name), ct.punct(\"=\"),)\n reprval = f.i2repr(self, fvalue)\n if isinstance(reprval, str):\n reprval = reprval.replace(\"\\n\", \"\\n\" + \" \" * (len(label_lvl) +\n len(lvl) +\n len(f.name) +\n 4))\n s += \"%s%s\\n\" % (begn, vcol(reprval))\n if self.payload:\n s += self.payload._show_or_dump(dump=dump, indent=indent,\n lvl=lvl + (\" \" * indent * self.show_indent), # noqa: E501\n label_lvl=label_lvl, first_call=False) # noqa: E501\n\n if first_call and not dump:\n print(s)\n else:\n return s\n\n\n_tls_server_name_types = {0: \"host_name\"}\n\n\nclass ServerName(Packet):\n name = \"HostName\"\n fields_desc = [ByteEnumField(\"nametype\", 0, _tls_server_name_types),\n FieldLenField(\"namelen\", None, length_of=\"servername\"),\n StrLenField(\"servername\", \"\",\n length_from=lambda pkt: pkt.namelen)]\n\n def guess_payload_class(self, p):\n return Padding\n\n\nclass ServerListField(PacketListField):\n def i2repr(self, pkt, x):\n res = [p.servername for p in x]\n return \"[%s]\" % b\", \".join(res)\n\n\nclass ServerLenField(FieldLenField):\n \"\"\"\n There is no length when there are no servernames (as in a ServerHello).\n \"\"\"\n\n def addfield(self, pkt, s, val):\n if not val:\n if not pkt.servernames:\n return s\n return super(ServerLenField, self).addfield(pkt, s, val)\n\n\nclass TLS_Ext_ServerName(TLS_Ext_PrettyPacketList): # RFC 4366\n name = \"TLS Extension - Server Name\"\n fields_desc = [ShortEnumField(\"type\", 0, _tls_ext),\n FieldLenField(\"len\", None, length_of=\"servernames\",\n adjust=lambda pkt, x: x + 2),\n ServerLenField(\"servernameslen\", None,\n length_of=\"servernames\"),\n ServerListField(\"servernames\", [], ServerName,\n length_from=lambda pkt: pkt.servernameslen)]\n\n\nclass TLS_Ext_MaxFragLen(TLS_Ext_Unknown): # RFC 4366\n name = \"TLS Extension - Max Fragment Length\"\n fields_desc = [ShortEnumField(\"type\", 1, _tls_ext),\n ShortField(\"len\", None),\n ByteEnumField(\"maxfraglen\", 4, {1: \"2^9\",\n 2: \"2^10\",\n 3: \"2^11\",\n 4: \"2^12\"})]\n\n\nclass TLS_Ext_ClientCertURL(TLS_Ext_Unknown): # RFC 4366\n name = \"TLS Extension - Client Certificate URL\"\n fields_desc = [ShortEnumField(\"type\", 2, _tls_ext),\n ShortField(\"len\", None)]\n\n\n_tls_trusted_authority_types = {0: \"pre_agreed\",\n 1: \"key_sha1_hash\",\n 2: \"x509_name\",\n 3: \"cert_sha1_hash\"}\n\n\nclass TAPreAgreed(Packet):\n name = \"Trusted authority - pre_agreed\"\n fields_desc = [ByteEnumField(\"idtype\", 0, _tls_trusted_authority_types)]\n\n def guess_payload_class(self, p):\n return Padding\n\n\nclass TAKeySHA1Hash(Packet):\n name = \"Trusted authority - key_sha1_hash\"\n fields_desc = [ByteEnumField(\"idtype\", 1, _tls_trusted_authority_types),\n StrFixedLenField(\"id\", None, 20)]\n\n def guess_payload_class(self, p):\n return Padding\n\n\nclass TAX509Name(Packet):\n \"\"\"\n XXX Section 3.4 of RFC 4366. 
Implement a more specific DNField\n rather than current StrLenField.\n \"\"\"\n name = \"Trusted authority - x509_name\"\n fields_desc = [ByteEnumField(\"idtype\", 2, _tls_trusted_authority_types),\n FieldLenField(\"dnlen\", None, length_of=\"dn\"),\n StrLenField(\"dn\", \"\", length_from=lambda pkt: pkt.dnlen)]\n\n def guess_payload_class(self, p):\n return Padding\n\n\nclass TACertSHA1Hash(Packet):\n name = \"Trusted authority - cert_sha1_hash\"\n fields_desc = [ByteEnumField(\"idtype\", 3, _tls_trusted_authority_types),\n StrFixedLenField(\"id\", None, 20)]\n\n def guess_payload_class(self, p):\n return Padding\n\n\n_tls_trusted_authority_cls = {0: TAPreAgreed,\n 1: TAKeySHA1Hash,\n 2: TAX509Name,\n 3: TACertSHA1Hash}\n\n\nclass _TAListField(PacketListField):\n \"\"\"\n Specific version that selects the right Trusted Authority (previous TA*)\n class to be used for dissection based on idtype.\n \"\"\"\n\n def m2i(self, pkt, m):\n idtype = ord(m[0])\n cls = self.cls\n if idtype in _tls_trusted_authority_cls:\n cls = _tls_trusted_authority_cls[idtype]\n return cls(m)\n\n\nclass TLS_Ext_TrustedCAInd(TLS_Ext_Unknown): # RFC 4366\n name = \"TLS Extension - Trusted CA Indication\"\n fields_desc = [ShortEnumField(\"type\", 3, _tls_ext),\n ShortField(\"len\", None),\n FieldLenField(\"talen\", None, length_of=\"ta\"),\n _TAListField(\"ta\", [], Raw,\n length_from=lambda pkt: pkt.talen)]\n\n\nclass TLS_Ext_TruncatedHMAC(TLS_Ext_Unknown): # RFC 4366\n name = \"TLS Extension - Truncated HMAC\"\n fields_desc = [ShortEnumField(\"type\", 4, _tls_ext),\n ShortField(\"len\", None)]\n\n\nclass ResponderID(Packet):\n name = \"Responder ID structure\"\n fields_desc = [FieldLenField(\"respidlen\", None, length_of=\"respid\"),\n StrLenField(\"respid\", \"\",\n length_from=lambda pkt: pkt.respidlen)]\n\n def guess_payload_class(self, p):\n return Padding\n\n\nclass OCSPStatusRequest(Packet):\n \"\"\"\n This is the structure defined in RFC 6066, not in RFC 6960!\n \"\"\"\n name = \"OCSPStatusRequest structure\"\n fields_desc = [FieldLenField(\"respidlen\", None, length_of=\"respid\"),\n PacketListField(\"respid\", [], ResponderID,\n length_from=lambda pkt: pkt.respidlen),\n FieldLenField(\"reqextlen\", None, length_of=\"reqext\"),\n PacketField(\"reqext\", \"\", X509_Extensions)]\n\n def guess_payload_class(self, p):\n return Padding\n\n\n_cert_status_type = {1: \"ocsp\"}\n_cert_status_req_cls = {1: OCSPStatusRequest}\n\n\nclass _StatusReqField(PacketListField):\n def m2i(self, pkt, m):\n idtype = pkt.stype\n cls = self.cls\n if idtype in _cert_status_req_cls:\n cls = _cert_status_req_cls[idtype]\n return cls(m)\n\n\nclass TLS_Ext_CSR(TLS_Ext_Unknown): # RFC 4366\n name = \"TLS Extension - Certificate Status Request\"\n fields_desc = [ShortEnumField(\"type\", 5, _tls_ext),\n ShortField(\"len\", None),\n ByteEnumField(\"stype\", None, _cert_status_type),\n _StatusReqField(\"req\", [], Raw,\n length_from=lambda pkt: pkt.len - 1)]\n\n\nclass TLS_Ext_UserMapping(TLS_Ext_Unknown): # RFC 4681\n name = \"TLS Extension - User Mapping\"\n fields_desc = [ShortEnumField(\"type\", 6, _tls_ext),\n ShortField(\"len\", None),\n FieldLenField(\"umlen\", None, fmt=\"B\", length_of=\"um\"),\n FieldListField(\"um\", [],\n ByteField(\"umtype\", 0),\n length_from=lambda pkt: pkt.umlen)]\n\n\nclass TLS_Ext_ClientAuthz(TLS_Ext_Unknown): # RFC 5878\n \"\"\" XXX Unsupported \"\"\"\n name = \"TLS Extension - Client Authz\"\n fields_desc = [ShortEnumField(\"type\", 7, _tls_ext),\n ShortField(\"len\", None),\n ]\n\n\nclass 
TLS_Ext_ServerAuthz(TLS_Ext_Unknown): # RFC 5878\n \"\"\" XXX Unsupported \"\"\"\n name = \"TLS Extension - Server Authz\"\n fields_desc = [ShortEnumField(\"type\", 8, _tls_ext),\n ShortField(\"len\", None),\n ]\n\n\n_tls_cert_types = {0: \"X.509\", 1: \"OpenPGP\"}\n\n\nclass TLS_Ext_ClientCertType(TLS_Ext_Unknown): # RFC 5081\n name = \"TLS Extension - Certificate Type (client version)\"\n fields_desc = [ShortEnumField(\"type\", 9, _tls_ext),\n ShortField(\"len\", None),\n FieldLenField(\"ctypeslen\", None, length_of=\"ctypes\"),\n FieldListField(\"ctypes\", [0, 1],\n ByteEnumField(\"certtypes\", None,\n _tls_cert_types),\n length_from=lambda pkt: pkt.ctypeslen)]\n\n\nclass TLS_Ext_ServerCertType(TLS_Ext_Unknown): # RFC 5081\n name = \"TLS Extension - Certificate Type (server version)\"\n fields_desc = [ShortEnumField(\"type\", 9, _tls_ext),\n ShortField(\"len\", None),\n ByteEnumField(\"ctype\", None, _tls_cert_types)]\n\n\ndef _TLS_Ext_CertTypeDispatcher(m, *args, **kargs):\n \"\"\"\n We need to select the correct one on dissection. We use the length for\n that, as 1 for client version would emply an empty list.\n \"\"\"\n tmp_len = struct.unpack(\"!H\", m[2:4])[0]\n if tmp_len == 1:\n cls = TLS_Ext_ServerCertType\n else:\n cls = TLS_Ext_ClientCertType\n return cls(m, *args, **kargs)\n\n\nclass TLS_Ext_SupportedGroups(TLS_Ext_Unknown):\n \"\"\"\n This extension was known as 'Supported Elliptic Curves' before TLS 1.3\n merged both group selection mechanisms for ECDH and FFDH.\n \"\"\"\n name = \"TLS Extension - Supported Groups\"\n fields_desc = [ShortEnumField(\"type\", 10, _tls_ext),\n ShortField(\"len\", None),\n FieldLenField(\"groupslen\", None, length_of=\"groups\"),\n FieldListField(\"groups\", [],\n ShortEnumField(\"ng\", None,\n _tls_named_groups),\n length_from=lambda pkt: pkt.groupslen)]\n\n\nclass TLS_Ext_SupportedEllipticCurves(TLS_Ext_SupportedGroups): # RFC 4492\n pass\n\n\n_tls_ecpoint_format = {0: \"uncompressed\",\n 1: \"ansiX962_compressed_prime\",\n 2: \"ansiX962_compressed_char2\"}\n\n\nclass TLS_Ext_SupportedPointFormat(TLS_Ext_Unknown): # RFC 4492\n name = \"TLS Extension - Supported Point Format\"\n fields_desc = [ShortEnumField(\"type\", 11, _tls_ext),\n ShortField(\"len\", None),\n FieldLenField(\"ecpllen\", None, fmt=\"B\", length_of=\"ecpl\"),\n FieldListField(\"ecpl\", [0],\n ByteEnumField(\"nc\", None,\n _tls_ecpoint_format),\n length_from=lambda pkt: pkt.ecpllen)]\n\n\nclass TLS_Ext_SignatureAlgorithms(TLS_Ext_Unknown): # RFC 5246\n name = \"TLS Extension - Signature Algorithms\"\n fields_desc = [ShortEnumField(\"type\", 13, _tls_ext),\n ShortField(\"len\", None),\n SigAndHashAlgsLenField(\"sig_algs_len\", None,\n length_of=\"sig_algs\"),\n SigAndHashAlgsField(\"sig_algs\", [],\n EnumField(\"hash_sig\", None,\n _tls_hash_sig),\n length_from=lambda pkt: pkt.sig_algs_len)] # noqa: E501\n\n\nclass TLS_Ext_Heartbeat(TLS_Ext_Unknown): # RFC 6520\n name = \"TLS Extension - Heartbeat\"\n fields_desc = [ShortEnumField(\"type\", 0x0f, _tls_ext),\n ShortField(\"len\", None),\n ByteEnumField(\"heartbeat_mode\", 2,\n {1: \"peer_allowed_to_send\",\n 2: \"peer_not_allowed_to_send\"})]\n\n\nclass ProtocolName(Packet):\n name = \"Protocol Name\"\n fields_desc = [FieldLenField(\"len\", None, fmt='B', length_of=\"protocol\"),\n StrLenField(\"protocol\", \"\",\n length_from=lambda pkt: pkt.len)]\n\n def guess_payload_class(self, p):\n return Padding\n\n\nclass ProtocolListField(PacketListField):\n def i2repr(self, pkt, x):\n res = [p.protocol for p in x]\n return \"[%s]\" % 
b\", \".join(res)\n\n\nclass TLS_Ext_ALPN(TLS_Ext_PrettyPacketList): # RFC 7301\n name = \"TLS Extension - Application Layer Protocol Negotiation\"\n fields_desc = [ShortEnumField(\"type\", 0x10, _tls_ext),\n ShortField(\"len\", None),\n FieldLenField(\"protocolslen\", None, length_of=\"protocols\"),\n ProtocolListField(\"protocols\", [], ProtocolName,\n length_from=lambda pkt:pkt.protocolslen)]\n\n\nclass TLS_Ext_Padding(TLS_Ext_Unknown): # RFC 7685\n name = \"TLS Extension - Padding\"\n fields_desc = [ShortEnumField(\"type\", 0x15, _tls_ext),\n FieldLenField(\"len\", None, length_of=\"padding\"),\n StrLenField(\"padding\", \"\",\n length_from=lambda pkt: pkt.len)]\n\n\nclass TLS_Ext_EncryptThenMAC(TLS_Ext_Unknown): # RFC 7366\n name = \"TLS Extension - Encrypt-then-MAC\"\n fields_desc = [ShortEnumField(\"type\", 0x16, _tls_ext),\n ShortField(\"len\", None)]\n\n\nclass TLS_Ext_ExtendedMasterSecret(TLS_Ext_Unknown): # RFC 7627\n name = \"TLS Extension - Extended Master Secret\"\n fields_desc = [ShortEnumField(\"type\", 0x17, _tls_ext),\n ShortField(\"len\", None)]\n\n\nclass TLS_Ext_SessionTicket(TLS_Ext_Unknown): # RFC 5077\n \"\"\"\n RFC 5077 updates RFC 4507 according to most implementations, which do not\n use another (useless) 'ticketlen' field after the global 'len' field.\n \"\"\"\n name = \"TLS Extension - Session Ticket\"\n fields_desc = [ShortEnumField(\"type\", 0x23, _tls_ext),\n FieldLenField(\"len\", None, length_of=\"ticket\"),\n StrLenField(\"ticket\", \"\",\n length_from=lambda pkt: pkt.len)]\n\n\nclass TLS_Ext_KeyShare(TLS_Ext_Unknown):\n name = \"TLS Extension - Key Share (dummy class)\"\n fields_desc = [ShortEnumField(\"type\", 0x28, _tls_ext),\n ShortField(\"len\", None)]\n\n\nclass TLS_Ext_PreSharedKey(TLS_Ext_Unknown):\n name = \"TLS Extension - Pre Shared Key (dummy class)\"\n fields_desc = [ShortEnumField(\"type\", 0x29, _tls_ext),\n ShortField(\"len\", None)]\n\n\nclass TLS_Ext_EarlyData(TLS_Ext_Unknown):\n name = \"TLS Extension - Early Data\"\n fields_desc = [ShortEnumField(\"type\", 0x2a, _tls_ext),\n ShortField(\"len\", None)]\n\n\nclass TLS_Ext_SupportedVersions(TLS_Ext_Unknown):\n name = \"TLS Extension - Supported Versions\"\n fields_desc = [ShortEnumField(\"type\", 0x2b, _tls_ext),\n ShortField(\"len\", None),\n FieldLenField(\"versionslen\", None, fmt='B',\n length_of=\"versions\"),\n FieldListField(\"versions\", [],\n ShortEnumField(\"version\", None,\n _tls_version),\n length_from=lambda pkt: pkt.versionslen)]\n\n\nclass TLS_Ext_Cookie(TLS_Ext_Unknown):\n name = \"TLS Extension - Cookie\"\n fields_desc = [ShortEnumField(\"type\", 0x2c, _tls_ext),\n ShortField(\"len\", None),\n FieldLenField(\"cookielen\", None, length_of=\"cookie\"),\n XStrLenField(\"cookie\", \"\",\n length_from=lambda pkt: pkt.cookielen)]\n\n\n_tls_psk_kx_modes = {0: \"psk_ke\", 1: \"psk_dhe_ke\"}\n\n\nclass TLS_Ext_PSKKeyExchangeModes(TLS_Ext_Unknown):\n name = \"TLS Extension - PSK Key Exchange Modes\"\n fields_desc = [ShortEnumField(\"type\", 0x2d, _tls_ext),\n ShortField(\"len\", None),\n FieldLenField(\"kxmodeslen\", None, fmt='B',\n length_of=\"kxmodes\"),\n FieldListField(\"kxmodes\", [],\n ByteEnumField(\"kxmode\", None,\n _tls_psk_kx_modes),\n length_from=lambda pkt: pkt.kxmodeslen)]\n\n\nclass TLS_Ext_TicketEarlyDataInfo(TLS_Ext_Unknown):\n name = \"TLS Extension - Ticket Early Data Info\"\n fields_desc = [ShortEnumField(\"type\", 0x2e, _tls_ext),\n ShortField(\"len\", None),\n IntField(\"max_early_data_size\", 0)]\n\n\nclass TLS_Ext_NPN(TLS_Ext_PrettyPacketList):\n \"\"\"\n 
Defined in RFC-draft-agl-tls-nextprotoneg-03. Deprecated in favour of ALPN.\n \"\"\"\n name = \"TLS Extension - Next Protocol Negotiation\"\n fields_desc = [ShortEnumField(\"type\", 0x3374, _tls_ext),\n FieldLenField(\"len\", None, length_of=\"protocols\"),\n ProtocolListField(\"protocols\", [], ProtocolName,\n length_from=lambda pkt:pkt.len)]\n\n\nclass TLS_Ext_RenegotiationInfo(TLS_Ext_Unknown): # RFC 5746\n name = \"TLS Extension - Renegotiation Indication\"\n fields_desc = [ShortEnumField(\"type\", 0xff01, _tls_ext),\n ShortField(\"len\", None),\n FieldLenField(\"reneg_conn_len\", None, fmt='B',\n length_of=\"renegotiated_connection\"),\n StrLenField(\"renegotiated_connection\", \"\",\n length_from=lambda pkt: pkt.reneg_conn_len)]\n\n\n_tls_ext_cls = {0: TLS_Ext_ServerName,\n 1: TLS_Ext_MaxFragLen,\n 2: TLS_Ext_ClientCertURL,\n 3: TLS_Ext_TrustedCAInd,\n 4: TLS_Ext_TruncatedHMAC,\n 5: TLS_Ext_CSR,\n 6: TLS_Ext_UserMapping,\n 7: TLS_Ext_ClientAuthz,\n 8: TLS_Ext_ServerAuthz,\n 9: _TLS_Ext_CertTypeDispatcher,\n # 10: TLS_Ext_SupportedEllipticCurves,\n 10: TLS_Ext_SupportedGroups,\n 11: TLS_Ext_SupportedPointFormat,\n 13: TLS_Ext_SignatureAlgorithms,\n 0x0f: TLS_Ext_Heartbeat,\n 0x10: TLS_Ext_ALPN,\n 0x15: TLS_Ext_Padding,\n 0x16: TLS_Ext_EncryptThenMAC,\n 0x17: TLS_Ext_ExtendedMasterSecret,\n 0x23: TLS_Ext_SessionTicket,\n 0x28: TLS_Ext_KeyShare,\n 0x29: TLS_Ext_PreSharedKey,\n 0x2a: TLS_Ext_EarlyData,\n 0x2b: TLS_Ext_SupportedVersions,\n 0x2c: TLS_Ext_Cookie,\n 0x2d: TLS_Ext_PSKKeyExchangeModes,\n 0x2e: TLS_Ext_TicketEarlyDataInfo,\n # 0x2f: TLS_Ext_CertificateAuthorities, #XXX\n # 0x30: TLS_Ext_OIDFilters, #XXX\n 0x3374: TLS_Ext_NPN,\n 0xff01: TLS_Ext_RenegotiationInfo\n }\n\n\nclass _ExtensionsLenField(FieldLenField):\n def getfield(self, pkt, s):\n \"\"\"\n We try to compute a length, usually from a msglen parsed earlier.\n If this length is 0, we consider 'selection_present' (from RFC 5246)\n to be False. This means that there should not be any length field.\n However, with TLS 1.3, zero lengths are always explicit.\n \"\"\"\n ext = pkt.get_field(self.length_of)\n tmp_len = ext.length_from(pkt)\n if tmp_len is None or tmp_len <= 0:\n v = pkt.tls_session.tls_version\n if v is None or v < 0x0304:\n return s, None\n return super(_ExtensionsLenField, self).getfield(pkt, s)\n\n def addfield(self, pkt, s, i):\n \"\"\"\n There is a hack with the _ExtensionsField.i2len. It works only because\n we expect _ExtensionsField.i2m to return a string of the same size (if\n not of the same value) upon successive calls (e.g. 
through i2len here,\n then i2m when directly building the _ExtensionsField).\n\n XXX A proper way to do this would be to keep the extensions built from\n the i2len call here, instead of rebuilding them later on.\n \"\"\"\n if i is None:\n if self.length_of is not None:\n fld, fval = pkt.getfield_and_val(self.length_of)\n\n tmp = pkt.tls_session.frozen\n pkt.tls_session.frozen = True\n f = fld.i2len(pkt, fval)\n pkt.tls_session.frozen = tmp\n\n i = self.adjust(pkt, f)\n if i == 0: # for correct build if no ext and not explicitly 0\n return s\n return s + struct.pack(self.fmt, i)\n\n\nclass _ExtensionsField(StrLenField):\n islist = 1\n holds_packets = 1\n\n def i2len(self, pkt, i):\n if i is None:\n return 0\n return len(self.i2m(pkt, i))\n\n def getfield(self, pkt, s):\n tmp_len = self.length_from(pkt)\n if tmp_len is None:\n return s, []\n return s[tmp_len:], self.m2i(pkt, s[:tmp_len])\n\n def i2m(self, pkt, i):\n if i is None:\n return b\"\"\n if isinstance(pkt, _GenericTLSSessionInheritance):\n if not pkt.tls_session.frozen:\n s = b\"\"\n for ext in i:\n if isinstance(ext, _GenericTLSSessionInheritance):\n ext.tls_session = pkt.tls_session\n s += ext.raw_stateful()\n else:\n s += raw(ext)\n return s\n return b\"\".join(map(raw, i))\n\n def m2i(self, pkt, m):\n res = []\n while len(m) > 4:\n t = struct.unpack(\"!H\", m[:2])[0]\n tmp_len = struct.unpack(\"!H\", m[2:4])[0]\n cls = _tls_ext_cls.get(t, TLS_Ext_Unknown)\n if cls is TLS_Ext_KeyShare:\n from scapy.layers.tls.keyexchange_tls13 import _tls_ext_keyshare_cls # noqa: E501\n cls = _tls_ext_keyshare_cls.get(pkt.msgtype, TLS_Ext_Unknown)\n elif cls is TLS_Ext_PreSharedKey:\n from scapy.layers.tls.keyexchange_tls13 import _tls_ext_presharedkey_cls # noqa: E501\n cls = _tls_ext_presharedkey_cls.get(pkt.msgtype, TLS_Ext_Unknown) # noqa: E501\n res.append(cls(m[:tmp_len + 4], tls_session=pkt.tls_session))\n m = m[tmp_len + 4:]\n return res\n",
"path": "scapy/layers/tls/extensions.py"
}
] | diff --git a/scapy/layers/tls/extensions.py b/scapy/layers/tls/extensions.py
index ffec02aa1ea..ea56a5e8bd0 100644
--- a/scapy/layers/tls/extensions.py
+++ b/scapy/layers/tls/extensions.py
@@ -684,7 +684,7 @@ def i2m(self, pkt, i):
def m2i(self, pkt, m):
res = []
- while m:
+ while len(m) > 4:
t = struct.unpack("!H", m[:2])[0]
tmp_len = struct.unpack("!H", m[2:4])[0]
cls = _tls_ext_cls.get(t, TLS_Ext_Unknown)
diff --git a/test/tls.uts b/test/tls.uts
index e9a8c98d11f..ca728bc2dad 100644
--- a/test/tls.uts
+++ b/test/tls.uts
@@ -1121,6 +1121,11 @@ def test_tls_without_cryptography(get_algs_from_ciphersuite_name_mock):
test_tls_without_cryptography()
+= Truncated TCP segment
+
+pkt = Ether(hex_bytes('00155dfb587a00155dfb58430800450005dc54d3400070065564400410d40a00000d01bb044e8b86744e16063ac45010faf06ba9000016030317c30200005503035cb336a067d53a5d2cedbdfec666ac740afbd0637ddd13eddeab768c3c63abee20981a0000d245f1c905b329323ad67127cd4b907a49f775c331d0794149aca7cdc02800000d0005000000170000ff010001000b000ec6000ec300090530820901308206e9a00302010202132000036e72aded906765595fae000000036e72300d06092a864886f70d01010b050030818b310b30090603550406130255533113'))
+assert TLSServerHello in pkt
+
###############################################################################
########################### TLS Misc tests ####################################
###############################################################################
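A note on the fix above (my reading of the diff, not text from the patch): a TLS extension starts with a 4-byte header, a 2-byte type plus a 2-byte length, so a trailing fragment of 4 bytes or fewer, as left over from a truncated TCP segment, cannot contain a parseable extension. Guarding the loop with `len(m) > 4` instead of `while m:` ends dissection cleanly rather than mis-reading the remainder. A self-contained sketch of the loop's shape:

```python
import struct

def split_extensions(m):
    # Mirrors the guard from the patch: stop once fewer bytes remain than a
    # full 4-byte extension header (2-byte type + 2-byte length).
    res = []
    while len(m) > 4:
        t, ext_len = struct.unpack("!HH", m[:4])
        res.append((t, m[4:4 + ext_len]))
        m = m[4 + ext_len:]
    return res
```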
|
pytorch__pytorch-4684 | Update documentation for Embedding layer
The documentation corresponding to [torch.nn.Embedding](http://pytorch.org/docs/master/nn.html) mentions that ```Keep in mind that only a limited number of optimizers support sparse gradients: currently it’s optim.SGD (cuda and cpu), and optim.Adagrad (cpu)```. This is outdated; `SparseAdam` is now also supported.
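For context, a minimal sketch of the pairing the updated note should cover: a sparse `nn.Embedding` optimized with `optim.SparseAdam`. This is my illustration, not part of the issue or the patch, and it assumes a PyTorch version where plain tensors work without `Variable` wrapping:

```python
import torch
import torch.nn as nn
import torch.optim as optim

# sparse=True makes the gradient w.r.t. embedding.weight a sparse tensor
embedding = nn.Embedding(10, 3, sparse=True)
optimizer = optim.SparseAdam(embedding.parameters(), lr=1e-3)

input = torch.LongTensor([[1, 2, 4, 5], [4, 3, 2, 9]])
loss = embedding(input).sum()
loss.backward()    # embedding.weight.grad is a sparse tensor here
optimizer.step()   # SparseAdam consumes the sparse gradient
```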
| [
{
"content": "import torch\nfrom torch.autograd import Variable\nfrom torch.nn.parameter import Parameter\n\nfrom .module import Module\nfrom .. import functional as F\n\n\nclass Embedding(Module):\n r\"\"\"A simple lookup table that stores embeddings of a fixed dictionary and size.\n\n This module is often used to store word embeddings and retrieve them using indices.\n The input to the module is a list of indices, and the output is the corresponding\n word embeddings.\n\n Args:\n num_embeddings (int): size of the dictionary of embeddings\n embedding_dim (int): the size of each embedding vector\n padding_idx (int, optional): If given, pads the output with zeros whenever it encounters the index.\n max_norm (float, optional): If given, will renormalize the embeddings to always have a norm lesser than this\n norm_type (float, optional): The p of the p-norm to compute for the max_norm option\n scale_grad_by_freq (boolean, optional): if given, this will scale gradients by the frequency of\n the words in the mini-batch.\n sparse (boolean, optional): if ``True``, gradient w.r.t. weight matrix will be a sparse tensor. See Notes for\n more details regarding sparse gradients.\n\n Attributes:\n weight (Tensor): the learnable weights of the module of shape (num_embeddings, embedding_dim)\n\n Shape:\n - Input: LongTensor `(N, W)`, N = mini-batch, W = number of indices to extract per mini-batch\n - Output: `(N, W, embedding_dim)`\n\n Notes:\n Keep in mind that only a limited number of optimizers support\n sparse gradients: currently it's `optim.SGD` (`cuda` and `cpu`),\n and `optim.Adagrad` (`cpu`)\n\n Examples::\n\n >>> # an Embedding module containing 10 tensors of size 3\n >>> embedding = nn.Embedding(10, 3)\n >>> # a batch of 2 samples of 4 indices each\n >>> input = Variable(torch.LongTensor([[1,2,4,5],[4,3,2,9]]))\n >>> embedding(input)\n\n Variable containing:\n (0 ,.,.) =\n -1.0822 1.2522 0.2434\n 0.8393 -0.6062 -0.3348\n 0.6597 0.0350 0.0837\n 0.5521 0.9447 0.0498\n\n (1 ,.,.) =\n 0.6597 0.0350 0.0837\n -0.1527 0.0877 0.4260\n 0.8393 -0.6062 -0.3348\n -0.8738 -0.9054 0.4281\n [torch.FloatTensor of size 2x4x3]\n\n >>> # example with padding_idx\n >>> embedding = nn.Embedding(10, 3, padding_idx=0)\n >>> input = Variable(torch.LongTensor([[0,2,0,5]]))\n >>> embedding(input)\n\n Variable containing:\n (0 ,.,.) 
=\n 0.0000 0.0000 0.0000\n 0.3452 0.4937 -0.9361\n 0.0000 0.0000 0.0000\n 0.0706 -2.1962 -0.6276\n [torch.FloatTensor of size 1x4x3]\n\n \"\"\"\n\n def __init__(self, num_embeddings, embedding_dim, padding_idx=None,\n max_norm=None, norm_type=2, scale_grad_by_freq=False,\n sparse=False):\n super(Embedding, self).__init__()\n self.num_embeddings = num_embeddings\n self.embedding_dim = embedding_dim\n if padding_idx is not None:\n if padding_idx > 0:\n assert padding_idx < self.num_embeddings, 'Padding_idx must be within num_embeddings'\n elif padding_idx < 0:\n assert padding_idx >= -self.num_embeddings, 'Padding_idx must be within num_embeddings'\n padding_idx = self.num_embeddings + padding_idx\n self.padding_idx = padding_idx\n self.max_norm = max_norm\n self.norm_type = norm_type\n self.scale_grad_by_freq = scale_grad_by_freq\n self.weight = Parameter(torch.Tensor(num_embeddings, embedding_dim))\n self.sparse = sparse\n\n self.reset_parameters()\n\n def reset_parameters(self):\n self.weight.data.normal_(0, 1)\n if self.padding_idx is not None:\n self.weight.data[self.padding_idx].fill_(0)\n\n def forward(self, input):\n return F.embedding(\n input, self.weight, self.padding_idx, self.max_norm,\n self.norm_type, self.scale_grad_by_freq, self.sparse)\n\n def __repr__(self):\n s = '{name}({num_embeddings}, {embedding_dim}'\n if self.padding_idx is not None:\n s += ', padding_idx={padding_idx}'\n if self.max_norm is not None:\n s += ', max_norm={max_norm}'\n if self.norm_type != 2:\n s += ', norm_type={norm_type}'\n if self.scale_grad_by_freq is not False:\n s += ', scale_grad_by_freq={scale_grad_by_freq}'\n if self.sparse is not False:\n s += ', sparse=True'\n s += ')'\n return s.format(name=self.__class__.__name__, **self.__dict__)\n\n\nclass EmbeddingBag(Module):\n r\"\"\"Computes sums or means of 'bags' of embeddings, without instantiating the\n intermediate embeddings.\n\n For bags of constant length,\n * nn.EmbeddingBag with `mode=sum` is equivalent to nn.Embedding followed by `torch.sum(dim=1)`\n * with `mode=mean` is equivalent to nn.Embedding followed by `torch.mean(dim=1)`\n\n However, nn.EmbeddingBag is much more time and memory efficient than using a chain of these\n operations.\n\n Args:\n num_embeddings (int): size of the dictionary of embeddings\n embedding_dim (int): the size of each embedding vector\n max_norm (float, optional): If given, will renormalize the embeddings to always have a norm lesser than this\n norm_type (float, optional): The p of the p-norm to compute for the max_norm option\n scale_grad_by_freq (boolean, optional): if given, this will scale gradients by the frequency of\n the words in the dictionary.\n mode (string, optional): 'sum' | 'mean'. Specifies the way to reduce the bag. Default: 'mean'\n\n Attributes:\n weight (Tensor): the learnable weights of the module of shape (num_embeddings, embedding_dim)\n\n Inputs: input, offsets\n - **input** (N or BxN): LongTensor containing the indices of the embeddings\n to extract. When `input` is 1D Tensor of shape `N`,\n an `offsets` Tensor is given, that contains the\n starting position of each new sequence in the\n mini-batch.\n - **offsets** (B or None): LongTensor containing the starting positions of\n each sample in a mini-batch of variable length\n sequences. 
If `input` is 2D (BxN), then offsets\n does not need to be given, as the `input` is\n treated as a mini-batch of fixed length sequences\n of length `N` each.\n\n\n Shape:\n - Input: LongTensor `N`, N = number of embeddings to extract\n (or) LongTensor `BxN`, B = number of sequences in mini-batch,\n N = number of embeddings per sequence\n - Offsets: LongTensor `B`, B = number of bags. The values are the\n offsets in `input` for each bag, i.e. the cumsum of lengths.\n Offsets is not given if Input is 2D `BxN` Tensor,\n the input is considered to be of fixed-length sequences\n - Output: `(B, embedding_dim)`\n\n Examples::\n\n >>> # an Embedding module containing 10 tensors of size 3\n >>> embedding_sum = nn.EmbeddingBag(10, 3, mode='sum')\n >>> # a batch of 2 samples of 4 indices each\n >>> input = Variable(torch.LongTensor([1,2,4,5,4,3,2,9]))\n >>> offsets = Variable(torch.LongTensor([0,4]))\n >>> embedding_sum(input, offsets)\n\n Variable containing:\n -0.7296 -4.6926 0.3295\n -0.5186 -0.5631 -0.2792\n [torch.FloatTensor of size 2x3]\n\n \"\"\"\n\n def __init__(self, num_embeddings, embedding_dim,\n max_norm=None, norm_type=2, scale_grad_by_freq=False,\n mode='mean'):\n super(EmbeddingBag, self).__init__()\n self.num_embeddings = num_embeddings\n self.embedding_dim = embedding_dim\n self.max_norm = max_norm\n self.norm_type = norm_type\n self.scale_grad_by_freq = scale_grad_by_freq\n self.weight = Parameter(torch.Tensor(num_embeddings, embedding_dim))\n self.mode = mode\n\n self.reset_parameters()\n\n def reset_parameters(self):\n self.weight.data.normal_(0, 1)\n\n def forward(self, input, offsets=None):\n return F.embedding_bag(self.weight, input, offsets,\n self.max_norm, self.norm_type,\n self.scale_grad_by_freq, self.mode)\n\n def __repr__(self):\n s = '{name}({num_embeddings}, {embedding_dim}'\n if self.max_norm is not None:\n s += ', max_norm={max_norm}'\n if self.norm_type != 2:\n s += ', norm_type={norm_type}'\n if self.scale_grad_by_freq is not False:\n s += ', scale_grad_by_freq={scale_grad_by_freq}'\n s += ', mode={mode}'\n s += ')'\n return s.format(name=self.__class__.__name__, **self.__dict__)\n\n# TODO: SparseLinear\n",
"path": "torch/nn/modules/sparse.py"
}
] | [
{
"content": "import torch\nfrom torch.autograd import Variable\nfrom torch.nn.parameter import Parameter\n\nfrom .module import Module\nfrom .. import functional as F\n\n\nclass Embedding(Module):\n r\"\"\"A simple lookup table that stores embeddings of a fixed dictionary and size.\n\n This module is often used to store word embeddings and retrieve them using indices.\n The input to the module is a list of indices, and the output is the corresponding\n word embeddings.\n\n Args:\n num_embeddings (int): size of the dictionary of embeddings\n embedding_dim (int): the size of each embedding vector\n padding_idx (int, optional): If given, pads the output with zeros whenever it encounters the index.\n max_norm (float, optional): If given, will renormalize the embeddings to always have a norm lesser than this\n norm_type (float, optional): The p of the p-norm to compute for the max_norm option\n scale_grad_by_freq (boolean, optional): if given, this will scale gradients by the frequency of\n the words in the mini-batch.\n sparse (boolean, optional): if ``True``, gradient w.r.t. weight matrix will be a sparse tensor. See Notes for\n more details regarding sparse gradients.\n\n Attributes:\n weight (Tensor): the learnable weights of the module of shape (num_embeddings, embedding_dim)\n\n Shape:\n - Input: LongTensor `(N, W)`, N = mini-batch, W = number of indices to extract per mini-batch\n - Output: `(N, W, embedding_dim)`\n\n Notes:\n Keep in mind that only a limited number of optimizers support\n sparse gradients: currently it's `optim.SGD` (`cuda` and `cpu`),\n `optim.SparseAdam` (`cuda` and `cpu`) and `optim.Adagrad` (`cpu`)\n\n Examples::\n\n >>> # an Embedding module containing 10 tensors of size 3\n >>> embedding = nn.Embedding(10, 3)\n >>> # a batch of 2 samples of 4 indices each\n >>> input = Variable(torch.LongTensor([[1,2,4,5],[4,3,2,9]]))\n >>> embedding(input)\n\n Variable containing:\n (0 ,.,.) =\n -1.0822 1.2522 0.2434\n 0.8393 -0.6062 -0.3348\n 0.6597 0.0350 0.0837\n 0.5521 0.9447 0.0498\n\n (1 ,.,.) =\n 0.6597 0.0350 0.0837\n -0.1527 0.0877 0.4260\n 0.8393 -0.6062 -0.3348\n -0.8738 -0.9054 0.4281\n [torch.FloatTensor of size 2x4x3]\n\n >>> # example with padding_idx\n >>> embedding = nn.Embedding(10, 3, padding_idx=0)\n >>> input = Variable(torch.LongTensor([[0,2,0,5]]))\n >>> embedding(input)\n\n Variable containing:\n (0 ,.,.) 
=\n 0.0000 0.0000 0.0000\n 0.3452 0.4937 -0.9361\n 0.0000 0.0000 0.0000\n 0.0706 -2.1962 -0.6276\n [torch.FloatTensor of size 1x4x3]\n\n \"\"\"\n\n def __init__(self, num_embeddings, embedding_dim, padding_idx=None,\n max_norm=None, norm_type=2, scale_grad_by_freq=False,\n sparse=False):\n super(Embedding, self).__init__()\n self.num_embeddings = num_embeddings\n self.embedding_dim = embedding_dim\n if padding_idx is not None:\n if padding_idx > 0:\n assert padding_idx < self.num_embeddings, 'Padding_idx must be within num_embeddings'\n elif padding_idx < 0:\n assert padding_idx >= -self.num_embeddings, 'Padding_idx must be within num_embeddings'\n padding_idx = self.num_embeddings + padding_idx\n self.padding_idx = padding_idx\n self.max_norm = max_norm\n self.norm_type = norm_type\n self.scale_grad_by_freq = scale_grad_by_freq\n self.weight = Parameter(torch.Tensor(num_embeddings, embedding_dim))\n self.sparse = sparse\n\n self.reset_parameters()\n\n def reset_parameters(self):\n self.weight.data.normal_(0, 1)\n if self.padding_idx is not None:\n self.weight.data[self.padding_idx].fill_(0)\n\n def forward(self, input):\n return F.embedding(\n input, self.weight, self.padding_idx, self.max_norm,\n self.norm_type, self.scale_grad_by_freq, self.sparse)\n\n def __repr__(self):\n s = '{name}({num_embeddings}, {embedding_dim}'\n if self.padding_idx is not None:\n s += ', padding_idx={padding_idx}'\n if self.max_norm is not None:\n s += ', max_norm={max_norm}'\n if self.norm_type != 2:\n s += ', norm_type={norm_type}'\n if self.scale_grad_by_freq is not False:\n s += ', scale_grad_by_freq={scale_grad_by_freq}'\n if self.sparse is not False:\n s += ', sparse=True'\n s += ')'\n return s.format(name=self.__class__.__name__, **self.__dict__)\n\n\nclass EmbeddingBag(Module):\n r\"\"\"Computes sums or means of 'bags' of embeddings, without instantiating the\n intermediate embeddings.\n\n For bags of constant length,\n * nn.EmbeddingBag with `mode=sum` is equivalent to nn.Embedding followed by `torch.sum(dim=1)`\n * with `mode=mean` is equivalent to nn.Embedding followed by `torch.mean(dim=1)`\n\n However, nn.EmbeddingBag is much more time and memory efficient than using a chain of these\n operations.\n\n Args:\n num_embeddings (int): size of the dictionary of embeddings\n embedding_dim (int): the size of each embedding vector\n max_norm (float, optional): If given, will renormalize the embeddings to always have a norm lesser than this\n norm_type (float, optional): The p of the p-norm to compute for the max_norm option\n scale_grad_by_freq (boolean, optional): if given, this will scale gradients by the frequency of\n the words in the dictionary.\n mode (string, optional): 'sum' | 'mean'. Specifies the way to reduce the bag. Default: 'mean'\n\n Attributes:\n weight (Tensor): the learnable weights of the module of shape (num_embeddings, embedding_dim)\n\n Inputs: input, offsets\n - **input** (N or BxN): LongTensor containing the indices of the embeddings\n to extract. When `input` is 1D Tensor of shape `N`,\n an `offsets` Tensor is given, that contains the\n starting position of each new sequence in the\n mini-batch.\n - **offsets** (B or None): LongTensor containing the starting positions of\n each sample in a mini-batch of variable length\n sequences. 
If `input` is 2D (BxN), then offsets\n does not need to be given, as the `input` is\n treated as a mini-batch of fixed length sequences\n of length `N` each.\n\n\n Shape:\n - Input: LongTensor `N`, N = number of embeddings to extract\n (or) LongTensor `BxN`, B = number of sequences in mini-batch,\n N = number of embeddings per sequence\n - Offsets: LongTensor `B`, B = number of bags. The values are the\n offsets in `input` for each bag, i.e. the cumsum of lengths.\n Offsets is not given if Input is 2D `BxN` Tensor,\n the input is considered to be of fixed-length sequences\n - Output: `(B, embedding_dim)`\n\n Examples::\n\n >>> # an Embedding module containing 10 tensors of size 3\n >>> embedding_sum = nn.EmbeddingBag(10, 3, mode='sum')\n >>> # a batch of 2 samples of 4 indices each\n >>> input = Variable(torch.LongTensor([1,2,4,5,4,3,2,9]))\n >>> offsets = Variable(torch.LongTensor([0,4]))\n >>> embedding_sum(input, offsets)\n\n Variable containing:\n -0.7296 -4.6926 0.3295\n -0.5186 -0.5631 -0.2792\n [torch.FloatTensor of size 2x3]\n\n \"\"\"\n\n def __init__(self, num_embeddings, embedding_dim,\n max_norm=None, norm_type=2, scale_grad_by_freq=False,\n mode='mean'):\n super(EmbeddingBag, self).__init__()\n self.num_embeddings = num_embeddings\n self.embedding_dim = embedding_dim\n self.max_norm = max_norm\n self.norm_type = norm_type\n self.scale_grad_by_freq = scale_grad_by_freq\n self.weight = Parameter(torch.Tensor(num_embeddings, embedding_dim))\n self.mode = mode\n\n self.reset_parameters()\n\n def reset_parameters(self):\n self.weight.data.normal_(0, 1)\n\n def forward(self, input, offsets=None):\n return F.embedding_bag(self.weight, input, offsets,\n self.max_norm, self.norm_type,\n self.scale_grad_by_freq, self.mode)\n\n def __repr__(self):\n s = '{name}({num_embeddings}, {embedding_dim}'\n if self.max_norm is not None:\n s += ', max_norm={max_norm}'\n if self.norm_type != 2:\n s += ', norm_type={norm_type}'\n if self.scale_grad_by_freq is not False:\n s += ', scale_grad_by_freq={scale_grad_by_freq}'\n s += ', mode={mode}'\n s += ')'\n return s.format(name=self.__class__.__name__, **self.__dict__)\n\n# TODO: SparseLinear\n",
"path": "torch/nn/modules/sparse.py"
}
] | diff --git a/torch/nn/modules/sparse.py b/torch/nn/modules/sparse.py
index 80ee92abb7396..eaaef86452bb1 100644
--- a/torch/nn/modules/sparse.py
+++ b/torch/nn/modules/sparse.py
@@ -34,7 +34,7 @@ class Embedding(Module):
Notes:
Keep in mind that only a limited number of optimizers support
sparse gradients: currently it's `optim.SGD` (`cuda` and `cpu`),
- and `optim.Adagrad` (`cpu`)
+ `optim.SparseAdam` (`cuda` and `cpu`) and `optim.Adagrad` (`cpu`)
Examples::
|
conan-io__conan-center-index-1534 | [conan.io/center] parallel-hashmap/1.31 merged but not found in conan center
Even though https://github.com/conan-io/conan-center-index/pull/1253 has been merged, `parallel-hashmap/1.31` can't be found in the Web UI or with `conan search`.
| [
{
"content": "import os\n\nfrom conans import ConanFile, tools\n\nclass ParallelHashmapConan(ConanFile):\n name = \"parallel-hashmap\"\n description = \"A family of header-only, very fast and memory-friendly hashmap and btree containers.\"\n license = \"Apache-2.0\"\n topics = (\"conan\", \"parallel-hashmap\", \"parallel\", \"hashmap\", \"btree\")\n homepage = \"https://github.com/greg7mdp/parallel-hashmap\"\n url = \"https://github.com/conan-io/conan-center-index\"\n no_copy_source = True\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n os.rename(self.name + \"-\" + self.version, self._source_subfolder)\n\n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n self.copy(\"*.h\",\n dst=os.path.join(\"include\", \"parallel_hashmap\"),\n src=os.path.join(self._source_subfolder, \"parallel_hashmap\"))\n self.copy(\"phmap.natvis\", dst=\"res\", src=self._source_subfolder)\n\n def package_id(self):\n self.info.header_only()\n",
"path": "recipes/parallel-hashmap/all/conanfile.py"
}
] | [
{
"content": "import os\nfrom conans import ConanFile, tools\n\n\nclass ParallelHashmapConan(ConanFile):\n name = \"parallel-hashmap\"\n description = \"A family of header-only, very fast and memory-friendly hashmap and btree containers.\"\n license = \"Apache-2.0\"\n topics = (\"conan\", \"parallel-hashmap\", \"parallel\", \"hashmap\", \"btree\")\n homepage = \"https://github.com/greg7mdp/parallel-hashmap\"\n url = \"https://github.com/conan-io/conan-center-index\"\n no_copy_source = True\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n os.rename(self.name + \"-\" + self.version, self._source_subfolder)\n\n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n self.copy(\"*.h\",\n dst=os.path.join(\"include\", \"parallel_hashmap\"),\n src=os.path.join(self._source_subfolder, \"parallel_hashmap\"))\n self.copy(\"phmap.natvis\", dst=\"res\", src=self._source_subfolder)\n\n def package_id(self):\n self.info.header_only()\n",
"path": "recipes/parallel-hashmap/all/conanfile.py"
}
] | diff --git a/recipes/parallel-hashmap/all/conanfile.py b/recipes/parallel-hashmap/all/conanfile.py
index a14b1fc4e12f0..18d79e65113de 100644
--- a/recipes/parallel-hashmap/all/conanfile.py
+++ b/recipes/parallel-hashmap/all/conanfile.py
@@ -1,7 +1,7 @@
import os
-
from conans import ConanFile, tools
+
class ParallelHashmapConan(ConanFile):
name = "parallel-hashmap"
description = "A family of header-only, very fast and memory-friendly hashmap and btree containers."
diff --git a/recipes/parallel-hashmap/all/test_package/test_package.cpp b/recipes/parallel-hashmap/all/test_package/test_package.cpp
index 853610a46cc3d..db1405a69dfcd 100644
--- a/recipes/parallel-hashmap/all/test_package/test_package.cpp
+++ b/recipes/parallel-hashmap/all/test_package/test_package.cpp
@@ -6,23 +6,23 @@
using phmap::flat_hash_map;
int main() {
- // Create an unordered_map of three strings (that map to strings)
- flat_hash_map<std::string, std::string> email = {
- {"tom", "[email protected]"},
- {"jeff", "[email protected]"},
- {"jim", "[email protected]"}
- };
+ // Create an unordered_map of three strings (that map to strings)
+ flat_hash_map<std::string, std::string> email = {
+ {"tom", "[email protected]"},
+ {"jeff", "[email protected]"},
+ {"jim", "[email protected]"}
+ };
- // Iterate and print keys and values
- for (const auto &n : email) {
- std::cout << n.first << "'s email is: " << n.second << "\n";
- }
+ // Iterate and print keys and values
+ for (const auto &n : email) {
+ std::cout << n.first << "'s email is: " << n.second << "\n";
+ }
- // Add a new entry
- email["bill"] = "[email protected]";
+ // Add a new entry
+ email["bill"] = "[email protected]";
- // and print it
- std::cout << "bill's email is: " << email["bill"] << "\n";
+ // and print it
+ std::cout << "bill's email is: " << email["bill"] << "\n";
- return 0;
+ return 0;
}
|
certbot__certbot-2004 | don't add includeSubDomains to the Strict-Transport-Security header
Hi,
I think the letsencrypt client shouldn’t add `includeSubDomains` to the Strict-Transport-Security header by default.
If you let the letsencrypt client modify the configuration of a webserver that hosts example.com, this would make subdomain hosts (foo.example.com) reachable only over https. If you forgot even one webserver that serves a subdomain over http only, you can't reach that one any more.
And since browsers don't actually offer an option to clear the HSTS cache, removing includeSubDomains from the webserver config later won't help you.
includeSubDomains is something an admin should only add if they truly know that all subdomains are reachable over https. So I think it should never be added by an automatic tool like the letsencrypt client.
RFC 6797 section 11.4 [1] also describes some implications of includeSubDomains.
Chris
[1] https://tools.ietf.org/html/rfc6797#section-11.4
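For reference, the conservative default being asked for amounts to dropping `includeSubDomains` from the header arguments the Apache plugin writes. A minimal sketch of that constant, based on the `HSTS_ARGS` value shown in this record's `letsencrypt_apache/constants.py` (the one-year max-age is the value the plugin already uses; only the removal of includeSubDomains changes):
```python
# Apache header arguments for HSTS, without includeSubDomains: only the
# exact host that was configured is pinned to https, so forgotten
# http-only subdomains stay reachable.
HSTS_ARGS = ["always", "set", "Strict-Transport-Security",
             "\"max-age=31536000\""]
```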
| [
{
"content": "\"\"\"Apache plugin constants.\"\"\"\nimport pkg_resources\n\n\nCLI_DEFAULTS = dict(\n server_root=\"/etc/apache2\",\n ctl=\"apache2ctl\",\n enmod=\"a2enmod\",\n dismod=\"a2dismod\",\n le_vhost_ext=\"-le-ssl.conf\",\n)\n\"\"\"CLI defaults.\"\"\"\n\nMOD_SSL_CONF_DEST = \"options-ssl-apache.conf\"\n\"\"\"Name of the mod_ssl config file as saved in `IConfig.config_dir`.\"\"\"\n\nMOD_SSL_CONF_SRC = pkg_resources.resource_filename(\n \"letsencrypt_apache\", \"options-ssl-apache.conf\")\n\"\"\"Path to the Apache mod_ssl config file found in the Let's Encrypt\ndistribution.\"\"\"\n\nAUGEAS_LENS_DIR = pkg_resources.resource_filename(\n \"letsencrypt_apache\", \"augeas_lens\")\n\"\"\"Path to the Augeas lens directory\"\"\"\n\nREWRITE_HTTPS_ARGS = [\n \"^\", \"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[L,QSA,R=permanent]\"]\n\"\"\"Apache version<2.3.9 rewrite rule arguments used for redirections to https vhost\"\"\"\n\nREWRITE_HTTPS_ARGS_WITH_END = [\n \"^\", \"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[END,QSA,R=permanent]\"]\n\"\"\"Apache version >= 2.3.9 rewrite rule arguments used for redirections to\n https vhost\"\"\"\n\nHSTS_ARGS = [\"always\", \"set\", \"Strict-Transport-Security\",\n \"\\\"max-age=31536000; includeSubDomains\\\"\"]\n\"\"\"Apache header arguments for HSTS\"\"\"\n\nUIR_ARGS = [\"always\", \"set\", \"Content-Security-Policy\",\n \"upgrade-insecure-requests\"]\n\nHEADER_ARGS = {\"Strict-Transport-Security\": HSTS_ARGS,\n \"Upgrade-Insecure-Requests\": UIR_ARGS}\n\n",
"path": "letsencrypt-apache/letsencrypt_apache/constants.py"
}
] | [
{
"content": "\"\"\"Apache plugin constants.\"\"\"\nimport pkg_resources\n\n\nCLI_DEFAULTS = dict(\n server_root=\"/etc/apache2\",\n ctl=\"apache2ctl\",\n enmod=\"a2enmod\",\n dismod=\"a2dismod\",\n le_vhost_ext=\"-le-ssl.conf\",\n)\n\"\"\"CLI defaults.\"\"\"\n\nMOD_SSL_CONF_DEST = \"options-ssl-apache.conf\"\n\"\"\"Name of the mod_ssl config file as saved in `IConfig.config_dir`.\"\"\"\n\nMOD_SSL_CONF_SRC = pkg_resources.resource_filename(\n \"letsencrypt_apache\", \"options-ssl-apache.conf\")\n\"\"\"Path to the Apache mod_ssl config file found in the Let's Encrypt\ndistribution.\"\"\"\n\nAUGEAS_LENS_DIR = pkg_resources.resource_filename(\n \"letsencrypt_apache\", \"augeas_lens\")\n\"\"\"Path to the Augeas lens directory\"\"\"\n\nREWRITE_HTTPS_ARGS = [\n \"^\", \"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[L,QSA,R=permanent]\"]\n\"\"\"Apache version<2.3.9 rewrite rule arguments used for redirections to https vhost\"\"\"\n\nREWRITE_HTTPS_ARGS_WITH_END = [\n \"^\", \"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[END,QSA,R=permanent]\"]\n\"\"\"Apache version >= 2.3.9 rewrite rule arguments used for redirections to\n https vhost\"\"\"\n\nHSTS_ARGS = [\"always\", \"set\", \"Strict-Transport-Security\",\n \"\\\"max-age=31536000\\\"\"]\n\"\"\"Apache header arguments for HSTS\"\"\"\n\nUIR_ARGS = [\"always\", \"set\", \"Content-Security-Policy\",\n \"upgrade-insecure-requests\"]\n\nHEADER_ARGS = {\"Strict-Transport-Security\": HSTS_ARGS,\n \"Upgrade-Insecure-Requests\": UIR_ARGS}\n\n",
"path": "letsencrypt-apache/letsencrypt_apache/constants.py"
}
] | diff --git a/letsencrypt-apache/letsencrypt_apache/constants.py b/letsencrypt-apache/letsencrypt_apache/constants.py
index eb004b97558..4944ded1f14 100644
--- a/letsencrypt-apache/letsencrypt_apache/constants.py
+++ b/letsencrypt-apache/letsencrypt_apache/constants.py
@@ -33,7 +33,7 @@
https vhost"""
HSTS_ARGS = ["always", "set", "Strict-Transport-Security",
- "\"max-age=31536000; includeSubDomains\""]
+ "\"max-age=31536000\""]
"""Apache header arguments for HSTS"""
UIR_ARGS = ["always", "set", "Content-Security-Policy",
|
Parsl__parsl-414 | File creation fails if no executor has been loaded
```python
In [53]: file = File('first.txt')
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-53-b5dc63ca042a> in <module>()
----> 1 file = File('first.txt')
~/ci/parsl/parsl/data_provider/files.py in __init__(self, url, dman, cache, caching_dir)
40 self.path = parsed_url.path
41 self.filename = os.path.basename(self.path)
---> 42 self.dman = dman if dman else DataManager.get_data_manager()
43 self.data_future = {}
44 if self.scheme == 'globus':
~/ci/parsl/parsl/data_provider/data_manager.py in get_data_manager(cls, max_threads, executors)
24 def get_data_manager(cls, max_threads=None, executors=None):
25 if cls.default_data_manager is None:
---> 26 cls.default_data_manager = DataManager(max_threads=max_threads, executors=executors)
27 return cls.default_data_manager
28
~/ci/parsl/parsl/data_provider/data_manager.py in __init__(self, max_threads, executors)
37
38 self.label = 'data_manager'
---> 39 self.executors = {e.label: e for e in executors}
40 self.max_threads = max_threads
41 self.files = []
TypeError: 'NoneType' object is not iterable
```
| [
{
"content": "import os\nimport logging\nimport requests\nimport ftplib\nimport concurrent.futures as cf\nfrom parsl.data_provider.scheme import GlobusScheme\nfrom parsl.executors.base import ParslExecutor\nfrom parsl.data_provider.globus import get_globus\nfrom parsl.app.app import App\n\nlogger = logging.getLogger(__name__)\n\n\nclass DataManager(ParslExecutor):\n \"\"\"The DataManager is responsible for transferring input and output data.\n\n It uses the Executor interface, where staging tasks are submitted\n to it, and DataFutures are returned.\n \"\"\"\n\n default_data_manager = None\n\n @classmethod\n def get_data_manager(cls, max_threads=None, executors=None):\n if cls.default_data_manager is None:\n cls.default_data_manager = DataManager(max_threads=max_threads, executors=executors)\n return cls.default_data_manager\n\n def __init__(self, max_threads=10, executors=None):\n \"\"\"Initialize the DataManager.\n\n Kwargs:\n - max_threads (int): Number of threads. Default is 10.\n - executors (list of Executors): Executors for which data transfer will be managed.\n \"\"\"\n self._scaling_enabled = False\n\n self.label = 'data_manager'\n self.executors = {e.label: e for e in executors}\n self.max_threads = max_threads\n self.files = []\n self.globus = None\n self.managed = True\n\n def start(self):\n self.executor = cf.ThreadPoolExecutor(max_workers=self.max_threads)\n\n def submit(self, *args, **kwargs):\n \"\"\"Submit a staging app. All optimization should be here.\"\"\"\n return self.executor.submit(*args, **kwargs)\n\n def scale_in(self, blocks, *args, **kwargs):\n pass\n\n def scale_out(self, *args, **kwargs):\n pass\n\n def shutdown(self, block=False):\n \"\"\"Shutdown the ThreadPool.\n\n Kwargs:\n - block (bool): To block for confirmations or not\n\n \"\"\"\n x = self.executor.shutdown(wait=block)\n logger.debug(\"Done with executor shutdown\")\n return x\n\n @property\n def scaling_enabled(self):\n return self._scaling_enabled\n\n def add_file(self, file):\n if file.scheme == 'globus':\n if not self.globus:\n self.globus = get_globus()\n # keep a list of all remote files for optimization purposes (TODO)\n self.files.append(file)\n self._set_local_path(file)\n\n def _set_local_path(self, file):\n globus_ep = self._get_globus_endpoint()\n file.local_path = os.path.join(globus_ep['working_dir'], file.filename)\n\n def _get_globus_endpoint(self, executor_label=None):\n for executor in self.executors.values():\n if executor_label is None or executor.label == executor_label:\n for scheme in executor.storage_access:\n if isinstance(scheme, GlobusScheme):\n working_dir = os.path.normpath(executor.working_dir)\n if scheme.endpoint_path and scheme.local_path:\n endpoint_path = os.path.normpath(scheme.endpoint_path)\n local_path = os.path.normpath(scheme.local_path)\n common_path = os.path.commonpath((local_path, working_dir))\n if local_path != common_path:\n raise Exception('\"local_path\" must be equal or an absolute subpath of \"working_dir\"')\n relative_path = os.path.relpath(working_dir, common_path)\n endpoint_path = os.path.join(endpoint_path, relative_path)\n else:\n endpoint_path = working_dir\n return {'endpoint_uuid': scheme.endpoint_uuid,\n 'endpoint_path': endpoint_path,\n 'working_dir': working_dir}\n raise Exception('No executor with a Globus endpoint and working_dir defined')\n\n def stage_in(self, file, executor):\n \"\"\"Transport the file from the input source to the executor.\n\n This function returns a DataFuture.\n\n Args:\n - self\n - file (File) : file to stage in\n 
- executor (str) : an executor the file is going to be staged in to.\n If the executor argument is not specified for a file\n with 'globus' scheme, the file will be staged in to\n the first executor with the \"globus\" key in a config.\n \"\"\"\n\n if file.scheme == 'file':\n stage_in_app = self._file_stage_in_app()\n app_fut = stage_in_app(outputs=[file])\n return app_fut._outputs[0]\n elif file.scheme == 'ftp':\n working_dir = self.executors[executor].working_dir\n stage_in_app = self._ftp_stage_in_app(executor=executor)\n app_fut = stage_in_app(working_dir, outputs=[file])\n return app_fut._outputs[0]\n elif file.scheme == 'http' or file.scheme == 'https':\n working_dir = self.executors[executor].working_dir\n stage_in_app = self._http_stage_in_app(executor=executor)\n app_fut = stage_in_app(working_dir, outputs=[file])\n return app_fut._outputs[0]\n elif file.scheme == 'globus':\n globus_ep = self._get_globus_endpoint(executor)\n stage_in_app = self._globus_stage_in_app()\n app_fut = stage_in_app(globus_ep, outputs=[file])\n return app_fut._outputs[0]\n else:\n raise Exception('Staging in with unknown file scheme {} is not supported'.format(file.scheme))\n\n def _file_stage_in_app(self):\n return App(\"python\", executors=['data_manager'])(self._file_stage_in)\n\n def _file_stage_in(self, outputs=[]):\n pass\n\n def _ftp_stage_in_app(self, executor):\n return App(\"python\", executors=[executor])(self._ftp_stage_in)\n\n def _ftp_stage_in(self, working_dir, outputs=[]):\n file = outputs[0]\n if working_dir:\n os.makedirs(working_dir, exist_ok=True)\n file.local_path = os.path.join(working_dir, file.filename)\n else:\n file.local_path = file.filename\n with open(file.local_path, 'wb') as f:\n ftp = ftplib.FTP(file.netloc)\n ftp.login()\n ftp.cwd(os.path.dirname(file.path))\n ftp.retrbinary('RETR {}'.format(file.filename), f.write)\n ftp.quit()\n\n def _http_stage_in_app(self, executor):\n return App(\"python\", executors=[executor])(self._http_stage_in)\n\n def _http_stage_in(self, working_dir, outputs=[]):\n file = outputs[0]\n if working_dir:\n os.makedirs(working_dir, exist_ok=True)\n file.local_path = os.path.join(working_dir, file.filename)\n else:\n file.local_path = file.filename\n resp = requests.get(file.url, stream=True)\n with open(file.local_path, 'wb') as f:\n for chunk in resp.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n\n def _globus_stage_in_app(self):\n return App(\"python\", executors=['data_manager'])(self._globus_stage_in)\n\n def _globus_stage_in(self, globus_ep, outputs=[]):\n file = outputs[0]\n file.local_path = os.path.join(\n globus_ep['working_dir'], file.filename)\n dst_path = os.path.join(\n globus_ep['endpoint_path'], file.filename)\n self.globus.transfer_file(\n file.netloc, globus_ep['endpoint_uuid'],\n file.path, dst_path)\n\n def stage_out(self, file, executor):\n \"\"\"Transport the file from the local filesystem to the remote Globus endpoint.\n\n This function returns a DataFuture.\n\n Args:\n - self\n - file (File) - file to stage out\n - executor (str) - Which executor the file is going to be staged out from.\n If the executor argument is not specified for a file\n with the 'globus' scheme, the file will be staged in to\n the first executor with the \"globus\" key in a config.\n \"\"\"\n\n if file.scheme == 'file':\n stage_out_app = self._file_stage_out_app()\n return stage_out_app()\n elif file.scheme == 'http' or file.scheme == 'https':\n raise Exception('HTTP/HTTPS file staging out is not supported')\n elif file.scheme == 'ftp':\n 
raise Exception('FTP file staging out is not supported')\n elif file.scheme == 'globus':\n globus_ep = self._get_globus_endpoint(executor)\n stage_out_app = self._globus_stage_out_app()\n return stage_out_app(globus_ep, inputs=[file])\n else:\n raise Exception('Staging out with unknown file scheme {} is not supported'.format(file.scheme))\n\n def _file_stage_out_app(self):\n return App(\"python\", executors=['data_manager'])(self._file_stage_out)\n\n def _file_stage_out(self):\n pass\n\n def _globus_stage_out_app(self):\n return App(\"python\", executors=['data_manager'])(self._globus_stage_out)\n\n def _globus_stage_out(self, globus_ep, inputs=[]):\n file = inputs[0]\n src_path = os.path.join(globus_ep['endpoint_path'], file.filename)\n self.globus.transfer_file(\n globus_ep['endpoint_uuid'], file.netloc,\n src_path, file.path\n )\n",
"path": "parsl/data_provider/data_manager.py"
}
] | [
{
"content": "import os\nimport logging\nimport requests\nimport ftplib\nimport concurrent.futures as cf\nfrom parsl.data_provider.scheme import GlobusScheme\nfrom parsl.executors.base import ParslExecutor\nfrom parsl.data_provider.globus import get_globus\nfrom parsl.app.app import App\n\nlogger = logging.getLogger(__name__)\n\n\nclass DataManager(ParslExecutor):\n \"\"\"The DataManager is responsible for transferring input and output data.\n\n It uses the Executor interface, where staging tasks are submitted\n to it, and DataFutures are returned.\n \"\"\"\n\n default_data_manager = None\n\n @classmethod\n def get_data_manager(cls, max_threads=None, executors=None):\n if cls.default_data_manager is None:\n cls.default_data_manager = DataManager(max_threads=max_threads, executors=executors)\n return cls.default_data_manager\n\n def __init__(self, max_threads=10, executors=None):\n \"\"\"Initialize the DataManager.\n\n Kwargs:\n - max_threads (int): Number of threads. Default is 10.\n - executors (list of Executors): Executors for which data transfer will be managed.\n \"\"\"\n self._scaling_enabled = False\n\n self.label = 'data_manager'\n if executors is None:\n executors = []\n self.executors = {e.label: e for e in executors}\n self.max_threads = max_threads\n self.files = []\n self.globus = None\n self.managed = True\n\n def start(self):\n self.executor = cf.ThreadPoolExecutor(max_workers=self.max_threads)\n\n def submit(self, *args, **kwargs):\n \"\"\"Submit a staging app. All optimization should be here.\"\"\"\n return self.executor.submit(*args, **kwargs)\n\n def scale_in(self, blocks, *args, **kwargs):\n pass\n\n def scale_out(self, *args, **kwargs):\n pass\n\n def shutdown(self, block=False):\n \"\"\"Shutdown the ThreadPool.\n\n Kwargs:\n - block (bool): To block for confirmations or not\n\n \"\"\"\n x = self.executor.shutdown(wait=block)\n logger.debug(\"Done with executor shutdown\")\n return x\n\n @property\n def scaling_enabled(self):\n return self._scaling_enabled\n\n def add_file(self, file):\n if file.scheme == 'globus':\n if not self.globus:\n self.globus = get_globus()\n # keep a list of all remote files for optimization purposes (TODO)\n self.files.append(file)\n self._set_local_path(file)\n\n def _set_local_path(self, file):\n globus_ep = self._get_globus_endpoint()\n file.local_path = os.path.join(globus_ep['working_dir'], file.filename)\n\n def _get_globus_endpoint(self, executor_label=None):\n for executor in self.executors.values():\n if executor_label is None or executor.label == executor_label:\n for scheme in executor.storage_access:\n if isinstance(scheme, GlobusScheme):\n working_dir = os.path.normpath(executor.working_dir)\n if scheme.endpoint_path and scheme.local_path:\n endpoint_path = os.path.normpath(scheme.endpoint_path)\n local_path = os.path.normpath(scheme.local_path)\n common_path = os.path.commonpath((local_path, working_dir))\n if local_path != common_path:\n raise Exception('\"local_path\" must be equal or an absolute subpath of \"working_dir\"')\n relative_path = os.path.relpath(working_dir, common_path)\n endpoint_path = os.path.join(endpoint_path, relative_path)\n else:\n endpoint_path = working_dir\n return {'endpoint_uuid': scheme.endpoint_uuid,\n 'endpoint_path': endpoint_path,\n 'working_dir': working_dir}\n raise Exception('No executor with a Globus endpoint and working_dir defined')\n\n def stage_in(self, file, executor):\n \"\"\"Transport the file from the input source to the executor.\n\n This function returns a DataFuture.\n\n Args:\n - 
self\n - file (File) : file to stage in\n - executor (str) : an executor the file is going to be staged in to.\n If the executor argument is not specified for a file\n with 'globus' scheme, the file will be staged in to\n the first executor with the \"globus\" key in a config.\n \"\"\"\n\n if file.scheme == 'file':\n stage_in_app = self._file_stage_in_app()\n app_fut = stage_in_app(outputs=[file])\n return app_fut._outputs[0]\n elif file.scheme == 'ftp':\n working_dir = self.executors[executor].working_dir\n stage_in_app = self._ftp_stage_in_app(executor=executor)\n app_fut = stage_in_app(working_dir, outputs=[file])\n return app_fut._outputs[0]\n elif file.scheme == 'http' or file.scheme == 'https':\n working_dir = self.executors[executor].working_dir\n stage_in_app = self._http_stage_in_app(executor=executor)\n app_fut = stage_in_app(working_dir, outputs=[file])\n return app_fut._outputs[0]\n elif file.scheme == 'globus':\n globus_ep = self._get_globus_endpoint(executor)\n stage_in_app = self._globus_stage_in_app()\n app_fut = stage_in_app(globus_ep, outputs=[file])\n return app_fut._outputs[0]\n else:\n raise Exception('Staging in with unknown file scheme {} is not supported'.format(file.scheme))\n\n def _file_stage_in_app(self):\n return App(\"python\", executors=['data_manager'])(self._file_stage_in)\n\n def _file_stage_in(self, outputs=[]):\n pass\n\n def _ftp_stage_in_app(self, executor):\n return App(\"python\", executors=[executor])(self._ftp_stage_in)\n\n def _ftp_stage_in(self, working_dir, outputs=[]):\n file = outputs[0]\n if working_dir:\n os.makedirs(working_dir, exist_ok=True)\n file.local_path = os.path.join(working_dir, file.filename)\n else:\n file.local_path = file.filename\n with open(file.local_path, 'wb') as f:\n ftp = ftplib.FTP(file.netloc)\n ftp.login()\n ftp.cwd(os.path.dirname(file.path))\n ftp.retrbinary('RETR {}'.format(file.filename), f.write)\n ftp.quit()\n\n def _http_stage_in_app(self, executor):\n return App(\"python\", executors=[executor])(self._http_stage_in)\n\n def _http_stage_in(self, working_dir, outputs=[]):\n file = outputs[0]\n if working_dir:\n os.makedirs(working_dir, exist_ok=True)\n file.local_path = os.path.join(working_dir, file.filename)\n else:\n file.local_path = file.filename\n resp = requests.get(file.url, stream=True)\n with open(file.local_path, 'wb') as f:\n for chunk in resp.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n\n def _globus_stage_in_app(self):\n return App(\"python\", executors=['data_manager'])(self._globus_stage_in)\n\n def _globus_stage_in(self, globus_ep, outputs=[]):\n file = outputs[0]\n file.local_path = os.path.join(\n globus_ep['working_dir'], file.filename)\n dst_path = os.path.join(\n globus_ep['endpoint_path'], file.filename)\n self.globus.transfer_file(\n file.netloc, globus_ep['endpoint_uuid'],\n file.path, dst_path)\n\n def stage_out(self, file, executor):\n \"\"\"Transport the file from the local filesystem to the remote Globus endpoint.\n\n This function returns a DataFuture.\n\n Args:\n - self\n - file (File) - file to stage out\n - executor (str) - Which executor the file is going to be staged out from.\n If the executor argument is not specified for a file\n with the 'globus' scheme, the file will be staged in to\n the first executor with the \"globus\" key in a config.\n \"\"\"\n\n if file.scheme == 'file':\n stage_out_app = self._file_stage_out_app()\n return stage_out_app()\n elif file.scheme == 'http' or file.scheme == 'https':\n raise Exception('HTTP/HTTPS file staging out is not 
supported')\n elif file.scheme == 'ftp':\n raise Exception('FTP file staging out is not supported')\n elif file.scheme == 'globus':\n globus_ep = self._get_globus_endpoint(executor)\n stage_out_app = self._globus_stage_out_app()\n return stage_out_app(globus_ep, inputs=[file])\n else:\n raise Exception('Staging out with unknown file scheme {} is not supported'.format(file.scheme))\n\n def _file_stage_out_app(self):\n return App(\"python\", executors=['data_manager'])(self._file_stage_out)\n\n def _file_stage_out(self):\n pass\n\n def _globus_stage_out_app(self):\n return App(\"python\", executors=['data_manager'])(self._globus_stage_out)\n\n def _globus_stage_out(self, globus_ep, inputs=[]):\n file = inputs[0]\n src_path = os.path.join(globus_ep['endpoint_path'], file.filename)\n self.globus.transfer_file(\n globus_ep['endpoint_uuid'], file.netloc,\n src_path, file.path\n )\n",
"path": "parsl/data_provider/data_manager.py"
}
] | diff --git a/parsl/data_provider/data_manager.py b/parsl/data_provider/data_manager.py
index eb7a3df793..f64f7abade 100644
--- a/parsl/data_provider/data_manager.py
+++ b/parsl/data_provider/data_manager.py
@@ -36,6 +36,8 @@ def __init__(self, max_threads=10, executors=None):
self._scaling_enabled = False
self.label = 'data_manager'
+ if executors is None:
+ executors = []
self.executors = {e.label: e for e in executors}
self.max_threads = max_threads
self.files = []
|
cisagov__manage.get.gov-1398 | Improper handling of IP address with blank spaces on DNS nameserver form
### Current Behavior
User enters "1.1. 2.2" into the IP address field of a nameserver form in domain management
1. The blank spaces are NOT stripped from the IP address
2. "1.1. 2.2" is submitted as the IP address value on the form
3. The error "Unable to update domain, changes were not applied. Check logs as a Registry Error is the likely cause" is thrown
### Expected Behavior
User enters "1.1. 2.2" into the IP address field of a nameserver form
1. Blank spaces are stripped from the user-entered IP address (the code handles this scenario; see the sketch below)
2. "1.1.2.2" is submitted as the IP address value on the form
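A minimal sketch of what that stripping looks like inside the form's `clean()` method, using the field names from this record's `DomainNameserverForm`. The essential fix is writing the cleaned value back into `cleaned_data`, which matches the one-line change in this record's diff; the real method also normalizes the server name and runs host/IP validation:
```python
def clean(self):
    cleaned_data = super().clean()
    ip = cleaned_data.get("ip") or ""
    # Remove ANY spaces so "1.1. 2.2" becomes "1.1.2.2" before validation
    # and before the value is sent to the registry.
    ip = ip.replace(" ", "")
    cleaned_data["ip"] = ip  # write the stripped value back onto the form
    return cleaned_data
```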
### Steps to Reproduce
1. Enter an IP address with spaces (e.g., "1.1. 2.2")
### Environment
_No response_
### Additional Context
Acceptance criteria:
- fix the bug
- create unit tests to check for this scenario (to prevent IP address form fields with spaces from causing EPP errors)
### Issue Links
_No response_
| [
{
"content": "\"\"\"Forms for domain management.\"\"\"\n\nfrom django import forms\nfrom django.core.validators import MinValueValidator, MaxValueValidator, RegexValidator\nfrom django.forms import formset_factory\n\nfrom phonenumber_field.widgets import RegionalPhoneNumberWidget\nfrom registrar.utility.errors import (\n NameserverError,\n NameserverErrorCodes as nsErrorCodes,\n DsDataError,\n DsDataErrorCodes,\n SecurityEmailError,\n SecurityEmailErrorCodes,\n)\n\nfrom ..models import Contact, DomainInformation, Domain\nfrom .common import (\n ALGORITHM_CHOICES,\n DIGEST_TYPE_CHOICES,\n)\n\nimport re\n\n\nclass DomainAddUserForm(forms.Form):\n \"\"\"Form for adding a user to a domain.\"\"\"\n\n email = forms.EmailField(label=\"Email\")\n\n\nclass DomainNameserverForm(forms.Form):\n \"\"\"Form for changing nameservers.\"\"\"\n\n domain = forms.CharField(widget=forms.HiddenInput, required=False)\n\n server = forms.CharField(label=\"Name server\", strip=True)\n\n ip = forms.CharField(\n label=\"IP address (IPv4 or IPv6)\",\n strip=True,\n required=False,\n )\n\n def __init__(self, *args, **kwargs):\n super(DomainNameserverForm, self).__init__(*args, **kwargs)\n\n # add custom error messages\n self.fields[\"server\"].error_messages.update(\n {\n \"required\": \"A minimum of 2 name servers are required.\",\n }\n )\n\n def clean(self):\n # clean is called from clean_forms, which is called from is_valid\n # after clean_fields. it is used to determine form level errors.\n # is_valid is typically called from view during a post\n cleaned_data = super().clean()\n self.clean_empty_strings(cleaned_data)\n server = cleaned_data.get(\"server\", \"\")\n # remove ANY spaces in the server field\n server = server.replace(\" \", \"\")\n # lowercase the server\n server = server.lower()\n cleaned_data[\"server\"] = server\n ip = cleaned_data.get(\"ip\", None)\n # remove ANY spaces in the ip field\n ip = ip.replace(\" \", \"\")\n domain = cleaned_data.get(\"domain\", \"\")\n\n ip_list = self.extract_ip_list(ip)\n\n # validate if the form has a server or an ip\n if (ip and ip_list) or server:\n self.validate_nameserver_ip_combo(domain, server, ip_list)\n\n return cleaned_data\n\n def clean_empty_strings(self, cleaned_data):\n ip = cleaned_data.get(\"ip\", \"\")\n if ip and len(ip.strip()) == 0:\n cleaned_data[\"ip\"] = None\n\n def extract_ip_list(self, ip):\n return [ip.strip() for ip in ip.split(\",\")] if ip else []\n\n def validate_nameserver_ip_combo(self, domain, server, ip_list):\n try:\n Domain.checkHostIPCombo(domain, server, ip_list)\n except NameserverError as e:\n if e.code == nsErrorCodes.GLUE_RECORD_NOT_ALLOWED:\n self.add_error(\n \"server\",\n NameserverError(\n code=nsErrorCodes.GLUE_RECORD_NOT_ALLOWED,\n nameserver=domain,\n ip=ip_list,\n ),\n )\n elif e.code == nsErrorCodes.MISSING_IP:\n self.add_error(\n \"ip\",\n NameserverError(code=nsErrorCodes.MISSING_IP, nameserver=domain, ip=ip_list),\n )\n elif e.code == nsErrorCodes.MISSING_HOST:\n self.add_error(\n \"server\",\n NameserverError(code=nsErrorCodes.MISSING_HOST, nameserver=domain, ip=ip_list),\n )\n elif e.code == nsErrorCodes.INVALID_HOST:\n self.add_error(\n \"server\",\n NameserverError(code=nsErrorCodes.INVALID_HOST, nameserver=server, ip=ip_list),\n )\n else:\n self.add_error(\"ip\", str(e))\n\n\nNameserverFormset = formset_factory(\n DomainNameserverForm,\n extra=1,\n max_num=13,\n validate_max=True,\n)\n\n\nclass ContactForm(forms.ModelForm):\n \"\"\"Form for updating contacts.\"\"\"\n\n class Meta:\n model = Contact\n fields = 
[\"first_name\", \"middle_name\", \"last_name\", \"title\", \"email\", \"phone\"]\n widgets = {\n \"first_name\": forms.TextInput,\n \"middle_name\": forms.TextInput,\n \"last_name\": forms.TextInput,\n \"title\": forms.TextInput,\n \"email\": forms.EmailInput,\n \"phone\": RegionalPhoneNumberWidget,\n }\n\n # the database fields have blank=True so ModelForm doesn't create\n # required fields by default. Use this list in __init__ to mark each\n # of these fields as required\n required = [\"first_name\", \"last_name\", \"title\", \"email\", \"phone\"]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # take off maxlength attribute for the phone number field\n # which interferes with out input_with_errors template tag\n self.fields[\"phone\"].widget.attrs.pop(\"maxlength\", None)\n\n for field_name in self.required:\n self.fields[field_name].required = True\n\n # Set custom error messages\n self.fields[\"first_name\"].error_messages = {\"required\": \"Enter your first name / given name.\"}\n self.fields[\"last_name\"].error_messages = {\"required\": \"Enter your last name / family name.\"}\n self.fields[\"title\"].error_messages = {\n \"required\": \"Enter your title or role in your organization (e.g., Chief Information Officer)\"\n }\n self.fields[\"email\"].error_messages = {\n \"required\": \"Enter your email address in the required format, like [email protected].\"\n }\n self.fields[\"phone\"].error_messages = {\"required\": \"Enter your phone number.\"}\n\n\nclass AuthorizingOfficialContactForm(ContactForm):\n \"\"\"Form for updating authorizing official contacts.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Set custom error messages\n self.fields[\"first_name\"].error_messages = {\n \"required\": \"Enter the first name / given name of your authorizing official.\"\n }\n self.fields[\"last_name\"].error_messages = {\n \"required\": \"Enter the last name / family name of your authorizing official.\"\n }\n self.fields[\"title\"].error_messages = {\n \"required\": \"Enter the title or role your authorizing official has in your \\\n organization (e.g., Chief Information Officer).\"\n }\n self.fields[\"email\"].error_messages = {\n \"required\": \"Enter an email address in the required format, like [email protected].\"\n }\n self.fields[\"phone\"].error_messages = {\"required\": \"Enter a phone number for your authorizing official.\"}\n\n\nclass DomainSecurityEmailForm(forms.Form):\n \"\"\"Form for adding or editing a security email to a domain.\"\"\"\n\n security_email = forms.EmailField(\n label=\"Security email\",\n required=False,\n error_messages={\n \"invalid\": str(SecurityEmailError(code=SecurityEmailErrorCodes.BAD_DATA)),\n },\n )\n\n\nclass DomainOrgNameAddressForm(forms.ModelForm):\n \"\"\"Form for updating the organization name and mailing address.\"\"\"\n\n zipcode = forms.CharField(\n label=\"Zip code\",\n validators=[\n RegexValidator(\n \"^[0-9]{5}(?:-[0-9]{4})?$|^$\",\n message=\"Enter a zip code in the form of 12345 or 12345-6789.\",\n )\n ],\n )\n\n class Meta:\n model = DomainInformation\n fields = [\n \"federal_agency\",\n \"organization_name\",\n \"address_line1\",\n \"address_line2\",\n \"city\",\n \"state_territory\",\n \"zipcode\",\n \"urbanization\",\n ]\n error_messages = {\n \"federal_agency\": {\"required\": \"Select the federal agency for your organization.\"},\n \"organization_name\": {\"required\": \"Enter the name of your organization.\"},\n \"address_line1\": {\"required\": \"Enter the 
street address of your organization.\"},\n \"city\": {\"required\": \"Enter the city where your organization is located.\"},\n \"state_territory\": {\n \"required\": \"Select the state, territory, or military post where your organization is located.\"\n },\n }\n widgets = {\n # We need to set the required attributed for federal_agency and\n # state/territory because for these fields we are creating an individual\n # instance of the Select. For the other fields we use the for loop to set\n # the class's required attribute to true.\n \"federal_agency\": forms.Select(attrs={\"required\": True}, choices=DomainInformation.AGENCY_CHOICES),\n \"organization_name\": forms.TextInput,\n \"address_line1\": forms.TextInput,\n \"address_line2\": forms.TextInput,\n \"city\": forms.TextInput,\n \"state_territory\": forms.Select(\n attrs={\n \"required\": True,\n },\n choices=DomainInformation.StateTerritoryChoices.choices,\n ),\n \"urbanization\": forms.TextInput,\n }\n\n # the database fields have blank=True so ModelForm doesn't create\n # required fields by default. Use this list in __init__ to mark each\n # of these fields as required\n required = [\"organization_name\", \"address_line1\", \"city\", \"zipcode\"]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field_name in self.required:\n self.fields[field_name].required = True\n self.fields[\"state_territory\"].widget.attrs.pop(\"maxlength\", None)\n self.fields[\"zipcode\"].widget.attrs.pop(\"maxlength\", None)\n\n\nclass DomainDnssecForm(forms.Form):\n \"\"\"Form for enabling and disabling dnssec\"\"\"\n\n\nclass DomainDsdataForm(forms.Form):\n \"\"\"Form for adding or editing DNSSEC DS Data to a domain.\"\"\"\n\n def validate_hexadecimal(value):\n \"\"\"\n Tests that string matches all hexadecimal values.\n\n Raise validation error to display error in form\n if invalid characters entered\n \"\"\"\n if not re.match(r\"^[0-9a-fA-F]+$\", value):\n raise forms.ValidationError(str(DsDataError(code=DsDataErrorCodes.INVALID_DIGEST_CHARS)))\n\n key_tag = forms.IntegerField(\n required=True,\n label=\"Key tag\",\n validators=[\n MinValueValidator(0, message=str(DsDataError(code=DsDataErrorCodes.INVALID_KEYTAG_SIZE))),\n MaxValueValidator(65535, message=str(DsDataError(code=DsDataErrorCodes.INVALID_KEYTAG_SIZE))),\n ],\n error_messages={\"required\": (\"Key tag is required.\")},\n )\n\n algorithm = forms.TypedChoiceField(\n required=True,\n label=\"Algorithm\",\n coerce=int, # need to coerce into int so dsData objects can be compared\n choices=[(None, \"--Select--\")] + ALGORITHM_CHOICES, # type: ignore\n error_messages={\"required\": (\"Algorithm is required.\")},\n )\n\n digest_type = forms.TypedChoiceField(\n required=True,\n label=\"Digest type\",\n coerce=int, # need to coerce into int so dsData objects can be compared\n choices=[(None, \"--Select--\")] + DIGEST_TYPE_CHOICES, # type: ignore\n error_messages={\"required\": (\"Digest type is required.\")},\n )\n\n digest = forms.CharField(\n required=True,\n label=\"Digest\",\n validators=[validate_hexadecimal],\n error_messages={\n \"required\": \"Digest is required.\",\n },\n )\n\n def clean(self):\n # clean is called from clean_forms, which is called from is_valid\n # after clean_fields. 
it is used to determine form level errors.\n # is_valid is typically called from view during a post\n cleaned_data = super().clean()\n digest_type = cleaned_data.get(\"digest_type\", 0)\n digest = cleaned_data.get(\"digest\", \"\")\n # validate length of digest depending on digest_type\n if digest_type == 1 and len(digest) != 40:\n self.add_error(\n \"digest\",\n DsDataError(code=DsDataErrorCodes.INVALID_DIGEST_SHA1),\n )\n elif digest_type == 2 and len(digest) != 64:\n self.add_error(\n \"digest\",\n DsDataError(code=DsDataErrorCodes.INVALID_DIGEST_SHA256),\n )\n return cleaned_data\n\n\nDomainDsdataFormset = formset_factory(\n DomainDsdataForm,\n extra=0,\n can_delete=True,\n)\n",
"path": "src/registrar/forms/domain.py"
}
] | [
{
"content": "\"\"\"Forms for domain management.\"\"\"\n\nfrom django import forms\nfrom django.core.validators import MinValueValidator, MaxValueValidator, RegexValidator\nfrom django.forms import formset_factory\n\nfrom phonenumber_field.widgets import RegionalPhoneNumberWidget\nfrom registrar.utility.errors import (\n NameserverError,\n NameserverErrorCodes as nsErrorCodes,\n DsDataError,\n DsDataErrorCodes,\n SecurityEmailError,\n SecurityEmailErrorCodes,\n)\n\nfrom ..models import Contact, DomainInformation, Domain\nfrom .common import (\n ALGORITHM_CHOICES,\n DIGEST_TYPE_CHOICES,\n)\n\nimport re\n\n\nclass DomainAddUserForm(forms.Form):\n \"\"\"Form for adding a user to a domain.\"\"\"\n\n email = forms.EmailField(label=\"Email\")\n\n\nclass DomainNameserverForm(forms.Form):\n \"\"\"Form for changing nameservers.\"\"\"\n\n domain = forms.CharField(widget=forms.HiddenInput, required=False)\n\n server = forms.CharField(label=\"Name server\", strip=True)\n\n ip = forms.CharField(\n label=\"IP address (IPv4 or IPv6)\",\n strip=True,\n required=False,\n )\n\n def __init__(self, *args, **kwargs):\n super(DomainNameserverForm, self).__init__(*args, **kwargs)\n\n # add custom error messages\n self.fields[\"server\"].error_messages.update(\n {\n \"required\": \"A minimum of 2 name servers are required.\",\n }\n )\n\n def clean(self):\n # clean is called from clean_forms, which is called from is_valid\n # after clean_fields. it is used to determine form level errors.\n # is_valid is typically called from view during a post\n cleaned_data = super().clean()\n self.clean_empty_strings(cleaned_data)\n server = cleaned_data.get(\"server\", \"\")\n # remove ANY spaces in the server field\n server = server.replace(\" \", \"\")\n # lowercase the server\n server = server.lower()\n cleaned_data[\"server\"] = server\n ip = cleaned_data.get(\"ip\", None)\n # remove ANY spaces in the ip field\n ip = ip.replace(\" \", \"\")\n cleaned_data[\"ip\"] = ip\n domain = cleaned_data.get(\"domain\", \"\")\n\n ip_list = self.extract_ip_list(ip)\n\n # validate if the form has a server or an ip\n if (ip and ip_list) or server:\n self.validate_nameserver_ip_combo(domain, server, ip_list)\n\n return cleaned_data\n\n def clean_empty_strings(self, cleaned_data):\n ip = cleaned_data.get(\"ip\", \"\")\n if ip and len(ip.strip()) == 0:\n cleaned_data[\"ip\"] = None\n\n def extract_ip_list(self, ip):\n return [ip.strip() for ip in ip.split(\",\")] if ip else []\n\n def validate_nameserver_ip_combo(self, domain, server, ip_list):\n try:\n Domain.checkHostIPCombo(domain, server, ip_list)\n except NameserverError as e:\n if e.code == nsErrorCodes.GLUE_RECORD_NOT_ALLOWED:\n self.add_error(\n \"server\",\n NameserverError(\n code=nsErrorCodes.GLUE_RECORD_NOT_ALLOWED,\n nameserver=domain,\n ip=ip_list,\n ),\n )\n elif e.code == nsErrorCodes.MISSING_IP:\n self.add_error(\n \"ip\",\n NameserverError(code=nsErrorCodes.MISSING_IP, nameserver=domain, ip=ip_list),\n )\n elif e.code == nsErrorCodes.MISSING_HOST:\n self.add_error(\n \"server\",\n NameserverError(code=nsErrorCodes.MISSING_HOST, nameserver=domain, ip=ip_list),\n )\n elif e.code == nsErrorCodes.INVALID_HOST:\n self.add_error(\n \"server\",\n NameserverError(code=nsErrorCodes.INVALID_HOST, nameserver=server, ip=ip_list),\n )\n else:\n self.add_error(\"ip\", str(e))\n\n\nNameserverFormset = formset_factory(\n DomainNameserverForm,\n extra=1,\n max_num=13,\n validate_max=True,\n)\n\n\nclass ContactForm(forms.ModelForm):\n \"\"\"Form for updating contacts.\"\"\"\n\n class Meta:\n 
model = Contact\n fields = [\"first_name\", \"middle_name\", \"last_name\", \"title\", \"email\", \"phone\"]\n widgets = {\n \"first_name\": forms.TextInput,\n \"middle_name\": forms.TextInput,\n \"last_name\": forms.TextInput,\n \"title\": forms.TextInput,\n \"email\": forms.EmailInput,\n \"phone\": RegionalPhoneNumberWidget,\n }\n\n # the database fields have blank=True so ModelForm doesn't create\n # required fields by default. Use this list in __init__ to mark each\n # of these fields as required\n required = [\"first_name\", \"last_name\", \"title\", \"email\", \"phone\"]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # take off maxlength attribute for the phone number field\n # which interferes with out input_with_errors template tag\n self.fields[\"phone\"].widget.attrs.pop(\"maxlength\", None)\n\n for field_name in self.required:\n self.fields[field_name].required = True\n\n # Set custom error messages\n self.fields[\"first_name\"].error_messages = {\"required\": \"Enter your first name / given name.\"}\n self.fields[\"last_name\"].error_messages = {\"required\": \"Enter your last name / family name.\"}\n self.fields[\"title\"].error_messages = {\n \"required\": \"Enter your title or role in your organization (e.g., Chief Information Officer)\"\n }\n self.fields[\"email\"].error_messages = {\n \"required\": \"Enter your email address in the required format, like [email protected].\"\n }\n self.fields[\"phone\"].error_messages = {\"required\": \"Enter your phone number.\"}\n\n\nclass AuthorizingOfficialContactForm(ContactForm):\n \"\"\"Form for updating authorizing official contacts.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Set custom error messages\n self.fields[\"first_name\"].error_messages = {\n \"required\": \"Enter the first name / given name of your authorizing official.\"\n }\n self.fields[\"last_name\"].error_messages = {\n \"required\": \"Enter the last name / family name of your authorizing official.\"\n }\n self.fields[\"title\"].error_messages = {\n \"required\": \"Enter the title or role your authorizing official has in your \\\n organization (e.g., Chief Information Officer).\"\n }\n self.fields[\"email\"].error_messages = {\n \"required\": \"Enter an email address in the required format, like [email protected].\"\n }\n self.fields[\"phone\"].error_messages = {\"required\": \"Enter a phone number for your authorizing official.\"}\n\n\nclass DomainSecurityEmailForm(forms.Form):\n \"\"\"Form for adding or editing a security email to a domain.\"\"\"\n\n security_email = forms.EmailField(\n label=\"Security email\",\n required=False,\n error_messages={\n \"invalid\": str(SecurityEmailError(code=SecurityEmailErrorCodes.BAD_DATA)),\n },\n )\n\n\nclass DomainOrgNameAddressForm(forms.ModelForm):\n \"\"\"Form for updating the organization name and mailing address.\"\"\"\n\n zipcode = forms.CharField(\n label=\"Zip code\",\n validators=[\n RegexValidator(\n \"^[0-9]{5}(?:-[0-9]{4})?$|^$\",\n message=\"Enter a zip code in the form of 12345 or 12345-6789.\",\n )\n ],\n )\n\n class Meta:\n model = DomainInformation\n fields = [\n \"federal_agency\",\n \"organization_name\",\n \"address_line1\",\n \"address_line2\",\n \"city\",\n \"state_territory\",\n \"zipcode\",\n \"urbanization\",\n ]\n error_messages = {\n \"federal_agency\": {\"required\": \"Select the federal agency for your organization.\"},\n \"organization_name\": {\"required\": \"Enter the name of your organization.\"},\n \"address_line1\": 
{\"required\": \"Enter the street address of your organization.\"},\n \"city\": {\"required\": \"Enter the city where your organization is located.\"},\n \"state_territory\": {\n \"required\": \"Select the state, territory, or military post where your organization is located.\"\n },\n }\n widgets = {\n # We need to set the required attributed for federal_agency and\n # state/territory because for these fields we are creating an individual\n # instance of the Select. For the other fields we use the for loop to set\n # the class's required attribute to true.\n \"federal_agency\": forms.Select(attrs={\"required\": True}, choices=DomainInformation.AGENCY_CHOICES),\n \"organization_name\": forms.TextInput,\n \"address_line1\": forms.TextInput,\n \"address_line2\": forms.TextInput,\n \"city\": forms.TextInput,\n \"state_territory\": forms.Select(\n attrs={\n \"required\": True,\n },\n choices=DomainInformation.StateTerritoryChoices.choices,\n ),\n \"urbanization\": forms.TextInput,\n }\n\n # the database fields have blank=True so ModelForm doesn't create\n # required fields by default. Use this list in __init__ to mark each\n # of these fields as required\n required = [\"organization_name\", \"address_line1\", \"city\", \"zipcode\"]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field_name in self.required:\n self.fields[field_name].required = True\n self.fields[\"state_territory\"].widget.attrs.pop(\"maxlength\", None)\n self.fields[\"zipcode\"].widget.attrs.pop(\"maxlength\", None)\n\n\nclass DomainDnssecForm(forms.Form):\n \"\"\"Form for enabling and disabling dnssec\"\"\"\n\n\nclass DomainDsdataForm(forms.Form):\n \"\"\"Form for adding or editing DNSSEC DS Data to a domain.\"\"\"\n\n def validate_hexadecimal(value):\n \"\"\"\n Tests that string matches all hexadecimal values.\n\n Raise validation error to display error in form\n if invalid characters entered\n \"\"\"\n if not re.match(r\"^[0-9a-fA-F]+$\", value):\n raise forms.ValidationError(str(DsDataError(code=DsDataErrorCodes.INVALID_DIGEST_CHARS)))\n\n key_tag = forms.IntegerField(\n required=True,\n label=\"Key tag\",\n validators=[\n MinValueValidator(0, message=str(DsDataError(code=DsDataErrorCodes.INVALID_KEYTAG_SIZE))),\n MaxValueValidator(65535, message=str(DsDataError(code=DsDataErrorCodes.INVALID_KEYTAG_SIZE))),\n ],\n error_messages={\"required\": (\"Key tag is required.\")},\n )\n\n algorithm = forms.TypedChoiceField(\n required=True,\n label=\"Algorithm\",\n coerce=int, # need to coerce into int so dsData objects can be compared\n choices=[(None, \"--Select--\")] + ALGORITHM_CHOICES, # type: ignore\n error_messages={\"required\": (\"Algorithm is required.\")},\n )\n\n digest_type = forms.TypedChoiceField(\n required=True,\n label=\"Digest type\",\n coerce=int, # need to coerce into int so dsData objects can be compared\n choices=[(None, \"--Select--\")] + DIGEST_TYPE_CHOICES, # type: ignore\n error_messages={\"required\": (\"Digest type is required.\")},\n )\n\n digest = forms.CharField(\n required=True,\n label=\"Digest\",\n validators=[validate_hexadecimal],\n error_messages={\n \"required\": \"Digest is required.\",\n },\n )\n\n def clean(self):\n # clean is called from clean_forms, which is called from is_valid\n # after clean_fields. 
it is used to determine form level errors.\n # is_valid is typically called from view during a post\n cleaned_data = super().clean()\n digest_type = cleaned_data.get(\"digest_type\", 0)\n digest = cleaned_data.get(\"digest\", \"\")\n # validate length of digest depending on digest_type\n if digest_type == 1 and len(digest) != 40:\n self.add_error(\n \"digest\",\n DsDataError(code=DsDataErrorCodes.INVALID_DIGEST_SHA1),\n )\n elif digest_type == 2 and len(digest) != 64:\n self.add_error(\n \"digest\",\n DsDataError(code=DsDataErrorCodes.INVALID_DIGEST_SHA256),\n )\n return cleaned_data\n\n\nDomainDsdataFormset = formset_factory(\n DomainDsdataForm,\n extra=0,\n can_delete=True,\n)\n",
"path": "src/registrar/forms/domain.py"
}
] | diff --git a/src/registrar/forms/domain.py b/src/registrar/forms/domain.py
index ae83650cb..9c09467cd 100644
--- a/src/registrar/forms/domain.py
+++ b/src/registrar/forms/domain.py
@@ -67,6 +67,7 @@ def clean(self):
ip = cleaned_data.get("ip", None)
# remove ANY spaces in the ip field
ip = ip.replace(" ", "")
+ cleaned_data["ip"] = ip
domain = cleaned_data.get("domain", "")
ip_list = self.extract_ip_list(ip)
diff --git a/src/registrar/tests/common.py b/src/registrar/tests/common.py
index 9a062106f..d745669e5 100644
--- a/src/registrar/tests/common.py
+++ b/src/registrar/tests/common.py
@@ -859,15 +859,9 @@ def mockSend(self, _request, cleaned):
case commands.UpdateDomain:
return self.mockUpdateDomainCommands(_request, cleaned)
case commands.CreateHost:
- return MagicMock(
- res_data=[self.mockDataHostChange],
- code=ErrorCode.COMMAND_COMPLETED_SUCCESSFULLY,
- )
+ return self.mockCreateHostCommands(_request, cleaned)
case commands.UpdateHost:
- return MagicMock(
- res_data=[self.mockDataHostChange],
- code=ErrorCode.COMMAND_COMPLETED_SUCCESSFULLY,
- )
+ return self.mockUpdateHostCommands(_request, cleaned)
case commands.DeleteHost:
return MagicMock(
res_data=[self.mockDataHostChange],
@@ -882,6 +876,28 @@ def mockSend(self, _request, cleaned):
case _:
return MagicMock(res_data=[self.mockDataInfoHosts])
+ def mockCreateHostCommands(self, _request, cleaned):
+ test_ws_ip = common.Ip(addr="1.1. 1.1")
+ addrs_submitted = getattr(_request, "addrs", [])
+ if test_ws_ip in addrs_submitted:
+ raise RegistryError(code=ErrorCode.PARAMETER_VALUE_RANGE_ERROR)
+ else:
+ return MagicMock(
+ res_data=[self.mockDataHostChange],
+ code=ErrorCode.COMMAND_COMPLETED_SUCCESSFULLY,
+ )
+
+ def mockUpdateHostCommands(self, _request, cleaned):
+ test_ws_ip = common.Ip(addr="1.1. 1.1")
+ addrs_submitted = getattr(_request, "addrs", [])
+ if test_ws_ip in addrs_submitted:
+ raise RegistryError(code=ErrorCode.PARAMETER_VALUE_RANGE_ERROR)
+ else:
+ return MagicMock(
+ res_data=[self.mockDataHostChange],
+ code=ErrorCode.COMMAND_COMPLETED_SUCCESSFULLY,
+ )
+
def mockUpdateDomainCommands(self, _request, cleaned):
if getattr(_request, "name", None) == "dnssec-invalid.gov":
raise RegistryError(code=ErrorCode.PARAMETER_VALUE_RANGE_ERROR)
diff --git a/src/registrar/tests/test_views.py b/src/registrar/tests/test_views.py
index 936c344f7..39b23b546 100644
--- a/src/registrar/tests/test_views.py
+++ b/src/registrar/tests/test_views.py
@@ -1462,6 +1462,38 @@ def test_domain_nameservers_form_submit_missing_host(self):
status_code=200,
)
+ def test_domain_nameservers_form_submit_whitespace(self):
+ """Nameserver form removes whitespace from ip.
+
+ Uses self.app WebTest because we need to interact with forms.
+ """
+ nameserver1 = "ns1.igorville.gov"
+ nameserver2 = "ns2.igorville.gov"
+ valid_ip = "1.1. 1.1"
+ # initial nameservers page has one server with two ips
+ # have to throw an error in order to test that the whitespace has been stripped from ip
+ nameservers_page = self.app.get(reverse("domain-dns-nameservers", kwargs={"pk": self.domain.id}))
+ session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]
+ self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)
+ # attempt to submit the form without one host and an ip with whitespace
+ nameservers_page.form["form-0-server"] = nameserver1
+ nameservers_page.form["form-1-ip"] = valid_ip
+ nameservers_page.form["form-1-server"] = nameserver2
+ with less_console_noise(): # swallow log warning message
+ result = nameservers_page.form.submit()
+ # form submission was a post with an ip address which has been stripped of whitespace,
+ # response should be a 302 to success page
+ self.assertEqual(result.status_code, 302)
+ self.assertEqual(
+ result["Location"],
+ reverse("domain-dns-nameservers", kwargs={"pk": self.domain.id}),
+ )
+ self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)
+ page = result.follow()
+ # in the event of a generic nameserver error from registry error, there will be a 302
+ # with an error message displayed, so need to follow 302 and test for success message
+ self.assertContains(page, "The name servers for this domain have been updated")
+
def test_domain_nameservers_form_submit_glue_record_not_allowed(self):
"""Nameserver form catches error when IP is present
but host not subdomain.
@@ -1553,7 +1585,7 @@ def test_domain_nameservers_form_submits_successfully(self):
"""
nameserver1 = "ns1.igorville.gov"
nameserver2 = "ns2.igorville.gov"
- invalid_ip = "127.0.0.1"
+ valid_ip = "127.0.0.1"
# initial nameservers page has one server with two ips
nameservers_page = self.app.get(reverse("domain-dns-nameservers", kwargs={"pk": self.domain.id}))
session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]
@@ -1562,7 +1594,7 @@ def test_domain_nameservers_form_submits_successfully(self):
# only one has ips
nameservers_page.form["form-0-server"] = nameserver1
nameservers_page.form["form-1-server"] = nameserver2
- nameservers_page.form["form-1-ip"] = invalid_ip
+ nameservers_page.form["form-1-ip"] = valid_ip
with less_console_noise(): # swallow log warning message
result = nameservers_page.form.submit()
# form submission was a successful post, response should be a 302
|
pytorch__ignite-2717 | mypy issue on master
```bash
Run bash ./tests/run_code_style.sh mypy
+ '[' mypy = lint ']'
+ '[' mypy = fmt ']'
+ '[' mypy = mypy ']'
+ mypy --config-file mypy.ini
ignite/distributed/comp_models/native.py:125: error: Argument 3 to
"init_process_group" has incompatible type "**Dict[str, int]"; expected
"timedelta" [arg-type]
...init_process_group(backend, init_method=init_method, **init_pg_kwargs)
^
ignite/distributed/comp_models/native.py:125: error: Argument 3 to
"init_process_group" has incompatible type "**Dict[str, int]"; expected
"Optional[Store]" [arg-type]
...init_process_group(backend, init_method=init_method, **init_pg_kwargs)
^
ignite/distributed/comp_models/native.py:125: error: Argument 3 to
"init_process_group" has incompatible type "**Dict[str, int]"; expected "str"
[arg-type]
...init_process_group(backend, init_method=init_method, **init_pg_kwargs)
^
Found 3 errors in 1 file (checked 103 source files)
```
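All three errors are the same complaint: mypy infers `init_pg_kwargs` as `Dict[str, int]` from the values assigned to it, so unpacking it with `**` clashes with `init_process_group`'s `timeout` (timedelta), `store` (Optional[Store]), and `group_name` (str) parameters. One common way to satisfy mypy, shown here with a hypothetical `_init_pg` wrapper purely for illustration (the project's actual fix may differ), is to annotate the dict as `Dict[str, Any]`:
```python
from datetime import timedelta
from typing import Any, Dict, Optional

import torch.distributed as dist


def _init_pg(backend: str, init_method: str = "env://",
             timeout: Optional[timedelta] = None) -> None:
    # Annotating explicitly keeps mypy from inferring a narrower value type
    # (e.g. Dict[str, int]) and then rejecting the ** unpacking against
    # init_process_group's timedelta/Store/str parameters.
    init_pg_kwargs: Dict[str, Any] = {}
    if timeout is not None:
        init_pg_kwargs["timeout"] = timeout
    dist.init_process_group(backend, init_method=init_method, **init_pg_kwargs)
```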
| [
{
"content": "import os\nimport re\nimport subprocess\nimport warnings\nfrom typing import Any, Callable, cast, Dict, List, Mapping, Optional, Tuple, Union\n\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom packaging.version import Version\n\nfrom ignite.distributed.comp_models.base import ComputationModel\n\nhas_native_dist_support = dist.is_available()\n\n\nif has_native_dist_support:\n\n NCCL = dist.Backend.NCCL\n GLOO = dist.Backend.GLOO\n MPI = dist.Backend.MPI\n\n class _NativeDistModel(ComputationModel):\n \"\"\"Private class for PyTorch native distributed computation model.\n\n Supported `backends <https://pytorch.org/docs/stable/distributed.html#backends>`_:\n\n - NCCL\n - GLOO\n - MPI\n\n In this implementation we assume the following mapping between backend and devices:\n\n - NCCL <-> GPU\n - GLOO <-> CPU or GPU\n - MPI <-> CPU\n\n \"\"\"\n\n name = \"native-dist\"\n available_backends = tuple(name for name in [NCCL, GLOO, MPI] if getattr(dist, f\"is_{name}_available\")())\n\n @staticmethod\n def create_from_context() -> Optional[\"_NativeDistModel\"]:\n if not (dist.is_available() and dist.is_initialized()):\n return None\n return _NativeDistModel()\n\n @staticmethod\n def create_from_backend(\n backend: str,\n init_method: Optional[str] = None,\n world_size: Optional[int] = None,\n rank: Optional[int] = None,\n **kwargs: Any,\n ) -> \"_NativeDistModel\":\n if backend not in _NativeDistModel.available_backends:\n raise ValueError(f\"Backend should be one of '{_NativeDistModel.available_backends}'\")\n\n if dist.is_available() and dist.is_initialized():\n raise RuntimeError(\"Can not create new distributed process group if default one is already initialized\")\n\n if init_method is None:\n if world_size is not None or rank is not None:\n raise ValueError(\"Arguments rank and world_size should be None if no init_method is provided\")\n else:\n has_rank = rank is not None\n has_ws = world_size is not None\n if (has_rank or has_ws) and (not has_rank or not has_ws):\n raise ValueError(f\"Both rank and world_size should be provided, but given {rank} and {world_size}\")\n\n return _NativeDistModel(\n backend=backend, init_method=init_method, world_size=world_size, rank=rank, **kwargs\n )\n\n def __init__(\n self,\n backend: Optional[str] = None,\n timeout: Optional[int] = None,\n init_method: Optional[str] = None,\n world_size: Optional[int] = None,\n rank: Optional[int] = None,\n **kwargs: Any,\n ) -> None:\n \"\"\"This is a private method. 
Please, use `create_from_backend` or `create_from_context`\"\"\"\n super(_NativeDistModel, self).__init__()\n self._env_backup = None # type: Optional[Dict[str, str]]\n self._local_rank = None # type: Optional[int]\n self._master_port = None # type: Optional[int]\n self._master_addr = None # type: Optional[str]\n self._init_method = None # type: Optional[str]\n if backend is not None:\n self._create_from_backend(\n backend, timeout=timeout, init_method=init_method, world_size=world_size, rank=rank, **kwargs\n )\n else:\n self._init_from_context()\n\n def _create_from_backend(\n self,\n backend: str,\n timeout: Optional[int] = None,\n init_method: Optional[str] = None,\n world_size: Optional[int] = None,\n rank: Optional[int] = None,\n **kwargs: Any,\n ) -> None:\n if backend == dist.Backend.NCCL and not torch.cuda.is_available():\n raise RuntimeError(\"Nccl backend is required but no cuda capable devices\")\n self._backend = backend\n self.setup_env_vars(rank, world_size)\n\n init_pg_kwargs = {}\n if timeout is not None:\n init_pg_kwargs[\"timeout\"] = timeout\n\n if init_method is None:\n init_method = \"env://\"\n\n if \"env\" not in init_method:\n init_pg_kwargs[\"world_size\"] = int(os.environ[\"WORLD_SIZE\"])\n init_pg_kwargs[\"rank\"] = int(os.environ[\"RANK\"])\n self._init_method = init_method\n\n dist.init_process_group(backend, init_method=init_method, **init_pg_kwargs)\n\n if torch.cuda.is_available():\n torch.cuda.set_device(self._local_rank)\n\n # Call barrier after init_process_group as in\n # https://github.com/facebookresearch/maskrcnn-benchmark/issues/172\n # Define device ids for NCCL to avoid warnings\n # [W ProcessGroupNCCL.cpp:1569] Rank 0 using best-guess GPU 0 to perform barrier as devices used by\n # this process are currently unknown. 
This can potentially cause a hang if this rank to GPU mapping\n # is incorrect.Specify device_ids in barrier() to force use of a particular device.\n if backend == dist.Backend.NCCL and Version(torch.__version__) >= Version(\"1.8.0\"):\n device_ids = [torch.cuda.current_device()]\n dist.barrier(device_ids=device_ids)\n else:\n # For older versions there is no device_ids arg\n dist.barrier()\n\n self._setup_attrs()\n\n def _init_from_context(self) -> None:\n self._backend = dist.get_backend()\n self._identify_local_rank()\n self._setup_attrs()\n\n def _compute_nproc_per_node(self) -> int:\n local_rank = self.get_local_rank()\n # Create new cpu group to get nproc_per_node such we avoid using\n # badly configured NCCL\n gloo_group = dist.new_group(backend=\"gloo\")\n tensor = torch.tensor([local_rank + 1]).to(\"cpu\")\n dist.all_reduce(tensor, op=dist.ReduceOp.MAX, group=gloo_group)\n return int(tensor.item())\n\n def _get_all_hostnames(self) -> List[Tuple[str, ...]]:\n import socket\n\n device = \"cpu\"\n if torch.cuda.is_available():\n index = torch.cuda.current_device()\n device = f\"cuda:{index}\"\n hostname = socket.gethostname()\n name = torch.tensor(bytearray(hostname, \"utf-8\")).to(device)\n padded_t_name = torch.zeros(256, device=device, dtype=torch.long)\n padded_t_name[: len(name)] = name\n out_t_names = [torch.zeros_like(padded_t_name) for _ in range(self.get_world_size())]\n dist.all_gather(out_t_names, padded_t_name)\n return [tuple(t.cpu().tolist()) for t in out_t_names]\n\n @staticmethod\n def _compute_node_and_local_ranks(rank: int, hostnames: List[Tuple[str, ...]]) -> Tuple[int, int]:\n from collections import Counter\n\n c = Counter(hostnames) # type: Counter\n sizes = torch.tensor([0] + list(c.values()))\n cumsum_sizes = torch.cumsum(sizes, dim=0)\n node_rank = (rank // cumsum_sizes[1:]).clamp(0, 1).sum().item()\n local_rank = rank - cumsum_sizes[node_rank].item()\n return int(local_rank), node_rank\n\n def _compute_local_rank_via_hostname(self) -> int:\n # get all hostnames\n hostnames = self._get_all_hostnames()\n local_rank, self._node = self._compute_node_and_local_ranks(self.get_rank(), hostnames)\n\n if local_rank < 0 or self._node < 0:\n raise ValueError(\n \"Failed to correctly estimate local rank. \"\n f\"Debugging info: local rank: {local_rank}, node rank: {self._node}, hostnames: {hostnames}\"\n )\n return local_rank\n\n def _identify_local_rank(self) -> None:\n\n if \"SLURM_JOB_ID\" in os.environ:\n os.environ[\"LOCAL_RANK\"] = os.environ[\"SLURM_LOCALID\"]\n\n if \"LOCAL_RANK\" in os.environ:\n self._local_rank = int(os.environ[\"LOCAL_RANK\"])\n elif self._ext_local_rank is not None:\n self._local_rank = self._ext_local_rank\n else:\n warnings.warn(\n \"Local rank information for native distributed setting will be initialized using \"\n \"a heuristic approach based on the hostnames. In some corner cases, determined \"\n \"local rank can be different from the real setup. 
To avoid this warning, \"\n \"please either set `os.environ['LOCAL_RANK']` \"\n \"or use `idist.set_local_rank(local_rank)` with correct local rank index.\"\n )\n # use socket gethostname heuristic to determine number of nodes => local rank\n self._local_rank = self._compute_local_rank_via_hostname()\n\n def setup_env_vars(self, rank: Optional[int] = None, world_size: Optional[int] = None) -> None:\n\n self._env_backup = os.environ.copy()\n\n if \"SLURM_JOB_ID\" in os.environ:\n if rank is not None or world_size is not None:\n raise ValueError(\"Arguments rank and world_size should not be specified with SLURM\")\n self._setup_env_in_slurm()\n else:\n env_vars = [\"RANK\", \"LOCAL_RANK\", \"WORLD_SIZE\"]\n all_env_vars_defined = [k in os.environ for k in env_vars]\n # check if all necessary env vars are set\n # if partially defined raise an error\n if any(all_env_vars_defined) and not all(all_env_vars_defined):\n raise RuntimeError(f\"PyTorch distributed configuration should define env variables '{env_vars}'\")\n\n os.environ[\"RANK\"] = os.environ.get(\"RANK\", f\"{rank if rank is not None else 0}\")\n os.environ[\"WORLD_SIZE\"] = os.environ.get(\n \"WORLD_SIZE\", f\"{world_size if world_size is not None else 1}\"\n )\n os.environ[\"LOCAL_RANK\"] = os.environ.get(\"LOCAL_RANK\", \"0\")\n os.environ[\"MASTER_PORT\"] = os.environ.get(\"MASTER_PORT\", \"15000\")\n os.environ[\"MASTER_ADDR\"] = os.environ.get(\"MASTER_ADDR\", \"127.0.0.1\")\n\n self._local_rank = int(os.environ[\"LOCAL_RANK\"])\n self._master_addr = os.environ[\"MASTER_ADDR\"]\n self._master_port = int(os.environ[\"MASTER_PORT\"])\n\n def _setup_env_in_slurm(self) -> None:\n slurm_env_req_vars = [\n \"SLURM_JOB_ID\",\n \"SLURM_PROCID\",\n \"SLURM_LOCALID\",\n \"SLURM_NTASKS\",\n \"SLURM_JOB_NODELIST\",\n \"SLURM_JOB_NUM_NODES\",\n ]\n for k in slurm_env_req_vars:\n if k not in os.environ:\n raise RuntimeError(f\"SLURM distributed configuration is missing '{k}' in env variables\")\n\n ddp_vars = _setup_ddp_vars_from_slurm_env(cast(Dict, os.environ))\n\n # define DDP env vars required by PTH:\n for key, value in ddp_vars.items():\n os.environ[key] = str(value)\n\n def get_local_rank(self) -> int:\n return cast(int, self._local_rank)\n\n def get_rank(self) -> int:\n return dist.get_rank()\n\n def get_world_size(self) -> int:\n return dist.get_world_size()\n\n def get_nproc_per_node(self) -> int:\n return cast(int, self._nproc_per_node)\n\n def get_nnodes(self) -> int:\n return cast(int, self._nnodes)\n\n def get_node_rank(self) -> int:\n return cast(int, self._node)\n\n def device(self) -> torch.device:\n if torch.cuda.is_available():\n index = torch.cuda.current_device()\n if index < self.get_local_rank():\n warnings.warn(\n \"Current device index is less than current local rank. 
\"\n \"Please, make sure to call torch.cuda.set_device(local_rank).\"\n )\n return torch.device(f\"cuda:{index}\")\n return torch.device(\"cpu\")\n\n def backend(self) -> str:\n return dist.get_backend()\n\n def finalize(self) -> None:\n dist.destroy_process_group()\n # restore backed-up env\n self._restore_env()\n\n def _restore_env(self) -> None:\n # restore backed-up env\n if self._env_backup is not None:\n os.environ.clear()\n os.environ.update(self._env_backup)\n\n @staticmethod\n def _dist_worker_task_fn(\n local_rank: int,\n backend: str,\n fn: Callable,\n args: Tuple,\n kw_dict: Mapping,\n world_size: int,\n nprocs_per_node: int,\n node_rank: int,\n master_addr: Optional[str],\n master_port: Optional[str],\n init_method: str,\n kw: Any,\n ) -> None:\n from ignite.distributed.utils import _set_model, finalize\n\n copy_env_vars = os.environ.copy()\n\n rank = node_rank * nprocs_per_node + local_rank\n os.environ[\"LOCAL_RANK\"] = str(local_rank)\n os.environ[\"RANK\"] = str(rank)\n os.environ[\"WORLD_SIZE\"] = str(world_size)\n\n arg_world_size = world_size # type: Optional[int]\n arg_rank = rank # type: Optional[int]\n if init_method == \"env://\":\n os.environ[\"MASTER_ADDR\"] = str(master_addr)\n os.environ[\"MASTER_PORT\"] = str(master_port)\n arg_world_size = None\n arg_rank = None\n\n model = _NativeDistModel.create_from_backend(\n backend, init_method=init_method, world_size=arg_world_size, rank=arg_rank, **kw\n )\n _set_model(model)\n fn(local_rank, *args, **kw_dict)\n finalize()\n\n os.environ.clear()\n os.environ.update(copy_env_vars)\n\n @staticmethod\n def spawn( # type: ignore[override]\n fn: Callable,\n args: Tuple,\n kwargs_dict: Optional[Mapping] = None,\n nproc_per_node: int = 1,\n nnodes: int = 1,\n node_rank: int = 0,\n master_addr: Optional[str] = None,\n master_port: Optional[int] = None,\n backend: str = \"nccl\",\n init_method: Optional[str] = None,\n **kwargs: Any,\n ) -> None:\n world_size = nnodes * nproc_per_node\n\n spawn_kwargs = {\n \"join\": kwargs.get(\"join\", True),\n \"daemon\": kwargs.get(\"daemon\", False),\n }\n\n start_processes = mp.spawn\n # start_method and start_processes in pytorch >= 1.5\n if Version(torch.__version__) >= Version(\"1.5.0\"):\n import builtins\n\n if \"__IPYTHON__\" in builtins.__dict__:\n # use fork in jupyter\n default_start_method = \"fork\"\n else:\n default_start_method = \"spawn\"\n spawn_kwargs[\"start_method\"] = kwargs.get(\"start_method\", default_start_method)\n start_processes = mp.start_processes\n\n if init_method in [None, \"env://\"]:\n init_method = \"env://\"\n if master_port is None:\n master_port = 2222\n if master_addr is None:\n master_addr = \"127.0.0.1\"\n elif master_addr is not None:\n raise ValueError(\"master_addr should be None if init_method is provided other then 'env://'\")\n elif master_port is not None:\n raise ValueError(\"master_port should be None if init_method is provided other then 'env://'\")\n\n start_processes(\n _NativeDistModel._dist_worker_task_fn,\n nprocs=nproc_per_node,\n args=(\n backend,\n fn,\n args,\n kwargs_dict,\n world_size,\n nproc_per_node,\n node_rank,\n master_addr,\n master_port,\n init_method,\n kwargs,\n ),\n **spawn_kwargs,\n )\n\n _reduce_op_map = {\n \"SUM\": dist.ReduceOp.SUM,\n \"PRODUCT\": dist.ReduceOp.PRODUCT,\n \"MIN\": dist.ReduceOp.MIN,\n \"MAX\": dist.ReduceOp.MAX,\n \"AND\": dist.ReduceOp.BAND,\n \"OR\": dist.ReduceOp.BOR,\n }\n\n def _do_all_reduce(self, tensor: torch.Tensor, op: str = \"SUM\") -> torch.Tensor:\n if op not in self._reduce_op_map:\n 
raise ValueError(f\"Unsupported reduction operation: '{op}'\")\n reduce_op = self._reduce_op_map[op]\n dist.all_reduce(tensor, reduce_op)\n return tensor\n\n def _do_all_gather(self, tensor: torch.Tensor) -> torch.Tensor:\n if tensor.ndimension() == 0:\n tensor = tensor.unsqueeze(0)\n output = [torch.zeros_like(tensor) for _ in range(self.get_world_size())]\n dist.all_gather(output, tensor)\n return torch.cat(output, dim=0)\n\n def _do_broadcast(self, tensor: torch.Tensor, src: int) -> torch.Tensor:\n dist.broadcast(tensor, src=src)\n return tensor\n\n def barrier(self) -> None:\n dist.barrier()\n\n def _expand_hostlist(nodelist: str) -> List[str]:\n \"\"\"Expand a compressed hostlist string and returns all hosts listed.\n\n Source : https://github.com/LLNL/py-hostlist/blob/master/hostlist/hostlist.py\n\n Args:\n nodelist: Compressed hostlist string\n\n .. note::\n The host names can be composed by any character except the special ones `[`, `]`, `,`. Only one\n sequence `[...]` is supported per hostname.\n\n .. versionadded:: 0.4.6\n \"\"\"\n result_hostlist = []\n\n nodelist_match = r\"([^,\\[\\]]+\\[[^\\[\\]]*\\][^,\\[\\]]*|[^,\\[\\]]*),?\"\n\n nodelist = nodelist.replace(\" \", \"\")\n\n for node in re.findall(nodelist_match, nodelist):\n\n node_match = r\"(.+)\\[((,?[0-9]+-?,?-?){0,})\\](.*)?\"\n\n match = re.search(node_match, node)\n\n if match is None:\n if node:\n result_hostlist.append(node)\n else:\n # holds the ranges of nodes as a string\n # now we can manipulate the string and cast it to a list of numbers\n num = str(match.group(2)).replace(\"[\", \"\").replace(\"]\", \"\")\n\n if len(num) == 0:\n raise ValueError(f\"hostlist invalid : {nodelist}\")\n\n num_list = num.split(\",\")\n\n # find range of node numbers\n ranges = [elem.split(\"-\") if \"-\" in elem else [elem, elem] for elem in num_list]\n\n # if the node numbers contain leading zeros, store them to be\n lead_zeros = max([len(s) - len(s.lstrip(\"0\")) for s, _ in ranges])\n\n # list of expanded ranges of node numbers\n nodes_list = [list(range(int(s), int(e) + 1)) for s, e in ranges]\n\n # flat the list\n final_list = [item for sublist in nodes_list for item in sublist]\n\n # put final list in ascending order and append cluster name to each node number\n final_list = list(sorted(set(final_list)))\n\n # prepend leading zeros to numbers required\n hostlist_tmp = [str(elem).zfill(lead_zeros + 1) for elem in final_list]\n\n # append hostname to the node numbers\n hostlist_no_suffix = [match.group(1) + elem for elem in hostlist_tmp]\n\n # append suffix to hostlist if there is one\n final_hostlist = [elem + match.group(4) for elem in hostlist_no_suffix]\n\n result_hostlist += final_hostlist\n\n return result_hostlist\n\n def _setup_ddp_vars_from_slurm_env(environ: Dict[str, str]) -> Dict[str, Union[str, int]]:\n \"\"\"Method to setup DDP env vars required by PyTorch from SLURM env\"\"\"\n # 1) Tools like enroot can have hooks to translate slurm env vars to RANK, LOCAL_RANK, WORLD_SIZE etc\n # See https://github.com/NVIDIA/enroot/blob/v3.1.0/conf/hooks/extra/50-slurm-pytorch.sh\n # 2) User can use torch.distributed.launch tool to schedule on N local GPUs using 1 node, 1 task by SLURM\n # To cover case 1), let's ensure that defined RANK == SLURM_PROCID, LOCAL_RANK == SLURM_LOCALID,\n # WORLD_SIZE == SLURM_NTASKS. 
We will use defined MASTER_ADDR and MASTER_PORT instead of defining\n # them by our means\n # To cover case 2), let's check that defined RANK >= SLURM_PROCID, LOCAL_RANK >= SLURM_LOCALID,\n # WORLD_SIZE >= SLURM_NTASKS, SLURM_JOB_NUM_NODES == 1\n\n ddp_vars: Dict[str, Union[str, int, None]] = {\n \"RANK\": int(environ[\"SLURM_PROCID\"]),\n \"LOCAL_RANK\": int(environ[\"SLURM_LOCALID\"]),\n \"WORLD_SIZE\": int(environ[\"SLURM_NTASKS\"]),\n \"MASTER_ADDR\": None,\n \"MASTER_PORT\": None,\n }\n\n pth_ddp_env_vars = {key: environ.get(key, None) for key in ddp_vars}\n defined_pth_ddp_env_vars = [v is not None for v in pth_ddp_env_vars.values()]\n if all(defined_pth_ddp_env_vars):\n nnodes = int(environ[\"SLURM_JOB_NUM_NODES\"])\n if nnodes > 1:\n # ensure that all pth_ddp_env_vars are consistent with slurm vars\n for key in [\"RANK\", \"LOCAL_RANK\", \"WORLD_SIZE\"]:\n slurm_var = cast(int, ddp_vars[key])\n pth_var = int(cast(str, pth_ddp_env_vars[key]))\n if slurm_var != pth_var:\n raise RuntimeError(\n \"Environment variable defined for PyTorch Distributed context is inconsistent with \"\n f\"equivalent SLURM env variable. {key}: {pth_var} vs {slurm_var}\\n\"\n f\"SLURM vars: {ddp_vars}\\n\"\n f\"PTH vars: {pth_ddp_env_vars}\\n\"\n )\n else:\n # ensure that PTH RANK >= SLURM_PROCID, PTH LOCAL_RANK >= SLURM_LOCALID,\n # PTH WORLD_SIZE >= SLURM_NTASKS\n for key in [\"RANK\", \"LOCAL_RANK\", \"WORLD_SIZE\"]:\n slurm_var = cast(int, ddp_vars[key])\n pth_var = int(cast(str, pth_ddp_env_vars[key]))\n if pth_var < slurm_var:\n raise RuntimeError(\n \"Environment variable defined for PyTorch Distributed context is \"\n \"inconsistent with equivalent SLURM env variable. \"\n f\"We expect that {key}: {pth_var} >= {slurm_var}\\n\"\n f\"SLURM vars: {ddp_vars}\\n\"\n f\"PTH vars: {pth_ddp_env_vars}\\n\"\n )\n ddp_vars[key] = pth_var\n # set up MASTER_ADDR and MASTER_PORT from PTH\n ddp_vars[\"MASTER_ADDR\"] = cast(str, pth_ddp_env_vars[\"MASTER_ADDR\"])\n ddp_vars[\"MASTER_PORT\"] = int(cast(str, pth_ddp_env_vars[\"MASTER_PORT\"]))\n elif any(defined_pth_ddp_env_vars):\n # Let's warn user about PTH env variables that we could not taken into account\n warnings.warn(\n \"We detected the following env variables: \"\n f\"{[(k, v) for k, v in pth_ddp_env_vars.items() if v is not None]},\\n\"\n \"but will not take them into account as the following env vars are missing:\"\n f\"{[k for k, v in pth_ddp_env_vars.items() if v is None]},\\n\"\n )\n\n if ddp_vars[\"MASTER_ADDR\"] is None:\n nodelist = environ[\"SLURM_JOB_NODELIST\"]\n try:\n # use scontrol to expand hostname list\n hostnames = subprocess.check_output([\"scontrol\", \"show\", \"hostnames\", nodelist])\n method = \"scontrol\"\n except FileNotFoundError:\n # expand hostname list as scontrol\n hostnames = \" \".join(_expand_hostlist(nodelist)).encode(\"utf-8\")\n method = \"ignite\"\n # at least one hostname should be defined\n hostname_list = hostnames.split()\n if len(hostname_list) < 1:\n raise RuntimeError(f\"No hostname detected in SLURM_JOB_NODELIST by {method} (nodelist={nodelist})\")\n # master address is the first hostname of nodes list\n ddp_vars[\"MASTER_ADDR\"] = str(hostname_list[0].decode(\"utf-8\"))\n\n if ddp_vars[\"MASTER_PORT\"] is None:\n # port should be the same over all process\n slurm_port = environ[\"SLURM_JOB_ID\"]\n slurm_port = slurm_port[-4:]\n ddp_vars[\"MASTER_PORT\"] = int(slurm_port) + 15000\n\n return cast(Dict[str, Union[str, int]], ddp_vars)\n",
"path": "ignite/distributed/comp_models/native.py"
}
] | [
{
"content": "import os\nimport re\nimport subprocess\nimport warnings\nfrom typing import Any, Callable, cast, Dict, List, Mapping, Optional, Tuple, Union\n\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom packaging.version import Version\n\nfrom ignite.distributed.comp_models.base import ComputationModel\n\nhas_native_dist_support = dist.is_available()\n\n\nif has_native_dist_support:\n\n NCCL = dist.Backend.NCCL\n GLOO = dist.Backend.GLOO\n MPI = dist.Backend.MPI\n\n class _NativeDistModel(ComputationModel):\n \"\"\"Private class for PyTorch native distributed computation model.\n\n Supported `backends <https://pytorch.org/docs/stable/distributed.html#backends>`_:\n\n - NCCL\n - GLOO\n - MPI\n\n In this implementation we assume the following mapping between backend and devices:\n\n - NCCL <-> GPU\n - GLOO <-> CPU or GPU\n - MPI <-> CPU\n\n \"\"\"\n\n name = \"native-dist\"\n available_backends = tuple(name for name in [NCCL, GLOO, MPI] if getattr(dist, f\"is_{name}_available\")())\n\n @staticmethod\n def create_from_context() -> Optional[\"_NativeDistModel\"]:\n if not (dist.is_available() and dist.is_initialized()):\n return None\n return _NativeDistModel()\n\n @staticmethod\n def create_from_backend(\n backend: str,\n init_method: Optional[str] = None,\n world_size: Optional[int] = None,\n rank: Optional[int] = None,\n **kwargs: Any,\n ) -> \"_NativeDistModel\":\n if backend not in _NativeDistModel.available_backends:\n raise ValueError(f\"Backend should be one of '{_NativeDistModel.available_backends}'\")\n\n if dist.is_available() and dist.is_initialized():\n raise RuntimeError(\"Can not create new distributed process group if default one is already initialized\")\n\n if init_method is None:\n if world_size is not None or rank is not None:\n raise ValueError(\"Arguments rank and world_size should be None if no init_method is provided\")\n else:\n has_rank = rank is not None\n has_ws = world_size is not None\n if (has_rank or has_ws) and (not has_rank or not has_ws):\n raise ValueError(f\"Both rank and world_size should be provided, but given {rank} and {world_size}\")\n\n return _NativeDistModel(\n backend=backend, init_method=init_method, world_size=world_size, rank=rank, **kwargs\n )\n\n def __init__(\n self,\n backend: Optional[str] = None,\n timeout: Optional[int] = None,\n init_method: Optional[str] = None,\n world_size: Optional[int] = None,\n rank: Optional[int] = None,\n **kwargs: Any,\n ) -> None:\n \"\"\"This is a private method. 
Please, use `create_from_backend` or `create_from_context`\"\"\"\n super(_NativeDistModel, self).__init__()\n self._env_backup = None # type: Optional[Dict[str, str]]\n self._local_rank = None # type: Optional[int]\n self._master_port = None # type: Optional[int]\n self._master_addr = None # type: Optional[str]\n self._init_method = None # type: Optional[str]\n if backend is not None:\n self._create_from_backend(\n backend, timeout=timeout, init_method=init_method, world_size=world_size, rank=rank, **kwargs\n )\n else:\n self._init_from_context()\n\n def _create_from_backend(\n self,\n backend: str,\n timeout: Optional[int] = None,\n init_method: Optional[str] = None,\n world_size: Optional[int] = None,\n rank: Optional[int] = None,\n **kwargs: Any,\n ) -> None:\n if backend == dist.Backend.NCCL and not torch.cuda.is_available():\n raise RuntimeError(\"Nccl backend is required but no cuda capable devices\")\n self._backend = backend\n self.setup_env_vars(rank, world_size)\n\n init_pg_kwargs: Dict[str, Any] = {}\n if timeout is not None:\n init_pg_kwargs[\"timeout\"] = timeout\n\n if init_method is None:\n init_method = \"env://\"\n\n if \"env\" not in init_method:\n init_pg_kwargs[\"world_size\"] = int(os.environ[\"WORLD_SIZE\"])\n init_pg_kwargs[\"rank\"] = int(os.environ[\"RANK\"])\n self._init_method = init_method\n\n dist.init_process_group(backend, init_method=init_method, **init_pg_kwargs)\n\n if torch.cuda.is_available():\n torch.cuda.set_device(self._local_rank)\n\n # Call barrier after init_process_group as in\n # https://github.com/facebookresearch/maskrcnn-benchmark/issues/172\n # Define device ids for NCCL to avoid warnings\n # [W ProcessGroupNCCL.cpp:1569] Rank 0 using best-guess GPU 0 to perform barrier as devices used by\n # this process are currently unknown. 
This can potentially cause a hang if this rank to GPU mapping\n # is incorrect.Specify device_ids in barrier() to force use of a particular device.\n if backend == dist.Backend.NCCL and Version(torch.__version__) >= Version(\"1.8.0\"):\n device_ids = [torch.cuda.current_device()]\n dist.barrier(device_ids=device_ids)\n else:\n # For older versions there is no device_ids arg\n dist.barrier()\n\n self._setup_attrs()\n\n def _init_from_context(self) -> None:\n self._backend = dist.get_backend()\n self._identify_local_rank()\n self._setup_attrs()\n\n def _compute_nproc_per_node(self) -> int:\n local_rank = self.get_local_rank()\n # Create new cpu group to get nproc_per_node such we avoid using\n # badly configured NCCL\n gloo_group = dist.new_group(backend=\"gloo\")\n tensor = torch.tensor([local_rank + 1]).to(\"cpu\")\n dist.all_reduce(tensor, op=dist.ReduceOp.MAX, group=gloo_group)\n return int(tensor.item())\n\n def _get_all_hostnames(self) -> List[Tuple[str, ...]]:\n import socket\n\n device = \"cpu\"\n if torch.cuda.is_available():\n index = torch.cuda.current_device()\n device = f\"cuda:{index}\"\n hostname = socket.gethostname()\n name = torch.tensor(bytearray(hostname, \"utf-8\")).to(device)\n padded_t_name = torch.zeros(256, device=device, dtype=torch.long)\n padded_t_name[: len(name)] = name\n out_t_names = [torch.zeros_like(padded_t_name) for _ in range(self.get_world_size())]\n dist.all_gather(out_t_names, padded_t_name)\n return [tuple(t.cpu().tolist()) for t in out_t_names]\n\n @staticmethod\n def _compute_node_and_local_ranks(rank: int, hostnames: List[Tuple[str, ...]]) -> Tuple[int, int]:\n from collections import Counter\n\n c = Counter(hostnames) # type: Counter\n sizes = torch.tensor([0] + list(c.values()))\n cumsum_sizes = torch.cumsum(sizes, dim=0)\n node_rank = (rank // cumsum_sizes[1:]).clamp(0, 1).sum().item()\n local_rank = rank - cumsum_sizes[node_rank].item()\n return int(local_rank), node_rank\n\n def _compute_local_rank_via_hostname(self) -> int:\n # get all hostnames\n hostnames = self._get_all_hostnames()\n local_rank, self._node = self._compute_node_and_local_ranks(self.get_rank(), hostnames)\n\n if local_rank < 0 or self._node < 0:\n raise ValueError(\n \"Failed to correctly estimate local rank. \"\n f\"Debugging info: local rank: {local_rank}, node rank: {self._node}, hostnames: {hostnames}\"\n )\n return local_rank\n\n def _identify_local_rank(self) -> None:\n\n if \"SLURM_JOB_ID\" in os.environ:\n os.environ[\"LOCAL_RANK\"] = os.environ[\"SLURM_LOCALID\"]\n\n if \"LOCAL_RANK\" in os.environ:\n self._local_rank = int(os.environ[\"LOCAL_RANK\"])\n elif self._ext_local_rank is not None:\n self._local_rank = self._ext_local_rank\n else:\n warnings.warn(\n \"Local rank information for native distributed setting will be initialized using \"\n \"a heuristic approach based on the hostnames. In some corner cases, determined \"\n \"local rank can be different from the real setup. 
To avoid this warning, \"\n \"please either set `os.environ['LOCAL_RANK']` \"\n \"or use `idist.set_local_rank(local_rank)` with correct local rank index.\"\n )\n # use socket gethostname heuristic to determine number of nodes => local rank\n self._local_rank = self._compute_local_rank_via_hostname()\n\n def setup_env_vars(self, rank: Optional[int] = None, world_size: Optional[int] = None) -> None:\n\n self._env_backup = os.environ.copy()\n\n if \"SLURM_JOB_ID\" in os.environ:\n if rank is not None or world_size is not None:\n raise ValueError(\"Arguments rank and world_size should not be specified with SLURM\")\n self._setup_env_in_slurm()\n else:\n env_vars = [\"RANK\", \"LOCAL_RANK\", \"WORLD_SIZE\"]\n all_env_vars_defined = [k in os.environ for k in env_vars]\n # check if all necessary env vars are set\n # if partially defined raise an error\n if any(all_env_vars_defined) and not all(all_env_vars_defined):\n raise RuntimeError(f\"PyTorch distributed configuration should define env variables '{env_vars}'\")\n\n os.environ[\"RANK\"] = os.environ.get(\"RANK\", f\"{rank if rank is not None else 0}\")\n os.environ[\"WORLD_SIZE\"] = os.environ.get(\n \"WORLD_SIZE\", f\"{world_size if world_size is not None else 1}\"\n )\n os.environ[\"LOCAL_RANK\"] = os.environ.get(\"LOCAL_RANK\", \"0\")\n os.environ[\"MASTER_PORT\"] = os.environ.get(\"MASTER_PORT\", \"15000\")\n os.environ[\"MASTER_ADDR\"] = os.environ.get(\"MASTER_ADDR\", \"127.0.0.1\")\n\n self._local_rank = int(os.environ[\"LOCAL_RANK\"])\n self._master_addr = os.environ[\"MASTER_ADDR\"]\n self._master_port = int(os.environ[\"MASTER_PORT\"])\n\n def _setup_env_in_slurm(self) -> None:\n slurm_env_req_vars = [\n \"SLURM_JOB_ID\",\n \"SLURM_PROCID\",\n \"SLURM_LOCALID\",\n \"SLURM_NTASKS\",\n \"SLURM_JOB_NODELIST\",\n \"SLURM_JOB_NUM_NODES\",\n ]\n for k in slurm_env_req_vars:\n if k not in os.environ:\n raise RuntimeError(f\"SLURM distributed configuration is missing '{k}' in env variables\")\n\n ddp_vars = _setup_ddp_vars_from_slurm_env(cast(Dict, os.environ))\n\n # define DDP env vars required by PTH:\n for key, value in ddp_vars.items():\n os.environ[key] = str(value)\n\n def get_local_rank(self) -> int:\n return cast(int, self._local_rank)\n\n def get_rank(self) -> int:\n return dist.get_rank()\n\n def get_world_size(self) -> int:\n return dist.get_world_size()\n\n def get_nproc_per_node(self) -> int:\n return cast(int, self._nproc_per_node)\n\n def get_nnodes(self) -> int:\n return cast(int, self._nnodes)\n\n def get_node_rank(self) -> int:\n return cast(int, self._node)\n\n def device(self) -> torch.device:\n if torch.cuda.is_available():\n index = torch.cuda.current_device()\n if index < self.get_local_rank():\n warnings.warn(\n \"Current device index is less than current local rank. 
\"\n \"Please, make sure to call torch.cuda.set_device(local_rank).\"\n )\n return torch.device(f\"cuda:{index}\")\n return torch.device(\"cpu\")\n\n def backend(self) -> str:\n return dist.get_backend()\n\n def finalize(self) -> None:\n dist.destroy_process_group()\n # restore backed-up env\n self._restore_env()\n\n def _restore_env(self) -> None:\n # restore backed-up env\n if self._env_backup is not None:\n os.environ.clear()\n os.environ.update(self._env_backup)\n\n @staticmethod\n def _dist_worker_task_fn(\n local_rank: int,\n backend: str,\n fn: Callable,\n args: Tuple,\n kw_dict: Mapping,\n world_size: int,\n nprocs_per_node: int,\n node_rank: int,\n master_addr: Optional[str],\n master_port: Optional[str],\n init_method: str,\n kw: Any,\n ) -> None:\n from ignite.distributed.utils import _set_model, finalize\n\n copy_env_vars = os.environ.copy()\n\n rank = node_rank * nprocs_per_node + local_rank\n os.environ[\"LOCAL_RANK\"] = str(local_rank)\n os.environ[\"RANK\"] = str(rank)\n os.environ[\"WORLD_SIZE\"] = str(world_size)\n\n arg_world_size = world_size # type: Optional[int]\n arg_rank = rank # type: Optional[int]\n if init_method == \"env://\":\n os.environ[\"MASTER_ADDR\"] = str(master_addr)\n os.environ[\"MASTER_PORT\"] = str(master_port)\n arg_world_size = None\n arg_rank = None\n\n model = _NativeDistModel.create_from_backend(\n backend, init_method=init_method, world_size=arg_world_size, rank=arg_rank, **kw\n )\n _set_model(model)\n fn(local_rank, *args, **kw_dict)\n finalize()\n\n os.environ.clear()\n os.environ.update(copy_env_vars)\n\n @staticmethod\n def spawn( # type: ignore[override]\n fn: Callable,\n args: Tuple,\n kwargs_dict: Optional[Mapping] = None,\n nproc_per_node: int = 1,\n nnodes: int = 1,\n node_rank: int = 0,\n master_addr: Optional[str] = None,\n master_port: Optional[int] = None,\n backend: str = \"nccl\",\n init_method: Optional[str] = None,\n **kwargs: Any,\n ) -> None:\n world_size = nnodes * nproc_per_node\n\n spawn_kwargs = {\n \"join\": kwargs.get(\"join\", True),\n \"daemon\": kwargs.get(\"daemon\", False),\n }\n\n start_processes = mp.spawn\n # start_method and start_processes in pytorch >= 1.5\n if Version(torch.__version__) >= Version(\"1.5.0\"):\n import builtins\n\n if \"__IPYTHON__\" in builtins.__dict__:\n # use fork in jupyter\n default_start_method = \"fork\"\n else:\n default_start_method = \"spawn\"\n spawn_kwargs[\"start_method\"] = kwargs.get(\"start_method\", default_start_method)\n start_processes = mp.start_processes\n\n if init_method in [None, \"env://\"]:\n init_method = \"env://\"\n if master_port is None:\n master_port = 2222\n if master_addr is None:\n master_addr = \"127.0.0.1\"\n elif master_addr is not None:\n raise ValueError(\"master_addr should be None if init_method is provided other then 'env://'\")\n elif master_port is not None:\n raise ValueError(\"master_port should be None if init_method is provided other then 'env://'\")\n\n start_processes(\n _NativeDistModel._dist_worker_task_fn,\n nprocs=nproc_per_node,\n args=(\n backend,\n fn,\n args,\n kwargs_dict,\n world_size,\n nproc_per_node,\n node_rank,\n master_addr,\n master_port,\n init_method,\n kwargs,\n ),\n **spawn_kwargs,\n )\n\n _reduce_op_map = {\n \"SUM\": dist.ReduceOp.SUM,\n \"PRODUCT\": dist.ReduceOp.PRODUCT,\n \"MIN\": dist.ReduceOp.MIN,\n \"MAX\": dist.ReduceOp.MAX,\n \"AND\": dist.ReduceOp.BAND,\n \"OR\": dist.ReduceOp.BOR,\n }\n\n def _do_all_reduce(self, tensor: torch.Tensor, op: str = \"SUM\") -> torch.Tensor:\n if op not in self._reduce_op_map:\n 
raise ValueError(f\"Unsupported reduction operation: '{op}'\")\n reduce_op = self._reduce_op_map[op]\n dist.all_reduce(tensor, reduce_op)\n return tensor\n\n def _do_all_gather(self, tensor: torch.Tensor) -> torch.Tensor:\n if tensor.ndimension() == 0:\n tensor = tensor.unsqueeze(0)\n output = [torch.zeros_like(tensor) for _ in range(self.get_world_size())]\n dist.all_gather(output, tensor)\n return torch.cat(output, dim=0)\n\n def _do_broadcast(self, tensor: torch.Tensor, src: int) -> torch.Tensor:\n dist.broadcast(tensor, src=src)\n return tensor\n\n def barrier(self) -> None:\n dist.barrier()\n\n def _expand_hostlist(nodelist: str) -> List[str]:\n \"\"\"Expand a compressed hostlist string and returns all hosts listed.\n\n Source : https://github.com/LLNL/py-hostlist/blob/master/hostlist/hostlist.py\n\n Args:\n nodelist: Compressed hostlist string\n\n .. note::\n The host names can be composed by any character except the special ones `[`, `]`, `,`. Only one\n sequence `[...]` is supported per hostname.\n\n .. versionadded:: 0.4.6\n \"\"\"\n result_hostlist = []\n\n nodelist_match = r\"([^,\\[\\]]+\\[[^\\[\\]]*\\][^,\\[\\]]*|[^,\\[\\]]*),?\"\n\n nodelist = nodelist.replace(\" \", \"\")\n\n for node in re.findall(nodelist_match, nodelist):\n\n node_match = r\"(.+)\\[((,?[0-9]+-?,?-?){0,})\\](.*)?\"\n\n match = re.search(node_match, node)\n\n if match is None:\n if node:\n result_hostlist.append(node)\n else:\n # holds the ranges of nodes as a string\n # now we can manipulate the string and cast it to a list of numbers\n num = str(match.group(2)).replace(\"[\", \"\").replace(\"]\", \"\")\n\n if len(num) == 0:\n raise ValueError(f\"hostlist invalid : {nodelist}\")\n\n num_list = num.split(\",\")\n\n # find range of node numbers\n ranges = [elem.split(\"-\") if \"-\" in elem else [elem, elem] for elem in num_list]\n\n # if the node numbers contain leading zeros, store them to be\n lead_zeros = max([len(s) - len(s.lstrip(\"0\")) for s, _ in ranges])\n\n # list of expanded ranges of node numbers\n nodes_list = [list(range(int(s), int(e) + 1)) for s, e in ranges]\n\n # flat the list\n final_list = [item for sublist in nodes_list for item in sublist]\n\n # put final list in ascending order and append cluster name to each node number\n final_list = list(sorted(set(final_list)))\n\n # prepend leading zeros to numbers required\n hostlist_tmp = [str(elem).zfill(lead_zeros + 1) for elem in final_list]\n\n # append hostname to the node numbers\n hostlist_no_suffix = [match.group(1) + elem for elem in hostlist_tmp]\n\n # append suffix to hostlist if there is one\n final_hostlist = [elem + match.group(4) for elem in hostlist_no_suffix]\n\n result_hostlist += final_hostlist\n\n return result_hostlist\n\n def _setup_ddp_vars_from_slurm_env(environ: Dict[str, str]) -> Dict[str, Union[str, int]]:\n \"\"\"Method to setup DDP env vars required by PyTorch from SLURM env\"\"\"\n # 1) Tools like enroot can have hooks to translate slurm env vars to RANK, LOCAL_RANK, WORLD_SIZE etc\n # See https://github.com/NVIDIA/enroot/blob/v3.1.0/conf/hooks/extra/50-slurm-pytorch.sh\n # 2) User can use torch.distributed.launch tool to schedule on N local GPUs using 1 node, 1 task by SLURM\n # To cover case 1), let's ensure that defined RANK == SLURM_PROCID, LOCAL_RANK == SLURM_LOCALID,\n # WORLD_SIZE == SLURM_NTASKS. 
We will use defined MASTER_ADDR and MASTER_PORT instead of defining\n # them by our means\n # To cover case 2), let's check that defined RANK >= SLURM_PROCID, LOCAL_RANK >= SLURM_LOCALID,\n # WORLD_SIZE >= SLURM_NTASKS, SLURM_JOB_NUM_NODES == 1\n\n ddp_vars: Dict[str, Union[str, int, None]] = {\n \"RANK\": int(environ[\"SLURM_PROCID\"]),\n \"LOCAL_RANK\": int(environ[\"SLURM_LOCALID\"]),\n \"WORLD_SIZE\": int(environ[\"SLURM_NTASKS\"]),\n \"MASTER_ADDR\": None,\n \"MASTER_PORT\": None,\n }\n\n pth_ddp_env_vars = {key: environ.get(key, None) for key in ddp_vars}\n defined_pth_ddp_env_vars = [v is not None for v in pth_ddp_env_vars.values()]\n if all(defined_pth_ddp_env_vars):\n nnodes = int(environ[\"SLURM_JOB_NUM_NODES\"])\n if nnodes > 1:\n # ensure that all pth_ddp_env_vars are consistent with slurm vars\n for key in [\"RANK\", \"LOCAL_RANK\", \"WORLD_SIZE\"]:\n slurm_var = cast(int, ddp_vars[key])\n pth_var = int(cast(str, pth_ddp_env_vars[key]))\n if slurm_var != pth_var:\n raise RuntimeError(\n \"Environment variable defined for PyTorch Distributed context is inconsistent with \"\n f\"equivalent SLURM env variable. {key}: {pth_var} vs {slurm_var}\\n\"\n f\"SLURM vars: {ddp_vars}\\n\"\n f\"PTH vars: {pth_ddp_env_vars}\\n\"\n )\n else:\n # ensure that PTH RANK >= SLURM_PROCID, PTH LOCAL_RANK >= SLURM_LOCALID,\n # PTH WORLD_SIZE >= SLURM_NTASKS\n for key in [\"RANK\", \"LOCAL_RANK\", \"WORLD_SIZE\"]:\n slurm_var = cast(int, ddp_vars[key])\n pth_var = int(cast(str, pth_ddp_env_vars[key]))\n if pth_var < slurm_var:\n raise RuntimeError(\n \"Environment variable defined for PyTorch Distributed context is \"\n \"inconsistent with equivalent SLURM env variable. \"\n f\"We expect that {key}: {pth_var} >= {slurm_var}\\n\"\n f\"SLURM vars: {ddp_vars}\\n\"\n f\"PTH vars: {pth_ddp_env_vars}\\n\"\n )\n ddp_vars[key] = pth_var\n # set up MASTER_ADDR and MASTER_PORT from PTH\n ddp_vars[\"MASTER_ADDR\"] = cast(str, pth_ddp_env_vars[\"MASTER_ADDR\"])\n ddp_vars[\"MASTER_PORT\"] = int(cast(str, pth_ddp_env_vars[\"MASTER_PORT\"]))\n elif any(defined_pth_ddp_env_vars):\n # Let's warn user about PTH env variables that we could not taken into account\n warnings.warn(\n \"We detected the following env variables: \"\n f\"{[(k, v) for k, v in pth_ddp_env_vars.items() if v is not None]},\\n\"\n \"but will not take them into account as the following env vars are missing:\"\n f\"{[k for k, v in pth_ddp_env_vars.items() if v is None]},\\n\"\n )\n\n if ddp_vars[\"MASTER_ADDR\"] is None:\n nodelist = environ[\"SLURM_JOB_NODELIST\"]\n try:\n # use scontrol to expand hostname list\n hostnames = subprocess.check_output([\"scontrol\", \"show\", \"hostnames\", nodelist])\n method = \"scontrol\"\n except FileNotFoundError:\n # expand hostname list as scontrol\n hostnames = \" \".join(_expand_hostlist(nodelist)).encode(\"utf-8\")\n method = \"ignite\"\n # at least one hostname should be defined\n hostname_list = hostnames.split()\n if len(hostname_list) < 1:\n raise RuntimeError(f\"No hostname detected in SLURM_JOB_NODELIST by {method} (nodelist={nodelist})\")\n # master address is the first hostname of nodes list\n ddp_vars[\"MASTER_ADDR\"] = str(hostname_list[0].decode(\"utf-8\"))\n\n if ddp_vars[\"MASTER_PORT\"] is None:\n # port should be the same over all process\n slurm_port = environ[\"SLURM_JOB_ID\"]\n slurm_port = slurm_port[-4:]\n ddp_vars[\"MASTER_PORT\"] = int(slurm_port) + 15000\n\n return cast(Dict[str, Union[str, int]], ddp_vars)\n",
"path": "ignite/distributed/comp_models/native.py"
}
] | diff --git a/ignite/distributed/comp_models/native.py b/ignite/distributed/comp_models/native.py
index 56508806f309..2587c4a8a5d9 100644
--- a/ignite/distributed/comp_models/native.py
+++ b/ignite/distributed/comp_models/native.py
@@ -110,7 +110,7 @@ def _create_from_backend(
self._backend = backend
self.setup_env_vars(rank, world_size)
- init_pg_kwargs = {}
+ init_pg_kwargs: Dict[str, Any] = {}
if timeout is not None:
init_pg_kwargs["timeout"] = timeout
|
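A note on the one-line change in the diff above (assumption: the annotation exists to satisfy mypy, which the diff itself does not state): a bare `{}` literal gives the type checker nothing to infer a value type from, so it reports "Need type annotation", and the values stored into the dict later are heterogeneous. A minimal sketch:

```python
from datetime import timedelta
from typing import Any, Dict

init_pg_kwargs: Dict[str, Any] = {}                  # annotated: mypy can't infer from a bare {}
init_pg_kwargs["timeout"] = timedelta(seconds=1800)  # illustrative value, e.g. a timedelta
init_pg_kwargs["world_size"] = 2                     # illustrative value, an int
init_pg_kwargs["rank"] = 0
```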
pennersr__django-allauth-3242 | empty user.email for keycloak
I found a bug introduced by #3165 that affects the `user.email` field with the `keycloak` provider (and possibly other OIDC providers too).
After a successful login, the `user.email` attribute is left empty, whereas it was filled in with django-allauth 0.51.0.
I think the problem is in [extract_common_fields](https://github.com/pennersr/django-allauth/blob/master/allauth/socialaccount/providers/openid_connect/provider.py#L43), which no longer returns the `email` key, even though [populate_user](https://github.com/pennersr/django-allauth/blob/master/allauth/socialaccount/providers/base/provider.py#L75) needs it to update the user's fields.
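A minimal sketch of the fix this suggests (it mirrors the change applied in the patch below): re-add the `email` key in `extract_common_fields` so that `populate_user` can pick it up again.

```python
def extract_common_fields(self, data):
    # Include the email claim again so populate_user() can copy it
    # onto the Django user, as it did in 0.51.0.
    return dict(
        email=data.get("email"),
        username=data.get("preferred_username"),
        name=data.get("name"),
        user_id=data.get("user_id"),
        picture=data.get("picture"),
    )
```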
| [
{
"content": "# -*- coding: utf-8 -*-\nfrom allauth.account.models import EmailAddress\nfrom allauth.socialaccount import app_settings\nfrom allauth.socialaccount.providers.base import ProviderAccount\nfrom allauth.socialaccount.providers.oauth2.provider import OAuth2Provider\n\n\nclass OpenIDConnectProviderAccount(ProviderAccount):\n def to_str(self):\n dflt = super(OpenIDConnectProviderAccount, self).to_str()\n return self.account.extra_data.get(\"name\", dflt)\n\n\nclass OpenIDConnectProvider(OAuth2Provider):\n id = \"openid_connect\"\n name = \"OpenID Connect\"\n _server_id = None\n _server_url = None\n account_class = OpenIDConnectProviderAccount\n\n @property\n def server_url(self):\n well_known_uri = \"/.well-known/openid-configuration\"\n url = self._server_url\n if not url.endswith(well_known_uri):\n url += well_known_uri\n return url\n\n @property\n def token_auth_method(self):\n return app_settings.PROVIDERS.get(self.id, {}).get(\"token_auth_method\")\n\n @classmethod\n def get_slug(cls):\n return cls._server_id if cls._server_id else \"openid_connect\"\n\n def get_default_scope(self):\n return [\"openid\", \"profile\", \"email\"]\n\n def extract_uid(self, data):\n return str(data[\"sub\"])\n\n def extract_common_fields(self, data):\n return dict(\n username=data.get(\"preferred_username\"),\n name=data.get(\"name\"),\n user_id=data.get(\"user_id\"),\n picture=data.get(\"picture\"),\n )\n\n def extract_email_addresses(self, data):\n addresses = []\n email = data.get(\"email\")\n if email:\n addresses.append(\n EmailAddress(\n email=email,\n verified=data.get(\"email_verified\", False),\n primary=True,\n )\n )\n return addresses\n\n\ndef _provider_factory(server_settings):\n class OpenIDConnectProviderServer(OpenIDConnectProvider):\n name = server_settings.get(\"name\", OpenIDConnectProvider.name)\n id = server_settings[\"id\"]\n _server_id = server_settings[\"id\"]\n _server_url = server_settings[\"server_url\"]\n\n def get_app(self, request, config=None):\n return super().get_app(request, config=server_settings.get(\"APP\"))\n\n OpenIDConnectProviderServer.__name__ = (\n \"OpenIDConnectProviderServer_\" + server_settings[\"id\"]\n )\n app_settings.PROVIDERS.setdefault(OpenIDConnectProviderServer.id, {})\n app_settings.PROVIDERS[OpenIDConnectProviderServer.id].update(server_settings)\n return OpenIDConnectProviderServer\n\n\nprovider_classes = [\n _provider_factory(server_settings)\n for server_settings in app_settings.PROVIDERS.get(OpenIDConnectProvider.id, {}).get(\n \"SERVERS\", []\n )\n]\n",
"path": "allauth/socialaccount/providers/openid_connect/provider.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\nfrom allauth.account.models import EmailAddress\nfrom allauth.socialaccount import app_settings\nfrom allauth.socialaccount.providers.base import ProviderAccount\nfrom allauth.socialaccount.providers.oauth2.provider import OAuth2Provider\n\n\nclass OpenIDConnectProviderAccount(ProviderAccount):\n def to_str(self):\n dflt = super(OpenIDConnectProviderAccount, self).to_str()\n return self.account.extra_data.get(\"name\", dflt)\n\n\nclass OpenIDConnectProvider(OAuth2Provider):\n id = \"openid_connect\"\n name = \"OpenID Connect\"\n _server_id = None\n _server_url = None\n account_class = OpenIDConnectProviderAccount\n\n @property\n def server_url(self):\n well_known_uri = \"/.well-known/openid-configuration\"\n url = self._server_url\n if not url.endswith(well_known_uri):\n url += well_known_uri\n return url\n\n @property\n def token_auth_method(self):\n return app_settings.PROVIDERS.get(self.id, {}).get(\"token_auth_method\")\n\n @classmethod\n def get_slug(cls):\n return cls._server_id if cls._server_id else \"openid_connect\"\n\n def get_default_scope(self):\n return [\"openid\", \"profile\", \"email\"]\n\n def extract_uid(self, data):\n return str(data[\"sub\"])\n\n def extract_common_fields(self, data):\n return dict(\n email=data.get(\"email\"),\n username=data.get(\"preferred_username\"),\n name=data.get(\"name\"),\n user_id=data.get(\"user_id\"),\n picture=data.get(\"picture\"),\n )\n\n def extract_email_addresses(self, data):\n addresses = []\n email = data.get(\"email\")\n if email:\n addresses.append(\n EmailAddress(\n email=email,\n verified=data.get(\"email_verified\", False),\n primary=True,\n )\n )\n return addresses\n\n\ndef _provider_factory(server_settings):\n class OpenIDConnectProviderServer(OpenIDConnectProvider):\n name = server_settings.get(\"name\", OpenIDConnectProvider.name)\n id = server_settings[\"id\"]\n _server_id = server_settings[\"id\"]\n _server_url = server_settings[\"server_url\"]\n\n def get_app(self, request, config=None):\n return super().get_app(request, config=server_settings.get(\"APP\"))\n\n OpenIDConnectProviderServer.__name__ = (\n \"OpenIDConnectProviderServer_\" + server_settings[\"id\"]\n )\n app_settings.PROVIDERS.setdefault(OpenIDConnectProviderServer.id, {})\n app_settings.PROVIDERS[OpenIDConnectProviderServer.id].update(server_settings)\n return OpenIDConnectProviderServer\n\n\nprovider_classes = [\n _provider_factory(server_settings)\n for server_settings in app_settings.PROVIDERS.get(OpenIDConnectProvider.id, {}).get(\n \"SERVERS\", []\n )\n]\n",
"path": "allauth/socialaccount/providers/openid_connect/provider.py"
}
] | diff --git a/allauth/socialaccount/providers/openid_connect/provider.py b/allauth/socialaccount/providers/openid_connect/provider.py
index 6ab482065a..455f2566e0 100644
--- a/allauth/socialaccount/providers/openid_connect/provider.py
+++ b/allauth/socialaccount/providers/openid_connect/provider.py
@@ -42,6 +42,7 @@ def extract_uid(self, data):
def extract_common_fields(self, data):
return dict(
+ email=data.get("email"),
username=data.get("preferred_username"),
name=data.get("name"),
user_id=data.get("user_id"),
|
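A hypothetical regression check for the change above (the `provider` instance and the claim values are illustrative, not taken from the project's test suite): with the `email` key restored, the extracted common fields carry the address that `populate_user` copies onto the user.

```python
# Illustrative only: `provider` is assumed to be a configured
# OpenIDConnectProvider instance.
data = {"sub": "42", "preferred_username": "alice", "email": "alice@example.org"}
fields = provider.extract_common_fields(data)
assert fields["email"] == "alice@example.org"
```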
ivy-llc__ivy-17685 | floor_divide
will implement floor_divide paddle frontend function
| [
{
"content": "# local\r\nimport ivy\r\nimport ivy.functional.frontends.paddle as paddle_frontend\r\nfrom ivy.func_wrapper import with_supported_dtypes, with_unsupported_dtypes\r\nfrom ivy.functional.frontends.paddle.func_wrapper import _to_ivy_array\r\n\r\n\r\nclass Tensor:\r\n def __init__(self, array, dtype=None, place=\"cpu\", stop_gradient=True):\r\n self._ivy_array = (\r\n ivy.array(array, dtype=dtype, device=place)\r\n if not isinstance(array, ivy.Array)\r\n else array\r\n )\r\n self._dtype = dtype\r\n self._place = place\r\n self._stop_gradient = stop_gradient\r\n\r\n def __repr__(self):\r\n return (\r\n str(self._ivy_array.__repr__())\r\n .replace(\"ivy.array\", \"ivy.frontends.paddle.Tensor\")\r\n .replace(\"dev\", \"place\")\r\n )\r\n\r\n # Properties #\r\n # ---------- #\r\n\r\n @property\r\n def ivy_array(self):\r\n return self._ivy_array\r\n\r\n @property\r\n def place(self):\r\n return self.ivy_array.device\r\n\r\n @property\r\n def dtype(self):\r\n return self._ivy_array.dtype\r\n\r\n @property\r\n def shape(self):\r\n return self._ivy_array.shape\r\n\r\n @property\r\n def ndim(self):\r\n return self.dim()\r\n\r\n # Setters #\r\n # --------#\r\n\r\n @ivy_array.setter\r\n def ivy_array(self, array):\r\n self._ivy_array = (\r\n ivy.array(array) if not isinstance(array, ivy.Array) else array\r\n )\r\n\r\n # Special Methods #\r\n # -------------------#\r\n\r\n def __getitem__(self, item):\r\n ivy_args = ivy.nested_map([self, item], _to_ivy_array)\r\n ret = ivy.get_item(*ivy_args)\r\n return paddle_frontend.Tensor(ret)\r\n\r\n def __setitem__(self, item, value):\r\n item, value = ivy.nested_map([item, value], _to_ivy_array)\r\n self.ivy_array[item] = value\r\n\r\n def __iter__(self):\r\n if self.ndim == 0:\r\n raise TypeError(\"iteration over a 0-d tensor not supported\")\r\n for i in range(self.shape[0]):\r\n yield self[i]\r\n\r\n # Instance Methods #\r\n # ---------------- #\r\n\r\n def reshape(self, *args, shape=None):\r\n if args and shape:\r\n raise TypeError(\"reshape() got multiple values for argument 'shape'\")\r\n if shape is not None:\r\n return paddle_frontend.reshape(self._ivy_array, shape)\r\n if args:\r\n if isinstance(args[0], (tuple, list)):\r\n shape = args[0]\r\n return paddle_frontend.reshape(self._ivy_array, shape)\r\n else:\r\n return paddle_frontend.reshape(self._ivy_array, args)\r\n return paddle_frontend.reshape(self._ivy_array)\r\n\r\n def dim(self):\r\n return self.ivy_array.ndim\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def abs(self):\r\n return paddle_frontend.abs(self)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def ceil(self):\r\n return paddle_frontend.ceil(self)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\",)}, \"paddle\")\r\n def asinh(self, name=None):\r\n return ivy.asinh(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def asin(self, name=None):\r\n return ivy.asin(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def cosh(self, name=None):\r\n return ivy.cosh(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def log(self, name=None):\r\n return ivy.log(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def sin(self, name=None):\r\n return 
ivy.sin(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def sinh(self, name=None):\r\n return ivy.sinh(self._ivy_array)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def argmax(self, axis=None, keepdim=False, dtype=None, name=None):\r\n return ivy.argmax(self._ivy_array, axis=axis, keepdims=keepdim, dtype=dtype)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def sqrt(self, name=None):\r\n return ivy.sqrt(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def cos(self, name=None):\r\n return ivy.cos(self._ivy_array)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def exp(self, name=None):\r\n return ivy.exp(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def erf(self, name=None):\r\n return ivy.erf(self._ivy_array)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def subtract(self, y, name=None):\r\n y_ivy = _to_ivy_array(y)\r\n return ivy.subtract(self._ivy_array, y_ivy)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def log10(self, name=None):\r\n return ivy.log10(self._ivy_array)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def argsort(self, axis=-1, descending=False, name=None):\r\n return ivy.argsort(self._ivy_array, axis=axis, descending=descending)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def floor(self, name=None):\r\n return ivy.floor(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def tanh(self, name=None):\r\n return ivy.tanh(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def add_(self, name=None):\r\n return ivy.add(self._ivy_array)\r\n\r\n @with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"float16\", \"float32\", \"float64\", \"int32\", \"int64\")},\r\n \"paddle\",\r\n )\r\n def isinf(self, name=None):\r\n return ivy.isinf(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def square(self, name=None):\r\n return ivy.square(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def cholesky(self, upper=False, name=None):\r\n return ivy.cholesky(self._ivy_array, upper=upper)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def multiply(self, y, name=None):\r\n return paddle_frontend.multiply(self, y)\r\n\r\n @with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"float16\", \"float32\", \"float64\", \"int32\", \"int64\")},\r\n \"paddle\",\r\n )\r\n def isfinite(self, name=None):\r\n return ivy.isfinite(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def all(self, axis=None, keepdim=False, dtype=None, name=None):\r\n return ivy.all(self.ivy_array, axis=axis, keepdims=keepdim, dtype=dtype)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def allclose(self, other, rtol=1e-05, atol=1e-08, 
equal_nan=False, name=None):\r\n return ivy.allclose(\r\n self._ivy_array, other, rtol=rtol, atol=atol, equal_nan=equal_nan\r\n )\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def sort(self, axis=-1, descending=False, name=None):\r\n return ivy.sort(self._ivy_array, axis=axis, descending=descending)\r\n\r\n @with_supported_dtypes(\r\n {\r\n \"2.4.2 and below\": (\r\n \"bool\",\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"int32\",\r\n \"int64\",\r\n )\r\n },\r\n \"paddle\",\r\n )\r\n def bitwise_and(self, y, out=None, name=None):\r\n return paddle_frontend.bitwise_and(self, y)\r\n\r\n @with_supported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"bool\",\r\n \"int8\",\r\n \"int16\",\r\n \"int32\",\r\n \"int64\",\r\n \"float32\",\r\n \"float64\",\r\n )\r\n },\r\n \"paddle\",\r\n )\r\n def logical_or(self, y, out=None, name=None):\r\n return paddle_frontend.logical_or(self, y, out=out)\r\n\r\n @with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"bool\", \"uint8\", \"int8\", \"int16\", \"int32\", \"int64\")},\r\n \"paddle\",\r\n )\r\n def bitwise_xor(self, y, out=None, name=None):\r\n return paddle_frontend.bitwise_xor(self, y)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def any(self, axis=None, keepdim=False, name=None):\r\n return ivy.any(self._ivy_array, axis=axis, keepdims=keepdim)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def astype(self, dtype):\r\n return ivy.astype(self._ivy_array, dtype=dtype)\r\n\r\n @with_supported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"bool\",\r\n \"int8\",\r\n \"int16\",\r\n \"int32\",\r\n \"int64\",\r\n \"float32\",\r\n \"float64\",\r\n )\r\n },\r\n \"paddle\",\r\n )\r\n def logical_xor(self, y, out=None, name=None):\r\n return paddle_frontend.logical_xor(self, y, out=out)\r\n\r\n @with_unsupported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"bool\",\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"complex64\",\r\n \"complex128\",\r\n )\r\n },\r\n \"paddle\",\r\n )\r\n def greater_than(self, y, name=None):\r\n return paddle_frontend.greater_than(self, y)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def rsqrt(self, name=None):\r\n return ivy.reciprocal(ivy.sqrt(self._ivy_array))\r\n\r\n @with_unsupported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"bool\",\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"complex64\",\r\n \"complex128\",\r\n )\r\n },\r\n \"paddle\",\r\n )\r\n def less_than(self, y, name=None):\r\n return paddle_frontend.less_than(self, y)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def cumprod(self, dim=None, dtype=None, name=None):\r\n return ivy.cumprod(self._ivy_array, axis=dim, dtype=dtype)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def cumsum(self, axis=None, dtype=None, name=None):\r\n return ivy.cumsum(self._ivy_array, axis=axis, dtype=dtype)\r\n\r\n @with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"complex64\", \"complex128\", \"float32\", \"float64\")},\r\n \"paddle\",\r\n )\r\n def angle(self, name=None):\r\n return ivy.angle(self._ivy_array)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def rad2deg(self, name=None):\r\n return ivy.rad2deg(self._ivy_array)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": \"bfloat16\"}, 
\"paddle\")\r\n def fmax(self, y, name=None):\r\n y_ivy = _to_ivy_array(y)\r\n return ivy.fmax(self._ivy_array, y_ivy)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": \"bfloat16\"}, \"paddle\")\r\n def fmin(self, y, name=None):\r\n y_ivy = _to_ivy_array(y)\r\n return ivy.fmin(self._ivy_array, y_ivy)\r\n\r\n @with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n )\r\n def minimum(self, y, name=None):\r\n y_ivy = _to_ivy_array(y)\r\n return ivy.minimum(self._ivy_array, y_ivy)\r\n\r\n @with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n )\r\n def max(self, axis=None, keepdim=False, name=None):\r\n return ivy.max(self._ivy_array, axis=axis, keepdims=keepdim)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def deg2rad(self, name=None):\r\n return ivy.deg2rad(self._ivy_array)\r\n\r\n @with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"complex64\", \"complex128\")},\r\n \"paddle\",\r\n )\r\n def imag(self, name=None):\r\n return paddle_frontend.imag(self)\r\n\r\n def is_tensor(self):\r\n return paddle_frontend.is_tensor(self._ivy_array)\r\n",
"path": "ivy/functional/frontends/paddle/tensor/tensor.py"
}
] | [
{
"content": "# local\r\nimport ivy\r\nimport ivy.functional.frontends.paddle as paddle_frontend\r\nfrom ivy.func_wrapper import with_supported_dtypes, with_unsupported_dtypes\r\nfrom ivy.functional.frontends.paddle.func_wrapper import _to_ivy_array\r\n\r\n\r\nclass Tensor:\r\n def __init__(self, array, dtype=None, place=\"cpu\", stop_gradient=True):\r\n self._ivy_array = (\r\n ivy.array(array, dtype=dtype, device=place)\r\n if not isinstance(array, ivy.Array)\r\n else array\r\n )\r\n self._dtype = dtype\r\n self._place = place\r\n self._stop_gradient = stop_gradient\r\n\r\n def __repr__(self):\r\n return (\r\n str(self._ivy_array.__repr__())\r\n .replace(\"ivy.array\", \"ivy.frontends.paddle.Tensor\")\r\n .replace(\"dev\", \"place\")\r\n )\r\n\r\n # Properties #\r\n # ---------- #\r\n\r\n @property\r\n def ivy_array(self):\r\n return self._ivy_array\r\n\r\n @property\r\n def place(self):\r\n return self.ivy_array.device\r\n\r\n @property\r\n def dtype(self):\r\n return self._ivy_array.dtype\r\n\r\n @property\r\n def shape(self):\r\n return self._ivy_array.shape\r\n\r\n @property\r\n def ndim(self):\r\n return self.dim()\r\n\r\n # Setters #\r\n # --------#\r\n\r\n @ivy_array.setter\r\n def ivy_array(self, array):\r\n self._ivy_array = (\r\n ivy.array(array) if not isinstance(array, ivy.Array) else array\r\n )\r\n\r\n # Special Methods #\r\n # -------------------#\r\n\r\n def __getitem__(self, item):\r\n ivy_args = ivy.nested_map([self, item], _to_ivy_array)\r\n ret = ivy.get_item(*ivy_args)\r\n return paddle_frontend.Tensor(ret)\r\n\r\n def __setitem__(self, item, value):\r\n item, value = ivy.nested_map([item, value], _to_ivy_array)\r\n self.ivy_array[item] = value\r\n\r\n def __iter__(self):\r\n if self.ndim == 0:\r\n raise TypeError(\"iteration over a 0-d tensor not supported\")\r\n for i in range(self.shape[0]):\r\n yield self[i]\r\n\r\n # Instance Methods #\r\n # ---------------- #\r\n\r\n def reshape(self, *args, shape=None):\r\n if args and shape:\r\n raise TypeError(\"reshape() got multiple values for argument 'shape'\")\r\n if shape is not None:\r\n return paddle_frontend.reshape(self._ivy_array, shape)\r\n if args:\r\n if isinstance(args[0], (tuple, list)):\r\n shape = args[0]\r\n return paddle_frontend.reshape(self._ivy_array, shape)\r\n else:\r\n return paddle_frontend.reshape(self._ivy_array, args)\r\n return paddle_frontend.reshape(self._ivy_array)\r\n\r\n def dim(self):\r\n return self.ivy_array.ndim\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def abs(self):\r\n return paddle_frontend.abs(self)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def ceil(self):\r\n return paddle_frontend.ceil(self)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\",)}, \"paddle\")\r\n def asinh(self, name=None):\r\n return ivy.asinh(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def asin(self, name=None):\r\n return ivy.asin(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def cosh(self, name=None):\r\n return ivy.cosh(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def log(self, name=None):\r\n return ivy.log(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def sin(self, name=None):\r\n return 
ivy.sin(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def sinh(self, name=None):\r\n return ivy.sinh(self._ivy_array)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def argmax(self, axis=None, keepdim=False, dtype=None, name=None):\r\n return ivy.argmax(self._ivy_array, axis=axis, keepdims=keepdim, dtype=dtype)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def sqrt(self, name=None):\r\n return ivy.sqrt(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def cos(self, name=None):\r\n return ivy.cos(self._ivy_array)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def exp(self, name=None):\r\n return ivy.exp(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def erf(self, name=None):\r\n return ivy.erf(self._ivy_array)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def subtract(self, y, name=None):\r\n y_ivy = _to_ivy_array(y)\r\n return ivy.subtract(self._ivy_array, y_ivy)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def log10(self, name=None):\r\n return ivy.log10(self._ivy_array)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def argsort(self, axis=-1, descending=False, name=None):\r\n return ivy.argsort(self._ivy_array, axis=axis, descending=descending)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def floor(self, name=None):\r\n return ivy.floor(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def tanh(self, name=None):\r\n return ivy.tanh(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def add_(self, name=None):\r\n return ivy.add(self._ivy_array)\r\n\r\n @with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"float16\", \"float32\", \"float64\", \"int32\", \"int64\")},\r\n \"paddle\",\r\n )\r\n def isinf(self, name=None):\r\n return ivy.isinf(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def square(self, name=None):\r\n return ivy.square(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def cholesky(self, upper=False, name=None):\r\n return ivy.cholesky(self._ivy_array, upper=upper)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def multiply(self, y, name=None):\r\n return paddle_frontend.multiply(self, y)\r\n\r\n @with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"float16\", \"float32\", \"float64\", \"int32\", \"int64\")},\r\n \"paddle\",\r\n )\r\n def isfinite(self, name=None):\r\n return ivy.isfinite(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def all(self, axis=None, keepdim=False, dtype=None, name=None):\r\n return ivy.all(self.ivy_array, axis=axis, keepdims=keepdim, dtype=dtype)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def allclose(self, other, rtol=1e-05, atol=1e-08, 
equal_nan=False, name=None):\r\n return ivy.allclose(\r\n self._ivy_array, other, rtol=rtol, atol=atol, equal_nan=equal_nan\r\n )\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def sort(self, axis=-1, descending=False, name=None):\r\n return ivy.sort(self._ivy_array, axis=axis, descending=descending)\r\n\r\n @with_supported_dtypes(\r\n {\r\n \"2.4.2 and below\": (\r\n \"bool\",\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"int32\",\r\n \"int64\",\r\n )\r\n },\r\n \"paddle\",\r\n )\r\n def bitwise_and(self, y, out=None, name=None):\r\n return paddle_frontend.bitwise_and(self, y)\r\n\r\n @with_supported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"bool\",\r\n \"int8\",\r\n \"int16\",\r\n \"int32\",\r\n \"int64\",\r\n \"float32\",\r\n \"float64\",\r\n )\r\n },\r\n \"paddle\",\r\n )\r\n def logical_or(self, y, out=None, name=None):\r\n return paddle_frontend.logical_or(self, y, out=out)\r\n\r\n @with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"bool\", \"uint8\", \"int8\", \"int16\", \"int32\", \"int64\")},\r\n \"paddle\",\r\n )\r\n def bitwise_xor(self, y, out=None, name=None):\r\n return paddle_frontend.bitwise_xor(self, y)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def any(self, axis=None, keepdim=False, name=None):\r\n return ivy.any(self._ivy_array, axis=axis, keepdims=keepdim)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def astype(self, dtype):\r\n return ivy.astype(self._ivy_array, dtype=dtype)\r\n\r\n @with_supported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"bool\",\r\n \"int8\",\r\n \"int16\",\r\n \"int32\",\r\n \"int64\",\r\n \"float32\",\r\n \"float64\",\r\n )\r\n },\r\n \"paddle\",\r\n )\r\n def logical_xor(self, y, out=None, name=None):\r\n return paddle_frontend.logical_xor(self, y, out=out)\r\n\r\n @with_unsupported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"bool\",\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"complex64\",\r\n \"complex128\",\r\n )\r\n },\r\n \"paddle\",\r\n )\r\n def greater_than(self, y, name=None):\r\n return paddle_frontend.greater_than(self, y)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def rsqrt(self, name=None):\r\n return ivy.reciprocal(ivy.sqrt(self._ivy_array))\r\n\r\n @with_unsupported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"bool\",\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"complex64\",\r\n \"complex128\",\r\n )\r\n },\r\n \"paddle\",\r\n )\r\n def less_than(self, y, name=None):\r\n return paddle_frontend.less_than(self, y)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def cumprod(self, dim=None, dtype=None, name=None):\r\n return ivy.cumprod(self._ivy_array, axis=dim, dtype=dtype)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def cumsum(self, axis=None, dtype=None, name=None):\r\n return ivy.cumsum(self._ivy_array, axis=axis, dtype=dtype)\r\n\r\n @with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"complex64\", \"complex128\", \"float32\", \"float64\")},\r\n \"paddle\",\r\n )\r\n def angle(self, name=None):\r\n return ivy.angle(self._ivy_array)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def rad2deg(self, name=None):\r\n return ivy.rad2deg(self._ivy_array)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": \"bfloat16\"}, 
\"paddle\")\r\n def fmax(self, y, name=None):\r\n y_ivy = _to_ivy_array(y)\r\n return ivy.fmax(self._ivy_array, y_ivy)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": \"bfloat16\"}, \"paddle\")\r\n def fmin(self, y, name=None):\r\n y_ivy = _to_ivy_array(y)\r\n return ivy.fmin(self._ivy_array, y_ivy)\r\n\r\n @with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n )\r\n def minimum(self, y, name=None):\r\n y_ivy = _to_ivy_array(y)\r\n return ivy.minimum(self._ivy_array, y_ivy)\r\n\r\n @with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n )\r\n def max(self, axis=None, keepdim=False, name=None):\r\n return ivy.max(self._ivy_array, axis=axis, keepdims=keepdim)\r\n\r\n @with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def deg2rad(self, name=None):\r\n return ivy.deg2rad(self._ivy_array)\r\n\r\n @with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"complex64\", \"complex128\")},\r\n \"paddle\",\r\n )\r\n def imag(self, name=None):\r\n return paddle_frontend.imag(self)\r\n\r\n def is_tensor(self):\r\n return paddle_frontend.is_tensor(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.5.0 and below\": (\"int32\", \"int64\")}, \"paddle\")\r\n def floor_divide(self, y, name=None):\r\n y_ivy = y._ivy_array if isinstance(y, Tensor) else _to_ivy_array(y)\r\n return ivy.floor_divide(self._ivy_array, y_ivy)\r\n",
"path": "ivy/functional/frontends/paddle/tensor/tensor.py"
}
] | diff --git a/ivy/functional/frontends/paddle/tensor/tensor.py b/ivy/functional/frontends/paddle/tensor/tensor.py
index 7d7a6f1c74cdb..5122422504031 100644
--- a/ivy/functional/frontends/paddle/tensor/tensor.py
+++ b/ivy/functional/frontends/paddle/tensor/tensor.py
@@ -365,3 +365,8 @@ def imag(self, name=None):
def is_tensor(self):
return paddle_frontend.is_tensor(self._ivy_array)
+
+ @with_supported_dtypes({"2.5.0 and below": ("int32", "int64")}, "paddle")
+ def floor_divide(self, y, name=None):
+ y_ivy = y._ivy_array if isinstance(y, Tensor) else _to_ivy_array(y)
+ return ivy.floor_divide(self._ivy_array, y_ivy)
diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_tensor.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_tensor.py
index bf517f39982dc..a0eed17c398a7 100644
--- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_tensor.py
+++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_tensor.py
@@ -1851,6 +1851,45 @@ def test_paddle_imag(
)
+# floor_divide
+@handle_frontend_method(
+ class_tree=CLASS_TREE,
+ init_tree="paddle.to_tensor",
+ method_name="floor_divide",
+ dtypes_and_x=helpers.dtype_and_values(
+ available_dtypes=helpers.get_dtypes("valid"),
+ num_arrays=2,
+ min_value=2,
+ shared_dtype=True,
+ large_abs_safety_factor=2,
+ small_abs_safety_factor=2,
+ safety_factor_scale="linear",
+ ),
+)
+def test_paddle_floor_divide(
+ dtypes_and_x,
+ frontend_method_data,
+ init_flags,
+ method_flags,
+ frontend,
+ on_device,
+):
+ input_dtype, x = dtypes_and_x
+    # Absolute tolerance is 1.
+ helpers.test_frontend_method(
+ init_input_dtypes=input_dtype,
+ init_all_as_kwargs_np={"data": x[0]},
+ method_input_dtypes=input_dtype,
+ method_all_as_kwargs_np={"y": x[1]},
+ frontend_method_data=frontend_method_data,
+ init_flags=init_flags,
+ method_flags=method_flags,
+ frontend=frontend,
+ on_device=on_device,
+ atol_=1,
+ )
+
+
# is_tensor
@handle_frontend_method(
class_tree=CLASS_TREE,
|
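For reference, here is a minimal usage sketch of the `floor_divide` method added by the diff above. This is not part of the PR itself; it assumes the patch is applied and that a backend (NumPy here) is installed, and uses `paddle.to_tensor`, the frontend constructor the tests above exercise via `init_tree`:

```python
import ivy
import ivy.functional.frontends.paddle as paddle

# Pick any installed Ivy backend; NumPy is assumed here.
ivy.set_backend("numpy")

x = paddle.to_tensor([7, 8, 9])  # integer dtype, as required by the
y = paddle.to_tensor([2, 3, 4])  # @with_supported_dtypes decorator

# Element-wise floor division through the new Tensor method.
res = x.floor_divide(y)
print(res)  # expected values: [3, 2, 2]
```

Note that, per the `isinstance` check in the diff, the method accepts either another frontend `Tensor` (unwrapped via `_ivy_array`) or anything `_to_ivy_array` can convert.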
MongoEngine__mongoengine-1951 | document._cls field not set after document.save() call
```python
from mongoengine import (
    connection,
    Document,
    EmbeddedDocument,
    EmbeddedDocumentField,
    StringField
)


class EmbedData(EmbeddedDocument):
    data = StringField()
    meta = {'allow_inheritance': True}


class DataDoc(Document):
    name = StringField()
    embed = EmbeddedDocumentField(EmbedData)
    meta = {'allow_inheritance': True}


connection.connect(is_mock=True)

test_doc = DataDoc(name='test', embed=EmbedData(data='data'))
test_doc.save()

saved_doc = DataDoc.objects(name='test').first()
assert test_doc._cls == saved_doc._cls
assert test_doc.embed._cls == saved_doc.embed._cls

test_doc.delete()
```
In MongoEngine 0.15.3 the above code works; with version 0.16.0 the asserts fail because the `_cls` field is `None` on the `test_doc` object after construction and save.
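For context, the fix that landed in `mongoengine/base/document.py` (shown in full in the after-files below) restores the old behaviour by defaulting `_cls` during initialisation. The relevant addition to `BaseDocument.__init__` is:

```python
# In BaseDocument.__init__, after default values are assigned
# to the instance and before passed-in values are applied:
if '_cls' not in values:
    self._cls = self._class_name
```

With this in place, `_cls` is populated at construction time rather than only when loading from the database, so both asserts in the reproduction script pass again.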
| [
{
"content": "import copy\nimport numbers\nfrom functools import partial\n\nfrom bson import ObjectId, json_util\nfrom bson.dbref import DBRef\nfrom bson.son import SON\nimport pymongo\nimport six\n\nfrom mongoengine import signals\nfrom mongoengine.base.common import get_document\nfrom mongoengine.base.datastructures import (BaseDict, BaseList,\n EmbeddedDocumentList,\n LazyReference,\n StrictDict)\nfrom mongoengine.base.fields import ComplexBaseField\nfrom mongoengine.common import _import_class\nfrom mongoengine.errors import (FieldDoesNotExist, InvalidDocumentError,\n LookUpError, OperationError, ValidationError)\nfrom mongoengine.python_support import Hashable\n\n__all__ = ('BaseDocument', 'NON_FIELD_ERRORS')\n\nNON_FIELD_ERRORS = '__all__'\n\n\nclass BaseDocument(object):\n __slots__ = ('_changed_fields', '_initialised', '_created', '_data',\n '_dynamic_fields', '_auto_id_field', '_db_field_map',\n '__weakref__')\n\n _dynamic = False\n _dynamic_lock = True\n STRICT = False\n\n def __init__(self, *args, **values):\n \"\"\"\n Initialise a document or embedded document\n\n :param __auto_convert: Try and will cast python objects to Object types\n :param values: A dictionary of values for the document\n \"\"\"\n self._initialised = False\n self._created = True\n if args:\n # Combine positional arguments with named arguments.\n # We only want named arguments.\n field = iter(self._fields_ordered)\n # If its an automatic id field then skip to the first defined field\n if getattr(self, '_auto_id_field', False):\n next(field)\n for value in args:\n name = next(field)\n if name in values:\n raise TypeError(\n 'Multiple values for keyword argument \"%s\"' % name)\n values[name] = value\n\n __auto_convert = values.pop('__auto_convert', True)\n\n # 399: set default values only to fields loaded from DB\n __only_fields = set(values.pop('__only_fields', values))\n\n _created = values.pop('_created', True)\n\n signals.pre_init.send(self.__class__, document=self, values=values)\n\n # Check if there are undefined fields supplied to the constructor,\n # if so raise an Exception.\n if not self._dynamic and (self._meta.get('strict', True) or _created):\n _undefined_fields = set(values.keys()) - set(\n self._fields.keys() + ['id', 'pk', '_cls', '_text_score'])\n if _undefined_fields:\n msg = (\n 'The fields \"{0}\" do not exist on the document \"{1}\"'\n ).format(_undefined_fields, self._class_name)\n raise FieldDoesNotExist(msg)\n\n if self.STRICT and not self._dynamic:\n self._data = StrictDict.create(allowed_keys=self._fields_ordered)()\n else:\n self._data = {}\n\n self._dynamic_fields = SON()\n\n # Assign default values to instance\n for key, field in self._fields.iteritems():\n if self._db_field_map.get(key, key) in __only_fields:\n continue\n value = getattr(self, key, None)\n setattr(self, key, value)\n\n # Set passed values after initialisation\n if self._dynamic:\n dynamic_data = {}\n for key, value in values.iteritems():\n if key in self._fields or key == '_id':\n setattr(self, key, value)\n else:\n dynamic_data[key] = value\n else:\n FileField = _import_class('FileField')\n for key, value in values.iteritems():\n key = self._reverse_db_field_map.get(key, key)\n if key in self._fields or key in ('id', 'pk', '_cls'):\n if __auto_convert and value is not None:\n field = self._fields.get(key)\n if field and not isinstance(field, FileField):\n value = field.to_python(value)\n setattr(self, key, value)\n else:\n self._data[key] = value\n\n # Set any get_<field>_display methods\n 
self.__set_field_display()\n\n if self._dynamic:\n self._dynamic_lock = False\n for key, value in dynamic_data.iteritems():\n setattr(self, key, value)\n\n # Flag initialised\n self._initialised = True\n self._created = _created\n signals.post_init.send(self.__class__, document=self)\n\n def __delattr__(self, *args, **kwargs):\n \"\"\"Handle deletions of fields\"\"\"\n field_name = args[0]\n if field_name in self._fields:\n default = self._fields[field_name].default\n if callable(default):\n default = default()\n setattr(self, field_name, default)\n else:\n super(BaseDocument, self).__delattr__(*args, **kwargs)\n\n def __setattr__(self, name, value):\n # Handle dynamic data only if an initialised dynamic document\n if self._dynamic and not self._dynamic_lock:\n\n if not hasattr(self, name) and not name.startswith('_'):\n DynamicField = _import_class('DynamicField')\n field = DynamicField(db_field=name, null=True)\n field.name = name\n self._dynamic_fields[name] = field\n self._fields_ordered += (name,)\n\n if not name.startswith('_'):\n value = self.__expand_dynamic_values(name, value)\n\n # Handle marking data as changed\n if name in self._dynamic_fields:\n self._data[name] = value\n if hasattr(self, '_changed_fields'):\n self._mark_as_changed(name)\n try:\n self__created = self._created\n except AttributeError:\n self__created = True\n\n if (\n self._is_document and\n not self__created and\n name in self._meta.get('shard_key', tuple()) and\n self._data.get(name) != value\n ):\n msg = 'Shard Keys are immutable. Tried to update %s' % name\n raise OperationError(msg)\n\n try:\n self__initialised = self._initialised\n except AttributeError:\n self__initialised = False\n # Check if the user has created a new instance of a class\n if (self._is_document and self__initialised and\n self__created and name == self._meta.get('id_field')):\n super(BaseDocument, self).__setattr__('_created', False)\n\n super(BaseDocument, self).__setattr__(name, value)\n\n def __getstate__(self):\n data = {}\n for k in ('_changed_fields', '_initialised', '_created',\n '_dynamic_fields', '_fields_ordered'):\n if hasattr(self, k):\n data[k] = getattr(self, k)\n data['_data'] = self.to_mongo()\n return data\n\n def __setstate__(self, data):\n if isinstance(data['_data'], SON):\n data['_data'] = self.__class__._from_son(data['_data'])._data\n for k in ('_changed_fields', '_initialised', '_created', '_data',\n '_dynamic_fields'):\n if k in data:\n setattr(self, k, data[k])\n if '_fields_ordered' in data:\n if self._dynamic:\n setattr(self, '_fields_ordered', data['_fields_ordered'])\n else:\n _super_fields_ordered = type(self)._fields_ordered\n setattr(self, '_fields_ordered', _super_fields_ordered)\n\n dynamic_fields = data.get('_dynamic_fields') or SON()\n for k in dynamic_fields.keys():\n setattr(self, k, data['_data'].get(k))\n\n def __iter__(self):\n return iter(self._fields_ordered)\n\n def __getitem__(self, name):\n \"\"\"Dictionary-style field access, return a field's value if present.\n \"\"\"\n try:\n if name in self._fields_ordered:\n return getattr(self, name)\n except AttributeError:\n pass\n raise KeyError(name)\n\n def __setitem__(self, name, value):\n \"\"\"Dictionary-style field access, set a field's value.\n \"\"\"\n # Ensure that the field exists before settings its value\n if not self._dynamic and name not in self._fields:\n raise KeyError(name)\n return setattr(self, name, value)\n\n def __contains__(self, name):\n try:\n val = getattr(self, name)\n return val is not None\n except AttributeError:\n 
return False\n\n def __len__(self):\n return len(self._data)\n\n def __repr__(self):\n try:\n u = self.__str__()\n except (UnicodeEncodeError, UnicodeDecodeError):\n u = '[Bad Unicode data]'\n repr_type = str if u is None else type(u)\n return repr_type('<%s: %s>' % (self.__class__.__name__, u))\n\n def __str__(self):\n # TODO this could be simpler?\n if hasattr(self, '__unicode__'):\n if six.PY3:\n return self.__unicode__()\n else:\n return six.text_type(self).encode('utf-8')\n return six.text_type('%s object' % self.__class__.__name__)\n\n def __eq__(self, other):\n if isinstance(other, self.__class__) and hasattr(other, 'id') and other.id is not None:\n return self.id == other.id\n if isinstance(other, DBRef):\n return self._get_collection_name() == other.collection and self.id == other.id\n if self.id is None:\n return self is other\n return False\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def clean(self):\n \"\"\"\n Hook for doing document level data cleaning before validation is run.\n\n Any ValidationError raised by this method will not be associated with\n a particular field; it will have a special-case association with the\n field defined by NON_FIELD_ERRORS.\n \"\"\"\n pass\n\n def get_text_score(self):\n \"\"\"\n Get text score from text query\n \"\"\"\n\n if '_text_score' not in self._data:\n raise InvalidDocumentError('This document is not originally built from a text query')\n\n return self._data['_text_score']\n\n def to_mongo(self, use_db_field=True, fields=None):\n \"\"\"\n Return as SON data ready for use with MongoDB.\n \"\"\"\n if not fields:\n fields = []\n\n data = SON()\n data['_id'] = None\n data['_cls'] = self._class_name\n\n # only root fields ['test1.a', 'test2'] => ['test1', 'test2']\n root_fields = {f.split('.')[0] for f in fields}\n\n for field_name in self:\n if root_fields and field_name not in root_fields:\n continue\n\n value = self._data.get(field_name, None)\n field = self._fields.get(field_name)\n\n if field is None and self._dynamic:\n field = self._dynamic_fields.get(field_name)\n\n if value is not None:\n f_inputs = field.to_mongo.__code__.co_varnames\n ex_vars = {}\n if fields and 'fields' in f_inputs:\n key = '%s.' 
% field_name\n embedded_fields = [\n i.replace(key, '') for i in fields\n if i.startswith(key)]\n\n ex_vars['fields'] = embedded_fields\n\n if 'use_db_field' in f_inputs:\n ex_vars['use_db_field'] = use_db_field\n\n value = field.to_mongo(value, **ex_vars)\n\n # Handle self generating fields\n if value is None and field._auto_gen:\n value = field.generate()\n self._data[field_name] = value\n\n if (value is not None) or (field.null):\n if use_db_field:\n data[field.db_field] = value\n else:\n data[field.name] = value\n\n # Only add _cls if allow_inheritance is True\n if not self._meta.get('allow_inheritance'):\n data.pop('_cls')\n\n return data\n\n def validate(self, clean=True):\n \"\"\"Ensure that all fields' values are valid and that required fields\n are present.\n \"\"\"\n # Ensure that each field is matched to a valid value\n errors = {}\n if clean:\n try:\n self.clean()\n except ValidationError as error:\n errors[NON_FIELD_ERRORS] = error\n\n # Get a list of tuples of field names and their current values\n fields = [(self._fields.get(name, self._dynamic_fields.get(name)),\n self._data.get(name)) for name in self._fields_ordered]\n\n EmbeddedDocumentField = _import_class('EmbeddedDocumentField')\n GenericEmbeddedDocumentField = _import_class(\n 'GenericEmbeddedDocumentField')\n\n for field, value in fields:\n if value is not None:\n try:\n if isinstance(field, (EmbeddedDocumentField,\n GenericEmbeddedDocumentField)):\n field._validate(value, clean=clean)\n else:\n field._validate(value)\n except ValidationError as error:\n errors[field.name] = error.errors or error\n except (ValueError, AttributeError, AssertionError) as error:\n errors[field.name] = error\n elif field.required and not getattr(field, '_auto_gen', False):\n errors[field.name] = ValidationError('Field is required',\n field_name=field.name)\n\n if errors:\n pk = 'None'\n if hasattr(self, 'pk'):\n pk = self.pk\n elif self._instance and hasattr(self._instance, 'pk'):\n pk = self._instance.pk\n message = 'ValidationError (%s:%s) ' % (self._class_name, pk)\n raise ValidationError(message, errors=errors)\n\n def to_json(self, *args, **kwargs):\n \"\"\"Convert this document to JSON.\n\n :param use_db_field: Serialize field names as they appear in\n MongoDB (as opposed to attribute names on this document).\n Defaults to True.\n \"\"\"\n use_db_field = kwargs.pop('use_db_field', True)\n return json_util.dumps(self.to_mongo(use_db_field), *args, **kwargs)\n\n @classmethod\n def from_json(cls, json_data, created=False):\n \"\"\"Converts json data to a Document instance\n\n :param json_data: The json data to load into the Document\n :param created: If True, the document will be considered as a brand new document\n If False and an id is provided, it will consider that the data being\n loaded corresponds to what's already in the database (This has an impact of subsequent call to .save())\n If False and no id is provided, it will consider the data as a new document\n (default ``False``)\n \"\"\"\n return cls._from_son(json_util.loads(json_data), created=created)\n\n def __expand_dynamic_values(self, name, value):\n \"\"\"Expand any dynamic values to their correct types / values.\"\"\"\n if not isinstance(value, (dict, list, tuple)):\n return value\n\n # If the value is a dict with '_cls' in it, turn it into a document\n is_dict = isinstance(value, dict)\n if is_dict and '_cls' in value:\n cls = get_document(value['_cls'])\n return cls(**value)\n\n if is_dict:\n value = {\n k: self.__expand_dynamic_values(k, v)\n for k, v in 
value.items()\n }\n else:\n value = [self.__expand_dynamic_values(name, v) for v in value]\n\n # Convert lists / values so we can watch for any changes on them\n EmbeddedDocumentListField = _import_class('EmbeddedDocumentListField')\n if (isinstance(value, (list, tuple)) and\n not isinstance(value, BaseList)):\n if issubclass(type(self), EmbeddedDocumentListField):\n value = EmbeddedDocumentList(value, self, name)\n else:\n value = BaseList(value, self, name)\n elif isinstance(value, dict) and not isinstance(value, BaseDict):\n value = BaseDict(value, self, name)\n\n return value\n\n def _mark_as_changed(self, key):\n \"\"\"Mark a key as explicitly changed by the user.\"\"\"\n if not key:\n return\n\n if not hasattr(self, '_changed_fields'):\n return\n\n if '.' in key:\n key, rest = key.split('.', 1)\n key = self._db_field_map.get(key, key)\n key = '%s.%s' % (key, rest)\n else:\n key = self._db_field_map.get(key, key)\n\n if key not in self._changed_fields:\n levels, idx = key.split('.'), 1\n while idx <= len(levels):\n if '.'.join(levels[:idx]) in self._changed_fields:\n break\n idx += 1\n else:\n self._changed_fields.append(key)\n # remove lower level changed fields\n level = '.'.join(levels[:idx]) + '.'\n remove = self._changed_fields.remove\n for field in self._changed_fields[:]:\n if field.startswith(level):\n remove(field)\n\n def _clear_changed_fields(self):\n \"\"\"Using _get_changed_fields iterate and remove any fields that\n are marked as changed.\n \"\"\"\n for changed in self._get_changed_fields():\n parts = changed.split('.')\n data = self\n for part in parts:\n if isinstance(data, list):\n try:\n data = data[int(part)]\n except IndexError:\n data = None\n elif isinstance(data, dict):\n data = data.get(part, None)\n else:\n data = getattr(data, part, None)\n\n if not isinstance(data, LazyReference) and hasattr(data, '_changed_fields'):\n if getattr(data, '_is_document', False):\n continue\n\n data._changed_fields = []\n\n self._changed_fields = []\n\n def _nestable_types_changed_fields(self, changed_fields, base_key, data):\n \"\"\"Inspect nested data for changed fields\n\n :param changed_fields: Previously collected changed fields\n :param base_key: The base key that must be used to prepend changes to this data\n :param data: data to inspect for changes\n \"\"\"\n # Loop list / dict fields as they contain documents\n # Determine the iterator to use\n if not hasattr(data, 'items'):\n iterator = enumerate(data)\n else:\n iterator = data.iteritems()\n\n for index_or_key, value in iterator:\n item_key = '%s%s.' % (base_key, index_or_key)\n # don't check anything lower if this key is already marked\n # as changed.\n if item_key[:-1] in changed_fields:\n continue\n\n if hasattr(value, '_get_changed_fields'):\n changed = value._get_changed_fields()\n changed_fields += ['%s%s' % (item_key, k) for k in changed if k]\n elif isinstance(value, (list, tuple, dict)):\n self._nestable_types_changed_fields(\n changed_fields, item_key, value)\n\n def _get_changed_fields(self):\n \"\"\"Return a list of all fields that have explicitly been changed.\n \"\"\"\n EmbeddedDocument = _import_class('EmbeddedDocument')\n ReferenceField = _import_class('ReferenceField')\n GenericReferenceField = _import_class('GenericReferenceField')\n SortedListField = _import_class('SortedListField')\n\n changed_fields = []\n changed_fields += getattr(self, '_changed_fields', [])\n\n for field_name in self._fields_ordered:\n db_field_name = self._db_field_map.get(field_name, field_name)\n key = '%s.' 
% db_field_name\n data = self._data.get(field_name, None)\n field = self._fields.get(field_name)\n\n if db_field_name in changed_fields:\n # Whole field already marked as changed, no need to go further\n continue\n\n if isinstance(field, ReferenceField): # Don't follow referenced documents\n continue\n\n if isinstance(data, EmbeddedDocument):\n # Find all embedded fields that have been changed\n changed = data._get_changed_fields()\n changed_fields += ['%s%s' % (key, k) for k in changed if k]\n elif isinstance(data, (list, tuple, dict)):\n if (hasattr(field, 'field') and\n isinstance(field.field, (ReferenceField, GenericReferenceField))):\n continue\n elif isinstance(field, SortedListField) and field._ordering:\n # if ordering is affected whole list is changed\n if any(field._ordering in d._changed_fields for d in data):\n changed_fields.append(db_field_name)\n continue\n\n self._nestable_types_changed_fields(\n changed_fields, key, data)\n return changed_fields\n\n def _delta(self):\n \"\"\"Returns the delta (set, unset) of the changes for a document.\n Gets any values that have been explicitly changed.\n \"\"\"\n # Handles cases where not loaded from_son but has _id\n doc = self.to_mongo()\n\n set_fields = self._get_changed_fields()\n unset_data = {}\n if hasattr(self, '_changed_fields'):\n set_data = {}\n # Fetch each set item from its path\n for path in set_fields:\n parts = path.split('.')\n d = doc\n new_path = []\n for p in parts:\n if isinstance(d, (ObjectId, DBRef)):\n # Don't dig in the references\n break\n elif isinstance(d, list) and p.isdigit():\n # An item of a list (identified by its index) is updated\n d = d[int(p)]\n elif hasattr(d, 'get'):\n # dict-like (dict, embedded document)\n d = d.get(p)\n new_path.append(p)\n path = '.'.join(new_path)\n set_data[path] = d\n else:\n set_data = doc\n if '_id' in set_data:\n del set_data['_id']\n\n # Determine if any changed items were actually unset.\n for path, value in set_data.items():\n if value or isinstance(value, (numbers.Number, bool)): # Account for 0 and True that are truthy\n continue\n\n parts = path.split('.')\n\n if (self._dynamic and len(parts) and parts[0] in\n self._dynamic_fields):\n del set_data[path]\n unset_data[path] = 1\n continue\n\n # If we've set a value that ain't the default value don't unset it.\n default = None\n if path in self._fields:\n default = self._fields[path].default\n else: # Perform a full lookup for lists / embedded lookups\n d = self\n db_field_name = parts.pop()\n for p in parts:\n if isinstance(d, list) and p.isdigit():\n d = d[int(p)]\n elif (hasattr(d, '__getattribute__') and\n not isinstance(d, dict)):\n real_path = d._reverse_db_field_map.get(p, p)\n d = getattr(d, real_path)\n else:\n d = d.get(p)\n\n if hasattr(d, '_fields'):\n field_name = d._reverse_db_field_map.get(db_field_name,\n db_field_name)\n if field_name in d._fields:\n default = d._fields.get(field_name).default\n else:\n default = None\n\n if default is not None:\n default = default() if callable(default) else default\n\n if value != default:\n continue\n\n del set_data[path]\n unset_data[path] = 1\n return set_data, unset_data\n\n @classmethod\n def _get_collection_name(cls):\n \"\"\"Return the collection name for this class. 
None for abstract\n class.\n \"\"\"\n return cls._meta.get('collection', None)\n\n @classmethod\n def _from_son(cls, son, _auto_dereference=True, only_fields=None, created=False):\n \"\"\"Create an instance of a Document (subclass) from a PyMongo\n SON.\n \"\"\"\n if not only_fields:\n only_fields = []\n\n if son and not isinstance(son, dict):\n raise ValueError(\"The source SON object needs to be of type 'dict'\")\n\n # Get the class name from the document, falling back to the given\n # class if unavailable\n class_name = son.get('_cls', cls._class_name)\n\n # Convert SON to a data dict, making sure each key is a string and\n # corresponds to the right db field.\n data = {}\n for key, value in son.iteritems():\n key = str(key)\n key = cls._db_field_map.get(key, key)\n data[key] = value\n\n # Return correct subclass for document type\n if class_name != cls._class_name:\n cls = get_document(class_name)\n\n changed_fields = []\n errors_dict = {}\n\n fields = cls._fields\n if not _auto_dereference:\n fields = copy.deepcopy(fields)\n\n for field_name, field in fields.iteritems():\n field._auto_dereference = _auto_dereference\n if field.db_field in data:\n value = data[field.db_field]\n try:\n data[field_name] = (value if value is None\n else field.to_python(value))\n if field_name != field.db_field:\n del data[field.db_field]\n except (AttributeError, ValueError) as e:\n errors_dict[field_name] = e\n\n if errors_dict:\n errors = '\\n'.join(['%s - %s' % (k, v)\n for k, v in errors_dict.items()])\n msg = ('Invalid data to create a `%s` instance.\\n%s'\n % (cls._class_name, errors))\n raise InvalidDocumentError(msg)\n\n # In STRICT documents, remove any keys that aren't in cls._fields\n if cls.STRICT:\n data = {k: v for k, v in data.iteritems() if k in cls._fields}\n\n obj = cls(__auto_convert=False, _created=created, __only_fields=only_fields, **data)\n obj._changed_fields = changed_fields\n if not _auto_dereference:\n obj._fields = fields\n\n return obj\n\n @classmethod\n def _build_index_specs(cls, meta_indexes):\n \"\"\"Generate and merge the full index specs.\"\"\"\n geo_indices = cls._geo_indices()\n unique_indices = cls._unique_with_indexes()\n index_specs = [cls._build_index_spec(spec) for spec in meta_indexes]\n\n def merge_index_specs(index_specs, indices):\n \"\"\"Helper method for merging index specs.\"\"\"\n if not indices:\n return index_specs\n\n # Create a map of index fields to index spec. 
We're converting\n # the fields from a list to a tuple so that it's hashable.\n spec_fields = {\n tuple(index['fields']): index for index in index_specs\n }\n\n # For each new index, if there's an existing index with the same\n # fields list, update the existing spec with all data from the\n # new spec.\n for new_index in indices:\n candidate = spec_fields.get(tuple(new_index['fields']))\n if candidate is None:\n index_specs.append(new_index)\n else:\n candidate.update(new_index)\n\n return index_specs\n\n # Merge geo indexes and unique_with indexes into the meta index specs.\n index_specs = merge_index_specs(index_specs, geo_indices)\n index_specs = merge_index_specs(index_specs, unique_indices)\n return index_specs\n\n @classmethod\n def _build_index_spec(cls, spec):\n \"\"\"Build a PyMongo index spec from a MongoEngine index spec.\"\"\"\n if isinstance(spec, six.string_types):\n spec = {'fields': [spec]}\n elif isinstance(spec, (list, tuple)):\n spec = {'fields': list(spec)}\n elif isinstance(spec, dict):\n spec = dict(spec)\n\n index_list = []\n direction = None\n\n # Check to see if we need to include _cls\n allow_inheritance = cls._meta.get('allow_inheritance')\n include_cls = (\n allow_inheritance and\n not spec.get('sparse', False) and\n spec.get('cls', True) and\n '_cls' not in spec['fields']\n )\n\n # 733: don't include cls if index_cls is False unless there is an explicit cls with the index\n include_cls = include_cls and (spec.get('cls', False) or cls._meta.get('index_cls', True))\n if 'cls' in spec:\n spec.pop('cls')\n for key in spec['fields']:\n # If inherited spec continue\n if isinstance(key, (list, tuple)):\n continue\n\n # ASCENDING from +\n # DESCENDING from -\n # TEXT from $\n # HASHED from #\n # GEOSPHERE from (\n # GEOHAYSTACK from )\n # GEO2D from *\n direction = pymongo.ASCENDING\n if key.startswith('-'):\n direction = pymongo.DESCENDING\n elif key.startswith('$'):\n direction = pymongo.TEXT\n elif key.startswith('#'):\n direction = pymongo.HASHED\n elif key.startswith('('):\n direction = pymongo.GEOSPHERE\n elif key.startswith(')'):\n direction = pymongo.GEOHAYSTACK\n elif key.startswith('*'):\n direction = pymongo.GEO2D\n if key.startswith(('+', '-', '*', '$', '#', '(', ')')):\n key = key[1:]\n\n # Use real field name, do it manually because we need field\n # objects for the next part (list field checking)\n parts = key.split('.')\n if parts in (['pk'], ['id'], ['_id']):\n key = '_id'\n else:\n fields = cls._lookup_field(parts)\n parts = []\n for field in fields:\n try:\n if field != '_id':\n field = field.db_field\n except AttributeError:\n pass\n parts.append(field)\n key = '.'.join(parts)\n index_list.append((key, direction))\n\n # Don't add cls to a geo index\n if include_cls and direction not in (\n pymongo.GEO2D, pymongo.GEOHAYSTACK, pymongo.GEOSPHERE):\n index_list.insert(0, ('_cls', 1))\n\n if index_list:\n spec['fields'] = index_list\n\n return spec\n\n @classmethod\n def _unique_with_indexes(cls, namespace=''):\n \"\"\"Find unique indexes in the document schema and return them.\"\"\"\n unique_indexes = []\n for field_name, field in cls._fields.items():\n sparse = field.sparse\n\n # Generate a list of indexes needed by uniqueness constraints\n if field.unique:\n unique_fields = [field.db_field]\n\n # Add any unique_with fields to the back of the index spec\n if field.unique_with:\n if isinstance(field.unique_with, six.string_types):\n field.unique_with = [field.unique_with]\n\n # Convert unique_with field names to real field names\n unique_with = []\n 
for other_name in field.unique_with:\n parts = other_name.split('.')\n\n # Lookup real name\n parts = cls._lookup_field(parts)\n name_parts = [part.db_field for part in parts]\n unique_with.append('.'.join(name_parts))\n\n # Unique field should be required\n parts[-1].required = True\n sparse = (not sparse and\n parts[-1].name not in cls.__dict__)\n\n unique_fields += unique_with\n\n # Add the new index to the list\n fields = [\n ('%s%s' % (namespace, f), pymongo.ASCENDING)\n for f in unique_fields\n ]\n index = {'fields': fields, 'unique': True, 'sparse': sparse}\n unique_indexes.append(index)\n\n if field.__class__.__name__ == 'ListField':\n field = field.field\n\n # Grab any embedded document field unique indexes\n if (field.__class__.__name__ == 'EmbeddedDocumentField' and\n field.document_type != cls):\n field_namespace = '%s.' % field_name\n doc_cls = field.document_type\n unique_indexes += doc_cls._unique_with_indexes(field_namespace)\n\n return unique_indexes\n\n @classmethod\n def _geo_indices(cls, inspected=None, parent_field=None):\n inspected = inspected or []\n geo_indices = []\n inspected.append(cls)\n\n geo_field_type_names = ('EmbeddedDocumentField', 'GeoPointField',\n 'PointField', 'LineStringField',\n 'PolygonField')\n\n geo_field_types = tuple([_import_class(field)\n for field in geo_field_type_names])\n\n for field in cls._fields.values():\n if not isinstance(field, geo_field_types):\n continue\n\n if hasattr(field, 'document_type'):\n field_cls = field.document_type\n if field_cls in inspected:\n continue\n\n if hasattr(field_cls, '_geo_indices'):\n geo_indices += field_cls._geo_indices(\n inspected, parent_field=field.db_field)\n elif field._geo_index:\n field_name = field.db_field\n if parent_field:\n field_name = '%s.%s' % (parent_field, field_name)\n geo_indices.append({\n 'fields': [(field_name, field._geo_index)]\n })\n\n return geo_indices\n\n @classmethod\n def _lookup_field(cls, parts):\n \"\"\"Given the path to a given field, return a list containing\n the Field object associated with that field and all of its parent\n Field objects.\n\n Args:\n parts (str, list, or tuple) - path to the field. Should be a\n string for simple fields existing on this document or a list\n of strings for a field that exists deeper in embedded documents.\n\n Returns:\n A list of Field instances for fields that were found or\n strings for sub-fields that weren't.\n\n Example:\n >>> user._lookup_field('name')\n [<mongoengine.fields.StringField at 0x1119bff50>]\n\n >>> user._lookup_field('roles')\n [<mongoengine.fields.EmbeddedDocumentListField at 0x1119ec250>]\n\n >>> user._lookup_field(['roles', 'role'])\n [<mongoengine.fields.EmbeddedDocumentListField at 0x1119ec250>,\n <mongoengine.fields.StringField at 0x1119ec050>]\n\n >>> user._lookup_field('doesnt_exist')\n raises LookUpError\n\n >>> user._lookup_field(['roles', 'doesnt_exist'])\n [<mongoengine.fields.EmbeddedDocumentListField at 0x1119ec250>,\n 'doesnt_exist']\n\n \"\"\"\n # TODO this method is WAY too complicated. 
Simplify it.\n # TODO don't think returning a string for embedded non-existent fields is desired\n\n ListField = _import_class('ListField')\n DynamicField = _import_class('DynamicField')\n\n if not isinstance(parts, (list, tuple)):\n parts = [parts]\n\n fields = []\n field = None\n\n for field_name in parts:\n # Handle ListField indexing:\n if field_name.isdigit() and isinstance(field, ListField):\n fields.append(field_name)\n continue\n\n # Look up first field from the document\n if field is None:\n if field_name == 'pk':\n # Deal with \"primary key\" alias\n field_name = cls._meta['id_field']\n\n if field_name in cls._fields:\n field = cls._fields[field_name]\n elif cls._dynamic:\n field = DynamicField(db_field=field_name)\n elif cls._meta.get('allow_inheritance') or cls._meta.get('abstract', False):\n # 744: in case the field is defined in a subclass\n for subcls in cls.__subclasses__():\n try:\n field = subcls._lookup_field([field_name])[0]\n except LookUpError:\n continue\n\n if field is not None:\n break\n else:\n raise LookUpError('Cannot resolve field \"%s\"' % field_name)\n else:\n raise LookUpError('Cannot resolve field \"%s\"' % field_name)\n else:\n ReferenceField = _import_class('ReferenceField')\n GenericReferenceField = _import_class('GenericReferenceField')\n\n # If previous field was a reference, throw an error (we\n # cannot look up fields that are on references).\n if isinstance(field, (ReferenceField, GenericReferenceField)):\n raise LookUpError('Cannot perform join in mongoDB: %s' %\n '__'.join(parts))\n\n # If the parent field has a \"field\" attribute which has a\n # lookup_member method, call it to find the field\n # corresponding to this iteration.\n if hasattr(getattr(field, 'field', None), 'lookup_member'):\n new_field = field.field.lookup_member(field_name)\n\n # If the parent field is a DynamicField or if it's part of\n # a DynamicDocument, mark current field as a DynamicField\n # with db_name equal to the field name.\n elif cls._dynamic and (isinstance(field, DynamicField) or\n getattr(getattr(field, 'document_type', None), '_dynamic', None)):\n new_field = DynamicField(db_field=field_name)\n\n # Else, try to use the parent field's lookup_member method\n # to find the subfield.\n elif hasattr(field, 'lookup_member'):\n new_field = field.lookup_member(field_name)\n\n # Raise a LookUpError if all the other conditions failed.\n else:\n raise LookUpError(\n 'Cannot resolve subfield or operator {} '\n 'on the field {}'.format(field_name, field.name)\n )\n\n # If current field still wasn't found and the parent field\n # is a ComplexBaseField, add the name current field name and\n # move on.\n if not new_field and isinstance(field, ComplexBaseField):\n fields.append(field_name)\n continue\n elif not new_field:\n raise LookUpError('Cannot resolve field \"%s\"' % field_name)\n\n field = new_field # update field to the new field type\n\n fields.append(field)\n\n return fields\n\n @classmethod\n def _translate_field_name(cls, field, sep='.'):\n \"\"\"Translate a field attribute name to a database field name.\n \"\"\"\n parts = field.split(sep)\n parts = [f.db_field for f in cls._lookup_field(parts)]\n return '.'.join(parts)\n\n def __set_field_display(self):\n \"\"\"For each field that specifies choices, create a\n get_<field>_display method.\n \"\"\"\n fields_with_choices = [(n, f) for n, f in self._fields.items()\n if f.choices]\n for attr_name, field in fields_with_choices:\n setattr(self,\n 'get_%s_display' % attr_name,\n partial(self.__get_field_display, 
field=field))\n\n def __get_field_display(self, field):\n \"\"\"Return the display value for a choice field\"\"\"\n value = getattr(self, field.name)\n if field.choices and isinstance(field.choices[0], (list, tuple)):\n if value is None:\n return None\n sep = getattr(field, 'display_sep', ' ')\n values = value if field.__class__.__name__ in ('ListField', 'SortedListField') else [value]\n return sep.join([\n six.text_type(dict(field.choices).get(val, val))\n for val in values or []])\n return value\n",
"path": "mongoengine/base/document.py"
}
] | [
{
"content": "import copy\nimport numbers\nfrom functools import partial\n\nfrom bson import ObjectId, json_util\nfrom bson.dbref import DBRef\nfrom bson.son import SON\nimport pymongo\nimport six\n\nfrom mongoengine import signals\nfrom mongoengine.base.common import get_document\nfrom mongoengine.base.datastructures import (BaseDict, BaseList,\n EmbeddedDocumentList,\n LazyReference,\n StrictDict)\nfrom mongoengine.base.fields import ComplexBaseField\nfrom mongoengine.common import _import_class\nfrom mongoengine.errors import (FieldDoesNotExist, InvalidDocumentError,\n LookUpError, OperationError, ValidationError)\nfrom mongoengine.python_support import Hashable\n\n__all__ = ('BaseDocument', 'NON_FIELD_ERRORS')\n\nNON_FIELD_ERRORS = '__all__'\n\n\nclass BaseDocument(object):\n __slots__ = ('_changed_fields', '_initialised', '_created', '_data',\n '_dynamic_fields', '_auto_id_field', '_db_field_map',\n '__weakref__')\n\n _dynamic = False\n _dynamic_lock = True\n STRICT = False\n\n def __init__(self, *args, **values):\n \"\"\"\n Initialise a document or embedded document\n\n :param __auto_convert: Try and will cast python objects to Object types\n :param values: A dictionary of values for the document\n \"\"\"\n self._initialised = False\n self._created = True\n if args:\n # Combine positional arguments with named arguments.\n # We only want named arguments.\n field = iter(self._fields_ordered)\n # If its an automatic id field then skip to the first defined field\n if getattr(self, '_auto_id_field', False):\n next(field)\n for value in args:\n name = next(field)\n if name in values:\n raise TypeError(\n 'Multiple values for keyword argument \"%s\"' % name)\n values[name] = value\n\n __auto_convert = values.pop('__auto_convert', True)\n\n # 399: set default values only to fields loaded from DB\n __only_fields = set(values.pop('__only_fields', values))\n\n _created = values.pop('_created', True)\n\n signals.pre_init.send(self.__class__, document=self, values=values)\n\n # Check if there are undefined fields supplied to the constructor,\n # if so raise an Exception.\n if not self._dynamic and (self._meta.get('strict', True) or _created):\n _undefined_fields = set(values.keys()) - set(\n self._fields.keys() + ['id', 'pk', '_cls', '_text_score'])\n if _undefined_fields:\n msg = (\n 'The fields \"{0}\" do not exist on the document \"{1}\"'\n ).format(_undefined_fields, self._class_name)\n raise FieldDoesNotExist(msg)\n\n if self.STRICT and not self._dynamic:\n self._data = StrictDict.create(allowed_keys=self._fields_ordered)()\n else:\n self._data = {}\n\n self._dynamic_fields = SON()\n\n # Assign default values to instance\n for key, field in self._fields.iteritems():\n if self._db_field_map.get(key, key) in __only_fields:\n continue\n value = getattr(self, key, None)\n setattr(self, key, value)\n\n if '_cls' not in values:\n self._cls = self._class_name\n\n # Set passed values after initialisation\n if self._dynamic:\n dynamic_data = {}\n for key, value in values.iteritems():\n if key in self._fields or key == '_id':\n setattr(self, key, value)\n else:\n dynamic_data[key] = value\n else:\n FileField = _import_class('FileField')\n for key, value in values.iteritems():\n key = self._reverse_db_field_map.get(key, key)\n if key in self._fields or key in ('id', 'pk', '_cls'):\n if __auto_convert and value is not None:\n field = self._fields.get(key)\n if field and not isinstance(field, FileField):\n value = field.to_python(value)\n setattr(self, key, value)\n else:\n self._data[key] = value\n\n # 
Set any get_<field>_display methods\n self.__set_field_display()\n\n if self._dynamic:\n self._dynamic_lock = False\n for key, value in dynamic_data.iteritems():\n setattr(self, key, value)\n\n # Flag initialised\n self._initialised = True\n self._created = _created\n signals.post_init.send(self.__class__, document=self)\n\n def __delattr__(self, *args, **kwargs):\n \"\"\"Handle deletions of fields\"\"\"\n field_name = args[0]\n if field_name in self._fields:\n default = self._fields[field_name].default\n if callable(default):\n default = default()\n setattr(self, field_name, default)\n else:\n super(BaseDocument, self).__delattr__(*args, **kwargs)\n\n def __setattr__(self, name, value):\n # Handle dynamic data only if an initialised dynamic document\n if self._dynamic and not self._dynamic_lock:\n\n if not hasattr(self, name) and not name.startswith('_'):\n DynamicField = _import_class('DynamicField')\n field = DynamicField(db_field=name, null=True)\n field.name = name\n self._dynamic_fields[name] = field\n self._fields_ordered += (name,)\n\n if not name.startswith('_'):\n value = self.__expand_dynamic_values(name, value)\n\n # Handle marking data as changed\n if name in self._dynamic_fields:\n self._data[name] = value\n if hasattr(self, '_changed_fields'):\n self._mark_as_changed(name)\n try:\n self__created = self._created\n except AttributeError:\n self__created = True\n\n if (\n self._is_document and\n not self__created and\n name in self._meta.get('shard_key', tuple()) and\n self._data.get(name) != value\n ):\n msg = 'Shard Keys are immutable. Tried to update %s' % name\n raise OperationError(msg)\n\n try:\n self__initialised = self._initialised\n except AttributeError:\n self__initialised = False\n # Check if the user has created a new instance of a class\n if (self._is_document and self__initialised and\n self__created and name == self._meta.get('id_field')):\n super(BaseDocument, self).__setattr__('_created', False)\n\n super(BaseDocument, self).__setattr__(name, value)\n\n def __getstate__(self):\n data = {}\n for k in ('_changed_fields', '_initialised', '_created',\n '_dynamic_fields', '_fields_ordered'):\n if hasattr(self, k):\n data[k] = getattr(self, k)\n data['_data'] = self.to_mongo()\n return data\n\n def __setstate__(self, data):\n if isinstance(data['_data'], SON):\n data['_data'] = self.__class__._from_son(data['_data'])._data\n for k in ('_changed_fields', '_initialised', '_created', '_data',\n '_dynamic_fields'):\n if k in data:\n setattr(self, k, data[k])\n if '_fields_ordered' in data:\n if self._dynamic:\n setattr(self, '_fields_ordered', data['_fields_ordered'])\n else:\n _super_fields_ordered = type(self)._fields_ordered\n setattr(self, '_fields_ordered', _super_fields_ordered)\n\n dynamic_fields = data.get('_dynamic_fields') or SON()\n for k in dynamic_fields.keys():\n setattr(self, k, data['_data'].get(k))\n\n def __iter__(self):\n return iter(self._fields_ordered)\n\n def __getitem__(self, name):\n \"\"\"Dictionary-style field access, return a field's value if present.\n \"\"\"\n try:\n if name in self._fields_ordered:\n return getattr(self, name)\n except AttributeError:\n pass\n raise KeyError(name)\n\n def __setitem__(self, name, value):\n \"\"\"Dictionary-style field access, set a field's value.\n \"\"\"\n # Ensure that the field exists before settings its value\n if not self._dynamic and name not in self._fields:\n raise KeyError(name)\n return setattr(self, name, value)\n\n def __contains__(self, name):\n try:\n val = getattr(self, name)\n return val is 
not None\n except AttributeError:\n return False\n\n def __len__(self):\n return len(self._data)\n\n def __repr__(self):\n try:\n u = self.__str__()\n except (UnicodeEncodeError, UnicodeDecodeError):\n u = '[Bad Unicode data]'\n repr_type = str if u is None else type(u)\n return repr_type('<%s: %s>' % (self.__class__.__name__, u))\n\n def __str__(self):\n # TODO this could be simpler?\n if hasattr(self, '__unicode__'):\n if six.PY3:\n return self.__unicode__()\n else:\n return six.text_type(self).encode('utf-8')\n return six.text_type('%s object' % self.__class__.__name__)\n\n def __eq__(self, other):\n if isinstance(other, self.__class__) and hasattr(other, 'id') and other.id is not None:\n return self.id == other.id\n if isinstance(other, DBRef):\n return self._get_collection_name() == other.collection and self.id == other.id\n if self.id is None:\n return self is other\n return False\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def clean(self):\n \"\"\"\n Hook for doing document level data cleaning before validation is run.\n\n Any ValidationError raised by this method will not be associated with\n a particular field; it will have a special-case association with the\n field defined by NON_FIELD_ERRORS.\n \"\"\"\n pass\n\n def get_text_score(self):\n \"\"\"\n Get text score from text query\n \"\"\"\n\n if '_text_score' not in self._data:\n raise InvalidDocumentError('This document is not originally built from a text query')\n\n return self._data['_text_score']\n\n def to_mongo(self, use_db_field=True, fields=None):\n \"\"\"\n Return as SON data ready for use with MongoDB.\n \"\"\"\n if not fields:\n fields = []\n\n data = SON()\n data['_id'] = None\n data['_cls'] = self._class_name\n\n # only root fields ['test1.a', 'test2'] => ['test1', 'test2']\n root_fields = {f.split('.')[0] for f in fields}\n\n for field_name in self:\n if root_fields and field_name not in root_fields:\n continue\n\n value = self._data.get(field_name, None)\n field = self._fields.get(field_name)\n\n if field is None and self._dynamic:\n field = self._dynamic_fields.get(field_name)\n\n if value is not None:\n f_inputs = field.to_mongo.__code__.co_varnames\n ex_vars = {}\n if fields and 'fields' in f_inputs:\n key = '%s.' 
% field_name\n embedded_fields = [\n i.replace(key, '') for i in fields\n if i.startswith(key)]\n\n ex_vars['fields'] = embedded_fields\n\n if 'use_db_field' in f_inputs:\n ex_vars['use_db_field'] = use_db_field\n\n value = field.to_mongo(value, **ex_vars)\n\n # Handle self generating fields\n if value is None and field._auto_gen:\n value = field.generate()\n self._data[field_name] = value\n\n if (value is not None) or (field.null):\n if use_db_field:\n data[field.db_field] = value\n else:\n data[field.name] = value\n\n # Only add _cls if allow_inheritance is True\n if not self._meta.get('allow_inheritance'):\n data.pop('_cls')\n\n return data\n\n def validate(self, clean=True):\n \"\"\"Ensure that all fields' values are valid and that required fields\n are present.\n \"\"\"\n # Ensure that each field is matched to a valid value\n errors = {}\n if clean:\n try:\n self.clean()\n except ValidationError as error:\n errors[NON_FIELD_ERRORS] = error\n\n # Get a list of tuples of field names and their current values\n fields = [(self._fields.get(name, self._dynamic_fields.get(name)),\n self._data.get(name)) for name in self._fields_ordered]\n\n EmbeddedDocumentField = _import_class('EmbeddedDocumentField')\n GenericEmbeddedDocumentField = _import_class(\n 'GenericEmbeddedDocumentField')\n\n for field, value in fields:\n if value is not None:\n try:\n if isinstance(field, (EmbeddedDocumentField,\n GenericEmbeddedDocumentField)):\n field._validate(value, clean=clean)\n else:\n field._validate(value)\n except ValidationError as error:\n errors[field.name] = error.errors or error\n except (ValueError, AttributeError, AssertionError) as error:\n errors[field.name] = error\n elif field.required and not getattr(field, '_auto_gen', False):\n errors[field.name] = ValidationError('Field is required',\n field_name=field.name)\n\n if errors:\n pk = 'None'\n if hasattr(self, 'pk'):\n pk = self.pk\n elif self._instance and hasattr(self._instance, 'pk'):\n pk = self._instance.pk\n message = 'ValidationError (%s:%s) ' % (self._class_name, pk)\n raise ValidationError(message, errors=errors)\n\n def to_json(self, *args, **kwargs):\n \"\"\"Convert this document to JSON.\n\n :param use_db_field: Serialize field names as they appear in\n MongoDB (as opposed to attribute names on this document).\n Defaults to True.\n \"\"\"\n use_db_field = kwargs.pop('use_db_field', True)\n return json_util.dumps(self.to_mongo(use_db_field), *args, **kwargs)\n\n @classmethod\n def from_json(cls, json_data, created=False):\n \"\"\"Converts json data to a Document instance\n\n :param json_data: The json data to load into the Document\n :param created: If True, the document will be considered as a brand new document\n If False and an id is provided, it will consider that the data being\n loaded corresponds to what's already in the database (This has an impact of subsequent call to .save())\n If False and no id is provided, it will consider the data as a new document\n (default ``False``)\n \"\"\"\n return cls._from_son(json_util.loads(json_data), created=created)\n\n def __expand_dynamic_values(self, name, value):\n \"\"\"Expand any dynamic values to their correct types / values.\"\"\"\n if not isinstance(value, (dict, list, tuple)):\n return value\n\n # If the value is a dict with '_cls' in it, turn it into a document\n is_dict = isinstance(value, dict)\n if is_dict and '_cls' in value:\n cls = get_document(value['_cls'])\n return cls(**value)\n\n if is_dict:\n value = {\n k: self.__expand_dynamic_values(k, v)\n for k, v in 
value.items()\n }\n else:\n value = [self.__expand_dynamic_values(name, v) for v in value]\n\n # Convert lists / values so we can watch for any changes on them\n EmbeddedDocumentListField = _import_class('EmbeddedDocumentListField')\n if (isinstance(value, (list, tuple)) and\n not isinstance(value, BaseList)):\n if issubclass(type(self), EmbeddedDocumentListField):\n value = EmbeddedDocumentList(value, self, name)\n else:\n value = BaseList(value, self, name)\n elif isinstance(value, dict) and not isinstance(value, BaseDict):\n value = BaseDict(value, self, name)\n\n return value\n\n def _mark_as_changed(self, key):\n \"\"\"Mark a key as explicitly changed by the user.\"\"\"\n if not key:\n return\n\n if not hasattr(self, '_changed_fields'):\n return\n\n if '.' in key:\n key, rest = key.split('.', 1)\n key = self._db_field_map.get(key, key)\n key = '%s.%s' % (key, rest)\n else:\n key = self._db_field_map.get(key, key)\n\n if key not in self._changed_fields:\n levels, idx = key.split('.'), 1\n while idx <= len(levels):\n if '.'.join(levels[:idx]) in self._changed_fields:\n break\n idx += 1\n else:\n self._changed_fields.append(key)\n # remove lower level changed fields\n level = '.'.join(levels[:idx]) + '.'\n remove = self._changed_fields.remove\n for field in self._changed_fields[:]:\n if field.startswith(level):\n remove(field)\n\n def _clear_changed_fields(self):\n \"\"\"Using _get_changed_fields iterate and remove any fields that\n are marked as changed.\n \"\"\"\n for changed in self._get_changed_fields():\n parts = changed.split('.')\n data = self\n for part in parts:\n if isinstance(data, list):\n try:\n data = data[int(part)]\n except IndexError:\n data = None\n elif isinstance(data, dict):\n data = data.get(part, None)\n else:\n data = getattr(data, part, None)\n\n if not isinstance(data, LazyReference) and hasattr(data, '_changed_fields'):\n if getattr(data, '_is_document', False):\n continue\n\n data._changed_fields = []\n\n self._changed_fields = []\n\n def _nestable_types_changed_fields(self, changed_fields, base_key, data):\n \"\"\"Inspect nested data for changed fields\n\n :param changed_fields: Previously collected changed fields\n :param base_key: The base key that must be used to prepend changes to this data\n :param data: data to inspect for changes\n \"\"\"\n # Loop list / dict fields as they contain documents\n # Determine the iterator to use\n if not hasattr(data, 'items'):\n iterator = enumerate(data)\n else:\n iterator = data.iteritems()\n\n for index_or_key, value in iterator:\n item_key = '%s%s.' % (base_key, index_or_key)\n # don't check anything lower if this key is already marked\n # as changed.\n if item_key[:-1] in changed_fields:\n continue\n\n if hasattr(value, '_get_changed_fields'):\n changed = value._get_changed_fields()\n changed_fields += ['%s%s' % (item_key, k) for k in changed if k]\n elif isinstance(value, (list, tuple, dict)):\n self._nestable_types_changed_fields(\n changed_fields, item_key, value)\n\n def _get_changed_fields(self):\n \"\"\"Return a list of all fields that have explicitly been changed.\n \"\"\"\n EmbeddedDocument = _import_class('EmbeddedDocument')\n ReferenceField = _import_class('ReferenceField')\n GenericReferenceField = _import_class('GenericReferenceField')\n SortedListField = _import_class('SortedListField')\n\n changed_fields = []\n changed_fields += getattr(self, '_changed_fields', [])\n\n for field_name in self._fields_ordered:\n db_field_name = self._db_field_map.get(field_name, field_name)\n key = '%s.' 
% db_field_name\n data = self._data.get(field_name, None)\n field = self._fields.get(field_name)\n\n if db_field_name in changed_fields:\n # Whole field already marked as changed, no need to go further\n continue\n\n if isinstance(field, ReferenceField): # Don't follow referenced documents\n continue\n\n if isinstance(data, EmbeddedDocument):\n # Find all embedded fields that have been changed\n changed = data._get_changed_fields()\n changed_fields += ['%s%s' % (key, k) for k in changed if k]\n elif isinstance(data, (list, tuple, dict)):\n if (hasattr(field, 'field') and\n isinstance(field.field, (ReferenceField, GenericReferenceField))):\n continue\n elif isinstance(field, SortedListField) and field._ordering:\n # if ordering is affected whole list is changed\n if any(field._ordering in d._changed_fields for d in data):\n changed_fields.append(db_field_name)\n continue\n\n self._nestable_types_changed_fields(\n changed_fields, key, data)\n return changed_fields\n\n def _delta(self):\n \"\"\"Returns the delta (set, unset) of the changes for a document.\n Gets any values that have been explicitly changed.\n \"\"\"\n # Handles cases where not loaded from_son but has _id\n doc = self.to_mongo()\n\n set_fields = self._get_changed_fields()\n unset_data = {}\n if hasattr(self, '_changed_fields'):\n set_data = {}\n # Fetch each set item from its path\n for path in set_fields:\n parts = path.split('.')\n d = doc\n new_path = []\n for p in parts:\n if isinstance(d, (ObjectId, DBRef)):\n # Don't dig in the references\n break\n elif isinstance(d, list) and p.isdigit():\n # An item of a list (identified by its index) is updated\n d = d[int(p)]\n elif hasattr(d, 'get'):\n # dict-like (dict, embedded document)\n d = d.get(p)\n new_path.append(p)\n path = '.'.join(new_path)\n set_data[path] = d\n else:\n set_data = doc\n if '_id' in set_data:\n del set_data['_id']\n\n # Determine if any changed items were actually unset.\n for path, value in set_data.items():\n if value or isinstance(value, (numbers.Number, bool)): # Account for 0 and True that are truthy\n continue\n\n parts = path.split('.')\n\n if (self._dynamic and len(parts) and parts[0] in\n self._dynamic_fields):\n del set_data[path]\n unset_data[path] = 1\n continue\n\n # If we've set a value that ain't the default value don't unset it.\n default = None\n if path in self._fields:\n default = self._fields[path].default\n else: # Perform a full lookup for lists / embedded lookups\n d = self\n db_field_name = parts.pop()\n for p in parts:\n if isinstance(d, list) and p.isdigit():\n d = d[int(p)]\n elif (hasattr(d, '__getattribute__') and\n not isinstance(d, dict)):\n real_path = d._reverse_db_field_map.get(p, p)\n d = getattr(d, real_path)\n else:\n d = d.get(p)\n\n if hasattr(d, '_fields'):\n field_name = d._reverse_db_field_map.get(db_field_name,\n db_field_name)\n if field_name in d._fields:\n default = d._fields.get(field_name).default\n else:\n default = None\n\n if default is not None:\n default = default() if callable(default) else default\n\n if value != default:\n continue\n\n del set_data[path]\n unset_data[path] = 1\n return set_data, unset_data\n\n @classmethod\n def _get_collection_name(cls):\n \"\"\"Return the collection name for this class. 
None for abstract\n class.\n \"\"\"\n return cls._meta.get('collection', None)\n\n @classmethod\n def _from_son(cls, son, _auto_dereference=True, only_fields=None, created=False):\n \"\"\"Create an instance of a Document (subclass) from a PyMongo\n SON.\n \"\"\"\n if not only_fields:\n only_fields = []\n\n if son and not isinstance(son, dict):\n raise ValueError(\"The source SON object needs to be of type 'dict'\")\n\n # Get the class name from the document, falling back to the given\n # class if unavailable\n class_name = son.get('_cls', cls._class_name)\n\n # Convert SON to a data dict, making sure each key is a string and\n # corresponds to the right db field.\n data = {}\n for key, value in son.iteritems():\n key = str(key)\n key = cls._db_field_map.get(key, key)\n data[key] = value\n\n # Return correct subclass for document type\n if class_name != cls._class_name:\n cls = get_document(class_name)\n\n changed_fields = []\n errors_dict = {}\n\n fields = cls._fields\n if not _auto_dereference:\n fields = copy.deepcopy(fields)\n\n for field_name, field in fields.iteritems():\n field._auto_dereference = _auto_dereference\n if field.db_field in data:\n value = data[field.db_field]\n try:\n data[field_name] = (value if value is None\n else field.to_python(value))\n if field_name != field.db_field:\n del data[field.db_field]\n except (AttributeError, ValueError) as e:\n errors_dict[field_name] = e\n\n if errors_dict:\n errors = '\\n'.join(['%s - %s' % (k, v)\n for k, v in errors_dict.items()])\n msg = ('Invalid data to create a `%s` instance.\\n%s'\n % (cls._class_name, errors))\n raise InvalidDocumentError(msg)\n\n # In STRICT documents, remove any keys that aren't in cls._fields\n if cls.STRICT:\n data = {k: v for k, v in data.iteritems() if k in cls._fields}\n\n obj = cls(__auto_convert=False, _created=created, __only_fields=only_fields, **data)\n obj._changed_fields = changed_fields\n if not _auto_dereference:\n obj._fields = fields\n\n return obj\n\n @classmethod\n def _build_index_specs(cls, meta_indexes):\n \"\"\"Generate and merge the full index specs.\"\"\"\n geo_indices = cls._geo_indices()\n unique_indices = cls._unique_with_indexes()\n index_specs = [cls._build_index_spec(spec) for spec in meta_indexes]\n\n def merge_index_specs(index_specs, indices):\n \"\"\"Helper method for merging index specs.\"\"\"\n if not indices:\n return index_specs\n\n # Create a map of index fields to index spec. 
We're converting\n # the fields from a list to a tuple so that it's hashable.\n spec_fields = {\n tuple(index['fields']): index for index in index_specs\n }\n\n # For each new index, if there's an existing index with the same\n # fields list, update the existing spec with all data from the\n # new spec.\n for new_index in indices:\n candidate = spec_fields.get(tuple(new_index['fields']))\n if candidate is None:\n index_specs.append(new_index)\n else:\n candidate.update(new_index)\n\n return index_specs\n\n # Merge geo indexes and unique_with indexes into the meta index specs.\n index_specs = merge_index_specs(index_specs, geo_indices)\n index_specs = merge_index_specs(index_specs, unique_indices)\n return index_specs\n\n @classmethod\n def _build_index_spec(cls, spec):\n \"\"\"Build a PyMongo index spec from a MongoEngine index spec.\"\"\"\n if isinstance(spec, six.string_types):\n spec = {'fields': [spec]}\n elif isinstance(spec, (list, tuple)):\n spec = {'fields': list(spec)}\n elif isinstance(spec, dict):\n spec = dict(spec)\n\n index_list = []\n direction = None\n\n # Check to see if we need to include _cls\n allow_inheritance = cls._meta.get('allow_inheritance')\n include_cls = (\n allow_inheritance and\n not spec.get('sparse', False) and\n spec.get('cls', True) and\n '_cls' not in spec['fields']\n )\n\n # 733: don't include cls if index_cls is False unless there is an explicit cls with the index\n include_cls = include_cls and (spec.get('cls', False) or cls._meta.get('index_cls', True))\n if 'cls' in spec:\n spec.pop('cls')\n for key in spec['fields']:\n # If inherited spec continue\n if isinstance(key, (list, tuple)):\n continue\n\n # ASCENDING from +\n # DESCENDING from -\n # TEXT from $\n # HASHED from #\n # GEOSPHERE from (\n # GEOHAYSTACK from )\n # GEO2D from *\n direction = pymongo.ASCENDING\n if key.startswith('-'):\n direction = pymongo.DESCENDING\n elif key.startswith('$'):\n direction = pymongo.TEXT\n elif key.startswith('#'):\n direction = pymongo.HASHED\n elif key.startswith('('):\n direction = pymongo.GEOSPHERE\n elif key.startswith(')'):\n direction = pymongo.GEOHAYSTACK\n elif key.startswith('*'):\n direction = pymongo.GEO2D\n if key.startswith(('+', '-', '*', '$', '#', '(', ')')):\n key = key[1:]\n\n # Use real field name, do it manually because we need field\n # objects for the next part (list field checking)\n parts = key.split('.')\n if parts in (['pk'], ['id'], ['_id']):\n key = '_id'\n else:\n fields = cls._lookup_field(parts)\n parts = []\n for field in fields:\n try:\n if field != '_id':\n field = field.db_field\n except AttributeError:\n pass\n parts.append(field)\n key = '.'.join(parts)\n index_list.append((key, direction))\n\n # Don't add cls to a geo index\n if include_cls and direction not in (\n pymongo.GEO2D, pymongo.GEOHAYSTACK, pymongo.GEOSPHERE):\n index_list.insert(0, ('_cls', 1))\n\n if index_list:\n spec['fields'] = index_list\n\n return spec\n\n @classmethod\n def _unique_with_indexes(cls, namespace=''):\n \"\"\"Find unique indexes in the document schema and return them.\"\"\"\n unique_indexes = []\n for field_name, field in cls._fields.items():\n sparse = field.sparse\n\n # Generate a list of indexes needed by uniqueness constraints\n if field.unique:\n unique_fields = [field.db_field]\n\n # Add any unique_with fields to the back of the index spec\n if field.unique_with:\n if isinstance(field.unique_with, six.string_types):\n field.unique_with = [field.unique_with]\n\n # Convert unique_with field names to real field names\n unique_with = []\n 
for other_name in field.unique_with:\n parts = other_name.split('.')\n\n # Lookup real name\n parts = cls._lookup_field(parts)\n name_parts = [part.db_field for part in parts]\n unique_with.append('.'.join(name_parts))\n\n # Unique field should be required\n parts[-1].required = True\n sparse = (not sparse and\n parts[-1].name not in cls.__dict__)\n\n unique_fields += unique_with\n\n # Add the new index to the list\n fields = [\n ('%s%s' % (namespace, f), pymongo.ASCENDING)\n for f in unique_fields\n ]\n index = {'fields': fields, 'unique': True, 'sparse': sparse}\n unique_indexes.append(index)\n\n if field.__class__.__name__ == 'ListField':\n field = field.field\n\n # Grab any embedded document field unique indexes\n if (field.__class__.__name__ == 'EmbeddedDocumentField' and\n field.document_type != cls):\n field_namespace = '%s.' % field_name\n doc_cls = field.document_type\n unique_indexes += doc_cls._unique_with_indexes(field_namespace)\n\n return unique_indexes\n\n @classmethod\n def _geo_indices(cls, inspected=None, parent_field=None):\n inspected = inspected or []\n geo_indices = []\n inspected.append(cls)\n\n geo_field_type_names = ('EmbeddedDocumentField', 'GeoPointField',\n 'PointField', 'LineStringField',\n 'PolygonField')\n\n geo_field_types = tuple([_import_class(field)\n for field in geo_field_type_names])\n\n for field in cls._fields.values():\n if not isinstance(field, geo_field_types):\n continue\n\n if hasattr(field, 'document_type'):\n field_cls = field.document_type\n if field_cls in inspected:\n continue\n\n if hasattr(field_cls, '_geo_indices'):\n geo_indices += field_cls._geo_indices(\n inspected, parent_field=field.db_field)\n elif field._geo_index:\n field_name = field.db_field\n if parent_field:\n field_name = '%s.%s' % (parent_field, field_name)\n geo_indices.append({\n 'fields': [(field_name, field._geo_index)]\n })\n\n return geo_indices\n\n @classmethod\n def _lookup_field(cls, parts):\n \"\"\"Given the path to a given field, return a list containing\n the Field object associated with that field and all of its parent\n Field objects.\n\n Args:\n parts (str, list, or tuple) - path to the field. Should be a\n string for simple fields existing on this document or a list\n of strings for a field that exists deeper in embedded documents.\n\n Returns:\n A list of Field instances for fields that were found or\n strings for sub-fields that weren't.\n\n Example:\n >>> user._lookup_field('name')\n [<mongoengine.fields.StringField at 0x1119bff50>]\n\n >>> user._lookup_field('roles')\n [<mongoengine.fields.EmbeddedDocumentListField at 0x1119ec250>]\n\n >>> user._lookup_field(['roles', 'role'])\n [<mongoengine.fields.EmbeddedDocumentListField at 0x1119ec250>,\n <mongoengine.fields.StringField at 0x1119ec050>]\n\n >>> user._lookup_field('doesnt_exist')\n raises LookUpError\n\n >>> user._lookup_field(['roles', 'doesnt_exist'])\n [<mongoengine.fields.EmbeddedDocumentListField at 0x1119ec250>,\n 'doesnt_exist']\n\n \"\"\"\n # TODO this method is WAY too complicated. 
Simplify it.\n # TODO don't think returning a string for embedded non-existent fields is desired\n\n ListField = _import_class('ListField')\n DynamicField = _import_class('DynamicField')\n\n if not isinstance(parts, (list, tuple)):\n parts = [parts]\n\n fields = []\n field = None\n\n for field_name in parts:\n # Handle ListField indexing:\n if field_name.isdigit() and isinstance(field, ListField):\n fields.append(field_name)\n continue\n\n # Look up first field from the document\n if field is None:\n if field_name == 'pk':\n # Deal with \"primary key\" alias\n field_name = cls._meta['id_field']\n\n if field_name in cls._fields:\n field = cls._fields[field_name]\n elif cls._dynamic:\n field = DynamicField(db_field=field_name)\n elif cls._meta.get('allow_inheritance') or cls._meta.get('abstract', False):\n # 744: in case the field is defined in a subclass\n for subcls in cls.__subclasses__():\n try:\n field = subcls._lookup_field([field_name])[0]\n except LookUpError:\n continue\n\n if field is not None:\n break\n else:\n raise LookUpError('Cannot resolve field \"%s\"' % field_name)\n else:\n raise LookUpError('Cannot resolve field \"%s\"' % field_name)\n else:\n ReferenceField = _import_class('ReferenceField')\n GenericReferenceField = _import_class('GenericReferenceField')\n\n # If previous field was a reference, throw an error (we\n # cannot look up fields that are on references).\n if isinstance(field, (ReferenceField, GenericReferenceField)):\n raise LookUpError('Cannot perform join in mongoDB: %s' %\n '__'.join(parts))\n\n # If the parent field has a \"field\" attribute which has a\n # lookup_member method, call it to find the field\n # corresponding to this iteration.\n if hasattr(getattr(field, 'field', None), 'lookup_member'):\n new_field = field.field.lookup_member(field_name)\n\n # If the parent field is a DynamicField or if it's part of\n # a DynamicDocument, mark current field as a DynamicField\n # with db_name equal to the field name.\n elif cls._dynamic and (isinstance(field, DynamicField) or\n getattr(getattr(field, 'document_type', None), '_dynamic', None)):\n new_field = DynamicField(db_field=field_name)\n\n # Else, try to use the parent field's lookup_member method\n # to find the subfield.\n elif hasattr(field, 'lookup_member'):\n new_field = field.lookup_member(field_name)\n\n # Raise a LookUpError if all the other conditions failed.\n else:\n raise LookUpError(\n 'Cannot resolve subfield or operator {} '\n 'on the field {}'.format(field_name, field.name)\n )\n\n # If current field still wasn't found and the parent field\n # is a ComplexBaseField, add the name current field name and\n # move on.\n if not new_field and isinstance(field, ComplexBaseField):\n fields.append(field_name)\n continue\n elif not new_field:\n raise LookUpError('Cannot resolve field \"%s\"' % field_name)\n\n field = new_field # update field to the new field type\n\n fields.append(field)\n\n return fields\n\n @classmethod\n def _translate_field_name(cls, field, sep='.'):\n \"\"\"Translate a field attribute name to a database field name.\n \"\"\"\n parts = field.split(sep)\n parts = [f.db_field for f in cls._lookup_field(parts)]\n return '.'.join(parts)\n\n def __set_field_display(self):\n \"\"\"For each field that specifies choices, create a\n get_<field>_display method.\n \"\"\"\n fields_with_choices = [(n, f) for n, f in self._fields.items()\n if f.choices]\n for attr_name, field in fields_with_choices:\n setattr(self,\n 'get_%s_display' % attr_name,\n partial(self.__get_field_display, 
field=field))\n\n def __get_field_display(self, field):\n \"\"\"Return the display value for a choice field\"\"\"\n value = getattr(self, field.name)\n if field.choices and isinstance(field.choices[0], (list, tuple)):\n if value is None:\n return None\n sep = getattr(field, 'display_sep', ' ')\n values = value if field.__class__.__name__ in ('ListField', 'SortedListField') else [value]\n return sep.join([\n six.text_type(dict(field.choices).get(val, val))\n for val in values or []])\n return value\n",
"path": "mongoengine/base/document.py"
}
] | diff --git a/docs/changelog.rst b/docs/changelog.rst
index 7fdf5e9c4..33578f018 100644
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -5,6 +5,7 @@ Changelog
Development
===========
- (Fill this out as you fix issues and develop your features).
+- Fix `_cls` that is not set properly in Document constructor (regression) #1950
- Fix bug in _delta method - Update of a ListField depends on an unrelated dynamic field update #1733
- Remove deprecated `save()` method and used `insert_one()` #1899
diff --git a/mongoengine/base/document.py b/mongoengine/base/document.py
index c2f839322..e44ec2c9d 100644
--- a/mongoengine/base/document.py
+++ b/mongoengine/base/document.py
@@ -91,6 +91,9 @@ def __init__(self, *args, **values):
value = getattr(self, key, None)
setattr(self, key, value)
+ if '_cls' not in values:
+ self._cls = self._class_name
+
# Set passed values after initialisation
if self._dynamic:
dynamic_data = {}
diff --git a/tests/document/inheritance.py b/tests/document/inheritance.py
index b4ba60589..32e3ed297 100644
--- a/tests/document/inheritance.py
+++ b/tests/document/inheritance.py
@@ -2,11 +2,11 @@
import unittest
import warnings
-from tests.fixtures import Base
-
-from mongoengine import Document, EmbeddedDocument, connect, ReferenceField,\
- BooleanField, GenericReferenceField, IntField, StringField
+from mongoengine import (BooleanField, Document, EmbeddedDocument,
+ EmbeddedDocumentField, GenericReferenceField,
+ IntField, ReferenceField, StringField, connect)
from mongoengine.connection import get_db
+from tests.fixtures import Base
__all__ = ('InheritanceTest', )
@@ -23,6 +23,27 @@ def tearDown(self):
continue
self.db.drop_collection(collection)
+ def test_constructor_cls(self):
+ # Ensures _cls is properly set during construction
+ # and when object gets reloaded (prevent regression of #1950)
+ class EmbedData(EmbeddedDocument):
+ data = StringField()
+ meta = {'allow_inheritance': True}
+
+ class DataDoc(Document):
+ name = StringField()
+ embed = EmbeddedDocumentField(EmbedData)
+ meta = {'allow_inheritance': True}
+
+ test_doc = DataDoc(name='test', embed=EmbedData(data='data'))
+ assert test_doc._cls == 'DataDoc'
+ assert test_doc.embed._cls == 'EmbedData'
+ test_doc.save()
+ saved_doc = DataDoc.objects.with_id(test_doc.id)
+ assert test_doc._cls == saved_doc._cls
+ assert test_doc.embed._cls == saved_doc.embed._cls
+ test_doc.delete()
+
def test_superclasses(self):
"""Ensure that the correct list of superclasses is assembled.
"""
|
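The patch above fixes #1950 by assigning `_cls` eagerly in the document constructor whenever it is absent from the passed values, mirroring the regression test it adds. A minimal sketch of the fixed behavior, assuming the patched `mongoengine` package is installed; the class and database names are illustrative, not from the PR:

```python
# Sketch of the #1950 regression fix: _cls should be populated at
# construction time, not only after a save()/reload round trip.
from mongoengine import Document, StringField, connect

connect('demo_1950')  # illustrative database; nothing below hits the server


class DataDoc(Document):
    name = StringField()
    meta = {'allow_inheritance': True}  # inheritance is what makes _cls matter


doc = DataDoc(name='test')
assert doc._cls == 'DataDoc'  # the assertion the regression broke
```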
kserve__kserve-2018 | KServe 0.8 release tracking
/kind feature
**Describe the solution you'd like**
KServe 0.8 release tracking:
RC Release Date: 12/30/2021
Release Date: 1/14/2022
KServe Model Serving:
- [X] TorchServe v2 protocol
- https://github.com/kserve/kserve/pull/1870 @jagadeeshi2i
- [X] Transformer -> Predictor gRPC support
- https://github.com/kserve/kserve/pull/1933
- [X] MLServer 0.5 update
- https://github.com/kserve/kserve/pull/1853 @adriangonz
- [X] Scikit-Learn 1.0.1 and XGBoost 1.5.0 upgrade
- https://github.com/kserve/kserve/pull/1954 @yuzisun
- [X] Introduce ServingRuntime to single model serving @pvaneck @Suresh-Nakkeran
- https://github.com/kserve/kserve/pull/1901
- https://github.com/kserve/kserve/pull/1926
- [ ] Introduce new storage spec @Tomcli
- https://github.com/kserve/kserve/pull/1899
- [X] Storage initializer fixes
- https://github.com/kserve/kserve/pull/1883
- https://github.com/kserve/kserve/pull/1940
- [X] Helm chart for KServe and ModelMesh @yuzisun
- https://github.com/kserve/kserve/pull/1878
- [X] KServe SDK features and fixes
- https://github.com/kserve/kserve/pull/1949 @markwinter
- https://github.com/kserve/kserve/pull/1934 @markwinter
- https://github.com/kserve/kserve/pull/1918 @markwinter
ModelMesh:
- [X] Multi-namespace support for ModelMesh
- [X] Improve rest proxy support
- https://github.com/kserve/rest-proxy/pull/6
Models UI:
- [ ] Models Web App KServe migration @kimwnasptd
Website:
- [ ] Website doc update
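For the SDK work tracked above, a minimal sketch of deploying an `InferenceService` with the Python SDK from this release; the service name, namespace, and `storage_uri` are placeholders, and a cluster with KServe installed is assumed:

```python
# Minimal sketch: create a scikit-learn InferenceService via the kserve SDK.
# The name, namespace, and storage_uri below are placeholders.
from kubernetes.client import V1ObjectMeta
from kserve import (KServeClient, V1beta1InferenceService,
                    V1beta1InferenceServiceSpec, V1beta1PredictorSpec,
                    V1beta1SKLearnSpec)

isvc = V1beta1InferenceService(
    api_version='serving.kserve.io/v1beta1',
    kind='InferenceService',
    metadata=V1ObjectMeta(name='sklearn-iris', namespace='kserve-test'),
    spec=V1beta1InferenceServiceSpec(
        predictor=V1beta1PredictorSpec(
            sklearn=V1beta1SKLearnSpec(
                storage_uri='gs://your-bucket/sklearn/model'))))

# Requires cluster credentials and KServe installed in the cluster.
KServeClient().create(isvc)
```

The `serving.kserve.io/v1beta1` group and version match the `InferenceService` CRD shipped in the install manifest below.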
| [
{
"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport setuptools\n\nTESTS_REQUIRES = [\n 'pytest',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-asyncio',\n 'pytest-tornasync',\n 'mypy'\n]\n\nwith open('requirements.txt') as f:\n REQUIRES = f.readlines()\n\nsetuptools.setup(\n name='kserve',\n version='0.8.0rc0',\n author=\"The KServe Authors\",\n author_email='[email protected], [email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n url=\"https://github.com/kserve/kserve/tree/master/python/kserve\",\n description=\"KServe Python SDK\",\n long_description=\"Python SDK for KServe Server and Client.\",\n python_requires='>=3.6',\n packages=[\n 'kserve',\n 'kserve.api',\n 'kserve.constants',\n 'kserve.models',\n 'kserve.handlers',\n 'kserve.utils',\n ],\n package_data={'': ['requirements.txt']},\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRES,\n extras_require={'test': TESTS_REQUIRES}\n)\n",
"path": "python/kserve/setup.py"
}
] | [
{
"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport setuptools\n\nTESTS_REQUIRES = [\n 'pytest',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-asyncio',\n 'pytest-tornasync',\n 'mypy'\n]\n\nwith open('requirements.txt') as f:\n REQUIRES = f.readlines()\n\nsetuptools.setup(\n name='kserve',\n version='0.8.0',\n author=\"The KServe Authors\",\n author_email='[email protected], [email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n url=\"https://github.com/kserve/kserve/tree/master/python/kserve\",\n description=\"KServe Python SDK\",\n long_description=\"Python SDK for KServe Server and Client.\",\n python_requires='>=3.6',\n packages=[\n 'kserve',\n 'kserve.api',\n 'kserve.constants',\n 'kserve.models',\n 'kserve.handlers',\n 'kserve.utils',\n ],\n package_data={'': ['requirements.txt']},\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRES,\n extras_require={'test': TESTS_REQUIRES}\n)\n",
"path": "python/kserve/setup.py"
}
] | diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 0a650e99a40..8629a2eabe9 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,5 +1,5 @@
<!-- Thanks for sending a pull request! Here are some tips for you:
-1. If this is your first time, read our contributor guidelines https://www.kubeflow.org/docs/about/contributing/ and developer guide https://github.com/kubeflow/kfserving/blob/master/docs/DEVELOPER_GUIDE.md
+1. If this is your first time, read our contributor guidelines https://www.kubeflow.org/docs/about/contributing/ and developer guide https://github.com/kserve/kserve/blob/master/docs/DEVELOPER_GUIDE.md
2. If you want *faster* PR reviews, read how: https://git.k8s.io/community/contributors/guide/pull-requests.md#best-practices-for-faster-reviews
3. Follow the instructions for writing a release note: https://git.k8s.io/community/contributors/guide/release-notes.md
4. If the PR is unfinished, see how to mark it: https://git.k8s.io/community/contributors/guide/pull-requests.md#marking-unfinished-pull-requests
@@ -10,10 +10,34 @@
**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #
+**Type of changes**
+Please delete options that are not relevant.
+
+- [ ] Bug fix (non-breaking change which fixes an issue)
+- [ ] New feature (non-breaking change which adds functionality)
+- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
+- [ ] This change requires a documentation update
+
+**Feature/Issue validation/testing**:
+
+Please describe the tests that you ran to verify your changes and relevant result summary. Provide instructions so it can be reproduced.
+Please also list any relevant details for your test configuration.
+
+- [ ] Test A
+- [ ] Test B
+
+- Logs
+
**Special notes for your reviewer**:
1. Please confirm that if this PR changes any image versions, then that's the sole change this PR makes.
+**Checklist**:
+
+- [ ] Have you added unit/e2e tests that prove your fix is effective or that this feature works?
+- [ ] Has code been commented, particularly in hard-to-understand areas?
+- [ ] Have you made corresponding changes to the documentation?
+
**Release note**:
<!-- Write your release note:
1. Enter your extended release note in the below block. If the PR requires additional action from users switching to the new release, include the string "action required".
diff --git a/config/runtimes/kustomization.yaml b/config/runtimes/kustomization.yaml
index bd9368fe13e..b46c9781774 100644
--- a/config/runtimes/kustomization.yaml
+++ b/config/runtimes/kustomization.yaml
@@ -45,4 +45,4 @@ images:
- name: kserve-torchserve
newName: kserve/torchserve-kfs
- newTag: 0.5.2
+ newTag: 0.5.3
diff --git a/hack/generate-install.sh b/hack/generate-install.sh
index dc3690230c9..247a0374879 100755
--- a/hack/generate-install.sh
+++ b/hack/generate-install.sh
@@ -37,6 +37,7 @@ RELEASES=(
"v0.7.0-rc0"
"v0.7.0"
"v0.8.0-rc0"
+ "v0.8.0"
)
TAG=$1
diff --git a/install/v0.8.0/kserve.yaml b/install/v0.8.0/kserve.yaml
new file mode 100644
index 00000000000..325e32f4b63
--- /dev/null
+++ b/install/v0.8.0/kserve.yaml
@@ -0,0 +1,15487 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ labels:
+ control-plane: kserve-controller-manager
+ controller-tools.k8s.io: "1.0"
+ istio-injection: disabled
+ name: kserve
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.4.0
+ creationTimestamp: null
+ name: clusterservingruntimes.serving.kserve.io
+spec:
+ group: serving.kserve.io
+ names:
+ kind: ClusterServingRuntime
+ listKind: ClusterServingRuntimeList
+ plural: clusterservingruntimes
+ singular: clusterservingruntime
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .spec.disabled
+ name: Disabled
+ type: boolean
+ - jsonPath: .spec.supportedModelFormats[*].name
+ name: ModelType
+ type: string
+ - jsonPath: .spec.containers[*].name
+ name: Containers
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ type: string
+ kind:
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ affinity:
+ properties:
+ nodeAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ preference:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ properties:
+ nodeSelectorTerms:
+ items:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
+ builtInAdapter:
+ properties:
+ memBufferBytes:
+ type: integer
+ modelLoadingTimeoutMillis:
+ type: integer
+ runtimeManagementPort:
+ type: integer
+ serverType:
+ enum:
+ - triton
+ - mlserver
+ type: string
+ type: object
+ containers:
+ items:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ workingDir:
+ type: string
+ type: object
+ type: array
+ disabled:
+ type: boolean
+ grpcDataEndpoint:
+ type: string
+ grpcEndpoint:
+ type: string
+ httpDataEndpoint:
+ type: string
+ multiModel:
+ type: boolean
+ nodeSelector:
+ additionalProperties:
+ type: string
+ type: object
+ replicas:
+ type: integer
+ storageHelper:
+ properties:
+ disabled:
+ type: boolean
+ type: object
+ supportedModelFormats:
+ items:
+ properties:
+ autoSelect:
+ type: boolean
+ name:
+ type: string
+ version:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ tolerations:
+ items:
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ format: int64
+ type: integer
+ value:
+ type: string
+ type: object
+ type: array
+ required:
+ - containers
+ type: object
+ status:
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: kserve/serving-cert
+ controller-gen.kubebuilder.io/version: v0.4.0
+ name: inferenceservices.serving.kserve.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ caBundle: Cg==
+ service:
+ name: kserve-webhook-server-service
+ namespace: kserve
+ path: /convert
+ conversionReviewVersions:
+ - v1beta1
+ group: serving.kserve.io
+ names:
+ kind: InferenceService
+ listKind: InferenceServiceList
+ plural: inferenceservices
+ shortNames:
+ - isvc
+ singular: inferenceservice
+ preserveUnknownFields: false
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .status.url
+ name: URL
+ type: string
+ - jsonPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - jsonPath: .status.components.predictor.traffic[?(@.tag=='prev')].percent
+ name: Prev
+ type: integer
+ - jsonPath: .status.components.predictor.traffic[?(@.latestRevision==true)].percent
+ name: Latest
+ type: integer
+ - jsonPath: .status.components.predictor.traffic[?(@.tag=='prev')].revisionName
+ name: PrevRolledoutRevision
+ type: string
+ - jsonPath: .status.components.predictor.traffic[?(@.latestRevision==true)].revisionName
+ name: LatestReadyRevision
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ type: string
+ kind:
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ explainer:
+ properties:
+ activeDeadlineSeconds:
+ format: int64
+ type: integer
+ affinity:
+ properties:
+ nodeAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ preference:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ properties:
+ nodeSelectorTerms:
+ items:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
+ aix:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ config:
+ additionalProperties:
+ type: string
+ type: object
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ type:
+ type: string
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ alibi:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ config:
+ additionalProperties:
+ type: string
+ type: object
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ type:
+ type: string
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
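+ # art: ART (Adversarial Robustness Toolbox) explainer configuration, mirroring the alibi schema above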
+ art:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ config:
+ additionalProperties:
+ type: string
+ type: object
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ type:
+ type: string
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
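+ # Component-level fields for the explainer: batching, canary traffic split, container concurrency, and custom containers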
+ automountServiceAccountToken:
+ type: boolean
+ batcher:
+ properties:
+ maxBatchSize:
+ type: integer
+ maxLatency:
+ type: integer
+ timeout:
+ type: integer
+ type: object
+ canaryTrafficPercent:
+ format: int64
+ type: integer
+ containerConcurrency:
+ format: int64
+ type: integer
+ containers:
+ items:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ dnsConfig:
+ properties:
+ nameservers:
+ items:
+ type: string
+ type: array
+ options:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ searches:
+ items:
+ type: string
+ type: array
+ type: object
+ dnsPolicy:
+ type: string
+ enableServiceLinks:
+ type: boolean
+ hostAliases:
+ items:
+ properties:
+ hostnames:
+ items:
+ type: string
+ type: array
+ ip:
+ type: string
+ type: object
+ type: array
+ hostIPC:
+ type: boolean
+ hostNetwork:
+ type: boolean
+ hostPID:
+ type: boolean
+ hostname:
+ type: string
+ imagePullSecrets:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ logger:
+ properties:
+ mode:
+ enum:
+ - all
+ - request
+ - response
+ type: string
+ url:
+ type: string
+ type: object
+ maxReplicas:
+ type: integer
+ minReplicas:
+ type: integer
+ nodeName:
+ type: string
+ nodeSelector:
+ additionalProperties:
+ type: string
+ type: object
+ overhead:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ preemptionPolicy:
+ type: string
+ priority:
+ format: int32
+ type: integer
+ priorityClassName:
+ type: string
+ readinessGates:
+ items:
+ properties:
+ conditionType:
+ type: string
+ required:
+ - conditionType
+ type: object
+ type: array
+ restartPolicy:
+ type: string
+ runtimeClassName:
+ type: string
+ schedulerName:
+ type: string
+ securityContext:
+ properties:
+ fsGroup:
+ format: int64
+ type: integer
+ fsGroupChangePolicy:
+ type: string
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ supplementalGroups:
+ items:
+ format: int64
+ type: integer
+ type: array
+ sysctls:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ serviceAccount:
+ type: string
+ serviceAccountName:
+ type: string
+ setHostnameAsFQDN:
+ type: boolean
+ shareProcessNamespace:
+ type: boolean
+ subdomain:
+ type: string
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeout:
+ format: int64
+ type: integer
+ tolerations:
+ items:
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ format: int64
+ type: integer
+ value:
+ type: string
+ type: object
+ type: array
+ topologySpreadConstraints:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ maxSkew:
+ format: int32
+ type: integer
+ topologyKey:
+ type: string
+ whenUnsatisfiable:
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - topologyKey
+ - whenUnsatisfiable
+ x-kubernetes-list-type: map
+ volumes:
+ items:
+ properties:
+ awsElasticBlockStore:
+ properties:
+ fsType:
+ type: string
+ partition:
+ format: int32
+ type: integer
+ readOnly:
+ type: boolean
+ volumeID:
+ type: string
+ required:
+ - volumeID
+ type: object
+ azureDisk:
+ properties:
+ cachingMode:
+ type: string
+ diskName:
+ type: string
+ diskURI:
+ type: string
+ fsType:
+ type: string
+ kind:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - diskName
+ - diskURI
+ type: object
+ azureFile:
+ properties:
+ readOnly:
+ type: boolean
+ secretName:
+ type: string
+ shareName:
+ type: string
+ required:
+ - secretName
+ - shareName
+ type: object
+ cephfs:
+ properties:
+ monitors:
+ items:
+ type: string
+ type: array
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ secretFile:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ user:
+ type: string
+ required:
+ - monitors
+ type: object
+ cinder:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ volumeID:
+ type: string
+ required:
+ - volumeID
+ type: object
+ configMap:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ csi:
+ properties:
+ driver:
+ type: string
+ fsType:
+ type: string
+ nodePublishSecretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ readOnly:
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ items:
+ items:
+ properties:
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ required:
+ - path
+ type: object
+ type: array
+ type: object
+ emptyDir:
+ properties:
+ medium:
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ properties:
+ volumeClaimTemplate:
+ properties:
+ metadata:
+ type: object
+ spec:
+ properties:
+ accessModes:
+ items:
+ type: string
+ type: array
+ dataSource:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ dataSourceRef:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ selector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ storageClassName:
+ type: string
+ volumeMode:
+ type: string
+ volumeName:
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ properties:
+ fsType:
+ type: string
+ lun:
+ format: int32
+ type: integer
+ readOnly:
+ type: boolean
+ targetWWNs:
+ items:
+ type: string
+ type: array
+ wwids:
+ items:
+ type: string
+ type: array
+ type: object
+ flexVolume:
+ properties:
+ driver:
+ type: string
+ fsType:
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ type: object
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ required:
+ - driver
+ type: object
+ flocker:
+ properties:
+ datasetName:
+ type: string
+ datasetUUID:
+ type: string
+ type: object
+ gcePersistentDisk:
+ properties:
+ fsType:
+ type: string
+ partition:
+ format: int32
+ type: integer
+ pdName:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - pdName
+ type: object
+ gitRepo:
+ properties:
+ directory:
+ type: string
+ repository:
+ type: string
+ revision:
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ properties:
+ endpoints:
+ type: string
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ properties:
+ path:
+ type: string
+ type:
+ type: string
+ required:
+ - path
+ type: object
+ iscsi:
+ properties:
+ chapAuthDiscovery:
+ type: boolean
+ chapAuthSession:
+ type: boolean
+ fsType:
+ type: string
+ initiatorName:
+ type: string
+ iqn:
+ type: string
+ iscsiInterface:
+ type: string
+ lun:
+ format: int32
+ type: integer
+ portals:
+ items:
+ type: string
+ type: array
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ targetPortal:
+ type: string
+ required:
+ - iqn
+ - lun
+ - targetPortal
+ type: object
+ name:
+ type: string
+ nfs:
+ properties:
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ server:
+ type: string
+ required:
+ - path
+ - server
+ type: object
+ persistentVolumeClaim:
+ properties:
+ claimName:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - claimName
+ type: object
+ photonPersistentDisk:
+ properties:
+ fsType:
+ type: string
+ pdID:
+ type: string
+ required:
+ - pdID
+ type: object
+ portworxVolume:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ volumeID:
+ type: string
+ required:
+ - volumeID
+ type: object
+ projected:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ sources:
+ items:
+ properties:
+ configMap:
+ properties:
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ downwardAPI:
+ properties:
+ items:
+ items:
+ properties:
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ required:
+ - path
+ type: object
+ type: array
+ type: object
+ secret:
+ properties:
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ serviceAccountToken:
+ properties:
+ audience:
+ type: string
+ expirationSeconds:
+ format: int64
+ type: integer
+ path:
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ type: object
+ quobyte:
+ properties:
+ group:
+ type: string
+ readOnly:
+ type: boolean
+ registry:
+ type: string
+ tenant:
+ type: string
+ user:
+ type: string
+ volume:
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ properties:
+ fsType:
+ type: string
+ image:
+ type: string
+ keyring:
+ type: string
+ monitors:
+ items:
+ type: string
+ type: array
+ pool:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ user:
+ type: string
+ required:
+ - image
+ - monitors
+ type: object
+ scaleIO:
+ properties:
+ fsType:
+ type: string
+ gateway:
+ type: string
+ protectionDomain:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ sslEnabled:
+ type: boolean
+ storageMode:
+ type: string
+ storagePool:
+ type: string
+ system:
+ type: string
+ volumeName:
+ type: string
+ required:
+ - gateway
+ - secretRef
+ - system
+ type: object
+ secret:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ optional:
+ type: boolean
+ secretName:
+ type: string
+ type: object
+ storageos:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ volumeName:
+ type: string
+ volumeNamespace:
+ type: string
+ type: object
+ vsphereVolume:
+ properties:
+ fsType:
+ type: string
+ storagePolicyID:
+ type: string
+ storagePolicyName:
+ type: string
+ volumePath:
+ type: string
+ required:
+ - volumePath
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ type: object
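+ # predictor: model-serving component; pod scheduling/affinity fields plus framework-specific predictor specs (e.g. lightgbm, model)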
+ predictor:
+ properties:
+ activeDeadlineSeconds:
+ format: int64
+ type: integer
+ affinity:
+ properties:
+ nodeAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ preference:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ properties:
+ nodeSelectorTerms:
+ items:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
+ automountServiceAccountToken:
+ type: boolean
+ batcher:
+ properties:
+ maxBatchSize:
+ type: integer
+ maxLatency:
+ type: integer
+ timeout:
+ type: integer
+ type: object
+ canaryTrafficPercent:
+ format: int64
+ type: integer
+ containerConcurrency:
+ format: int64
+ type: integer
+ containers:
+ items:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ dnsConfig:
+ properties:
+ nameservers:
+ items:
+ type: string
+ type: array
+ options:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ searches:
+ items:
+ type: string
+ type: array
+ type: object
+ dnsPolicy:
+ type: string
+ enableServiceLinks:
+ type: boolean
+ hostAliases:
+ items:
+ properties:
+ hostnames:
+ items:
+ type: string
+ type: array
+ ip:
+ type: string
+ type: object
+ type: array
+ hostIPC:
+ type: boolean
+ hostNetwork:
+ type: boolean
+ hostPID:
+ type: boolean
+ hostname:
+ type: string
+ imagePullSecrets:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
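+ # lightgbm: LightGBM predictor configuration (container fields plus protocolVersion, runtimeVersion, storageUri)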
+ lightgbm:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ protocolVersion:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ logger:
+ properties:
+ mode:
+ enum:
+ - all
+ - request
+ - response
+ type: string
+ url:
+ type: string
+ type: object
+ maxReplicas:
+ type: integer
+ minReplicas:
+ type: integer
+ model:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ modelFormat:
+ properties:
+ name:
+ type: string
+ version:
+ type: string
+ required:
+ - name
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ protocolVersion:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtime:
+ type: string
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ nodeName:
+ type: string
+ nodeSelector:
+ additionalProperties:
+ type: string
+ type: object
+ onnx:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ protocolVersion:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ overhead:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ paddle:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ protocolVersion:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ pmml:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ protocolVersion:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ preemptionPolicy:
+ type: string
+ priority:
+ format: int32
+ type: integer
+ priorityClassName:
+ type: string
+ pytorch:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ protocolVersion:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ readinessGates:
+ items:
+ properties:
+ conditionType:
+ type: string
+ required:
+ - conditionType
+ type: object
+ type: array
+ restartPolicy:
+ type: string
+ runtimeClassName:
+ type: string
+ schedulerName:
+ type: string
+ securityContext:
+ properties:
+ fsGroup:
+ format: int64
+ type: integer
+ fsGroupChangePolicy:
+ type: string
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ supplementalGroups:
+ items:
+ format: int64
+ type: integer
+ type: array
+ sysctls:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ serviceAccount:
+ type: string
+ serviceAccountName:
+ type: string
+ setHostnameAsFQDN:
+ type: boolean
+ shareProcessNamespace:
+ type: boolean
+ sklearn:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ protocolVersion:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ subdomain:
+ type: string
+ tensorflow:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ protocolVersion:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeout:
+ format: int64
+ type: integer
+ tolerations:
+ items:
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ format: int64
+ type: integer
+ value:
+ type: string
+ type: object
+ type: array
+ topologySpreadConstraints:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ maxSkew:
+ format: int32
+ type: integer
+ topologyKey:
+ type: string
+ whenUnsatisfiable:
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - topologyKey
+ - whenUnsatisfiable
+ x-kubernetes-list-type: map
+ triton:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ protocolVersion:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ volumes:
+ items:
+ properties:
+ awsElasticBlockStore:
+ properties:
+ fsType:
+ type: string
+ partition:
+ format: int32
+ type: integer
+ readOnly:
+ type: boolean
+ volumeID:
+ type: string
+ required:
+ - volumeID
+ type: object
+ azureDisk:
+ properties:
+ cachingMode:
+ type: string
+ diskName:
+ type: string
+ diskURI:
+ type: string
+ fsType:
+ type: string
+ kind:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - diskName
+ - diskURI
+ type: object
+ azureFile:
+ properties:
+ readOnly:
+ type: boolean
+ secretName:
+ type: string
+ shareName:
+ type: string
+ required:
+ - secretName
+ - shareName
+ type: object
+ cephfs:
+ properties:
+ monitors:
+ items:
+ type: string
+ type: array
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ secretFile:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ user:
+ type: string
+ required:
+ - monitors
+ type: object
+ cinder:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ volumeID:
+ type: string
+ required:
+ - volumeID
+ type: object
+ configMap:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ csi:
+ properties:
+ driver:
+ type: string
+ fsType:
+ type: string
+ nodePublishSecretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ readOnly:
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ items:
+ items:
+ properties:
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ required:
+ - path
+ type: object
+ type: array
+ type: object
+ emptyDir:
+ properties:
+ medium:
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ properties:
+ volumeClaimTemplate:
+ properties:
+ metadata:
+ type: object
+ spec:
+ properties:
+ accessModes:
+ items:
+ type: string
+ type: array
+ dataSource:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ dataSourceRef:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ selector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ storageClassName:
+ type: string
+ volumeMode:
+ type: string
+ volumeName:
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ properties:
+ fsType:
+ type: string
+ lun:
+ format: int32
+ type: integer
+ readOnly:
+ type: boolean
+ targetWWNs:
+ items:
+ type: string
+ type: array
+ wwids:
+ items:
+ type: string
+ type: array
+ type: object
+ flexVolume:
+ properties:
+ driver:
+ type: string
+ fsType:
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ type: object
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ required:
+ - driver
+ type: object
+ flocker:
+ properties:
+ datasetName:
+ type: string
+ datasetUUID:
+ type: string
+ type: object
+ gcePersistentDisk:
+ properties:
+ fsType:
+ type: string
+ partition:
+ format: int32
+ type: integer
+ pdName:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - pdName
+ type: object
+ gitRepo:
+ properties:
+ directory:
+ type: string
+ repository:
+ type: string
+ revision:
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ properties:
+ endpoints:
+ type: string
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ properties:
+ path:
+ type: string
+ type:
+ type: string
+ required:
+ - path
+ type: object
+ iscsi:
+ properties:
+ chapAuthDiscovery:
+ type: boolean
+ chapAuthSession:
+ type: boolean
+ fsType:
+ type: string
+ initiatorName:
+ type: string
+ iqn:
+ type: string
+ iscsiInterface:
+ type: string
+ lun:
+ format: int32
+ type: integer
+ portals:
+ items:
+ type: string
+ type: array
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ targetPortal:
+ type: string
+ required:
+ - iqn
+ - lun
+ - targetPortal
+ type: object
+ name:
+ type: string
+ nfs:
+ properties:
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ server:
+ type: string
+ required:
+ - path
+ - server
+ type: object
+ persistentVolumeClaim:
+ properties:
+ claimName:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - claimName
+ type: object
+ photonPersistentDisk:
+ properties:
+ fsType:
+ type: string
+ pdID:
+ type: string
+ required:
+ - pdID
+ type: object
+ portworxVolume:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ volumeID:
+ type: string
+ required:
+ - volumeID
+ type: object
+ projected:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ sources:
+ items:
+ properties:
+ configMap:
+ properties:
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ downwardAPI:
+ properties:
+ items:
+ items:
+ properties:
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ required:
+ - path
+ type: object
+ type: array
+ type: object
+ secret:
+ properties:
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ serviceAccountToken:
+ properties:
+ audience:
+ type: string
+ expirationSeconds:
+ format: int64
+ type: integer
+ path:
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ type: object
+ quobyte:
+ properties:
+ group:
+ type: string
+ readOnly:
+ type: boolean
+ registry:
+ type: string
+ tenant:
+ type: string
+ user:
+ type: string
+ volume:
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ properties:
+ fsType:
+ type: string
+ image:
+ type: string
+ keyring:
+ type: string
+ monitors:
+ items:
+ type: string
+ type: array
+ pool:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ user:
+ type: string
+ required:
+ - image
+ - monitors
+ type: object
+ scaleIO:
+ properties:
+ fsType:
+ type: string
+ gateway:
+ type: string
+ protectionDomain:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ sslEnabled:
+ type: boolean
+ storageMode:
+ type: string
+ storagePool:
+ type: string
+ system:
+ type: string
+ volumeName:
+ type: string
+ required:
+ - gateway
+ - secretRef
+ - system
+ type: object
+ secret:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ optional:
+ type: boolean
+ secretName:
+ type: string
+ type: object
+ storageos:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ volumeName:
+ type: string
+ volumeNamespace:
+ type: string
+ type: object
+ vsphereVolume:
+ properties:
+ fsType:
+ type: string
+ storagePolicyID:
+ type: string
+ storagePolicyName:
+ type: string
+ volumePath:
+ type: string
+ required:
+ - volumePath
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ xgboost:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ protocolVersion:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ type: object
+ transformer:
+ properties:
+ activeDeadlineSeconds:
+ format: int64
+ type: integer
+ affinity:
+ properties:
+ nodeAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ preference:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ properties:
+ nodeSelectorTerms:
+ items:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
+ automountServiceAccountToken:
+ type: boolean
+ batcher:
+ properties:
+ maxBatchSize:
+ type: integer
+ maxLatency:
+ type: integer
+ timeout:
+ type: integer
+ type: object
+ canaryTrafficPercent:
+ format: int64
+ type: integer
+ containerConcurrency:
+ format: int64
+ type: integer
+ containers:
+ items:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ dnsConfig:
+ properties:
+ nameservers:
+ items:
+ type: string
+ type: array
+ options:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ searches:
+ items:
+ type: string
+ type: array
+ type: object
+ dnsPolicy:
+ type: string
+ enableServiceLinks:
+ type: boolean
+ hostAliases:
+ items:
+ properties:
+ hostnames:
+ items:
+ type: string
+ type: array
+ ip:
+ type: string
+ type: object
+ type: array
+ hostIPC:
+ type: boolean
+ hostNetwork:
+ type: boolean
+ hostPID:
+ type: boolean
+ hostname:
+ type: string
+ imagePullSecrets:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ logger:
+ properties:
+ mode:
+ enum:
+ - all
+ - request
+ - response
+ type: string
+ url:
+ type: string
+ type: object
+ maxReplicas:
+ type: integer
+ minReplicas:
+ type: integer
+ nodeName:
+ type: string
+ nodeSelector:
+ additionalProperties:
+ type: string
+ type: object
+ overhead:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ preemptionPolicy:
+ type: string
+ priority:
+ format: int32
+ type: integer
+ priorityClassName:
+ type: string
+ readinessGates:
+ items:
+ properties:
+ conditionType:
+ type: string
+ required:
+ - conditionType
+ type: object
+ type: array
+ restartPolicy:
+ type: string
+ runtimeClassName:
+ type: string
+ schedulerName:
+ type: string
+ securityContext:
+ properties:
+ fsGroup:
+ format: int64
+ type: integer
+ fsGroupChangePolicy:
+ type: string
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ supplementalGroups:
+ items:
+ format: int64
+ type: integer
+ type: array
+ sysctls:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ serviceAccount:
+ type: string
+ serviceAccountName:
+ type: string
+ setHostnameAsFQDN:
+ type: boolean
+ shareProcessNamespace:
+ type: boolean
+ subdomain:
+ type: string
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeout:
+ format: int64
+ type: integer
+ tolerations:
+ items:
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ format: int64
+ type: integer
+ value:
+ type: string
+ type: object
+ type: array
+ topologySpreadConstraints:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ maxSkew:
+ format: int32
+ type: integer
+ topologyKey:
+ type: string
+ whenUnsatisfiable:
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - topologyKey
+ - whenUnsatisfiable
+ x-kubernetes-list-type: map
+ volumes:
+ items:
+ properties:
+ awsElasticBlockStore:
+ properties:
+ fsType:
+ type: string
+ partition:
+ format: int32
+ type: integer
+ readOnly:
+ type: boolean
+ volumeID:
+ type: string
+ required:
+ - volumeID
+ type: object
+ azureDisk:
+ properties:
+ cachingMode:
+ type: string
+ diskName:
+ type: string
+ diskURI:
+ type: string
+ fsType:
+ type: string
+ kind:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - diskName
+ - diskURI
+ type: object
+ azureFile:
+ properties:
+ readOnly:
+ type: boolean
+ secretName:
+ type: string
+ shareName:
+ type: string
+ required:
+ - secretName
+ - shareName
+ type: object
+ cephfs:
+ properties:
+ monitors:
+ items:
+ type: string
+ type: array
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ secretFile:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ user:
+ type: string
+ required:
+ - monitors
+ type: object
+ cinder:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ volumeID:
+ type: string
+ required:
+ - volumeID
+ type: object
+ configMap:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ csi:
+ properties:
+ driver:
+ type: string
+ fsType:
+ type: string
+ nodePublishSecretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ readOnly:
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ items:
+ items:
+ properties:
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ required:
+ - path
+ type: object
+ type: array
+ type: object
+ emptyDir:
+ properties:
+ medium:
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ properties:
+ volumeClaimTemplate:
+ properties:
+ metadata:
+ type: object
+ spec:
+ properties:
+ accessModes:
+ items:
+ type: string
+ type: array
+ dataSource:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ dataSourceRef:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ selector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ storageClassName:
+ type: string
+ volumeMode:
+ type: string
+ volumeName:
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ properties:
+ fsType:
+ type: string
+ lun:
+ format: int32
+ type: integer
+ readOnly:
+ type: boolean
+ targetWWNs:
+ items:
+ type: string
+ type: array
+ wwids:
+ items:
+ type: string
+ type: array
+ type: object
+ flexVolume:
+ properties:
+ driver:
+ type: string
+ fsType:
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ type: object
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ required:
+ - driver
+ type: object
+ flocker:
+ properties:
+ datasetName:
+ type: string
+ datasetUUID:
+ type: string
+ type: object
+ gcePersistentDisk:
+ properties:
+ fsType:
+ type: string
+ partition:
+ format: int32
+ type: integer
+ pdName:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - pdName
+ type: object
+ gitRepo:
+ properties:
+ directory:
+ type: string
+ repository:
+ type: string
+ revision:
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ properties:
+ endpoints:
+ type: string
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ properties:
+ path:
+ type: string
+ type:
+ type: string
+ required:
+ - path
+ type: object
+ iscsi:
+ properties:
+ chapAuthDiscovery:
+ type: boolean
+ chapAuthSession:
+ type: boolean
+ fsType:
+ type: string
+ initiatorName:
+ type: string
+ iqn:
+ type: string
+ iscsiInterface:
+ type: string
+ lun:
+ format: int32
+ type: integer
+ portals:
+ items:
+ type: string
+ type: array
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ targetPortal:
+ type: string
+ required:
+ - iqn
+ - lun
+ - targetPortal
+ type: object
+ name:
+ type: string
+ nfs:
+ properties:
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ server:
+ type: string
+ required:
+ - path
+ - server
+ type: object
+ persistentVolumeClaim:
+ properties:
+ claimName:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - claimName
+ type: object
+ photonPersistentDisk:
+ properties:
+ fsType:
+ type: string
+ pdID:
+ type: string
+ required:
+ - pdID
+ type: object
+ portworxVolume:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ volumeID:
+ type: string
+ required:
+ - volumeID
+ type: object
+ projected:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ sources:
+ items:
+ properties:
+ configMap:
+ properties:
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ downwardAPI:
+ properties:
+ items:
+ items:
+ properties:
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ required:
+ - path
+ type: object
+ type: array
+ type: object
+ secret:
+ properties:
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ serviceAccountToken:
+ properties:
+ audience:
+ type: string
+ expirationSeconds:
+ format: int64
+ type: integer
+ path:
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ type: object
+ quobyte:
+ properties:
+ group:
+ type: string
+ readOnly:
+ type: boolean
+ registry:
+ type: string
+ tenant:
+ type: string
+ user:
+ type: string
+ volume:
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ properties:
+ fsType:
+ type: string
+ image:
+ type: string
+ keyring:
+ type: string
+ monitors:
+ items:
+ type: string
+ type: array
+ pool:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ user:
+ type: string
+ required:
+ - image
+ - monitors
+ type: object
+ scaleIO:
+ properties:
+ fsType:
+ type: string
+ gateway:
+ type: string
+ protectionDomain:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ sslEnabled:
+ type: boolean
+ storageMode:
+ type: string
+ storagePool:
+ type: string
+ system:
+ type: string
+ volumeName:
+ type: string
+ required:
+ - gateway
+ - secretRef
+ - system
+ type: object
+ secret:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ optional:
+ type: boolean
+ secretName:
+ type: string
+ type: object
+ storageos:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ volumeName:
+ type: string
+ volumeNamespace:
+ type: string
+ type: object
+ vsphereVolume:
+ properties:
+ fsType:
+ type: string
+ storagePolicyID:
+ type: string
+ storagePolicyName:
+ type: string
+ volumePath:
+ type: string
+ required:
+ - volumePath
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ required:
+ - predictor
+ type: object
+ status:
+ properties:
+ address:
+ properties:
+ url:
+ type: string
+ type: object
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ components:
+ additionalProperties:
+ properties:
+ address:
+ properties:
+ url:
+ type: string
+ type: object
+ latestCreatedRevision:
+ type: string
+ latestReadyRevision:
+ type: string
+ latestRolledoutRevision:
+ type: string
+ previousRolledoutRevision:
+ type: string
+ traffic:
+ items:
+ properties:
+ configurationName:
+ type: string
+ latestRevision:
+ type: boolean
+ percent:
+ format: int64
+ type: integer
+ revisionName:
+ type: string
+ tag:
+ type: string
+ url:
+ type: string
+ type: object
+ type: array
+ url:
+ type: string
+ type: object
+ type: object
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ severity:
+ type: string
+ status:
+ type: string
+ type:
+ type: string
+ required:
+ - status
+ - type
+ type: object
+ type: array
+ observedGeneration:
+ format: int64
+ type: integer
+ url:
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.4.0
+ creationTimestamp: null
+ name: servingruntimes.serving.kserve.io
+spec:
+ group: serving.kserve.io
+ names:
+ kind: ServingRuntime
+ listKind: ServingRuntimeList
+ plural: servingruntimes
+ singular: servingruntime
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .spec.disabled
+ name: Disabled
+ type: boolean
+ - jsonPath: .spec.supportedModelFormats[*].name
+ name: ModelType
+ type: string
+ - jsonPath: .spec.containers[*].name
+ name: Containers
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ type: string
+ kind:
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ affinity:
+ properties:
+ nodeAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ preference:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ properties:
+ nodeSelectorTerms:
+ items:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
+ builtInAdapter:
+ properties:
+ memBufferBytes:
+ type: integer
+ modelLoadingTimeoutMillis:
+ type: integer
+ runtimeManagementPort:
+ type: integer
+ serverType:
+ enum:
+ - triton
+ - mlserver
+ type: string
+ type: object
+ containers:
+ items:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ workingDir:
+ type: string
+ type: object
+ type: array
+ disabled:
+ type: boolean
+ grpcDataEndpoint:
+ type: string
+ grpcEndpoint:
+ type: string
+ httpDataEndpoint:
+ type: string
+ multiModel:
+ type: boolean
+ nodeSelector:
+ additionalProperties:
+ type: string
+ type: object
+ replicas:
+ type: integer
+ storageHelper:
+ properties:
+ disabled:
+ type: boolean
+ type: object
+ supportedModelFormats:
+ items:
+ properties:
+ autoSelect:
+ type: boolean
+ name:
+ type: string
+ version:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ tolerations:
+ items:
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ format: int64
+ type: integer
+ value:
+ type: string
+ type: object
+ type: array
+ required:
+ - containers
+ type: object
+ status:
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.4.0
+ creationTimestamp: null
+ name: trainedmodels.serving.kserve.io
+spec:
+ group: serving.kserve.io
+ names:
+ kind: TrainedModel
+ listKind: TrainedModelList
+ plural: trainedmodels
+ shortNames:
+ - tm
+ singular: trainedmodel
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .status.url
+ name: URL
+ type: string
+ - jsonPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ type: string
+ kind:
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ inferenceService:
+ type: string
+ model:
+ properties:
+ framework:
+ type: string
+ memory:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ storageUri:
+ type: string
+ required:
+ - framework
+ - memory
+ - storageUri
+ type: object
+ required:
+ - inferenceService
+ - model
+ type: object
+ status:
+ properties:
+ address:
+ properties:
+ url:
+ type: string
+ type: object
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ severity:
+ type: string
+ status:
+ type: string
+ type:
+ type: string
+ required:
+ - status
+ - type
+ type: object
+ type: array
+ observedGeneration:
+ format: int64
+ type: integer
+ url:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ app.kubernetes.io/instance: kserve-controller-manager
+ app.kubernetes.io/managed-by: kserve-controller-manager
+ app.kubernetes.io/name: kserve-controller-manager
+ name: kserve-controller-manager
+ namespace: kserve
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: leader-election-role
+ namespace: kserve
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - ""
+ resources:
+ - configmaps/status
+ verbs:
+ - get
+ - update
+ - patch
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: kserve-manager-role
+rules:
+- apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - mutatingwebhookconfigurations
+ - validatingwebhookconfigurations
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - deployments
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - create
+ - get
+ - list
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - networking.istio.io
+ resources:
+ - virtualservices
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - networking.istio.io
+ resources:
+ - virtualservices/finalizers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - networking.istio.io
+ resources:
+ - virtualservices/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingresses
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - serving.knative.dev
+ resources:
+ - services
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - serving.knative.dev
+ resources:
+ - services/finalizers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - serving.knative.dev
+ resources:
+ - services/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - serving.kserve.io
+ resources:
+ - clusterservingruntimes
+ - clusterservingruntimes/finalizers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - serving.kserve.io
+ resources:
+ - clusterservingruntimes/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - serving.kserve.io
+ resources:
+ - inferenceservices
+ - inferenceservices/finalizers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - serving.kserve.io
+ resources:
+ - inferenceservices/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - serving.kserve.io
+ resources:
+ - servingruntimes
+ - servingruntimes/finalizers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - serving.kserve.io
+ resources:
+ - servingruntimes/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - serving.kserve.io
+ resources:
+ - trainedmodels
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - serving.kserve.io
+ resources:
+ - trainedmodels/status
+ verbs:
+ - get
+ - patch
+ - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: kserve-proxy-role
+rules:
+- apiGroups:
+ - authentication.k8s.io
+ resources:
+ - tokenreviews
+ verbs:
+ - create
+- apiGroups:
+ - authorization.k8s.io
+ resources:
+ - subjectaccessreviews
+ verbs:
+ - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: leader-election-rolebinding
+ namespace: kserve
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: leader-election-role
+subjects:
+- kind: ServiceAccount
+ name: kserve-controller-manager
+ namespace: kserve
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: kserve-manager-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: kserve-manager-role
+subjects:
+- kind: ServiceAccount
+ name: kserve-controller-manager
+ namespace: kserve
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: kserve-proxy-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: kserve-proxy-role
+subjects:
+- kind: ServiceAccount
+ name: kserve-controller-manager
+ namespace: kserve
+---
+apiVersion: v1
+data:
+ agent: |-
+ {
+ "image" : "kserve/agent:v0.8.0",
+ "memoryRequest": "100Mi",
+ "memoryLimit": "1Gi",
+ "cpuRequest": "100m",
+ "cpuLimit": "1"
+ }
+ batcher: |-
+ {
+ "image" : "kserve/agent:v0.8.0",
+ "memoryRequest": "1Gi",
+ "memoryLimit": "1Gi",
+ "cpuRequest": "1",
+ "cpuLimit": "1"
+ }
+ credentials: |-
+ {
+ "gcs": {
+ "gcsCredentialFileName": "gcloud-application-credentials.json"
+ },
+ "s3": {
+ "s3AccessKeyIDName": "AWS_ACCESS_KEY_ID",
+ "s3SecretAccessKeyName": "AWS_SECRET_ACCESS_KEY"
+ }
+ }
+ deploy: |-
+ {
+ "defaultDeploymentMode": "Serverless"
+ }
+ explainers: |-
+ {
+ "alibi": {
+ "image" : "kserve/alibi-explainer",
+ "defaultImageVersion": "latest"
+ },
+ "aix": {
+ "image" : "kserve/aix-explainer",
+ "defaultImageVersion": "latest"
+ },
+ "art": {
+ "image" : "kserve/art-explainer",
+ "defaultImageVersion": "latest"
+ }
+ }
+ ingress: |-
+ {
+ "ingressGateway" : "knative-serving/knative-ingress-gateway",
+ "ingressService" : "istio-ingressgateway.istio-system.svc.cluster.local",
+ "localGateway" : "knative-serving/knative-local-gateway",
+ "localGatewayService" : "knative-local-gateway.istio-system.svc.cluster.local",
+ "ingressDomain" : "example.com"
+ }
+ logger: |-
+ {
+ "image" : "kserve/agent:v0.8.0",
+ "memoryRequest": "100Mi",
+ "memoryLimit": "1Gi",
+ "cpuRequest": "100m",
+ "cpuLimit": "1",
+ "defaultUrl": "http://default-broker"
+ }
+ predictors: |-
+ {
+ "tensorflow": {
+ "supportedFrameworks": [
+ "tensorflow"
+ ],
+ "multiModelServer": false
+ },
+ "onnx": {
+ "supportedFrameworks": [
+ "onnx"
+ ],
+ "multiModelServer": false
+ },
+ "sklearn": {
+ "v1": {
+ "supportedFrameworks": [
+ "sklearn"
+ ],
+ "multiModelServer": true
+ },
+ "v2": {
+ "supportedFrameworks": [
+ "sklearn"
+ ],
+ "multiModelServer": true
+ }
+ },
+ "xgboost": {
+ "v1": {
+ "supportedFrameworks": [
+ "xgboost"
+ ],
+ "multiModelServer": true
+ },
+ "v2": {
+ "supportedFrameworks": [
+ "xgboost"
+ ],
+ "multiModelServer": true
+ }
+ },
+ "pytorch": {
+ "supportedFrameworks": [
+ "pytorch"
+ ],
+ "multiModelServer": false
+ },
+ "triton": {
+ "supportedFrameworks": [
+ "tensorrt",
+ "tensorflow",
+ "onnx",
+ "pytorch"
+ ],
+ "multiModelServer": true
+ },
+ "pmml": {
+ "supportedFrameworks": [
+ "pmml"
+ ],
+ "multiModelServer": false
+ },
+ "lightgbm": {
+ "supportedFrameworks": [
+ "lightgbm"
+ ],
+ "multiModelServer": false
+ },
+ "paddle": {
+ "supportedFrameworks": [
+ "paddle"
+ ],
+ "multiModelServer": false
+ }
+ }
+ storageInitializer: |-
+ {
+ "image" : "kserve/storage-initializer:v0.8.0",
+ "memoryRequest": "100Mi",
+ "memoryLimit": "1Gi",
+ "cpuRequest": "100m",
+ "cpuLimit": "1"
+ }
+ transformers: |-
+ {
+ }
+kind: ConfigMap
+metadata:
+ name: inferenceservice-config
+ namespace: kserve
+---
+apiVersion: v1
+data:
+ ingressGateway: knative-serving/knative-ingress-gateway
+kind: ConfigMap
+metadata:
+ name: kserve-config
+ namespace: kserve
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: kserve-webhook-server-secret
+ namespace: kserve
+---
+apiVersion: v1
+kind: Service
+metadata:
+ annotations:
+ prometheus.io/port: "8443"
+ prometheus.io/scheme: https
+ prometheus.io/scrape: "true"
+ labels:
+ control-plane: kserve-controller-manager
+ controller-tools.k8s.io: "1.0"
+ name: kserve-controller-manager-metrics-service
+ namespace: kserve
+spec:
+ ports:
+ - name: https
+ port: 8443
+ targetPort: https
+ selector:
+ control-plane: kserve-controller-manager
+ controller-tools.k8s.io: "1.0"
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ control-plane: kserve-controller-manager
+ controller-tools.k8s.io: "1.0"
+ name: kserve-controller-manager-service
+ namespace: kserve
+spec:
+ ports:
+ - port: 8443
+ protocol: TCP
+ targetPort: https
+ selector:
+ control-plane: kserve-controller-manager
+ controller-tools.k8s.io: "1.0"
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: kserve-webhook-server-service
+ namespace: kserve
+spec:
+ ports:
+ - port: 443
+ targetPort: webhook-server
+ selector:
+ control-plane: kserve-controller-manager
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ labels:
+ control-plane: kserve-controller-manager
+ controller-tools.k8s.io: "1.0"
+ name: kserve-controller-manager
+ namespace: kserve
+spec:
+ selector:
+ matchLabels:
+ control-plane: kserve-controller-manager
+ controller-tools.k8s.io: "1.0"
+ serviceName: controller-manager-service
+ template:
+ metadata:
+ labels:
+ control-plane: kserve-controller-manager
+ controller-tools.k8s.io: "1.0"
+ spec:
+ containers:
+ - args:
+ - --metrics-addr=127.0.0.1:8080
+ command:
+ - /manager
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SECRET_NAME
+ value: kserve-webhook-server-cert
+ image: kserve/kserve-controller:v0.8.0
+ imagePullPolicy: Always
+ name: manager
+ ports:
+ - containerPort: 9443
+ name: webhook-server
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 300Mi
+ requests:
+ cpu: 100m
+ memory: 200Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ volumeMounts:
+ - mountPath: /tmp/k8s-webhook-server/serving-certs
+ name: cert
+ readOnly: true
+ - args:
+ - --secure-listen-address=0.0.0.0:8443
+ - --upstream=http://127.0.0.1:8080/
+ - --logtostderr=true
+ - --v=10
+ image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0
+ name: kube-rbac-proxy
+ ports:
+ - containerPort: 8443
+ name: https
+ protocol: TCP
+ securityContext:
+ runAsNonRoot: true
+ serviceAccountName: kserve-controller-manager
+ terminationGracePeriodSeconds: 10
+ volumes:
+ - name: cert
+ secret:
+ defaultMode: 420
+ secretName: kserve-webhook-server-cert
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: serving-cert
+ namespace: kserve
+spec:
+ commonName: kserve-webhook-server-service.kserve.svc
+ dnsNames:
+ - kserve-webhook-server-service.kserve.svc
+ issuerRef:
+ kind: Issuer
+ name: selfsigned-issuer
+ secretName: kserve-webhook-server-cert
+---
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+ name: selfsigned-issuer
+ namespace: kserve
+spec:
+ selfSigned: {}
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: MutatingWebhookConfiguration
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: kserve/serving-cert
+ name: inferenceservice.serving.kserve.io
+webhooks:
+- admissionReviewVersions:
+ - v1beta1
+ clientConfig:
+ caBundle: Cg==
+ service:
+ name: kserve-webhook-server-service
+ namespace: kserve
+ path: /mutate-serving-kserve-io-v1beta1-inferenceservice
+ failurePolicy: Fail
+ name: inferenceservice.kserve-webhook-server.defaulter
+ rules:
+ - apiGroups:
+ - serving.kserve.io
+ apiVersions:
+ - v1beta1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - inferenceservices
+ sideEffects: None
+- admissionReviewVersions:
+ - v1beta1
+ clientConfig:
+ caBundle: Cg==
+ service:
+ name: kserve-webhook-server-service
+ namespace: kserve
+ path: /mutate-pods
+ failurePolicy: Fail
+ name: inferenceservice.kserve-webhook-server.pod-mutator
+ namespaceSelector:
+ matchExpressions:
+ - key: control-plane
+ operator: DoesNotExist
+ objectSelector:
+ matchExpressions:
+ - key: serving.kserve.io/inferenceservice
+ operator: Exists
+ rules:
+ - apiGroups:
+ - ""
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - pods
+ sideEffects: None
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: kserve/serving-cert
+ name: inferenceservice.serving.kserve.io
+webhooks:
+- admissionReviewVersions:
+ - v1beta1
+ clientConfig:
+ caBundle: Cg==
+ service:
+ name: kserve-webhook-server-service
+ namespace: kserve
+ path: /validate-serving-kserve-io-v1beta1-inferenceservice
+ failurePolicy: Fail
+ name: inferenceservice.kserve-webhook-server.validator
+ rules:
+ - apiGroups:
+ - serving.kserve.io
+ apiVersions:
+ - v1beta1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - inferenceservices
+ sideEffects: None
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: kserve/serving-cert
+ name: trainedmodel.serving.kserve.io
+webhooks:
+- admissionReviewVersions:
+ - v1beta1
+ clientConfig:
+ caBundle: Cg==
+ service:
+ name: kserve-webhook-server-service
+ namespace: kserve
+ path: /validate-serving-kserve-io-v1alpha1-trainedmodel
+ failurePolicy: Fail
+ name: trainedmodel.kserve-webhook-server.validator
+ rules:
+ - apiGroups:
+ - serving.kserve.io
+ apiVersions:
+ - v1alpha1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - trainedmodels
+ sideEffects: None
+---
+apiVersion: serving.kserve.io/v1alpha1
+kind: ClusterServingRuntime
+metadata:
+ name: kserve-lgbserver
+spec:
+ containers:
+ - args:
+ - --model_name={{.Name}}
+ - --model_dir=/mnt/models
+ - --http_port=8080
+ - --nthread=1
+ image: kserve/lgbserver:v0.8.0
+ name: kserve-container
+ resources:
+ limits:
+ cpu: "1"
+ memory: 2Gi
+ requests:
+ cpu: "1"
+ memory: 2Gi
+ supportedModelFormats:
+ - autoSelect: true
+ name: lightgbm
+ version: "2"
+---
+apiVersion: serving.kserve.io/v1alpha1
+kind: ClusterServingRuntime
+metadata:
+ name: kserve-mlserver
+spec:
+ containers:
+ - env:
+ - name: MLSERVER_MODEL_IMPLEMENTATION
+ value: '{{.Labels.modelClass}}'
+ - name: MLSERVER_HTTP_PORT
+ value: "8080"
+ - name: MLSERVER_GRPC_PORT
+ value: "9000"
+ - name: MODELS_DIR
+ value: /mnt/models
+ image: docker.io/seldonio/mlserver:0.5.3
+ name: kserve-container
+ resources:
+ limits:
+ cpu: "1"
+ memory: 2Gi
+ requests:
+ cpu: "1"
+ memory: 2Gi
+ supportedModelFormats:
+ - name: sklearn
+ version: "0"
+ - name: xgboost
+ version: "1"
+ - name: lightgbm
+ version: "3"
+ - autoSelect: true
+ name: mlflow
+ version: "1"
+---
+apiVersion: serving.kserve.io/v1alpha1
+kind: ClusterServingRuntime
+metadata:
+ name: kserve-paddleserver
+spec:
+ containers:
+ - args:
+ - --model_name={{.Name}}
+ - --model_dir=/mnt/models
+ - --http_port=8080
+ image: kserve/paddleserver:v0.8.0
+ name: kserve-container
+ resources:
+ limits:
+ cpu: "1"
+ memory: 2Gi
+ requests:
+ cpu: "1"
+ memory: 2Gi
+ supportedModelFormats:
+ - autoSelect: true
+ name: paddle
+ version: "2"
+---
+apiVersion: serving.kserve.io/v1alpha1
+kind: ClusterServingRuntime
+metadata:
+ name: kserve-pmmlserver
+spec:
+ containers:
+ - args:
+ - --model_name={{.Name}}
+ - --model_dir=/mnt/models
+ - --http_port=8080
+ image: kserve/pmmlserver:v0.8.0
+ name: kserve-container
+ resources:
+ limits:
+ cpu: "1"
+ memory: 2Gi
+ requests:
+ cpu: "1"
+ memory: 2Gi
+ supportedModelFormats:
+ - autoSelect: true
+ name: pmml
+ version: "3"
+ - autoSelect: true
+ name: pmml
+ version: "4"
+---
+apiVersion: serving.kserve.io/v1alpha1
+kind: ClusterServingRuntime
+metadata:
+ name: kserve-sklearnserver
+spec:
+ containers:
+ - args:
+ - --model_name={{.Name}}
+ - --model_dir=/mnt/models
+ - --http_port=8080
+ image: kserve/sklearnserver:v0.8.0
+ name: kserve-container
+ resources:
+ limits:
+ cpu: "1"
+ memory: 2Gi
+ requests:
+ cpu: "1"
+ memory: 2Gi
+ supportedModelFormats:
+ - autoSelect: true
+ name: sklearn
+ version: "1"
+---
+apiVersion: serving.kserve.io/v1alpha1
+kind: ClusterServingRuntime
+metadata:
+ name: kserve-tensorflow-serving
+spec:
+ containers:
+ - args:
+ - --model_name={{.Name}}
+ - --port=9000
+ - --rest_api_port=8080
+ - --model_base_path=/mnt/models
+ - --rest_api_timeout_in_ms=60000
+ command:
+ - /usr/bin/tensorflow_model_server
+ image: tensorflow/serving:2.6.2
+ name: kserve-container
+ resources:
+ limits:
+ cpu: "1"
+ memory: 2Gi
+ requests:
+ cpu: "1"
+ memory: 2Gi
+ supportedModelFormats:
+ - autoSelect: true
+ name: tensorflow
+ version: "1"
+ - autoSelect: true
+ name: tensorflow
+ version: "2"
+---
+apiVersion: serving.kserve.io/v1alpha1
+kind: ClusterServingRuntime
+metadata:
+ name: kserve-torchserve
+spec:
+ containers:
+ - args:
+ - torchserve
+ - --start
+ - --model-store=/mnt/models/model-store
+ - --ts-config=/mnt/models/config/config.properties
+ env:
+ - name: TS_SERVICE_ENVELOPE
+ value: '{{.Labels.serviceEnvelope}}'
+ image: kserve/torchserve-kfs:0.5.3
+ name: kserve-container
+ resources:
+ limits:
+ cpu: "1"
+ memory: 2Gi
+ requests:
+ cpu: "1"
+ memory: 2Gi
+ supportedModelFormats:
+ - autoSelect: true
+ name: pytorch
+ version: "1"
+---
+apiVersion: serving.kserve.io/v1alpha1
+kind: ClusterServingRuntime
+metadata:
+ name: kserve-tritonserver
+spec:
+ containers:
+ - args:
+ - tritonserver
+ - --model-store=/mnt/models
+ - --grpc-port=9000
+ - --http-port=8080
+ - --allow-grpc=true
+ - --allow-http=true
+ image: nvcr.io/nvidia/tritonserver:21.09-py3
+ name: kserve-container
+ resources:
+ limits:
+ cpu: "1"
+ memory: 2Gi
+ requests:
+ cpu: "1"
+ memory: 2Gi
+ supportedModelFormats:
+ - name: tensorrt
+ version: "8"
+ - name: tensorflow
+ version: "1"
+ - name: tensorflow
+ version: "2"
+ - autoSelect: true
+ name: onnx
+ version: "1"
+ - name: pytorch
+ version: "1"
+ - autoSelect: true
+ name: triton
+ version: "2"
+---
+apiVersion: serving.kserve.io/v1alpha1
+kind: ClusterServingRuntime
+metadata:
+ name: kserve-xgbserver
+spec:
+ containers:
+ - args:
+ - --model_name={{.Name}}
+ - --model_dir=/mnt/models
+ - --http_port=8080
+ - --nthread=1
+ image: kserve/xgbserver:v0.8.0
+ name: kserve-container
+ resources:
+ limits:
+ cpu: "1"
+ memory: 2Gi
+ requests:
+ cpu: "1"
+ memory: 2Gi
+ supportedModelFormats:
+ - autoSelect: true
+ name: xgboost
+ version: "1"
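The runtimes defined above are matched to InferenceServices by model format: when an InferenceService declares a modelFormat that a ClusterServingRuntime lists under supportedModelFormats with autoSelect: true (for example sklearn version "1" on kserve-sklearnserver), the controller selects that runtime and injects its kserve-container. A minimal sketch of such an InferenceService, assuming the manifest above has been applied; the resource name and storageUri below are placeholders, not values from this change:

apiVersion: serving.kserve.io/v1beta1
kind: InferenceService
metadata:
  name: sklearn-iris                # placeholder name
spec:
  predictor:
    model:
      modelFormat:
        name: sklearn               # matched against supportedModelFormats of the runtimes above
      storageUri: gs://kfserving-examples/models/sklearn/1.0/model   # hypothetical model location

With "defaultDeploymentMode": "Serverless" in the deploy block of the inferenceservice-config ConfigMap, such a resource is reconciled into a Knative Service exposed through the knative-ingress-gateway configured in the ingress block.
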
diff --git a/install/v0.8.0/kserve_kubeflow.yaml b/install/v0.8.0/kserve_kubeflow.yaml
new file mode 100644
index 00000000000..196be601f64
--- /dev/null
+++ b/install/v0.8.0/kserve_kubeflow.yaml
@@ -0,0 +1,15644 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.4.0
+ creationTimestamp: null
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ name: clusterservingruntimes.serving.kserve.io
+spec:
+ group: serving.kserve.io
+ names:
+ kind: ClusterServingRuntime
+ listKind: ClusterServingRuntimeList
+ plural: clusterservingruntimes
+ singular: clusterservingruntime
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .spec.disabled
+ name: Disabled
+ type: boolean
+ - jsonPath: .spec.supportedModelFormats[*].name
+ name: ModelType
+ type: string
+ - jsonPath: .spec.containers[*].name
+ name: Containers
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ type: string
+ kind:
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ affinity:
+ properties:
+ nodeAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ preference:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ properties:
+ nodeSelectorTerms:
+ items:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
+ builtInAdapter:
+ properties:
+ memBufferBytes:
+ type: integer
+ modelLoadingTimeoutMillis:
+ type: integer
+ runtimeManagementPort:
+ type: integer
+ serverType:
+ enum:
+ - triton
+ - mlserver
+ type: string
+ type: object
+ containers:
+ items:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ workingDir:
+ type: string
+ type: object
+ type: array
+ disabled:
+ type: boolean
+ grpcDataEndpoint:
+ type: string
+ grpcEndpoint:
+ type: string
+ httpDataEndpoint:
+ type: string
+ multiModel:
+ type: boolean
+ nodeSelector:
+ additionalProperties:
+ type: string
+ type: object
+ replicas:
+ type: integer
+ storageHelper:
+ properties:
+ disabled:
+ type: boolean
+ type: object
+ supportedModelFormats:
+ items:
+ properties:
+ autoSelect:
+ type: boolean
+ name:
+ type: string
+ version:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ tolerations:
+ items:
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ format: int64
+ type: integer
+ value:
+ type: string
+ type: object
+ type: array
+ required:
+ - containers
+ type: object
+ status:
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: kubeflow/serving-cert
+ controller-gen.kubebuilder.io/version: v0.4.0
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ name: inferenceservices.serving.kserve.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ caBundle: Cg==
+ service:
+ name: kserve-webhook-server-service
+ namespace: kubeflow
+ path: /convert
+ conversionReviewVersions:
+ - v1beta1
+ group: serving.kserve.io
+ names:
+ kind: InferenceService
+ listKind: InferenceServiceList
+ plural: inferenceservices
+ shortNames:
+ - isvc
+ singular: inferenceservice
+ preserveUnknownFields: false
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .status.url
+ name: URL
+ type: string
+ - jsonPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - jsonPath: .status.components.predictor.traffic[?(@.tag=='prev')].percent
+ name: Prev
+ type: integer
+ - jsonPath: .status.components.predictor.traffic[?(@.latestRevision==true)].percent
+ name: Latest
+ type: integer
+ - jsonPath: .status.components.predictor.traffic[?(@.tag=='prev')].revisionName
+ name: PrevRolledoutRevision
+ type: string
+ - jsonPath: .status.components.predictor.traffic[?(@.latestRevision==true)].revisionName
+ name: LatestReadyRevision
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ type: string
+ kind:
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ explainer:
+ properties:
+ activeDeadlineSeconds:
+ format: int64
+ type: integer
+ affinity:
+ properties:
+ nodeAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ preference:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ properties:
+ nodeSelectorTerms:
+ items:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
+ aix:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ config:
+ additionalProperties:
+ type: string
+ type: object
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ type:
+ type: string
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
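+ # Alibi explainer: standard container fields plus explainer-specific
+ # config, runtimeVersion, storageUri and type. A minimal, hypothetical
+ # usage of this schema (names and values are illustrative only):
+ #   explainer:
+ #     alibi:
+ #       type: AnchorTabular
+ #       storageUri: gs://example-bucket/explainer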
+ alibi:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ config:
+ additionalProperties:
+ type: string
+ type: object
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ type:
+ type: string
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
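+ # "art" explainer (Adversarial Robustness Toolbox): mirrors the
+ # "alibi" container-style schema above.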
+ art:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ config:
+ additionalProperties:
+ type: string
+ type: object
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ type:
+ type: string
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
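+ # Component-level settings (batcher, canaryTrafficPercent, logger,
+ # min/maxReplicas, timeout) and embedded Kubernetes PodSpec fields
+ # (containers, tolerations, volumes, ...) for this component follow.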
+ automountServiceAccountToken:
+ type: boolean
+ batcher:
+ properties:
+ maxBatchSize:
+ type: integer
+ maxLatency:
+ type: integer
+ timeout:
+ type: integer
+ type: object
+ canaryTrafficPercent:
+ format: int64
+ type: integer
+ containerConcurrency:
+ format: int64
+ type: integer
+ containers:
+ items:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ dnsConfig:
+ properties:
+ nameservers:
+ items:
+ type: string
+ type: array
+ options:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ searches:
+ items:
+ type: string
+ type: array
+ type: object
+ dnsPolicy:
+ type: string
+ enableServiceLinks:
+ type: boolean
+ hostAliases:
+ items:
+ properties:
+ hostnames:
+ items:
+ type: string
+ type: array
+ ip:
+ type: string
+ type: object
+ type: array
+ hostIPC:
+ type: boolean
+ hostNetwork:
+ type: boolean
+ hostPID:
+ type: boolean
+ hostname:
+ type: string
+ imagePullSecrets:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ logger:
+ properties:
+ mode:
+ enum:
+ - all
+ - request
+ - response
+ type: string
+ url:
+ type: string
+ type: object
+ maxReplicas:
+ type: integer
+ minReplicas:
+ type: integer
+ nodeName:
+ type: string
+ nodeSelector:
+ additionalProperties:
+ type: string
+ type: object
+ overhead:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ preemptionPolicy:
+ type: string
+ priority:
+ format: int32
+ type: integer
+ priorityClassName:
+ type: string
+ readinessGates:
+ items:
+ properties:
+ conditionType:
+ type: string
+ required:
+ - conditionType
+ type: object
+ type: array
+ restartPolicy:
+ type: string
+ runtimeClassName:
+ type: string
+ schedulerName:
+ type: string
+ securityContext:
+ properties:
+ fsGroup:
+ format: int64
+ type: integer
+ fsGroupChangePolicy:
+ type: string
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ supplementalGroups:
+ items:
+ format: int64
+ type: integer
+ type: array
+ sysctls:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ serviceAccount:
+ type: string
+ serviceAccountName:
+ type: string
+ setHostnameAsFQDN:
+ type: boolean
+ shareProcessNamespace:
+ type: boolean
+ subdomain:
+ type: string
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeout:
+ format: int64
+ type: integer
+ tolerations:
+ items:
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ format: int64
+ type: integer
+ value:
+ type: string
+ type: object
+ type: array
+ topologySpreadConstraints:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ maxSkew:
+ format: int32
+ type: integer
+ topologyKey:
+ type: string
+ whenUnsatisfiable:
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - topologyKey
+ - whenUnsatisfiable
+ x-kubernetes-list-type: map
+ volumes:
+ items:
+ properties:
+ awsElasticBlockStore:
+ properties:
+ fsType:
+ type: string
+ partition:
+ format: int32
+ type: integer
+ readOnly:
+ type: boolean
+ volumeID:
+ type: string
+ required:
+ - volumeID
+ type: object
+ azureDisk:
+ properties:
+ cachingMode:
+ type: string
+ diskName:
+ type: string
+ diskURI:
+ type: string
+ fsType:
+ type: string
+ kind:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - diskName
+ - diskURI
+ type: object
+ azureFile:
+ properties:
+ readOnly:
+ type: boolean
+ secretName:
+ type: string
+ shareName:
+ type: string
+ required:
+ - secretName
+ - shareName
+ type: object
+ cephfs:
+ properties:
+ monitors:
+ items:
+ type: string
+ type: array
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ secretFile:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ user:
+ type: string
+ required:
+ - monitors
+ type: object
+ cinder:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ volumeID:
+ type: string
+ required:
+ - volumeID
+ type: object
+ configMap:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ csi:
+ properties:
+ driver:
+ type: string
+ fsType:
+ type: string
+ nodePublishSecretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ readOnly:
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ items:
+ items:
+ properties:
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ required:
+ - path
+ type: object
+ type: array
+ type: object
+ emptyDir:
+ properties:
+ medium:
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ properties:
+ volumeClaimTemplate:
+ properties:
+ metadata:
+ type: object
+ spec:
+ properties:
+ accessModes:
+ items:
+ type: string
+ type: array
+ dataSource:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ dataSourceRef:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ selector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ storageClassName:
+ type: string
+ volumeMode:
+ type: string
+ volumeName:
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ properties:
+ fsType:
+ type: string
+ lun:
+ format: int32
+ type: integer
+ readOnly:
+ type: boolean
+ targetWWNs:
+ items:
+ type: string
+ type: array
+ wwids:
+ items:
+ type: string
+ type: array
+ type: object
+ flexVolume:
+ properties:
+ driver:
+ type: string
+ fsType:
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ type: object
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ required:
+ - driver
+ type: object
+ flocker:
+ properties:
+ datasetName:
+ type: string
+ datasetUUID:
+ type: string
+ type: object
+ gcePersistentDisk:
+ properties:
+ fsType:
+ type: string
+ partition:
+ format: int32
+ type: integer
+ pdName:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - pdName
+ type: object
+ gitRepo:
+ properties:
+ directory:
+ type: string
+ repository:
+ type: string
+ revision:
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ properties:
+ endpoints:
+ type: string
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ properties:
+ path:
+ type: string
+ type:
+ type: string
+ required:
+ - path
+ type: object
+ iscsi:
+ properties:
+ chapAuthDiscovery:
+ type: boolean
+ chapAuthSession:
+ type: boolean
+ fsType:
+ type: string
+ initiatorName:
+ type: string
+ iqn:
+ type: string
+ iscsiInterface:
+ type: string
+ lun:
+ format: int32
+ type: integer
+ portals:
+ items:
+ type: string
+ type: array
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ targetPortal:
+ type: string
+ required:
+ - iqn
+ - lun
+ - targetPortal
+ type: object
+ name:
+ type: string
+ nfs:
+ properties:
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ server:
+ type: string
+ required:
+ - path
+ - server
+ type: object
+ persistentVolumeClaim:
+ properties:
+ claimName:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - claimName
+ type: object
+ photonPersistentDisk:
+ properties:
+ fsType:
+ type: string
+ pdID:
+ type: string
+ required:
+ - pdID
+ type: object
+ portworxVolume:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ volumeID:
+ type: string
+ required:
+ - volumeID
+ type: object
+ projected:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ sources:
+ items:
+ properties:
+ configMap:
+ properties:
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ downwardAPI:
+ properties:
+ items:
+ items:
+ properties:
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ required:
+ - path
+ type: object
+ type: array
+ type: object
+ secret:
+ properties:
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ serviceAccountToken:
+ properties:
+ audience:
+ type: string
+ expirationSeconds:
+ format: int64
+ type: integer
+ path:
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ type: object
+ quobyte:
+ properties:
+ group:
+ type: string
+ readOnly:
+ type: boolean
+ registry:
+ type: string
+ tenant:
+ type: string
+ user:
+ type: string
+ volume:
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ properties:
+ fsType:
+ type: string
+ image:
+ type: string
+ keyring:
+ type: string
+ monitors:
+ items:
+ type: string
+ type: array
+ pool:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ user:
+ type: string
+ required:
+ - image
+ - monitors
+ type: object
+ scaleIO:
+ properties:
+ fsType:
+ type: string
+ gateway:
+ type: string
+ protectionDomain:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ sslEnabled:
+ type: boolean
+ storageMode:
+ type: string
+ storagePool:
+ type: string
+ system:
+ type: string
+ volumeName:
+ type: string
+ required:
+ - gateway
+ - secretRef
+ - system
+ type: object
+ secret:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ optional:
+ type: boolean
+ secretName:
+ type: string
+ type: object
+ storageos:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ volumeName:
+ type: string
+ volumeNamespace:
+ type: string
+ type: object
+ vsphereVolume:
+ properties:
+ fsType:
+ type: string
+ storagePolicyID:
+ type: string
+ storagePolicyName:
+ type: string
+ volumePath:
+ type: string
+ required:
+ - volumePath
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ type: object
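+ # Predictor component: framework-specific predictor fields (e.g.
+ # lightgbm below) alongside the same embedded PodSpec fields. A
+ # hypothetical InferenceService using it (the apiVersion group is an
+ # assumption and depends on the release this CRD ships with):
+ #   apiVersion: serving.kserve.io/v1beta1
+ #   kind: InferenceService
+ #   metadata:
+ #     name: lightgbm-sample
+ #   spec:
+ #     predictor:
+ #       lightgbm:
+ #         storageUri: gs://example-bucket/lightgbm/model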
+ predictor:
+ properties:
+ activeDeadlineSeconds:
+ format: int64
+ type: integer
+ affinity:
+ properties:
+ nodeAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ preference:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ properties:
+ nodeSelectorTerms:
+ items:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
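+ # Scheduling affinity for the predictor pod ends here; the standard
+ # embedded PodSpec fields continue below.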
+ automountServiceAccountToken:
+ type: boolean
+ batcher:
+ properties:
+ maxBatchSize:
+ type: integer
+ maxLatency:
+ type: integer
+ timeout:
+ type: integer
+ type: object
+ canaryTrafficPercent:
+ format: int64
+ type: integer
+ containerConcurrency:
+ format: int64
+ type: integer
+ containers:
+ items:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ dnsConfig:
+ properties:
+ nameservers:
+ items:
+ type: string
+ type: array
+ options:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ searches:
+ items:
+ type: string
+ type: array
+ type: object
+ dnsPolicy:
+ type: string
+ enableServiceLinks:
+ type: boolean
+ hostAliases:
+ items:
+ properties:
+ hostnames:
+ items:
+ type: string
+ type: array
+ ip:
+ type: string
+ type: object
+ type: array
+ hostIPC:
+ type: boolean
+ hostNetwork:
+ type: boolean
+ hostPID:
+ type: boolean
+ hostname:
+ type: string
+ imagePullSecrets:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
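+ # LightGBM predictor: container-style schema for a LightGBM model
+ # server, including protocolVersion and the usual probe/resource fields.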
+ lightgbm:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ protocolVersion:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ logger:
+ properties:
+ mode:
+ enum:
+ - all
+ - request
+ - response
+ type: string
+ url:
+ type: string
+ type: object
+ maxReplicas:
+ type: integer
+ minReplicas:
+ type: integer
+ model:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ modelFormat:
+ properties:
+ name:
+ type: string
+ version:
+ type: string
+ required:
+ - name
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ protocolVersion:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtime:
+ type: string
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ nodeName:
+ type: string
+ nodeSelector:
+ additionalProperties:
+ type: string
+ type: object
+ onnx:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ protocolVersion:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ overhead:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ paddle:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ protocolVersion:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ pmml:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ protocolVersion:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ preemptionPolicy:
+ type: string
+ priority:
+ format: int32
+ type: integer
+ priorityClassName:
+ type: string
+ pytorch:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ protocolVersion:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ readinessGates:
+ items:
+ properties:
+ conditionType:
+ type: string
+ required:
+ - conditionType
+ type: object
+ type: array
+ restartPolicy:
+ type: string
+ runtimeClassName:
+ type: string
+ schedulerName:
+ type: string
+ securityContext:
+ properties:
+ fsGroup:
+ format: int64
+ type: integer
+ fsGroupChangePolicy:
+ type: string
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ supplementalGroups:
+ items:
+ format: int64
+ type: integer
+ type: array
+ sysctls:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ serviceAccount:
+ type: string
+ serviceAccountName:
+ type: string
+ setHostnameAsFQDN:
+ type: boolean
+ shareProcessNamespace:
+ type: boolean
+ sklearn:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ protocolVersion:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ subdomain:
+ type: string
+ tensorflow:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ protocolVersion:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeout:
+ format: int64
+ type: integer
+ tolerations:
+ items:
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ format: int64
+ type: integer
+ value:
+ type: string
+ type: object
+ type: array
+ topologySpreadConstraints:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ maxSkew:
+ format: int32
+ type: integer
+ topologyKey:
+ type: string
+ whenUnsatisfiable:
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - topologyKey
+ - whenUnsatisfiable
+ x-kubernetes-list-type: map
+ triton:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ protocolVersion:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ volumes:
+ items:
+ properties:
+ awsElasticBlockStore:
+ properties:
+ fsType:
+ type: string
+ partition:
+ format: int32
+ type: integer
+ readOnly:
+ type: boolean
+ volumeID:
+ type: string
+ required:
+ - volumeID
+ type: object
+ azureDisk:
+ properties:
+ cachingMode:
+ type: string
+ diskName:
+ type: string
+ diskURI:
+ type: string
+ fsType:
+ type: string
+ kind:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - diskName
+ - diskURI
+ type: object
+ azureFile:
+ properties:
+ readOnly:
+ type: boolean
+ secretName:
+ type: string
+ shareName:
+ type: string
+ required:
+ - secretName
+ - shareName
+ type: object
+ cephfs:
+ properties:
+ monitors:
+ items:
+ type: string
+ type: array
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ secretFile:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ user:
+ type: string
+ required:
+ - monitors
+ type: object
+ cinder:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ volumeID:
+ type: string
+ required:
+ - volumeID
+ type: object
+ configMap:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ csi:
+ properties:
+ driver:
+ type: string
+ fsType:
+ type: string
+ nodePublishSecretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ readOnly:
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ items:
+ items:
+ properties:
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ required:
+ - path
+ type: object
+ type: array
+ type: object
+ emptyDir:
+ properties:
+ medium:
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ properties:
+ volumeClaimTemplate:
+ properties:
+ metadata:
+ type: object
+ spec:
+ properties:
+ accessModes:
+ items:
+ type: string
+ type: array
+ dataSource:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ dataSourceRef:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ selector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ storageClassName:
+ type: string
+ volumeMode:
+ type: string
+ volumeName:
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ properties:
+ fsType:
+ type: string
+ lun:
+ format: int32
+ type: integer
+ readOnly:
+ type: boolean
+ targetWWNs:
+ items:
+ type: string
+ type: array
+ wwids:
+ items:
+ type: string
+ type: array
+ type: object
+ flexVolume:
+ properties:
+ driver:
+ type: string
+ fsType:
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ type: object
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ required:
+ - driver
+ type: object
+ flocker:
+ properties:
+ datasetName:
+ type: string
+ datasetUUID:
+ type: string
+ type: object
+ gcePersistentDisk:
+ properties:
+ fsType:
+ type: string
+ partition:
+ format: int32
+ type: integer
+ pdName:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - pdName
+ type: object
+ gitRepo:
+ properties:
+ directory:
+ type: string
+ repository:
+ type: string
+ revision:
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ properties:
+ endpoints:
+ type: string
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ properties:
+ path:
+ type: string
+ type:
+ type: string
+ required:
+ - path
+ type: object
+ iscsi:
+ properties:
+ chapAuthDiscovery:
+ type: boolean
+ chapAuthSession:
+ type: boolean
+ fsType:
+ type: string
+ initiatorName:
+ type: string
+ iqn:
+ type: string
+ iscsiInterface:
+ type: string
+ lun:
+ format: int32
+ type: integer
+ portals:
+ items:
+ type: string
+ type: array
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ targetPortal:
+ type: string
+ required:
+ - iqn
+ - lun
+ - targetPortal
+ type: object
+ name:
+ type: string
+ nfs:
+ properties:
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ server:
+ type: string
+ required:
+ - path
+ - server
+ type: object
+ persistentVolumeClaim:
+ properties:
+ claimName:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - claimName
+ type: object
+ photonPersistentDisk:
+ properties:
+ fsType:
+ type: string
+ pdID:
+ type: string
+ required:
+ - pdID
+ type: object
+ portworxVolume:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ volumeID:
+ type: string
+ required:
+ - volumeID
+ type: object
+ projected:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ sources:
+ items:
+ properties:
+ configMap:
+ properties:
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ downwardAPI:
+ properties:
+ items:
+ items:
+ properties:
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ required:
+ - path
+ type: object
+ type: array
+ type: object
+ secret:
+ properties:
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ serviceAccountToken:
+ properties:
+ audience:
+ type: string
+ expirationSeconds:
+ format: int64
+ type: integer
+ path:
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ type: object
+ quobyte:
+ properties:
+ group:
+ type: string
+ readOnly:
+ type: boolean
+ registry:
+ type: string
+ tenant:
+ type: string
+ user:
+ type: string
+ volume:
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ properties:
+ fsType:
+ type: string
+ image:
+ type: string
+ keyring:
+ type: string
+ monitors:
+ items:
+ type: string
+ type: array
+ pool:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ user:
+ type: string
+ required:
+ - image
+ - monitors
+ type: object
+ scaleIO:
+ properties:
+ fsType:
+ type: string
+ gateway:
+ type: string
+ protectionDomain:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ sslEnabled:
+ type: boolean
+ storageMode:
+ type: string
+ storagePool:
+ type: string
+ system:
+ type: string
+ volumeName:
+ type: string
+ required:
+ - gateway
+ - secretRef
+ - system
+ type: object
+ secret:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ optional:
+ type: boolean
+ secretName:
+ type: string
+ type: object
+ storageos:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ volumeName:
+ type: string
+ volumeNamespace:
+ type: string
+ type: object
+ vsphereVolume:
+ properties:
+ fsType:
+ type: string
+ storagePolicyID:
+ type: string
+ storagePolicyName:
+ type: string
+ volumePath:
+ type: string
+ required:
+ - volumePath
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ xgboost:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ protocolVersion:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ runtimeVersion:
+ type: string
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ storageUri:
+ type: string
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ type: object
+ type: object
+ transformer:
+ properties:
+ activeDeadlineSeconds:
+ format: int64
+ type: integer
+ affinity:
+ properties:
+ nodeAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ preference:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ properties:
+ nodeSelectorTerms:
+ items:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
+ automountServiceAccountToken:
+ type: boolean
+ batcher:
+ properties:
+ maxBatchSize:
+ type: integer
+ maxLatency:
+ type: integer
+ timeout:
+ type: integer
+ type: object
+ canaryTrafficPercent:
+ format: int64
+ type: integer
+ containerConcurrency:
+ format: int64
+ type: integer
+ containers:
+ items:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ items:
+ properties:
+ configMapRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ prefix:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ lifecycle:
+ properties:
+ postStart:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ ports:
+ items:
+ properties:
+ containerPort:
+ format: int32
+ type: integer
+ hostIP:
+ type: string
+ hostPort:
+ format: int32
+ type: integer
+ name:
+ type: string
+ protocol:
+ default: TCP
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ securityContext:
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ capabilities:
+ properties:
+ add:
+ items:
+ type: string
+ type: array
+ drop:
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ type: boolean
+ stdinOnce:
+ type: boolean
+ terminationMessagePath:
+ type: string
+ terminationMessagePolicy:
+ type: string
+ tty:
+ type: boolean
+ volumeDevices:
+ items:
+ properties:
+ devicePath:
+ type: string
+ name:
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ items:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ dnsConfig:
+ properties:
+ nameservers:
+ items:
+ type: string
+ type: array
+ options:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ searches:
+ items:
+ type: string
+ type: array
+ type: object
+ dnsPolicy:
+ type: string
+ enableServiceLinks:
+ type: boolean
+ hostAliases:
+ items:
+ properties:
+ hostnames:
+ items:
+ type: string
+ type: array
+ ip:
+ type: string
+ type: object
+ type: array
+ hostIPC:
+ type: boolean
+ hostNetwork:
+ type: boolean
+ hostPID:
+ type: boolean
+ hostname:
+ type: string
+ imagePullSecrets:
+ items:
+ properties:
+ name:
+ type: string
+ type: object
+ type: array
+ logger:
+ properties:
+ mode:
+ enum:
+ - all
+ - request
+ - response
+ type: string
+ url:
+ type: string
+ type: object
+ maxReplicas:
+ type: integer
+ minReplicas:
+ type: integer
+ nodeName:
+ type: string
+ nodeSelector:
+ additionalProperties:
+ type: string
+ type: object
+ overhead:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ preemptionPolicy:
+ type: string
+ priority:
+ format: int32
+ type: integer
+ priorityClassName:
+ type: string
+ readinessGates:
+ items:
+ properties:
+ conditionType:
+ type: string
+ required:
+ - conditionType
+ type: object
+ type: array
+ restartPolicy:
+ type: string
+ runtimeClassName:
+ type: string
+ schedulerName:
+ type: string
+ securityContext:
+ properties:
+ fsGroup:
+ format: int64
+ type: integer
+ fsGroupChangePolicy:
+ type: string
+ runAsGroup:
+ format: int64
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ format: int64
+ type: integer
+ seLinuxOptions:
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ type: object
+ seccompProfile:
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ supplementalGroups:
+ items:
+ format: int64
+ type: integer
+ type: array
+ sysctls:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ windowsOptions:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ type: object
+ type: object
+ serviceAccount:
+ type: string
+ serviceAccountName:
+ type: string
+ setHostnameAsFQDN:
+ type: boolean
+ shareProcessNamespace:
+ type: boolean
+ subdomain:
+ type: string
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeout:
+ format: int64
+ type: integer
+ tolerations:
+ items:
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ format: int64
+ type: integer
+ value:
+ type: string
+ type: object
+ type: array
+ topologySpreadConstraints:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ maxSkew:
+ format: int32
+ type: integer
+ topologyKey:
+ type: string
+ whenUnsatisfiable:
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - topologyKey
+ - whenUnsatisfiable
+ x-kubernetes-list-type: map
+ volumes:
+ items:
+ properties:
+ awsElasticBlockStore:
+ properties:
+ fsType:
+ type: string
+ partition:
+ format: int32
+ type: integer
+ readOnly:
+ type: boolean
+ volumeID:
+ type: string
+ required:
+ - volumeID
+ type: object
+ azureDisk:
+ properties:
+ cachingMode:
+ type: string
+ diskName:
+ type: string
+ diskURI:
+ type: string
+ fsType:
+ type: string
+ kind:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - diskName
+ - diskURI
+ type: object
+ azureFile:
+ properties:
+ readOnly:
+ type: boolean
+ secretName:
+ type: string
+ shareName:
+ type: string
+ required:
+ - secretName
+ - shareName
+ type: object
+ cephfs:
+ properties:
+ monitors:
+ items:
+ type: string
+ type: array
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ secretFile:
+ type: string
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ user:
+ type: string
+ required:
+ - monitors
+ type: object
+ cinder:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ volumeID:
+ type: string
+ required:
+ - volumeID
+ type: object
+ configMap:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ csi:
+ properties:
+ driver:
+ type: string
+ fsType:
+ type: string
+ nodePublishSecretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ readOnly:
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ items:
+ items:
+ properties:
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ required:
+ - path
+ type: object
+ type: array
+ type: object
+ emptyDir:
+ properties:
+ medium:
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ properties:
+ volumeClaimTemplate:
+ properties:
+ metadata:
+ type: object
+ spec:
+ properties:
+ accessModes:
+ items:
+ type: string
+ type: array
+ dataSource:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ dataSourceRef:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ selector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ storageClassName:
+ type: string
+ volumeMode:
+ type: string
+ volumeName:
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ properties:
+ fsType:
+ type: string
+ lun:
+ format: int32
+ type: integer
+ readOnly:
+ type: boolean
+ targetWWNs:
+ items:
+ type: string
+ type: array
+ wwids:
+ items:
+ type: string
+ type: array
+ type: object
+ flexVolume:
+ properties:
+ driver:
+ type: string
+ fsType:
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ type: object
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ required:
+ - driver
+ type: object
+ flocker:
+ properties:
+ datasetName:
+ type: string
+ datasetUUID:
+ type: string
+ type: object
+ gcePersistentDisk:
+ properties:
+ fsType:
+ type: string
+ partition:
+ format: int32
+ type: integer
+ pdName:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - pdName
+ type: object
+ gitRepo:
+ properties:
+ directory:
+ type: string
+ repository:
+ type: string
+ revision:
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ properties:
+ endpoints:
+ type: string
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ properties:
+ path:
+ type: string
+ type:
+ type: string
+ required:
+ - path
+ type: object
+ iscsi:
+ properties:
+ chapAuthDiscovery:
+ type: boolean
+ chapAuthSession:
+ type: boolean
+ fsType:
+ type: string
+ initiatorName:
+ type: string
+ iqn:
+ type: string
+ iscsiInterface:
+ type: string
+ lun:
+ format: int32
+ type: integer
+ portals:
+ items:
+ type: string
+ type: array
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ targetPortal:
+ type: string
+ required:
+ - iqn
+ - lun
+ - targetPortal
+ type: object
+ name:
+ type: string
+ nfs:
+ properties:
+ path:
+ type: string
+ readOnly:
+ type: boolean
+ server:
+ type: string
+ required:
+ - path
+ - server
+ type: object
+ persistentVolumeClaim:
+ properties:
+ claimName:
+ type: string
+ readOnly:
+ type: boolean
+ required:
+ - claimName
+ type: object
+ photonPersistentDisk:
+ properties:
+ fsType:
+ type: string
+ pdID:
+ type: string
+ required:
+ - pdID
+ type: object
+ portworxVolume:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ volumeID:
+ type: string
+ required:
+ - volumeID
+ type: object
+ projected:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ sources:
+ items:
+ properties:
+ configMap:
+ properties:
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ downwardAPI:
+ properties:
+ items:
+ items:
+ properties:
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ required:
+ - path
+ type: object
+ type: array
+ type: object
+ secret:
+ properties:
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ type: string
+ optional:
+ type: boolean
+ type: object
+ serviceAccountToken:
+ properties:
+ audience:
+ type: string
+ expirationSeconds:
+ format: int64
+ type: integer
+ path:
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ type: object
+ quobyte:
+ properties:
+ group:
+ type: string
+ readOnly:
+ type: boolean
+ registry:
+ type: string
+ tenant:
+ type: string
+ user:
+ type: string
+ volume:
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ properties:
+ fsType:
+ type: string
+ image:
+ type: string
+ keyring:
+ type: string
+ monitors:
+ items:
+ type: string
+ type: array
+ pool:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ user:
+ type: string
+ required:
+ - image
+ - monitors
+ type: object
+ scaleIO:
+ properties:
+ fsType:
+ type: string
+ gateway:
+ type: string
+ protectionDomain:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ sslEnabled:
+ type: boolean
+ storageMode:
+ type: string
+ storagePool:
+ type: string
+ system:
+ type: string
+ volumeName:
+ type: string
+ required:
+ - gateway
+ - secretRef
+ - system
+ type: object
+ secret:
+ properties:
+ defaultMode:
+ format: int32
+ type: integer
+ items:
+ items:
+ properties:
+ key:
+ type: string
+ mode:
+ format: int32
+ type: integer
+ path:
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ optional:
+ type: boolean
+ secretName:
+ type: string
+ type: object
+ storageos:
+ properties:
+ fsType:
+ type: string
+ readOnly:
+ type: boolean
+ secretRef:
+ properties:
+ name:
+ type: string
+ type: object
+ volumeName:
+ type: string
+ volumeNamespace:
+ type: string
+ type: object
+ vsphereVolume:
+ properties:
+ fsType:
+ type: string
+ storagePolicyID:
+ type: string
+ storagePolicyName:
+ type: string
+ volumePath:
+ type: string
+ required:
+ - volumePath
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ required:
+ - predictor
+ type: object
+ status:
+ properties:
+ address:
+ properties:
+ url:
+ type: string
+ type: object
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ components:
+ additionalProperties:
+ properties:
+ address:
+ properties:
+ url:
+ type: string
+ type: object
+ latestCreatedRevision:
+ type: string
+ latestReadyRevision:
+ type: string
+ latestRolledoutRevision:
+ type: string
+ previousRolledoutRevision:
+ type: string
+ traffic:
+ items:
+ properties:
+ configurationName:
+ type: string
+ latestRevision:
+ type: boolean
+ percent:
+ format: int64
+ type: integer
+ revisionName:
+ type: string
+ tag:
+ type: string
+ url:
+ type: string
+ type: object
+ type: array
+ url:
+ type: string
+ type: object
+ type: object
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ severity:
+ type: string
+ status:
+ type: string
+ type:
+ type: string
+ required:
+ - status
+ - type
+ type: object
+ type: array
+ observedGeneration:
+ format: int64
+ type: integer
+ url:
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.4.0
+ creationTimestamp: null
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ name: servingruntimes.serving.kserve.io
+spec:
+ group: serving.kserve.io
+ names:
+ kind: ServingRuntime
+ listKind: ServingRuntimeList
+ plural: servingruntimes
+ singular: servingruntime
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .spec.disabled
+ name: Disabled
+ type: boolean
+ - jsonPath: .spec.supportedModelFormats[*].name
+ name: ModelType
+ type: string
+ - jsonPath: .spec.containers[*].name
+ name: Containers
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ type: string
+ kind:
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ affinity:
+ properties:
+ nodeAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ preference:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ properties:
+ nodeSelectorTerms:
+ items:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ podAffinityTerm:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ items:
+ properties:
+ labelSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaceSelector:
+ properties:
+ matchExpressions:
+ items:
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ namespaces:
+ items:
+ type: string
+ type: array
+ topologyKey:
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
+ builtInAdapter:
+ properties:
+ memBufferBytes:
+ type: integer
+ modelLoadingTimeoutMillis:
+ type: integer
+ runtimeManagementPort:
+ type: integer
+ serverType:
+ enum:
+ - triton
+ - mlserver
+ type: string
+ type: object
+ containers:
+ items:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ command:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ valueFrom:
+ properties:
+ configMapKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ properties:
+ apiVersion:
+ type: string
+ fieldPath:
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ properties:
+ containerName:
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ image:
+ type: string
+ imagePullPolicy:
+ type: string
+ livenessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ name:
+ type: string
+ readinessProbe:
+ properties:
+ exec:
+ properties:
+ command:
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ format: int32
+ type: integer
+ httpGet:
+ properties:
+ host:
+ type: string
+ httpHeaders:
+ items:
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ format: int32
+ type: integer
+ periodSeconds:
+ format: int32
+ type: integer
+ successThreshold:
+ format: int32
+ type: integer
+ tcpSocket:
+ properties:
+ host:
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ format: int64
+ type: integer
+ timeoutSeconds:
+ format: int32
+ type: integer
+ type: object
+ resources:
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ type: object
+ workingDir:
+ type: string
+ type: object
+ type: array
+ disabled:
+ type: boolean
+ grpcDataEndpoint:
+ type: string
+ grpcEndpoint:
+ type: string
+ httpDataEndpoint:
+ type: string
+ multiModel:
+ type: boolean
+ nodeSelector:
+ additionalProperties:
+ type: string
+ type: object
+ replicas:
+ type: integer
+ storageHelper:
+ properties:
+ disabled:
+ type: boolean
+ type: object
+ supportedModelFormats:
+ items:
+ properties:
+ autoSelect:
+ type: boolean
+ name:
+ type: string
+ version:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ tolerations:
+ items:
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ format: int64
+ type: integer
+ value:
+ type: string
+ type: object
+ type: array
+ required:
+ - containers
+ type: object
+ status:
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.4.0
+ creationTimestamp: null
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ name: trainedmodels.serving.kserve.io
+spec:
+ group: serving.kserve.io
+ names:
+ kind: TrainedModel
+ listKind: TrainedModelList
+ plural: trainedmodels
+ shortNames:
+ - tm
+ singular: trainedmodel
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .status.url
+ name: URL
+ type: string
+ - jsonPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ type: string
+ kind:
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ inferenceService:
+ type: string
+ model:
+ properties:
+ framework:
+ type: string
+ memory:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ storageUri:
+ type: string
+ required:
+ - framework
+ - memory
+ - storageUri
+ type: object
+ required:
+ - inferenceService
+ - model
+ type: object
+ status:
+ properties:
+ address:
+ properties:
+ url:
+ type: string
+ type: object
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ severity:
+ type: string
+ status:
+ type: string
+ type:
+ type: string
+ required:
+ - status
+ - type
+ type: object
+ type: array
+ observedGeneration:
+ format: int64
+ type: integer
+ url:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ app: kserve
+ app.kubernetes.io/instance: kserve-controller-manager
+ app.kubernetes.io/managed-by: kserve-controller-manager
+ app.kubernetes.io/name: kserve
+ name: kserve-controller-manager
+ namespace: kubeflow
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ name: leader-election-role
+ namespace: kubeflow
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - ""
+ resources:
+ - configmaps/status
+ verbs:
+ - get
+ - update
+ - patch
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ name: kserve-manager-role
+rules:
+- apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - mutatingwebhookconfigurations
+ - validatingwebhookconfigurations
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - deployments
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - create
+ - get
+ - list
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - networking.istio.io
+ resources:
+ - virtualservices
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - networking.istio.io
+ resources:
+ - virtualservices/finalizers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - networking.istio.io
+ resources:
+ - virtualservices/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingresses
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - serving.knative.dev
+ resources:
+ - services
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - serving.knative.dev
+ resources:
+ - services/finalizers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - serving.knative.dev
+ resources:
+ - services/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - serving.kserve.io
+ resources:
+ - clusterservingruntimes
+ - clusterservingruntimes/finalizers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - serving.kserve.io
+ resources:
+ - clusterservingruntimes/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - serving.kserve.io
+ resources:
+ - inferenceservices
+ - inferenceservices/finalizers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - serving.kserve.io
+ resources:
+ - inferenceservices/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - serving.kserve.io
+ resources:
+ - servingruntimes
+ - servingruntimes/finalizers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - serving.kserve.io
+ resources:
+ - servingruntimes/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - serving.kserve.io
+ resources:
+ - trainedmodels
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - serving.kserve.io
+ resources:
+ - trainedmodels/status
+ verbs:
+ - get
+ - patch
+ - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ name: kserve-proxy-role
+rules:
+- apiGroups:
+ - authentication.k8s.io
+ resources:
+ - tokenreviews
+ verbs:
+ - create
+- apiGroups:
+ - authorization.k8s.io
+ resources:
+ - subjectaccessreviews
+ verbs:
+ - create
+---
+aggregationRule:
+ clusterRoleSelectors:
+ - matchLabels:
+ rbac.authorization.kubeflow.org/aggregate-to-kubeflow-kserve-admin: "true"
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ rbac.authorization.kubeflow.org/aggregate-to-kubeflow-admin: "true"
+ name: kubeflow-kserve-admin
+rules: []
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ rbac.authorization.kubeflow.org/aggregate-to-kubeflow-edit: "true"
+ rbac.authorization.kubeflow.org/aggregate-to-kubeflow-kserve-admin: "true"
+ name: kubeflow-kserve-edit
+rules:
+- apiGroups:
+ - serving.kserve.io
+ resources:
+ - inferenceservices
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - deletecollection
+ - patch
+ - update
+- apiGroups:
+ - serving.knative.dev
+ resources:
+ - services
+ - services/status
+ - routes
+ - routes/status
+ - configurations
+ - configurations/status
+ - revisions
+ - revisions/status
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - deletecollection
+ - patch
+ - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ rbac.authorization.kubeflow.org/aggregate-to-kubeflow-view: "true"
+ name: kubeflow-kserve-view
+rules:
+- apiGroups:
+ - serving.kserve.io
+ resources:
+ - inferenceservices
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - serving.knative.dev
+ resources:
+ - services
+ - services/status
+ - routes
+ - routes/status
+ - configurations
+ - configurations/status
+ - revisions
+ - revisions/status
+ verbs:
+ - get
+ - list
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ name: leader-election-rolebinding
+ namespace: kubeflow
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: leader-election-role
+subjects:
+- kind: ServiceAccount
+ name: kserve-controller-manager
+ namespace: kserve
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ name: kserve-manager-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: kserve-manager-role
+subjects:
+- kind: ServiceAccount
+ name: kserve-controller-manager
+ namespace: kserve
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ name: kserve-proxy-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: kserve-proxy-role
+subjects:
+- kind: ServiceAccount
+ name: kserve-controller-manager
+ namespace: kserve
+---
+apiVersion: v1
+data:
+ agent: |-
+ {
+ "image" : "kserve/agent:v0.8.0",
+ "memoryRequest": "100Mi",
+ "memoryLimit": "1Gi",
+ "cpuRequest": "100m",
+ "cpuLimit": "1"
+ }
+ batcher: |-
+ {
+ "image" : "kserve/agent:v0.8.0",
+ "memoryRequest": "1Gi",
+ "memoryLimit": "1Gi",
+ "cpuRequest": "1",
+ "cpuLimit": "1"
+ }
+ credentials: |-
+ {
+ "gcs": {
+ "gcsCredentialFileName": "gcloud-application-credentials.json"
+ },
+ "s3": {
+ "s3AccessKeyIDName": "AWS_ACCESS_KEY_ID",
+ "s3SecretAccessKeyName": "AWS_SECRET_ACCESS_KEY"
+ }
+ }
+ deploy: |-
+ {
+ "defaultDeploymentMode": "Serverless"
+ }
+ explainers: |-
+ {
+ "alibi": {
+ "image" : "kserve/alibi-explainer",
+ "defaultImageVersion": "latest"
+ },
+ "aix": {
+ "image" : "kserve/aix-explainer",
+ "defaultImageVersion": "latest"
+ },
+ "art": {
+ "image" : "kserve/art-explainer",
+ "defaultImageVersion": "latest"
+ }
+ }
+ ingress: |-
+ {
+ "ingressGateway" : "kubeflow/kubeflow-gateway",
+ "ingressService" : "istio-ingressgateway.istio-system.svc.cluster.local",
+ "localGateway" : "knative-serving/knative-local-gateway",
+ "localGatewayService" : "knative-local-gateway.istio-system.svc.cluster.local",
+ "ingressDomain" : "example.com"
+ }
+ logger: |-
+ {
+ "image" : "kserve/agent:v0.8.0",
+ "memoryRequest": "100Mi",
+ "memoryLimit": "1Gi",
+ "cpuRequest": "100m",
+ "cpuLimit": "1",
+ "defaultUrl": "http://default-broker"
+ }
+ predictors: |-
+ {
+ "tensorflow": {
+ "supportedFrameworks": [
+ "tensorflow"
+ ],
+ "multiModelServer": false
+ },
+ "onnx": {
+ "supportedFrameworks": [
+ "onnx"
+ ],
+ "multiModelServer": false
+ },
+ "sklearn": {
+ "v1": {
+ "supportedFrameworks": [
+ "sklearn"
+ ],
+ "multiModelServer": true
+ },
+ "v2": {
+ "supportedFrameworks": [
+ "sklearn"
+ ],
+ "multiModelServer": true
+ }
+ },
+ "xgboost": {
+ "v1": {
+ "supportedFrameworks": [
+ "xgboost"
+ ],
+ "multiModelServer": true
+ },
+ "v2": {
+ "supportedFrameworks": [
+ "xgboost"
+ ],
+ "multiModelServer": true
+ }
+ },
+ "pytorch": {
+ "supportedFrameworks": [
+ "pytorch"
+ ],
+ "multiModelServer": false
+ },
+ "triton": {
+ "supportedFrameworks": [
+ "tensorrt",
+ "tensorflow",
+ "onnx",
+ "pytorch"
+ ],
+ "multiModelServer": true
+ },
+ "pmml": {
+ "supportedFrameworks": [
+ "pmml"
+ ],
+ "multiModelServer": false
+ },
+ "lightgbm": {
+ "supportedFrameworks": [
+ "lightgbm"
+ ],
+ "multiModelServer": false
+ },
+ "paddle": {
+ "supportedFrameworks": [
+ "paddle"
+ ],
+ "multiModelServer": false
+ }
+ }
+ storageInitializer: |-
+ {
+ "image" : "kserve/storage-initializer:v0.8.0",
+ "memoryRequest": "100Mi",
+ "memoryLimit": "1Gi",
+ "cpuRequest": "100m",
+ "cpuLimit": "1"
+ }
+ transformers: |-
+ {
+ }
+kind: ConfigMap
+metadata:
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ name: inferenceservice-config
+ namespace: kubeflow
+---
+apiVersion: v1
+data:
+ ingressGateway: kubeflow/kubeflow-gateway
+kind: ConfigMap
+metadata:
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ name: kserve-config
+ namespace: kubeflow
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ name: kserve-webhook-server-secret
+ namespace: kubeflow
+---
+apiVersion: v1
+kind: Service
+metadata:
+ annotations:
+ prometheus.io/port: "8443"
+ prometheus.io/scheme: https
+ prometheus.io/scrape: "true"
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ control-plane: kserve-controller-manager
+ controller-tools.k8s.io: "1.0"
+ name: kserve-controller-manager-metrics-service
+ namespace: kubeflow
+spec:
+ ports:
+ - name: https
+ port: 8443
+ targetPort: https
+ selector:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ control-plane: kserve-controller-manager
+ controller-tools.k8s.io: "1.0"
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ control-plane: kserve-controller-manager
+ controller-tools.k8s.io: "1.0"
+ name: kserve-controller-manager-service
+ namespace: kubeflow
+spec:
+ ports:
+ - port: 8443
+ protocol: TCP
+ targetPort: https
+ selector:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ control-plane: kserve-controller-manager
+ controller-tools.k8s.io: "1.0"
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ name: kserve-webhook-server-service
+ namespace: kubeflow
+spec:
+ ports:
+ - port: 443
+ targetPort: webhook-server
+ selector:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ control-plane: kserve-controller-manager
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ control-plane: kserve-controller-manager
+ controller-tools.k8s.io: "1.0"
+ name: kserve-controller-manager
+ namespace: kubeflow
+spec:
+ selector:
+ matchLabels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ control-plane: kserve-controller-manager
+ controller-tools.k8s.io: "1.0"
+ serviceName: controller-manager-service
+ template:
+ metadata:
+ annotations:
+ sidecar.istio.io/inject: "false"
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ control-plane: kserve-controller-manager
+ controller-tools.k8s.io: "1.0"
+ spec:
+ containers:
+ - args:
+ - --metrics-addr=127.0.0.1:8080
+ command:
+ - /manager
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SECRET_NAME
+ value: kserve-webhook-server-cert
+ image: kserve/kserve-controller:v0.8.0
+ imagePullPolicy: Always
+ name: manager
+ ports:
+ - containerPort: 9443
+ name: webhook-server
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ memory: 300Mi
+ requests:
+ cpu: 100m
+ memory: 200Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ volumeMounts:
+ - mountPath: /tmp/k8s-webhook-server/serving-certs
+ name: cert
+ readOnly: true
+ - args:
+ - --secure-listen-address=0.0.0.0:8443
+ - --upstream=http://127.0.0.1:8080/
+ - --logtostderr=true
+ - --v=10
+ image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0
+ name: kube-rbac-proxy
+ ports:
+ - containerPort: 8443
+ name: https
+ protocol: TCP
+ securityContext:
+ runAsNonRoot: true
+ serviceAccountName: kserve-controller-manager
+ terminationGracePeriodSeconds: 10
+ volumes:
+ - name: cert
+ secret:
+ defaultMode: 420
+ secretName: kserve-webhook-server-cert
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ name: serving-cert
+ namespace: kubeflow
+spec:
+ commonName: kserve-webhook-server-service.kubeflow.svc
+ dnsNames:
+ - kserve-webhook-server-service.kubeflow.svc
+ issuerRef:
+ kind: Issuer
+ name: selfsigned-issuer
+ secretName: kserve-webhook-server-cert
+---
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ name: selfsigned-issuer
+ namespace: kubeflow
+spec:
+ selfSigned: {}
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: MutatingWebhookConfiguration
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: kubeflow/serving-cert
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ name: inferenceservice.serving.kserve.io
+webhooks:
+- admissionReviewVersions:
+ - v1beta1
+ clientConfig:
+ caBundle: Cg==
+ service:
+ name: kserve-webhook-server-service
+ namespace: kubeflow
+ path: /mutate-serving-kserve-io-v1beta1-inferenceservice
+ failurePolicy: Fail
+ name: inferenceservice.kserve-webhook-server.defaulter
+ rules:
+ - apiGroups:
+ - serving.kserve.io
+ apiVersions:
+ - v1beta1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - inferenceservices
+ sideEffects: None
+- admissionReviewVersions:
+ - v1beta1
+ clientConfig:
+ caBundle: Cg==
+ service:
+ name: kserve-webhook-server-service
+ namespace: kubeflow
+ path: /mutate-pods
+ failurePolicy: Fail
+ name: inferenceservice.kserve-webhook-server.pod-mutator
+ namespaceSelector:
+ matchExpressions:
+ - key: control-plane
+ operator: DoesNotExist
+ objectSelector:
+ matchExpressions:
+ - key: serving.kserve.io/inferenceservice
+ operator: Exists
+ rules:
+ - apiGroups:
+ - ""
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - pods
+ sideEffects: None
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: kubeflow/serving-cert
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ name: inferenceservice.serving.kserve.io
+webhooks:
+- admissionReviewVersions:
+ - v1beta1
+ clientConfig:
+ caBundle: Cg==
+ service:
+ name: kserve-webhook-server-service
+ namespace: kubeflow
+ path: /validate-serving-kserve-io-v1beta1-inferenceservice
+ failurePolicy: Fail
+ name: inferenceservice.kserve-webhook-server.validator
+ rules:
+ - apiGroups:
+ - serving.kserve.io
+ apiVersions:
+ - v1beta1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - inferenceservices
+ sideEffects: None
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: kubeflow/serving-cert
+ labels:
+ app: kserve
+ app.kubernetes.io/name: kserve
+ name: trainedmodel.serving.kserve.io
+webhooks:
+- admissionReviewVersions:
+ - v1beta1
+ clientConfig:
+ caBundle: Cg==
+ service:
+ name: kserve-webhook-server-service
+ namespace: kubeflow
+ path: /validate-serving-kserve-io-v1alpha1-trainedmodel
+ failurePolicy: Fail
+ name: trainedmodel.kserve-webhook-server.validator
+ rules:
+ - apiGroups:
+ - serving.kserve.io
+ apiVersions:
+ - v1alpha1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - trainedmodels
+ sideEffects: None
+---
+apiVersion: serving.kserve.io/v1alpha1
+kind: ClusterServingRuntime
+metadata:
+ name: kserve-lgbserver
+spec:
+ containers:
+ - args:
+ - --model_name={{.Name}}
+ - --model_dir=/mnt/models
+ - --http_port=8080
+ - --nthread=1
+ image: kserve/lgbserver:v0.8.0
+ name: kserve-container
+ resources:
+ limits:
+ cpu: "1"
+ memory: 2Gi
+ requests:
+ cpu: "1"
+ memory: 2Gi
+ supportedModelFormats:
+ - autoSelect: true
+ name: lightgbm
+ version: "2"
+---
+apiVersion: serving.kserve.io/v1alpha1
+kind: ClusterServingRuntime
+metadata:
+ name: kserve-mlserver
+spec:
+ containers:
+ - env:
+ - name: MLSERVER_MODEL_IMPLEMENTATION
+ value: '{{.Labels.modelClass}}'
+ - name: MLSERVER_HTTP_PORT
+ value: "8080"
+ - name: MLSERVER_GRPC_PORT
+ value: "9000"
+ - name: MODELS_DIR
+ value: /mnt/models
+ image: docker.io/seldonio/mlserver:0.5.3
+ name: kserve-container
+ resources:
+ limits:
+ cpu: "1"
+ memory: 2Gi
+ requests:
+ cpu: "1"
+ memory: 2Gi
+ supportedModelFormats:
+ - name: sklearn
+ version: "0"
+ - name: xgboost
+ version: "1"
+ - name: lightgbm
+ version: "3"
+ - autoSelect: true
+ name: mlflow
+ version: "1"
+---
+apiVersion: serving.kserve.io/v1alpha1
+kind: ClusterServingRuntime
+metadata:
+ name: kserve-paddleserver
+spec:
+ containers:
+ - args:
+ - --model_name={{.Name}}
+ - --model_dir=/mnt/models
+ - --http_port=8080
+ image: kserve/paddleserver:v0.8.0
+ name: kserve-container
+ resources:
+ limits:
+ cpu: "1"
+ memory: 2Gi
+ requests:
+ cpu: "1"
+ memory: 2Gi
+ supportedModelFormats:
+ - autoSelect: true
+ name: paddle
+ version: "2"
+---
+apiVersion: serving.kserve.io/v1alpha1
+kind: ClusterServingRuntime
+metadata:
+ name: kserve-pmmlserver
+spec:
+ containers:
+ - args:
+ - --model_name={{.Name}}
+ - --model_dir=/mnt/models
+ - --http_port=8080
+ image: kserve/pmmlserver:v0.8.0
+ name: kserve-container
+ resources:
+ limits:
+ cpu: "1"
+ memory: 2Gi
+ requests:
+ cpu: "1"
+ memory: 2Gi
+ supportedModelFormats:
+ - autoSelect: true
+ name: pmml
+ version: "3"
+ - autoSelect: true
+ name: pmml
+ version: "4"
+---
+apiVersion: serving.kserve.io/v1alpha1
+kind: ClusterServingRuntime
+metadata:
+ name: kserve-sklearnserver
+spec:
+ containers:
+ - args:
+ - --model_name={{.Name}}
+ - --model_dir=/mnt/models
+ - --http_port=8080
+ image: kserve/sklearnserver:v0.8.0
+ name: kserve-container
+ resources:
+ limits:
+ cpu: "1"
+ memory: 2Gi
+ requests:
+ cpu: "1"
+ memory: 2Gi
+ supportedModelFormats:
+ - autoSelect: true
+ name: sklearn
+ version: "1"
+---
+apiVersion: serving.kserve.io/v1alpha1
+kind: ClusterServingRuntime
+metadata:
+ name: kserve-tensorflow-serving
+spec:
+ containers:
+ - args:
+ - --model_name={{.Name}}
+ - --port=9000
+ - --rest_api_port=8080
+ - --model_base_path=/mnt/models
+ - --rest_api_timeout_in_ms=60000
+ command:
+ - /usr/bin/tensorflow_model_server
+ image: tensorflow/serving:2.6.2
+ name: kserve-container
+ resources:
+ limits:
+ cpu: "1"
+ memory: 2Gi
+ requests:
+ cpu: "1"
+ memory: 2Gi
+ supportedModelFormats:
+ - autoSelect: true
+ name: tensorflow
+ version: "1"
+ - autoSelect: true
+ name: tensorflow
+ version: "2"
+---
+apiVersion: serving.kserve.io/v1alpha1
+kind: ClusterServingRuntime
+metadata:
+ name: kserve-torchserve
+spec:
+ containers:
+ - args:
+ - torchserve
+ - --start
+ - --model-store=/mnt/models/model-store
+ - --ts-config=/mnt/models/config/config.properties
+ env:
+ - name: TS_SERVICE_ENVELOPE
+ value: '{{.Labels.serviceEnvelope}}'
+ image: kserve/torchserve-kfs:0.5.3
+ name: kserve-container
+ resources:
+ limits:
+ cpu: "1"
+ memory: 2Gi
+ requests:
+ cpu: "1"
+ memory: 2Gi
+ supportedModelFormats:
+ - autoSelect: true
+ name: pytorch
+ version: "1"
+---
+apiVersion: serving.kserve.io/v1alpha1
+kind: ClusterServingRuntime
+metadata:
+ name: kserve-tritonserver
+spec:
+ containers:
+ - args:
+ - tritonserver
+ - --model-store=/mnt/models
+ - --grpc-port=9000
+ - --http-port=8080
+ - --allow-grpc=true
+ - --allow-http=true
+ image: nvcr.io/nvidia/tritonserver:21.09-py3
+ name: kserve-container
+ resources:
+ limits:
+ cpu: "1"
+ memory: 2Gi
+ requests:
+ cpu: "1"
+ memory: 2Gi
+ supportedModelFormats:
+ - name: tensorrt
+ version: "8"
+ - name: tensorflow
+ version: "1"
+ - name: tensorflow
+ version: "2"
+ - autoSelect: true
+ name: onnx
+ version: "1"
+ - name: pytorch
+ version: "1"
+ - autoSelect: true
+ name: triton
+ version: "2"
+---
+apiVersion: serving.kserve.io/v1alpha1
+kind: ClusterServingRuntime
+metadata:
+ name: kserve-xgbserver
+spec:
+ containers:
+ - args:
+ - --model_name={{.Name}}
+ - --model_dir=/mnt/models
+ - --http_port=8080
+ - --nthread=1
+ image: kserve/xgbserver:v0.8.0
+ name: kserve-container
+ resources:
+ limits:
+ cpu: "1"
+ memory: 2Gi
+ requests:
+ cpu: "1"
+ memory: 2Gi
+ supportedModelFormats:
+ - autoSelect: true
+ name: xgboost
+ version: "1"
diff --git a/manifests/charts/values.yaml b/manifests/charts/values.yaml
index d6d5c40e730..334a190bb78 100644
--- a/manifests/charts/values.yaml
+++ b/manifests/charts/values.yaml
@@ -7,7 +7,7 @@ kserve:
accessKeyIdName: AWS_ACCESS_KEY_ID
secretAccessKeyName: AWS_SECRET_ACCESS_KEY
controller:
- deploymentMode: "RawDeployment"
+ deploymentMode: "Serverless"
gateway:
domain: example.com
localGateway:
diff --git a/python/kserve/setup.py b/python/kserve/setup.py
index b327def88a5..67aef373eef 100644
--- a/python/kserve/setup.py
+++ b/python/kserve/setup.py
@@ -28,7 +28,7 @@
setuptools.setup(
name='kserve',
- version='0.8.0rc0',
+ version='0.8.0',
author="The KServe Authors",
author_email='[email protected], [email protected], [email protected]',
license="Apache License Version 2.0",
diff --git a/test/e2e/predictor/test_lightgbm.py b/test/e2e/predictor/test_lightgbm.py
index d7003a933e5..46e7bf81284 100644
--- a/test/e2e/predictor/test_lightgbm.py
+++ b/test/e2e/predictor/test_lightgbm.py
@@ -33,7 +33,7 @@ def test_lightgbm_kserve():
predictor = V1beta1PredictorSpec(
min_replicas=1,
lightgbm=V1beta1LightGBMSpec(
- storage_uri="gs://kfserving-examples/models/lightgbm",
+ storage_uri="gs://kfserving-examples/models/lightgbm/iris",
resources=V1ResourceRequirements(
requests={"cpu": "100m", "memory": "256Mi"},
limits={"cpu": "100m", "memory": "256Mi"},
@@ -67,7 +67,7 @@ def test_lightgbm_runtime_kserve():
model_format=V1beta1ModelFormat(
name="lightgbm",
),
- storage_uri="gs://kfserving-examples/models/lightgbm",
+ storage_uri="gs://kfserving-examples/models/lightgbm/iris",
resources=V1ResourceRequirements(
requests={"cpu": "100m", "memory": "256Mi"},
limits={"cpu": "100m", "memory": "256Mi"},
diff --git a/test/scripts/post-e2e-tests.sh b/test/scripts/post-e2e-tests.sh
index 1970a78f15a..258d08c8cdf 100755
--- a/test/scripts/post-e2e-tests.sh
+++ b/test/scripts/post-e2e-tests.sh
@@ -25,6 +25,7 @@ pip3 install awscli --upgrade --user
aws eks update-kubeconfig --region=${AWS_REGION} --name=${CLUSTER_NAME}
# Print e2e test events
+kubectl describe pods -n kserve-ci-e2e-test
kubectl get events -n kserve-ci-e2e-test
# Print controller logs
|
strawberry-graphql__strawberry-128 | Lists being marked as Optional
When defining a list the resulting schema marks the list as optional (or nullable in GraphQL terms) even if it wasn't wrapped in `typing.Optional`, we should fix that :)
| [
{
"content": "from collections.abc import AsyncGenerator\n\nfrom graphql import (\n GraphQLBoolean,\n GraphQLFloat,\n GraphQLID,\n GraphQLInt,\n GraphQLList,\n GraphQLNonNull,\n GraphQLString,\n GraphQLUnionType,\n)\n\nfrom .exceptions import UnallowedReturnTypeForUnion, WrongReturnTypeForUnion\nfrom .scalars import ID\nfrom .utils.typing import is_union\n\n\nREGISTRY = {\n str: GraphQLString,\n int: GraphQLInt,\n float: GraphQLFloat,\n bool: GraphQLBoolean,\n ID: GraphQLID,\n}\n\n\n# TODO: make so that we don't pass force optional\n# we use that when trying to get the type for a\n# option field (which can either be a scalar or an object type)\ndef get_graphql_type_for_annotation(\n annotation, field_name: str, force_optional: bool = False\n):\n # TODO: this might lead to issues with types that have a field value\n is_field_optional = force_optional\n\n if hasattr(annotation, \"field\"):\n graphql_type = annotation.field\n else:\n annotation_name = getattr(annotation, \"_name\", None)\n\n if annotation_name == \"List\":\n list_of_type = get_graphql_type_for_annotation(\n annotation.__args__[0], field_name\n )\n\n return GraphQLList(list_of_type)\n\n annotation_origin = getattr(annotation, \"__origin__\", None)\n\n if annotation_origin == AsyncGenerator:\n # async generators are used in subscription, we only need the yield type\n # https://docs.python.org/3/library/typing.html#typing.AsyncGenerator\n return get_graphql_type_for_annotation(annotation.__args__[0], field_name)\n\n elif is_union(annotation):\n types = annotation.__args__\n non_none_types = [x for x in types if x != None.__class__] # noqa:E721\n\n # optionals are represented as Union[type, None]\n if len(non_none_types) == 1:\n is_field_optional = True\n graphql_type = get_graphql_type_for_annotation(\n non_none_types[0], field_name, force_optional=True\n )\n else:\n is_field_optional = None.__class__ in types\n\n def _resolve_type(self, value, _type):\n if not hasattr(self, \"field\"):\n raise WrongReturnTypeForUnion(value.field_name, str(type(self)))\n\n if self.field not in _type.types:\n raise UnallowedReturnTypeForUnion(\n value.field_name, str(type(self)), _type.types\n )\n\n return self.field\n\n # TODO: union types don't work with scalar types\n # so we want to return a nice error\n # also we want to make sure we have been passed\n # strawberry types\n graphql_type = GraphQLUnionType(\n field_name, [type.field for type in types]\n )\n graphql_type.resolve_type = _resolve_type\n else:\n graphql_type = REGISTRY.get(annotation)\n\n if not graphql_type:\n raise ValueError(f\"Unable to get GraphQL type for {annotation}\")\n\n if is_field_optional:\n return graphql_type\n\n return GraphQLNonNull(graphql_type)\n",
"path": "strawberry/type_converter.py"
}
] | [
{
"content": "from collections.abc import AsyncGenerator\n\nfrom graphql import (\n GraphQLBoolean,\n GraphQLFloat,\n GraphQLID,\n GraphQLInt,\n GraphQLList,\n GraphQLNonNull,\n GraphQLString,\n GraphQLUnionType,\n)\n\nfrom .exceptions import UnallowedReturnTypeForUnion, WrongReturnTypeForUnion\nfrom .scalars import ID\nfrom .utils.typing import is_union\n\n\nREGISTRY = {\n str: GraphQLString,\n int: GraphQLInt,\n float: GraphQLFloat,\n bool: GraphQLBoolean,\n ID: GraphQLID,\n}\n\n\n# TODO: make so that we don't pass force optional\n# we use that when trying to get the type for a\n# option field (which can either be a scalar or an object type)\ndef get_graphql_type_for_annotation(\n annotation, field_name: str, force_optional: bool = False\n):\n # TODO: this might lead to issues with types that have a field value\n is_field_optional = force_optional\n\n if hasattr(annotation, \"field\"):\n graphql_type = annotation.field\n else:\n annotation_name = getattr(annotation, \"_name\", None)\n\n if annotation_name == \"List\":\n list_of_type = get_graphql_type_for_annotation(\n annotation.__args__[0], field_name\n )\n\n list_type = GraphQLList(list_of_type)\n\n return list_type if is_field_optional else GraphQLNonNull(list_type)\n\n annotation_origin = getattr(annotation, \"__origin__\", None)\n\n if annotation_origin == AsyncGenerator:\n # async generators are used in subscription, we only need the yield type\n # https://docs.python.org/3/library/typing.html#typing.AsyncGenerator\n return get_graphql_type_for_annotation(annotation.__args__[0], field_name)\n\n elif is_union(annotation):\n types = annotation.__args__\n non_none_types = [x for x in types if x != None.__class__] # noqa:E721\n\n # optionals are represented as Union[type, None]\n if len(non_none_types) == 1:\n is_field_optional = True\n graphql_type = get_graphql_type_for_annotation(\n non_none_types[0], field_name, force_optional=True\n )\n else:\n is_field_optional = None.__class__ in types\n\n def _resolve_type(self, value, _type):\n if not hasattr(self, \"field\"):\n raise WrongReturnTypeForUnion(value.field_name, str(type(self)))\n\n if self.field not in _type.types:\n raise UnallowedReturnTypeForUnion(\n value.field_name, str(type(self)), _type.types\n )\n\n return self.field\n\n # TODO: union types don't work with scalar types\n # so we want to return a nice error\n # also we want to make sure we have been passed\n # strawberry types\n graphql_type = GraphQLUnionType(\n field_name, [type.field for type in types]\n )\n graphql_type.resolve_type = _resolve_type\n else:\n graphql_type = REGISTRY.get(annotation)\n\n if not graphql_type:\n raise ValueError(f\"Unable to get GraphQL type for {annotation}\")\n\n if is_field_optional:\n return graphql_type\n\n return GraphQLNonNull(graphql_type)\n",
"path": "strawberry/type_converter.py"
}
] | diff --git a/RELEASE.md b/RELEASE.md
new file mode 100644
index 0000000000..3e34662bf1
--- /dev/null
+++ b/RELEASE.md
@@ -0,0 +1,3 @@
+Release type: patch
+
+Fix List types being converted to Optional GraphQL lists.
diff --git a/strawberry/type_converter.py b/strawberry/type_converter.py
index 0340e4297f..580e698314 100644
--- a/strawberry/type_converter.py
+++ b/strawberry/type_converter.py
@@ -44,7 +44,9 @@ def get_graphql_type_for_annotation(
annotation.__args__[0], field_name
)
- return GraphQLList(list_of_type)
+ list_type = GraphQLList(list_of_type)
+
+ return list_type if is_field_optional else GraphQLNonNull(list_type)
annotation_origin = getattr(annotation, "__origin__", None)
diff --git a/tests/test_type_converter.py b/tests/test_type_converter.py
index deffb7e53d..5d40478ae1 100644
--- a/tests/test_type_converter.py
+++ b/tests/test_type_converter.py
@@ -63,34 +63,64 @@ class A:
def test_list_of_scalar():
- field = get_graphql_type_for_annotation(List[str], "Example")
+ field = get_graphql_type_for_annotation(Optional[List[str]], "Example")
assert type(field) == GraphQLList
assert type(field.of_type) == GraphQLNonNull
assert type(field.of_type.of_type) == GraphQLScalarType
assert field.of_type.of_type.name == "String"
- field = get_graphql_type_for_annotation(List[Optional[str]], "Example")
+ field = get_graphql_type_for_annotation(List[str], "Example")
+
+ assert type(field) == GraphQLNonNull
+ assert type(field.of_type) == GraphQLList
+ assert type(field.of_type.of_type) == GraphQLNonNull
+ assert type(field.of_type.of_type.of_type) == GraphQLScalarType
+ assert field.of_type.of_type.of_type.name == "String"
+
+ field = get_graphql_type_for_annotation(Optional[List[Optional[str]]], "Example")
assert type(field) == GraphQLList
assert type(field.of_type) == GraphQLScalarType
assert field.of_type.name == "String"
+ field = get_graphql_type_for_annotation(List[Optional[str]], "Example")
+
+ assert type(field) == GraphQLNonNull
+ assert type(field.of_type) == GraphQLList
+ assert type(field.of_type.of_type) == GraphQLScalarType
+ assert field.of_type.of_type.name == "String"
+
def test_list_of_object_types():
@strawberry.type
class A:
x: int
- field = get_graphql_type_for_annotation(List[A], "Example")
+ field = get_graphql_type_for_annotation(Optional[List[A]], "Example")
assert type(field) == GraphQLList
assert type(field.of_type) == GraphQLNonNull
assert type(field.of_type.of_type) == GraphQLObjectType
assert field.of_type.of_type.name == "A"
- field = get_graphql_type_for_annotation(List[Optional[A]], "Example")
+ field = get_graphql_type_for_annotation(List[A], "Example")
+
+ assert type(field) == GraphQLNonNull
+ assert type(field.of_type) == GraphQLList
+ assert type(field.of_type.of_type) == GraphQLNonNull
+ assert type(field.of_type.of_type.of_type) == GraphQLObjectType
+ assert field.of_type.of_type.of_type.name == "A"
+
+ field = get_graphql_type_for_annotation(Optional[List[Optional[A]]], "Example")
assert type(field) == GraphQLList
assert type(field.of_type) == GraphQLObjectType
assert field.of_type.name == "A"
+
+ field = get_graphql_type_for_annotation(List[Optional[A]], "Example")
+
+ assert type(field) == GraphQLNonNull
+ assert type(field.of_type) == GraphQLList
+ assert type(field.of_type.of_type) == GraphQLObjectType
+ assert field.of_type.of_type.name == "A"
|
pypi__warehouse-3974 | Unexpected Results on Filtering in Browse by Licence
Given the indentation on the licence tree in the Browse by Licence view I was surprised by the following results:
- Filter on [OSI Approved](https://pypi.org/search/?q=&o=&c=License+%3A%3A+OSI+Approved): **654** Projects
- Add [MIT Filter](https://pypi.org/search/?q=&o=&c=License+%3A%3A+OSI+Approved&c=License+%3A%3A+OSI+Approved+%3A%3A+MIT+License): **95** projects.
- Remove [OSI Approved](https://pypi.org/search/?q=&o=&c=License+%3A%3A+OSI+Approved+%3A%3A+MIT+License): **10,000+** Projects.
Knowing that MIT is OSI Approved, the indentation, _and the inclusion of OSI Approved in the MIT filter URL,_ led me to expect that filtering on OSI Approved was the same as ticking all of the boxes for all of the OSI Approved licences but it seems that instead this box is filtering on packages that explicitly set "OSI Approved" in the meta data and adding MIT, _(or any other),_ restricts this to packages what explicitly set **both** "OSI Approved" **and** "IOS Approved::MIT"!
This is not what I, _and I suspect most people,_ would expect. I do know that really this is the package authors "fault" in that they should have set **both** tags in the meta data but I am not going to raise tickets on over 10, 000 projects and expect a quick resolution.
Possible resolutions:
1. Have a tick box within OSI Approved for Specifically OSI Approved.
2. Any of:
- remove the tick box for OSI Approved and make just a grouping
- remove the grouping
- have toggling OSI Approved (category) toogle ticks on all of the nested lines
- change the filter for OSI Approved (category) from ::OSI Approved to ::OSI Approved* _or whatever the appropriate wildcard is_
- Re-Index the packages so that all packages with any of the OSI Approved licences are automatically included in the OSI Approved category.
| [
{
"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport re\n\nimport elasticsearch\n\nfrom pyramid.httpexceptions import (\n HTTPException,\n HTTPSeeOther,\n HTTPMovedPermanently,\n HTTPNotFound,\n HTTPBadRequest,\n HTTPServiceUnavailable,\n exception_response,\n)\nfrom pyramid.exceptions import PredicateMismatch\nfrom pyramid.renderers import render_to_response\nfrom pyramid.response import Response\nfrom pyramid.view import (\n notfound_view_config,\n forbidden_view_config,\n exception_view_config,\n view_config,\n)\nfrom elasticsearch_dsl import Q\nfrom sqlalchemy import func\nfrom sqlalchemy.orm import aliased, joinedload\nfrom sqlalchemy.sql import exists\n\nfrom warehouse.db import DatabaseNotAvailable\nfrom warehouse.accounts import REDIRECT_FIELD_NAME\nfrom warehouse.accounts.models import User\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.cache.http import add_vary, cache_control\nfrom warehouse.classifiers.models import Classifier\nfrom warehouse.metrics import IMetricsService\nfrom warehouse.packaging.models import Project, Release, File, release_classifiers\nfrom warehouse.search.queries import SEARCH_BOOSTS, SEARCH_FIELDS, SEARCH_FILTER_ORDER\nfrom warehouse.utils.row_counter import RowCount\nfrom warehouse.utils.paginate import ElasticsearchPage, paginate_url_factory\n\n\n# 403, 404, 410, 500,\n\n\n@view_config(context=HTTPException)\n@notfound_view_config(append_slash=HTTPMovedPermanently)\ndef httpexception_view(exc, request):\n # This special case exists for the easter egg that appears on the 404\n # response page. 
We don't generally allow youtube embeds, but we make an\n # except for this one.\n if isinstance(exc, HTTPNotFound):\n request.find_service(name=\"csp\").merge(\n {\n \"frame-src\": [\"https://www.youtube-nocookie.com\"],\n \"script-src\": [\"https://www.youtube.com\", \"https://s.ytimg.com\"],\n }\n )\n try:\n # Lightweight version of 404 page for `/simple/`\n if isinstance(exc, HTTPNotFound) and request.path.startswith(\"/simple/\"):\n response = Response(body=\"404 Not Found\", content_type=\"text/plain\")\n else:\n response = render_to_response(\n \"{}.html\".format(exc.status_code), {}, request=request\n )\n except LookupError:\n # We don't have a customized template for this error, so we'll just let\n # the default happen instead.\n return exc\n\n # Copy over the important values from our HTTPException to our new response\n # object.\n response.status = exc.status\n response.headers.extend(\n (k, v) for k, v in exc.headers.items() if k not in response.headers\n )\n\n return response\n\n\n@forbidden_view_config()\n@exception_view_config(PredicateMismatch)\ndef forbidden(exc, request, redirect_to=\"accounts.login\"):\n # If the forbidden error is because the user isn't logged in, then we'll\n # redirect them to the log in page.\n if request.authenticated_userid is None:\n url = request.route_url(\n redirect_to, _query={REDIRECT_FIELD_NAME: request.path_qs}\n )\n return HTTPSeeOther(url)\n\n # If we've reached here, then the user is logged in and they are genuinely\n # not allowed to access this page.\n return httpexception_view(exc, request)\n\n\n@forbidden_view_config(path_info=r\"^/_includes/\")\n@exception_view_config(PredicateMismatch, path_info=r\"^/_includes/\")\ndef forbidden_include(exc, request):\n # If the forbidden error is for a client-side-include, just return an empty\n # response instead of redirecting\n return Response(status=403)\n\n\n@view_config(context=DatabaseNotAvailable)\ndef service_unavailable(exc, request):\n return httpexception_view(HTTPServiceUnavailable(), request)\n\n\n@view_config(\n route_name=\"robots.txt\",\n renderer=\"robots.txt\",\n decorator=[\n cache_control(1 * 24 * 60 * 60), # 1 day\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=6 * 60 * 60, # 6 hours\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef robotstxt(request):\n request.response.content_type = \"text/plain\"\n return {}\n\n\n@view_config(\n route_name=\"opensearch.xml\",\n renderer=\"opensearch.xml\",\n decorator=[\n cache_control(1 * 24 * 60 * 60), # 1 day\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=6 * 60 * 60, # 6 hours\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef opensearchxml(request):\n request.response.content_type = \"text/xml\"\n return {}\n\n\n@view_config(\n route_name=\"index\",\n renderer=\"index.html\",\n decorator=[\n origin_cache(\n 1 * 60 * 60, # 1 hour\n stale_while_revalidate=10 * 60, # 10 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n keys=[\"all-projects\", \"trending\"],\n )\n ],\n)\ndef index(request):\n project_ids = [\n r[0]\n for r in (\n request.db.query(Project.id)\n .order_by(Project.zscore.desc().nullslast(), func.random())\n .limit(5)\n .all()\n )\n ]\n release_a = aliased(\n Release,\n request.db.query(Release)\n .distinct(Release.project_id)\n .filter(Release.project_id.in_(project_ids))\n .order_by(\n Release.project_id,\n Release.is_prerelease.nullslast(),\n Release._pypi_ordering.desc(),\n )\n .subquery(),\n )\n trending_projects = (\n request.db.query(release_a)\n 
.options(joinedload(release_a.project))\n .order_by(func.array_idx(project_ids, release_a.project_id))\n .all()\n )\n\n latest_releases = (\n request.db.query(Release)\n .options(joinedload(Release.project))\n .order_by(Release.created.desc())\n .limit(5)\n .all()\n )\n\n counts = dict(\n request.db.query(RowCount.table_name, RowCount.count)\n .filter(\n RowCount.table_name.in_(\n [\n Project.__tablename__,\n Release.__tablename__,\n File.__tablename__,\n User.__tablename__,\n ]\n )\n )\n .all()\n )\n\n return {\n \"latest_releases\": latest_releases,\n \"trending_projects\": trending_projects,\n \"num_projects\": counts.get(Project.__tablename__, 0),\n \"num_releases\": counts.get(Release.__tablename__, 0),\n \"num_files\": counts.get(File.__tablename__, 0),\n \"num_users\": counts.get(User.__tablename__, 0),\n }\n\n\n@view_config(route_name=\"classifiers\", renderer=\"pages/classifiers.html\")\ndef classifiers(request):\n classifiers = (\n request.db.query(Classifier.classifier)\n .filter(Classifier.deprecated.is_(False))\n .order_by(Classifier.classifier)\n .all()\n )\n\n return {\"classifiers\": classifiers}\n\n\n@view_config(\n route_name=\"search\",\n renderer=\"search/results.html\",\n decorator=[\n origin_cache(\n 1 * 60 * 60, # 1 hour\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n keys=[\"all-projects\"],\n )\n ],\n)\ndef search(request):\n metrics = request.find_service(IMetricsService, context=None)\n\n q = request.params.get(\"q\", \"\")\n q = q.replace(\"'\", '\"')\n\n if q:\n bool_query = gather_es_queries(q)\n\n query = request.es.query(bool_query)\n\n query = query.suggest(\"name_suggestion\", q, term={\"field\": \"name\"})\n else:\n query = request.es.query()\n\n if request.params.get(\"o\"):\n sort_key = request.params[\"o\"]\n if sort_key.startswith(\"-\"):\n sort = {sort_key[1:]: {\"order\": \"desc\", \"unmapped_type\": \"long\"}}\n else:\n sort = {sort_key: {\"unmapped_type\": \"long\"}}\n\n query = query.sort(sort)\n\n # Require match to all specified classifiers\n for classifier in request.params.getall(\"c\"):\n query = query.filter(\"terms\", classifiers=[classifier])\n\n try:\n page_num = int(request.params.get(\"page\", 1))\n except ValueError:\n raise HTTPBadRequest(\"'page' must be an integer.\")\n\n try:\n page = ElasticsearchPage(\n query, page=page_num, url_maker=paginate_url_factory(request)\n )\n except elasticsearch.TransportError:\n metrics.increment(\"warehouse.views.search.error\")\n raise HTTPServiceUnavailable\n\n if page.page_count and page_num > page.page_count:\n raise HTTPNotFound\n\n available_filters = collections.defaultdict(list)\n\n classifiers_q = (\n request.db.query(Classifier)\n .with_entities(Classifier.classifier)\n .filter(Classifier.deprecated.is_(False))\n .filter(\n exists([release_classifiers.c.trove_id]).where(\n release_classifiers.c.trove_id == Classifier.id\n )\n )\n .order_by(Classifier.classifier)\n )\n\n for cls in classifiers_q:\n first, *_ = cls.classifier.split(\" :: \")\n available_filters[first].append(cls.classifier)\n\n def filter_key(item):\n try:\n return 0, SEARCH_FILTER_ORDER.index(item[0]), item[0]\n except ValueError:\n return 1, 0, item[0]\n\n def form_filters_tree(split_list):\n \"\"\"\n Takes a list of lists, each of them containing a filter and\n one of its children.\n Returns a dictionary, each key being a filter and each value being\n the filter's children.\n \"\"\"\n d = {}\n for l in split_list:\n current_level = d\n for part in l:\n if part not in current_level:\n current_level[part] = {}\n current_level 
= current_level[part]\n return d\n\n def process_available_filters():\n \"\"\"\n Processes available filters and returns a list of dictionaries.\n The value of a key in the dictionary represents its children\n \"\"\"\n sorted_filters = sorted(available_filters.items(), key=filter_key)\n output = []\n for f in sorted_filters:\n classifier_list = f[1]\n split_list = [i.split(\" :: \") for i in classifier_list]\n tree = form_filters_tree(split_list)\n output.append(tree)\n return output\n\n metrics = request.find_service(IMetricsService, context=None)\n metrics.histogram(\"warehouse.views.search.results\", page.item_count)\n\n return {\n \"page\": page,\n \"term\": q,\n \"order\": request.params.get(\"o\", \"\"),\n \"available_filters\": process_available_filters(),\n \"applied_filters\": request.params.getall(\"c\"),\n }\n\n\n@view_config(\n route_name=\"stats\",\n renderer=\"pages/stats.html\",\n decorator=[\n add_vary(\"Accept\"),\n cache_control(1 * 24 * 60 * 60), # 1 day\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\n@view_config(\n route_name=\"stats.json\",\n renderer=\"json\",\n decorator=[\n add_vary(\"Accept\"),\n cache_control(1 * 24 * 60 * 60), # 1 day\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n accept=\"application/json\",\n)\ndef stats(request):\n total_size_query = request.db.query(func.sum(File.size)).all()\n top_100_packages = (\n request.db.query(Project.name, func.sum(File.size))\n .join(Release)\n .join(File)\n .group_by(Project.name)\n .order_by(func.sum(File.size).desc())\n .limit(100)\n .all()\n )\n # Move top packages into a dict to make JSON more self describing\n top_packages = {\n pkg_name: {\"size\": pkg_bytes} for pkg_name, pkg_bytes in top_100_packages\n }\n\n return {\"total_packages_size\": total_size_query[0][0], \"top_packages\": top_packages}\n\n\n@view_config(\n route_name=\"includes.current-user-indicator\",\n renderer=\"includes/current-user-indicator.html\",\n uses_session=True,\n)\ndef current_user_indicator(request):\n return {}\n\n\n@view_config(\n route_name=\"includes.flash-messages\",\n renderer=\"includes/flash-messages.html\",\n uses_session=True,\n)\ndef flash_messages(request):\n return {}\n\n\n@view_config(route_name=\"health\", renderer=\"string\")\ndef health(request):\n # This will ensure that we can access the database and run queries against\n # it without doing anything that will take a lock or block other queries.\n request.db.execute(\"SELECT 1\")\n\n # Nothing will actually check this, but it's a little nicer to have\n # something to return besides an empty body.\n return \"OK\"\n\n\n@view_config(route_name=\"force-status\")\ndef force_status(request):\n try:\n raise exception_response(int(request.matchdict[\"status\"]))\n except KeyError:\n raise exception_response(404) from None\n\n\ndef filter_query(s):\n \"\"\"\n Filters given query with the below regex\n and returns lists of quoted and unquoted strings\n \"\"\"\n matches = re.findall(r'(?:\"([^\"]*)\")|([^\"]*)', s)\n result_quoted = [t[0].strip() for t in matches if t[0]]\n result_unquoted = [t[1].strip() for t in matches if t[1]]\n return result_quoted, result_unquoted\n\n\ndef form_query(query_type, query):\n \"\"\"\n Returns a multi match query\n \"\"\"\n fields = [\n field + \"^\" + str(SEARCH_BOOSTS[field]) if field in SEARCH_BOOSTS else field\n for field in 
SEARCH_FIELDS\n ]\n return Q(\"multi_match\", fields=fields, query=query, type=query_type)\n\n\ndef gather_es_queries(q):\n quoted_string, unquoted_string = filter_query(q)\n must = [form_query(\"phrase\", i) for i in quoted_string] + [\n form_query(\"best_fields\", i) for i in unquoted_string\n ]\n\n bool_query = Q(\"bool\", must=must)\n\n # Allow to optionally match on prefix\n # if ``q`` is longer than one character.\n if len(q) > 1:\n bool_query = bool_query | Q(\"prefix\", normalized_name=q)\n return bool_query\n",
"path": "warehouse/views.py"
}
] | [
{
"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport re\n\nimport elasticsearch\n\nfrom pyramid.httpexceptions import (\n HTTPException,\n HTTPSeeOther,\n HTTPMovedPermanently,\n HTTPNotFound,\n HTTPBadRequest,\n HTTPServiceUnavailable,\n exception_response,\n)\nfrom pyramid.exceptions import PredicateMismatch\nfrom pyramid.renderers import render_to_response\nfrom pyramid.response import Response\nfrom pyramid.view import (\n notfound_view_config,\n forbidden_view_config,\n exception_view_config,\n view_config,\n)\nfrom elasticsearch_dsl import Q\nfrom sqlalchemy import func\nfrom sqlalchemy.orm import aliased, joinedload\nfrom sqlalchemy.sql import exists\n\nfrom warehouse.db import DatabaseNotAvailable\nfrom warehouse.accounts import REDIRECT_FIELD_NAME\nfrom warehouse.accounts.models import User\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.cache.http import add_vary, cache_control\nfrom warehouse.classifiers.models import Classifier\nfrom warehouse.metrics import IMetricsService\nfrom warehouse.packaging.models import Project, Release, File, release_classifiers\nfrom warehouse.search.queries import SEARCH_BOOSTS, SEARCH_FIELDS, SEARCH_FILTER_ORDER\nfrom warehouse.utils.row_counter import RowCount\nfrom warehouse.utils.paginate import ElasticsearchPage, paginate_url_factory\n\n\n# 403, 404, 410, 500,\n\n\n@view_config(context=HTTPException)\n@notfound_view_config(append_slash=HTTPMovedPermanently)\ndef httpexception_view(exc, request):\n # This special case exists for the easter egg that appears on the 404\n # response page. 
We don't generally allow youtube embeds, but we make an\n # except for this one.\n if isinstance(exc, HTTPNotFound):\n request.find_service(name=\"csp\").merge(\n {\n \"frame-src\": [\"https://www.youtube-nocookie.com\"],\n \"script-src\": [\"https://www.youtube.com\", \"https://s.ytimg.com\"],\n }\n )\n try:\n # Lightweight version of 404 page for `/simple/`\n if isinstance(exc, HTTPNotFound) and request.path.startswith(\"/simple/\"):\n response = Response(body=\"404 Not Found\", content_type=\"text/plain\")\n else:\n response = render_to_response(\n \"{}.html\".format(exc.status_code), {}, request=request\n )\n except LookupError:\n # We don't have a customized template for this error, so we'll just let\n # the default happen instead.\n return exc\n\n # Copy over the important values from our HTTPException to our new response\n # object.\n response.status = exc.status\n response.headers.extend(\n (k, v) for k, v in exc.headers.items() if k not in response.headers\n )\n\n return response\n\n\n@forbidden_view_config()\n@exception_view_config(PredicateMismatch)\ndef forbidden(exc, request, redirect_to=\"accounts.login\"):\n # If the forbidden error is because the user isn't logged in, then we'll\n # redirect them to the log in page.\n if request.authenticated_userid is None:\n url = request.route_url(\n redirect_to, _query={REDIRECT_FIELD_NAME: request.path_qs}\n )\n return HTTPSeeOther(url)\n\n # If we've reached here, then the user is logged in and they are genuinely\n # not allowed to access this page.\n return httpexception_view(exc, request)\n\n\n@forbidden_view_config(path_info=r\"^/_includes/\")\n@exception_view_config(PredicateMismatch, path_info=r\"^/_includes/\")\ndef forbidden_include(exc, request):\n # If the forbidden error is for a client-side-include, just return an empty\n # response instead of redirecting\n return Response(status=403)\n\n\n@view_config(context=DatabaseNotAvailable)\ndef service_unavailable(exc, request):\n return httpexception_view(HTTPServiceUnavailable(), request)\n\n\n@view_config(\n route_name=\"robots.txt\",\n renderer=\"robots.txt\",\n decorator=[\n cache_control(1 * 24 * 60 * 60), # 1 day\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=6 * 60 * 60, # 6 hours\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef robotstxt(request):\n request.response.content_type = \"text/plain\"\n return {}\n\n\n@view_config(\n route_name=\"opensearch.xml\",\n renderer=\"opensearch.xml\",\n decorator=[\n cache_control(1 * 24 * 60 * 60), # 1 day\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=6 * 60 * 60, # 6 hours\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef opensearchxml(request):\n request.response.content_type = \"text/xml\"\n return {}\n\n\n@view_config(\n route_name=\"index\",\n renderer=\"index.html\",\n decorator=[\n origin_cache(\n 1 * 60 * 60, # 1 hour\n stale_while_revalidate=10 * 60, # 10 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n keys=[\"all-projects\", \"trending\"],\n )\n ],\n)\ndef index(request):\n project_ids = [\n r[0]\n for r in (\n request.db.query(Project.id)\n .order_by(Project.zscore.desc().nullslast(), func.random())\n .limit(5)\n .all()\n )\n ]\n release_a = aliased(\n Release,\n request.db.query(Release)\n .distinct(Release.project_id)\n .filter(Release.project_id.in_(project_ids))\n .order_by(\n Release.project_id,\n Release.is_prerelease.nullslast(),\n Release._pypi_ordering.desc(),\n )\n .subquery(),\n )\n trending_projects = (\n request.db.query(release_a)\n 
.options(joinedload(release_a.project))\n .order_by(func.array_idx(project_ids, release_a.project_id))\n .all()\n )\n\n latest_releases = (\n request.db.query(Release)\n .options(joinedload(Release.project))\n .order_by(Release.created.desc())\n .limit(5)\n .all()\n )\n\n counts = dict(\n request.db.query(RowCount.table_name, RowCount.count)\n .filter(\n RowCount.table_name.in_(\n [\n Project.__tablename__,\n Release.__tablename__,\n File.__tablename__,\n User.__tablename__,\n ]\n )\n )\n .all()\n )\n\n return {\n \"latest_releases\": latest_releases,\n \"trending_projects\": trending_projects,\n \"num_projects\": counts.get(Project.__tablename__, 0),\n \"num_releases\": counts.get(Release.__tablename__, 0),\n \"num_files\": counts.get(File.__tablename__, 0),\n \"num_users\": counts.get(User.__tablename__, 0),\n }\n\n\n@view_config(route_name=\"classifiers\", renderer=\"pages/classifiers.html\")\ndef classifiers(request):\n classifiers = (\n request.db.query(Classifier.classifier)\n .filter(Classifier.deprecated.is_(False))\n .order_by(Classifier.classifier)\n .all()\n )\n\n return {\"classifiers\": classifiers}\n\n\n@view_config(\n route_name=\"search\",\n renderer=\"search/results.html\",\n decorator=[\n origin_cache(\n 1 * 60 * 60, # 1 hour\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n keys=[\"all-projects\"],\n )\n ],\n)\ndef search(request):\n metrics = request.find_service(IMetricsService, context=None)\n\n q = request.params.get(\"q\", \"\")\n q = q.replace(\"'\", '\"')\n\n if q:\n bool_query = gather_es_queries(q)\n\n query = request.es.query(bool_query)\n\n query = query.suggest(\"name_suggestion\", q, term={\"field\": \"name\"})\n else:\n query = request.es.query()\n\n if request.params.get(\"o\"):\n sort_key = request.params[\"o\"]\n if sort_key.startswith(\"-\"):\n sort = {sort_key[1:]: {\"order\": \"desc\", \"unmapped_type\": \"long\"}}\n else:\n sort = {sort_key: {\"unmapped_type\": \"long\"}}\n\n query = query.sort(sort)\n\n # Require match to all specified classifiers\n for classifier in request.params.getall(\"c\"):\n query = query.query(\"prefix\", classifiers=classifier)\n\n try:\n page_num = int(request.params.get(\"page\", 1))\n except ValueError:\n raise HTTPBadRequest(\"'page' must be an integer.\")\n\n try:\n page = ElasticsearchPage(\n query, page=page_num, url_maker=paginate_url_factory(request)\n )\n except elasticsearch.TransportError:\n metrics.increment(\"warehouse.views.search.error\")\n raise HTTPServiceUnavailable\n\n if page.page_count and page_num > page.page_count:\n raise HTTPNotFound\n\n available_filters = collections.defaultdict(list)\n\n classifiers_q = (\n request.db.query(Classifier)\n .with_entities(Classifier.classifier)\n .filter(Classifier.deprecated.is_(False))\n .filter(\n exists([release_classifiers.c.trove_id]).where(\n release_classifiers.c.trove_id == Classifier.id\n )\n )\n .order_by(Classifier.classifier)\n )\n\n for cls in classifiers_q:\n first, *_ = cls.classifier.split(\" :: \")\n available_filters[first].append(cls.classifier)\n\n def filter_key(item):\n try:\n return 0, SEARCH_FILTER_ORDER.index(item[0]), item[0]\n except ValueError:\n return 1, 0, item[0]\n\n def form_filters_tree(split_list):\n \"\"\"\n Takes a list of lists, each of them containing a filter and\n one of its children.\n Returns a dictionary, each key being a filter and each value being\n the filter's children.\n \"\"\"\n d = {}\n for l in split_list:\n current_level = d\n for part in l:\n if part not in current_level:\n current_level[part] = {}\n current_level = 
current_level[part]\n return d\n\n def process_available_filters():\n \"\"\"\n Processes available filters and returns a list of dictionaries.\n The value of a key in the dictionary represents its children\n \"\"\"\n sorted_filters = sorted(available_filters.items(), key=filter_key)\n output = []\n for f in sorted_filters:\n classifier_list = f[1]\n split_list = [i.split(\" :: \") for i in classifier_list]\n tree = form_filters_tree(split_list)\n output.append(tree)\n return output\n\n metrics = request.find_service(IMetricsService, context=None)\n metrics.histogram(\"warehouse.views.search.results\", page.item_count)\n\n return {\n \"page\": page,\n \"term\": q,\n \"order\": request.params.get(\"o\", \"\"),\n \"available_filters\": process_available_filters(),\n \"applied_filters\": request.params.getall(\"c\"),\n }\n\n\n@view_config(\n route_name=\"stats\",\n renderer=\"pages/stats.html\",\n decorator=[\n add_vary(\"Accept\"),\n cache_control(1 * 24 * 60 * 60), # 1 day\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\n@view_config(\n route_name=\"stats.json\",\n renderer=\"json\",\n decorator=[\n add_vary(\"Accept\"),\n cache_control(1 * 24 * 60 * 60), # 1 day\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n accept=\"application/json\",\n)\ndef stats(request):\n total_size_query = request.db.query(func.sum(File.size)).all()\n top_100_packages = (\n request.db.query(Project.name, func.sum(File.size))\n .join(Release)\n .join(File)\n .group_by(Project.name)\n .order_by(func.sum(File.size).desc())\n .limit(100)\n .all()\n )\n # Move top packages into a dict to make JSON more self describing\n top_packages = {\n pkg_name: {\"size\": pkg_bytes} for pkg_name, pkg_bytes in top_100_packages\n }\n\n return {\"total_packages_size\": total_size_query[0][0], \"top_packages\": top_packages}\n\n\n@view_config(\n route_name=\"includes.current-user-indicator\",\n renderer=\"includes/current-user-indicator.html\",\n uses_session=True,\n)\ndef current_user_indicator(request):\n return {}\n\n\n@view_config(\n route_name=\"includes.flash-messages\",\n renderer=\"includes/flash-messages.html\",\n uses_session=True,\n)\ndef flash_messages(request):\n return {}\n\n\n@view_config(route_name=\"health\", renderer=\"string\")\ndef health(request):\n # This will ensure that we can access the database and run queries against\n # it without doing anything that will take a lock or block other queries.\n request.db.execute(\"SELECT 1\")\n\n # Nothing will actually check this, but it's a little nicer to have\n # something to return besides an empty body.\n return \"OK\"\n\n\n@view_config(route_name=\"force-status\")\ndef force_status(request):\n try:\n raise exception_response(int(request.matchdict[\"status\"]))\n except KeyError:\n raise exception_response(404) from None\n\n\ndef filter_query(s):\n \"\"\"\n Filters given query with the below regex\n and returns lists of quoted and unquoted strings\n \"\"\"\n matches = re.findall(r'(?:\"([^\"]*)\")|([^\"]*)', s)\n result_quoted = [t[0].strip() for t in matches if t[0]]\n result_unquoted = [t[1].strip() for t in matches if t[1]]\n return result_quoted, result_unquoted\n\n\ndef form_query(query_type, query):\n \"\"\"\n Returns a multi match query\n \"\"\"\n fields = [\n field + \"^\" + str(SEARCH_BOOSTS[field]) if field in SEARCH_BOOSTS else field\n for field in 
SEARCH_FIELDS\n ]\n return Q(\"multi_match\", fields=fields, query=query, type=query_type)\n\n\ndef gather_es_queries(q):\n quoted_string, unquoted_string = filter_query(q)\n must = [form_query(\"phrase\", i) for i in quoted_string] + [\n form_query(\"best_fields\", i) for i in unquoted_string\n ]\n\n bool_query = Q(\"bool\", must=must)\n\n # Allow to optionally match on prefix\n # if ``q`` is longer than one character.\n if len(q) > 1:\n bool_query = bool_query | Q(\"prefix\", normalized_name=q)\n return bool_query\n",
"path": "warehouse/views.py"
}
] | diff --git a/tests/unit/test_views.py b/tests/unit/test_views.py
index 3196bd711dba..4d8a3e9eab6a 100644
--- a/tests/unit/test_views.py
+++ b/tests/unit/test_views.py
@@ -410,6 +410,7 @@ def test_with_classifiers(self, monkeypatch, db_request, metrics, page):
es_query = pretend.stub(
suggest=pretend.call_recorder(lambda *a, **kw: es_query),
filter=pretend.call_recorder(lambda *a, **kw: es_query),
+ query=pretend.call_recorder(lambda *a, **kw: es_query),
sort=pretend.call_recorder(lambda *a, **kw: es_query),
)
db_request.es = pretend.stub(
@@ -460,9 +461,9 @@ def test_with_classifiers(self, monkeypatch, db_request, metrics, page):
assert es_query.suggest.calls == [
pretend.call("name_suggestion", params["q"], term={"field": "name"})
]
- assert es_query.filter.calls == [
- pretend.call("terms", classifiers=["foo :: bar"]),
- pretend.call("terms", classifiers=["fiz :: buz"]),
+ assert es_query.query.calls == [
+ pretend.call("prefix", classifiers="foo :: bar"),
+ pretend.call("prefix", classifiers="fiz :: buz"),
]
assert metrics.histogram.calls == [
pretend.call("warehouse.views.search.results", 1000)
diff --git a/warehouse/views.py b/warehouse/views.py
index 510abf41385d..2ae52cee3bba 100644
--- a/warehouse/views.py
+++ b/warehouse/views.py
@@ -276,7 +276,7 @@ def search(request):
# Require match to all specified classifiers
for classifier in request.params.getall("c"):
- query = query.filter("terms", classifiers=[classifier])
+ query = query.query("prefix", classifiers=classifier)
try:
page_num = int(request.params.get("page", 1))
|
ansible__ansible-42038 | "pause" module fails with NameError when "curses" is not installed
##### SUMMARY
The "pause" module fails with a NameError when the curses Python module is not available.
```
Traceback (most recent call last):
File "/usr/local/python/bin/ansible-playbook", line 118, in <module>
exit_code = cli.run()
File "/usr/local/python/lib/python2.7/site-packages/ansible/cli/playbook.py", line 122, in run
results = pbex.run()
File "/usr/local/python/lib/python2.7/site-packages/ansible/executor/playbook_executor.py", line 159, in run
result = self._tqm.run(play=play)
File "/usr/local/python/lib/python2.7/site-packages/ansible/executor/task_queue_manager.py", line 289, in run
play_return = strategy.run(iterator, play_context)
File "/usr/local/python/lib/python2.7/site-packages/ansible/plugins/strategy/linear.py", line 247, in run
action = action_loader.get(task.action, class_only=True)
File "/usr/local/python/lib/python2.7/site-packages/ansible/plugins/loader.py", line 366, in get
self._module_cache[path] = self._load_module_source(name, path)
File "/usr/local/python/lib/python2.7/site-packages/ansible/plugins/loader.py", line 345, in _load_module_source
module = imp.load_source(full_name, path, module_file)
File "/usr/local/python/lib/python2.7/site-packages/ansible/plugins/action/pause.py", line 44, in <module>
except (ImportError, curses.error):
NameError: name 'curses' is not defined
```
Caused by this code block introduced by https://github.com/ansible/ansible/pull/40134 to fix #35372:
```python
try:
import curses
curses.setupterm()
HAS_CURSES = True
except (ImportError, curses.error):
HAS_CURSES = False
```
A better fix would be to nest the curses.error try/except inside the import block.
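A minimal sketch of that nesting, keeping the existing `HAS_CURSES` flag:

```python
try:
    import curses

    # curses.error only exists if the import above succeeded, so the
    # setupterm() check must be nested inside the ImportError guard
    try:
        curses.setupterm()
        HAS_CURSES = True
    except curses.error:
        HAS_CURSES = False
except ImportError:
    HAS_CURSES = False
```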
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
plugins/action/pause.py
##### ANSIBLE VERSION
```
ansible 2.5.4
  config file = /etc/ansible/ansible.cfg
  configured module search path = [u'/etc/ansible/modules']
  ansible python module location = /usr/local/python/lib/python2.7/site-packages/ansible
  executable location = /usr/local/python/bin/ansible
  python version = 2.7.10 (default, Oct 11 2015, 23:13:31) [GCC 4.8.3 20140911 (Red Hat 4.8.3-9)]
```
##### CONFIGURATION
N/A
##### OS / ENVIRONMENT
N/A
##### STEPS TO REPRODUCE
On a machine without "curses", run:
> $ ansible localhost -m pause -a 'seconds=1'
> ERROR! Unexpected Exception, this is probably a bug: name 'curses' is not defined
> to see the full traceback, use -vvv
##### EXPECTED RESULTS
N/A
##### ACTUAL RESULTS
See above.
| [
{
"content": "# Copyright 2012, Tim Bielawa <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport datetime\nimport signal\nimport sys\nimport termios\nimport time\nimport tty\n\nfrom os import isatty\nfrom ansible.errors import AnsibleError\nfrom ansible.module_utils._text import to_text, to_native\nfrom ansible.module_utils.parsing.convert_bool import boolean\nfrom ansible.module_utils.six import PY3\nfrom ansible.plugins.action import ActionBase\n\ntry:\n from __main__ import display\nexcept ImportError:\n from ansible.utils.display import Display\n display = Display()\n\ntry:\n import curses\n curses.setupterm()\n HAS_CURSES = True\nexcept (ImportError, curses.error):\n HAS_CURSES = False\n\nif HAS_CURSES:\n MOVE_TO_BOL = curses.tigetstr('cr')\n CLEAR_TO_EOL = curses.tigetstr('el')\nelse:\n MOVE_TO_BOL = b'\\r'\n CLEAR_TO_EOL = b'\\x1b[K'\n\n\nclass AnsibleTimeoutExceeded(Exception):\n pass\n\n\ndef timeout_handler(signum, frame):\n raise AnsibleTimeoutExceeded\n\n\ndef clear_line(stdout):\n stdout.write(b'\\x1b[%s' % MOVE_TO_BOL)\n stdout.write(b'\\x1b[%s' % CLEAR_TO_EOL)\n\n\nclass ActionModule(ActionBase):\n ''' pauses execution for a length or time, or until input is received '''\n\n PAUSE_TYPES = ['seconds', 'minutes', 'prompt', 'echo', '']\n BYPASS_HOST_LOOP = True\n\n def run(self, tmp=None, task_vars=None):\n ''' run the pause action module '''\n if task_vars is None:\n task_vars = dict()\n\n result = super(ActionModule, self).run(tmp, task_vars)\n del tmp # tmp no longer has any effect\n\n duration_unit = 'minutes'\n prompt = None\n seconds = None\n echo = True\n echo_prompt = ''\n result.update(dict(\n changed=False,\n rc=0,\n stderr='',\n stdout='',\n start=None,\n stop=None,\n delta=None,\n echo=echo\n ))\n\n if not set(self._task.args.keys()) <= set(self.PAUSE_TYPES):\n result['failed'] = True\n result['msg'] = \"Invalid argument given. 
Must be one of: %s\" % \", \".join(self.PAUSE_TYPES)\n return result\n\n # Should keystrokes be echoed to stdout?\n if 'echo' in self._task.args:\n try:\n echo = boolean(self._task.args['echo'])\n except TypeError as e:\n result['failed'] = True\n result['msg'] = to_native(e)\n return result\n\n # Add a note saying the output is hidden if echo is disabled\n if not echo:\n echo_prompt = ' (output is hidden)'\n\n # Is 'prompt' a key in 'args'?\n if 'prompt' in self._task.args:\n prompt = \"[%s]\\n%s%s:\" % (self._task.get_name().strip(), self._task.args['prompt'], echo_prompt)\n else:\n # If no custom prompt is specified, set a default prompt\n prompt = \"[%s]\\n%s%s:\" % (self._task.get_name().strip(), 'Press enter to continue, Ctrl+C to interrupt', echo_prompt)\n\n # Are 'minutes' or 'seconds' keys that exist in 'args'?\n if 'minutes' in self._task.args or 'seconds' in self._task.args:\n try:\n if 'minutes' in self._task.args:\n # The time() command operates in seconds so we need to\n # recalculate for minutes=X values.\n seconds = int(self._task.args['minutes']) * 60\n else:\n seconds = int(self._task.args['seconds'])\n duration_unit = 'seconds'\n\n except ValueError as e:\n result['failed'] = True\n result['msg'] = u\"non-integer value given for prompt duration:\\n%s\" % to_text(e)\n return result\n\n ########################################################################\n # Begin the hard work!\n\n start = time.time()\n result['start'] = to_text(datetime.datetime.now())\n result['user_input'] = b''\n\n fd = None\n old_settings = None\n try:\n if seconds is not None:\n if seconds < 1:\n seconds = 1\n\n # setup the alarm handler\n signal.signal(signal.SIGALRM, timeout_handler)\n signal.alarm(seconds)\n\n # show the timer and control prompts\n display.display(\"Pausing for %d seconds%s\" % (seconds, echo_prompt))\n display.display(\"(ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort)\\r\"),\n\n # show the prompt specified in the task\n if 'prompt' in self._task.args:\n display.display(prompt)\n\n else:\n display.display(prompt)\n\n # save the attributes on the existing (duped) stdin so\n # that we can restore them later after we set raw mode\n fd = None\n try:\n if PY3:\n stdin = self._connection._new_stdin.buffer\n stdout = sys.stdout.buffer\n else:\n stdin = self._connection._new_stdin\n stdout = sys.stdout\n fd = stdin.fileno()\n except (ValueError, AttributeError):\n # ValueError: someone is using a closed file descriptor as stdin\n # AttributeError: someone is using a null file descriptor as stdin on windoez\n stdin = None\n\n if fd is not None:\n if isatty(fd):\n\n # grab actual Ctrl+C sequence\n try:\n intr = termios.tcgetattr(fd)[6][termios.VINTR]\n except Exception:\n # unsupported/not present, use default\n intr = b'\\x03' # value for Ctrl+C\n\n # get backspace sequences\n try:\n backspace = termios.tcgetattr(fd)[6][termios.VERASE]\n except Exception:\n backspace = [b'\\x7f', b'\\x08']\n\n old_settings = termios.tcgetattr(fd)\n tty.setraw(fd)\n tty.setraw(stdout.fileno())\n\n # Only echo input if no timeout is specified\n if not seconds and echo:\n new_settings = termios.tcgetattr(fd)\n new_settings[3] = new_settings[3] | termios.ECHO\n termios.tcsetattr(fd, termios.TCSANOW, new_settings)\n\n # flush the buffer to make sure no previous key presses\n # are read in below\n termios.tcflush(stdin, termios.TCIFLUSH)\n\n while True:\n try:\n if fd is not None:\n key_pressed = stdin.read(1)\n if key_pressed == intr: # value for Ctrl+C\n clear_line(stdout)\n raise 
KeyboardInterrupt\n\n if not seconds:\n if fd is None or not isatty(fd):\n display.warning(\"Not waiting for response to prompt as stdin is not interactive\")\n break\n\n # read key presses and act accordingly\n if key_pressed in (b'\\r', b'\\n'):\n clear_line(stdout)\n break\n elif key_pressed in backspace:\n # delete a character if backspace is pressed\n result['user_input'] = result['user_input'][:-1]\n clear_line(stdout)\n if echo:\n stdout.write(result['user_input'])\n stdout.flush()\n else:\n result['user_input'] += key_pressed\n\n except KeyboardInterrupt:\n signal.alarm(0)\n display.display(\"Press 'C' to continue the play or 'A' to abort \\r\"),\n if self._c_or_a(stdin):\n clear_line(stdout)\n break\n\n clear_line(stdout)\n\n raise AnsibleError('user requested abort!')\n\n except AnsibleTimeoutExceeded:\n # this is the exception we expect when the alarm signal\n # fires, so we simply ignore it to move into the cleanup\n pass\n finally:\n # cleanup and save some information\n # restore the old settings for the duped stdin fd\n if not(None in (fd, old_settings)) and isatty(fd):\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n\n duration = time.time() - start\n result['stop'] = to_text(datetime.datetime.now())\n result['delta'] = int(duration)\n\n if duration_unit == 'minutes':\n duration = round(duration / 60.0, 2)\n else:\n duration = round(duration, 2)\n result['stdout'] = \"Paused for %s %s\" % (duration, duration_unit)\n\n result['user_input'] = to_text(result['user_input'], errors='surrogate_or_strict')\n return result\n\n def _c_or_a(self, stdin):\n while True:\n key_pressed = stdin.read(1)\n if key_pressed.lower() == b'a':\n return False\n elif key_pressed.lower() == b'c':\n return True\n",
"path": "lib/ansible/plugins/action/pause.py"
}
] | [
{
"content": "# Copyright 2012, Tim Bielawa <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport datetime\nimport signal\nimport sys\nimport termios\nimport time\nimport tty\n\nfrom os import isatty\nfrom ansible.errors import AnsibleError\nfrom ansible.module_utils._text import to_text, to_native\nfrom ansible.module_utils.parsing.convert_bool import boolean\nfrom ansible.module_utils.six import PY3\nfrom ansible.plugins.action import ActionBase\n\ntry:\n from __main__ import display\nexcept ImportError:\n from ansible.utils.display import Display\n display = Display()\n\ntry:\n import curses\n\n # Nest the try except since curses.error is not available if curses did not import\n try:\n curses.setupterm()\n HAS_CURSES = True\n except curses.error:\n HAS_CURSES = False\nexcept ImportError:\n HAS_CURSES = False\n\nif HAS_CURSES:\n MOVE_TO_BOL = curses.tigetstr('cr')\n CLEAR_TO_EOL = curses.tigetstr('el')\nelse:\n MOVE_TO_BOL = b'\\r'\n CLEAR_TO_EOL = b'\\x1b[K'\n\n\nclass AnsibleTimeoutExceeded(Exception):\n pass\n\n\ndef timeout_handler(signum, frame):\n raise AnsibleTimeoutExceeded\n\n\ndef clear_line(stdout):\n stdout.write(b'\\x1b[%s' % MOVE_TO_BOL)\n stdout.write(b'\\x1b[%s' % CLEAR_TO_EOL)\n\n\nclass ActionModule(ActionBase):\n ''' pauses execution for a length or time, or until input is received '''\n\n PAUSE_TYPES = ['seconds', 'minutes', 'prompt', 'echo', '']\n BYPASS_HOST_LOOP = True\n\n def run(self, tmp=None, task_vars=None):\n ''' run the pause action module '''\n if task_vars is None:\n task_vars = dict()\n\n result = super(ActionModule, self).run(tmp, task_vars)\n del tmp # tmp no longer has any effect\n\n duration_unit = 'minutes'\n prompt = None\n seconds = None\n echo = True\n echo_prompt = ''\n result.update(dict(\n changed=False,\n rc=0,\n stderr='',\n stdout='',\n start=None,\n stop=None,\n delta=None,\n echo=echo\n ))\n\n if not set(self._task.args.keys()) <= set(self.PAUSE_TYPES):\n result['failed'] = True\n result['msg'] = \"Invalid argument given. 
Must be one of: %s\" % \", \".join(self.PAUSE_TYPES)\n return result\n\n # Should keystrokes be echoed to stdout?\n if 'echo' in self._task.args:\n try:\n echo = boolean(self._task.args['echo'])\n except TypeError as e:\n result['failed'] = True\n result['msg'] = to_native(e)\n return result\n\n # Add a note saying the output is hidden if echo is disabled\n if not echo:\n echo_prompt = ' (output is hidden)'\n\n # Is 'prompt' a key in 'args'?\n if 'prompt' in self._task.args:\n prompt = \"[%s]\\n%s%s:\" % (self._task.get_name().strip(), self._task.args['prompt'], echo_prompt)\n else:\n # If no custom prompt is specified, set a default prompt\n prompt = \"[%s]\\n%s%s:\" % (self._task.get_name().strip(), 'Press enter to continue, Ctrl+C to interrupt', echo_prompt)\n\n # Are 'minutes' or 'seconds' keys that exist in 'args'?\n if 'minutes' in self._task.args or 'seconds' in self._task.args:\n try:\n if 'minutes' in self._task.args:\n # The time() command operates in seconds so we need to\n # recalculate for minutes=X values.\n seconds = int(self._task.args['minutes']) * 60\n else:\n seconds = int(self._task.args['seconds'])\n duration_unit = 'seconds'\n\n except ValueError as e:\n result['failed'] = True\n result['msg'] = u\"non-integer value given for prompt duration:\\n%s\" % to_text(e)\n return result\n\n ########################################################################\n # Begin the hard work!\n\n start = time.time()\n result['start'] = to_text(datetime.datetime.now())\n result['user_input'] = b''\n\n fd = None\n old_settings = None\n try:\n if seconds is not None:\n if seconds < 1:\n seconds = 1\n\n # setup the alarm handler\n signal.signal(signal.SIGALRM, timeout_handler)\n signal.alarm(seconds)\n\n # show the timer and control prompts\n display.display(\"Pausing for %d seconds%s\" % (seconds, echo_prompt))\n display.display(\"(ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort)\\r\"),\n\n # show the prompt specified in the task\n if 'prompt' in self._task.args:\n display.display(prompt)\n\n else:\n display.display(prompt)\n\n # save the attributes on the existing (duped) stdin so\n # that we can restore them later after we set raw mode\n fd = None\n try:\n if PY3:\n stdin = self._connection._new_stdin.buffer\n stdout = sys.stdout.buffer\n else:\n stdin = self._connection._new_stdin\n stdout = sys.stdout\n fd = stdin.fileno()\n except (ValueError, AttributeError):\n # ValueError: someone is using a closed file descriptor as stdin\n # AttributeError: someone is using a null file descriptor as stdin on windoez\n stdin = None\n\n if fd is not None:\n if isatty(fd):\n\n # grab actual Ctrl+C sequence\n try:\n intr = termios.tcgetattr(fd)[6][termios.VINTR]\n except Exception:\n # unsupported/not present, use default\n intr = b'\\x03' # value for Ctrl+C\n\n # get backspace sequences\n try:\n backspace = termios.tcgetattr(fd)[6][termios.VERASE]\n except Exception:\n backspace = [b'\\x7f', b'\\x08']\n\n old_settings = termios.tcgetattr(fd)\n tty.setraw(fd)\n tty.setraw(stdout.fileno())\n\n # Only echo input if no timeout is specified\n if not seconds and echo:\n new_settings = termios.tcgetattr(fd)\n new_settings[3] = new_settings[3] | termios.ECHO\n termios.tcsetattr(fd, termios.TCSANOW, new_settings)\n\n # flush the buffer to make sure no previous key presses\n # are read in below\n termios.tcflush(stdin, termios.TCIFLUSH)\n\n while True:\n try:\n if fd is not None:\n key_pressed = stdin.read(1)\n if key_pressed == intr: # value for Ctrl+C\n clear_line(stdout)\n raise 
KeyboardInterrupt\n\n if not seconds:\n if fd is None or not isatty(fd):\n display.warning(\"Not waiting for response to prompt as stdin is not interactive\")\n break\n\n # read key presses and act accordingly\n if key_pressed in (b'\\r', b'\\n'):\n clear_line(stdout)\n break\n elif key_pressed in backspace:\n # delete a character if backspace is pressed\n result['user_input'] = result['user_input'][:-1]\n clear_line(stdout)\n if echo:\n stdout.write(result['user_input'])\n stdout.flush()\n else:\n result['user_input'] += key_pressed\n\n except KeyboardInterrupt:\n signal.alarm(0)\n display.display(\"Press 'C' to continue the play or 'A' to abort \\r\"),\n if self._c_or_a(stdin):\n clear_line(stdout)\n break\n\n clear_line(stdout)\n\n raise AnsibleError('user requested abort!')\n\n except AnsibleTimeoutExceeded:\n # this is the exception we expect when the alarm signal\n # fires, so we simply ignore it to move into the cleanup\n pass\n finally:\n # cleanup and save some information\n # restore the old settings for the duped stdin fd\n if not(None in (fd, old_settings)) and isatty(fd):\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n\n duration = time.time() - start\n result['stop'] = to_text(datetime.datetime.now())\n result['delta'] = int(duration)\n\n if duration_unit == 'minutes':\n duration = round(duration / 60.0, 2)\n else:\n duration = round(duration, 2)\n result['stdout'] = \"Paused for %s %s\" % (duration, duration_unit)\n\n result['user_input'] = to_text(result['user_input'], errors='surrogate_or_strict')\n return result\n\n def _c_or_a(self, stdin):\n while True:\n key_pressed = stdin.read(1)\n if key_pressed.lower() == b'a':\n return False\n elif key_pressed.lower() == b'c':\n return True\n",
"path": "lib/ansible/plugins/action/pause.py"
}
] | diff --git a/changelogs/fragments/pause-try-except-curses.yaml b/changelogs/fragments/pause-try-except-curses.yaml
new file mode 100644
index 00000000000000..1e8316f9a0de46
--- /dev/null
+++ b/changelogs/fragments/pause-try-except-curses.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - pause - nest try except when importing curses to gracefully fail if curses is not present (https://github.com/ansible/ansible/issues/42004)
diff --git a/lib/ansible/plugins/action/pause.py b/lib/ansible/plugins/action/pause.py
index bdf9c9a51c893d..09efbf641ca276 100644
--- a/lib/ansible/plugins/action/pause.py
+++ b/lib/ansible/plugins/action/pause.py
@@ -39,9 +39,14 @@
try:
import curses
- curses.setupterm()
- HAS_CURSES = True
-except (ImportError, curses.error):
+
+ # Nest the try except since curses.error is not available if curses did not import
+ try:
+ curses.setupterm()
+ HAS_CURSES = True
+ except curses.error:
+ HAS_CURSES = False
+except ImportError:
HAS_CURSES = False
if HAS_CURSES:
|
jupyterhub__jupyterhub-108 | Missing zmq and jsonschema
It seems like two additional dependencies are missing.
``` bash
Traceback (most recent call last):
File "/home/stanleygu/.virtualenvs/localpy/src/ipython/IPython/utils/zmqrelated.py", line 35, in check_for_zmq
import zmq
ImportError: No module named 'zmq'
```
``` bash
Traceback (most recent call last):
File "/home/stanleygu/.virtualenvs/localpy/src/ipython/IPython/nbformat/validator.py", line 10, in <module>
from jsonschema import ValidationError
ImportError: No module named 'jsonschema'
```
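For reference, `setup.py` builds `install_requires` by filtering `requirements.txt`; URL-based requirements such as the `git+...#egg=ipython[notebook]` line (whose `[notebook]` extra is what drags in `pyzmq` and `jsonschema`) cannot go into `install_requires` and have to be installed via `pip install -r requirements.txt`. A minimal sketch of the filtering logic, assuming the skip condition keys on `://` rather than the `-e` prefix:

```python
# Build install_requires from requirements.txt, skipping blank lines,
# comments, and URL-based requirements (which setuptools cannot resolve)
install_requires = []
with open('requirements.txt') as f:
    for line in f:
        req = line.strip()
        if not req or req.startswith('#') or '://' in req:
            continue
        install_requires.append(req)
```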
| [
{
"content": "#!/usr/bin/env python\n# coding: utf-8\n\n# Copyright (c) Juptyer Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n#-----------------------------------------------------------------------------\n# Minimal Python version sanity check (from IPython)\n#-----------------------------------------------------------------------------\n\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nv = sys.version_info\nif v[:2] < (3,3):\n error = \"ERROR: Jupyter Hub requires Python version 3.3 or above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\n\nif os.name in ('nt', 'dos'):\n error = \"ERROR: Windows is not supported\"\n print(error, file=sys.stderr)\n\n# At least we're on the python version we need, move on.\n\nimport os\n\nfrom glob import glob\n\nfrom distutils.core import setup\nfrom subprocess import check_call\n\npjoin = os.path.join\n\nhere = os.path.abspath(os.path.dirname(__file__))\nshare_jupyter = pjoin(here, 'share', 'jupyter')\nstatic = pjoin(share_jupyter, 'static')\n\n#---------------------------------------------------------------------------\n# Build basic package data, etc.\n#---------------------------------------------------------------------------\n\ndef get_data_files():\n \"\"\"Get data files in share/jupyter\"\"\"\n \n data_files = []\n ntrim = len(here) + 1\n \n for (d, dirs, filenames) in os.walk(share_jupyter):\n data_files.append((\n d[ntrim:],\n [ pjoin(d, f) for f in filenames ]\n ))\n return data_files\n\n\nns = {}\nwith open(pjoin(here, 'jupyterhub', 'version.py')) as f:\n exec(f.read(), {}, ns)\n\n\npackages = []\nfor d, _, _ in os.walk('jupyterhub'):\n if os.path.exists(pjoin(d, '__init__.py')):\n packages.append(d.replace(os.path.sep, '.'))\n\nsetup_args = dict(\n name = 'jupyterhub',\n scripts = glob(pjoin('scripts', '*')),\n packages = packages,\n # dummy, so that install_data doesn't get skipped\n # this will be overridden when bower is run anyway\n data_files = get_data_files() or ['dummy'],\n version = ns['__version__'],\n description = \"\"\"JupyterHub: A multi-user server for Jupyter notebooks\"\"\",\n long_description = \"\",\n author = \"Jupyter Development Team\",\n author_email = \"[email protected]\",\n url = \"http://jupyter.org\",\n license = \"BSD\",\n platforms = \"Linux, Mac OS X\",\n keywords = ['Interactive', 'Interpreter', 'Shell', 'Web'],\n classifiers = [\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n ],\n)\n\n#---------------------------------------------------------------------------\n# custom distutils commands\n#---------------------------------------------------------------------------\n\n# imports here, so they are after setuptools import if there was one\nfrom distutils.cmd import Command\nfrom distutils.command.install import install\n\nclass BaseCommand(Command):\n \"\"\"Dumb empty command because Command needs subclasses to override too much\"\"\"\n user_options = []\n \n def initialize_options(self):\n pass\n \n def finalize_options(self):\n pass\n \n def get_inputs(self):\n return []\n \n def get_outputs(self):\n return []\n\n\nclass Bower(BaseCommand):\n description = \"fetch static client-side components with bower\"\n \n user_options = []\n \n def run(self):\n try:\n check_call(['bower', 'install', '--allow-root'])\n except OSError as e:\n print(\"Failed to run bower: 
%s\" % e, file=sys.stderr)\n print(\"You can install bower with `npm install -g bower`\", file=sys.stderr)\n raise\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n\nclass CSS(BaseCommand):\n description = \"compile CSS from LESS\"\n \n user_options = []\n \n def initialize_options(self):\n pass\n \n def finalize_options(self):\n pass\n \n def run(self):\n style_less = pjoin(static, 'less', 'style.less')\n style_css = pjoin(static, 'css', 'style.min.css')\n sourcemap = style_css + '.map'\n try:\n check_call([\n 'lessc', '-x', '--verbose',\n '--source-map-basepath={}'.format(static),\n '--source-map={}'.format(sourcemap),\n '--source-map-rootpath=../',\n style_less, style_css,\n ])\n except OSError as e:\n print(\"Failed to run lessc: %s\" % e, file=sys.stderr)\n print(\"You can install less with `npm install -g less`\", file=sys.stderr)\n raise\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n\n# ensure bower is run as part of install\ninstall.sub_commands.insert(0, ('js', None))\ninstall.sub_commands.insert(1, ('css', None))\n\nsetup_args['cmdclass'] = {\n 'js': Bower,\n 'css': CSS,\n}\n\n\n# setuptools requirements\n\nif 'setuptools' in sys.modules:\n setup_args['zip_safe'] = False\n from setuptools.command.develop import develop\n class develop_js_css(develop):\n def run(self):\n if not self.uninstall:\n self.distribution.run_command('js')\n self.distribution.run_command('css')\n develop.run(self)\n setup_args['cmdclass']['develop'] = develop_js_css\n setup_args['install_requires'] = install_requires = []\n\n with open('requirements.txt') as f:\n for line in f.readlines():\n req = line.strip()\n if not req or req.startswith(('-e', '#')):\n continue\n install_requires.append(req)\n\n#---------------------------------------------------------------------------\n# setup\n#---------------------------------------------------------------------------\n\ndef main():\n setup(**setup_args)\n\nif __name__ == '__main__':\n main()\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# coding: utf-8\n\n# Copyright (c) Juptyer Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n#-----------------------------------------------------------------------------\n# Minimal Python version sanity check (from IPython)\n#-----------------------------------------------------------------------------\n\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nv = sys.version_info\nif v[:2] < (3,3):\n error = \"ERROR: Jupyter Hub requires Python version 3.3 or above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\n\nif os.name in ('nt', 'dos'):\n error = \"ERROR: Windows is not supported\"\n print(error, file=sys.stderr)\n\n# At least we're on the python version we need, move on.\n\nimport os\n\nfrom glob import glob\n\nfrom distutils.core import setup\nfrom subprocess import check_call\n\npjoin = os.path.join\n\nhere = os.path.abspath(os.path.dirname(__file__))\nshare_jupyter = pjoin(here, 'share', 'jupyter')\nstatic = pjoin(share_jupyter, 'static')\n\n#---------------------------------------------------------------------------\n# Build basic package data, etc.\n#---------------------------------------------------------------------------\n\ndef get_data_files():\n \"\"\"Get data files in share/jupyter\"\"\"\n \n data_files = []\n ntrim = len(here) + 1\n \n for (d, dirs, filenames) in os.walk(share_jupyter):\n data_files.append((\n d[ntrim:],\n [ pjoin(d, f) for f in filenames ]\n ))\n return data_files\n\n\nns = {}\nwith open(pjoin(here, 'jupyterhub', 'version.py')) as f:\n exec(f.read(), {}, ns)\n\n\npackages = []\nfor d, _, _ in os.walk('jupyterhub'):\n if os.path.exists(pjoin(d, '__init__.py')):\n packages.append(d.replace(os.path.sep, '.'))\n\nsetup_args = dict(\n name = 'jupyterhub',\n scripts = glob(pjoin('scripts', '*')),\n packages = packages,\n # dummy, so that install_data doesn't get skipped\n # this will be overridden when bower is run anyway\n data_files = get_data_files() or ['dummy'],\n version = ns['__version__'],\n description = \"\"\"JupyterHub: A multi-user server for Jupyter notebooks\"\"\",\n long_description = \"\",\n author = \"Jupyter Development Team\",\n author_email = \"[email protected]\",\n url = \"http://jupyter.org\",\n license = \"BSD\",\n platforms = \"Linux, Mac OS X\",\n keywords = ['Interactive', 'Interpreter', 'Shell', 'Web'],\n classifiers = [\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n ],\n)\n\n#---------------------------------------------------------------------------\n# custom distutils commands\n#---------------------------------------------------------------------------\n\n# imports here, so they are after setuptools import if there was one\nfrom distutils.cmd import Command\nfrom distutils.command.install import install\n\nclass BaseCommand(Command):\n \"\"\"Dumb empty command because Command needs subclasses to override too much\"\"\"\n user_options = []\n \n def initialize_options(self):\n pass\n \n def finalize_options(self):\n pass\n \n def get_inputs(self):\n return []\n \n def get_outputs(self):\n return []\n\n\nclass Bower(BaseCommand):\n description = \"fetch static client-side components with bower\"\n \n user_options = []\n \n def run(self):\n try:\n check_call(['bower', 'install', '--allow-root'])\n except OSError as e:\n print(\"Failed to run bower: 
%s\" % e, file=sys.stderr)\n print(\"You can install bower with `npm install -g bower`\", file=sys.stderr)\n raise\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n\nclass CSS(BaseCommand):\n description = \"compile CSS from LESS\"\n \n user_options = []\n \n def initialize_options(self):\n pass\n \n def finalize_options(self):\n pass\n \n def run(self):\n style_less = pjoin(static, 'less', 'style.less')\n style_css = pjoin(static, 'css', 'style.min.css')\n sourcemap = style_css + '.map'\n try:\n check_call([\n 'lessc', '-x', '--verbose',\n '--source-map-basepath={}'.format(static),\n '--source-map={}'.format(sourcemap),\n '--source-map-rootpath=../',\n style_less, style_css,\n ])\n except OSError as e:\n print(\"Failed to run lessc: %s\" % e, file=sys.stderr)\n print(\"You can install less with `npm install -g less`\", file=sys.stderr)\n raise\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n\n# ensure bower is run as part of install\ninstall.sub_commands.insert(0, ('js', None))\ninstall.sub_commands.insert(1, ('css', None))\n\nsetup_args['cmdclass'] = {\n 'js': Bower,\n 'css': CSS,\n}\n\n\n# setuptools requirements\n\nif 'setuptools' in sys.modules:\n setup_args['zip_safe'] = False\n from setuptools.command.develop import develop\n class develop_js_css(develop):\n def run(self):\n if not self.uninstall:\n self.distribution.run_command('js')\n self.distribution.run_command('css')\n develop.run(self)\n setup_args['cmdclass']['develop'] = develop_js_css\n setup_args['install_requires'] = install_requires = []\n\n with open('requirements.txt') as f:\n for line in f.readlines():\n req = line.strip()\n if not req or req.startswith('#') or '://' in req:\n continue\n install_requires.append(req)\n\n#---------------------------------------------------------------------------\n# setup\n#---------------------------------------------------------------------------\n\ndef main():\n setup(**setup_args)\n\nif __name__ == '__main__':\n main()\n",
"path": "setup.py"
}
] | diff --git a/requirements.txt b/requirements.txt
index 19e55f0b43..218f6a966f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
--e git+https://github.com/ipython/ipython.git#egg=ipython[notebook]
+git+https://github.com/ipython/ipython.git#egg=ipython[notebook]
tornado>=4
jinja2
simplepam
diff --git a/setup.py b/setup.py
index 85da9bcc3d..d1eda5af35 100755
--- a/setup.py
+++ b/setup.py
@@ -190,7 +190,7 @@ def run(self):
with open('requirements.txt') as f:
for line in f.readlines():
req = line.strip()
- if not req or req.startswith(('-e', '#')):
+ if not req or req.startswith('#') or '://' in req:
continue
install_requires.append(req)
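
The new check drops any requirement line containing `://`, which catches VCS URLs whether or not they carry an `-e` prefix, since setuptools' `install_requires` cannot express URL-based requirements directly. A minimal standalone sketch of that filter; the sample requirement lines are illustrative, not the project's actual file:

```python
# Illustrative requirements.txt contents, mirroring the diff above.
sample = """\
git+https://github.com/ipython/ipython.git#egg=ipython[notebook]
tornado>=4
# a comment
jinja2
"""

install_requires = []
for line in sample.splitlines():
    req = line.strip()
    # Skip blanks, comments, and URL-based requirements, which
    # install_requires cannot express directly.
    if not req or req.startswith('#') or '://' in req:
        continue
    install_requires.append(req)

print(install_requires)  # ['tornado>=4', 'jinja2']
```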
|
ktbyers__netmiko-3108 | SSHDetect - OneAccess device not detected
Hi Kirk,
The [SSHDetect](https://github.com/ktbyers/netmiko/blob/develop/netmiko/ssh_autodetect.py#L309) class is not able to detect OneAccess equipment.
This is due to a missing entry in the SSH_MAPPER_DICT dictionary.
The following entry should fix it:
```
"oneaccess_oneos": {
"cmd": "show version",
"search_patterns": [r"OneOS"],
"priority": 99,
"dispatch": "_autodetect_std",
},
```
Do you mind if I send a pull request for this?
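
For reference, once such an entry exists, the standard autodetect flow from the module docstring applies unchanged; a sketch with placeholder host and credentials:

```python
from netmiko.ssh_autodetect import SSHDetect
from netmiko import ConnectHandler

# Placeholder connection details; replace with a real OneAccess host.
remote_device = {
    "device_type": "autodetect",
    "host": "oneaccess.example.com",
    "username": "admin",
    "password": "secret",
}

guesser = SSHDetect(**remote_device)
best_match = guesser.autodetect()
print(best_match)  # expected: "oneaccess_oneos" once the entry exists

remote_device["device_type"] = best_match
connection = ConnectHandler(**remote_device)
```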
| [
{
"content": "\"\"\"\nThe ssh_autodetect module is used to auto-detect the netmiko device_type to use to further initiate\na new SSH connection with a remote host. This auto-detection is based on a unique class called\n**SSHDetect**.\n\nNotes\n-----\n\nThe **SSHDetect** class is instantiated using the same parameters than a standard Netmiko\nconnection (see the *netmiko.ssh_dispatacher.ConnectHandler* function). The only acceptable value\nfor the 'device_type' argument is 'autodetect'.\n\nThe auto-detection is solely based on *SSH_MAPPER_BASE*. The keys are the name of\nthe 'device_type' supported for auto-detection and the value is another dictionary describing how\nto handle the auto-detection.\n\n* \"cmd\" : The command to send to the remote device. **The command output must not require paging.**\n* \"search_patterns\" : A list of regex to compare with the output of the command\n* \"priority\" : An integer (0-99) which specifies the confidence of the match above\n* \"dispatch\" : The function to call to try the autodetection (per default SSHDetect._autodetect_std)\n\nExamples\n--------\n\n# Auto-detection section\n>>> from netmiko.ssh_autodetect import SSHDetect\n>>> from netmiko.ssh_dispatcher import ConnectHandler\n>>> remote_device = {'device_type': 'autodetect',\n 'host': 'remote.host',\n 'username': 'test',\n 'password': 'foo'}\n>>> guesser = SSHDetect(**remote_device)\n>>> best_match = guesser.autodetect()\n>>> print(best_match) # Name of the best device_type to use further\n>>> print(guesser.potential_matches) # Dictionary of the whole matching result\n\n# Netmiko connection creation section\n>>> remote_device['device_type'] = best_match\n>>> connection = ConnectHandler(**remote_device)\n\"\"\"\nfrom typing import Any, List, Optional, Union, Dict\nimport re\nimport time\n\nimport paramiko\n\nfrom netmiko.ssh_dispatcher import ConnectHandler\nfrom netmiko.base_connection import BaseConnection\n\n\n# 'dispatch' key is the SSHDetect method to call. 
dispatch key will be popped off dictionary\n# remaining keys indicate kwargs that will be passed to dispatch method.\n# Note, the 'cmd' needs to avoid output paging.\nSSH_MAPPER_DICT = {\n \"alcatel_aos\": {\n \"cmd\": \"show system\",\n \"search_patterns\": [r\"Alcatel-Lucent\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"alcatel_sros\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [\"Nokia\", \"Alcatel\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"apresia_aeos\": {\n \"cmd\": \"show system\",\n \"search_patterns\": [\"Apresia\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"arista_eos\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Arista\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"arris_cer\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"CER\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"casa_cmts\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Casa\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"ciena_saos\": {\n \"cmd\": \"software show\",\n \"search_patterns\": [r\"saos\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_asa\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Cisco Adaptive Security Appliance\", r\"Cisco ASA\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_ios\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [\n \"Cisco IOS Software\",\n \"Cisco Internetwork Operating System Software\",\n ],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_xe\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Cisco IOS XE Software\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_nxos\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Cisco Nexus Operating System\", r\"NX-OS\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_xr\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Cisco IOS XR\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"dell_force10\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Real Time Operating System Software\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"dell_os9\": {\n \"cmd\": \"show system\",\n \"search_patterns\": [\n r\"Dell Application Software Version: 9\",\n r\"Dell Networking OS Version : 9\",\n ],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"dell_os10\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Dell EMC Networking OS10.Enterprise\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"dell_powerconnect\": {\n \"cmd\": \"show system\",\n \"search_patterns\": [r\"PowerConnect\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"f5_tmsh\": {\n \"cmd\": \"show sys version\",\n \"search_patterns\": [r\"BIG-IP\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"f5_linux\": {\n \"cmd\": \"cat /etc/issue\",\n \"search_patterns\": [r\"BIG-IP\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"hp_comware\": {\n \"cmd\": \"display version\",\n \"search_patterns\": [\"HPE Comware\", \"HP Comware\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"huawei\": {\n \"cmd\": \"display version\",\n \"search_patterns\": [\n r\"Huawei Technologies\",\n r\"Huawei Versatile Routing Platform Software\",\n ],\n \"priority\": 99,\n \"dispatch\": 
\"_autodetect_std\",\n },\n \"juniper_junos\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [\n r\"JUNOS Software Release\",\n r\"JUNOS .+ Software\",\n r\"JUNOS OS Kernel\",\n r\"JUNOS Base Version\",\n ],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"linux\": {\n \"cmd\": \"uname -a\",\n \"search_patterns\": [r\"Linux\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"ericsson_ipos\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Ericsson IPOS Version\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"extreme_exos\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"ExtremeXOS\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"extreme_netiron\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"(NetIron|MLX)\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"extreme_slx\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"SLX-OS Operating System Software\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"extreme_tierra\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"TierraOS Software\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"ubiquiti_edgeswitch\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"EdgeSwitch\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_wlc\": {\n \"cmd\": \"\",\n \"dispatch\": \"_autodetect_remote_version\",\n \"search_patterns\": [r\"CISCO_WLC\"],\n \"priority\": 99,\n },\n \"cisco_wlc_85\": {\n \"cmd\": \"show inventory\",\n \"dispatch\": \"_autodetect_std\",\n \"search_patterns\": [r\"Cisco.*Wireless.*Controller\"],\n \"priority\": 99,\n },\n \"mellanox_mlnxos\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Onyx\", r\"SX_PPC_M460EX\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"yamaha\": {\n \"cmd\": \"show copyright\",\n \"search_patterns\": [r\"Yamaha Corporation\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"fortinet\": {\n \"cmd\": \"get system status\",\n \"search_patterns\": [r\"FortiOS\", r\"FortiGate\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"paloalto_panos\": {\n \"cmd\": \"show system info\",\n \"search_patterns\": [r\"model:\\s+PA\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"supermicro_smis\": {\n \"cmd\": \"show system info\",\n \"search_patterns\": [r\"Super Micro Computer\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"flexvnf\": {\n \"cmd\": \"show system package-info\",\n \"search_patterns\": [r\"Versa FlexVNF\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_viptela\": {\n \"cmd\": \"show system status\",\n \"search_patterns\": [r\"Viptela, Inc\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n}\n\n# Sort SSH_MAPPER_DICT such that the most common commands are first\ncmd_count: Dict[str, int] = {}\nfor k, v in SSH_MAPPER_DICT.items():\n my_cmd = v[\"cmd\"]\n assert isinstance(my_cmd, str)\n count = cmd_count.setdefault(my_cmd, 0)\n cmd_count[my_cmd] = count + 1\ncmd_count = {k: v for k, v in sorted(cmd_count.items(), key=lambda item: item[1])}\n\n# SSH_MAPPER_BASE is a list\nSSH_MAPPER_BASE = sorted(\n SSH_MAPPER_DICT.items(), key=lambda item: int(cmd_count[str(item[1][\"cmd\"])])\n)\nSSH_MAPPER_BASE.reverse()\n\n\nclass SSHDetect(object):\n \"\"\"\n The SSHDetect class tries to automatically guess the device type running on the SSH remote 
end.\n Be careful that the kwargs 'device_type' must be set to 'autodetect', otherwise it won't work at\n all.\n\n Parameters\n ----------\n *args : list\n The same *args that you might provide to the netmiko.ssh_dispatcher.ConnectHandler.\n *kwargs : dict\n The same *kwargs that you might provide to the netmiko.ssh_dispatcher.ConnectHandler.\n\n Attributes\n ----------\n connection : netmiko.terminal_server.TerminalServerSSH\n A basic connection to the remote SSH end.\n potential_matches: dict\n Dict of (device_type, accuracy) that is populated through an interaction with the\n remote end.\n\n Methods\n -------\n autodetect()\n Try to determine the device type.\n \"\"\"\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"\n Constructor of the SSHDetect class\n \"\"\"\n if kwargs[\"device_type\"] != \"autodetect\":\n raise ValueError(\"The connection device_type must be 'autodetect'\")\n # Always set cmd_verify to False for autodetect\n kwargs[\"global_cmd_verify\"] = False\n self.connection = ConnectHandler(*args, **kwargs)\n\n # Add additional sleep to let the login complete.\n time.sleep(3)\n\n # Call the _test_channel_read() in base to clear initial data\n output = BaseConnection._test_channel_read(self.connection)\n self.initial_buffer = output\n self.potential_matches: Dict[str, int] = {}\n self._results_cache: Dict[str, str] = {}\n\n def autodetect(self) -> Union[str, None]:\n \"\"\"\n Try to guess the best 'device_type' based on patterns defined in SSH_MAPPER_BASE\n\n Returns\n -------\n best_match : str or None\n The device type that is currently the best to use to interact with the device\n \"\"\"\n for device_type, autodetect_dict in SSH_MAPPER_BASE:\n tmp_dict = autodetect_dict.copy()\n call_method = tmp_dict.pop(\"dispatch\")\n assert isinstance(call_method, str)\n autodetect_method = getattr(self, call_method)\n accuracy = autodetect_method(**tmp_dict)\n if accuracy:\n self.potential_matches[device_type] = accuracy\n if accuracy >= 99: # Stop the loop as we are sure of our match\n best_match = sorted(\n self.potential_matches.items(), key=lambda t: t[1], reverse=True\n )\n # WLC needs two different auto-dectect solutions\n if \"cisco_wlc_85\" in best_match[0]:\n best_match[0] = (\"cisco_wlc\", 99)\n\n self.connection.disconnect()\n return best_match[0][0]\n\n if not self.potential_matches:\n self.connection.disconnect()\n return None\n\n best_match = sorted(\n self.potential_matches.items(), key=lambda t: t[1], reverse=True\n )\n self.connection.disconnect()\n return best_match[0][0]\n\n def _send_command(self, cmd: str = \"\") -> str:\n \"\"\"\n Handle reading/writing channel directly. 
It is also sanitizing the output received.\n\n Parameters\n ----------\n cmd : str, optional\n The command to send to the remote device (default : \"\", just send a new line)\n\n Returns\n -------\n output : str\n The output from the command sent\n \"\"\"\n self.connection.write_channel(cmd + \"\\n\")\n time.sleep(1)\n output = self.connection.read_channel_timing(last_read=6.0)\n output = self.connection.strip_backspaces(output)\n return output\n\n def _send_command_wrapper(self, cmd: str) -> str:\n \"\"\"\n Send command to the remote device with a caching feature to avoid sending the same command\n twice based on the SSH_MAPPER_BASE dict cmd key.\n\n Parameters\n ----------\n cmd : str\n The command to send to the remote device after checking cache.\n\n Returns\n -------\n response : str\n The response from the remote device.\n \"\"\"\n cached_results = self._results_cache.get(cmd)\n if not cached_results:\n response = self._send_command(cmd)\n self._results_cache[cmd] = response\n return response\n else:\n return cached_results\n\n def _autodetect_remote_version(\n self,\n search_patterns: Optional[List[str]] = None,\n re_flags: int = re.IGNORECASE,\n priority: int = 99,\n **kwargs: Any\n ) -> int:\n \"\"\"\n Method to try auto-detect the device type, by matching a regular expression on the reported\n remote version of the SSH server.\n\n Parameters\n ----------\n search_patterns : list\n A list of regular expression to look for in the reported remote SSH version\n (default: None).\n re_flags: re.flags, optional\n Any flags from the python re module to modify the regular expression (default: re.I).\n priority: int, optional\n The confidence the match is right between 0 and 99 (default: 99).\n \"\"\"\n invalid_responses = [r\"^$\"]\n\n if not search_patterns:\n return 0\n\n try:\n remote_conn = self.connection.remote_conn\n assert isinstance(remote_conn, paramiko.Channel)\n assert remote_conn.transport is not None\n remote_version = remote_conn.transport.remote_version\n for pattern in invalid_responses:\n match = re.search(pattern, remote_version, flags=re.I)\n if match:\n return 0\n for pattern in search_patterns:\n match = re.search(pattern, remote_version, flags=re_flags)\n if match:\n return priority\n except Exception:\n return 0\n return 0\n\n def _autodetect_std(\n self,\n cmd: str = \"\",\n search_patterns: Optional[List[str]] = None,\n re_flags: int = re.IGNORECASE,\n priority: int = 99,\n ) -> int:\n \"\"\"\n Standard method to try to auto-detect the device type. This method will be called for each\n device_type present in SSH_MAPPER_BASE dict ('dispatch' key). 
It will attempt to send a\n command and match some regular expression from the ouput for each entry in SSH_MAPPER_BASE\n ('cmd' and 'search_pattern' keys).\n\n Parameters\n ----------\n cmd : str\n The command to send to the remote device after checking cache.\n search_patterns : list\n A list of regular expression to look for in the command's output (default: None).\n re_flags: re.flags, optional\n Any flags from the python re module to modify the regular expression (default: re.I).\n priority: int, optional\n The confidence the match is right between 0 and 99 (default: 99).\n \"\"\"\n invalid_responses = [\n r\"% Invalid input detected\",\n r\"syntax error, expecting\",\n r\"Error: Unrecognized command\",\n r\"%Error\",\n r\"command not found\",\n r\"Syntax Error: unexpected argument\",\n r\"% Unrecognized command found at\",\n ]\n if not cmd or not search_patterns:\n return 0\n try:\n # _send_command_wrapper will use already cached results if available\n response = self._send_command_wrapper(cmd)\n # Look for error conditions in output\n for pattern in invalid_responses:\n match = re.search(pattern, response, flags=re.I)\n if match:\n return 0\n for pattern in search_patterns:\n match = re.search(pattern, response, flags=re_flags)\n if match:\n return priority\n except Exception:\n return 0\n return 0\n",
"path": "netmiko/ssh_autodetect.py"
}
] | [
{
"content": "\"\"\"\nThe ssh_autodetect module is used to auto-detect the netmiko device_type to use to further initiate\na new SSH connection with a remote host. This auto-detection is based on a unique class called\n**SSHDetect**.\n\nNotes\n-----\n\nThe **SSHDetect** class is instantiated using the same parameters than a standard Netmiko\nconnection (see the *netmiko.ssh_dispatacher.ConnectHandler* function). The only acceptable value\nfor the 'device_type' argument is 'autodetect'.\n\nThe auto-detection is solely based on *SSH_MAPPER_BASE*. The keys are the name of\nthe 'device_type' supported for auto-detection and the value is another dictionary describing how\nto handle the auto-detection.\n\n* \"cmd\" : The command to send to the remote device. **The command output must not require paging.**\n* \"search_patterns\" : A list of regex to compare with the output of the command\n* \"priority\" : An integer (0-99) which specifies the confidence of the match above\n* \"dispatch\" : The function to call to try the autodetection (per default SSHDetect._autodetect_std)\n\nExamples\n--------\n\n# Auto-detection section\n>>> from netmiko.ssh_autodetect import SSHDetect\n>>> from netmiko.ssh_dispatcher import ConnectHandler\n>>> remote_device = {'device_type': 'autodetect',\n 'host': 'remote.host',\n 'username': 'test',\n 'password': 'foo'}\n>>> guesser = SSHDetect(**remote_device)\n>>> best_match = guesser.autodetect()\n>>> print(best_match) # Name of the best device_type to use further\n>>> print(guesser.potential_matches) # Dictionary of the whole matching result\n\n# Netmiko connection creation section\n>>> remote_device['device_type'] = best_match\n>>> connection = ConnectHandler(**remote_device)\n\"\"\"\nfrom typing import Any, List, Optional, Union, Dict\nimport re\nimport time\n\nimport paramiko\n\nfrom netmiko.ssh_dispatcher import ConnectHandler\nfrom netmiko.base_connection import BaseConnection\n\n\n# 'dispatch' key is the SSHDetect method to call. 
dispatch key will be popped off dictionary\n# remaining keys indicate kwargs that will be passed to dispatch method.\n# Note, the 'cmd' needs to avoid output paging.\nSSH_MAPPER_DICT = {\n \"alcatel_aos\": {\n \"cmd\": \"show system\",\n \"search_patterns\": [r\"Alcatel-Lucent\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"alcatel_sros\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [\"Nokia\", \"Alcatel\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"apresia_aeos\": {\n \"cmd\": \"show system\",\n \"search_patterns\": [\"Apresia\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"arista_eos\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Arista\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"arris_cer\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"CER\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"casa_cmts\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Casa\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"ciena_saos\": {\n \"cmd\": \"software show\",\n \"search_patterns\": [r\"saos\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_asa\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Cisco Adaptive Security Appliance\", r\"Cisco ASA\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_ios\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [\n \"Cisco IOS Software\",\n \"Cisco Internetwork Operating System Software\",\n ],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_xe\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Cisco IOS XE Software\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_nxos\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Cisco Nexus Operating System\", r\"NX-OS\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_xr\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Cisco IOS XR\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"dell_force10\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Real Time Operating System Software\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"dell_os9\": {\n \"cmd\": \"show system\",\n \"search_patterns\": [\n r\"Dell Application Software Version: 9\",\n r\"Dell Networking OS Version : 9\",\n ],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"dell_os10\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Dell EMC Networking OS10.Enterprise\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"dell_powerconnect\": {\n \"cmd\": \"show system\",\n \"search_patterns\": [r\"PowerConnect\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"f5_tmsh\": {\n \"cmd\": \"show sys version\",\n \"search_patterns\": [r\"BIG-IP\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"f5_linux\": {\n \"cmd\": \"cat /etc/issue\",\n \"search_patterns\": [r\"BIG-IP\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"hp_comware\": {\n \"cmd\": \"display version\",\n \"search_patterns\": [\"HPE Comware\", \"HP Comware\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"huawei\": {\n \"cmd\": \"display version\",\n \"search_patterns\": [\n r\"Huawei Technologies\",\n r\"Huawei Versatile Routing Platform Software\",\n ],\n \"priority\": 99,\n \"dispatch\": 
\"_autodetect_std\",\n },\n \"juniper_junos\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [\n r\"JUNOS Software Release\",\n r\"JUNOS .+ Software\",\n r\"JUNOS OS Kernel\",\n r\"JUNOS Base Version\",\n ],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"linux\": {\n \"cmd\": \"uname -a\",\n \"search_patterns\": [r\"Linux\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"ericsson_ipos\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Ericsson IPOS Version\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"extreme_exos\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"ExtremeXOS\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"extreme_netiron\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"(NetIron|MLX)\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"extreme_slx\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"SLX-OS Operating System Software\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"extreme_tierra\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"TierraOS Software\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"ubiquiti_edgeswitch\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"EdgeSwitch\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_wlc\": {\n \"cmd\": \"\",\n \"dispatch\": \"_autodetect_remote_version\",\n \"search_patterns\": [r\"CISCO_WLC\"],\n \"priority\": 99,\n },\n \"cisco_wlc_85\": {\n \"cmd\": \"show inventory\",\n \"dispatch\": \"_autodetect_std\",\n \"search_patterns\": [r\"Cisco.*Wireless.*Controller\"],\n \"priority\": 99,\n },\n \"mellanox_mlnxos\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"Onyx\", r\"SX_PPC_M460EX\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"yamaha\": {\n \"cmd\": \"show copyright\",\n \"search_patterns\": [r\"Yamaha Corporation\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"fortinet\": {\n \"cmd\": \"get system status\",\n \"search_patterns\": [r\"FortiOS\", r\"FortiGate\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"paloalto_panos\": {\n \"cmd\": \"show system info\",\n \"search_patterns\": [r\"model:\\s+PA\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"supermicro_smis\": {\n \"cmd\": \"show system info\",\n \"search_patterns\": [r\"Super Micro Computer\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"flexvnf\": {\n \"cmd\": \"show system package-info\",\n \"search_patterns\": [r\"Versa FlexVNF\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"cisco_viptela\": {\n \"cmd\": \"show system status\",\n \"search_patterns\": [r\"Viptela, Inc\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n \"oneaccess_oneos\": {\n \"cmd\": \"show version\",\n \"search_patterns\": [r\"OneOS\"],\n \"priority\": 99,\n \"dispatch\": \"_autodetect_std\",\n },\n}\n\n# Sort SSH_MAPPER_DICT such that the most common commands are first\ncmd_count: Dict[str, int] = {}\nfor k, v in SSH_MAPPER_DICT.items():\n my_cmd = v[\"cmd\"]\n assert isinstance(my_cmd, str)\n count = cmd_count.setdefault(my_cmd, 0)\n cmd_count[my_cmd] = count + 1\ncmd_count = {k: v for k, v in sorted(cmd_count.items(), key=lambda item: item[1])}\n\n# SSH_MAPPER_BASE is a list\nSSH_MAPPER_BASE = sorted(\n SSH_MAPPER_DICT.items(), key=lambda item: 
int(cmd_count[str(item[1][\"cmd\"])])\n)\nSSH_MAPPER_BASE.reverse()\n\n\nclass SSHDetect(object):\n \"\"\"\n The SSHDetect class tries to automatically guess the device type running on the SSH remote end.\n Be careful that the kwargs 'device_type' must be set to 'autodetect', otherwise it won't work at\n all.\n\n Parameters\n ----------\n *args : list\n The same *args that you might provide to the netmiko.ssh_dispatcher.ConnectHandler.\n *kwargs : dict\n The same *kwargs that you might provide to the netmiko.ssh_dispatcher.ConnectHandler.\n\n Attributes\n ----------\n connection : netmiko.terminal_server.TerminalServerSSH\n A basic connection to the remote SSH end.\n potential_matches: dict\n Dict of (device_type, accuracy) that is populated through an interaction with the\n remote end.\n\n Methods\n -------\n autodetect()\n Try to determine the device type.\n \"\"\"\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"\n Constructor of the SSHDetect class\n \"\"\"\n if kwargs[\"device_type\"] != \"autodetect\":\n raise ValueError(\"The connection device_type must be 'autodetect'\")\n # Always set cmd_verify to False for autodetect\n kwargs[\"global_cmd_verify\"] = False\n self.connection = ConnectHandler(*args, **kwargs)\n\n # Add additional sleep to let the login complete.\n time.sleep(3)\n\n # Call the _test_channel_read() in base to clear initial data\n output = BaseConnection._test_channel_read(self.connection)\n self.initial_buffer = output\n self.potential_matches: Dict[str, int] = {}\n self._results_cache: Dict[str, str] = {}\n\n def autodetect(self) -> Union[str, None]:\n \"\"\"\n Try to guess the best 'device_type' based on patterns defined in SSH_MAPPER_BASE\n\n Returns\n -------\n best_match : str or None\n The device type that is currently the best to use to interact with the device\n \"\"\"\n for device_type, autodetect_dict in SSH_MAPPER_BASE:\n tmp_dict = autodetect_dict.copy()\n call_method = tmp_dict.pop(\"dispatch\")\n assert isinstance(call_method, str)\n autodetect_method = getattr(self, call_method)\n accuracy = autodetect_method(**tmp_dict)\n if accuracy:\n self.potential_matches[device_type] = accuracy\n if accuracy >= 99: # Stop the loop as we are sure of our match\n best_match = sorted(\n self.potential_matches.items(), key=lambda t: t[1], reverse=True\n )\n # WLC needs two different auto-dectect solutions\n if \"cisco_wlc_85\" in best_match[0]:\n best_match[0] = (\"cisco_wlc\", 99)\n\n self.connection.disconnect()\n return best_match[0][0]\n\n if not self.potential_matches:\n self.connection.disconnect()\n return None\n\n best_match = sorted(\n self.potential_matches.items(), key=lambda t: t[1], reverse=True\n )\n self.connection.disconnect()\n return best_match[0][0]\n\n def _send_command(self, cmd: str = \"\") -> str:\n \"\"\"\n Handle reading/writing channel directly. 
It is also sanitizing the output received.\n\n Parameters\n ----------\n cmd : str, optional\n The command to send to the remote device (default : \"\", just send a new line)\n\n Returns\n -------\n output : str\n The output from the command sent\n \"\"\"\n self.connection.write_channel(cmd + \"\\n\")\n time.sleep(1)\n output = self.connection.read_channel_timing(last_read=6.0)\n output = self.connection.strip_backspaces(output)\n return output\n\n def _send_command_wrapper(self, cmd: str) -> str:\n \"\"\"\n Send command to the remote device with a caching feature to avoid sending the same command\n twice based on the SSH_MAPPER_BASE dict cmd key.\n\n Parameters\n ----------\n cmd : str\n The command to send to the remote device after checking cache.\n\n Returns\n -------\n response : str\n The response from the remote device.\n \"\"\"\n cached_results = self._results_cache.get(cmd)\n if not cached_results:\n response = self._send_command(cmd)\n self._results_cache[cmd] = response\n return response\n else:\n return cached_results\n\n def _autodetect_remote_version(\n self,\n search_patterns: Optional[List[str]] = None,\n re_flags: int = re.IGNORECASE,\n priority: int = 99,\n **kwargs: Any\n ) -> int:\n \"\"\"\n Method to try auto-detect the device type, by matching a regular expression on the reported\n remote version of the SSH server.\n\n Parameters\n ----------\n search_patterns : list\n A list of regular expression to look for in the reported remote SSH version\n (default: None).\n re_flags: re.flags, optional\n Any flags from the python re module to modify the regular expression (default: re.I).\n priority: int, optional\n The confidence the match is right between 0 and 99 (default: 99).\n \"\"\"\n invalid_responses = [r\"^$\"]\n\n if not search_patterns:\n return 0\n\n try:\n remote_conn = self.connection.remote_conn\n assert isinstance(remote_conn, paramiko.Channel)\n assert remote_conn.transport is not None\n remote_version = remote_conn.transport.remote_version\n for pattern in invalid_responses:\n match = re.search(pattern, remote_version, flags=re.I)\n if match:\n return 0\n for pattern in search_patterns:\n match = re.search(pattern, remote_version, flags=re_flags)\n if match:\n return priority\n except Exception:\n return 0\n return 0\n\n def _autodetect_std(\n self,\n cmd: str = \"\",\n search_patterns: Optional[List[str]] = None,\n re_flags: int = re.IGNORECASE,\n priority: int = 99,\n ) -> int:\n \"\"\"\n Standard method to try to auto-detect the device type. This method will be called for each\n device_type present in SSH_MAPPER_BASE dict ('dispatch' key). 
It will attempt to send a\n command and match some regular expression from the ouput for each entry in SSH_MAPPER_BASE\n ('cmd' and 'search_pattern' keys).\n\n Parameters\n ----------\n cmd : str\n The command to send to the remote device after checking cache.\n search_patterns : list\n A list of regular expression to look for in the command's output (default: None).\n re_flags: re.flags, optional\n Any flags from the python re module to modify the regular expression (default: re.I).\n priority: int, optional\n The confidence the match is right between 0 and 99 (default: 99).\n \"\"\"\n invalid_responses = [\n r\"% Invalid input detected\",\n r\"syntax error, expecting\",\n r\"Error: Unrecognized command\",\n r\"%Error\",\n r\"command not found\",\n r\"Syntax Error: unexpected argument\",\n r\"% Unrecognized command found at\",\n ]\n if not cmd or not search_patterns:\n return 0\n try:\n # _send_command_wrapper will use already cached results if available\n response = self._send_command_wrapper(cmd)\n # Look for error conditions in output\n for pattern in invalid_responses:\n match = re.search(pattern, response, flags=re.I)\n if match:\n return 0\n for pattern in search_patterns:\n match = re.search(pattern, response, flags=re_flags)\n if match:\n return priority\n except Exception:\n return 0\n return 0\n",
"path": "netmiko/ssh_autodetect.py"
}
] | diff --git a/netmiko/ssh_autodetect.py b/netmiko/ssh_autodetect.py
index d84632df5..b17d520f2 100644
--- a/netmiko/ssh_autodetect.py
+++ b/netmiko/ssh_autodetect.py
@@ -288,6 +288,12 @@
"priority": 99,
"dispatch": "_autodetect_std",
},
+ "oneaccess_oneos": {
+ "cmd": "show version",
+ "search_patterns": [r"OneOS"],
+ "priority": 99,
+ "dispatch": "_autodetect_std",
+ },
}
# Sort SSH_MAPPER_DICT such that the most common commands are first
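
That context line refers to the command-frequency sort near the bottom of the module: device types whose probe command is shared by many entries are tried first, which maximizes cache hits in `_send_command_wrapper`. A trimmed, self-contained sketch of the same ordering logic (the three entries are illustrative; real entries carry more keys):

```python
from typing import Dict

# Illustrative subset of SSH_MAPPER_DICT.
mapper = {
    "cisco_ios": {"cmd": "show version"},
    "arista_eos": {"cmd": "show version"},
    "linux": {"cmd": "uname -a"},
}

# Count how many device types share each probe command...
cmd_count: Dict[str, int] = {}
for v in mapper.values():
    cmd_count[v["cmd"]] = cmd_count.get(v["cmd"], 0) + 1

# ...then order device types so the most widely shared commands run first.
mapper_base = sorted(mapper.items(), key=lambda item: cmd_count[item[1]["cmd"]])
mapper_base.reverse()
print([name for name, _ in mapper_base])  # "show version" entries come first
```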
|
rasterio__rasterio-490 | rio merge error
When running `rio merge` on the latest version (`0.27`), I am receiving this error:
```
Traceback (most recent call last):
File "/Users/dnomadb/venv/bin/rio", line 11, in <module>
sys.exit(main_group())
File "/Users/dnomadb/venv/lib/python2.7/site-packages/click/core.py", line 700, in __call__
return self.main(*args, **kwargs)
File "/Users/dnomadb/venv/lib/python2.7/site-packages/click/core.py", line 680, in main
rv = self.invoke(ctx)
File "/Users/dnomadb/venv/lib/python2.7/site-packages/click/core.py", line 1027, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/dnomadb/venv/lib/python2.7/site-packages/click/core.py", line 873, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/dnomadb/venv/lib/python2.7/site-packages/click/core.py", line 508, in invoke
return callback(*args, **kwargs)
File "/Users/dnomadb/venv/lib/python2.7/site-packages/click/decorators.py", line 16, in new_func
return f(get_current_context(), *args, **kwargs)
File "/Users/dnomadb/venv/lib/python2.7/site-packages/rasterio/rio/merge.py", line 54, in merge
from rasterio.tools.merge import merge as merge_tool
ImportError: No module named tools.merge
```
When reverting to `0.26`, I don't see the error and the operation completes as expected.
cc @sgillies
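
The fix (see the diff below) adds `rasterio.tools` to the hand-maintained `packages` list in setup.py, so the subpackage actually ships in built distributions and the `from rasterio.tools.merge import merge` import succeeds. A common way to avoid this class of omission is `setuptools.find_packages()`, sketched here as an alternative rather than as what the project itself does:

```python
from setuptools import find_packages

# Run from a source checkout, this discovers every subpackage that has an
# __init__.py (including rasterio.tools), so nothing is silently omitted
# from the built wheel or sdist.
packages = find_packages(include=["rasterio", "rasterio.*"])
print(packages)
```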
| [
{
"content": "#!/usr/bin/env python\n\n# Two environmental variables influence this script.\n#\n# GDAL_CONFIG: the path to a gdal-config program that points to GDAL headers,\n# libraries, and data files.\n#\n# PACKAGE_DATA: if defined, GDAL and PROJ4 data files will be copied into the\n# source or binary distribution. This is essential when creating self-contained\n# binary wheels.\n\nimport logging\nimport os\nimport pprint\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup\nfrom setuptools.extension import Extension\n\nlogging.basicConfig()\nlog = logging.getLogger()\n\n# python -W all setup.py ...\nif 'all' in sys.warnoptions:\n log.level = logging.DEBUG\n\ndef check_output(cmd):\n # since subprocess.check_output doesn't exist in 2.6\n # we wrap it here.\n try:\n out = subprocess.check_output(cmd)\n return out.decode('utf')\n except AttributeError:\n # For some reasone check_output doesn't exist\n # So fall back on Popen\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n out, err = p.communicate()\n return out\n\ndef copy_data_tree(datadir, destdir):\n try:\n shutil.rmtree(destdir)\n except OSError:\n pass\n shutil.copytree(datadir, destdir)\n\n# Parse the version from the rasterio module.\nwith open('rasterio/__init__.py') as f:\n for line in f:\n if line.find(\"__version__\") >= 0:\n version = line.split(\"=\")[1].strip()\n version = version.strip('\"')\n version = version.strip(\"'\")\n continue\n\nwith open('VERSION.txt', 'w') as f:\n f.write(version)\n\n# Use Cython if available.\ntry:\n from Cython.Build import cythonize\nexcept ImportError:\n cythonize = None\n\n# By default we'll try to get options via gdal-config. On systems without,\n# options will need to be set in setup.cfg or on the setup command line.\ninclude_dirs = []\nlibrary_dirs = []\nlibraries = []\nextra_link_args = []\ngdal_output = [None]*3\n\ntry:\n import numpy\n include_dirs.append(numpy.get_include())\nexcept ImportError:\n log.critical(\"Numpy and its headers are required to run setup(). Exiting.\")\n sys.exit(1)\n\ntry:\n gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config')\n for i, flag in enumerate((\"--cflags\", \"--libs\", \"--datadir\")):\n gdal_output[i] = check_output([gdal_config, flag]).strip()\n\n for item in gdal_output[0].split():\n if item.startswith(\"-I\"):\n include_dirs.extend(item[2:].split(\":\"))\n for item in gdal_output[1].split():\n if item.startswith(\"-L\"):\n library_dirs.extend(item[2:].split(\":\"))\n elif item.startswith(\"-l\"):\n libraries.append(item[2:])\n else:\n # e.g. -framework GDAL\n extra_link_args.append(item)\n\nexcept Exception as e:\n if os.name == \"nt\":\n log.info((\"Building on Windows requires extra options to setup.py to locate needed GDAL files.\\n\"\n \"More information is available in the README.\"))\n else:\n log.warning(\"Failed to get options via gdal-config: %s\", str(e))\n\n\n# Conditionally copy the GDAL data. 
To be used in conjunction with\n# the bdist_wheel command to make self-contained binary wheels.\nif os.environ.get('PACKAGE_DATA'):\n destdir = 'rasterio/gdal_data'\n if gdal_output[2]:\n log.info(\"Copying gdal data from %s\" % gdal_output[2])\n copy_data_tree(gdal_output[2], destdir)\n else:\n # check to see if GDAL_DATA is defined\n gdal_data = os.environ.get('GDAL_DATA', None)\n if gdal_data:\n log.info(\"Copying gdal_data from %s\" % gdal_data)\n copy_data_tree(gdal_data, destdir)\n\n # Conditionally copy PROJ.4 data.\n projdatadir = os.environ.get('PROJ_LIB', '/usr/local/share/proj')\n if os.path.exists(projdatadir):\n log.info(\"Copying proj_data from %s\" % projdatadir)\n copy_data_tree(projdatadir, 'rasterio/proj_data')\n\next_options = dict(\n include_dirs=include_dirs,\n library_dirs=library_dirs,\n libraries=libraries,\n extra_link_args=extra_link_args)\n\nif not os.name == \"nt\":\n # These options fail on Windows if using Visual Studio\n ext_options['extra_compile_args'] = ['-Wno-unused-parameter',\n '-Wno-unused-function']\n\nlog.debug('ext_options:\\n%s', pprint.pformat(ext_options))\n\n# When building from a repo, Cython is required.\nif os.path.exists(\"MANIFEST.in\") and \"clean\" not in sys.argv:\n log.info(\"MANIFEST.in found, presume a repo, cythonizing...\")\n if not cythonize:\n log.critical(\n \"Cython.Build.cythonize not found. \"\n \"Cython is required to build from a repo.\")\n sys.exit(1)\n ext_modules = cythonize([\n Extension(\n 'rasterio._base', ['rasterio/_base.pyx'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.pyx'], **ext_options),\n Extension(\n 'rasterio._copy', ['rasterio/_copy.pyx'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.pyx'], **ext_options),\n Extension(\n 'rasterio._drivers', ['rasterio/_drivers.pyx'], **ext_options),\n Extension(\n 'rasterio._warp', ['rasterio/_warp.pyx'], **ext_options),\n Extension(\n 'rasterio._fill', ['rasterio/_fill.pyx', 'rasterio/rasterfill.cpp'], **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.pyx'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.pyx'], **ext_options),\n ], quiet=True)\n\n# If there's no manifest template, as in an sdist, we just specify .c files.\nelse:\n ext_modules = [\n Extension(\n 'rasterio._base', ['rasterio/_base.c'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.c'], **ext_options),\n Extension(\n 'rasterio._copy', ['rasterio/_copy.c'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.c'], **ext_options),\n Extension(\n 'rasterio._drivers', ['rasterio/_drivers.c'], **ext_options),\n Extension(\n 'rasterio._warp', ['rasterio/_warp.cpp'], **ext_options),\n Extension(\n 'rasterio._fill', ['rasterio/_fill.cpp', 'rasterio/rasterfill.cpp'], **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.c'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.c'], **ext_options),\n ]\n\nwith open('README.rst') as f:\n readme = f.read()\n\n# Runtime requirements.\ninst_reqs = [\n 'affine>=1.0',\n 'cligj>=0.2.0',\n 'Numpy>=1.7',\n 'snuggs>=1.3.1',\n 'click-plugins']\n\nif sys.version_info < (3, 4):\n inst_reqs.append('enum34')\n\nsetup_args = dict(\n name='rasterio',\n version=version,\n description=\"Fast and direct raster I/O for use with Numpy and SciPy\",\n long_description=readme,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: 
Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Multimedia :: Graphics :: Graphics Conversion',\n 'Topic :: Scientific/Engineering :: GIS'],\n keywords='raster gdal',\n author='Sean Gillies',\n author_email='[email protected]',\n url='https://github.com/mapbox/rasterio',\n license='BSD',\n package_dir={'': '.'},\n packages=['rasterio', 'rasterio.rio'],\n entry_points='''\n [console_scripts]\n rio=rasterio.rio.main:main_group\n\n [rasterio.rio_commands]\n bounds=rasterio.rio.features:bounds\n calc=rasterio.rio.calc:calc\n clip=rasterio.rio.convert:clip\n convert=rasterio.rio.convert:convert\n edit-info=rasterio.rio.info:edit\n env=rasterio.rio.info:env\n info=rasterio.rio.info:info\n insp=rasterio.rio.info:insp\n mask=rasterio.rio.features:mask\n merge=rasterio.rio.merge:merge\n overview=rasterio.rio.overview:overview\n rasterize=rasterio.rio.features:rasterize\n sample=rasterio.rio.sample:sample\n shapes=rasterio.rio.features:shapes\n stack=rasterio.rio.bands:stack\n warp=rasterio.rio.warp:warp\n transform=rasterio.rio.info:transform\n ''',\n include_package_data=True,\n ext_modules=ext_modules,\n zip_safe=False,\n install_requires=inst_reqs,\n extras_require={\n 'ipython': ['ipython>=2.0']})\n\nif os.environ.get('PACKAGE_DATA'):\n setup_args['package_data'] = {'rasterio': ['gdal_data/*', 'proj_data/*']}\n\nsetup(**setup_args)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n\n# Two environmental variables influence this script.\n#\n# GDAL_CONFIG: the path to a gdal-config program that points to GDAL headers,\n# libraries, and data files.\n#\n# PACKAGE_DATA: if defined, GDAL and PROJ4 data files will be copied into the\n# source or binary distribution. This is essential when creating self-contained\n# binary wheels.\n\nimport logging\nimport os\nimport pprint\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup\nfrom setuptools.extension import Extension\n\nlogging.basicConfig()\nlog = logging.getLogger()\n\n# python -W all setup.py ...\nif 'all' in sys.warnoptions:\n log.level = logging.DEBUG\n\ndef check_output(cmd):\n # since subprocess.check_output doesn't exist in 2.6\n # we wrap it here.\n try:\n out = subprocess.check_output(cmd)\n return out.decode('utf')\n except AttributeError:\n # For some reasone check_output doesn't exist\n # So fall back on Popen\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n out, err = p.communicate()\n return out\n\ndef copy_data_tree(datadir, destdir):\n try:\n shutil.rmtree(destdir)\n except OSError:\n pass\n shutil.copytree(datadir, destdir)\n\n# Parse the version from the rasterio module.\nwith open('rasterio/__init__.py') as f:\n for line in f:\n if line.find(\"__version__\") >= 0:\n version = line.split(\"=\")[1].strip()\n version = version.strip('\"')\n version = version.strip(\"'\")\n continue\n\nwith open('VERSION.txt', 'w') as f:\n f.write(version)\n\n# Use Cython if available.\ntry:\n from Cython.Build import cythonize\nexcept ImportError:\n cythonize = None\n\n# By default we'll try to get options via gdal-config. On systems without,\n# options will need to be set in setup.cfg or on the setup command line.\ninclude_dirs = []\nlibrary_dirs = []\nlibraries = []\nextra_link_args = []\ngdal_output = [None]*3\n\ntry:\n import numpy\n include_dirs.append(numpy.get_include())\nexcept ImportError:\n log.critical(\"Numpy and its headers are required to run setup(). Exiting.\")\n sys.exit(1)\n\ntry:\n gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config')\n for i, flag in enumerate((\"--cflags\", \"--libs\", \"--datadir\")):\n gdal_output[i] = check_output([gdal_config, flag]).strip()\n\n for item in gdal_output[0].split():\n if item.startswith(\"-I\"):\n include_dirs.extend(item[2:].split(\":\"))\n for item in gdal_output[1].split():\n if item.startswith(\"-L\"):\n library_dirs.extend(item[2:].split(\":\"))\n elif item.startswith(\"-l\"):\n libraries.append(item[2:])\n else:\n # e.g. -framework GDAL\n extra_link_args.append(item)\n\nexcept Exception as e:\n if os.name == \"nt\":\n log.info((\"Building on Windows requires extra options to setup.py to locate needed GDAL files.\\n\"\n \"More information is available in the README.\"))\n else:\n log.warning(\"Failed to get options via gdal-config: %s\", str(e))\n\n\n# Conditionally copy the GDAL data. 
To be used in conjunction with\n# the bdist_wheel command to make self-contained binary wheels.\nif os.environ.get('PACKAGE_DATA'):\n destdir = 'rasterio/gdal_data'\n if gdal_output[2]:\n log.info(\"Copying gdal data from %s\" % gdal_output[2])\n copy_data_tree(gdal_output[2], destdir)\n else:\n # check to see if GDAL_DATA is defined\n gdal_data = os.environ.get('GDAL_DATA', None)\n if gdal_data:\n log.info(\"Copying gdal_data from %s\" % gdal_data)\n copy_data_tree(gdal_data, destdir)\n\n # Conditionally copy PROJ.4 data.\n projdatadir = os.environ.get('PROJ_LIB', '/usr/local/share/proj')\n if os.path.exists(projdatadir):\n log.info(\"Copying proj_data from %s\" % projdatadir)\n copy_data_tree(projdatadir, 'rasterio/proj_data')\n\next_options = dict(\n include_dirs=include_dirs,\n library_dirs=library_dirs,\n libraries=libraries,\n extra_link_args=extra_link_args)\n\nif not os.name == \"nt\":\n # These options fail on Windows if using Visual Studio\n ext_options['extra_compile_args'] = ['-Wno-unused-parameter',\n '-Wno-unused-function']\n\nlog.debug('ext_options:\\n%s', pprint.pformat(ext_options))\n\n# When building from a repo, Cython is required.\nif os.path.exists(\"MANIFEST.in\") and \"clean\" not in sys.argv:\n log.info(\"MANIFEST.in found, presume a repo, cythonizing...\")\n if not cythonize:\n log.critical(\n \"Cython.Build.cythonize not found. \"\n \"Cython is required to build from a repo.\")\n sys.exit(1)\n ext_modules = cythonize([\n Extension(\n 'rasterio._base', ['rasterio/_base.pyx'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.pyx'], **ext_options),\n Extension(\n 'rasterio._copy', ['rasterio/_copy.pyx'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.pyx'], **ext_options),\n Extension(\n 'rasterio._drivers', ['rasterio/_drivers.pyx'], **ext_options),\n Extension(\n 'rasterio._warp', ['rasterio/_warp.pyx'], **ext_options),\n Extension(\n 'rasterio._fill', ['rasterio/_fill.pyx', 'rasterio/rasterfill.cpp'], **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.pyx'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.pyx'], **ext_options),\n ], quiet=True)\n\n# If there's no manifest template, as in an sdist, we just specify .c files.\nelse:\n ext_modules = [\n Extension(\n 'rasterio._base', ['rasterio/_base.c'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.c'], **ext_options),\n Extension(\n 'rasterio._copy', ['rasterio/_copy.c'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.c'], **ext_options),\n Extension(\n 'rasterio._drivers', ['rasterio/_drivers.c'], **ext_options),\n Extension(\n 'rasterio._warp', ['rasterio/_warp.cpp'], **ext_options),\n Extension(\n 'rasterio._fill', ['rasterio/_fill.cpp', 'rasterio/rasterfill.cpp'], **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.c'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.c'], **ext_options),\n ]\n\nwith open('README.rst') as f:\n readme = f.read()\n\n# Runtime requirements.\ninst_reqs = [\n 'affine>=1.0',\n 'cligj>=0.2.0',\n 'Numpy>=1.7',\n 'snuggs>=1.3.1',\n 'click-plugins']\n\nif sys.version_info < (3, 4):\n inst_reqs.append('enum34')\n\nsetup_args = dict(\n name='rasterio',\n version=version,\n description=\"Fast and direct raster I/O for use with Numpy and SciPy\",\n long_description=readme,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: 
Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Multimedia :: Graphics :: Graphics Conversion',\n 'Topic :: Scientific/Engineering :: GIS'],\n keywords='raster gdal',\n author='Sean Gillies',\n author_email='[email protected]',\n url='https://github.com/mapbox/rasterio',\n license='BSD',\n package_dir={'': '.'},\n packages=['rasterio', 'rasterio.rio', 'rasterio.tools'],\n entry_points='''\n [console_scripts]\n rio=rasterio.rio.main:main_group\n\n [rasterio.rio_commands]\n bounds=rasterio.rio.features:bounds\n calc=rasterio.rio.calc:calc\n clip=rasterio.rio.convert:clip\n convert=rasterio.rio.convert:convert\n edit-info=rasterio.rio.info:edit\n env=rasterio.rio.info:env\n info=rasterio.rio.info:info\n insp=rasterio.rio.info:insp\n mask=rasterio.rio.features:mask\n merge=rasterio.rio.merge:merge\n overview=rasterio.rio.overview:overview\n rasterize=rasterio.rio.features:rasterize\n sample=rasterio.rio.sample:sample\n shapes=rasterio.rio.features:shapes\n stack=rasterio.rio.bands:stack\n warp=rasterio.rio.warp:warp\n transform=rasterio.rio.info:transform\n ''',\n include_package_data=True,\n ext_modules=ext_modules,\n zip_safe=False,\n install_requires=inst_reqs,\n extras_require={\n 'ipython': ['ipython>=2.0']})\n\nif os.environ.get('PACKAGE_DATA'):\n setup_args['package_data'] = {'rasterio': ['gdal_data/*', 'proj_data/*']}\n\nsetup(**setup_args)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index fe4cf4d17..ed3b07f19 100755
--- a/setup.py
+++ b/setup.py
@@ -227,7 +227,7 @@ def copy_data_tree(datadir, destdir):
url='https://github.com/mapbox/rasterio',
license='BSD',
package_dir={'': '.'},
- packages=['rasterio', 'rasterio.rio'],
+ packages=['rasterio', 'rasterio.rio', 'rasterio.tools'],
entry_points='''
[console_scripts]
rio=rasterio.rio.main:main_group
|
spack__spack-38843 | Installation issue: apptainer fails to build with cflags="-O2" after 7dc485d2887c70e3e1ebdc6f680cda926444da72
### Steps to reproduce the issue
```console
$ spack spec -I [email protected]
Input spec
--------------------------------
- [email protected]
Concretized
--------------------------------
- [email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" +network+suid build_system=makefile arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=makefile arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" ~libmount build_system=generic patches=fa31180 tracing=none arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" ~debuginfod+nls build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" ~debug~pic+shared build_system=generic arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" ~pic build_system=autotools libs=shared,static arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" +programs build_system=makefile compression=none libs=shared,static arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" +bzip2+curses+git~libunistring+libxml2+tar+xz build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" ~python build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" ~symlinks+termlib abi=none build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools zip=pigz arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=makefile arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools patches=26f26c6 arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools libs=shared,static arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=python_pip patches=0f0b1bd arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" +re2c build_system=generic arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" +bz2+crypt+ctypes+dbm~debug+libxml2+lzma~nis~optimizations+pic+pyexpat+pythoncmd+readline+shared+sqlite3+ssl~tix+tkinter+uuid+zlib build_system=generic patches=0d98e93,f2fd060 arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=generic arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" ~jit+multibyte build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" +cpanm+open+shared+threads build_system=generic arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" +cxx~docs+stl build_system=autotools patches=26090f4,b231fcc arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" +bz2+crypt+ctypes+dbm~debug+libxml2+lzma~nis+optimizations+pic+pyexpat+pythoncmd+readline+shared+sqlite3+ssl~tix+tkinter+uuid+zlib build_system=generic patches=0d98e93,f2fd060 arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" ~obsolete_api build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" +column_metadata+dynamic_extensions+fts~functions+rtree build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" +xft+xss build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools fonts=encodings,font-adobe-100dpi,font-adobe-75dpi,font-adobe-utopia-100dpi,font-adobe-utopia-75dpi,font-adobe-utopia-type1,font-alias,font-arabic-misc,font-bh-100dpi,font-bh-75dpi,font-bh-lucidatypewriter-100dpi,font-bh-lucidatypewriter-75dpi,font-bh-type1,font-bitstream-100dpi,font-bitstream-75dpi,font-bitstream-speedo,font-bitstream-type1,font-cronyx-cyrillic,font-cursor-misc,font-daewoo-misc,font-dec-misc,font-ibm-type1,font-isas-misc,font-jis-misc,font-micro-misc,font-misc-cyrillic,font-misc-ethiopic,font-misc-meltho,font-misc-misc,font-mutt-misc,font-schumacher-misc,font-screen-cyrillic,font-sun-misc,font-winitzki-cyrillic,font-xfree86-type1 arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" ~ipo build_system=cmake build_type=Release generator=make libs=shared,static arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" ~doc+ncurses+ownlibs~qt build_system=generic build_type=Release arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" ~guile build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=generic arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=generic arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" +man+nls+perl+subtree~svn~tcltk build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools patches=35c4492,7793209,a49dd5b arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" ~gssapi~ldap~libidn2~librtmp~libssh~libssh2~nghttp2 build_system=autotools libs=shared,static tls=openssl arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" +libbsd build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" +sigsegv build_system=autotools patches=9dc5fbd,bfdffa7 arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" +gssapi build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" +shared build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" ~docs~shared build_system=generic certs=mozilla arch=linux-rhel9-x86_64_v3
[+] ^ca-certificates-mozilla@2023-01-10%[email protected] cflags="-O2 -O2" cppflags="-O2 -O2" cxxflags="-O2 -O2" build_system=generic arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=generic arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" ~nls build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" +cxx build_system=autotools libs=shared,static patches=69ad2e2 arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools libs=shared,static arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools patches=bbf97f1 arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" +python build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=python_pip patches=71de066 arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=generic arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=generic arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=generic arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" +gzip~lz4~lzo~xz~zstd build_system=makefile default_compression=gzip arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" +optimize+pic~shared build_system=makefile arch=linux-rhel9-x86_64_v3
[+] ^[email protected]%[email protected] cflags="-O2" cppflags="-O2" cxxflags="-O2" build_system=autotools arch=linux-rhel9-x86_64_v3
```
### Error message
<details><summary>Error message</summary>
<pre>
make: Entering directory '/tmp/root/spack-stage/spack-stage-apptainer-1.1.7-lbknpe6tp4kr2xp354hjxzpcp5lhyt4c/src/github.com/apptainer/apptainer/builddir'
GEN GO DEP /tmp/root/spack-stage/spack-stage-apptainer-1.1.7-lbknpe6tp4kr2xp354hjxzpcp5lhyt4c/src/github.com/apptainer/apptainer/builddir/apptainer.deps
GEN GO DEP /tmp/root/spack-stage/spack-stage-apptainer-1.1.7-lbknpe6tp4kr2xp354hjxzpcp5lhyt4c/src/github.com/apptainer/apptainer/builddir/starter.deps
GEN /tmp/root/spack-stage/spack-stage-apptainer-1.1.7-lbknpe6tp4kr2xp354hjxzpcp5lhyt4c/src/github.com/apptainer/apptainer/scripts/go-generate
GO apptainer
[+] GO_TAGS "containers_image_openpgp sylog oci_engine apptainer_engine fakeroot_engine apparmor selinux seccomp"
# github.com/seccomp/libseccomp-golang
cgo-dwarf-inference:4:8: error: enumerator value for '__cgo_enum__2' is not an integer constant
cgo-dwarf-inference:6:8: error: enumerator value for '__cgo_enum__3' is not an integer constant
cgo-dwarf-inference:8:8: error: enumerator value for '__cgo_enum__4' is not an integer constant
cgo-dwarf-inference:10:8: error: enumerator value for '__cgo_enum__5' is not an integer constant
cgo-dwarf-inference:12:8: error: enumerator value for '__cgo_enum__6' is not an integer constant
cgo-dwarf-inference:14:8: error: enumerator value for '__cgo_enum__7' is not an integer constant
cgo-dwarf-inference:16:8: error: enumerator value for '__cgo_enum__8' is not an integer constant
cgo-dwarf-inference:18:8: error: enumerator value for '__cgo_enum__9' is not an integer constant
cgo-dwarf-inference:20:8: error: enumerator value for '__cgo_enum__10' is not an integer constant
cgo-dwarf-inference:22:8: error: enumerator value for '__cgo_enum__11' is not an integer constant
cgo-dwarf-inference:24:8: error: enumerator value for '__cgo_enum__12' is not an integer constant
cgo-dwarf-inference:26:8: error: enumerator value for '__cgo_enum__13' is not an integer constant
cgo-dwarf-inference:28:8: error: enumerator value for '__cgo_enum__14' is not an integer constant
cgo-dwarf-inference:30:8: error: enumerator value for '__cgo_enum__15' is not an integer constant
cgo-dwarf-inference:32:8: error: enumerator value for '__cgo_enum__16' is not an integer constant
cgo-dwarf-inference:34:8: error: enumerator value for '__cgo_enum__17' is not an integer constant
cgo-dwarf-inference:36:8: error: enumerator value for '__cgo_enum__18' is not an integer constant
cgo-dwarf-inference:38:8: error: enumerator value for '__cgo_enum__19' is not an integer constant
cgo-dwarf-inference:40:8: error: enumerator value for '__cgo_enum__20' is not an integer constant
cgo-dwarf-inference:42:8: error: enumerator value for '__cgo_enum__21' is not an integer constant
cgo-dwarf-inference:44:8: error: enumerator value for '__cgo_enum__22' is not an integer constant
cgo-dwarf-inference:46:8: error: enumerator value for '__cgo_enum__23' is not an integer constant
cgo-dwarf-inference:48:8: error: enumerator value for '__cgo_enum__24' is not an integer constant
cgo-dwarf-inference:50:8: error: enumerator value for '__cgo_enum__25' is not an integer constant
cgo-dwarf-inference:52:8: error: enumerator value for '__cgo_enum__26' is not an integer constant
cgo-dwarf-inference:54:8: error: enumerator value for '__cgo_enum__27' is not an integer constant
cgo-dwarf-inference:56:8: error: enumerator value for '__cgo_enum__28' is not an integer constant
cgo-dwarf-inference:58:8: error: enumerator value for '__cgo_enum__29' is not an integer constant
cgo-dwarf-inference:60:8: error: enumerator value for '__cgo_enum__30' is not an integer constant
cgo-dwarf-inference:62:8: error: enumerator value for '__cgo_enum__31' is not an integer constant
cgo-dwarf-inference:64:8: error: enumerator value for '__cgo_enum__32' is not an integer constant
cgo-dwarf-inference:66:8: error: enumerator value for '__cgo_enum__33' is not an integer constant
cgo-dwarf-inference:68:8: error: enumerator value for '__cgo_enum__34' is not an integer constant
cgo-dwarf-inference:70:8: error: enumerator value for '__cgo_enum__35' is not an integer constant
cgo-dwarf-inference:72:8: error: enumerator value for '__cgo_enum__36' is not an integer constant
cgo-dwarf-inference:74:8: error: enumerator value for '__cgo_enum__37' is not an integer constant
cgo-dwarf-inference:76:8: error: enumerator value for '__cgo_enum__38' is not an integer constant
cgo-dwarf-inference:78:8: error: enumerator value for '__cgo_enum__39' is not an integer constant
cgo-dwarf-inference:80:8: error: enumerator value for '__cgo_enum__40' is not an integer constant
cgo-dwarf-inference:82:8: error: enumerator value for '__cgo_enum__41' is not an integer constant
cgo-dwarf-inference:84:8: error: enumerator value for '__cgo_enum__42' is not an integer constant
cgo-dwarf-inference:86:8: error: enumerator value for '__cgo_enum__43' is not an integer constant
cgo-dwarf-inference:88:8: error: enumerator value for '__cgo_enum__44' is not an integer constant
cgo-dwarf-inference:90:8: error: enumerator value for '__cgo_enum__45' is not an integer constant
make: *** [Makefile:181: apptainer] Error 1
make: Leaving directory '/tmp/root/spack-stage/spack-stage-apptainer-1.1.7-lbknpe6tp4kr2xp354hjxzpcp5lhyt4c/src/github.com/apptainer/apptainer/builddir'
</pre></details>
### Information on your system
* **Spack:** 0.21.0.dev0 (5951bd3d04bf51613b5abf7223e4b385c24a6821)
* **Python:** 3.9.10
* **Platform:** linux-rhel9-zen2
* **Concretizer:** clingo
### Additional information
[spack-build-out.txt](https://github.com/spack/spack/files/11826583/spack-build-out.txt)
[spack-build-env.txt](https://github.com/spack/spack/files/11826585/spack-build-env.txt)
### General information
- [X] I have run `spack debug report` and reported the version of Spack/Python/Platform
- [X] I have run `spack maintainers <name-of-the-package>` and **@mentioned** any maintainers
- [X] I have uploaded the build log and environment files
- [X] I have searched the issues of this repo and believe this is not a duplicate
| [
{
"content": "# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\n\nfrom spack.package import *\nfrom spack.pkg.builtin.singularityce import SingularityBase\n\n\n# Apptainer is the new name of Singularity, piggy-back on the original package\nclass Apptainer(SingularityBase):\n \"\"\"Apptainer is an open source container platform designed to be simple, fast, and\n secure. Many container platforms are available, but Apptainer is designed for\n ease-of-use on shared systems and in high performance computing (HPC)\n environments.\n\n Needs post-install chmod/chown steps to enable full functionality.\n See package definition or `spack-build-out.txt` build log for details,\n e.g.:\n\n tail -15 $(spack location -i apptainer)/.spack/spack-build-out.txt\n \"\"\"\n\n homepage = \"https://apptainer.org\"\n url = \"https://github.com/apptainer/apptainer/releases/download/v1.0.2/apptainer-1.0.2.tar.gz\"\n git = \"https://github.com/apptainer/apptainer.git\"\n\n version(\"main\", branch=\"main\")\n version(\"1.1.9\", sha256=\"c615777539154288542cf393d3fd44c04ccb3260bc6330dc324d4e4ebe902bfa\")\n version(\"1.1.7\", sha256=\"e6d3956a26c3965703402e17f153ba07f59bf710068806462b314d2d04e825e7\")\n version(\"1.1.6\", sha256=\"5f32d305279a51ce8bdbe69e733c4ac12b1efdcb77758fab8ec9463e96a8fd82\")\n version(\"1.1.5\", sha256=\"3eadb26b6656a89a111abe29c7e50eab0023e9a8718f1e77e46ca871398bfa67\")\n version(\"1.1.4\", sha256=\"b1ab9d5842002803e66da8f456ee00f352ea2bb43436d5b668f19ef7475ed4a5\")\n version(\"1.1.3\", sha256=\"c7bf7f4d5955e1868739627928238d02f94ca9fd0caf110b0243d65548427899\")\n version(\"1.0.2\", sha256=\"2d7a9d0a76d5574459d249c3415e21423980d9154ce85e8c34b0600782a7dfd3\")\n\n depends_on(\"[email protected]:\", when=\"@1.1.0:\")\n depends_on(\"squashfuse\", type=\"run\")\n\n singularity_org = \"apptainer\"\n singularity_name = \"apptainer\"\n singularity_security_urls = (\n \"https://apptainer.org/docs/admin/main/security.html\",\n \"https://apptainer.org/docs/admin/main/admin_quickstart.html#apptainer-security\",\n )\n\n # Override config options from SingularityBase\n @property\n def config_options(self):\n spec = self.spec\n options = []\n if spec.satisfies(\"@1.1.0: +suid\"):\n options.append(\"--with-suid\")\n return options\n",
"path": "var/spack/repos/builtin/packages/apptainer/package.py"
}
] | [
{
"content": "# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\n\nfrom spack.package import *\nfrom spack.pkg.builtin.singularityce import SingularityBase\n\n\n# Apptainer is the new name of Singularity, piggy-back on the original package\nclass Apptainer(SingularityBase):\n \"\"\"Apptainer is an open source container platform designed to be simple, fast, and\n secure. Many container platforms are available, but Apptainer is designed for\n ease-of-use on shared systems and in high performance computing (HPC)\n environments.\n\n Needs post-install chmod/chown steps to enable full functionality.\n See package definition or `spack-build-out.txt` build log for details,\n e.g.:\n\n tail -15 $(spack location -i apptainer)/.spack/spack-build-out.txt\n \"\"\"\n\n homepage = \"https://apptainer.org\"\n url = \"https://github.com/apptainer/apptainer/releases/download/v1.0.2/apptainer-1.0.2.tar.gz\"\n git = \"https://github.com/apptainer/apptainer.git\"\n\n version(\"main\", branch=\"main\")\n version(\"1.1.9\", sha256=\"c615777539154288542cf393d3fd44c04ccb3260bc6330dc324d4e4ebe902bfa\")\n version(\"1.1.7\", sha256=\"e6d3956a26c3965703402e17f153ba07f59bf710068806462b314d2d04e825e7\")\n version(\"1.1.6\", sha256=\"5f32d305279a51ce8bdbe69e733c4ac12b1efdcb77758fab8ec9463e96a8fd82\")\n version(\"1.1.5\", sha256=\"3eadb26b6656a89a111abe29c7e50eab0023e9a8718f1e77e46ca871398bfa67\")\n version(\"1.1.4\", sha256=\"b1ab9d5842002803e66da8f456ee00f352ea2bb43436d5b668f19ef7475ed4a5\")\n version(\"1.1.3\", sha256=\"c7bf7f4d5955e1868739627928238d02f94ca9fd0caf110b0243d65548427899\")\n version(\"1.0.2\", sha256=\"2d7a9d0a76d5574459d249c3415e21423980d9154ce85e8c34b0600782a7dfd3\")\n\n depends_on(\"[email protected]:\", when=\"@1.1.0:\")\n depends_on(\"squashfuse\", type=\"run\")\n\n singularity_org = \"apptainer\"\n singularity_name = \"apptainer\"\n singularity_security_urls = (\n \"https://apptainer.org/docs/admin/main/security.html\",\n \"https://apptainer.org/docs/admin/main/admin_quickstart.html#apptainer-security\",\n )\n\n # Override config options from SingularityBase\n @property\n def config_options(self):\n spec = self.spec\n options = []\n if spec.satisfies(\"@1.1.0: +suid\"):\n options.append(\"--with-suid\")\n return options\n\n def flag_handler(self, name, flags):\n # Certain go modules this build pulls in cannot be built with anything\n # other than -O0. Best to just discard any injected flags.\n return (None, flags, None)\n",
"path": "var/spack/repos/builtin/packages/apptainer/package.py"
}
] | diff --git a/var/spack/repos/builtin/packages/apptainer/package.py b/var/spack/repos/builtin/packages/apptainer/package.py
index 600f37d210e5fa..d1b9ed71e39365 100644
--- a/var/spack/repos/builtin/packages/apptainer/package.py
+++ b/var/spack/repos/builtin/packages/apptainer/package.py
@@ -53,3 +53,8 @@ def config_options(self):
if spec.satisfies("@1.1.0: +suid"):
options.append("--with-suid")
return options
+
+ def flag_handler(self, name, flags):
+ # Certain go modules this build pulls in cannot be built with anything
+ # other than -O0. Best to just discard any injected flags.
+ return (None, flags, None)
|
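For readers unfamiliar with the fix above: Spack lets a package override `flag_handler` to control where injected compiler flags end up. The handler returns a three-tuple of flag lists, `(wrapper_flags, env_flags, build_system_flags)`, and the patch moves the injected `-O2` out of the compiler-wrapper slot. A minimal annotated sketch of that contract follows; the tuple semantics are the standard Spack convention, while the claim about cgo is a reading of the error log above, not something stated in the patch.

```python
# Sketch of a Spack flag_handler, mirroring the apptainer patch above.
# Spack calls this once per flag variable (name is "cflags", "ldflags", ...).
def flag_handler(self, name, flags):
    # The return value is (wrapper_flags, env_flags, build_system_flags):
    #   slot 0 - flags injected by Spack's compiler wrapper (the default),
    #   slot 1 - flags exported as environment variables (CFLAGS, ...),
    #   slot 2 - flags handed to the build system's configure/make step.
    # Routing everything through slot 1 keeps the wrapper clean, so the
    # Go/cgo toolchain no longer sees -O2 on its internal probe compiles.
    return (None, flags, None)
```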
DistrictDataLabs__yellowbrick-1007 | Installs tests package
**Describe the bug**
Installing yellowbrick also installs a package "tests" into the environment.
**To Reproduce**
```shell
PS> virtualenv env
PS> .\env\Scripts\activate
PS> python -c "import tests; print(tests.__path__)"
Traceback (most recent call last):
File "<string>", line 1, in <module>
ModuleNotFoundError: No module named 'tests'
PS> pip install yellowbrick
PS> python -c "import tests; print(tests.__path__)"
_NamespacePath(['<PATH_FROM_C:>\\env\\lib\\site-packages\\tests'])
```
I dug into the installed files and found the scikit-yb developers' copyright notice in the source file headers.
**Expected behavior**
I would guess it is not the expected nor intended behavior to install the tests package. Also, looking at the setup.py, it seems like it should be excluded, so I do not understand why this isn't the case. Mainly, this is an issue because it causes Python to import the yb tests package instead of my local tests package when running pytest.
**Desktop (please complete the following information):**
- OS: Windows
- Python Version 3.7.4
- Yellowbrick Version 1.0.1
| [
{
"content": "#!/usr/bin/env python\n# setup\n# Setup script for installing yellowbrick\n#\n# Author: Benjamin Bengfort\n# Created: Wed May 18 14:33:26 2016 -0400\n#\n# Copyright (C) 2016 The scikit-yb developers\n# For license information, see LICENSE.txt and NOTICE.md\n#\n# ID: setup.py [c4f3ba7] [email protected] $\n\n\"\"\"\nSetup script for installing yellowbrick.\nSee http://bbengfort.github.io/programmer/2016/01/20/packaging-with-pypi.html\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport os\nimport codecs\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\n##########################################################################\n## Package Information\n##########################################################################\n\n## Basic information\n## Basic information\nNAME = \"yellowbrick\"\nDESCRIPTION = \"A suite of visual analysis and diagnostic tools for machine learning.\"\nAUTHOR = \"The scikit-yb developers\"\nEMAIL = \"[email protected]\"\nMAINTAINER = \"The scikit-yb developers\"\nLICENSE = \"Apache 2\"\nREPOSITORY = \"https://github.com/DistrictDataLabs/yellowbrick\"\nPACKAGE = \"yellowbrick\"\nURL = \"http://scikit-yb.org/\"\n\n## Define the keywords\nKEYWORDS = (\n \"visualization\",\n \"machine learning\",\n \"scikit-learn\",\n \"matplotlib\",\n \"data science\",\n)\n\n## Define the classifiers\n## See https://pypi.python.org/pypi?%3Aaction=list_classifiers\nCLASSIFIERS = (\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n)\n\n## Important Paths\nPROJECT = os.path.abspath(os.path.dirname(__file__))\nREQUIRE_PATH = \"requirements.txt\"\nVERSION_PATH = os.path.join(PACKAGE, \"version.py\")\nPKG_DESCRIBE = \"DESCRIPTION.md\"\n\n## Directories to ignore in find_packages\nEXCLUDES = (\n \"tests\",\n \"bin\",\n \"docs\",\n \"fixtures\",\n \"register\",\n \"notebooks\",\n \"examples\",\n \"binder\",\n \"paper\",\n)\n\n##########################################################################\n## Helper Functions\n##########################################################################\n\n\ndef read(*parts):\n \"\"\"\n Assume UTF-8 encoding and return the contents of the file located at the\n absolute path from the REPOSITORY joined with *parts.\n \"\"\"\n with codecs.open(os.path.join(PROJECT, *parts), \"rb\", \"utf-8\") as f:\n return f.read()\n\n\ndef get_version(path=VERSION_PATH):\n \"\"\"\n Reads the python file defined in the VERSION_PATH to find the get_version\n function, and executes it to ensure that it is loaded correctly. 
Separating\n the version in this way ensures no additional code is executed.\n \"\"\"\n namespace = {}\n exec(read(path), namespace)\n return namespace[\"get_version\"](short=True)\n\n\ndef get_requires(path=REQUIRE_PATH):\n \"\"\"\n Yields a generator of requirements as defined by the REQUIRE_PATH which\n should point to a requirements.txt output by `pip freeze`.\n \"\"\"\n for line in read(path).splitlines():\n line = line.strip()\n if line and not line.startswith(\"#\"):\n yield line\n\n\ndef get_description_type(path=PKG_DESCRIBE):\n \"\"\"\n Returns the long_description_content_type based on the extension of the\n package describe path (e.g. .txt, .rst, or .md).\n \"\"\"\n _, ext = os.path.splitext(path)\n return {\".rst\": \"text/x-rst\", \".txt\": \"text/plain\", \".md\": \"text/markdown\"}[ext]\n\n\n##########################################################################\n## Define the configuration\n##########################################################################\n\nconfig = {\n \"name\": NAME,\n \"version\": get_version(),\n \"description\": DESCRIPTION,\n \"long_description\": read(PKG_DESCRIBE),\n \"long_description_content_type\": get_description_type(PKG_DESCRIBE),\n \"classifiers\": CLASSIFIERS,\n \"keywords\": KEYWORDS,\n \"license\": LICENSE,\n \"author\": AUTHOR,\n \"author_email\": EMAIL,\n \"url\": URL,\n \"maintainer\": MAINTAINER,\n \"maintainer_email\": EMAIL,\n \"project_urls\": {\n \"Documentation\": URL,\n \"Download\": \"{}/tarball/v{}\".format(REPOSITORY, get_version()),\n \"Source\": REPOSITORY,\n \"Tracker\": \"{}/issues\".format(REPOSITORY),\n },\n \"download_url\": \"{}/tarball/v{}\".format(REPOSITORY, get_version()),\n \"packages\": find_packages(where=PROJECT, exclude=EXCLUDES),\n \"package_data\": {\"yellowbrick\": [\"datasets/manifest.json\"]},\n \"zip_safe\": False,\n \"entry_points\": {\"console_scripts\": []},\n \"install_requires\": list(get_requires()),\n \"python_requires\": \">=3.4, <4\",\n \"setup_requires\": [\"pytest-runner\"],\n \"tests_require\": [\"pytest\"],\n}\n\n\n##########################################################################\n## Run setup script\n##########################################################################\n\nif __name__ == \"__main__\":\n setup(**config)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# setup\n# Setup script for installing yellowbrick\n#\n# Author: Benjamin Bengfort\n# Created: Wed May 18 14:33:26 2016 -0400\n#\n# Copyright (C) 2016 The scikit-yb developers\n# For license information, see LICENSE.txt and NOTICE.md\n#\n# ID: setup.py [c4f3ba7] [email protected] $\n\n\"\"\"\nSetup script for installing yellowbrick.\nSee http://bbengfort.github.io/programmer/2016/01/20/packaging-with-pypi.html\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport os\nimport codecs\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\n##########################################################################\n## Package Information\n##########################################################################\n\n## Basic information\n## Basic information\nNAME = \"yellowbrick\"\nDESCRIPTION = \"A suite of visual analysis and diagnostic tools for machine learning.\"\nAUTHOR = \"The scikit-yb developers\"\nEMAIL = \"[email protected]\"\nMAINTAINER = \"The scikit-yb developers\"\nLICENSE = \"Apache 2\"\nREPOSITORY = \"https://github.com/DistrictDataLabs/yellowbrick\"\nPACKAGE = \"yellowbrick\"\nURL = \"http://scikit-yb.org/\"\n\n## Define the keywords\nKEYWORDS = (\n \"visualization\",\n \"machine learning\",\n \"scikit-learn\",\n \"matplotlib\",\n \"data science\",\n)\n\n## Define the classifiers\n## See https://pypi.python.org/pypi?%3Aaction=list_classifiers\nCLASSIFIERS = (\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n)\n\n## Important Paths\nPROJECT = os.path.abspath(os.path.dirname(__file__))\nREQUIRE_PATH = \"requirements.txt\"\nVERSION_PATH = os.path.join(PACKAGE, \"version.py\")\nPKG_DESCRIBE = \"DESCRIPTION.md\"\n\n## Directories to ignore in find_packages\nEXCLUDES = (\n \"tests\", \"tests.*\",\n \"bin\",\n \"docs\", \"docs.*\",\n \"fixtures\",\n \"register\",\n \"notebooks\", \"notebooks.*\",\n \"examples\", \"examples.*\",\n \"binder\", \"binder.*\",\n \"paper\",\n)\n\n##########################################################################\n## Helper Functions\n##########################################################################\n\n\ndef read(*parts):\n \"\"\"\n Assume UTF-8 encoding and return the contents of the file located at the\n absolute path from the REPOSITORY joined with *parts.\n \"\"\"\n with codecs.open(os.path.join(PROJECT, *parts), \"rb\", \"utf-8\") as f:\n return f.read()\n\n\ndef get_version(path=VERSION_PATH):\n \"\"\"\n Reads the python file defined in the VERSION_PATH to find the get_version\n function, and executes it to ensure that it is loaded correctly. 
Separating\n the version in this way ensures no additional code is executed.\n \"\"\"\n namespace = {}\n exec(read(path), namespace)\n return namespace[\"get_version\"](short=True)\n\n\ndef get_requires(path=REQUIRE_PATH):\n \"\"\"\n Yields a generator of requirements as defined by the REQUIRE_PATH which\n should point to a requirements.txt output by `pip freeze`.\n \"\"\"\n for line in read(path).splitlines():\n line = line.strip()\n if line and not line.startswith(\"#\"):\n yield line\n\n\ndef get_description_type(path=PKG_DESCRIBE):\n \"\"\"\n Returns the long_description_content_type based on the extension of the\n package describe path (e.g. .txt, .rst, or .md).\n \"\"\"\n _, ext = os.path.splitext(path)\n return {\".rst\": \"text/x-rst\", \".txt\": \"text/plain\", \".md\": \"text/markdown\"}[ext]\n\n\n##########################################################################\n## Define the configuration\n##########################################################################\n\nconfig = {\n \"name\": NAME,\n \"version\": get_version(),\n \"description\": DESCRIPTION,\n \"long_description\": read(PKG_DESCRIBE),\n \"long_description_content_type\": get_description_type(PKG_DESCRIBE),\n \"classifiers\": CLASSIFIERS,\n \"keywords\": KEYWORDS,\n \"license\": LICENSE,\n \"author\": AUTHOR,\n \"author_email\": EMAIL,\n \"url\": URL,\n \"maintainer\": MAINTAINER,\n \"maintainer_email\": EMAIL,\n \"project_urls\": {\n \"Documentation\": URL,\n \"Download\": \"{}/tarball/v{}\".format(REPOSITORY, get_version()),\n \"Source\": REPOSITORY,\n \"Tracker\": \"{}/issues\".format(REPOSITORY),\n },\n \"download_url\": \"{}/tarball/v{}\".format(REPOSITORY, get_version()),\n \"packages\": find_packages(where=PROJECT, exclude=EXCLUDES),\n \"package_data\": {\"yellowbrick\": [\"datasets/manifest.json\"]},\n \"zip_safe\": False,\n \"entry_points\": {\"console_scripts\": []},\n \"install_requires\": list(get_requires()),\n \"python_requires\": \">=3.4, <4\",\n \"setup_requires\": [\"pytest-runner\"],\n \"tests_require\": [\"pytest\"],\n}\n\n\n##########################################################################\n## Run setup script\n##########################################################################\n\nif __name__ == \"__main__\":\n setup(**config)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index babb04d3e..d47d5f66e 100755
--- a/setup.py
+++ b/setup.py
@@ -77,14 +77,14 @@
## Directories to ignore in find_packages
EXCLUDES = (
- "tests",
+ "tests", "tests.*",
"bin",
- "docs",
+ "docs", "docs.*",
"fixtures",
"register",
- "notebooks",
- "examples",
- "binder",
+ "notebooks", "notebooks.*",
+ "examples", "examples.*",
+ "binder", "binder.*",
"paper",
)
|
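The reason the extra `"tests.*"` patterns are needed: setuptools' `find_packages` matches each exclude pattern against the full dotted package name, so a bare `"tests"` excludes only the top-level package while subpackages such as `tests.base` still get picked up and installed. A minimal sketch; the directory layout is illustrative, not the exact yellowbrick tree.

```python
# Demonstrates the find_packages exclude pitfall fixed in the diff above.
from setuptools import find_packages

# Assume a source tree like:
#   yellowbrick/__init__.py
#   tests/__init__.py
#   tests/base/__init__.py        <- subpackage
#
# Exclude patterns are fnmatch-style and must match the *dotted* name:
find_packages(exclude=("tests",))            # drops "tests", keeps "tests.base"
find_packages(exclude=("tests", "tests.*"))  # drops the whole tests tree
```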
Kinto__kinto-186 | Allow POST on buckets using the id_generator or the id provided in the data.
| [
{
"content": "from six import text_type\nfrom uuid import UUID\n\nfrom pyramid.httpexceptions import (HTTPForbidden, HTTPPreconditionFailed,\n HTTPException)\nfrom pyramid.security import NO_PERMISSION_REQUIRED\nfrom pyramid.view import view_config\n\nfrom cliquet import resource\nfrom cliquet.utils import hmac_digest, build_request, reapply_cors\n\nfrom kinto.views import NameGenerator\n\n\ndef create_bucket(request, bucket_id):\n \"\"\"Create a bucket if it doesn't exists.\"\"\"\n bucket_put = (request.method.lower() == 'put' and\n request.path.endswith('buckets/default'))\n\n if not bucket_put:\n subrequest = build_request(request, {\n 'method': 'PUT',\n 'path': '/buckets/%s' % bucket_id,\n 'body': {\"data\": {}},\n 'headers': {'If-None-Match': '*'.encode('utf-8')}\n })\n\n try:\n request.invoke_subrequest(subrequest)\n except HTTPPreconditionFailed:\n # The bucket already exists\n pass\n\n\ndef create_collection(request, bucket_id):\n subpath = request.matchdict.get('subpath')\n if subpath and subpath.startswith('collections/'):\n collection_id = subpath.split('/')[1]\n collection_put = (request.method.lower() == 'put' and\n request.path.endswith(collection_id))\n if not collection_put:\n subrequest = build_request(request, {\n 'method': 'PUT',\n 'path': '/buckets/%s/collections/%s' % (\n bucket_id, collection_id),\n 'body': {\"data\": {}},\n 'headers': {'If-None-Match': '*'.encode('utf-8')}\n })\n try:\n request.invoke_subrequest(subrequest)\n except HTTPPreconditionFailed:\n # The collection already exists\n pass\n\n\n@view_config(route_name='default_bucket', permission=NO_PERMISSION_REQUIRED)\n@view_config(route_name='default_bucket_collection',\n permission=NO_PERMISSION_REQUIRED)\ndef default_bucket(request):\n if request.method.lower() == 'options':\n path = request.path.replace('default', 'unknown')\n subrequest = build_request(request, {\n 'method': 'OPTIONS',\n 'path': path\n })\n return request.invoke_subrequest(subrequest)\n\n if getattr(request, 'prefixed_userid', None) is None:\n raise HTTPForbidden # Pass through the forbidden_view_config\n\n settings = request.registry.settings\n hmac_secret = settings['cliquet.userid_hmac_secret']\n # Build the user unguessable bucket_id UUID from its user_id\n digest = hmac_digest(hmac_secret, request.prefixed_userid)\n bucket_id = text_type(UUID(digest[:32]))\n path = request.path.replace('/buckets/default', '/buckets/%s' % bucket_id)\n querystring = request.url[(request.url.index(request.path) +\n len(request.path)):]\n\n # Make sure bucket exists\n create_bucket(request, bucket_id)\n\n # Make sure the collection exists\n create_collection(request, bucket_id)\n\n subrequest = build_request(request, {\n 'method': request.method,\n 'path': path + querystring,\n 'body': request.body\n })\n\n try:\n response = request.invoke_subrequest(subrequest)\n except HTTPException as error:\n response = reapply_cors(subrequest, error)\n return response\n\n\[email protected](name='bucket',\n collection_methods=('GET',),\n collection_path='/buckets',\n record_path='/buckets/{{id}}')\nclass Bucket(resource.ProtectedResource):\n permissions = ('read', 'write', 'collection:create', 'group:create')\n\n def __init__(self, *args, **kwargs):\n super(Bucket, self).__init__(*args, **kwargs)\n self.collection.id_generator = NameGenerator()\n\n def get_parent_id(self, request):\n # Buckets are not isolated by user, unlike Cliquet resources.\n return ''\n\n def delete(self):\n result = super(Bucket, self).delete()\n\n # Delete groups.\n storage = 
self.collection.storage\n parent_id = '/buckets/%s' % self.record_id\n storage.delete_all(collection_id='group',\n parent_id=parent_id,\n with_deleted=False)\n storage.purge_deleted(collection_id='group',\n parent_id=parent_id)\n\n # Delete collections.\n deleted = storage.delete_all(collection_id='collection',\n parent_id=parent_id,\n with_deleted=False)\n storage.purge_deleted(collection_id='collection',\n parent_id=parent_id)\n\n # Delete records.\n id_field = self.collection.id_field\n for collection in deleted:\n parent_id = '/buckets/%s/collections/%s' % (self.record_id,\n collection[id_field])\n storage.delete_all(collection_id='record',\n parent_id=parent_id,\n with_deleted=False)\n storage.purge_deleted(collection_id='record', parent_id=parent_id)\n\n return result\n",
"path": "kinto/views/buckets.py"
}
] | [
{
"content": "from six import text_type\nfrom uuid import UUID\n\nfrom pyramid.httpexceptions import (HTTPForbidden, HTTPPreconditionFailed,\n HTTPException)\nfrom pyramid.security import NO_PERMISSION_REQUIRED\nfrom pyramid.view import view_config\n\nfrom cliquet import resource\nfrom cliquet.utils import hmac_digest, build_request, reapply_cors\n\nfrom kinto.views import NameGenerator\n\n\ndef create_bucket(request, bucket_id):\n \"\"\"Create a bucket if it doesn't exists.\"\"\"\n bucket_put = (request.method.lower() == 'put' and\n request.path.endswith('buckets/default'))\n\n if not bucket_put:\n subrequest = build_request(request, {\n 'method': 'PUT',\n 'path': '/buckets/%s' % bucket_id,\n 'body': {\"data\": {}},\n 'headers': {'If-None-Match': '*'.encode('utf-8')}\n })\n\n try:\n request.invoke_subrequest(subrequest)\n except HTTPPreconditionFailed:\n # The bucket already exists\n pass\n\n\ndef create_collection(request, bucket_id):\n subpath = request.matchdict.get('subpath')\n if subpath and subpath.startswith('collections/'):\n collection_id = subpath.split('/')[1]\n collection_put = (request.method.lower() == 'put' and\n request.path.endswith(collection_id))\n if not collection_put:\n subrequest = build_request(request, {\n 'method': 'PUT',\n 'path': '/buckets/%s/collections/%s' % (\n bucket_id, collection_id),\n 'body': {\"data\": {}},\n 'headers': {'If-None-Match': '*'.encode('utf-8')}\n })\n try:\n request.invoke_subrequest(subrequest)\n except HTTPPreconditionFailed:\n # The collection already exists\n pass\n\n\n@view_config(route_name='default_bucket', permission=NO_PERMISSION_REQUIRED)\n@view_config(route_name='default_bucket_collection',\n permission=NO_PERMISSION_REQUIRED)\ndef default_bucket(request):\n if request.method.lower() == 'options':\n path = request.path.replace('default', 'unknown')\n subrequest = build_request(request, {\n 'method': 'OPTIONS',\n 'path': path\n })\n return request.invoke_subrequest(subrequest)\n\n if getattr(request, 'prefixed_userid', None) is None:\n raise HTTPForbidden # Pass through the forbidden_view_config\n\n settings = request.registry.settings\n hmac_secret = settings['cliquet.userid_hmac_secret']\n # Build the user unguessable bucket_id UUID from its user_id\n digest = hmac_digest(hmac_secret, request.prefixed_userid)\n bucket_id = text_type(UUID(digest[:32]))\n path = request.path.replace('/buckets/default', '/buckets/%s' % bucket_id)\n querystring = request.url[(request.url.index(request.path) +\n len(request.path)):]\n\n # Make sure bucket exists\n create_bucket(request, bucket_id)\n\n # Make sure the collection exists\n create_collection(request, bucket_id)\n\n subrequest = build_request(request, {\n 'method': request.method,\n 'path': path + querystring,\n 'body': request.body\n })\n\n try:\n response = request.invoke_subrequest(subrequest)\n except HTTPException as error:\n response = reapply_cors(subrequest, error)\n return response\n\n\[email protected](name='bucket',\n collection_methods=('GET', 'POST'),\n collection_path='/buckets',\n record_path='/buckets/{{id}}')\nclass Bucket(resource.ProtectedResource):\n permissions = ('read', 'write', 'collection:create', 'group:create')\n\n def __init__(self, *args, **kwargs):\n super(Bucket, self).__init__(*args, **kwargs)\n self.collection.id_generator = NameGenerator()\n\n def get_parent_id(self, request):\n # Buckets are not isolated by user, unlike Cliquet resources.\n return ''\n\n def delete(self):\n result = super(Bucket, self).delete()\n\n # Delete groups.\n storage = 
self.collection.storage\n parent_id = '/buckets/%s' % self.record_id\n storage.delete_all(collection_id='group',\n parent_id=parent_id,\n with_deleted=False)\n storage.purge_deleted(collection_id='group',\n parent_id=parent_id)\n\n # Delete collections.\n deleted = storage.delete_all(collection_id='collection',\n parent_id=parent_id,\n with_deleted=False)\n storage.purge_deleted(collection_id='collection',\n parent_id=parent_id)\n\n # Delete records.\n id_field = self.collection.id_field\n for collection in deleted:\n parent_id = '/buckets/%s/collections/%s' % (self.record_id,\n collection[id_field])\n storage.delete_all(collection_id='record',\n parent_id=parent_id,\n with_deleted=False)\n storage.purge_deleted(collection_id='record', parent_id=parent_id)\n\n return result\n",
"path": "kinto/views/buckets.py"
}
] | diff --git a/docs/api/buckets.rst b/docs/api/buckets.rst
index f54270d3a..a352c4cce 100644
--- a/docs/api/buckets.rst
+++ b/docs/api/buckets.rst
@@ -10,11 +10,71 @@ A bucket is a mapping with the following attributes:
* ``permissions``: (*optional*) the :term:`ACLs <ACL>` for the bucket object
-.. _buckets-put:
+.. _buckets-post:
Creating a bucket
=================
+.. http:post:: /buckets
+
+ :synopsis: Creates a new bucket. If ``id`` is not provided, it is automatically generated.
+
+ **Requires authentication**
+
+ **Example Request**
+
+ .. sourcecode:: bash
+
+ $ echo '{"data": {"id": "blog"}}' | http POST http://localhost:8888/v1/buckets --auth="bob:" --verbose
+
+ .. sourcecode:: http
+
+ POST /v1/buckets HTTP/1.1
+ Accept: application/json
+ Accept-Encoding: gzip, deflate
+ Authorization: Basic Ym9iOg==
+ Connection: keep-alive
+ Content-Length: 25
+ Content-Type: application/json; charset=utf-8
+ Host: localhost:8888
+ User-Agent: HTTPie/0.8.0
+
+ {
+ "data": {
+ "id": "blog"
+ }
+ }
+
+
+ **Example Response**
+
+ .. sourcecode:: http
+
+ HTTP/1.1 201 Created
+ Access-Control-Expose-Headers: Backoff, Retry-After, Alert, Content-Length
+ Content-Length: 155
+ Content-Type: application/json; charset=UTF-8
+ Date: Thu, 10 Sep 2015 08:34:32 GMT
+ Server: waitress
+
+ {
+ "data": {
+ "id": "blog",
+ "last_modified": 1441874072429
+ },
+ "permissions": {
+ "write": [
+ "basicauth:206691a25679e4e1135f16aa77ebcf211c767393c4306cfffe6cc228ac0886b6"
+ ]
+ }
+ }
+
+
+.. _bucket-put:
+
+Replacing a bucket
+==================
+
.. http:put:: /buckets/(bucket_id)
:synopsis: Creates or replaces a bucket with a specific ID.
@@ -126,12 +186,12 @@ Retrieve an existing bucket
}
-.. _bucket-put:
+.. _bucket-patch:
Updating an existing bucket
===========================
-.. http:put:: /buckets/(bucket_id)
+.. http:patch:: /buckets/(bucket_id)
:synopsis: Modifies an existing bucket.
@@ -189,3 +249,54 @@ Deleting a bucket
"last_modified": 1434641382954
}
}
+
+
+.. _buckets-get:
+
+Retrieving all buckets
+======================
+
+.. http:get:: /buckets
+
+ :synopsis: Returns the list of accessible buckets
+
+ **Requires authentication**
+
+ **Example Request**
+
+ .. sourcecode:: bash
+
+ $ http get http://localhost:8888/v1/buckets --auth="bob:" --verbose
+
+ .. sourcecode:: http
+
+ GET /v1/buckets HTTP/1.1
+ Accept: */*
+ Accept-Encoding: gzip, deflate
+ Authorization: Basic Ym9iOg==
+ Connection: keep-alive
+ Host: localhost:8888
+ User-Agent: HTTPie/0.8.0
+
+ **Example Response**
+
+ .. sourcecode:: http
+
+ HTTP/1.1 200 OK
+ Access-Control-Expose-Headers: Backoff, Retry-After, Alert, Content-Length, Next-Page, Total-Records, Last-Modified, ETag
+ Content-Length: 54
+ Content-Type: application/json; charset=UTF-8
+ Date: Thu, 10 Sep 2015 08:37:32 GMT
+ Etag: "1441874072429"
+ Last-Modified: Thu, 10 Sep 2015 08:34:32 GMT
+ Server: waitress
+ Total-Records: 1
+
+ {
+ "data": [
+ {
+ "id": "blog",
+ "last_modified": 1441874072429
+ }
+ ]
+ }
diff --git a/docs/api/index.rst b/docs/api/index.rst
index 36a8dc0aa..163fd342f 100644
--- a/docs/api/index.rst
+++ b/docs/api/index.rst
@@ -19,6 +19,10 @@ Cheatsheet
+----------+----------------------------------------------------------------------------------------------+---------------------------------------------------------+
| **Buckets** |
+----------+----------------------------------------------------------------------------------------------+---------------------------------------------------------+
+| `POST` | :ref:`/buckets <buckets-post>` | :ref:`Create a bucket <buckets-post>` |
++----------+----------------------------------------------------------------------------------------------+---------------------------------------------------------+
+| `GET` | :ref:`/buckets <buckets-get>` | :ref:`List buckets <buckets-get>` |
++----------+----------------------------------------------------------------------------------------------+---------------------------------------------------------+
| `PUT` | :ref:`/buckets/(bucket_id) <bucket-put>` | :ref:`Create or replace a bucket <bucket-put>` |
+----------+----------------------------------------------------------------------------------------------+---------------------------------------------------------+
| `GET` | :ref:`/buckets/(bucket_id) <bucket-get>` | :ref:`Retrieve an existing bucket <bucket-get>` |
diff --git a/kinto/tests/test_views_buckets.py b/kinto/tests/test_views_buckets.py
index 7c3e1b044..0cc7b0552 100644
--- a/kinto/tests/test_views_buckets.py
+++ b/kinto/tests/test_views_buckets.py
@@ -23,10 +23,6 @@ def test_buckets_are_global_to_every_users(self):
headers=self.headers)
self.app.get(self.record_url, headers=get_user_headers('alice'))
- def test_buckets_do_not_support_post(self):
- self.app.post(self.collection_url, headers=self.headers,
- status=405)
-
def test_buckets_can_be_put_with_simple_name(self):
self.assertEqual(self.record['id'], 'beers')
@@ -75,6 +71,21 @@ def test_wrong_create_permissions_cannot_be_added_on_buckets(self):
status=400)
+class BucketCreationTest(BaseWebTest, unittest.TestCase):
+ def test_buckets_can_be_created_with_post(self):
+ r = self.app.post_json('/buckets',
+ MINIMALIST_BUCKET,
+ headers=self.headers)
+ self.assertEqual(r.status_code, 201)
+
+ def test_bucket_id_can_be_specified_in_post(self):
+ bucket = 'blog'
+ r = self.app.post_json('/buckets',
+ {'data': {'id': bucket}},
+ headers=self.headers)
+ self.assertEqual(r.json['data']['id'], bucket)
+
+
class BucketReadPermissionTest(BaseWebTest, unittest.TestCase):
collection_url = '/buckets'
diff --git a/kinto/views/buckets.py b/kinto/views/buckets.py
index 5f6ea37e2..d592a99ff 100644
--- a/kinto/views/buckets.py
+++ b/kinto/views/buckets.py
@@ -97,7 +97,7 @@ def default_bucket(request):
@resource.register(name='bucket',
- collection_methods=('GET',),
+ collection_methods=('GET', 'POST'),
collection_path='/buckets',
record_path='/buckets/{{id}}')
class Bucket(resource.ProtectedResource):
|
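With `POST` added to `collection_methods`, clients can create buckets in two ways: let the server's `NameGenerator` mint an id, or carry an id in the payload (both flows are shown in the documentation diff above). A minimal client-side sketch using `requests`; the host and credentials are placeholders taken from the docs examples.

```python
# Sketch of the two POST /buckets flows enabled by this change.
import requests

BASE = "http://localhost:8888/v1"     # placeholder, as in the docs examples
AUTH = ("bob", "")                    # placeholder credentials

# 1) Server-generated id via the resource's NameGenerator:
r = requests.post(BASE + "/buckets", json={"data": {}}, auth=AUTH)
print(r.status_code, r.json()["data"]["id"])   # 201 and the generated id

# 2) Client-chosen id carried in the body:
r = requests.post(BASE + "/buckets", json={"data": {"id": "blog"}}, auth=AUTH)
assert r.json()["data"]["id"] == "blog"
```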
sonic-net__sonic-utilities-1691 | [PFCWD] The incorrect cli command doesn't return failed exit code 1
**Description**
An invalid PFCWD CLI command does not return the failure exit code 1.
**Steps to reproduce**
1) Run the cli command:
```
config pfcwd start --action drop ports all deteection-time 200 --restoration-time 200 ; echo AUTO_REMOTE_STATUS=$?
```
Observed behavior
```
root@r-qa-sw-eth-114:/home/admin# config pfcwd start --action drop ports all deteection-time 200 --restoration-time 200 ; echo AUTO_REMOTE_STATUS=$?
Failed to run command, invalid options:
deteection-time
AUTO_REMOTE_STATUS=0
```
Expected behavior
```
root@r-qa-sw-eth-114:/home/admin# config pfcwd start --action drop ports all deteection-time 200 --restoration-time 200 ; echo AUTO_REMOTE_STATUS=$?
Failed to run command, invalid options:
deteection-time
AUTO_REMOTE_STATUS=1
```
**Output of show version**
```
SONiC Software Version: SONiC.202012.81-2fc748e_Internal
Distribution: Debian 10.9
Kernel: 4.19.0-12-2-amd64
Build commit: 2fc748ea
Build date: Thu May 6 20:42:29 UTC 2021
Built by: sw-r2d2-bot@r-build-sonic-ci02
Platform: x86_64-mlnx_msn3800-r0
HwSKU: ACS-MSN3800
ASIC: mellanox
ASIC Count: 1
Serial Number: MT1925X00004
```
[sonic_dump_r-qa-sw-eth-2132_20210531_113146.tar.gz](https://github.com/Azure/sonic-buildimage/files/6569960/sonic_dump_r-qa-sw-eth-2132_20210531_113146.tar.gz)
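The pattern behind the bug is visible in the package source below: the invalid-options error path calls `exit()` with no argument, and a bare `exit()` (i.e. `SystemExit(None)`) reports status 0 to the shell, so `$?` sees success. A minimal standalone illustration, independent of the sonic-utilities code:

```python
# Why "Failed to run command" can still yield AUTO_REMOTE_STATUS=0:
# exit() / sys.exit() with no argument raises SystemExit(None), which the
# interpreter maps to exit status 0 ("success"). A non-zero status must be
# passed explicitly.
import sys

def report_invalid(opts):
    print("Failed to run command, invalid options:", file=sys.stderr)
    for opt in opts:
        print(opt, file=sys.stderr)
    sys.exit(1)   # explicit non-zero status; a bare exit() would return 0
```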
| [
{
"content": "import importlib\nimport os\nimport sys\n\nimport click\nimport utilities_common.cli as clicommon\nfrom natsort import natsorted\nfrom sonic_py_common.multi_asic import get_external_ports\nfrom tabulate import tabulate\nfrom utilities_common import multi_asic as multi_asic_util\nfrom utilities_common import constants\nfrom sonic_py_common import logger\n\nSYSLOG_IDENTIFIER = \"config\"\n\nlog = logger.Logger(SYSLOG_IDENTIFIER)\n\n# mock the redis for unit test purposes #\ntry:\n if os.environ[\"UTILITIES_UNIT_TESTING\"] == \"2\":\n modules_path = os.path.join(os.path.dirname(__file__), \"..\")\n tests_path = os.path.join(modules_path, \"tests\")\n sys.path.insert(0, modules_path)\n sys.path.insert(0, tests_path)\n import mock_tables.dbconnector\n if os.environ[\"UTILITIES_UNIT_TESTING_TOPOLOGY\"] == \"multi_asic\":\n import mock_tables.mock_multi_asic\n importlib.reload(mock_tables.mock_multi_asic)\n mock_tables.dbconnector.load_namespace_config()\n\nexcept KeyError:\n pass\n\n# Default configuration\nDEFAULT_DETECTION_TIME = 200\nDEFAULT_RESTORATION_TIME = 200\nDEFAULT_POLL_INTERVAL = 200\nDEFAULT_PORT_NUM = 32\nDEFAULT_ACTION = 'drop'\n\nSTATS_DESCRIPTION = [\n ('STORM DETECTED/RESTORED', 'PFC_WD_QUEUE_STATS_DEADLOCK_DETECTED', 'PFC_WD_QUEUE_STATS_DEADLOCK_RESTORED'),\n ('TX OK/DROP', 'PFC_WD_QUEUE_STATS_TX_PACKETS', 'PFC_WD_QUEUE_STATS_TX_DROPPED_PACKETS'),\n ('RX OK/DROP', 'PFC_WD_QUEUE_STATS_RX_PACKETS', 'PFC_WD_QUEUE_STATS_RX_DROPPED_PACKETS'),\n ('TX LAST OK/DROP', 'PFC_WD_QUEUE_STATS_TX_PACKETS_LAST', 'PFC_WD_QUEUE_STATS_TX_DROPPED_PACKETS_LAST'),\n ('RX LAST OK/DROP', 'PFC_WD_QUEUE_STATS_RX_PACKETS_LAST', 'PFC_WD_QUEUE_STATS_RX_DROPPED_PACKETS_LAST'),\n]\n\nCONFIG_DESCRIPTION = [\n ('ACTION', 'action', 'drop'),\n ('DETECTION TIME', 'detection_time', 'N/A'),\n ('RESTORATION TIME', 'restoration_time', 'infinite')\n]\n\nSTATS_HEADER = ('QUEUE', 'STATUS',) + list(zip(*STATS_DESCRIPTION))[0]\nCONFIG_HEADER = ('PORT',) + list(zip(*CONFIG_DESCRIPTION))[0]\n\nCONFIG_DB_PFC_WD_TABLE_NAME = 'PFC_WD'\nPORT_QOS_MAP = \"PORT_QOS_MAP\"\n\n# Main entrypoint\[email protected]()\ndef cli():\n \"\"\" SONiC PFC Watchdog \"\"\"\n\n\ndef get_all_queues(db, namespace=None, display=constants.DISPLAY_ALL):\n queue_names = db.get_all(db.COUNTERS_DB, 'COUNTERS_QUEUE_NAME_MAP')\n queues = list(queue_names.keys()) if queue_names else {}\n if display == constants.DISPLAY_ALL:\n return natsorted(queues)\n # filter the backend ports\n display_ports = [q.split(\":\")[0] for q in queues]\n display_ports = get_external_ports(display_ports, namespace)\n queues = [q for q in queues if q.split(\":\")[0] in display_ports]\n return natsorted(queues)\n\n\ndef get_all_ports(db, namespace=None, display=constants.DISPLAY_ALL):\n all_port_names = db.get_all(db.COUNTERS_DB, 'COUNTERS_PORT_NAME_MAP')\n\n # Get list of physical ports\n port_names = {}\n for i in all_port_names:\n if i.startswith('Ethernet'):\n port_names[i] = all_port_names[i]\n display_ports = list(port_names.keys())\n if display == constants.DISPLAY_EXTERNAL:\n display_ports = get_external_ports(display_ports, namespace)\n return natsorted(display_ports)\n\n\ndef get_server_facing_ports(db):\n candidates = db.get_table('DEVICE_NEIGHBOR')\n server_facing_ports = []\n for port in candidates:\n neighbor = db.get_entry(\n 'DEVICE_NEIGHBOR_METADATA', candidates[port]['name']\n )\n if neighbor and neighbor['type'].lower() == 'server':\n server_facing_ports.append(port)\n if not server_facing_ports:\n server_facing_ports = [p[1] for p in 
db.get_table('VLAN_MEMBER')]\n return server_facing_ports\n\n\nclass PfcwdCli(object):\n def __init__(\n self, db=None, namespace=None, display=constants.DISPLAY_ALL\n ):\n self.db = None\n self.config_db = None\n self.multi_asic = multi_asic_util.MultiAsic(\n display, namespace, db\n )\n self.table = []\n self.all_ports = []\n\n @multi_asic_util.run_on_multi_asic\n def collect_stats(self, empty, queues):\n table = []\n\n if len(queues) == 0:\n queues = get_all_queues(\n self.db,\n self.multi_asic.current_namespace,\n self.multi_asic.display_option\n )\n\n for queue in queues:\n stats_list = []\n queue_oid = self.db.get(\n self.db.COUNTERS_DB, 'COUNTERS_QUEUE_NAME_MAP', queue\n )\n if queue_oid is None:\n continue\n stats = self.db.get_all(\n self.db.COUNTERS_DB, 'COUNTERS:' + queue_oid\n )\n if stats is None:\n continue\n for stat in STATS_DESCRIPTION:\n line = stats.get(stat[1], '0') + '/' + stats.get(stat[2], '0')\n stats_list.append(line)\n if stats_list != ['0/0'] * len(STATS_DESCRIPTION) or empty:\n table.append(\n [queue, stats.get('PFC_WD_STATUS', 'N/A')] + stats_list\n )\n\n self.table += table\n\n def show_stats(self, empty, queues):\n del self.table[:]\n self.collect_stats(empty, queues)\n click.echo(tabulate(\n self.table, STATS_HEADER, stralign='right', numalign='right',\n tablefmt='simple'\n ))\n\n @multi_asic_util.run_on_multi_asic\n def get_all_namespace_ports(self):\n ports = get_all_ports(\n self.db, self.multi_asic.current_namespace,\n self.multi_asic.display_option\n )\n self.all_ports.extend(ports)\n\n def get_invalid_ports(self, ports=[]):\n if len(ports) == 0:\n return []\n self.get_all_namespace_ports()\n port_set = set(ports)\n # \"all\" is a valid option, remove before performing set diff\n port_set.discard(\"all\")\n return port_set - set(self.all_ports)\n\n @multi_asic_util.run_on_multi_asic\n def collect_config(self, ports):\n table = []\n\n if len(ports) == 0:\n ports = get_all_ports(\n self.db, self.multi_asic.current_namespace,\n self.multi_asic.display_option\n )\n\n ports_found = False\n for port in ports:\n config_list = []\n config_entry = self.config_db.get_entry(\n CONFIG_DB_PFC_WD_TABLE_NAME, port\n )\n if config_entry is None or config_entry == {}:\n continue\n ports_found = True\n for config in CONFIG_DESCRIPTION:\n line = config_entry.get(config[1], config[2])\n config_list.append(line)\n table.append([port] + config_list)\n\n if not ports_found:\n return\n\n poll_interval = self.config_db.get_entry(\n CONFIG_DB_PFC_WD_TABLE_NAME, 'GLOBAL'\n ).get('POLL_INTERVAL')\n\n current_ns = self.multi_asic.current_namespace\n asic_namesapce = \\\n \"\" if current_ns is None or current_ns == \"\" else \" on {}\".format(\n current_ns\n )\n if poll_interval is not None:\n click.echo(\n \"Changed polling interval to {}ms{}\".format(\n poll_interval, asic_namesapce\n )\n )\n\n big_red_switch = self.config_db.get_entry(\n CONFIG_DB_PFC_WD_TABLE_NAME, 'GLOBAL'\n ).get('BIG_RED_SWITCH')\n\n if big_red_switch is not None:\n click.echo(\"BIG_RED_SWITCH status is {}{}\".format(\n big_red_switch, asic_namesapce\n ))\n\n self.table += table\n\n def config(self, ports):\n del self.table[:]\n self.collect_config(ports)\n click.echo(tabulate(\n self.table, CONFIG_HEADER, stralign='right', numalign='right',\n tablefmt='simple'\n ))\n\n def start(self, action, restoration_time, ports, detection_time):\n invalid_ports = self.get_invalid_ports(ports)\n if len(invalid_ports):\n click.echo(\"Failed to run command, invalid options:\")\n for opt in invalid_ports:\n 
click.echo(opt)\n exit()\n self.start_cmd(action, restoration_time, ports, detection_time)\n\n\n def verify_pfc_enable_status_per_port(self, port, pfcwd_info):\n pfc_status = self.config_db.get_entry(PORT_QOS_MAP, port).get('pfc_enable')\n if pfc_status is None:\n log.log_warning(\"SKIPPED: PFC is not enabled on port: {}\".format(port), also_print_to_console=True)\n return\n\n self.config_db.mod_entry(\n CONFIG_DB_PFC_WD_TABLE_NAME, port, None\n )\n self.config_db.mod_entry(\n CONFIG_DB_PFC_WD_TABLE_NAME, port, pfcwd_info\n )\n\n @multi_asic_util.run_on_multi_asic\n def start_cmd(self, action, restoration_time, ports, detection_time):\n if os.geteuid() != 0:\n exit(\"Root privileges are required for this operation\")\n\n all_ports = get_all_ports(\n self.db, self.multi_asic.current_namespace,\n self.multi_asic.display_option\n )\n\n if len(ports) == 0:\n ports = all_ports\n\n pfcwd_info = {\n 'detection_time': detection_time,\n }\n if action is not None:\n pfcwd_info['action'] = action\n if restoration_time is not None:\n pfcwd_info['restoration_time'] = restoration_time\n else:\n pfcwd_info['restoration_time'] = 2 * detection_time\n click.echo(\n \"restoration time not defined; default to 2 times \"\n \"detection time: {} ms\".format(2 * detection_time)\n )\n\n for port in ports:\n if port == \"all\":\n for p in all_ports:\n self.verify_pfc_enable_status_per_port(p, pfcwd_info)\n else:\n if port not in all_ports:\n continue\n self.verify_pfc_enable_status_per_port(port, pfcwd_info)\n\n @multi_asic_util.run_on_multi_asic\n def interval(self, poll_interval):\n if os.geteuid() != 0:\n exit(\"Root privileges are required for this operation\")\n pfcwd_info = {}\n if poll_interval is not None:\n pfcwd_table = self.config_db.get_table(CONFIG_DB_PFC_WD_TABLE_NAME)\n entry_min = 3000\n for entry in pfcwd_table:\n if(\"Ethernet\" not in entry):\n continue\n detection_time_entry_value = int(self.config_db.get_entry(\n CONFIG_DB_PFC_WD_TABLE_NAME, entry\n ).get('detection_time'))\n restoration_time_entry_value = int(self.config_db.get_entry(\n CONFIG_DB_PFC_WD_TABLE_NAME, entry\n ).get('restoration_time'))\n if ((detection_time_entry_value is not None) and\n (detection_time_entry_value < entry_min)\n ):\n entry_min = detection_time_entry_value\n entry_min_str = \"detection time\"\n if ((restoration_time_entry_value is not None) and\n (restoration_time_entry_value < entry_min)\n ):\n entry_min = restoration_time_entry_value\n entry_min_str = \"restoration time\"\n if entry_min < poll_interval:\n click.echo(\n \"unable to use polling interval = {}ms, value is \"\n \"bigger than one of the configured {} values, \"\n \"please choose a smaller polling_interval\".format(\n poll_interval, entry_min_str\n ), err=True\n )\n exit(1)\n\n pfcwd_info['POLL_INTERVAL'] = poll_interval\n self.config_db.mod_entry(\n CONFIG_DB_PFC_WD_TABLE_NAME, \"GLOBAL\", pfcwd_info\n )\n\n @multi_asic_util.run_on_multi_asic\n def stop(self, ports):\n if os.geteuid() != 0:\n exit(\"Root privileges are required for this operation\")\n\n all_ports = get_all_ports(\n self.db, self.multi_asic.current_namespace,\n self.multi_asic.display_option\n )\n\n if len(ports) == 0:\n ports = all_ports\n\n for port in ports:\n if port not in all_ports:\n continue\n self.config_db.mod_entry(CONFIG_DB_PFC_WD_TABLE_NAME, port, None)\n\n @multi_asic_util.run_on_multi_asic\n def start_default(self):\n if os.geteuid() != 0:\n exit(\"Root privileges are required for this operation\")\n enable = self.config_db.get_entry('DEVICE_METADATA', 
'localhost').get(\n 'default_pfcwd_status'\n )\n\n # Get active ports from Config DB\n active_ports = natsorted(\n list(self.config_db.get_table('DEVICE_NEIGHBOR').keys())\n )\n\n if not enable or enable.lower() != \"enable\":\n return\n\n port_num = len(list(self.config_db.get_table('PORT').keys()))\n\n # Paramter values positively correlate to the number of ports.\n multiply = max(1, (port_num-1)//DEFAULT_PORT_NUM+1)\n pfcwd_info = {\n 'detection_time': DEFAULT_DETECTION_TIME * multiply,\n 'restoration_time': DEFAULT_RESTORATION_TIME * multiply,\n 'action': DEFAULT_ACTION\n }\n\n for port in active_ports:\n self.verify_pfc_enable_status_per_port(port, pfcwd_info)\n\n pfcwd_info = {}\n pfcwd_info['POLL_INTERVAL'] = DEFAULT_POLL_INTERVAL * multiply\n self.config_db.mod_entry(\n CONFIG_DB_PFC_WD_TABLE_NAME, \"GLOBAL\", pfcwd_info\n )\n\n @multi_asic_util.run_on_multi_asic\n def counter_poll(self, counter_poll):\n if os.geteuid() != 0:\n exit(\"Root privileges are required for this operation\")\n pfcwd_info = {}\n pfcwd_info['FLEX_COUNTER_STATUS'] = counter_poll\n self.config_db.mod_entry(\"FLEX_COUNTER_TABLE\", \"PFCWD\", pfcwd_info)\n\n @multi_asic_util.run_on_multi_asic\n def big_red_switch(self, big_red_switch):\n if os.geteuid() != 0:\n exit(\"Root privileges are required for this operation\")\n pfcwd_info = {}\n if big_red_switch is not None:\n pfcwd_info['BIG_RED_SWITCH'] = big_red_switch\n self.config_db.mod_entry(\n CONFIG_DB_PFC_WD_TABLE_NAME, \"GLOBAL\",\n pfcwd_info\n )\n\n\n# Show stats\nclass Show(object):\n # Show commands\n @cli.group()\n def show():\n \"\"\" Show PFC Watchdog information\"\"\"\n\n @show.command()\n @multi_asic_util.multi_asic_click_options\n @click.option('-e', '--empty', is_flag=True)\n @click.argument('queues', nargs=-1)\n @clicommon.pass_db\n def stats(db, namespace, display, empty, queues):\n \"\"\" Show PFC Watchdog stats per queue \"\"\"\n if (len(queues)):\n display = constants.DISPLAY_ALL\n PfcwdCli(db, namespace, display).show_stats(empty, queues)\n\n # Show config\n @show.command()\n @multi_asic_util.multi_asic_click_options\n @click.argument('ports', nargs=-1)\n @clicommon.pass_db\n def config(db, namespace, display, ports):\n \"\"\" Show PFC Watchdog configuration \"\"\"\n PfcwdCli(db, namespace, display).config(ports)\n\n\n# Start WD\nclass Start(object):\n @cli.command()\n @click.option(\n '--action', '-a', type=click.Choice(['drop', 'forward', 'alert'])\n )\n @click.option('--restoration-time', '-r', type=click.IntRange(100, 60000))\n @click.argument('ports', nargs=-1)\n @click.argument('detection-time', type=click.IntRange(100, 5000))\n @clicommon.pass_db\n def start(db, action, restoration_time, ports, detection_time):\n \"\"\"\n Start PFC watchdog on port(s). 
To config all ports, use all as input.\n\n Example:\n\n sudo pfcwd start --action drop ports all detection-time 400 --restoration-time 400\n\n \"\"\"\n PfcwdCli(db).start(\n action, restoration_time, ports, detection_time\n )\n\n\n# Set WD poll interval\nclass Interval(object):\n @cli.command()\n @click.argument('poll_interval', type=click.IntRange(100, 3000))\n @clicommon.pass_db\n def interval(db, poll_interval):\n \"\"\" Set PFC watchdog counter polling interval \"\"\"\n PfcwdCli(db).interval(poll_interval)\n\n\n# Stop WD\nclass Stop(object):\n @cli.command()\n @click.argument('ports', nargs=-1)\n @clicommon.pass_db\n def stop(db, ports):\n \"\"\" Stop PFC watchdog on port(s) \"\"\"\n PfcwdCli(db).stop(ports)\n\n\n# Set WD default configuration on server facing ports when enable flag is on\nclass StartDefault(object):\n @cli.command(\"start_default\")\n @clicommon.pass_db\n def start_default(db):\n \"\"\" Start PFC WD by default configurations \"\"\"\n PfcwdCli(db).start_default()\n\n\n# Enable/disable PFC WD counter polling\nclass CounterPoll(object):\n @cli.command('counter_poll')\n @click.argument('counter_poll', type=click.Choice(['enable', 'disable']))\n @clicommon.pass_db\n def counter_poll(db, counter_poll):\n \"\"\" Enable/disable counter polling \"\"\"\n PfcwdCli(db).counter_poll(counter_poll)\n\n\n# Enable/disable PFC WD BIG_RED_SWITCH mode\nclass BigRedSwitch(object):\n @cli.command('big_red_switch')\n @click.argument('big_red_switch', type=click.Choice(['enable', 'disable']))\n @clicommon.pass_db\n def big_red_switch(db, big_red_switch):\n \"\"\" Enable/disable BIG_RED_SWITCH mode \"\"\"\n PfcwdCli(db).big_red_switch(big_red_switch)\n\n\ndef get_pfcwd_clis():\n cli.add_command(BigRedSwitch().big_red_switch)\n cli.add_command(CounterPoll().counter_poll)\n cli.add_command(StartDefault().start_default)\n cli.add_command(Stop().stop)\n cli.add_command(Interval().interval)\n cli.add_command(Start().start)\n cli.add_command(Show().show)\n return cli\n\n\nif __name__ == '__main__':\n cli = get_pfcwd_clis()\n cli()\n",
"path": "pfcwd/main.py"
}
] | [
{
"content": "import importlib\nimport os\nimport sys\n\nimport click\nimport utilities_common.cli as clicommon\nfrom natsort import natsorted\nfrom sonic_py_common.multi_asic import get_external_ports\nfrom tabulate import tabulate\nfrom utilities_common import multi_asic as multi_asic_util\nfrom utilities_common import constants\nfrom sonic_py_common import logger\n\nSYSLOG_IDENTIFIER = \"config\"\n\nlog = logger.Logger(SYSLOG_IDENTIFIER)\n\n# mock the redis for unit test purposes #\ntry:\n if os.environ[\"UTILITIES_UNIT_TESTING\"] == \"2\":\n modules_path = os.path.join(os.path.dirname(__file__), \"..\")\n tests_path = os.path.join(modules_path, \"tests\")\n sys.path.insert(0, modules_path)\n sys.path.insert(0, tests_path)\n import mock_tables.dbconnector\n if os.environ[\"UTILITIES_UNIT_TESTING_TOPOLOGY\"] == \"multi_asic\":\n import mock_tables.mock_multi_asic\n importlib.reload(mock_tables.mock_multi_asic)\n mock_tables.dbconnector.load_namespace_config()\n\nexcept KeyError:\n pass\n\n# Default configuration\nDEFAULT_DETECTION_TIME = 200\nDEFAULT_RESTORATION_TIME = 200\nDEFAULT_POLL_INTERVAL = 200\nDEFAULT_PORT_NUM = 32\nDEFAULT_ACTION = 'drop'\n\nSTATS_DESCRIPTION = [\n ('STORM DETECTED/RESTORED', 'PFC_WD_QUEUE_STATS_DEADLOCK_DETECTED', 'PFC_WD_QUEUE_STATS_DEADLOCK_RESTORED'),\n ('TX OK/DROP', 'PFC_WD_QUEUE_STATS_TX_PACKETS', 'PFC_WD_QUEUE_STATS_TX_DROPPED_PACKETS'),\n ('RX OK/DROP', 'PFC_WD_QUEUE_STATS_RX_PACKETS', 'PFC_WD_QUEUE_STATS_RX_DROPPED_PACKETS'),\n ('TX LAST OK/DROP', 'PFC_WD_QUEUE_STATS_TX_PACKETS_LAST', 'PFC_WD_QUEUE_STATS_TX_DROPPED_PACKETS_LAST'),\n ('RX LAST OK/DROP', 'PFC_WD_QUEUE_STATS_RX_PACKETS_LAST', 'PFC_WD_QUEUE_STATS_RX_DROPPED_PACKETS_LAST'),\n]\n\nCONFIG_DESCRIPTION = [\n ('ACTION', 'action', 'drop'),\n ('DETECTION TIME', 'detection_time', 'N/A'),\n ('RESTORATION TIME', 'restoration_time', 'infinite')\n]\n\nSTATS_HEADER = ('QUEUE', 'STATUS',) + list(zip(*STATS_DESCRIPTION))[0]\nCONFIG_HEADER = ('PORT',) + list(zip(*CONFIG_DESCRIPTION))[0]\n\nCONFIG_DB_PFC_WD_TABLE_NAME = 'PFC_WD'\nPORT_QOS_MAP = \"PORT_QOS_MAP\"\n\n# Main entrypoint\[email protected]()\ndef cli():\n \"\"\" SONiC PFC Watchdog \"\"\"\n\n\ndef get_all_queues(db, namespace=None, display=constants.DISPLAY_ALL):\n queue_names = db.get_all(db.COUNTERS_DB, 'COUNTERS_QUEUE_NAME_MAP')\n queues = list(queue_names.keys()) if queue_names else {}\n if display == constants.DISPLAY_ALL:\n return natsorted(queues)\n # filter the backend ports\n display_ports = [q.split(\":\")[0] for q in queues]\n display_ports = get_external_ports(display_ports, namespace)\n queues = [q for q in queues if q.split(\":\")[0] in display_ports]\n return natsorted(queues)\n\n\ndef get_all_ports(db, namespace=None, display=constants.DISPLAY_ALL):\n all_port_names = db.get_all(db.COUNTERS_DB, 'COUNTERS_PORT_NAME_MAP')\n\n # Get list of physical ports\n port_names = {}\n for i in all_port_names:\n if i.startswith('Ethernet'):\n port_names[i] = all_port_names[i]\n display_ports = list(port_names.keys())\n if display == constants.DISPLAY_EXTERNAL:\n display_ports = get_external_ports(display_ports, namespace)\n return natsorted(display_ports)\n\n\ndef get_server_facing_ports(db):\n candidates = db.get_table('DEVICE_NEIGHBOR')\n server_facing_ports = []\n for port in candidates:\n neighbor = db.get_entry(\n 'DEVICE_NEIGHBOR_METADATA', candidates[port]['name']\n )\n if neighbor and neighbor['type'].lower() == 'server':\n server_facing_ports.append(port)\n if not server_facing_ports:\n server_facing_ports = [p[1] for p in 
db.get_table('VLAN_MEMBER')]\n return server_facing_ports\n\n\nclass PfcwdCli(object):\n def __init__(\n self, db=None, namespace=None, display=constants.DISPLAY_ALL\n ):\n self.db = None\n self.config_db = None\n self.multi_asic = multi_asic_util.MultiAsic(\n display, namespace, db\n )\n self.table = []\n self.all_ports = []\n\n @multi_asic_util.run_on_multi_asic\n def collect_stats(self, empty, queues):\n table = []\n\n if len(queues) == 0:\n queues = get_all_queues(\n self.db,\n self.multi_asic.current_namespace,\n self.multi_asic.display_option\n )\n\n for queue in queues:\n stats_list = []\n queue_oid = self.db.get(\n self.db.COUNTERS_DB, 'COUNTERS_QUEUE_NAME_MAP', queue\n )\n if queue_oid is None:\n continue\n stats = self.db.get_all(\n self.db.COUNTERS_DB, 'COUNTERS:' + queue_oid\n )\n if stats is None:\n continue\n for stat in STATS_DESCRIPTION:\n line = stats.get(stat[1], '0') + '/' + stats.get(stat[2], '0')\n stats_list.append(line)\n if stats_list != ['0/0'] * len(STATS_DESCRIPTION) or empty:\n table.append(\n [queue, stats.get('PFC_WD_STATUS', 'N/A')] + stats_list\n )\n\n self.table += table\n\n def show_stats(self, empty, queues):\n del self.table[:]\n self.collect_stats(empty, queues)\n click.echo(tabulate(\n self.table, STATS_HEADER, stralign='right', numalign='right',\n tablefmt='simple'\n ))\n\n @multi_asic_util.run_on_multi_asic\n def get_all_namespace_ports(self):\n ports = get_all_ports(\n self.db, self.multi_asic.current_namespace,\n self.multi_asic.display_option\n )\n self.all_ports.extend(ports)\n\n def get_invalid_ports(self, ports=[]):\n if len(ports) == 0:\n return []\n self.get_all_namespace_ports()\n port_set = set(ports)\n # \"all\" is a valid option, remove before performing set diff\n port_set.discard(\"all\")\n return port_set - set(self.all_ports)\n\n @multi_asic_util.run_on_multi_asic\n def collect_config(self, ports):\n table = []\n\n if len(ports) == 0:\n ports = get_all_ports(\n self.db, self.multi_asic.current_namespace,\n self.multi_asic.display_option\n )\n\n ports_found = False\n for port in ports:\n config_list = []\n config_entry = self.config_db.get_entry(\n CONFIG_DB_PFC_WD_TABLE_NAME, port\n )\n if config_entry is None or config_entry == {}:\n continue\n ports_found = True\n for config in CONFIG_DESCRIPTION:\n line = config_entry.get(config[1], config[2])\n config_list.append(line)\n table.append([port] + config_list)\n\n if not ports_found:\n return\n\n poll_interval = self.config_db.get_entry(\n CONFIG_DB_PFC_WD_TABLE_NAME, 'GLOBAL'\n ).get('POLL_INTERVAL')\n\n current_ns = self.multi_asic.current_namespace\n asic_namesapce = \\\n \"\" if current_ns is None or current_ns == \"\" else \" on {}\".format(\n current_ns\n )\n if poll_interval is not None:\n click.echo(\n \"Changed polling interval to {}ms{}\".format(\n poll_interval, asic_namesapce\n )\n )\n\n big_red_switch = self.config_db.get_entry(\n CONFIG_DB_PFC_WD_TABLE_NAME, 'GLOBAL'\n ).get('BIG_RED_SWITCH')\n\n if big_red_switch is not None:\n click.echo(\"BIG_RED_SWITCH status is {}{}\".format(\n big_red_switch, asic_namesapce\n ))\n\n self.table += table\n\n def config(self, ports):\n del self.table[:]\n self.collect_config(ports)\n click.echo(tabulate(\n self.table, CONFIG_HEADER, stralign='right', numalign='right',\n tablefmt='simple'\n ))\n\n def start(self, action, restoration_time, ports, detection_time):\n invalid_ports = self.get_invalid_ports(ports)\n if len(invalid_ports):\n click.echo(\"Failed to run command, invalid options:\")\n for opt in invalid_ports:\n 
click.echo(opt)\n exit(1)\n self.start_cmd(action, restoration_time, ports, detection_time)\n\n\n def verify_pfc_enable_status_per_port(self, port, pfcwd_info):\n pfc_status = self.config_db.get_entry(PORT_QOS_MAP, port).get('pfc_enable')\n if pfc_status is None:\n log.log_warning(\"SKIPPED: PFC is not enabled on port: {}\".format(port), also_print_to_console=True)\n return\n\n self.config_db.mod_entry(\n CONFIG_DB_PFC_WD_TABLE_NAME, port, None\n )\n self.config_db.mod_entry(\n CONFIG_DB_PFC_WD_TABLE_NAME, port, pfcwd_info\n )\n\n @multi_asic_util.run_on_multi_asic\n def start_cmd(self, action, restoration_time, ports, detection_time):\n if os.geteuid() != 0:\n exit(\"Root privileges are required for this operation\")\n\n all_ports = get_all_ports(\n self.db, self.multi_asic.current_namespace,\n self.multi_asic.display_option\n )\n\n if len(ports) == 0:\n ports = all_ports\n\n pfcwd_info = {\n 'detection_time': detection_time,\n }\n if action is not None:\n pfcwd_info['action'] = action\n if restoration_time is not None:\n pfcwd_info['restoration_time'] = restoration_time\n else:\n pfcwd_info['restoration_time'] = 2 * detection_time\n click.echo(\n \"restoration time not defined; default to 2 times \"\n \"detection time: {} ms\".format(2 * detection_time)\n )\n\n for port in ports:\n if port == \"all\":\n for p in all_ports:\n self.verify_pfc_enable_status_per_port(p, pfcwd_info)\n else:\n if port not in all_ports:\n continue\n self.verify_pfc_enable_status_per_port(port, pfcwd_info)\n\n @multi_asic_util.run_on_multi_asic\n def interval(self, poll_interval):\n if os.geteuid() != 0:\n exit(\"Root privileges are required for this operation\")\n pfcwd_info = {}\n if poll_interval is not None:\n pfcwd_table = self.config_db.get_table(CONFIG_DB_PFC_WD_TABLE_NAME)\n entry_min = 3000\n for entry in pfcwd_table:\n if(\"Ethernet\" not in entry):\n continue\n detection_time_entry_value = int(self.config_db.get_entry(\n CONFIG_DB_PFC_WD_TABLE_NAME, entry\n ).get('detection_time'))\n restoration_time_entry_value = int(self.config_db.get_entry(\n CONFIG_DB_PFC_WD_TABLE_NAME, entry\n ).get('restoration_time'))\n if ((detection_time_entry_value is not None) and\n (detection_time_entry_value < entry_min)\n ):\n entry_min = detection_time_entry_value\n entry_min_str = \"detection time\"\n if ((restoration_time_entry_value is not None) and\n (restoration_time_entry_value < entry_min)\n ):\n entry_min = restoration_time_entry_value\n entry_min_str = \"restoration time\"\n if entry_min < poll_interval:\n click.echo(\n \"unable to use polling interval = {}ms, value is \"\n \"bigger than one of the configured {} values, \"\n \"please choose a smaller polling_interval\".format(\n poll_interval, entry_min_str\n ), err=True\n )\n exit(1)\n\n pfcwd_info['POLL_INTERVAL'] = poll_interval\n self.config_db.mod_entry(\n CONFIG_DB_PFC_WD_TABLE_NAME, \"GLOBAL\", pfcwd_info\n )\n\n @multi_asic_util.run_on_multi_asic\n def stop(self, ports):\n if os.geteuid() != 0:\n exit(\"Root privileges are required for this operation\")\n\n all_ports = get_all_ports(\n self.db, self.multi_asic.current_namespace,\n self.multi_asic.display_option\n )\n\n if len(ports) == 0:\n ports = all_ports\n\n for port in ports:\n if port not in all_ports:\n continue\n self.config_db.mod_entry(CONFIG_DB_PFC_WD_TABLE_NAME, port, None)\n\n @multi_asic_util.run_on_multi_asic\n def start_default(self):\n if os.geteuid() != 0:\n exit(\"Root privileges are required for this operation\")\n enable = self.config_db.get_entry('DEVICE_METADATA', 
'localhost').get(\n 'default_pfcwd_status'\n )\n\n # Get active ports from Config DB\n active_ports = natsorted(\n list(self.config_db.get_table('DEVICE_NEIGHBOR').keys())\n )\n\n if not enable or enable.lower() != \"enable\":\n return\n\n port_num = len(list(self.config_db.get_table('PORT').keys()))\n\n # Paramter values positively correlate to the number of ports.\n multiply = max(1, (port_num-1)//DEFAULT_PORT_NUM+1)\n pfcwd_info = {\n 'detection_time': DEFAULT_DETECTION_TIME * multiply,\n 'restoration_time': DEFAULT_RESTORATION_TIME * multiply,\n 'action': DEFAULT_ACTION\n }\n\n for port in active_ports:\n self.verify_pfc_enable_status_per_port(port, pfcwd_info)\n\n pfcwd_info = {}\n pfcwd_info['POLL_INTERVAL'] = DEFAULT_POLL_INTERVAL * multiply\n self.config_db.mod_entry(\n CONFIG_DB_PFC_WD_TABLE_NAME, \"GLOBAL\", pfcwd_info\n )\n\n @multi_asic_util.run_on_multi_asic\n def counter_poll(self, counter_poll):\n if os.geteuid() != 0:\n exit(\"Root privileges are required for this operation\")\n pfcwd_info = {}\n pfcwd_info['FLEX_COUNTER_STATUS'] = counter_poll\n self.config_db.mod_entry(\"FLEX_COUNTER_TABLE\", \"PFCWD\", pfcwd_info)\n\n @multi_asic_util.run_on_multi_asic\n def big_red_switch(self, big_red_switch):\n if os.geteuid() != 0:\n exit(\"Root privileges are required for this operation\")\n pfcwd_info = {}\n if big_red_switch is not None:\n pfcwd_info['BIG_RED_SWITCH'] = big_red_switch\n self.config_db.mod_entry(\n CONFIG_DB_PFC_WD_TABLE_NAME, \"GLOBAL\",\n pfcwd_info\n )\n\n\n# Show stats\nclass Show(object):\n # Show commands\n @cli.group()\n def show():\n \"\"\" Show PFC Watchdog information\"\"\"\n\n @show.command()\n @multi_asic_util.multi_asic_click_options\n @click.option('-e', '--empty', is_flag=True)\n @click.argument('queues', nargs=-1)\n @clicommon.pass_db\n def stats(db, namespace, display, empty, queues):\n \"\"\" Show PFC Watchdog stats per queue \"\"\"\n if (len(queues)):\n display = constants.DISPLAY_ALL\n PfcwdCli(db, namespace, display).show_stats(empty, queues)\n\n # Show config\n @show.command()\n @multi_asic_util.multi_asic_click_options\n @click.argument('ports', nargs=-1)\n @clicommon.pass_db\n def config(db, namespace, display, ports):\n \"\"\" Show PFC Watchdog configuration \"\"\"\n PfcwdCli(db, namespace, display).config(ports)\n\n\n# Start WD\nclass Start(object):\n @cli.command()\n @click.option(\n '--action', '-a', type=click.Choice(['drop', 'forward', 'alert'])\n )\n @click.option('--restoration-time', '-r', type=click.IntRange(100, 60000))\n @click.argument('ports', nargs=-1)\n @click.argument('detection-time', type=click.IntRange(100, 5000))\n @clicommon.pass_db\n def start(db, action, restoration_time, ports, detection_time):\n \"\"\"\n Start PFC watchdog on port(s). 
To config all ports, use all as input.\n\n Example:\n\n sudo pfcwd start --action drop ports all detection-time 400 --restoration-time 400\n\n \"\"\"\n PfcwdCli(db).start(\n action, restoration_time, ports, detection_time\n )\n\n\n# Set WD poll interval\nclass Interval(object):\n @cli.command()\n @click.argument('poll_interval', type=click.IntRange(100, 3000))\n @clicommon.pass_db\n def interval(db, poll_interval):\n \"\"\" Set PFC watchdog counter polling interval \"\"\"\n PfcwdCli(db).interval(poll_interval)\n\n\n# Stop WD\nclass Stop(object):\n @cli.command()\n @click.argument('ports', nargs=-1)\n @clicommon.pass_db\n def stop(db, ports):\n \"\"\" Stop PFC watchdog on port(s) \"\"\"\n PfcwdCli(db).stop(ports)\n\n\n# Set WD default configuration on server facing ports when enable flag is on\nclass StartDefault(object):\n @cli.command(\"start_default\")\n @clicommon.pass_db\n def start_default(db):\n \"\"\" Start PFC WD by default configurations \"\"\"\n PfcwdCli(db).start_default()\n\n\n# Enable/disable PFC WD counter polling\nclass CounterPoll(object):\n @cli.command('counter_poll')\n @click.argument('counter_poll', type=click.Choice(['enable', 'disable']))\n @clicommon.pass_db\n def counter_poll(db, counter_poll):\n \"\"\" Enable/disable counter polling \"\"\"\n PfcwdCli(db).counter_poll(counter_poll)\n\n\n# Enable/disable PFC WD BIG_RED_SWITCH mode\nclass BigRedSwitch(object):\n @cli.command('big_red_switch')\n @click.argument('big_red_switch', type=click.Choice(['enable', 'disable']))\n @clicommon.pass_db\n def big_red_switch(db, big_red_switch):\n \"\"\" Enable/disable BIG_RED_SWITCH mode \"\"\"\n PfcwdCli(db).big_red_switch(big_red_switch)\n\n\ndef get_pfcwd_clis():\n cli.add_command(BigRedSwitch().big_red_switch)\n cli.add_command(CounterPoll().counter_poll)\n cli.add_command(StartDefault().start_default)\n cli.add_command(Stop().stop)\n cli.add_command(Interval().interval)\n cli.add_command(Start().start)\n cli.add_command(Show().show)\n return cli\n\n\nif __name__ == '__main__':\n cli = get_pfcwd_clis()\n cli()\n",
"path": "pfcwd/main.py"
}
] | diff --git a/pfcwd/main.py b/pfcwd/main.py
index 1f8ec2293e..c3b92fd223 100644
--- a/pfcwd/main.py
+++ b/pfcwd/main.py
@@ -242,7 +242,7 @@ def start(self, action, restoration_time, ports, detection_time):
click.echo("Failed to run command, invalid options:")
for opt in invalid_ports:
click.echo(opt)
- exit()
+ exit(1)
self.start_cmd(action, restoration_time, ports, detection_time)
diff --git a/tests/pfcwd_test.py b/tests/pfcwd_test.py
index b0af050233..c150c0568b 100644
--- a/tests/pfcwd_test.py
+++ b/tests/pfcwd_test.py
@@ -255,7 +255,7 @@ def test_pfcwd_start_ports_invalid(self):
obj=db
)
print(result.output)
- assert result.exit_code == 0
+ assert result.exit_code == 1
assert result.output == pfcwd_show_start_config_output_fail
@classmethod
@@ -447,7 +447,7 @@ def test_pfcwd_start_ports_masic_invalid(self):
obj=db
)
print(result.output)
- assert result.exit_code == 0
+ assert result.exit_code == 1
assert result.output == show_pfc_config_start_fail
# get config after the command, config shouldn't change
@@ -500,4 +500,4 @@ def teardown_class(cls):
import mock_tables.mock_single_asic
importlib.reload(mock_tables.mock_single_asic)
import pfcwd.main
- importlib.reload(pfcwd.main)
\ No newline at end of file
+ importlib.reload(pfcwd.main)
|
mirumee__ariadne-961 | Support Starlette 0.21.0
Starlette 0.21.0 fixes important issues on the `BaseHTTPMiddleware` side:
https://github.com/encode/starlette/pull/1715
https://github.com/tiangolo/fastapi/issues/4544
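Beyond the `BaseHTTPMiddleware` fixes, Starlette 0.21.0 also replaced the requests-based `TestClient` with one backed by httpx, which is why the test updates in the diff below switch raw request bodies from `data=` to `content=` and pass uploaded file contents as bytes. A minimal sketch of the new client usage (the echo app is a hypothetical stand-in, not part of Ariadne):

``` python
# Assumes Starlette >= 0.21, where TestClient is backed by httpx.
from starlette.applications import Starlette
from starlette.responses import PlainTextResponse
from starlette.routing import Route
from starlette.testclient import TestClient


async def echo(request):
    # Echo the raw request body back to the caller.
    return PlainTextResponse(await request.body())

app = Starlette(routes=[Route("/", echo, methods=["POST"])])
client = TestClient(app)

# Pre-0.21 (requests-based): client.post("/", data="raw body")
# 0.21+ (httpx-based): raw bodies go in `content=`; file payloads are bytes,
# e.g. files={"0": ("test.txt", b"hello")}.
response = client.post("/", content="raw body")
assert response.status_code == 200
assert response.text == "raw body"
```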
| [
{
"content": "#! /usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\", encoding=\"utf8\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.16.1\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n \"graphql-core>=3.2.0,<3.3\",\n \"starlette>0.17,<0.21\",\n \"typing_extensions>=3.6.0\",\n ],\n extras_require={\"asgi-file-uploads\": [\"python-multipart>=0.0.5\"]},\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#! /usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\", encoding=\"utf8\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.16.1\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n \"graphql-core>=3.2.0,<3.3\",\n \"starlette>0.17,<1.0\",\n \"typing_extensions>=3.6.0\",\n ],\n extras_require={\"asgi-file-uploads\": [\"python-multipart>=0.0.5\"]},\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n",
"path": "setup.py"
}
] | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 485edc8b4..2ffedd603 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,7 @@
- Added `explorer` option to ASGI and WSGI `GraphQL` applications that enables API explorer customization.
- Added `ExplorerHttp405` API explorer that returns `405 Method Not Allowed` for GET HTTP requests.
- Added implementations for GraphiQL2, GraphQL-Playground and Apollo Sandbox explorers.
+- Updated `starlette` dependency in setup.py to `<1.0`.
## 0.16.1 (2022-09-26)
diff --git a/requirements-dev.in b/requirements-dev.in
index e1270b230..9152094b6 100644
--- a/requirements-dev.in
+++ b/requirements-dev.in
@@ -1,6 +1,7 @@
black
codecov
freezegun
+httpx
mypy
opentracing
pylint
diff --git a/requirements-dev.txt b/requirements-dev.txt
index e480c17ab..dec8f380d 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,9 +1,11 @@
#
-# This file is autogenerated by pip-compile with python 3.8
+# This file is autogenerated by pip-compile with python 3.10
# To update, run:
#
# pip-compile --output-file=requirements-dev.txt requirements-dev.in
#
+anyio==3.6.2
+ # via httpcore
astroid==2.11.5
# via pylint
attrs==19.3.0
@@ -13,7 +15,10 @@ black==22.8.0
build==0.8.0
# via pip-tools
certifi==2019.9.11
- # via requests
+ # via
+ # httpcore
+ # httpx
+ # requests
charset-normalizer==2.0.3
# via requests
click==8.0.3
@@ -32,8 +37,17 @@ fastdiff==0.2.0
# via snapshottest
freezegun==1.2.2
# via -r requirements-dev.in
+h11==0.12.0
+ # via httpcore
+httpcore==0.15.0
+ # via httpx
+httpx==0.23.0
+ # via -r requirements-dev.in
idna==2.8
- # via requests
+ # via
+ # anyio
+ # requests
+ # rfc3986
iniconfig==1.0.1
# via pytest
isort==4.3.21
@@ -101,6 +115,8 @@ requests==2.28.1
# via
# -r requirements-dev.in
# codecov
+rfc3986[idna2008]==1.5.0
+ # via httpx
six==1.13.0
# via
# packaging
@@ -109,10 +125,17 @@ six==1.13.0
# snapshottest
snapshottest==0.6.0
# via -r requirements-dev.in
+sniffio==1.3.0
+ # via
+ # anyio
+ # httpcore
+ # httpx
termcolor==1.1.0
# via snapshottest
toml==0.10.2
- # via coverage
+ # via
+ # coverage
+ # pep517
tomli==1.2.3
# via
# black
diff --git a/requirements.txt b/requirements.txt
index 09eb1c464..1ee342e8a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -12,7 +12,7 @@ idna==3.3
# via anyio
sniffio==1.2.0
# via anyio
-starlette==0.20.4
+starlette==0.21.0
# via ariadne (setup.py)
typing-extensions==4.3.0
# via ariadne (setup.py)
diff --git a/setup.py b/setup.py
index f5406b857..fac8a2a13 100755
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,7 @@
include_package_data=True,
install_requires=[
"graphql-core>=3.2.0,<3.3",
- "starlette>0.17,<0.21",
+ "starlette>0.17,<1.0",
"typing_extensions>=3.6.0",
],
extras_require={"asgi-file-uploads": ["python-multipart>=0.0.5"]},
diff --git a/tests/asgi/test_http_methods.py b/tests/asgi/test_http_methods.py
index 338dbb9d8..dc1302923 100644
--- a/tests/asgi/test_http_methods.py
+++ b/tests/asgi/test_http_methods.py
@@ -31,7 +31,7 @@ def test_put_is_not_supported(client):
def test_delete_is_not_supported(client):
- response = client.delete("/", json={})
+ response = client.delete("/")
assert response.status_code == 405
assert response.headers["Allow"] == "OPTIONS, POST, GET"
diff --git a/tests/asgi/test_query_execution.py b/tests/asgi/test_query_execution.py
index 41b285eca..7043643e0 100644
--- a/tests/asgi/test_query_execution.py
+++ b/tests/asgi/test_query_execution.py
@@ -161,7 +161,7 @@ def test_query_is_executed_for_multipart_form_request_with_file(
),
"map": json.dumps({"0": ["variables.file"]}),
},
- files={"0": ("test.txt", "hello")},
+ files={"0": ("test.txt", "hello".encode("utf-8"))},
)
assert response.status_code == 200
snapshot.assert_match(response.json())
diff --git a/tests/asgi/test_request_data_reading.py b/tests/asgi/test_request_data_reading.py
index 8729d6d47..af10b9477 100644
--- a/tests/asgi/test_request_data_reading.py
+++ b/tests/asgi/test_request_data_reading.py
@@ -4,19 +4,21 @@
def test_attempt_parse_request_missing_content_type_raises_bad_request_error(
client, snapshot
):
- response = client.post("/", data="")
+ response = client.post("/", content="")
assert response.status_code == 400
snapshot.assert_match(response.text)
def test_attempt_parse_non_json_request_raises_bad_request_error(client, snapshot):
- response = client.post("/", data="", headers={"content-type": "text/plain"})
+ response = client.post("/", content="", headers={"content-type": "text/plain"})
assert response.status_code == 400
snapshot.assert_match(response.text)
def test_attempt_parse_non_json_request_body_raises_bad_request_error(client, snapshot):
- response = client.post("/", data="", headers={"content-type": "application/json"})
+ response = client.post(
+ "/", content="", headers={"content-type": "application/json"}
+ )
assert response.status_code == 400
snapshot.assert_match(response.text)
@@ -44,7 +46,7 @@ def test_multipart_form_request_fails_if_operations_is_not_valid_json(client, sn
"operations": "not a valid json",
"map": json.dumps({"0": ["variables.file"]}),
},
- files={"0": ("test.txt", "hello")},
+ files={"0": ("test.txt", "hello".encode("utf-8"))},
)
assert response.status_code == 400
snapshot.assert_match(response.content)
@@ -62,7 +64,7 @@ def test_multipart_form_request_fails_if_map_is_not_valid_json(client, snapshot)
),
"map": "not a valid json",
},
- files={"0": ("test.txt", "hello")},
+ files={"0": ("test.txt", "hello".encode("utf-8"))},
)
assert response.status_code == 400
snapshot.assert_match(response.content)
|
saulpw__visidata-1721 | Bugs with save-all
I've a question about saving.
1. I load a two-sheet notebook, "responses-grades-cda.xlsx".
2. I press `g Ctrl+S`.
3. It gives me a blank input line -- why not default to the name of the file without the xlsx extension?
4. I type in the file name without the extension, plus `.vds`: "responses-grades-cda.vds".
5. I get an error: "AttributeError: 'XlsxIndexSheet' object has no attribute 'join'"
_Originally posted by @reagle in https://github.com/saulpw/visidata/discussions/1266#discussioncomment-4870711_
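The fix captured in this record hardens `vd.saveSheets()` in `visidata/save.py` by bailing out early when `save-all` is invoked with no sheets to save, instead of letting a blank invocation propagate into the save machinery. A standalone sketch of the guard (names simplified; in VisiData proper this is `vd.saveSheets`, decorated with `@VisiData.api`, as in the after-file below):

``` python
# Standalone sketch of the guard added in visidata/save.py.
def save_sheets(warn, given_path, *vsheets, confirm_overwrite=False):
    'Save all *vsheets* to *given_path*.'
    if not vsheets:  # blank tuple: save-all was invoked with no sheets
        warn('no sheets to save')
        return None
    # ...filetype detection and the actual save dispatch would follow here...
    return len(vsheets)

# With no sheets, the function now warns and returns instead of crashing:
assert save_sheets(print, 'out.vds') is None
assert save_sheets(print, 'out.vds', object(), object()) == 2
```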
| [
{
"content": "import collections\n\nfrom visidata import *\n\n\nvd.option('confirm_overwrite', True, 'whether to prompt for overwrite confirmation on save')\nvd.option('safe_error', '#ERR', 'error string to use while saving', replay=True)\n\[email protected]\ndef safe_trdict(vs):\n 'returns string.translate dictionary for replacing tabs and newlines'\n if options.safety_first:\n delim = vs.options.delimiter\n return {\n 0: '', # strip NUL completely\n ord(delim): vs.options.tsv_safe_tab, # \\t\n 10: vs.options.tsv_safe_newline, # \\n\n 13: vs.options.tsv_safe_newline, # \\r\n }\n return {}\n\n\[email protected]\ndef iterdispvals(sheet, *cols, format=False):\n 'For each row in sheet, yield OrderedDict of values for given cols. Values are typed if format=False, or a formatted display string if format=True.'\n if not cols:\n cols = sheet.visibleCols\n\n transformers = collections.OrderedDict() # list of transformers for each column in order\n for col in cols:\n transformers[col] = [ col.type ]\n if format:\n formatMaker = getattr(col, 'formatter_'+(col.formatter or sheet.options.disp_formatter))\n transformers[col].append(formatMaker(col._formatdict))\n trdict = sheet.safe_trdict()\n if trdict:\n transformers[col].append(lambda v,trdict=trdict: v.translate(trdict))\n\n options_safe_error = options.safe_error\n for r in Progress(sheet.rows):\n dispvals = collections.OrderedDict() # [col] -> value\n for col, transforms in transformers.items():\n try:\n dispval = col.getValue(r)\n\n except Exception as e:\n vd.exceptionCaught(e)\n dispval = options_safe_error or str(e)\n\n try:\n for t in transforms:\n if dispval is None:\n break\n elif isinstance(dispval, TypedExceptionWrapper):\n dispval = options_safe_error or str(dispval)\n break\n else:\n dispval = t(dispval)\n\n if dispval is None and format:\n dispval = ''\n except Exception as e:\n dispval = str(dispval)\n\n dispvals[col] = dispval\n\n yield dispvals\n\n\[email protected]\ndef itervals(sheet, *cols, format=False):\n for row in sheet.iterdispvals(*cols, format=format):\n yield [row[c] for c in cols]\n\[email protected]\ndef getDefaultSaveName(sheet):\n src = getattr(sheet, 'source', None)\n if hasattr(src, 'scheme') and src.scheme:\n return src.name + src.suffix\n if isinstance(src, Path):\n if sheet.options.is_set('save_filetype', sheet):\n # if save_filetype is over-ridden from default, use it as the extension\n return str(src.with_suffix('')) + '.' + sheet.options.save_filetype\n return str(src)\n else:\n return sheet.name+'.'+getattr(sheet, 'filetype', options.save_filetype)\n\n\[email protected]\ndef save_cols(vd, cols):\n sheet = cols[0].sheet\n vs = copy(sheet)\n vs.columns = list(cols)\n vs.rows = sheet.rows\n if len(cols) == 1:\n savedcoltxt = cols[0].name + ' column'\n else:\n savedcoltxt = '%s columns' % len(cols)\n path = vd.inputPath('save %s to: ' % savedcoltxt, value=vs.getDefaultSaveName())\n vd.saveSheets(path, vs, confirm_overwrite=options.confirm_overwrite)\n\n\[email protected]\ndef saveSheets(vd, givenpath, *vsheets, confirm_overwrite=False):\n 'Save all *vsheets* to *givenpath*.'\n\n filetype = givenpath.ext or options.save_filetype\n\n vd.clearCaches()\n\n savefunc = getattr(vsheets[0], 'save_' + filetype, None) or getattr(vd, 'save_' + filetype, None)\n\n if savefunc is None:\n vd.fail(f'no function to save as {filetype}')\n\n if givenpath.exists() and confirm_overwrite:\n vd.confirm(\"%s already exists. overwrite? 
\" % givenpath.given)\n\n vd.status('saving %s sheets to %s as %s' % (len(vsheets), givenpath.given, filetype))\n\n if not givenpath.given.endswith('/'): # forcibly specify save individual files into directory by ending path with /\n for vs in vsheets:\n vs.hasBeenModified = False\n # savefuncs(vd, p, *vsheets) will have 2 argcount (*vsheets does not get counted as an arg)\n # savefuncs(vd, p, vs) will have 3 argcount (vs counts as an arg, along with vd, path)\n if savefunc.__code__.co_argcount == 3 and len(vsheets) > 1:\n vd.fail(f'cannot save multiple {filetype} sheets to non-dir')\n return vd.execAsync(savefunc, givenpath, *vsheets)\n\n # path is a dir\n\n # save as individual files in the givenpath directory\n try:\n os.makedirs(givenpath, exist_ok=True)\n except FileExistsError:\n pass\n\n if not givenpath.is_dir():\n vd.fail(f'cannot save multiple {filetype} sheets to non-dir')\n\n def _savefiles(vsheets, givenpath, savefunc, filetype):\n for vs in vsheets:\n p = Path((givenpath / vs.name).with_suffix('.'+filetype))\n savefunc(p, vs)\n vs.hasBeenModified = False\n return vd.execAsync(_savefiles, vsheets, givenpath, savefunc, filetype)\n\n\[email protected]\ndef save_zip(vd, p, *vsheets):\n vd.clearCaches()\n\n import tempfile\n import zipfile\n with tempfile.TemporaryDirectory() as tmpdir:\n with zipfile.ZipFile(str(p), 'w', zipfile.ZIP_DEFLATED, allowZip64=True, compresslevel=9) as zfp:\n for vs in Progress(vsheets):\n filetype = vs.options.save_filetype\n tmpp = Path(f'{tmpdir}{vs.name}.{filetype}')\n savefunc = getattr(vs, 'save_' + filetype, None) or getattr(vd, 'save_' + filetype, None)\n savefunc(tmpp, vs)\n zfp.write(tmpp, f'{vs.name}.{vs.options.save_filetype}')\n vd.status('%s save finished' % p)\n\n\[email protected]\ndef save_txt(vd, p, *vsheets):\n with p.open_text(mode='w', encoding=vsheets[0].options.encoding) as fp:\n for vs in vsheets:\n unitsep = vs.options.delimiter\n rowsep = vs.options.row_delimiter\n for dispvals in vs.iterdispvals(*vs.visibleCols, format=True):\n fp.write(unitsep.join(dispvals.values()))\n fp.write(rowsep)\n vd.status('%s save finished' % p)\n\n\[email protected]\ndef rootSheet(sheet):\n r = sheet\n while isinstance(r.source, BaseSheet):\n r = r.source\n\n return r\n\nBaseSheet.addCommand('^S', 'save-sheet', 'vd.saveSheets(inputPath(\"save to: \", value=getDefaultSaveName()), sheet, confirm_overwrite=options.confirm_overwrite)', 'save current sheet to filename in format determined by extension (default .tsv)')\nBaseSheet.addCommand('', 'save-sheet-really', 'vd.saveSheets(Path(getDefaultSaveName()), sheet, confirm_overwrite=False)', 'save current sheet without asking for filename or confirmation')\nBaseSheet.addCommand('', 'save-source', 'vd.saveSheets(rootSheet().source, rootSheet(), confirm_overwrite=options.confirm_overwrite)', 'save root sheet to its source')\nBaseSheet.addCommand('g^S', 'save-all', 'vd.saveSheets(inputPath(\"save all sheets to: \"), *vd.stackedSheets, confirm_overwrite=options.confirm_overwrite)', 'save all sheets to given file or directory)')\nIndexSheet.addCommand('g^S', 'save-selected', 'vd.saveSheets(inputPath(\"save %d sheets to: \" % nSelectedRows, value=\"_\".join(getattr(vs, \"name\", None) or \"blank\" for vs in selectedRows)), *selectedRows, confirm_overwrite=options.confirm_overwrite)', 'save all selected sheets to given file or directory')\nSheet.addCommand('', 'save-col', 'save_cols([cursorCol])', 'save current column only to filename in format determined by extension (default .tsv)')\nSheet.addCommand('', 
'save-col-keys', 'save_cols(keyCols + [cursorCol])', 'save key columns and current column to filename in format determined by extension (default .tsv)')\n",
"path": "visidata/save.py"
}
] | [
{
"content": "import collections\n\nfrom visidata import *\n\n\nvd.option('confirm_overwrite', True, 'whether to prompt for overwrite confirmation on save')\nvd.option('safe_error', '#ERR', 'error string to use while saving', replay=True)\n\[email protected]\ndef safe_trdict(vs):\n 'returns string.translate dictionary for replacing tabs and newlines'\n if options.safety_first:\n delim = vs.options.delimiter\n return {\n 0: '', # strip NUL completely\n ord(delim): vs.options.tsv_safe_tab, # \\t\n 10: vs.options.tsv_safe_newline, # \\n\n 13: vs.options.tsv_safe_newline, # \\r\n }\n return {}\n\n\[email protected]\ndef iterdispvals(sheet, *cols, format=False):\n 'For each row in sheet, yield OrderedDict of values for given cols. Values are typed if format=False, or a formatted display string if format=True.'\n if not cols:\n cols = sheet.visibleCols\n\n transformers = collections.OrderedDict() # list of transformers for each column in order\n for col in cols:\n transformers[col] = [ col.type ]\n if format:\n formatMaker = getattr(col, 'formatter_'+(col.formatter or sheet.options.disp_formatter))\n transformers[col].append(formatMaker(col._formatdict))\n trdict = sheet.safe_trdict()\n if trdict:\n transformers[col].append(lambda v,trdict=trdict: v.translate(trdict))\n\n options_safe_error = options.safe_error\n for r in Progress(sheet.rows):\n dispvals = collections.OrderedDict() # [col] -> value\n for col, transforms in transformers.items():\n try:\n dispval = col.getValue(r)\n\n except Exception as e:\n vd.exceptionCaught(e)\n dispval = options_safe_error or str(e)\n\n try:\n for t in transforms:\n if dispval is None:\n break\n elif isinstance(dispval, TypedExceptionWrapper):\n dispval = options_safe_error or str(dispval)\n break\n else:\n dispval = t(dispval)\n\n if dispval is None and format:\n dispval = ''\n except Exception as e:\n dispval = str(dispval)\n\n dispvals[col] = dispval\n\n yield dispvals\n\n\[email protected]\ndef itervals(sheet, *cols, format=False):\n for row in sheet.iterdispvals(*cols, format=format):\n yield [row[c] for c in cols]\n\[email protected]\ndef getDefaultSaveName(sheet):\n src = getattr(sheet, 'source', None)\n if hasattr(src, 'scheme') and src.scheme:\n return src.name + src.suffix\n if isinstance(src, Path):\n if sheet.options.is_set('save_filetype', sheet):\n # if save_filetype is over-ridden from default, use it as the extension\n return str(src.with_suffix('')) + '.' + sheet.options.save_filetype\n return str(src)\n else:\n return sheet.name+'.'+getattr(sheet, 'filetype', options.save_filetype)\n\n\[email protected]\ndef save_cols(vd, cols):\n sheet = cols[0].sheet\n vs = copy(sheet)\n vs.columns = list(cols)\n vs.rows = sheet.rows\n if len(cols) == 1:\n savedcoltxt = cols[0].name + ' column'\n else:\n savedcoltxt = '%s columns' % len(cols)\n path = vd.inputPath('save %s to: ' % savedcoltxt, value=vs.getDefaultSaveName())\n vd.saveSheets(path, vs, confirm_overwrite=options.confirm_overwrite)\n\n\[email protected]\ndef saveSheets(vd, givenpath, *vsheets, confirm_overwrite=False):\n 'Save all *vsheets* to *givenpath*.'\n\n if not vsheets: # blank tuple\n vd.warning('no sheets to save')\n return\n\n filetype = givenpath.ext or options.save_filetype\n\n vd.clearCaches()\n\n savefunc = getattr(vsheets[0], 'save_' + filetype, None) or getattr(vd, 'save_' + filetype, None)\n\n if savefunc is None:\n vd.fail(f'no function to save as {filetype}')\n\n if givenpath.exists() and confirm_overwrite:\n vd.confirm(\"%s already exists. overwrite? 
\" % givenpath.given)\n\n vd.status('saving %s sheets to %s as %s' % (len(vsheets), givenpath.given, filetype))\n\n if not givenpath.given.endswith('/'): # forcibly specify save individual files into directory by ending path with /\n for vs in vsheets:\n vs.hasBeenModified = False\n # savefuncs(vd, p, *vsheets) will have 2 argcount (*vsheets does not get counted as an arg)\n # savefuncs(vd, p, vs) will have 3 argcount (vs counts as an arg, along with vd, path)\n if savefunc.__code__.co_argcount == 3 and len(vsheets) > 1:\n vd.fail(f'cannot save multiple {filetype} sheets to non-dir')\n return vd.execAsync(savefunc, givenpath, *vsheets)\n\n # path is a dir\n\n # save as individual files in the givenpath directory\n try:\n os.makedirs(givenpath, exist_ok=True)\n except FileExistsError:\n pass\n\n if not givenpath.is_dir():\n vd.fail(f'cannot save multiple {filetype} sheets to non-dir')\n\n def _savefiles(vsheets, givenpath, savefunc, filetype):\n for vs in vsheets:\n p = Path((givenpath / vs.name).with_suffix('.'+filetype))\n savefunc(p, vs)\n vs.hasBeenModified = False\n return vd.execAsync(_savefiles, vsheets, givenpath, savefunc, filetype)\n\n\[email protected]\ndef save_zip(vd, p, *vsheets):\n vd.clearCaches()\n\n import tempfile\n import zipfile\n with tempfile.TemporaryDirectory() as tmpdir:\n with zipfile.ZipFile(str(p), 'w', zipfile.ZIP_DEFLATED, allowZip64=True, compresslevel=9) as zfp:\n for vs in Progress(vsheets):\n filetype = vs.options.save_filetype\n tmpp = Path(f'{tmpdir}{vs.name}.{filetype}')\n savefunc = getattr(vs, 'save_' + filetype, None) or getattr(vd, 'save_' + filetype, None)\n savefunc(tmpp, vs)\n zfp.write(tmpp, f'{vs.name}.{vs.options.save_filetype}')\n vd.status('%s save finished' % p)\n\n\[email protected]\ndef save_txt(vd, p, *vsheets):\n with p.open_text(mode='w', encoding=vsheets[0].options.encoding) as fp:\n for vs in vsheets:\n unitsep = vs.options.delimiter\n rowsep = vs.options.row_delimiter\n for dispvals in vs.iterdispvals(*vs.visibleCols, format=True):\n fp.write(unitsep.join(dispvals.values()))\n fp.write(rowsep)\n vd.status('%s save finished' % p)\n\n\[email protected]\ndef rootSheet(sheet):\n r = sheet\n while isinstance(r.source, BaseSheet):\n r = r.source\n\n return r\n\nBaseSheet.addCommand('^S', 'save-sheet', 'vd.saveSheets(inputPath(\"save to: \", value=getDefaultSaveName()), sheet, confirm_overwrite=options.confirm_overwrite)', 'save current sheet to filename in format determined by extension (default .tsv)')\nBaseSheet.addCommand('', 'save-sheet-really', 'vd.saveSheets(Path(getDefaultSaveName()), sheet, confirm_overwrite=False)', 'save current sheet without asking for filename or confirmation')\nBaseSheet.addCommand('', 'save-source', 'vd.saveSheets(rootSheet().source, rootSheet(), confirm_overwrite=options.confirm_overwrite)', 'save root sheet to its source')\nBaseSheet.addCommand('g^S', 'save-all', 'vd.saveSheets(inputPath(\"save all sheets to: \"), *vd.stackedSheets, confirm_overwrite=options.confirm_overwrite)', 'save all sheets to given file or directory)')\nIndexSheet.addCommand('g^S', 'save-selected', 'vd.saveSheets(inputPath(\"save %d sheets to: \" % nSelectedRows, value=\"_\".join(getattr(vs, \"name\", None) or \"blank\" for vs in selectedRows)), *selectedRows, confirm_overwrite=options.confirm_overwrite)', 'save all selected sheets to given file or directory')\nSheet.addCommand('', 'save-col', 'save_cols([cursorCol])', 'save current column only to filename in format determined by extension (default .tsv)')\nSheet.addCommand('', 
'save-col-keys', 'save_cols(keyCols + [cursorCol])', 'save key columns and current column to filename in format determined by extension (default .tsv)')\n",
"path": "visidata/save.py"
}
] | diff --git a/visidata/save.py b/visidata/save.py
index 5c35dfaef..a56622483 100644
--- a/visidata/save.py
+++ b/visidata/save.py
@@ -104,6 +104,10 @@ def save_cols(vd, cols):
def saveSheets(vd, givenpath, *vsheets, confirm_overwrite=False):
'Save all *vsheets* to *givenpath*.'
+ if not vsheets: # blank tuple
+ vd.warning('no sheets to save')
+ return
+
filetype = givenpath.ext or options.save_filetype
vd.clearCaches()
|
qutip__qutip-494 | Qobj.expm() not functioning properly for diagonal states which have a zero element in the diagonal
Qobj.expm() is currently not working properly for diagonal states that have a zero on the diagonal: the stored zeros don't become ones, even though exp(0) = 1. An example:
``` python
>>> foo = qutip.Qobj([[1.,0.],[0.,0.]])
>>> foo.expm()
Quantum object: dims = [[2], [2]], shape = [2, 2], type = oper, isherm = True
Qobj data =
[[ 2.71828183 0. ]
[ 0. 0. ]]
```
While the correct result should have been (notice the element in second row, second column):
``` python
Quantum object: dims = [[2], [2]], shape = [2, 2], type = oper, isherm = True
Qobj data =
[[ 2.71828183 0. ]
[ 0. 1. ]]
```
On analysing the source code, I found that the function `sp_expm` in `sparse.py` is responsible for this computation. The relevant code is reproduced below:
``` python
def sp_expm(A, sparse=False):
"""
Sparse matrix exponential.
"""
if _isdiag(A.indices, A.indptr, A.shape[0]):
A.data = np.exp(A.data)
return A
if sparse:
E = spla.expm(A.tocsc())
else:
E = spla.expm(A.toarray())
return sp.csr_matrix(E)
```
Since `A.data` holds only the explicitly stored (non-zero) elements of the sparse matrix, only those elements are exponentiated, and structural zeros on the diagonal are never mapped to exp(0) = 1. A way to avoid this problem would be to either remove the diagonal optimization entirely or modify the appropriate code as follows:
``` python
def sp_expm(A, sparse=False):
"""
Sparse matrix exponential.
"""
if _isdiag(A.indices, A.indptr, A.shape[0]):
        A = sp.diags(np.exp(A.diagonal()), format='csr')
return A
if sparse:
E = spla.expm(A.tocsc())
else:
E = spla.expm(A.toarray())
return sp.csr_matrix(E)
```
I am running qutip 3.2.0.dev-cb698ef
Python 3.5.1, numpy 1.10.4, scipy 0.17.0
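For completeness, here is a quick standalone check (plain SciPy, outside QuTiP) that the proposed `sp.diags` fix reproduces the dense matrix exponential, including the exp(0) = 1 entry:

``` python
import numpy as np
import scipy.sparse as sp
import scipy.linalg as la

A = sp.csr_matrix(np.array([[1., 0.], [0., 0.]]))

# Proposed diagonal fast path: exponentiate the whole diagonal, so the
# structurally-zero entry correctly becomes exp(0) = 1.
E = sp.diags(np.exp(A.diagonal()), format='csr')

assert np.allclose(E.toarray(), la.expm(A.toarray()))
print(E.toarray())
# [[ 2.71828183  0.        ]
#  [ 0.          1.        ]]
```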
| [
{
"content": "# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\"\"\"\nThis module contains a collection of routines for operating on sparse\nmatrices on the scipy.sparse formats, for use internally by other modules\nthroughout QuTiP.\n\"\"\"\n\n__all__ = ['sp_fro_norm', 'sp_inf_norm', 'sp_L2_norm', 'sp_max_norm',\n 'sp_one_norm', 'sp_reshape', 'sp_eigs', 'sp_expm', 'sp_permute',\n 'sp_reverse_permute', 'sp_bandwidth', 'sp_profile']\n\nimport scipy.sparse as sp\nimport scipy.sparse.linalg as spla\nimport numpy as np\nimport scipy.linalg as la\nfrom scipy.linalg.blas import get_blas_funcs\n_dznrm2 = get_blas_funcs(\"znrm2\")\nfrom qutip.cy.sparse_utils import (_sparse_profile, _sparse_permute,\n _sparse_reverse_permute, _sparse_bandwidth,\n _isdiag)\nfrom qutip.settings import debug\n\nimport qutip.logging_utils\nlogger = qutip.logging_utils.get_logger()\n\nif debug:\n import inspect\n\n\ndef sp_fro_norm(data):\n \"\"\"\n Frobius norm for sparse matrix\n \"\"\"\n out = np.sum(np.abs(data.data)**2)\n return np.sqrt(out)\n\n\ndef sp_inf_norm(data):\n \"\"\"\n Infinity norm for sparse matrix\n \"\"\"\n return np.max([np.sum(np.abs(data.getrow(k).data))\n for k in range(data.shape[0])])\n\n\ndef sp_L2_norm(data):\n \"\"\"\n L2 norm sparse vector\n \"\"\"\n if 1 not in data.shape:\n raise TypeError(\"Use L2-norm only for vectors.\")\n\n if len(data.data):\n return _dznrm2(data.data)\n else:\n return 0\n\n\ndef sp_max_norm(data):\n \"\"\"\n Max norm for sparse matrix\n \"\"\"\n return np.max(np.abs(data.data)) if any(data.data) else 0\n\n\ndef sp_one_norm(data):\n \"\"\"\n One norm for sparse matrix\n \"\"\"\n return np.max(np.array([np.sum(np.abs((data.getcol(k).data)))\n for k in range(data.shape[1])]))\n\n\ndef sp_reshape(A, shape, format='csr'):\n \"\"\"\n Reshapes a sparse matrix.\n\n Parameters\n ----------\n A : 
sparse_matrix\n Input matrix in any format\n shape : list/tuple\n Desired shape of new matrix\n format : string {'csr','coo','csc','lil'}\n Optional string indicating desired output format\n\n Returns\n -------\n B : csr_matrix\n Reshaped sparse matrix\n\n References\n ----------\n\n http://stackoverflow.com/questions/16511879/reshape-sparse-matrix-efficiently-python-scipy-0-12\n\n \"\"\"\n if not hasattr(shape, '__len__') or len(shape) != 2:\n raise ValueError('Shape must be a list of two integers')\n\n C = A.tocoo()\n nrows, ncols = C.shape\n size = nrows * ncols\n new_size = shape[0] * shape[1]\n\n if new_size != size:\n raise ValueError('Total size of new array must be unchanged.')\n\n flat_indices = ncols * C.row + C.col\n new_row, new_col = divmod(flat_indices, shape[1])\n B = sp.coo_matrix((C.data, (new_row, new_col)), shape=shape)\n\n if format == 'csr':\n return B.tocsr()\n elif format == 'coo':\n return B\n elif format == 'csc':\n return B.tocsc()\n elif format == 'lil':\n return B.tolil()\n else:\n raise ValueError('Return format not valid.')\n\n\ndef _dense_eigs(data, isherm, vecs, N, eigvals, num_large, num_small):\n \"\"\"\n Internal functions for computing eigenvalues and eigenstates for a dense\n matrix.\n \"\"\"\n if debug:\n logger.debug(inspect.stack()[0][3] + \": vectors = \" + str(vecs))\n\n evecs = None\n\n if vecs:\n if isherm:\n if eigvals == 0:\n evals, evecs = la.eigh(data)\n else:\n if num_small > 0:\n evals, evecs = la.eigh(\n data, eigvals=[0, num_small - 1])\n if num_large > 0:\n evals, evecs = la.eigh(\n data, eigvals=[N - num_large, N - 1])\n else:\n evals, evecs = la.eig(data)\n else:\n if isherm:\n if eigvals == 0:\n evals = la.eigvalsh(data)\n else:\n if num_small > 0:\n evals = la.eigvalsh(data, eigvals=[0, num_small - 1])\n if num_large > 0:\n evals = la.eigvalsh(data, eigvals=[N - num_large, N - 1])\n else:\n evals = la.eigvals(data)\n\n _zipped = list(zip(evals, range(len(evals))))\n _zipped.sort()\n evals, perm = list(zip(*_zipped))\n\n if vecs:\n evecs = np.array([evecs[:, k] for k in perm])\n\n if not isherm and eigvals > 0:\n if vecs:\n if num_small > 0:\n evals, evecs = evals[:num_small], evecs[:num_small]\n elif num_large > 0:\n evals, evecs = evals[(N - num_large):], evecs[(N - num_large):]\n else:\n if num_small > 0:\n evals = evals[:num_small]\n elif num_large > 0:\n evals = evals[(N - num_large):]\n\n return np.array(evals), np.array(evecs)\n\n\ndef _sp_eigs(data, isherm, vecs, N, eigvals, num_large, num_small, tol,\n maxiter):\n \"\"\"\n Internal functions for computing eigenvalues and eigenstates for a sparse\n matrix.\n \"\"\"\n if debug:\n print(inspect.stack()[0][3] + \": vectors = \" + str(vecs))\n\n big_vals = np.array([])\n small_vals = np.array([])\n evecs = None\n\n remove_one = False\n if eigvals == (N - 1):\n # calculate all eigenvalues and remove one at output if using sparse\n eigvals = 0\n num_small = int(np.ceil(N / 2.0))\n num_large = N - num_small\n remove_one = True\n\n if vecs:\n if isherm:\n if num_large > 0:\n big_vals, big_vecs = sp.linalg.eigsh(data, k=num_large,\n which='LA', tol=tol,\n maxiter=maxiter)\n big_vecs = sp.csr_matrix(big_vecs, dtype=complex)\n if num_small > 0:\n small_vals, small_vecs = sp.linalg.eigsh(\n data, k=num_small, which='SA',\n tol=tol, maxiter=maxiter)\n\n else:\n if num_large > 0:\n big_vals, big_vecs = sp.linalg.eigs(data, k=num_large,\n which='LR', tol=tol,\n maxiter=maxiter)\n big_vecs = sp.csr_matrix(big_vecs, dtype=complex)\n if num_small > 0:\n small_vals, small_vecs = 
sp.linalg.eigs(\n data, k=num_small, which='SR',\n tol=tol, maxiter=maxiter)\n\n if num_large != 0 and num_small != 0:\n evecs = sp.hstack([small_vecs, big_vecs], format='csr')\n elif num_large != 0 and num_small == 0:\n evecs = big_vecs\n elif num_large == 0 and num_small != 0:\n evecs = small_vecs\n else:\n if isherm:\n if num_large > 0:\n big_vals = sp.linalg.eigsh(\n data, k=num_large, which='LA',\n return_eigenvectors=False, tol=tol, maxiter=maxiter)\n if num_small > 0:\n small_vals = sp.linalg.eigsh(\n data, k=num_small, which='SA',\n return_eigenvectors=False, tol=tol, maxiter=maxiter)\n else:\n if num_large > 0:\n big_vals = sp.linalg.eigs(\n data, k=num_large, which='LR',\n return_eigenvectors=False, tol=tol, maxiter=maxiter)\n if num_small > 0:\n small_vals = sp.linalg.eigs(\n data, k=num_small, which='SR',\n return_eigenvectors=False, tol=tol, maxiter=maxiter)\n\n evals = np.hstack((small_vals, big_vals))\n if isherm:\n evals = np.real(evals)\n\n _zipped = list(zip(evals, range(len(evals))))\n _zipped.sort()\n evals, perm = list(zip(*_zipped))\n\n if vecs:\n evecs = np.array([evecs[:, k] for k in perm])\n\n # remove last element if requesting N-1 eigs and using sparse\n if remove_one:\n evals = np.delete(evals, -1)\n if vecs:\n evecs = np.delete(evecs, -1)\n\n return np.array(evals), np.array(evecs)\n\n\ndef sp_eigs(data, isherm, vecs=True, sparse=False, sort='low',\n eigvals=0, tol=0, maxiter=100000):\n \"\"\"Returns Eigenvalues and Eigenvectors for a sparse matrix.\n Uses dense eigen-solver unless user sets sparse=True.\n\n Parameters\n ----------\n data : csr_matrix\n Input matrix\n isherm : bool\n Indicate whether the matrix is hermitian or not\n vecs : bool {True , False}\n Flag for requesting eigenvectors\n sparse : bool {False , True}\n Flag to use sparse solver\n sort : str {'low' , 'high}\n Return lowest or highest eigenvals/vecs\n eigvals : int\n Number of eigenvals/vecs to return. Default = 0 (return all)\n tol : float\n Tolerance for sparse eigensolver. Default = 0 (Machine precision)\n maxiter : int\n Max. number of iterations used by sparse sigensolver.\n\n Returns\n -------\n Array of eigenvalues and (by default) array of corresponding Eigenvectors.\n\n \"\"\"\n\n if debug:\n print(inspect.stack()[0][3])\n\n if data.shape[0] != data.shape[1]:\n raise TypeError(\"Can only diagonalize square matrices\")\n\n N = data.shape[0]\n if eigvals == N:\n eigvals = 0\n\n if eigvals > N:\n raise ValueError(\"Number of requested eigen vals/vecs must be <= N.\")\n\n # set number of large and small eigenvals/vecs\n if eigvals == 0: # user wants all eigs (default)\n D = int(np.ceil(N / 2.0))\n num_large = N - D\n if not np.mod(N, 2):\n M = D\n else:\n M = D - 1\n num_small = N - M\n else: # if user wants only a few eigen vals/vecs\n if sort == 'low':\n num_small = eigvals\n num_large = 0\n elif sort == 'high':\n num_large = eigvals\n num_small = 0\n else:\n raise ValueError(\"Invalid option for 'sort'.\")\n\n # Dispatch to sparse/dense solvers\n if sparse:\n evals, evecs = _sp_eigs(data, isherm, vecs, N, eigvals, num_large,\n num_small, tol, maxiter)\n else:\n evals, evecs = _dense_eigs(data.todense(), isherm, vecs, N, eigvals,\n num_large, num_small)\n\n if sort == 'high': # flip arrays to largest values first\n if vecs:\n evecs = np.flipud(evecs)\n evals = np.flipud(evals)\n\n return (evals, evecs) if vecs else evals\n\n\ndef sp_expm(A, sparse=False):\n \"\"\"\n Sparse matrix exponential. 
\n \"\"\"\n if _isdiag(A.indices, A.indptr, A.shape[0]):\n A.data = np.exp(A.data)\n return A\n if sparse:\n E = spla.expm(A.tocsc())\n else:\n E = spla.expm(A.toarray())\n return sp.csr_matrix(E)\n \n\n\ndef sp_permute(A, rperm=(), cperm=(), safe=True):\n \"\"\"\n Permutes the rows and columns of a sparse CSR/CSC matrix\n according to the permutation arrays rperm and cperm, respectively.\n Here, the permutation arrays specify the new order of the rows and\n columns. i.e. [0,1,2,3,4] -> [3,0,4,1,2].\n\n Parameters\n ----------\n A : csr_matrix, csc_matrix\n Input matrix.\n rperm : array_like of integers\n Array of row permutations.\n cperm : array_like of integers\n Array of column permutations.\n safe : bool\n Check structure of permutation arrays.\n\n Returns\n -------\n perm_csr : csr_matrix, csc_matrix\n CSR or CSC matrix with permuted rows/columns.\n\n \"\"\"\n rperm = np.asarray(rperm, dtype=np.int32)\n cperm = np.asarray(cperm, dtype=np.int32)\n nrows = A.shape[0]\n ncols = A.shape[1]\n if len(rperm) == 0:\n rperm = np.arange(nrows, dtype=np.int32)\n if len(cperm) == 0:\n cperm = np.arange(ncols, dtype=np.int32)\n if safe:\n if len(np.setdiff1d(rperm, np.arange(nrows))) != 0:\n raise Exception('Invalid row permutation array.')\n if len(np.setdiff1d(cperm, np.arange(ncols))) != 0:\n raise Exception('Invalid column permutation array.')\n\n shp = A.shape\n kind = A.getformat()\n if kind == 'csr':\n flag = 0\n elif kind == 'csc':\n flag = 1\n else:\n raise Exception('Input must be Qobj, CSR, or CSC matrix.')\n\n data, ind, ptr = _sparse_permute(A.data, A.indices, A.indptr,\n nrows, ncols, rperm, cperm, flag)\n if kind == 'csr':\n return sp.csr_matrix((data, ind, ptr), shape=shp, dtype=data.dtype)\n elif kind == 'csc':\n return sp.csc_matrix((data, ind, ptr), shape=shp, dtype=data.dtype)\n\n\ndef sp_reverse_permute(A, rperm=(), cperm=(), safe=True):\n \"\"\"\n Performs a reverse permutations of the rows and columns of a sparse CSR/CSC\n matrix according to the permutation arrays rperm and cperm, respectively.\n Here, the permutation arrays specify the order of the rows and columns used\n to permute the original array.\n\n Parameters\n ----------\n A : csr_matrix, csc_matrix\n Input matrix.\n rperm : array_like of integers\n Array of row permutations.\n cperm : array_like of integers\n Array of column permutations.\n safe : bool\n Check structure of permutation arrays.\n\n Returns\n -------\n perm_csr : csr_matrix, csc_matrix\n CSR or CSC matrix with permuted rows/columns.\n\n \"\"\"\n rperm = np.asarray(rperm, dtype=np.int32)\n cperm = np.asarray(cperm, dtype=np.int32)\n nrows = A.shape[0]\n ncols = A.shape[1]\n if len(rperm) == 0:\n rperm = np.arange(nrows, dtype=np.int32)\n if len(cperm) == 0:\n cperm = np.arange(ncols, dtype=np.int32)\n if safe:\n if len(np.setdiff1d(rperm, np.arange(nrows))) != 0:\n raise Exception('Invalid row permutation array.')\n if len(np.setdiff1d(cperm, np.arange(ncols))) != 0:\n raise Exception('Invalid column permutation array.')\n\n shp = A.shape\n kind = A.getformat()\n if kind == 'csr':\n flag = 0\n elif kind == 'csc':\n flag = 1\n else:\n raise Exception('Input must be Qobj, CSR, or CSC matrix.')\n\n data, ind, ptr = _sparse_reverse_permute(A.data, A.indices, A.indptr,\n nrows, ncols, rperm, cperm, flag)\n\n if kind == 'csr':\n return sp.csr_matrix((data, ind, ptr), shape=shp, dtype=data.dtype)\n elif kind == 'csc':\n return sp.csc_matrix((data, ind, ptr), shape=shp, dtype=data.dtype)\n\n\ndef sp_bandwidth(A):\n \"\"\"\n Returns the max(mb), lower(lb), 
and upper(ub) bandwidths of a\n sparse CSR/CSC matrix.\n\n If the matrix is symmetric then the upper and lower bandwidths are\n identical. Diagonal matrices have a bandwidth equal to one.\n\n Parameters\n ----------\n A : csr_matrix, csc_matrix\n Input matrix\n\n Returns\n -------\n mb : int\n Maximum bandwidth of matrix.\n lb : int\n Lower bandwidth of matrix.\n ub : int\n Upper bandwidth of matrix.\n\n \"\"\"\n nrows = A.shape[0]\n ncols = A.shape[1]\n\n if A.getformat() == 'csr':\n return _sparse_bandwidth(A.indices, A.indptr, nrows)\n elif A.getformat() == 'csc':\n # Normal output is mb,lb,ub but since CSC\n # is transpose of CSR switch lb and ub\n mb, ub, lb = _sparse_bandwidth(A.indices, A.indptr, ncols)\n return mb, lb, ub\n else:\n raise Exception('Invalid sparse input format.')\n\n\ndef sp_profile(A):\n \"\"\"Returns the total, lower, and upper profiles of a sparse matrix.\n\n If the matrix is symmetric then the upper and lower profiles are\n identical. Diagonal matrices have zero profile.\n\n Parameters\n ----------\n A : csr_matrix, csc_matrix\n Input matrix\n \"\"\"\n if sp.isspmatrix_csr(A):\n up = _sparse_profile(A.indices, A.indptr, A.shape[0])\n A = A.tocsc()\n lp = _sparse_profile(A.indices, A.indptr, A.shape[0])\n\n elif sp.isspmatrix_csc(A):\n lp = _sparse_profile(A.indices, A.indptr, A.shape[0])\n A = A.tocsr()\n up = _sparse_profile(A.indices, A.indptr, A.shape[0])\n\n else:\n raise TypeError('Input sparse matrix must be in CSR or CSC format.')\n\n return up+lp, lp, up\n\n\ndef sp_isdiag(A):\n \"\"\"Determine if sparse CSR matrix is diagonal.\n \n Parameters\n ----------\n A : csr_matrix, csc_matrix\n Input matrix\n \n Returns\n -------\n isdiag : int\n True if matix is diagonal, False otherwise.\n \n \"\"\"\n if not sp.isspmatrix_csr(A):\n raise TypeError('Input sparse matrix must be in CSR format.')\n return _isdiag(A.indices, A.indptr, A.shape[0])\n",
"path": "qutip/sparse.py"
}
] | [
{
"content": "# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\"\"\"\nThis module contains a collection of routines for operating on sparse\nmatrices on the scipy.sparse formats, for use internally by other modules\nthroughout QuTiP.\n\"\"\"\n\n__all__ = ['sp_fro_norm', 'sp_inf_norm', 'sp_L2_norm', 'sp_max_norm',\n 'sp_one_norm', 'sp_reshape', 'sp_eigs', 'sp_expm', 'sp_permute',\n 'sp_reverse_permute', 'sp_bandwidth', 'sp_profile']\n\nimport scipy.sparse as sp\nimport scipy.sparse.linalg as spla\nimport numpy as np\nimport scipy.linalg as la\nfrom scipy.linalg.blas import get_blas_funcs\n_dznrm2 = get_blas_funcs(\"znrm2\")\nfrom qutip.cy.sparse_utils import (_sparse_profile, _sparse_permute,\n _sparse_reverse_permute, _sparse_bandwidth,\n _isdiag)\nfrom qutip.settings import debug\n\nimport qutip.logging_utils\nlogger = qutip.logging_utils.get_logger()\n\nif debug:\n import inspect\n\n\ndef sp_fro_norm(data):\n \"\"\"\n Frobius norm for sparse matrix\n \"\"\"\n out = np.sum(np.abs(data.data)**2)\n return np.sqrt(out)\n\n\ndef sp_inf_norm(data):\n \"\"\"\n Infinity norm for sparse matrix\n \"\"\"\n return np.max([np.sum(np.abs(data.getrow(k).data))\n for k in range(data.shape[0])])\n\n\ndef sp_L2_norm(data):\n \"\"\"\n L2 norm sparse vector\n \"\"\"\n if 1 not in data.shape:\n raise TypeError(\"Use L2-norm only for vectors.\")\n\n if len(data.data):\n return _dznrm2(data.data)\n else:\n return 0\n\n\ndef sp_max_norm(data):\n \"\"\"\n Max norm for sparse matrix\n \"\"\"\n return np.max(np.abs(data.data)) if any(data.data) else 0\n\n\ndef sp_one_norm(data):\n \"\"\"\n One norm for sparse matrix\n \"\"\"\n return np.max(np.array([np.sum(np.abs((data.getcol(k).data)))\n for k in range(data.shape[1])]))\n\n\ndef sp_reshape(A, shape, format='csr'):\n \"\"\"\n Reshapes a sparse matrix.\n\n Parameters\n ----------\n A : 
sparse_matrix\n Input matrix in any format\n shape : list/tuple\n Desired shape of new matrix\n format : string {'csr','coo','csc','lil'}\n Optional string indicating desired output format\n\n Returns\n -------\n B : csr_matrix\n Reshaped sparse matrix\n\n References\n ----------\n\n http://stackoverflow.com/questions/16511879/reshape-sparse-matrix-efficiently-python-scipy-0-12\n\n \"\"\"\n if not hasattr(shape, '__len__') or len(shape) != 2:\n raise ValueError('Shape must be a list of two integers')\n\n C = A.tocoo()\n nrows, ncols = C.shape\n size = nrows * ncols\n new_size = shape[0] * shape[1]\n\n if new_size != size:\n raise ValueError('Total size of new array must be unchanged.')\n\n flat_indices = ncols * C.row + C.col\n new_row, new_col = divmod(flat_indices, shape[1])\n B = sp.coo_matrix((C.data, (new_row, new_col)), shape=shape)\n\n if format == 'csr':\n return B.tocsr()\n elif format == 'coo':\n return B\n elif format == 'csc':\n return B.tocsc()\n elif format == 'lil':\n return B.tolil()\n else:\n raise ValueError('Return format not valid.')\n\n\ndef _dense_eigs(data, isherm, vecs, N, eigvals, num_large, num_small):\n \"\"\"\n Internal functions for computing eigenvalues and eigenstates for a dense\n matrix.\n \"\"\"\n if debug:\n logger.debug(inspect.stack()[0][3] + \": vectors = \" + str(vecs))\n\n evecs = None\n\n if vecs:\n if isherm:\n if eigvals == 0:\n evals, evecs = la.eigh(data)\n else:\n if num_small > 0:\n evals, evecs = la.eigh(\n data, eigvals=[0, num_small - 1])\n if num_large > 0:\n evals, evecs = la.eigh(\n data, eigvals=[N - num_large, N - 1])\n else:\n evals, evecs = la.eig(data)\n else:\n if isherm:\n if eigvals == 0:\n evals = la.eigvalsh(data)\n else:\n if num_small > 0:\n evals = la.eigvalsh(data, eigvals=[0, num_small - 1])\n if num_large > 0:\n evals = la.eigvalsh(data, eigvals=[N - num_large, N - 1])\n else:\n evals = la.eigvals(data)\n\n _zipped = list(zip(evals, range(len(evals))))\n _zipped.sort()\n evals, perm = list(zip(*_zipped))\n\n if vecs:\n evecs = np.array([evecs[:, k] for k in perm])\n\n if not isherm and eigvals > 0:\n if vecs:\n if num_small > 0:\n evals, evecs = evals[:num_small], evecs[:num_small]\n elif num_large > 0:\n evals, evecs = evals[(N - num_large):], evecs[(N - num_large):]\n else:\n if num_small > 0:\n evals = evals[:num_small]\n elif num_large > 0:\n evals = evals[(N - num_large):]\n\n return np.array(evals), np.array(evecs)\n\n\ndef _sp_eigs(data, isherm, vecs, N, eigvals, num_large, num_small, tol,\n maxiter):\n \"\"\"\n Internal functions for computing eigenvalues and eigenstates for a sparse\n matrix.\n \"\"\"\n if debug:\n print(inspect.stack()[0][3] + \": vectors = \" + str(vecs))\n\n big_vals = np.array([])\n small_vals = np.array([])\n evecs = None\n\n remove_one = False\n if eigvals == (N - 1):\n # calculate all eigenvalues and remove one at output if using sparse\n eigvals = 0\n num_small = int(np.ceil(N / 2.0))\n num_large = N - num_small\n remove_one = True\n\n if vecs:\n if isherm:\n if num_large > 0:\n big_vals, big_vecs = sp.linalg.eigsh(data, k=num_large,\n which='LA', tol=tol,\n maxiter=maxiter)\n big_vecs = sp.csr_matrix(big_vecs, dtype=complex)\n if num_small > 0:\n small_vals, small_vecs = sp.linalg.eigsh(\n data, k=num_small, which='SA',\n tol=tol, maxiter=maxiter)\n\n else:\n if num_large > 0:\n big_vals, big_vecs = sp.linalg.eigs(data, k=num_large,\n which='LR', tol=tol,\n maxiter=maxiter)\n big_vecs = sp.csr_matrix(big_vecs, dtype=complex)\n if num_small > 0:\n small_vals, small_vecs = 
sp.linalg.eigs(\n data, k=num_small, which='SR',\n tol=tol, maxiter=maxiter)\n\n if num_large != 0 and num_small != 0:\n evecs = sp.hstack([small_vecs, big_vecs], format='csr')\n elif num_large != 0 and num_small == 0:\n evecs = big_vecs\n elif num_large == 0 and num_small != 0:\n evecs = small_vecs\n else:\n if isherm:\n if num_large > 0:\n big_vals = sp.linalg.eigsh(\n data, k=num_large, which='LA',\n return_eigenvectors=False, tol=tol, maxiter=maxiter)\n if num_small > 0:\n small_vals = sp.linalg.eigsh(\n data, k=num_small, which='SA',\n return_eigenvectors=False, tol=tol, maxiter=maxiter)\n else:\n if num_large > 0:\n big_vals = sp.linalg.eigs(\n data, k=num_large, which='LR',\n return_eigenvectors=False, tol=tol, maxiter=maxiter)\n if num_small > 0:\n small_vals = sp.linalg.eigs(\n data, k=num_small, which='SR',\n return_eigenvectors=False, tol=tol, maxiter=maxiter)\n\n evals = np.hstack((small_vals, big_vals))\n if isherm:\n evals = np.real(evals)\n\n _zipped = list(zip(evals, range(len(evals))))\n _zipped.sort()\n evals, perm = list(zip(*_zipped))\n\n if vecs:\n evecs = np.array([evecs[:, k] for k in perm])\n\n # remove last element if requesting N-1 eigs and using sparse\n if remove_one:\n evals = np.delete(evals, -1)\n if vecs:\n evecs = np.delete(evecs, -1)\n\n return np.array(evals), np.array(evecs)\n\n\ndef sp_eigs(data, isherm, vecs=True, sparse=False, sort='low',\n eigvals=0, tol=0, maxiter=100000):\n \"\"\"Returns Eigenvalues and Eigenvectors for a sparse matrix.\n Uses dense eigen-solver unless user sets sparse=True.\n\n Parameters\n ----------\n data : csr_matrix\n Input matrix\n isherm : bool\n Indicate whether the matrix is hermitian or not\n vecs : bool {True , False}\n Flag for requesting eigenvectors\n sparse : bool {False , True}\n Flag to use sparse solver\n sort : str {'low' , 'high}\n Return lowest or highest eigenvals/vecs\n eigvals : int\n Number of eigenvals/vecs to return. Default = 0 (return all)\n tol : float\n Tolerance for sparse eigensolver. Default = 0 (Machine precision)\n maxiter : int\n Max. number of iterations used by sparse sigensolver.\n\n Returns\n -------\n Array of eigenvalues and (by default) array of corresponding Eigenvectors.\n\n \"\"\"\n\n if debug:\n print(inspect.stack()[0][3])\n\n if data.shape[0] != data.shape[1]:\n raise TypeError(\"Can only diagonalize square matrices\")\n\n N = data.shape[0]\n if eigvals == N:\n eigvals = 0\n\n if eigvals > N:\n raise ValueError(\"Number of requested eigen vals/vecs must be <= N.\")\n\n # set number of large and small eigenvals/vecs\n if eigvals == 0: # user wants all eigs (default)\n D = int(np.ceil(N / 2.0))\n num_large = N - D\n if not np.mod(N, 2):\n M = D\n else:\n M = D - 1\n num_small = N - M\n else: # if user wants only a few eigen vals/vecs\n if sort == 'low':\n num_small = eigvals\n num_large = 0\n elif sort == 'high':\n num_large = eigvals\n num_small = 0\n else:\n raise ValueError(\"Invalid option for 'sort'.\")\n\n # Dispatch to sparse/dense solvers\n if sparse:\n evals, evecs = _sp_eigs(data, isherm, vecs, N, eigvals, num_large,\n num_small, tol, maxiter)\n else:\n evals, evecs = _dense_eigs(data.todense(), isherm, vecs, N, eigvals,\n num_large, num_small)\n\n if sort == 'high': # flip arrays to largest values first\n if vecs:\n evecs = np.flipud(evecs)\n evals = np.flipud(evals)\n\n return (evals, evecs) if vecs else evals\n\n\ndef sp_expm(A, sparse=False):\n \"\"\"\n Sparse matrix exponential. 
\n \"\"\"\n if _isdiag(A.indices, A.indptr, A.shape[0]):\n A = sp.diags(np.exp(A.diagonal()), shape=A.shape, \n format='csr', dtype=complex)\n return A\n if sparse:\n E = spla.expm(A.tocsc())\n else:\n E = spla.expm(A.toarray())\n return sp.csr_matrix(E)\n \n\n\ndef sp_permute(A, rperm=(), cperm=(), safe=True):\n \"\"\"\n Permutes the rows and columns of a sparse CSR/CSC matrix\n according to the permutation arrays rperm and cperm, respectively.\n Here, the permutation arrays specify the new order of the rows and\n columns. i.e. [0,1,2,3,4] -> [3,0,4,1,2].\n\n Parameters\n ----------\n A : csr_matrix, csc_matrix\n Input matrix.\n rperm : array_like of integers\n Array of row permutations.\n cperm : array_like of integers\n Array of column permutations.\n safe : bool\n Check structure of permutation arrays.\n\n Returns\n -------\n perm_csr : csr_matrix, csc_matrix\n CSR or CSC matrix with permuted rows/columns.\n\n \"\"\"\n rperm = np.asarray(rperm, dtype=np.int32)\n cperm = np.asarray(cperm, dtype=np.int32)\n nrows = A.shape[0]\n ncols = A.shape[1]\n if len(rperm) == 0:\n rperm = np.arange(nrows, dtype=np.int32)\n if len(cperm) == 0:\n cperm = np.arange(ncols, dtype=np.int32)\n if safe:\n if len(np.setdiff1d(rperm, np.arange(nrows))) != 0:\n raise Exception('Invalid row permutation array.')\n if len(np.setdiff1d(cperm, np.arange(ncols))) != 0:\n raise Exception('Invalid column permutation array.')\n\n shp = A.shape\n kind = A.getformat()\n if kind == 'csr':\n flag = 0\n elif kind == 'csc':\n flag = 1\n else:\n raise Exception('Input must be Qobj, CSR, or CSC matrix.')\n\n data, ind, ptr = _sparse_permute(A.data, A.indices, A.indptr,\n nrows, ncols, rperm, cperm, flag)\n if kind == 'csr':\n return sp.csr_matrix((data, ind, ptr), shape=shp, dtype=data.dtype)\n elif kind == 'csc':\n return sp.csc_matrix((data, ind, ptr), shape=shp, dtype=data.dtype)\n\n\ndef sp_reverse_permute(A, rperm=(), cperm=(), safe=True):\n \"\"\"\n Performs a reverse permutations of the rows and columns of a sparse CSR/CSC\n matrix according to the permutation arrays rperm and cperm, respectively.\n Here, the permutation arrays specify the order of the rows and columns used\n to permute the original array.\n\n Parameters\n ----------\n A : csr_matrix, csc_matrix\n Input matrix.\n rperm : array_like of integers\n Array of row permutations.\n cperm : array_like of integers\n Array of column permutations.\n safe : bool\n Check structure of permutation arrays.\n\n Returns\n -------\n perm_csr : csr_matrix, csc_matrix\n CSR or CSC matrix with permuted rows/columns.\n\n \"\"\"\n rperm = np.asarray(rperm, dtype=np.int32)\n cperm = np.asarray(cperm, dtype=np.int32)\n nrows = A.shape[0]\n ncols = A.shape[1]\n if len(rperm) == 0:\n rperm = np.arange(nrows, dtype=np.int32)\n if len(cperm) == 0:\n cperm = np.arange(ncols, dtype=np.int32)\n if safe:\n if len(np.setdiff1d(rperm, np.arange(nrows))) != 0:\n raise Exception('Invalid row permutation array.')\n if len(np.setdiff1d(cperm, np.arange(ncols))) != 0:\n raise Exception('Invalid column permutation array.')\n\n shp = A.shape\n kind = A.getformat()\n if kind == 'csr':\n flag = 0\n elif kind == 'csc':\n flag = 1\n else:\n raise Exception('Input must be Qobj, CSR, or CSC matrix.')\n\n data, ind, ptr = _sparse_reverse_permute(A.data, A.indices, A.indptr,\n nrows, ncols, rperm, cperm, flag)\n\n if kind == 'csr':\n return sp.csr_matrix((data, ind, ptr), shape=shp, dtype=data.dtype)\n elif kind == 'csc':\n return sp.csc_matrix((data, ind, ptr), shape=shp, dtype=data.dtype)\n\n\ndef 
sp_bandwidth(A):\n \"\"\"\n Returns the max(mb), lower(lb), and upper(ub) bandwidths of a\n sparse CSR/CSC matrix.\n\n If the matrix is symmetric then the upper and lower bandwidths are\n identical. Diagonal matrices have a bandwidth equal to one.\n\n Parameters\n ----------\n A : csr_matrix, csc_matrix\n Input matrix\n\n Returns\n -------\n mb : int\n Maximum bandwidth of matrix.\n lb : int\n Lower bandwidth of matrix.\n ub : int\n Upper bandwidth of matrix.\n\n \"\"\"\n nrows = A.shape[0]\n ncols = A.shape[1]\n\n if A.getformat() == 'csr':\n return _sparse_bandwidth(A.indices, A.indptr, nrows)\n elif A.getformat() == 'csc':\n # Normal output is mb,lb,ub but since CSC\n # is transpose of CSR switch lb and ub\n mb, ub, lb = _sparse_bandwidth(A.indices, A.indptr, ncols)\n return mb, lb, ub\n else:\n raise Exception('Invalid sparse input format.')\n\n\ndef sp_profile(A):\n \"\"\"Returns the total, lower, and upper profiles of a sparse matrix.\n\n If the matrix is symmetric then the upper and lower profiles are\n identical. Diagonal matrices have zero profile.\n\n Parameters\n ----------\n A : csr_matrix, csc_matrix\n Input matrix\n \"\"\"\n if sp.isspmatrix_csr(A):\n up = _sparse_profile(A.indices, A.indptr, A.shape[0])\n A = A.tocsc()\n lp = _sparse_profile(A.indices, A.indptr, A.shape[0])\n\n elif sp.isspmatrix_csc(A):\n lp = _sparse_profile(A.indices, A.indptr, A.shape[0])\n A = A.tocsr()\n up = _sparse_profile(A.indices, A.indptr, A.shape[0])\n\n else:\n raise TypeError('Input sparse matrix must be in CSR or CSC format.')\n\n return up+lp, lp, up\n\n\ndef sp_isdiag(A):\n \"\"\"Determine if sparse CSR matrix is diagonal.\n \n Parameters\n ----------\n A : csr_matrix, csc_matrix\n Input matrix\n \n Returns\n -------\n isdiag : int\n True if matix is diagonal, False otherwise.\n \n \"\"\"\n if not sp.isspmatrix_csr(A):\n raise TypeError('Input sparse matrix must be in CSR format.')\n return _isdiag(A.indices, A.indptr, A.shape[0])\n",
"path": "qutip/sparse.py"
}
] | diff --git a/qutip/sparse.py b/qutip/sparse.py
index 950071b7f2..8991599ef7 100644
--- a/qutip/sparse.py
+++ b/qutip/sparse.py
@@ -383,7 +383,8 @@ def sp_expm(A, sparse=False):
Sparse matrix exponential.
"""
if _isdiag(A.indices, A.indptr, A.shape[0]):
- A.data = np.exp(A.data)
+ A = sp.diags(np.exp(A.diagonal()), shape=A.shape,
+ format='csr', dtype=complex)
return A
if sparse:
E = spla.expm(A.tocsc())
diff --git a/qutip/tests/test_qobj.py b/qutip/tests/test_qobj.py
index 6b92944c69..ab2d5376aa 100644
--- a/qutip/tests/test_qobj.py
+++ b/qutip/tests/test_qobj.py
@@ -40,7 +40,7 @@
from qutip.random_objects import (rand_ket, rand_dm, rand_herm, rand_unitary,
rand_super, rand_super_bcsz, rand_dm_ginibre)
from qutip.states import basis, fock_dm, ket2dm
-from qutip.operators import create, destroy, num, sigmax, sigmay
+from qutip.operators import create, destroy, num, sigmax, sigmay, qeye
from qutip.superoperator import spre, spost, operator_to_vector
from qutip.superop_reps import to_super, to_choi, to_chi
from qutip.tensor import tensor, super_tensor, composite
@@ -512,6 +512,13 @@ def test_QobjExpmExplicitlySparse():
assert_((B.data.todense() - np.matrix(la.expm(data)) < 1e-10).all())
+def test_QobjExpmZeroOper():
+ "Qobj expm zero_oper (#493)"
+ A = Qobj(np.zeros((5,5), dtype=complex))
+ B = A.expm()
+ assert_(B == qeye(5))
+
+
def test_Qobj_sqrtm():
"Qobj sqrtm"
data = np.random.random(
|
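For context on the sp_expm hunk in the diff above: exponentiating only the stored entries misses the implicit zeros of a sparse matrix, so expm of the zero operator used to come back as zero instead of the identity (issue #493). A minimal sketch of the failure and of the fixed construction, assuming only NumPy and SciPy:

```python
import numpy as np
import scipy.sparse as sp

A = sp.csr_matrix((5, 5), dtype=complex)  # zero operator: no stored entries

# Old behaviour: np.exp over A.data is a no-op on an empty data array,
# so the "exponential" of the zero matrix stayed the zero matrix.
old = A.copy()
old.data = np.exp(old.data)
assert old.nnz == 0  # still empty, yet expm(0) should be the identity

# Fixed behaviour: A.diagonal() includes the implicit zeros, so exp(0) = 1
# lands on every diagonal entry and the result is the identity, which is
# exactly what the new test_QobjExpmZeroOper regression test (B == qeye(5))
# guards against.
new = sp.diags(np.exp(A.diagonal()), shape=A.shape, format='csr', dtype=complex)
assert (new.toarray() == np.eye(5)).all()
```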
spack__spack-36099 | LLNL Cardioid homepage no longer exists
https://github.com/spack/spack/blob/fe5865da0d4ee4480a85ee1257cb310c036b0c88/var/spack/repos/builtin/packages/cardioid/package.py#L12
@rblake-llnl it looks like Cardioid's [home page](https://baasic.llnl.gov/comp-bio/cardioid-code.php) redirects to a 404 these days. You're listed as the maintainer. Has the home of cardioid moved?
| [
{
"content": "# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack.package import *\n\n\nclass Cardioid(CMakePackage):\n \"\"\"Cardiac simulation suite.\"\"\"\n\n homepage = \"https://baasic.llnl.gov/comp-bio/cardioid-code.php\"\n git = \"https://github.com/LLNL/cardioid.git\"\n maintainers(\"rblake-llnl\")\n\n version(\"develop\", branch=\"master\")\n version(\"elecfem\", branch=\"elec-fem\")\n\n variant(\"cuda\", default=False, description=\"Build with cuda support\")\n variant(\"mfem\", default=False, description=\"Build with mfem support\")\n\n depends_on(\"blas\")\n depends_on(\"lapack\")\n depends_on(\"mpi\")\n depends_on(\"cuda\", when=\"+cuda\")\n depends_on(\"mfem+mpi+superlu-dist+lapack\", when=\"+mfem\")\n depends_on(\"hypre+cuda\", when=\"+mfem+cuda\")\n depends_on(\"[email protected]:\", type=\"build\")\n depends_on(\"perl\", type=\"build\")\n\n def cmake_args(self):\n spec = self.spec\n args = [\n \"-DLAPACK_LIB:PATH=\" + \";\".join(spec[\"lapack\"].libs.libraries),\n \"-DBLAS_LIB:PATH=\" + \";\".join(spec[\"blas\"].libs.libraries),\n \"-DENABLE_OPENMP:BOOL=ON\",\n \"-DENABLE_MPI:BOOL=ON\",\n \"-DENABLE_FIND_MPI:BOOL=OFF\",\n \"-DMPI_C_COMPILER:STRING=\" + spec[\"mpi\"].mpicc,\n \"-DMPI_CXX_COMPILER:STRING=\" + spec[\"mpi\"].mpicxx,\n \"-DCMAKE_C_COMPILER:STRING=\" + spec[\"mpi\"].mpicc,\n \"-DCMAKE_CXX_COMPILER:STRING=\" + spec[\"mpi\"].mpicxx,\n ]\n\n if \"+cuda\" in self.spec:\n args.append(\"-DENABLE_CUDA:BOOL=ON\")\n args.append(\"-DCUDA_TOOLKIT_ROOT:PATH=\" + spec[\"cuda\"].prefix)\n else:\n args.append(\"-DENABLE_CUDA:BOOL=OFF\")\n\n if \"+mfem\" in self.spec:\n args.append(\"-DMFEM_DIR:PATH=\" + spec[\"mfem\"].prefix)\n return args\n",
"path": "var/spack/repos/builtin/packages/cardioid/package.py"
}
] | [
{
"content": "# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack.package import *\n\n\nclass Cardioid(CMakePackage):\n \"\"\"Cardiac simulation suite.\"\"\"\n\n homepage = \"https://baasic.llnl.gov/comp-bio/cardioid-code\"\n git = \"https://github.com/LLNL/cardioid.git\"\n maintainers(\"rblake-llnl\")\n\n version(\"develop\", branch=\"master\")\n version(\"elecfem\", branch=\"elec-fem\")\n\n variant(\"cuda\", default=False, description=\"Build with cuda support\")\n variant(\"mfem\", default=False, description=\"Build with mfem support\")\n\n depends_on(\"blas\")\n depends_on(\"lapack\")\n depends_on(\"mpi\")\n depends_on(\"cuda\", when=\"+cuda\")\n depends_on(\"mfem+mpi+superlu-dist+lapack\", when=\"+mfem\")\n depends_on(\"hypre+cuda\", when=\"+mfem+cuda\")\n depends_on(\"[email protected]:\", type=\"build\")\n depends_on(\"perl\", type=\"build\")\n\n def cmake_args(self):\n spec = self.spec\n args = [\n \"-DLAPACK_LIB:PATH=\" + \";\".join(spec[\"lapack\"].libs.libraries),\n \"-DBLAS_LIB:PATH=\" + \";\".join(spec[\"blas\"].libs.libraries),\n \"-DENABLE_OPENMP:BOOL=ON\",\n \"-DENABLE_MPI:BOOL=ON\",\n \"-DENABLE_FIND_MPI:BOOL=OFF\",\n \"-DMPI_C_COMPILER:STRING=\" + spec[\"mpi\"].mpicc,\n \"-DMPI_CXX_COMPILER:STRING=\" + spec[\"mpi\"].mpicxx,\n \"-DCMAKE_C_COMPILER:STRING=\" + spec[\"mpi\"].mpicc,\n \"-DCMAKE_CXX_COMPILER:STRING=\" + spec[\"mpi\"].mpicxx,\n ]\n\n if \"+cuda\" in self.spec:\n args.append(\"-DENABLE_CUDA:BOOL=ON\")\n args.append(\"-DCUDA_TOOLKIT_ROOT:PATH=\" + spec[\"cuda\"].prefix)\n else:\n args.append(\"-DENABLE_CUDA:BOOL=OFF\")\n\n if \"+mfem\" in self.spec:\n args.append(\"-DMFEM_DIR:PATH=\" + spec[\"mfem\"].prefix)\n return args\n",
"path": "var/spack/repos/builtin/packages/cardioid/package.py"
}
] | diff --git a/var/spack/repos/builtin/packages/cardioid/package.py b/var/spack/repos/builtin/packages/cardioid/package.py
index 54e1c99ac07adc..1cff449740f670 100644
--- a/var/spack/repos/builtin/packages/cardioid/package.py
+++ b/var/spack/repos/builtin/packages/cardioid/package.py
@@ -9,7 +9,7 @@
class Cardioid(CMakePackage):
"""Cardiac simulation suite."""
- homepage = "https://baasic.llnl.gov/comp-bio/cardioid-code.php"
+ homepage = "https://baasic.llnl.gov/comp-bio/cardioid-code"
git = "https://github.com/LLNL/cardioid.git"
maintainers("rblake-llnl")
|
googleapis__python-spanner-django-364 | django: Case(When(..., then=binary)) crashes
e.g.
```python
CaseTestModel.objects.update(
binary=Case(
When(integer=1, then=Value(b'one', output_field=models.BinaryField())),
When(integer=2, then=Value(b'two', output_field=models.BinaryField())),
default=Value(b'', output_field=models.BinaryField()),
),
)
```
```shell
======================================================================
ERROR: test_update_binary (expressions_case.tests.CaseExpressionTests)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/tim/.virtualenvs/django37/lib/python3.7/site-packages/google/api_core/grpc_helpers.py", line 57, in error_remapped_callable
return callable_(*args, **kwargs)
File "/home/tim/.virtualenvs/django37/lib/python3.7/site-packages/grpc/_channel.py", line 690, in __call__
return _end_unary_response_blocking(state, call, False, None)
File "/home/tim/.virtualenvs/django37/lib/python3.7/site-packages/grpc/_channel.py", line 592, in _end_unary_response_blocking
raise _Rendezvous(state, None, None, deadline)
grpc._channel._Rendezvous: <_Rendezvous of RPC that terminated with:
status = StatusCode.INVALID_ARGUMENT
details = "Value of type INT64 cannot be assigned to binary, which has type BYTES [at 1:52]\nUPDATE expressions_case_casetestmodel SET binary = CASE WHEN (expressions_cas...\n ^"
debug_error_string = "{"created":"@1577632634.714942491","description":"Error received from peer ipv4:172.217.6.202:443","file":"src/core/lib/surface/call.cc","file_line":1055,"grpc_message":"Value of type INT64 cannot be assigned to binary, which has type BYTES [at 1:52]\nUPDATE expressions_case_casetestmodel SET binary = CASE WHEN (expressions_cas...\n ^","grpc_status":3}"
>
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/tim/code/spanner-orm/spanner/dbapi/cursor.py", line 95, in execute
self.__handle_update(sql, args or None)
File "/home/tim/code/spanner-orm/spanner/dbapi/cursor.py", line 107, in __handle_update
sql, params,
File "/home/tim/code/spanner-orm/spanner/dbapi/connection.py", line 91, in in_transaction
return self.__dbhandle.run_in_transaction(fn, *args, **kwargs)
File "/home/tim/.virtualenvs/django37/lib/python3.7/site-packages/google/cloud/spanner_v1/database.py", line 443, in run_in_transaction
return session.run_in_transaction(func, *args, **kw)
File "/home/tim/.virtualenvs/django37/lib/python3.7/site-packages/google/cloud/spanner_v1/session.py", line 299, in run_in_transaction
return_value = func(txn, *args, **kw)
File "/home/tim/code/spanner-orm/spanner/dbapi/cursor.py", line 114, in __do_execute_update
res = transaction.execute_update(sql, params=params, param_types=get_param_types(params))
File "/home/tim/.virtualenvs/django37/lib/python3.7/site-packages/google/cloud/spanner_v1/transaction.py", line 202, in execute_update
metadata=metadata,
File "/home/tim/.virtualenvs/django37/lib/python3.7/site-packages/google/cloud/spanner_v1/gapic/spanner_client.py", line 812, in execute_sql
request, retry=retry, timeout=timeout, metadata=metadata
File "/home/tim/.virtualenvs/django37/lib/python3.7/site-packages/google/api_core/gapic_v1/method.py", line 143, in __call__
return wrapped_func(*args, **kwargs)
File "/home/tim/.virtualenvs/django37/lib/python3.7/site-packages/google/api_core/retry.py", line 277, in retry_wrapped_func
on_error=on_error,
File "/home/tim/.virtualenvs/django37/lib/python3.7/site-packages/google/api_core/retry.py", line 182, in retry_target
return target()
File "/home/tim/.virtualenvs/django37/lib/python3.7/site-packages/google/api_core/timeout.py", line 214, in func_with_timeout
return func(*args, **kwargs)
File "/home/tim/.virtualenvs/django37/lib/python3.7/site-packages/google/api_core/grpc_helpers.py", line 59, in error_remapped_callable
six.raise_from(exceptions.from_grpc_error(exc), exc)
File "<string>", line 3, in raise_from
google.api_core.exceptions.InvalidArgument: 400 Value of type INT64 cannot be assigned to binary, which has type BYTES [at 1:52]\nUPDATE expressions_case_casetestmodel SET binary = CASE WHEN (expressions_cas...\n ^
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/tim/code/django/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
File "/home/tim/code/spanner-orm/spanner/dbapi/cursor.py", line 99, in execute
raise ProgrammingError(e.details if hasattr(e, 'details') else e)
spanner.dbapi.exceptions.ProgrammingError: 400 Value of type INT64 cannot be assigned to binary, which has type BYTES [at 1:52]\nUPDATE expressions_case_casetestmodel SET binary = CASE WHEN (expressions_cas...\n ^
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/tim/code/django/tests/expressions_case/tests.py", line 636, in test_update_binary
default=Value(b'', output_field=models.BinaryField()),
File "/home/tim/code/django/django/db/models/manager.py", line 82, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/home/tim/code/django/django/db/models/query.py", line 741, in update
rows = query.get_compiler(self.db).execute_sql(CURSOR)
File "/home/tim/code/django/django/db/models/sql/compiler.py", line 1462, in execute_sql
cursor = super().execute_sql(result_type)
File "/home/tim/code/django/django/db/models/sql/compiler.py", line 1133, in execute_sql
cursor.execute(sql, params)
File "/home/tim/code/django/django/db/backends/utils.py", line 67, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/home/tim/code/django/django/db/backends/utils.py", line 76, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/home/tim/code/django/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
File "/home/tim/code/django/django/db/utils.py", line 89, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/home/tim/code/django/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
File "/home/tim/code/spanner-orm/spanner/dbapi/cursor.py", line 99, in execute
raise ProgrammingError(e.details if hasattr(e, 'details') else e)
django.db.utils.ProgrammingError: 400 Value of type INT64 cannot be assigned to binary, which has type BYTES [at 1:52]\nUPDATE expressions_case_casetestmodel SET binary = CASE WHEN (expressions_cas...\n
```
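The failure is a typing gap rather than a SQL-generation bug: `get_param_types` in `spanner/dbapi/parse_utils.py` (shown below) had no branch for `bytes`, so the CASE/WHEN parameters reached Cloud Spanner without an explicit type and the backend rejected the assignment with the INT64-vs-BYTES error above. The patch adds a `bytes` -> `BYTES` branch; a condensed sketch of the fixed mapping (datetime/date branches omitted for brevity):

```python
from google.cloud import spanner_v1 as spanner

def get_param_types(params):
    # Condensed version of the patched mapping. bool is checked before
    # int because bool is a subclass of int in Python.
    if params is None:
        return None
    param_types = {}
    for key, value in params.items():
        if isinstance(value, bool):
            param_types[key] = spanner.param_types.BOOL
        elif isinstance(value, float):
            param_types[key] = spanner.param_types.FLOAT64
        elif isinstance(value, int):
            param_types[key] = spanner.param_types.INT64
        elif isinstance(value, str):
            param_types[key] = spanner.param_types.STRING
        elif isinstance(value, bytes):
            # The fix: binary values such as b'one' now bind as BYTES.
            param_types[key] = spanner.param_types.BYTES
    return param_types

# With the bytes branch in place, the Case/When parameters are typed:
assert get_param_types({'a0': b'one'})['a0'] == spanner.param_types.BYTES
```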
| [
{
"content": "# Copyright 2020 Google LLC\n#\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file or at\n# https://developers.google.com/open-source/licenses/bsd\n\nimport datetime\nimport decimal\nimport re\nfrom functools import reduce\n\nimport sqlparse\nfrom google.cloud import spanner_v1 as spanner\n\nfrom .exceptions import Error, ProgrammingError\nfrom .parser import parse_values\nfrom .types import DateStr, TimestampStr\n\nSTMT_DDL = 'DDL'\nSTMT_NON_UPDATING = 'NON_UPDATING'\nSTMT_UPDATING = 'UPDATING'\nSTMT_INSERT = 'INSERT'\n\n# Heuristic for identifying statements that don't need to be run as updates.\nre_NON_UPDATE = re.compile(r'^\\s*(SELECT)', re.IGNORECASE)\n\nre_WITH = re.compile(r'^\\s*(WITH)', re.IGNORECASE)\n\n# DDL statements follow https://cloud.google.com/spanner/docs/data-definition-language\nre_DDL = re.compile(r'^\\s*(CREATE|ALTER|DROP)', re.IGNORECASE | re.DOTALL)\n\nre_IS_INSERT = re.compile(r'^\\s*(INSERT)', re.IGNORECASE | re.DOTALL)\n\n\ndef classify_stmt(sql):\n if re_DDL.match(sql):\n return STMT_DDL\n elif re_IS_INSERT.match(sql):\n return STMT_INSERT\n elif re_NON_UPDATE.match(sql):\n return STMT_NON_UPDATING\n elif re_WITH.match(sql):\n # As of Fri-13th-March-2020, Cloud Spanner only supports WITH for DQL\n # statements and doesn't yet support WITH for DML statements.\n # When WITH for DML is added, we'll need to update this classifier accordingly.\n return STMT_NON_UPDATING\n else:\n return STMT_UPDATING\n\n\nre_INSERT = re.compile(\n # Only match the `INSERT INTO <table_name> (columns...)\n # otherwise the rest of the statement could be a complex\n # operation.\n r'^\\s*INSERT INTO (?P<table_name>[^\\s\\(\\)]+)\\s*\\((?P<columns>[^\\(\\)]+)\\)',\n re.IGNORECASE | re.DOTALL,\n)\n\nre_VALUES_TILL_END = re.compile(\n r'VALUES\\s*\\(.+$',\n re.IGNORECASE | re.DOTALL,\n)\n\nre_VALUES_PYFORMAT = re.compile(\n # To match: (%s, %s,....%s)\n r'(\\(\\s*%s[^\\(\\)]+\\))',\n re.DOTALL,\n)\n\n\ndef strip_backticks(name):\n \"\"\"\n Strip backticks off of quoted names For example, '`no`' (a Spanner reserved\n word) becomes 'no'.\n \"\"\"\n has_quotes = name.startswith('`') and name.endswith('`')\n return name[1:-1] if has_quotes else name\n\n\ndef parse_insert(insert_sql, params):\n \"\"\"\n Parse an INSERT statement an generate a list of tuples of the form:\n [\n (SQL, params_per_row1),\n (SQL, params_per_row2),\n (SQL, params_per_row3),\n ...\n ]\n\n There are 4 variants of an INSERT statement:\n a) INSERT INTO <table> (columns...) VALUES (<inlined values>): no params\n b) INSERT INTO <table> (columns...) SELECT_STMT: no params\n c) INSERT INTO <table> (columns...) VALUES (%s,...): with params\n d) INSERT INTO <table> (columns...) VALUES (%s,..<EXPR>...) 
with params and expressions\n\n Thus given each of the forms, it will produce a dictionary describing\n how to upload the contents to Cloud Spanner:\n Case a)\n SQL: INSERT INTO T (f1, f2) VALUES (1, 2)\n it produces:\n {\n 'sql_params_list': [\n ('INSERT INTO T (f1, f2) VALUES (1, 2)', None),\n ],\n }\n\n Case b)\n SQL: 'INSERT INTO T (s, c) SELECT st, zc FROM cus ORDER BY fn, ln',\n it produces:\n {\n 'sql_params_list': [\n ('INSERT INTO T (s, c) SELECT st, zc FROM cus ORDER BY fn, ln', None),\n ]\n }\n\n Case c)\n SQL: INSERT INTO T (f1, f2) VALUES (%s, %s), (%s, %s)\n Params: ['a', 'b', 'c', 'd']\n it produces:\n {\n 'homogenous': True,\n 'table': 'T',\n 'columns': ['f1', 'f2'],\n 'values': [('a', 'b',), ('c', 'd',)],\n }\n\n Case d)\n SQL: INSERT INTO T (f1, f2) VALUES (%s, LOWER(%s)), (UPPER(%s), %s)\n Params: ['a', 'b', 'c', 'd']\n it produces:\n {\n 'sql_params_list': [\n ('INSERT INTO T (f1, f2) VALUES (%s, LOWER(%s))', ('a', 'b',))\n ('INSERT INTO T (f1, f2) VALUES (UPPER(%s), %s)', ('c', 'd',))\n ],\n }\n \"\"\"\n match = re_INSERT.search(insert_sql)\n\n if not match:\n raise ProgrammingError('Could not parse an INSERT statement from %s' % insert_sql)\n\n after_values_sql = re_VALUES_TILL_END.findall(insert_sql)\n if not after_values_sql:\n # Case b)\n return {\n 'sql_params_list': [(insert_sql, None,)],\n }\n\n if not params:\n # Case a) perhaps?\n # Check if any %s exists.\n pyformat_str_count = after_values_sql.count('%s')\n if pyformat_str_count > 0:\n raise ProgrammingError('no params yet there are %d \"%s\" tokens' % pyformat_str_count)\n\n # Confirmed case of:\n # SQL: INSERT INTO T (a1, a2) VALUES (1, 2)\n # Params: None\n return {\n 'sql_params_list': [(insert_sql, None,)],\n }\n\n values_str = after_values_sql[0]\n _, values = parse_values(values_str)\n\n if values.homogenous():\n # Case c)\n\n columns = [\n strip_backticks(mi.strip())\n for mi in match.group('columns').split(',')\n ]\n sql_params_list = []\n insert_sql_preamble = 'INSERT INTO %s (%s) VALUES %s' % (\n match.group('table_name'), match.group('columns'), values.argv[0],\n )\n values_pyformat = [str(arg) for arg in values.argv]\n rows_list = rows_for_insert_or_update(columns, params, values_pyformat)\n for row in rows_list:\n sql_params_list.append((insert_sql_preamble, row,))\n\n return {\n 'sql_params_list': sql_params_list,\n }\n\n # Case d)\n # insert_sql is of the form:\n # INSERT INTO T(c1, c2) VALUES (%s, %s), (%s, LOWER(%s))\n\n # Sanity check:\n # length(all_args) == len(params)\n args_len = reduce(lambda a, b: a+b, [len(arg) for arg in values.argv])\n if args_len != len(params):\n raise ProgrammingError('Invalid length: VALUES(...) 
len: %d != len(params): %d' % (\n args_len, len(params)),\n )\n\n trim_index = insert_sql.find(values_str)\n before_values_sql = insert_sql[:trim_index]\n\n sql_param_tuples = []\n for token_arg in values.argv:\n row_sql = before_values_sql + ' VALUES%s' % token_arg\n row_params, params = tuple(params[0:len(token_arg)]), params[len(token_arg):]\n sql_param_tuples.append((row_sql, row_params,))\n\n return {\n 'sql_params_list': sql_param_tuples,\n }\n\n\ndef rows_for_insert_or_update(columns, params, pyformat_args=None):\n \"\"\"\n Create a tupled list of params to be used as a single value per\n value that inserted from a statement such as\n SQL: 'INSERT INTO t (f1, f2, f3) VALUES (%s, %s, %s), (%s, %s, %s), (%s, %s, %s)'\n Params A: [(1, 2, 3), (4, 5, 6), (7, 8, 9)]\n Params B: [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n We'll have to convert both params types into:\n Params: [(1, 2, 3,), (4, 5, 6,), (7, 8, 9,)]\n \"\"\"\n\n if not pyformat_args:\n # This is the case where we have for example:\n # SQL: 'INSERT INTO t (f1, f2, f3)'\n # Params A: [(1, 2, 3), (4, 5, 6), (7, 8, 9)]\n # Params B: [1, 2, 3, 4, 5, 6, 7, 8, 9]\n #\n # We'll have to convert both params types into:\n # [(1, 2, 3,), (4, 5, 6,), (7, 8, 9,)]\n contains_all_list_or_tuples = True\n for param in params:\n if not (isinstance(param, list) or isinstance(param, tuple)):\n contains_all_list_or_tuples = False\n break\n\n if contains_all_list_or_tuples:\n # The case with Params A: [(1, 2, 3), (4, 5, 6)]\n # Ensure that each param's length == len(columns)\n columns_len = len(columns)\n for param in params:\n if columns_len != len(param):\n raise Error('\\nlen(`%s`)=%d\\n!=\\ncolum_len(`%s`)=%d' % (\n param, len(param), columns, columns_len))\n return params\n else:\n # The case with Params B: [1, 2, 3]\n # Insert statements' params are only passed as tuples or lists,\n # yet for do_execute_update, we've got to pass in list of list.\n # https://googleapis.dev/python/spanner/latest/transaction-api.html\\\n # #google.cloud.spanner_v1.transaction.Transaction.insert\n n_stride = len(columns)\n else:\n # This is the case where we have for example:\n # SQL: 'INSERT INTO t (f1, f2, f3) VALUES (%s, %s, %s), (%s, %s, %s), (%s, %s, %s)'\n # Params: [1, 2, 3, 4, 5, 6, 7, 8, 9]\n # which should become\n # Columns: (f1, f2, f3)\n # new_params: [(1, 2, 3,), (4, 5, 6,), (7, 8, 9,)]\n\n # Sanity check 1: all the pyformat_values should have the exact same length.\n first, rest = pyformat_args[0], pyformat_args[1:]\n n_stride = first.count('%s')\n for pyfmt_value in rest:\n n = pyfmt_value.count('%s')\n if n_stride != n:\n raise Error('\\nlen(`%s`)=%d\\n!=\\nlen(`%s`)=%d' % (\n first, n_stride, pyfmt_value, n))\n\n # Sanity check 2: len(params) MUST be a multiple of n_stride aka len(count of %s).\n # so that we can properly group for example:\n # Given pyformat args:\n # (%s, %s, %s)\n # Params:\n # [1, 2, 3, 4, 5, 6, 7, 8, 9]\n # into\n # [(1, 2, 3), (4, 5, 6), (7, 8, 9)]\n if (len(params) % n_stride) != 0:\n raise ProgrammingError('Invalid length: len(params)=%d MUST be a multiple of len(pyformat_args)=%d' % (\n len(params), n_stride),\n )\n\n # Now chop up the strides.\n strides = []\n for step in range(0, len(params), n_stride):\n stride = tuple(params[step:step+n_stride:])\n strides.append(stride)\n\n return strides\n\n\nre_PYFORMAT = re.compile(r'(%s|%\\([^\\(\\)]+\\)s)+', re.DOTALL)\n\n\ndef sql_pyformat_args_to_spanner(sql, params):\n \"\"\"\n Transform pyformat set SQL to named arguments for Cloud Spanner.\n For example:\n SQL: 'SELECT * from t where 
f1=%s, f2=%s, f3=%s'\n Params: ('a', 23, '888***')\n becomes:\n SQL: 'SELECT * from t where f1=@a0, f2=@a1, f3=@a2'\n Params: {'a0': 'a', 'a1': 23, 'a2': '888***'}\n\n OR\n SQL: 'SELECT * from t where f1=%(f1)s, f2=%(f2)s, f3=%(f3)s'\n Params: {'f1': 'a', 'f2': 23, 'f3': '888***', 'extra': 'aye')\n becomes:\n SQL: 'SELECT * from t where f1=@a0, f2=@a1, f3=@a2'\n Params: {'a0': 'a', 'a1': 23, 'a2': '888***'}\n \"\"\"\n if not params:\n return sql, params\n\n found_pyformat_placeholders = re_PYFORMAT.findall(sql)\n params_is_dict = isinstance(params, dict)\n\n if params_is_dict:\n if not found_pyformat_placeholders:\n return sql, params\n else:\n n_params = len(params) if params else 0\n n_matches = len(found_pyformat_placeholders)\n if n_matches != n_params:\n raise Error(\n 'pyformat_args mismatch\\ngot %d args from %s\\n'\n 'want %d args in %s' % (n_matches, found_pyformat_placeholders, n_params, params))\n\n if len(params) == 0:\n return sql, params\n\n named_args = {}\n # We've now got for example:\n # Case a) Params is a non-dict\n # SQL: 'SELECT * from t where f1=%s, f2=%s, f3=%s'\n # Params: ('a', 23, '888***')\n # Case b) Params is a dict and the matches are %(value)s'\n for i, pyfmt in enumerate(found_pyformat_placeholders):\n key = 'a%d' % i\n sql = sql.replace(pyfmt, '@'+key, 1)\n if params_is_dict:\n # The '%(key)s' case, so interpolate it.\n resolved_value = pyfmt % params\n named_args[key] = resolved_value\n else:\n named_args[key] = cast_for_spanner(params[i])\n\n return sql, named_args\n\n\ndef cast_for_spanner(param):\n \"\"\"Convert param to its Cloud Spanner equivalent type.\"\"\"\n if isinstance(param, decimal.Decimal):\n return float(param)\n else:\n return param\n\n\ndef get_param_types(params):\n \"\"\"\n Return a dictionary of spanner.param_types for a dictionary of parameters.\n \"\"\"\n if params is None:\n return None\n param_types = {}\n for key, value in params.items():\n if isinstance(value, bool):\n param_types[key] = spanner.param_types.BOOL\n elif isinstance(value, float):\n param_types[key] = spanner.param_types.FLOAT64\n elif isinstance(value, int):\n param_types[key] = spanner.param_types.INT64\n elif isinstance(value, (TimestampStr, datetime.datetime,)):\n param_types[key] = spanner.param_types.TIMESTAMP\n elif isinstance(value, (DateStr, datetime.date,)):\n param_types[key] = spanner.param_types.DATE\n elif isinstance(value, str):\n param_types[key] = spanner.param_types.STRING\n return param_types\n\n\ndef ensure_where_clause(sql):\n \"\"\"\n Cloud Spanner requires a WHERE clause on UPDATE and DELETE statements.\n Add a dummy WHERE clause if necessary.\n \"\"\"\n if any(isinstance(token, sqlparse.sql.Where) for token in sqlparse.parse(sql)[0]):\n return sql\n return sql + ' WHERE 1=1'\n\n\nSPANNER_RESERVED_KEYWORDS = {\n 'ALL',\n 'AND',\n 'ANY',\n 'ARRAY',\n 'AS',\n 'ASC',\n 'ASSERT_ROWS_MODIFIED',\n 'AT',\n 'BETWEEN',\n 'BY',\n 'CASE',\n 'CAST',\n 'COLLATE',\n 'CONTAINS',\n 'CREATE',\n 'CROSS',\n 'CUBE',\n 'CURRENT',\n 'DEFAULT',\n 'DEFINE',\n 'DESC',\n 'DISTINCT',\n 'DROP',\n 'ELSE',\n 'END',\n 'ENUM',\n 'ESCAPE',\n 'EXCEPT',\n 'EXCLUDE',\n 'EXISTS',\n 'EXTRACT',\n 'FALSE',\n 'FETCH',\n 'FOLLOWING',\n 'FOR',\n 'FROM',\n 'FULL',\n 'GROUP',\n 'GROUPING',\n 'GROUPS',\n 'HASH',\n 'HAVING',\n 'IF',\n 'IGNORE',\n 'IN',\n 'INNER',\n 'INTERSECT',\n 'INTERVAL',\n 'INTO',\n 'IS',\n 'JOIN',\n 'LATERAL',\n 'LEFT',\n 'LIKE',\n 'LIMIT',\n 'LOOKUP',\n 'MERGE',\n 'NATURAL',\n 'NEW',\n 'NO',\n 'NOT',\n 'NULL',\n 'NULLS',\n 'OF',\n 'ON',\n 'OR',\n 'ORDER',\n 
'OUTER',\n 'OVER',\n 'PARTITION',\n 'PRECEDING',\n 'PROTO',\n 'RANGE',\n 'RECURSIVE',\n 'RESPECT',\n 'RIGHT',\n 'ROLLUP',\n 'ROWS',\n 'SELECT',\n 'SET',\n 'SOME',\n 'STRUCT',\n 'TABLESAMPLE',\n 'THEN',\n 'TO',\n 'TREAT',\n 'TRUE',\n 'UNBOUNDED',\n 'UNION',\n 'UNNEST',\n 'USING',\n 'WHEN',\n 'WHERE',\n 'WINDOW',\n 'WITH',\n 'WITHIN',\n}\n\n\ndef escape_name(name):\n \"\"\"\n Escape name by applying backticks to value that either\n contain '-' or are any of Cloud Spanner's reserved keywords.\n \"\"\"\n if '-' in name or ' ' in name or name.upper() in SPANNER_RESERVED_KEYWORDS:\n return '`' + name + '`'\n return name\n",
"path": "spanner/dbapi/parse_utils.py"
}
] | [
{
"content": "# Copyright 2020 Google LLC\n#\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file or at\n# https://developers.google.com/open-source/licenses/bsd\n\nimport datetime\nimport decimal\nimport re\nfrom functools import reduce\n\nimport sqlparse\nfrom google.cloud import spanner_v1 as spanner\n\nfrom .exceptions import Error, ProgrammingError\nfrom .parser import parse_values\nfrom .types import DateStr, TimestampStr\n\nSTMT_DDL = 'DDL'\nSTMT_NON_UPDATING = 'NON_UPDATING'\nSTMT_UPDATING = 'UPDATING'\nSTMT_INSERT = 'INSERT'\n\n# Heuristic for identifying statements that don't need to be run as updates.\nre_NON_UPDATE = re.compile(r'^\\s*(SELECT)', re.IGNORECASE)\n\nre_WITH = re.compile(r'^\\s*(WITH)', re.IGNORECASE)\n\n# DDL statements follow https://cloud.google.com/spanner/docs/data-definition-language\nre_DDL = re.compile(r'^\\s*(CREATE|ALTER|DROP)', re.IGNORECASE | re.DOTALL)\n\nre_IS_INSERT = re.compile(r'^\\s*(INSERT)', re.IGNORECASE | re.DOTALL)\n\n\ndef classify_stmt(sql):\n if re_DDL.match(sql):\n return STMT_DDL\n elif re_IS_INSERT.match(sql):\n return STMT_INSERT\n elif re_NON_UPDATE.match(sql):\n return STMT_NON_UPDATING\n elif re_WITH.match(sql):\n # As of Fri-13th-March-2020, Cloud Spanner only supports WITH for DQL\n # statements and doesn't yet support WITH for DML statements.\n # When WITH for DML is added, we'll need to update this classifier accordingly.\n return STMT_NON_UPDATING\n else:\n return STMT_UPDATING\n\n\nre_INSERT = re.compile(\n # Only match the `INSERT INTO <table_name> (columns...)\n # otherwise the rest of the statement could be a complex\n # operation.\n r'^\\s*INSERT INTO (?P<table_name>[^\\s\\(\\)]+)\\s*\\((?P<columns>[^\\(\\)]+)\\)',\n re.IGNORECASE | re.DOTALL,\n)\n\nre_VALUES_TILL_END = re.compile(\n r'VALUES\\s*\\(.+$',\n re.IGNORECASE | re.DOTALL,\n)\n\nre_VALUES_PYFORMAT = re.compile(\n # To match: (%s, %s,....%s)\n r'(\\(\\s*%s[^\\(\\)]+\\))',\n re.DOTALL,\n)\n\n\ndef strip_backticks(name):\n \"\"\"\n Strip backticks off of quoted names For example, '`no`' (a Spanner reserved\n word) becomes 'no'.\n \"\"\"\n has_quotes = name.startswith('`') and name.endswith('`')\n return name[1:-1] if has_quotes else name\n\n\ndef parse_insert(insert_sql, params):\n \"\"\"\n Parse an INSERT statement an generate a list of tuples of the form:\n [\n (SQL, params_per_row1),\n (SQL, params_per_row2),\n (SQL, params_per_row3),\n ...\n ]\n\n There are 4 variants of an INSERT statement:\n a) INSERT INTO <table> (columns...) VALUES (<inlined values>): no params\n b) INSERT INTO <table> (columns...) SELECT_STMT: no params\n c) INSERT INTO <table> (columns...) VALUES (%s,...): with params\n d) INSERT INTO <table> (columns...) VALUES (%s,..<EXPR>...) 
with params and expressions\n\n Thus given each of the forms, it will produce a dictionary describing\n how to upload the contents to Cloud Spanner:\n Case a)\n SQL: INSERT INTO T (f1, f2) VALUES (1, 2)\n it produces:\n {\n 'sql_params_list': [\n ('INSERT INTO T (f1, f2) VALUES (1, 2)', None),\n ],\n }\n\n Case b)\n SQL: 'INSERT INTO T (s, c) SELECT st, zc FROM cus ORDER BY fn, ln',\n it produces:\n {\n 'sql_params_list': [\n ('INSERT INTO T (s, c) SELECT st, zc FROM cus ORDER BY fn, ln', None),\n ]\n }\n\n Case c)\n SQL: INSERT INTO T (f1, f2) VALUES (%s, %s), (%s, %s)\n Params: ['a', 'b', 'c', 'd']\n it produces:\n {\n 'homogenous': True,\n 'table': 'T',\n 'columns': ['f1', 'f2'],\n 'values': [('a', 'b',), ('c', 'd',)],\n }\n\n Case d)\n SQL: INSERT INTO T (f1, f2) VALUES (%s, LOWER(%s)), (UPPER(%s), %s)\n Params: ['a', 'b', 'c', 'd']\n it produces:\n {\n 'sql_params_list': [\n ('INSERT INTO T (f1, f2) VALUES (%s, LOWER(%s))', ('a', 'b',))\n ('INSERT INTO T (f1, f2) VALUES (UPPER(%s), %s)', ('c', 'd',))\n ],\n }\n \"\"\"\n match = re_INSERT.search(insert_sql)\n\n if not match:\n raise ProgrammingError('Could not parse an INSERT statement from %s' % insert_sql)\n\n after_values_sql = re_VALUES_TILL_END.findall(insert_sql)\n if not after_values_sql:\n # Case b)\n return {\n 'sql_params_list': [(insert_sql, None,)],\n }\n\n if not params:\n # Case a) perhaps?\n # Check if any %s exists.\n pyformat_str_count = after_values_sql.count('%s')\n if pyformat_str_count > 0:\n raise ProgrammingError('no params yet there are %d \"%s\" tokens' % pyformat_str_count)\n\n # Confirmed case of:\n # SQL: INSERT INTO T (a1, a2) VALUES (1, 2)\n # Params: None\n return {\n 'sql_params_list': [(insert_sql, None,)],\n }\n\n values_str = after_values_sql[0]\n _, values = parse_values(values_str)\n\n if values.homogenous():\n # Case c)\n\n columns = [\n strip_backticks(mi.strip())\n for mi in match.group('columns').split(',')\n ]\n sql_params_list = []\n insert_sql_preamble = 'INSERT INTO %s (%s) VALUES %s' % (\n match.group('table_name'), match.group('columns'), values.argv[0],\n )\n values_pyformat = [str(arg) for arg in values.argv]\n rows_list = rows_for_insert_or_update(columns, params, values_pyformat)\n for row in rows_list:\n sql_params_list.append((insert_sql_preamble, row,))\n\n return {\n 'sql_params_list': sql_params_list,\n }\n\n # Case d)\n # insert_sql is of the form:\n # INSERT INTO T(c1, c2) VALUES (%s, %s), (%s, LOWER(%s))\n\n # Sanity check:\n # length(all_args) == len(params)\n args_len = reduce(lambda a, b: a+b, [len(arg) for arg in values.argv])\n if args_len != len(params):\n raise ProgrammingError('Invalid length: VALUES(...) 
len: %d != len(params): %d' % (\n args_len, len(params)),\n )\n\n trim_index = insert_sql.find(values_str)\n before_values_sql = insert_sql[:trim_index]\n\n sql_param_tuples = []\n for token_arg in values.argv:\n row_sql = before_values_sql + ' VALUES%s' % token_arg\n row_params, params = tuple(params[0:len(token_arg)]), params[len(token_arg):]\n sql_param_tuples.append((row_sql, row_params,))\n\n return {\n 'sql_params_list': sql_param_tuples,\n }\n\n\ndef rows_for_insert_or_update(columns, params, pyformat_args=None):\n \"\"\"\n Create a tupled list of params to be used as a single value per\n value that inserted from a statement such as\n SQL: 'INSERT INTO t (f1, f2, f3) VALUES (%s, %s, %s), (%s, %s, %s), (%s, %s, %s)'\n Params A: [(1, 2, 3), (4, 5, 6), (7, 8, 9)]\n Params B: [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n We'll have to convert both params types into:\n Params: [(1, 2, 3,), (4, 5, 6,), (7, 8, 9,)]\n \"\"\"\n\n if not pyformat_args:\n # This is the case where we have for example:\n # SQL: 'INSERT INTO t (f1, f2, f3)'\n # Params A: [(1, 2, 3), (4, 5, 6), (7, 8, 9)]\n # Params B: [1, 2, 3, 4, 5, 6, 7, 8, 9]\n #\n # We'll have to convert both params types into:\n # [(1, 2, 3,), (4, 5, 6,), (7, 8, 9,)]\n contains_all_list_or_tuples = True\n for param in params:\n if not (isinstance(param, list) or isinstance(param, tuple)):\n contains_all_list_or_tuples = False\n break\n\n if contains_all_list_or_tuples:\n # The case with Params A: [(1, 2, 3), (4, 5, 6)]\n # Ensure that each param's length == len(columns)\n columns_len = len(columns)\n for param in params:\n if columns_len != len(param):\n raise Error('\\nlen(`%s`)=%d\\n!=\\ncolum_len(`%s`)=%d' % (\n param, len(param), columns, columns_len))\n return params\n else:\n # The case with Params B: [1, 2, 3]\n # Insert statements' params are only passed as tuples or lists,\n # yet for do_execute_update, we've got to pass in list of list.\n # https://googleapis.dev/python/spanner/latest/transaction-api.html\\\n # #google.cloud.spanner_v1.transaction.Transaction.insert\n n_stride = len(columns)\n else:\n # This is the case where we have for example:\n # SQL: 'INSERT INTO t (f1, f2, f3) VALUES (%s, %s, %s), (%s, %s, %s), (%s, %s, %s)'\n # Params: [1, 2, 3, 4, 5, 6, 7, 8, 9]\n # which should become\n # Columns: (f1, f2, f3)\n # new_params: [(1, 2, 3,), (4, 5, 6,), (7, 8, 9,)]\n\n # Sanity check 1: all the pyformat_values should have the exact same length.\n first, rest = pyformat_args[0], pyformat_args[1:]\n n_stride = first.count('%s')\n for pyfmt_value in rest:\n n = pyfmt_value.count('%s')\n if n_stride != n:\n raise Error('\\nlen(`%s`)=%d\\n!=\\nlen(`%s`)=%d' % (\n first, n_stride, pyfmt_value, n))\n\n # Sanity check 2: len(params) MUST be a multiple of n_stride aka len(count of %s).\n # so that we can properly group for example:\n # Given pyformat args:\n # (%s, %s, %s)\n # Params:\n # [1, 2, 3, 4, 5, 6, 7, 8, 9]\n # into\n # [(1, 2, 3), (4, 5, 6), (7, 8, 9)]\n if (len(params) % n_stride) != 0:\n raise ProgrammingError('Invalid length: len(params)=%d MUST be a multiple of len(pyformat_args)=%d' % (\n len(params), n_stride),\n )\n\n # Now chop up the strides.\n strides = []\n for step in range(0, len(params), n_stride):\n stride = tuple(params[step:step+n_stride:])\n strides.append(stride)\n\n return strides\n\n\nre_PYFORMAT = re.compile(r'(%s|%\\([^\\(\\)]+\\)s)+', re.DOTALL)\n\n\ndef sql_pyformat_args_to_spanner(sql, params):\n \"\"\"\n Transform pyformat set SQL to named arguments for Cloud Spanner.\n For example:\n SQL: 'SELECT * from t where 
f1=%s, f2=%s, f3=%s'\n Params: ('a', 23, '888***')\n becomes:\n SQL: 'SELECT * from t where f1=@a0, f2=@a1, f3=@a2'\n Params: {'a0': 'a', 'a1': 23, 'a2': '888***'}\n\n OR\n SQL: 'SELECT * from t where f1=%(f1)s, f2=%(f2)s, f3=%(f3)s'\n Params: {'f1': 'a', 'f2': 23, 'f3': '888***', 'extra': 'aye')\n becomes:\n SQL: 'SELECT * from t where f1=@a0, f2=@a1, f3=@a2'\n Params: {'a0': 'a', 'a1': 23, 'a2': '888***'}\n \"\"\"\n if not params:\n return sql, params\n\n found_pyformat_placeholders = re_PYFORMAT.findall(sql)\n params_is_dict = isinstance(params, dict)\n\n if params_is_dict:\n if not found_pyformat_placeholders:\n return sql, params\n else:\n n_params = len(params) if params else 0\n n_matches = len(found_pyformat_placeholders)\n if n_matches != n_params:\n raise Error(\n 'pyformat_args mismatch\\ngot %d args from %s\\n'\n 'want %d args in %s' % (n_matches, found_pyformat_placeholders, n_params, params))\n\n if len(params) == 0:\n return sql, params\n\n named_args = {}\n # We've now got for example:\n # Case a) Params is a non-dict\n # SQL: 'SELECT * from t where f1=%s, f2=%s, f3=%s'\n # Params: ('a', 23, '888***')\n # Case b) Params is a dict and the matches are %(value)s'\n for i, pyfmt in enumerate(found_pyformat_placeholders):\n key = 'a%d' % i\n sql = sql.replace(pyfmt, '@'+key, 1)\n if params_is_dict:\n # The '%(key)s' case, so interpolate it.\n resolved_value = pyfmt % params\n named_args[key] = resolved_value\n else:\n named_args[key] = cast_for_spanner(params[i])\n\n return sql, named_args\n\n\ndef cast_for_spanner(param):\n \"\"\"Convert param to its Cloud Spanner equivalent type.\"\"\"\n if isinstance(param, decimal.Decimal):\n return float(param)\n else:\n return param\n\n\ndef get_param_types(params):\n \"\"\"\n Return a dictionary of spanner.param_types for a dictionary of parameters.\n \"\"\"\n if params is None:\n return None\n param_types = {}\n for key, value in params.items():\n if isinstance(value, bool):\n param_types[key] = spanner.param_types.BOOL\n elif isinstance(value, float):\n param_types[key] = spanner.param_types.FLOAT64\n elif isinstance(value, int):\n param_types[key] = spanner.param_types.INT64\n elif isinstance(value, (TimestampStr, datetime.datetime,)):\n param_types[key] = spanner.param_types.TIMESTAMP\n elif isinstance(value, (DateStr, datetime.date,)):\n param_types[key] = spanner.param_types.DATE\n elif isinstance(value, str):\n param_types[key] = spanner.param_types.STRING\n elif isinstance(value, bytes):\n param_types[key] = spanner.param_types.BYTES\n return param_types\n\n\ndef ensure_where_clause(sql):\n \"\"\"\n Cloud Spanner requires a WHERE clause on UPDATE and DELETE statements.\n Add a dummy WHERE clause if necessary.\n \"\"\"\n if any(isinstance(token, sqlparse.sql.Where) for token in sqlparse.parse(sql)[0]):\n return sql\n return sql + ' WHERE 1=1'\n\n\nSPANNER_RESERVED_KEYWORDS = {\n 'ALL',\n 'AND',\n 'ANY',\n 'ARRAY',\n 'AS',\n 'ASC',\n 'ASSERT_ROWS_MODIFIED',\n 'AT',\n 'BETWEEN',\n 'BY',\n 'CASE',\n 'CAST',\n 'COLLATE',\n 'CONTAINS',\n 'CREATE',\n 'CROSS',\n 'CUBE',\n 'CURRENT',\n 'DEFAULT',\n 'DEFINE',\n 'DESC',\n 'DISTINCT',\n 'DROP',\n 'ELSE',\n 'END',\n 'ENUM',\n 'ESCAPE',\n 'EXCEPT',\n 'EXCLUDE',\n 'EXISTS',\n 'EXTRACT',\n 'FALSE',\n 'FETCH',\n 'FOLLOWING',\n 'FOR',\n 'FROM',\n 'FULL',\n 'GROUP',\n 'GROUPING',\n 'GROUPS',\n 'HASH',\n 'HAVING',\n 'IF',\n 'IGNORE',\n 'IN',\n 'INNER',\n 'INTERSECT',\n 'INTERVAL',\n 'INTO',\n 'IS',\n 'JOIN',\n 'LATERAL',\n 'LEFT',\n 'LIKE',\n 'LIMIT',\n 'LOOKUP',\n 'MERGE',\n 'NATURAL',\n 'NEW',\n 
'NO',\n 'NOT',\n 'NULL',\n 'NULLS',\n 'OF',\n 'ON',\n 'OR',\n 'ORDER',\n 'OUTER',\n 'OVER',\n 'PARTITION',\n 'PRECEDING',\n 'PROTO',\n 'RANGE',\n 'RECURSIVE',\n 'RESPECT',\n 'RIGHT',\n 'ROLLUP',\n 'ROWS',\n 'SELECT',\n 'SET',\n 'SOME',\n 'STRUCT',\n 'TABLESAMPLE',\n 'THEN',\n 'TO',\n 'TREAT',\n 'TRUE',\n 'UNBOUNDED',\n 'UNION',\n 'UNNEST',\n 'USING',\n 'WHEN',\n 'WHERE',\n 'WINDOW',\n 'WITH',\n 'WITHIN',\n}\n\n\ndef escape_name(name):\n \"\"\"\n Escape name by applying backticks to value that either\n contain '-' or are any of Cloud Spanner's reserved keywords.\n \"\"\"\n if '-' in name or ' ' in name or name.upper() in SPANNER_RESERVED_KEYWORDS:\n return '`' + name + '`'\n return name\n",
"path": "spanner/dbapi/parse_utils.py"
}
] | diff --git a/spanner/dbapi/parse_utils.py b/spanner/dbapi/parse_utils.py
index ac8249b1f2..48035743c4 100644
--- a/spanner/dbapi/parse_utils.py
+++ b/spanner/dbapi/parse_utils.py
@@ -378,6 +378,8 @@ def get_param_types(params):
param_types[key] = spanner.param_types.DATE
elif isinstance(value, str):
param_types[key] = spanner.param_types.STRING
+ elif isinstance(value, bytes):
+ param_types[key] = spanner.param_types.BYTES
return param_types
diff --git a/tests/spanner/dbapi/test_parse_utils.py b/tests/spanner/dbapi/test_parse_utils.py
index 045e379795..5c24ba0573 100644
--- a/tests/spanner/dbapi/test_parse_utils.py
+++ b/tests/spanner/dbapi/test_parse_utils.py
@@ -345,6 +345,7 @@ def test_get_param_types(self):
{'a1': 10, 'b1': TimestampStr('2019-11-26T02:55:41.000000Z')},
{'a1': param_types.INT64, 'b1': param_types.TIMESTAMP},
),
+ ({'a1': b'bytes'}, {'a1': param_types.BYTES}),
({'a1': 10, 'b1': None}, {'a1': param_types.INT64}),
(None, None),
]
|
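For context, a minimal usage sketch of `get_param_types` after this change: bytes parameters now resolve to `param_types.BYTES` alongside the existing mappings. The import paths below are assumptions based on the repository layout shown above, not confirmed by the record itself.

```python
# Hypothetical sketch: exercising get_param_types with a bytes value.
# Import paths are assumed from the file layout above (spanner/dbapi/parse_utils.py).
from google.cloud.spanner_v1 import param_types

from spanner.dbapi.parse_utils import get_param_types

params = {'name': 'alice', 'age': 10, 'blob': b'\x00\x01'}

# Each value maps to its Cloud Spanner type; bytes now yields BYTES
# instead of falling through with no entry.
assert get_param_types(params) == {
    'name': param_types.STRING,
    'age': param_types.INT64,
    'blob': param_types.BYTES,
}
```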
vyperlang__vyper-3276 | `CompilerPanic` when using `len` and `empty` together
### Version Information
* vyper Version (output of `vyper --version`): 0.3.8+commit.d76c6ed2
* OS: OSX
* Python Version (output of `python --version`): 3.8.0
### What's your issue about?
The compiler panics when using the builtin `len` on a `Bytes`, `String`, or `DynArray` value written as `empty(...)`.
```Vyper
@external
def boo():
e:uint256 = len(empty(DynArray[uint256,50]))
```
```Bash
Error compiling: tests/customs/code.vy
Traceback (most recent call last):
File "/Users/trocher/Documents/thesis/vyper/venv/bin/vyper", line 11, in <module>
load_entry_point('vyper==0.3.8', 'console_scripts', 'vyper')()
File "/Users/trocher/Documents/thesis/vyper/vyper/cli/vyper_compile.py", line 57, in _parse_cli_args
return _parse_args(sys.argv[1:])
File "/Users/trocher/Documents/thesis/vyper/vyper/cli/vyper_compile.py", line 154, in _parse_args
compiled = compile_files(
File "/Users/trocher/Documents/thesis/vyper/vyper/cli/vyper_compile.py", line 294, in compile_files
compiler_data = vyper.compile_codes(
File "/Users/trocher/Documents/thesis/vyper/vyper/evm/opcodes.py", line 226, in _wrapper
return fn(*args, **kwargs)
File "/Users/trocher/Documents/thesis/vyper/vyper/compiler/__init__.py", line 141, in compile_codes
exc_handler(contract_name, exc)
File "/Users/trocher/Documents/thesis/vyper/vyper/cli/vyper_compile.py", line 189, in exc_handler
raise exception
File "/Users/trocher/Documents/thesis/vyper/vyper/compiler/__init__.py", line 138, in compile_codes
out[contract_name][output_format] = OUTPUT_FORMATS[output_format](compiler_data)
File "/Users/trocher/Documents/thesis/vyper/vyper/compiler/output.py", line 82, in build_ir_output
return compiler_data.ir_nodes
File "/Users/trocher/Documents/thesis/vyper/vyper/compiler/phases.py", line 126, in ir_nodes
ir, ir_runtime, sigs = self._ir_output
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/functools.py", line 966, in __get__
val = self.func(instance)
File "/Users/trocher/Documents/thesis/vyper/vyper/compiler/phases.py", line 122, in _ir_output
return generate_ir_nodes(self.global_ctx, self.no_optimize)
File "/Users/trocher/Documents/thesis/vyper/vyper/compiler/phases.py", line 258, in generate_ir_nodes
ir_nodes, ir_runtime, function_sigs = module.generate_ir_for_module(global_ctx)
File "/Users/trocher/Documents/thesis/vyper/vyper/codegen/module.py", line 162, in generate_ir_for_module
runtime, internal_functions = _runtime_ir(runtime_functions, all_sigs, global_ctx)
File "/Users/trocher/Documents/thesis/vyper/vyper/codegen/module.py", line 103, in _runtime_ir
func_ir = generate_ir_for_function(func_ast, all_sigs, global_ctx, skip_nonpayable_check)
File "/Users/trocher/Documents/thesis/vyper/vyper/codegen/function_definitions/common.py", line 62, in generate_ir_for_function
o = generate_ir_for_external_function(code, sig, context, skip_nonpayable_check)
File "/Users/trocher/Documents/thesis/vyper/vyper/codegen/function_definitions/external_function.py", line 199, in generate_ir_for_external_function
body += [parse_body(code.body, context, ensure_terminated=True)]
File "/Users/trocher/Documents/thesis/vyper/vyper/codegen/stmt.py", line 414, in parse_body
ir = parse_stmt(stmt, context)
File "/Users/trocher/Documents/thesis/vyper/vyper/codegen/stmt.py", line 388, in parse_stmt
return Stmt(stmt, context).ir_node
File "/Users/trocher/Documents/thesis/vyper/vyper/codegen/stmt.py", line 40, in __init__
self.ir_node = fn()
File "/Users/trocher/Documents/thesis/vyper/vyper/codegen/stmt.py", line 67, in parse_AnnAssign
rhs = Expr(self.stmt.value, self.context).ir_node
File "/Users/trocher/Documents/thesis/vyper/vyper/codegen/expr.py", line 77, in __init__
self.ir_node = fn()
File "/Users/trocher/Documents/thesis/vyper/vyper/codegen/expr.py", line 632, in parse_Call
return DISPATCH_TABLE[function_name].build_IR(self.expr, self.context)
File "/Users/trocher/Documents/thesis/vyper/vyper/builtins/functions.py", line 482, in build_IR
return get_bytearray_length(arg)
File "/Users/trocher/Documents/thesis/vyper/vyper/codegen/core.py", line 309, in get_bytearray_length
return IRnode.from_list(LOAD(arg), typ=typ)
File "/Users/trocher/Documents/thesis/vyper/vyper/codegen/core.py", line 580, in LOAD
raise CompilerPanic("cannot dereference non-pointer type")
vyper.exceptions.CompilerPanic: cannot dereference non-pointer type
This is an unhandled internal compiler error. Please create an issue on Github to notify the developers.
https://github.com/vyperlang/vyper/issues/new?template=bug.md
```
### How can it be fixed?
Implementing the following `TODO` should probably fix the issue:
https://github.com/vyperlang/vyper/blob/d76c6ed26abf02f917eb1b4f6bab734eb3eb598b/vyper/codegen/core.py#L307
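
The `after_files` below apply exactly that: `get_bytearray_length` gains an `~empty` short-circuit mirroring the one already present in `get_dyn_array_count`, so `len(empty(...))` folds to a literal `0` instead of reaching `LOAD` with a non-pointer node (per the traceback, the `len` builtin routes `DynArray` arguments through this helper as well). A sketch of the change, excerpted from the fixed `vyper/codegen/core.py` with added comments:

```python
# get the number of bytes at runtime
def get_bytearray_length(arg):
    typ = UINT256_T

    # empty(Bytes[...]) / empty(String[...]) / empty(DynArray[...]) produce
    # the "~empty" pseudo-node, which has no location to load a length word
    # from; its runtime length is statically known to be zero.
    if arg.value == "~empty":
        return IRnode.from_list(0, typ=typ)

    # normal case: load the length word at the start of the byte array
    return IRnode.from_list(LOAD(arg), typ=typ)
```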
| [
{
"content": "from vyper import ast as vy_ast\nfrom vyper.address_space import CALLDATA, DATA, IMMUTABLES, MEMORY, STORAGE\nfrom vyper.codegen.ir_node import Encoding, IRnode\nfrom vyper.evm.opcodes import version_check\nfrom vyper.exceptions import CompilerPanic, StructureException, TypeCheckFailure, TypeMismatch\nfrom vyper.semantics.types import (\n AddressT,\n BoolT,\n BytesM_T,\n BytesT,\n DArrayT,\n DecimalT,\n HashMapT,\n IntegerT,\n InterfaceT,\n StructT,\n TupleT,\n _BytestringT,\n)\nfrom vyper.semantics.types.shortcuts import BYTES32_T, INT256_T, UINT256_T\nfrom vyper.semantics.types.subscriptable import SArrayT\nfrom vyper.semantics.types.user import EnumT\nfrom vyper.utils import (\n GAS_CALLDATACOPY_WORD,\n GAS_CODECOPY_WORD,\n GAS_IDENTITY,\n GAS_IDENTITYWORD,\n ceil32,\n)\n\nDYNAMIC_ARRAY_OVERHEAD = 1\n\n\ndef is_bytes_m_type(typ):\n return isinstance(typ, BytesM_T)\n\n\ndef is_numeric_type(typ):\n return isinstance(typ, (IntegerT, DecimalT))\n\n\ndef is_integer_type(typ):\n return isinstance(typ, IntegerT)\n\n\ndef is_decimal_type(typ):\n return isinstance(typ, DecimalT)\n\n\ndef is_enum_type(typ):\n return isinstance(typ, EnumT)\n\n\ndef is_tuple_like(typ):\n # A lot of code paths treat tuples and structs similarly\n # so we have a convenience function to detect it\n ret = isinstance(typ, (TupleT, StructT))\n assert ret == hasattr(typ, \"tuple_items\")\n return ret\n\n\ndef is_array_like(typ):\n # For convenience static and dynamic arrays share some code paths\n ret = isinstance(typ, (DArrayT, SArrayT))\n assert ret == typ._is_array_type\n return ret\n\n\ndef get_type_for_exact_size(n_bytes):\n \"\"\"Create a type which will take up exactly n_bytes. Used for allocating internal buffers.\n\n Parameters:\n n_bytes: the number of bytes to allocate\n Returns:\n type: A type which can be passed to context.new_variable\n \"\"\"\n return BytesT(n_bytes - 32 * DYNAMIC_ARRAY_OVERHEAD)\n\n\n# propagate revert message when calls to external contracts fail\ndef check_external_call(call_ir):\n copy_revertdata = [\"returndatacopy\", 0, 0, \"returndatasize\"]\n revert = IRnode.from_list([\"revert\", 0, \"returndatasize\"], error_msg=\"external call failed\")\n\n propagate_revert_ir = [\"seq\", copy_revertdata, revert]\n return [\"if\", [\"iszero\", call_ir], propagate_revert_ir]\n\n\n# cost per byte of the identity precompile\ndef _identity_gas_bound(num_bytes):\n return GAS_IDENTITY + GAS_IDENTITYWORD * (ceil32(num_bytes) // 32)\n\n\ndef _calldatacopy_gas_bound(num_bytes):\n return GAS_CALLDATACOPY_WORD * ceil32(num_bytes) // 32\n\n\ndef _codecopy_gas_bound(num_bytes):\n return GAS_CODECOPY_WORD * ceil32(num_bytes) // 32\n\n\n# Copy byte array word-for-word (including layout)\n# TODO make this a private function\ndef make_byte_array_copier(dst, src):\n assert isinstance(src.typ, _BytestringT)\n assert isinstance(dst.typ, _BytestringT)\n\n _check_assign_bytes(dst, src)\n\n # TODO: remove this branch, copy_bytes and get_bytearray_length should handle\n if src.value == \"~empty\":\n # set length word to 0.\n return STORE(dst, 0)\n\n with src.cache_when_complex(\"src\") as (b1, src):\n with get_bytearray_length(src).cache_when_complex(\"len\") as (b2, len_):\n\n max_bytes = src.typ.maxlen\n\n ret = [\"seq\"]\n # store length\n ret.append(STORE(dst, len_))\n\n dst = bytes_data_ptr(dst)\n src = bytes_data_ptr(src)\n\n ret.append(copy_bytes(dst, src, len_, max_bytes))\n return b1.resolve(b2.resolve(ret))\n\n\ndef bytes_data_ptr(ptr):\n if ptr.location is None:\n raise CompilerPanic(\"tried to 
modify non-pointer type\")\n assert isinstance(ptr.typ, _BytestringT)\n return add_ofst(ptr, ptr.location.word_scale)\n\n\ndef dynarray_data_ptr(ptr):\n if ptr.location is None:\n raise CompilerPanic(\"tried to modify non-pointer type\")\n assert isinstance(ptr.typ, DArrayT)\n return add_ofst(ptr, ptr.location.word_scale)\n\n\ndef _dynarray_make_setter(dst, src):\n assert isinstance(src.typ, DArrayT)\n assert isinstance(dst.typ, DArrayT)\n\n if src.value == \"~empty\":\n return IRnode.from_list(STORE(dst, 0))\n\n if src.value == \"multi\":\n ret = [\"seq\"]\n # handle literals\n\n # write the length word\n store_length = STORE(dst, len(src.args))\n ann = None\n if src.annotation is not None:\n ann = f\"len({src.annotation})\"\n store_length = IRnode.from_list(store_length, annotation=ann)\n ret.append(store_length)\n\n n_items = len(src.args)\n for i in range(n_items):\n k = IRnode.from_list(i, typ=UINT256_T)\n dst_i = get_element_ptr(dst, k, array_bounds_check=False)\n src_i = get_element_ptr(src, k, array_bounds_check=False)\n ret.append(make_setter(dst_i, src_i))\n\n return ret\n\n with src.cache_when_complex(\"darray_src\") as (b1, src):\n\n # for ABI-encoded dynamic data, we must loop to unpack, since\n # the layout does not match our memory layout\n should_loop = src.encoding == Encoding.ABI and src.typ.value_type.abi_type.is_dynamic()\n\n # if the data is not validated, we must loop to unpack\n should_loop |= needs_clamp(src.typ.value_type, src.encoding)\n\n # performance: if the subtype is dynamic, there might be a lot\n # of unused space inside of each element. for instance\n # DynArray[DynArray[uint256, 100], 5] where all the child\n # arrays are empty - for this case, we recursively call\n # into make_setter instead of straight bytes copy\n # TODO we can make this heuristic more precise, e.g.\n # loop when subtype.is_dynamic AND location == storage\n # OR array_size <= /bound where loop is cheaper than memcpy/\n should_loop |= src.typ.value_type.abi_type.is_dynamic()\n\n with get_dyn_array_count(src).cache_when_complex(\"darray_count\") as (b2, count):\n ret = [\"seq\"]\n\n ret.append(STORE(dst, count))\n\n if should_loop:\n i = IRnode.from_list(_freshname(\"copy_darray_ix\"), typ=UINT256_T)\n\n loop_body = make_setter(\n get_element_ptr(dst, i, array_bounds_check=False),\n get_element_ptr(src, i, array_bounds_check=False),\n )\n loop_body.annotation = f\"{dst}[i] = {src}[i]\"\n\n ret.append([\"repeat\", i, 0, count, src.typ.count, loop_body])\n\n else:\n element_size = src.typ.value_type.memory_bytes_required\n # number of elements * size of element in bytes\n n_bytes = _mul(count, element_size)\n max_bytes = src.typ.count * element_size\n\n src_ = dynarray_data_ptr(src)\n dst_ = dynarray_data_ptr(dst)\n ret.append(copy_bytes(dst_, src_, n_bytes, max_bytes))\n\n return b1.resolve(b2.resolve(ret))\n\n\n# Copy bytes\n# Accepts 4 arguments:\n# (i) an IR node for the start position of the source\n# (ii) an IR node for the start position of the destination\n# (iii) an IR node for the length (in bytes)\n# (iv) a constant for the max length (in bytes)\n# NOTE: may pad to ceil32 of `length`! 
If you ask to copy 1 byte, it may\n# copy an entire (32-byte) word, depending on the copy routine chosen.\n# TODO maybe always pad to ceil32, to reduce dirty bytes bugs\ndef copy_bytes(dst, src, length, length_bound):\n annotation = f\"copy up to {length_bound} bytes from {src} to {dst}\"\n\n src = IRnode.from_list(src)\n dst = IRnode.from_list(dst)\n length = IRnode.from_list(length)\n\n with src.cache_when_complex(\"src\") as (b1, src), length.cache_when_complex(\n \"copy_bytes_count\"\n ) as (b2, length), dst.cache_when_complex(\"dst\") as (b3, dst):\n\n assert isinstance(length_bound, int) and length_bound >= 0\n\n # correctness: do not clobber dst\n if length_bound == 0:\n return IRnode.from_list([\"seq\"], annotation=annotation)\n # performance: if we know that length is 0, do not copy anything\n if length.value == 0:\n return IRnode.from_list([\"seq\"], annotation=annotation)\n\n assert src.is_pointer and dst.is_pointer\n\n # fast code for common case where num bytes is small\n # TODO expand this for more cases where num words is less than ~8\n if length_bound <= 32:\n copy_op = STORE(dst, LOAD(src))\n ret = IRnode.from_list(copy_op, annotation=annotation)\n return b1.resolve(b2.resolve(b3.resolve(ret)))\n\n if dst.location == MEMORY and src.location in (MEMORY, CALLDATA, DATA):\n # special cases: batch copy to memory\n # TODO: iloadbytes\n if src.location == MEMORY:\n copy_op = [\"staticcall\", \"gas\", 4, src, length, dst, length]\n gas_bound = _identity_gas_bound(length_bound)\n elif src.location == CALLDATA:\n copy_op = [\"calldatacopy\", dst, src, length]\n gas_bound = _calldatacopy_gas_bound(length_bound)\n elif src.location == DATA:\n copy_op = [\"dloadbytes\", dst, src, length]\n # note: dloadbytes compiles to CODECOPY\n gas_bound = _codecopy_gas_bound(length_bound)\n\n ret = IRnode.from_list(copy_op, annotation=annotation, add_gas_estimate=gas_bound)\n return b1.resolve(b2.resolve(b3.resolve(ret)))\n\n if dst.location == IMMUTABLES and src.location in (MEMORY, DATA):\n # TODO istorebytes-from-mem, istorebytes-from-calldata(?)\n # compile to identity, CODECOPY respectively.\n pass\n\n # general case, copy word-for-word\n # pseudocode for our approach (memory-storage as example):\n # for i in range(len, bound=MAX_LEN):\n # sstore(_dst + i, mload(src + i * 32))\n i = IRnode.from_list(_freshname(\"copy_bytes_ix\"), typ=UINT256_T)\n\n # optimized form of (div (ceil32 len) 32)\n n = [\"div\", [\"add\", 31, length], 32]\n n_bound = ceil32(length_bound) // 32\n\n dst_i = add_ofst(dst, _mul(i, dst.location.word_scale))\n src_i = add_ofst(src, _mul(i, src.location.word_scale))\n\n copy_one_word = STORE(dst_i, LOAD(src_i))\n\n main_loop = [\"repeat\", i, 0, n, n_bound, copy_one_word]\n\n return b1.resolve(\n b2.resolve(b3.resolve(IRnode.from_list(main_loop, annotation=annotation)))\n )\n\n\n# get the number of bytes at runtime\ndef get_bytearray_length(arg):\n typ = UINT256_T\n\n # TODO add \"~empty\" case to mirror get_dyn_array_count\n\n return IRnode.from_list(LOAD(arg), typ=typ)\n\n\n# get the number of elements at runtime\ndef get_dyn_array_count(arg):\n assert isinstance(arg.typ, DArrayT)\n\n typ = UINT256_T\n\n if arg.value == \"multi\":\n return IRnode.from_list(len(arg.args), typ=typ)\n\n if arg.value == \"~empty\":\n # empty(DynArray[...])\n return IRnode.from_list(0, typ=typ)\n\n return IRnode.from_list(LOAD(arg), typ=typ)\n\n\ndef append_dyn_array(darray_node, elem_node):\n assert isinstance(darray_node.typ, DArrayT)\n\n assert darray_node.typ.count > 0, \"jerk boy u r 
out\"\n\n ret = [\"seq\"]\n with darray_node.cache_when_complex(\"darray\") as (b1, darray_node):\n len_ = get_dyn_array_count(darray_node)\n with len_.cache_when_complex(\"old_darray_len\") as (b2, len_):\n assertion = [\"assert\", [\"lt\", len_, darray_node.typ.count]]\n ret.append(IRnode.from_list(assertion, error_msg=f\"{darray_node.typ} bounds check\"))\n ret.append(STORE(darray_node, [\"add\", len_, 1]))\n # NOTE: typechecks elem_node\n # NOTE skip array bounds check bc we already asserted len two lines up\n ret.append(\n make_setter(get_element_ptr(darray_node, len_, array_bounds_check=False), elem_node)\n )\n return IRnode.from_list(b1.resolve(b2.resolve(ret)))\n\n\ndef pop_dyn_array(darray_node, return_popped_item):\n assert isinstance(darray_node.typ, DArrayT)\n assert darray_node.encoding == Encoding.VYPER\n ret = [\"seq\"]\n with darray_node.cache_when_complex(\"darray\") as (b1, darray_node):\n old_len = clamp(\"gt\", get_dyn_array_count(darray_node), 0)\n new_len = IRnode.from_list([\"sub\", old_len, 1], typ=UINT256_T)\n\n with new_len.cache_when_complex(\"new_len\") as (b2, new_len):\n ret.append(STORE(darray_node, new_len))\n\n # NOTE skip array bounds check bc we already asserted len two lines up\n if return_popped_item:\n popped_item = get_element_ptr(darray_node, new_len, array_bounds_check=False)\n ret.append(popped_item)\n typ = popped_item.typ\n location = popped_item.location\n else:\n typ, location = None, None\n return IRnode.from_list(b1.resolve(b2.resolve(ret)), typ=typ, location=location)\n\n\ndef getpos(node):\n return (\n node.lineno,\n node.col_offset,\n getattr(node, \"end_lineno\", None),\n getattr(node, \"end_col_offset\", None),\n )\n\n\n# add an offset to a pointer, keeping location and encoding info\ndef add_ofst(ptr, ofst):\n ret = [\"add\", ptr, ofst]\n return IRnode.from_list(ret, location=ptr.location, encoding=ptr.encoding)\n\n\n# shorthand util\ndef _mul(x, y):\n ret = [\"mul\", x, y]\n return IRnode.from_list(ret)\n\n\n# Resolve pointer locations for ABI-encoded data\ndef _getelemptr_abi_helper(parent, member_t, ofst, clamp=True):\n member_abi_t = member_t.abi_type\n\n # ABI encoding has length word and then pretends length is not there\n # e.g. 
[[1,2]] is encoded as 0x01 <len> 0x20 <inner array ofst> <encode(inner array)>\n # note that inner array ofst is 0x20, not 0x40.\n if has_length_word(parent.typ):\n parent = add_ofst(parent, parent.location.word_scale * DYNAMIC_ARRAY_OVERHEAD)\n\n ofst_ir = add_ofst(parent, ofst)\n\n if member_abi_t.is_dynamic():\n # double dereference, according to ABI spec\n # TODO optimize special case: first dynamic item\n # offset is statically known.\n ofst_ir = add_ofst(parent, unwrap_location(ofst_ir))\n\n return IRnode.from_list(\n ofst_ir,\n typ=member_t,\n location=parent.location,\n encoding=parent.encoding,\n annotation=f\"{parent}{ofst}\",\n )\n\n\n# TODO simplify this code, especially the ABI decoding\ndef _get_element_ptr_tuplelike(parent, key):\n typ = parent.typ\n assert is_tuple_like(typ)\n\n if isinstance(typ, StructT):\n assert isinstance(key, str)\n subtype = typ.member_types[key]\n attrs = list(typ.tuple_keys())\n index = attrs.index(key)\n annotation = key\n else:\n # TupleT\n assert isinstance(key, int)\n subtype = typ.member_types[key]\n attrs = list(typ.tuple_keys())\n index = key\n annotation = None\n\n # generated by empty() + make_setter\n if parent.value == \"~empty\":\n return IRnode.from_list(\"~empty\", typ=subtype)\n\n if parent.value == \"multi\":\n assert parent.encoding != Encoding.ABI, \"no abi-encoded literals\"\n return parent.args[index]\n\n ofst = 0 # offset from parent start\n\n if parent.encoding == Encoding.ABI:\n if parent.location == STORAGE:\n raise CompilerPanic(\"storage variables should not be abi encoded\") # pragma: notest\n\n member_t = typ.member_types[attrs[index]]\n\n for i in range(index):\n member_abi_t = typ.member_types[attrs[i]].abi_type\n ofst += member_abi_t.embedded_static_size()\n\n return _getelemptr_abi_helper(parent, member_t, ofst)\n\n if parent.location.word_addressable:\n for i in range(index):\n ofst += typ.member_types[attrs[i]].storage_size_in_words\n elif parent.location.byte_addressable:\n for i in range(index):\n ofst += typ.member_types[attrs[i]].memory_bytes_required\n else:\n raise CompilerPanic(f\"bad location {parent.location}\") # pragma: notest\n\n return IRnode.from_list(\n add_ofst(parent, ofst),\n typ=subtype,\n location=parent.location,\n encoding=parent.encoding,\n annotation=annotation,\n )\n\n\ndef has_length_word(typ):\n # Consider moving this to an attribute on typ\n return isinstance(typ, (DArrayT, _BytestringT))\n\n\n# TODO simplify this code, especially the ABI decoding\ndef _get_element_ptr_array(parent, key, array_bounds_check):\n\n assert is_array_like(parent.typ)\n\n if not is_integer_type(key.typ):\n raise TypeCheckFailure(f\"{key.typ} used as array index\")\n\n subtype = parent.typ.value_type\n\n if parent.value == \"~empty\":\n if array_bounds_check:\n # this case was previously missing a bounds check. codegen\n # is a bit complicated when bounds check is required, so\n # block it. there is no reason to index into a literal empty\n # array anyways!\n raise TypeCheckFailure(\"indexing into zero array not allowed\")\n return IRnode.from_list(\"~empty\", subtype)\n\n if parent.value == \"multi\":\n assert isinstance(key.value, int)\n return parent.args[key.value]\n\n ix = unwrap_location(key)\n\n if array_bounds_check:\n is_darray = isinstance(parent.typ, DArrayT)\n bound = get_dyn_array_count(parent) if is_darray else parent.typ.count\n # uclamplt works, even for signed ints. 
since two's-complement\n # is used, if the index is negative, (unsigned) LT will interpret\n # it as a very large number, larger than any practical value for\n # an array index, and the clamp will throw an error.\n # NOTE: there are optimization rules for this when ix or bound is literal\n ix = clamp(\"lt\", ix, bound)\n\n if parent.encoding == Encoding.ABI:\n if parent.location == STORAGE:\n raise CompilerPanic(\"storage variables should not be abi encoded\") # pragma: notest\n\n member_abi_t = subtype.abi_type\n\n ofst = _mul(ix, member_abi_t.embedded_static_size())\n\n return _getelemptr_abi_helper(parent, subtype, ofst)\n\n if parent.location.word_addressable:\n element_size = subtype.storage_size_in_words\n elif parent.location.byte_addressable:\n element_size = subtype.memory_bytes_required\n else:\n raise CompilerPanic(\"unreachable\") # pragma: notest\n\n ofst = _mul(ix, element_size)\n\n if has_length_word(parent.typ):\n data_ptr = add_ofst(parent, parent.location.word_scale * DYNAMIC_ARRAY_OVERHEAD)\n else:\n data_ptr = parent\n\n return IRnode.from_list(add_ofst(data_ptr, ofst), typ=subtype, location=parent.location)\n\n\ndef _get_element_ptr_mapping(parent, key):\n assert isinstance(parent.typ, HashMapT)\n subtype = parent.typ.value_type\n key = unwrap_location(key)\n\n # TODO when is key None?\n if key is None or parent.location != STORAGE:\n raise TypeCheckFailure(f\"bad dereference on mapping {parent}[{key}]\")\n\n return IRnode.from_list([\"sha3_64\", parent, key], typ=subtype, location=STORAGE)\n\n\n# Take a value representing a memory or storage location, and descend down to\n# an element or member variable\n# This is analogous (but not necessarily equivalent to) getelementptr in LLVM.\ndef get_element_ptr(parent, key, array_bounds_check=True):\n with parent.cache_when_complex(\"val\") as (b, parent):\n typ = parent.typ\n\n if is_tuple_like(typ):\n ret = _get_element_ptr_tuplelike(parent, key)\n\n elif isinstance(typ, HashMapT):\n ret = _get_element_ptr_mapping(parent, key)\n\n elif is_array_like(typ):\n ret = _get_element_ptr_array(parent, key, array_bounds_check)\n\n else:\n raise CompilerPanic(f\"get_element_ptr cannot be called on {typ}\") # pragma: notest\n\n return b.resolve(ret)\n\n\ndef LOAD(ptr: IRnode) -> IRnode:\n if ptr.location is None:\n raise CompilerPanic(\"cannot dereference non-pointer type\")\n op = ptr.location.load_op\n if op is None:\n raise CompilerPanic(f\"unreachable {ptr.location}\") # pragma: notest\n return IRnode.from_list([op, ptr])\n\n\ndef eval_once_check(name):\n # an IRnode which enforces uniqueness. include with a side-effecting\n # operation to sanity check that the codegen pipeline only generates\n # the side-effecting operation once (otherwise, IR-to-assembly will\n # throw a duplicate label exception). 
there is no runtime overhead\n # since the jumpdest gets optimized out in the final stage of assembly.\n return IRnode.from_list([\"unique_symbol\", name])\n\n\ndef STORE(ptr: IRnode, val: IRnode) -> IRnode:\n if ptr.location is None:\n raise CompilerPanic(\"cannot dereference non-pointer type\")\n op = ptr.location.store_op\n if op is None:\n raise CompilerPanic(f\"unreachable {ptr.location}\") # pragma: notest\n\n _check = _freshname(f\"{op}_\")\n\n store = [op, ptr, val]\n # don't use eval_once_check for memory, immutables because it interferes\n # with optimizer\n if ptr.location in (MEMORY, IMMUTABLES):\n return IRnode.from_list(store)\n\n return IRnode.from_list([\"seq\", eval_once_check(_check), store])\n\n\n# Unwrap location\ndef unwrap_location(orig):\n if orig.location is not None:\n return IRnode.from_list(LOAD(orig), typ=orig.typ)\n else:\n # CMC 2022-03-24 TODO refactor so this branch can be removed\n if orig.value == \"~empty\":\n # must be word type\n return IRnode.from_list(0, typ=orig.typ)\n return orig\n\n\n# utility function, constructs an IR tuple out of a list of IR nodes\ndef ir_tuple_from_args(args):\n typ = TupleT([x.typ for x in args])\n return IRnode.from_list([\"multi\"] + [x for x in args], typ=typ)\n\n\ndef needs_external_call_wrap(typ):\n # for calls to ABI conforming contracts.\n # according to the ABI spec, return types are ALWAYS tuples even\n # if only one element is being returned.\n # https://solidity.readthedocs.io/en/latest/abi-spec.html#function-selector-and-argument-encoding\n # \"and the return values v_1, ..., v_k of f are encoded as\n #\n # enc((v_1, ..., v_k))\n # i.e. the values are combined into a tuple and encoded.\n # \"\n # therefore, wrap it in a tuple if it's not already a tuple.\n # for example, `bytes` is returned as abi-encoded (bytes,)\n # and `(bytes,)` is returned as abi-encoded ((bytes,),)\n # In general `-> X` gets returned as (X,)\n # including structs. MyStruct is returned as abi-encoded (MyStruct,).\n # (Sorry this is so confusing. 
I didn't make these rules.)\n\n return not (isinstance(typ, TupleT) and typ.length > 1)\n\n\ndef calculate_type_for_external_return(typ):\n if needs_external_call_wrap(typ):\n return TupleT([typ])\n return typ\n\n\ndef wrap_value_for_external_return(ir_val):\n # used for LHS promotion\n if needs_external_call_wrap(ir_val.typ):\n return ir_tuple_from_args([ir_val])\n else:\n return ir_val\n\n\ndef set_type_for_external_return(ir_val):\n # used for RHS promotion\n ir_val.typ = calculate_type_for_external_return(ir_val.typ)\n\n\n# return a dummy IRnode with the given type\ndef dummy_node_for_type(typ):\n return IRnode(\"fake_node\", typ=typ)\n\n\ndef _check_assign_bytes(left, right):\n if right.typ.maxlen > left.typ.maxlen:\n raise TypeMismatch(f\"Cannot cast from {right.typ} to {left.typ}\") # pragma: notest\n\n # stricter check for zeroing a byte array.\n if right.value == \"~empty\" and right.typ.maxlen != left.typ.maxlen:\n raise TypeMismatch(f\"Cannot cast from empty({right.typ}) to {left.typ}\") # pragma: notest\n\n\ndef _check_assign_list(left, right):\n def FAIL(): # pragma: no cover\n raise TypeCheckFailure(f\"assigning {right.typ} to {left.typ}\")\n\n if left.value == \"multi\":\n # Cannot do something like [a, b, c] = [1, 2, 3]\n FAIL() # pragma: notest\n\n if isinstance(left.typ, SArrayT):\n if not is_array_like(right.typ):\n FAIL() # pragma: notest\n if left.typ.count != right.typ.count:\n FAIL() # pragma: notest\n\n # TODO recurse into left, right if literals?\n check_assign(\n dummy_node_for_type(left.typ.value_type), dummy_node_for_type(right.typ.value_type)\n )\n\n if isinstance(left.typ, DArrayT):\n if not isinstance(right.typ, DArrayT):\n FAIL() # pragma: notest\n\n if left.typ.count < right.typ.count:\n FAIL() # pragma: notest\n\n # stricter check for zeroing\n if right.value == \"~empty\" and right.typ.count != left.typ.count:\n raise TypeCheckFailure(\n f\"Bad type for clearing bytes: expected {left.typ} but got {right.typ}\"\n ) # pragma: notest\n\n # TODO recurse into left, right if literals?\n check_assign(\n dummy_node_for_type(left.typ.value_type), dummy_node_for_type(right.typ.value_type)\n )\n\n\ndef _check_assign_tuple(left, right):\n def FAIL(): # pragma: no cover\n raise TypeCheckFailure(f\"assigning {right.typ} to {left.typ}\")\n\n if not isinstance(right.typ, left.typ.__class__):\n FAIL() # pragma: notest\n\n if isinstance(left.typ, StructT):\n for k in left.typ.member_types:\n if k not in right.typ.member_types:\n FAIL() # pragma: notest\n # TODO recurse into left, right if literals?\n check_assign(\n dummy_node_for_type(left.typ.member_types[k]),\n dummy_node_for_type(right.typ.member_types[k]),\n )\n\n for k in right.typ.member_types:\n if k not in left.typ.member_types:\n FAIL() # pragma: notest\n\n if left.typ.name != right.typ.name:\n FAIL() # pragma: notest\n\n else:\n if len(left.typ.member_types) != len(right.typ.member_types):\n FAIL() # pragma: notest\n for (l, r) in zip(left.typ.member_types, right.typ.member_types):\n # TODO recurse into left, right if literals?\n check_assign(dummy_node_for_type(l), dummy_node_for_type(r))\n\n\n# sanity check an assignment\n# typechecking source code is done at an earlier phase\n# this function is more of a sanity check for typechecking internally\n# generated assignments\n# TODO: do we still need this?\ndef check_assign(left, right):\n def FAIL(): # pragma: no cover\n raise TypeCheckFailure(f\"assigning {right.typ} to {left.typ} {left} {right}\")\n\n if isinstance(left.typ, _BytestringT):\n 
_check_assign_bytes(left, right)\n elif is_array_like(left.typ):\n _check_assign_list(left, right)\n elif is_tuple_like(left.typ):\n _check_assign_tuple(left, right)\n\n elif left.typ._is_prim_word:\n # TODO once we propagate types from typechecker, introduce this check:\n # if left.typ != right.typ:\n # FAIL() # pragma: notest\n pass\n\n else: # pragma: no cover\n FAIL()\n\n\n_label = 0\n\n\n# TODO might want to coalesce with Context.fresh_varname and compile_ir.mksymbol\ndef _freshname(name):\n global _label\n _label += 1\n return f\"{name}{_label}\"\n\n\ndef reset_names():\n global _label\n _label = 0\n\n\n# returns True if t is ABI encoded and is a type that needs any kind of\n# validation\ndef needs_clamp(t, encoding):\n if encoding == Encoding.VYPER:\n return False\n if encoding != Encoding.ABI:\n raise CompilerPanic(\"unreachable\") # pragma: notest\n if isinstance(t, (_BytestringT, DArrayT)):\n return True\n if isinstance(t, EnumT):\n return len(t._enum_members) < 256\n if isinstance(t, SArrayT):\n return needs_clamp(t.value_type, encoding)\n if is_tuple_like(t):\n return any(needs_clamp(m, encoding) for m in t.tuple_members())\n if t._is_prim_word:\n return t not in (INT256_T, UINT256_T, BYTES32_T)\n\n raise CompilerPanic(\"unreachable\") # pragma: notest\n\n\n# Create an x=y statement, where the types may be compound\ndef make_setter(left, right):\n check_assign(left, right)\n\n # For types which occupy just one word we can use single load/store\n if left.typ._is_prim_word:\n enc = right.encoding # unwrap_location butchers encoding\n right = unwrap_location(right)\n # TODO rethink/streamline the clamp_basetype logic\n if needs_clamp(right.typ, enc):\n right = clamp_basetype(right)\n\n return STORE(left, right)\n\n # Byte arrays\n elif isinstance(left.typ, _BytestringT):\n # TODO rethink/streamline the clamp_basetype logic\n if needs_clamp(right.typ, right.encoding):\n with right.cache_when_complex(\"bs_ptr\") as (b, right):\n copier = make_byte_array_copier(left, right)\n ret = b.resolve([\"seq\", clamp_bytestring(right), copier])\n else:\n ret = make_byte_array_copier(left, right)\n\n return IRnode.from_list(ret)\n\n elif isinstance(left.typ, DArrayT):\n # TODO should we enable this?\n # implicit conversion from sarray to darray\n # if isinstance(right.typ, SArrayType):\n # return _complex_make_setter(left, right)\n\n # TODO rethink/streamline the clamp_basetype logic\n if needs_clamp(right.typ, right.encoding):\n with right.cache_when_complex(\"arr_ptr\") as (b, right):\n copier = _dynarray_make_setter(left, right)\n ret = b.resolve([\"seq\", clamp_dyn_array(right), copier])\n else:\n ret = _dynarray_make_setter(left, right)\n\n return IRnode.from_list(ret)\n\n # Complex Types\n assert isinstance(left.typ, (SArrayT, TupleT, StructT))\n\n return _complex_make_setter(left, right)\n\n\ndef _complex_make_setter(left, right):\n if right.value == \"~empty\" and left.location == MEMORY:\n # optimized memzero\n return mzero(left, left.typ.memory_bytes_required)\n\n ret = [\"seq\"]\n\n if isinstance(left.typ, SArrayT):\n n_items = right.typ.count\n keys = [IRnode.from_list(i, typ=UINT256_T) for i in range(n_items)]\n\n else:\n assert is_tuple_like(left.typ)\n keys = left.typ.tuple_keys()\n\n # if len(keyz) == 0:\n # return IRnode.from_list([\"pass\"])\n\n # general case\n # TODO use copy_bytes when the generated code is above a certain size\n with left.cache_when_complex(\"_L\") as (b1, left), right.cache_when_complex(\"_R\") as (b2, right):\n\n for k in keys:\n l_i = 
get_element_ptr(left, k, array_bounds_check=False)\n r_i = get_element_ptr(right, k, array_bounds_check=False)\n ret.append(make_setter(l_i, r_i))\n\n return b1.resolve(b2.resolve(IRnode.from_list(ret)))\n\n\ndef ensure_in_memory(ir_var, context):\n \"\"\"Ensure a variable is in memory. This is useful for functions\n which expect to operate on memory variables.\n \"\"\"\n if ir_var.location == MEMORY:\n return ir_var\n\n typ = ir_var.typ\n buf = IRnode.from_list(context.new_internal_variable(typ), typ=typ, location=MEMORY)\n do_copy = make_setter(buf, ir_var)\n\n return IRnode.from_list([\"seq\", do_copy, buf], typ=typ, location=MEMORY)\n\n\ndef eval_seq(ir_node):\n \"\"\"Tries to find the \"return\" value of a `seq` statement, in order so\n that the value can be known without possibly evaluating side effects\n \"\"\"\n if ir_node.value in (\"seq\", \"with\") and len(ir_node.args) > 0:\n return eval_seq(ir_node.args[-1])\n if isinstance(ir_node.value, int):\n return IRnode.from_list(ir_node)\n return None\n\n\n# TODO move return checks to vyper/semantics/validation\ndef is_return_from_function(node):\n if isinstance(node, vy_ast.Expr) and node.get(\"value.func.id\") == \"selfdestruct\":\n return True\n if isinstance(node, (vy_ast.Return, vy_ast.Raise)):\n return True\n return False\n\n\ndef check_single_exit(fn_node):\n _check_return_body(fn_node, fn_node.body)\n for node in fn_node.get_descendants(vy_ast.If):\n _check_return_body(node, node.body)\n if node.orelse:\n _check_return_body(node, node.orelse)\n\n\ndef _check_return_body(node, node_list):\n return_count = len([n for n in node_list if is_return_from_function(n)])\n if return_count > 1:\n raise StructureException(\n \"Too too many exit statements (return, raise or selfdestruct).\", node\n )\n # Check for invalid code after returns.\n last_node_pos = len(node_list) - 1\n for idx, n in enumerate(node_list):\n if is_return_from_function(n) and idx < last_node_pos:\n # is not last statement in body.\n raise StructureException(\n \"Exit statement with succeeding code (that will not execute).\", node_list[idx + 1]\n )\n\n\ndef mzero(dst, nbytes):\n # calldatacopy from past-the-end gives zero bytes.\n # cf. YP H.2 (ops section) with CALLDATACOPY spec.\n return IRnode.from_list(\n # calldatacopy mempos calldatapos len\n [\"calldatacopy\", dst, \"calldatasize\", nbytes],\n annotation=\"mzero\",\n )\n\n\n# zero pad a bytearray according to the ABI spec. 
The last word\n# of the byte array needs to be right-padded with zeroes.\ndef zero_pad(bytez_placeholder):\n len_ = [\"mload\", bytez_placeholder]\n dst = [\"add\", [\"add\", bytez_placeholder, 32], \"len\"]\n # the runtime length of the data rounded up to nearest 32\n # from spec:\n # the actual value of X as a byte sequence,\n # followed by the *minimum* number of zero-bytes\n # such that len(enc(X)) is a multiple of 32.\n # optimized form of ceil32(len) - len:\n num_zero_bytes = [\"mod\", [\"sub\", 0, \"len\"], 32]\n return IRnode.from_list(\n [\"with\", \"len\", len_, [\"with\", \"dst\", dst, mzero(\"dst\", num_zero_bytes)]],\n annotation=\"Zero pad\",\n )\n\n\n# convenience rewrites for shr/sar/shl\ndef shr(bits, x):\n if version_check(begin=\"constantinople\"):\n return [\"shr\", bits, x]\n return [\"div\", x, [\"exp\", 2, bits]]\n\n\n# convenience rewrites for shr/sar/shl\ndef shl(bits, x):\n if version_check(begin=\"constantinople\"):\n return [\"shl\", bits, x]\n return [\"mul\", x, [\"exp\", 2, bits]]\n\n\ndef sar(bits, x):\n if version_check(begin=\"constantinople\"):\n return [\"sar\", bits, x]\n\n raise NotImplementedError(\"no SAR emulation for pre-constantinople EVM\")\n\n\ndef clamp_bytestring(ir_node):\n t = ir_node.typ\n if not isinstance(t, _BytestringT):\n raise CompilerPanic(f\"{t} passed to clamp_bytestring\") # pragma: notest\n ret = [\"assert\", [\"le\", get_bytearray_length(ir_node), t.maxlen]]\n return IRnode.from_list(ret, error_msg=f\"{ir_node.typ} bounds check\")\n\n\ndef clamp_dyn_array(ir_node):\n t = ir_node.typ\n assert isinstance(t, DArrayT)\n ret = [\"assert\", [\"le\", get_dyn_array_count(ir_node), t.count]]\n return IRnode.from_list(ret, error_msg=f\"{ir_node.typ} bounds check\")\n\n\n# clampers for basetype\ndef clamp_basetype(ir_node):\n t = ir_node.typ\n if not t._is_prim_word:\n raise CompilerPanic(f\"{t} passed to clamp_basetype\") # pragma: notest\n\n # copy of the input\n ir_node = unwrap_location(ir_node)\n\n if isinstance(t, EnumT):\n bits = len(t._enum_members)\n # assert x >> bits == 0\n ret = int_clamp(ir_node, bits, signed=False)\n\n elif isinstance(t, (IntegerT, DecimalT)):\n if t.bits == 256:\n ret = ir_node\n else:\n ret = int_clamp(ir_node, t.bits, signed=t.is_signed)\n\n elif isinstance(t, BytesM_T):\n if t.m == 32:\n ret = ir_node # special case, no clamp.\n else:\n ret = bytes_clamp(ir_node, t.m)\n\n elif isinstance(t, (AddressT, InterfaceT)):\n ret = int_clamp(ir_node, 160)\n elif t in (BoolT(),):\n ret = int_clamp(ir_node, 1)\n else: # pragma: no cover\n raise CompilerPanic(f\"{t} passed to clamp_basetype\")\n\n return IRnode.from_list(ret, typ=ir_node.typ, error_msg=f\"validate {t}\")\n\n\ndef int_clamp(ir_node, bits, signed=False):\n \"\"\"Generalized clamper for integer types. Takes the number of bits,\n whether it's signed, and returns an IR node which checks it is\n in bounds. (Consumers should use clamp_basetype instead which uses\n type-based dispatch and is a little safer.)\n \"\"\"\n if bits >= 256:\n raise CompilerPanic(f\"invalid clamp: {bits}>=256 ({ir_node})\") # pragma: notest\n\n u = \"u\" if not signed else \"\"\n msg = f\"{u}int{bits} bounds check\"\n with ir_node.cache_when_complex(\"val\") as (b, val):\n if signed:\n # example for bits==128:\n # promote_signed_int(val, bits) is the \"canonical\" version of val\n # if val is in bounds, the bits above bit 128 should be equal.\n # (this works for both val >= 0 and val < 0. 
in the first case,\n # all upper bits should be 0 if val is a valid int128,\n # in the latter case, all upper bits should be 1.)\n assertion = [\"assert\", [\"eq\", val, promote_signed_int(val, bits)]]\n else:\n assertion = [\"assert\", [\"iszero\", shr(bits, val)]]\n\n assertion = IRnode.from_list(assertion, error_msg=msg)\n\n ret = b.resolve([\"seq\", assertion, val])\n\n return IRnode.from_list(ret, annotation=msg)\n\n\ndef bytes_clamp(ir_node: IRnode, n_bytes: int) -> IRnode:\n if not (0 < n_bytes <= 32):\n raise CompilerPanic(f\"bad type: bytes{n_bytes}\")\n msg = f\"bytes{n_bytes} bounds check\"\n with ir_node.cache_when_complex(\"val\") as (b, val):\n assertion = IRnode.from_list([\"assert\", [\"iszero\", shl(n_bytes * 8, val)]], error_msg=msg)\n ret = b.resolve([\"seq\", assertion, val])\n\n return IRnode.from_list(ret, annotation=msg)\n\n\n# e.g. for int8, promote 255 to -1\ndef promote_signed_int(x, bits):\n assert bits % 8 == 0\n ret = [\"signextend\", bits // 8 - 1, x]\n return IRnode.from_list(ret, annotation=f\"promote int{bits}\")\n\n\n# general clamp function for all ops and numbers\ndef clamp(op, arg, bound):\n with IRnode.from_list(arg).cache_when_complex(\"clamp_arg\") as (b1, arg):\n check = IRnode.from_list([\"assert\", [op, arg, bound]], error_msg=f\"clamp {op} {bound}\")\n ret = [\"seq\", check, arg]\n return IRnode.from_list(b1.resolve(ret), typ=arg.typ)\n\n\ndef clamp_nonzero(arg):\n # TODO: use clamp(\"ne\", arg, 0) once optimizer rules can handle it\n with IRnode.from_list(arg).cache_when_complex(\"should_nonzero\") as (b1, arg):\n check = IRnode.from_list([\"assert\", arg], error_msg=\"check nonzero\")\n ret = [\"seq\", check, arg]\n return IRnode.from_list(b1.resolve(ret), typ=arg.typ)\n\n\ndef clamp2(lo, arg, hi, signed):\n with IRnode.from_list(arg).cache_when_complex(\"clamp2_arg\") as (b1, arg):\n GE = \"sge\" if signed else \"ge\"\n LE = \"sle\" if signed else \"le\"\n ret = [\"seq\", [\"assert\", [\"and\", [GE, arg, lo], [LE, arg, hi]]], arg]\n return IRnode.from_list(b1.resolve(ret), typ=arg.typ)\n",
"path": "vyper/codegen/core.py"
}
] | [
{
"content": "from vyper import ast as vy_ast\nfrom vyper.address_space import CALLDATA, DATA, IMMUTABLES, MEMORY, STORAGE\nfrom vyper.codegen.ir_node import Encoding, IRnode\nfrom vyper.evm.opcodes import version_check\nfrom vyper.exceptions import CompilerPanic, StructureException, TypeCheckFailure, TypeMismatch\nfrom vyper.semantics.types import (\n AddressT,\n BoolT,\n BytesM_T,\n BytesT,\n DArrayT,\n DecimalT,\n HashMapT,\n IntegerT,\n InterfaceT,\n StructT,\n TupleT,\n _BytestringT,\n)\nfrom vyper.semantics.types.shortcuts import BYTES32_T, INT256_T, UINT256_T\nfrom vyper.semantics.types.subscriptable import SArrayT\nfrom vyper.semantics.types.user import EnumT\nfrom vyper.utils import (\n GAS_CALLDATACOPY_WORD,\n GAS_CODECOPY_WORD,\n GAS_IDENTITY,\n GAS_IDENTITYWORD,\n ceil32,\n)\n\nDYNAMIC_ARRAY_OVERHEAD = 1\n\n\ndef is_bytes_m_type(typ):\n return isinstance(typ, BytesM_T)\n\n\ndef is_numeric_type(typ):\n return isinstance(typ, (IntegerT, DecimalT))\n\n\ndef is_integer_type(typ):\n return isinstance(typ, IntegerT)\n\n\ndef is_decimal_type(typ):\n return isinstance(typ, DecimalT)\n\n\ndef is_enum_type(typ):\n return isinstance(typ, EnumT)\n\n\ndef is_tuple_like(typ):\n # A lot of code paths treat tuples and structs similarly\n # so we have a convenience function to detect it\n ret = isinstance(typ, (TupleT, StructT))\n assert ret == hasattr(typ, \"tuple_items\")\n return ret\n\n\ndef is_array_like(typ):\n # For convenience static and dynamic arrays share some code paths\n ret = isinstance(typ, (DArrayT, SArrayT))\n assert ret == typ._is_array_type\n return ret\n\n\ndef get_type_for_exact_size(n_bytes):\n \"\"\"Create a type which will take up exactly n_bytes. Used for allocating internal buffers.\n\n Parameters:\n n_bytes: the number of bytes to allocate\n Returns:\n type: A type which can be passed to context.new_variable\n \"\"\"\n return BytesT(n_bytes - 32 * DYNAMIC_ARRAY_OVERHEAD)\n\n\n# propagate revert message when calls to external contracts fail\ndef check_external_call(call_ir):\n copy_revertdata = [\"returndatacopy\", 0, 0, \"returndatasize\"]\n revert = IRnode.from_list([\"revert\", 0, \"returndatasize\"], error_msg=\"external call failed\")\n\n propagate_revert_ir = [\"seq\", copy_revertdata, revert]\n return [\"if\", [\"iszero\", call_ir], propagate_revert_ir]\n\n\n# cost per byte of the identity precompile\ndef _identity_gas_bound(num_bytes):\n return GAS_IDENTITY + GAS_IDENTITYWORD * (ceil32(num_bytes) // 32)\n\n\ndef _calldatacopy_gas_bound(num_bytes):\n return GAS_CALLDATACOPY_WORD * ceil32(num_bytes) // 32\n\n\ndef _codecopy_gas_bound(num_bytes):\n return GAS_CODECOPY_WORD * ceil32(num_bytes) // 32\n\n\n# Copy byte array word-for-word (including layout)\n# TODO make this a private function\ndef make_byte_array_copier(dst, src):\n assert isinstance(src.typ, _BytestringT)\n assert isinstance(dst.typ, _BytestringT)\n\n _check_assign_bytes(dst, src)\n\n # TODO: remove this branch, copy_bytes and get_bytearray_length should handle\n if src.value == \"~empty\":\n # set length word to 0.\n return STORE(dst, 0)\n\n with src.cache_when_complex(\"src\") as (b1, src):\n with get_bytearray_length(src).cache_when_complex(\"len\") as (b2, len_):\n\n max_bytes = src.typ.maxlen\n\n ret = [\"seq\"]\n # store length\n ret.append(STORE(dst, len_))\n\n dst = bytes_data_ptr(dst)\n src = bytes_data_ptr(src)\n\n ret.append(copy_bytes(dst, src, len_, max_bytes))\n return b1.resolve(b2.resolve(ret))\n\n\ndef bytes_data_ptr(ptr):\n if ptr.location is None:\n raise CompilerPanic(\"tried to 
modify non-pointer type\")\n assert isinstance(ptr.typ, _BytestringT)\n return add_ofst(ptr, ptr.location.word_scale)\n\n\ndef dynarray_data_ptr(ptr):\n if ptr.location is None:\n raise CompilerPanic(\"tried to modify non-pointer type\")\n assert isinstance(ptr.typ, DArrayT)\n return add_ofst(ptr, ptr.location.word_scale)\n\n\ndef _dynarray_make_setter(dst, src):\n assert isinstance(src.typ, DArrayT)\n assert isinstance(dst.typ, DArrayT)\n\n if src.value == \"~empty\":\n return IRnode.from_list(STORE(dst, 0))\n\n if src.value == \"multi\":\n ret = [\"seq\"]\n # handle literals\n\n # write the length word\n store_length = STORE(dst, len(src.args))\n ann = None\n if src.annotation is not None:\n ann = f\"len({src.annotation})\"\n store_length = IRnode.from_list(store_length, annotation=ann)\n ret.append(store_length)\n\n n_items = len(src.args)\n for i in range(n_items):\n k = IRnode.from_list(i, typ=UINT256_T)\n dst_i = get_element_ptr(dst, k, array_bounds_check=False)\n src_i = get_element_ptr(src, k, array_bounds_check=False)\n ret.append(make_setter(dst_i, src_i))\n\n return ret\n\n with src.cache_when_complex(\"darray_src\") as (b1, src):\n\n # for ABI-encoded dynamic data, we must loop to unpack, since\n # the layout does not match our memory layout\n should_loop = src.encoding == Encoding.ABI and src.typ.value_type.abi_type.is_dynamic()\n\n # if the data is not validated, we must loop to unpack\n should_loop |= needs_clamp(src.typ.value_type, src.encoding)\n\n # performance: if the subtype is dynamic, there might be a lot\n # of unused space inside of each element. for instance\n # DynArray[DynArray[uint256, 100], 5] where all the child\n # arrays are empty - for this case, we recursively call\n # into make_setter instead of straight bytes copy\n # TODO we can make this heuristic more precise, e.g.\n # loop when subtype.is_dynamic AND location == storage\n # OR array_size <= /bound where loop is cheaper than memcpy/\n should_loop |= src.typ.value_type.abi_type.is_dynamic()\n\n with get_dyn_array_count(src).cache_when_complex(\"darray_count\") as (b2, count):\n ret = [\"seq\"]\n\n ret.append(STORE(dst, count))\n\n if should_loop:\n i = IRnode.from_list(_freshname(\"copy_darray_ix\"), typ=UINT256_T)\n\n loop_body = make_setter(\n get_element_ptr(dst, i, array_bounds_check=False),\n get_element_ptr(src, i, array_bounds_check=False),\n )\n loop_body.annotation = f\"{dst}[i] = {src}[i]\"\n\n ret.append([\"repeat\", i, 0, count, src.typ.count, loop_body])\n\n else:\n element_size = src.typ.value_type.memory_bytes_required\n # number of elements * size of element in bytes\n n_bytes = _mul(count, element_size)\n max_bytes = src.typ.count * element_size\n\n src_ = dynarray_data_ptr(src)\n dst_ = dynarray_data_ptr(dst)\n ret.append(copy_bytes(dst_, src_, n_bytes, max_bytes))\n\n return b1.resolve(b2.resolve(ret))\n\n\n# Copy bytes\n# Accepts 4 arguments:\n# (i) an IR node for the start position of the source\n# (ii) an IR node for the start position of the destination\n# (iii) an IR node for the length (in bytes)\n# (iv) a constant for the max length (in bytes)\n# NOTE: may pad to ceil32 of `length`! 
If you ask to copy 1 byte, it may\n# copy an entire (32-byte) word, depending on the copy routine chosen.\n# TODO maybe always pad to ceil32, to reduce dirty bytes bugs\ndef copy_bytes(dst, src, length, length_bound):\n annotation = f\"copy up to {length_bound} bytes from {src} to {dst}\"\n\n src = IRnode.from_list(src)\n dst = IRnode.from_list(dst)\n length = IRnode.from_list(length)\n\n with src.cache_when_complex(\"src\") as (b1, src), length.cache_when_complex(\n \"copy_bytes_count\"\n ) as (b2, length), dst.cache_when_complex(\"dst\") as (b3, dst):\n\n assert isinstance(length_bound, int) and length_bound >= 0\n\n # correctness: do not clobber dst\n if length_bound == 0:\n return IRnode.from_list([\"seq\"], annotation=annotation)\n # performance: if we know that length is 0, do not copy anything\n if length.value == 0:\n return IRnode.from_list([\"seq\"], annotation=annotation)\n\n assert src.is_pointer and dst.is_pointer\n\n # fast code for common case where num bytes is small\n # TODO expand this for more cases where num words is less than ~8\n if length_bound <= 32:\n copy_op = STORE(dst, LOAD(src))\n ret = IRnode.from_list(copy_op, annotation=annotation)\n return b1.resolve(b2.resolve(b3.resolve(ret)))\n\n if dst.location == MEMORY and src.location in (MEMORY, CALLDATA, DATA):\n # special cases: batch copy to memory\n # TODO: iloadbytes\n if src.location == MEMORY:\n copy_op = [\"staticcall\", \"gas\", 4, src, length, dst, length]\n gas_bound = _identity_gas_bound(length_bound)\n elif src.location == CALLDATA:\n copy_op = [\"calldatacopy\", dst, src, length]\n gas_bound = _calldatacopy_gas_bound(length_bound)\n elif src.location == DATA:\n copy_op = [\"dloadbytes\", dst, src, length]\n # note: dloadbytes compiles to CODECOPY\n gas_bound = _codecopy_gas_bound(length_bound)\n\n ret = IRnode.from_list(copy_op, annotation=annotation, add_gas_estimate=gas_bound)\n return b1.resolve(b2.resolve(b3.resolve(ret)))\n\n if dst.location == IMMUTABLES and src.location in (MEMORY, DATA):\n # TODO istorebytes-from-mem, istorebytes-from-calldata(?)\n # compile to identity, CODECOPY respectively.\n pass\n\n # general case, copy word-for-word\n # pseudocode for our approach (memory-storage as example):\n # for i in range(len, bound=MAX_LEN):\n # sstore(_dst + i, mload(src + i * 32))\n i = IRnode.from_list(_freshname(\"copy_bytes_ix\"), typ=UINT256_T)\n\n # optimized form of (div (ceil32 len) 32)\n n = [\"div\", [\"add\", 31, length], 32]\n n_bound = ceil32(length_bound) // 32\n\n dst_i = add_ofst(dst, _mul(i, dst.location.word_scale))\n src_i = add_ofst(src, _mul(i, src.location.word_scale))\n\n copy_one_word = STORE(dst_i, LOAD(src_i))\n\n main_loop = [\"repeat\", i, 0, n, n_bound, copy_one_word]\n\n return b1.resolve(\n b2.resolve(b3.resolve(IRnode.from_list(main_loop, annotation=annotation)))\n )\n\n\n# get the number of bytes at runtime\ndef get_bytearray_length(arg):\n typ = UINT256_T\n\n # TODO: it would be nice to merge the implementations of get_bytearray_length and\n # get_dynarray_count\n if arg.value == \"~empty\":\n return IRnode.from_list(0, typ=typ)\n\n return IRnode.from_list(LOAD(arg), typ=typ)\n\n\n# get the number of elements at runtime\ndef get_dyn_array_count(arg):\n assert isinstance(arg.typ, DArrayT)\n\n typ = UINT256_T\n\n if arg.value == \"multi\":\n return IRnode.from_list(len(arg.args), typ=typ)\n\n if arg.value == \"~empty\":\n # empty(DynArray[...])\n return IRnode.from_list(0, typ=typ)\n\n return IRnode.from_list(LOAD(arg), typ=typ)\n\n\ndef 
append_dyn_array(darray_node, elem_node):\n assert isinstance(darray_node.typ, DArrayT)\n\n assert darray_node.typ.count > 0, \"jerk boy u r out\"\n\n ret = [\"seq\"]\n with darray_node.cache_when_complex(\"darray\") as (b1, darray_node):\n len_ = get_dyn_array_count(darray_node)\n with len_.cache_when_complex(\"old_darray_len\") as (b2, len_):\n assertion = [\"assert\", [\"lt\", len_, darray_node.typ.count]]\n ret.append(IRnode.from_list(assertion, error_msg=f\"{darray_node.typ} bounds check\"))\n ret.append(STORE(darray_node, [\"add\", len_, 1]))\n # NOTE: typechecks elem_node\n # NOTE skip array bounds check bc we already asserted len two lines up\n ret.append(\n make_setter(get_element_ptr(darray_node, len_, array_bounds_check=False), elem_node)\n )\n return IRnode.from_list(b1.resolve(b2.resolve(ret)))\n\n\ndef pop_dyn_array(darray_node, return_popped_item):\n assert isinstance(darray_node.typ, DArrayT)\n assert darray_node.encoding == Encoding.VYPER\n ret = [\"seq\"]\n with darray_node.cache_when_complex(\"darray\") as (b1, darray_node):\n old_len = clamp(\"gt\", get_dyn_array_count(darray_node), 0)\n new_len = IRnode.from_list([\"sub\", old_len, 1], typ=UINT256_T)\n\n with new_len.cache_when_complex(\"new_len\") as (b2, new_len):\n ret.append(STORE(darray_node, new_len))\n\n # NOTE skip array bounds check bc we already asserted len two lines up\n if return_popped_item:\n popped_item = get_element_ptr(darray_node, new_len, array_bounds_check=False)\n ret.append(popped_item)\n typ = popped_item.typ\n location = popped_item.location\n else:\n typ, location = None, None\n return IRnode.from_list(b1.resolve(b2.resolve(ret)), typ=typ, location=location)\n\n\ndef getpos(node):\n return (\n node.lineno,\n node.col_offset,\n getattr(node, \"end_lineno\", None),\n getattr(node, \"end_col_offset\", None),\n )\n\n\n# add an offset to a pointer, keeping location and encoding info\ndef add_ofst(ptr, ofst):\n ret = [\"add\", ptr, ofst]\n return IRnode.from_list(ret, location=ptr.location, encoding=ptr.encoding)\n\n\n# shorthand util\ndef _mul(x, y):\n ret = [\"mul\", x, y]\n return IRnode.from_list(ret)\n\n\n# Resolve pointer locations for ABI-encoded data\ndef _getelemptr_abi_helper(parent, member_t, ofst, clamp=True):\n member_abi_t = member_t.abi_type\n\n # ABI encoding has length word and then pretends length is not there\n # e.g. 
[[1,2]] is encoded as 0x01 <len> 0x20 <inner array ofst> <encode(inner array)>\n # note that inner array ofst is 0x20, not 0x40.\n if has_length_word(parent.typ):\n parent = add_ofst(parent, parent.location.word_scale * DYNAMIC_ARRAY_OVERHEAD)\n\n ofst_ir = add_ofst(parent, ofst)\n\n if member_abi_t.is_dynamic():\n # double dereference, according to ABI spec\n # TODO optimize special case: first dynamic item\n # offset is statically known.\n ofst_ir = add_ofst(parent, unwrap_location(ofst_ir))\n\n return IRnode.from_list(\n ofst_ir,\n typ=member_t,\n location=parent.location,\n encoding=parent.encoding,\n annotation=f\"{parent}{ofst}\",\n )\n\n\n# TODO simplify this code, especially the ABI decoding\ndef _get_element_ptr_tuplelike(parent, key):\n typ = parent.typ\n assert is_tuple_like(typ)\n\n if isinstance(typ, StructT):\n assert isinstance(key, str)\n subtype = typ.member_types[key]\n attrs = list(typ.tuple_keys())\n index = attrs.index(key)\n annotation = key\n else:\n # TupleT\n assert isinstance(key, int)\n subtype = typ.member_types[key]\n attrs = list(typ.tuple_keys())\n index = key\n annotation = None\n\n # generated by empty() + make_setter\n if parent.value == \"~empty\":\n return IRnode.from_list(\"~empty\", typ=subtype)\n\n if parent.value == \"multi\":\n assert parent.encoding != Encoding.ABI, \"no abi-encoded literals\"\n return parent.args[index]\n\n ofst = 0 # offset from parent start\n\n if parent.encoding == Encoding.ABI:\n if parent.location == STORAGE:\n raise CompilerPanic(\"storage variables should not be abi encoded\") # pragma: notest\n\n member_t = typ.member_types[attrs[index]]\n\n for i in range(index):\n member_abi_t = typ.member_types[attrs[i]].abi_type\n ofst += member_abi_t.embedded_static_size()\n\n return _getelemptr_abi_helper(parent, member_t, ofst)\n\n if parent.location.word_addressable:\n for i in range(index):\n ofst += typ.member_types[attrs[i]].storage_size_in_words\n elif parent.location.byte_addressable:\n for i in range(index):\n ofst += typ.member_types[attrs[i]].memory_bytes_required\n else:\n raise CompilerPanic(f\"bad location {parent.location}\") # pragma: notest\n\n return IRnode.from_list(\n add_ofst(parent, ofst),\n typ=subtype,\n location=parent.location,\n encoding=parent.encoding,\n annotation=annotation,\n )\n\n\ndef has_length_word(typ):\n # Consider moving this to an attribute on typ\n return isinstance(typ, (DArrayT, _BytestringT))\n\n\n# TODO simplify this code, especially the ABI decoding\ndef _get_element_ptr_array(parent, key, array_bounds_check):\n\n assert is_array_like(parent.typ)\n\n if not is_integer_type(key.typ):\n raise TypeCheckFailure(f\"{key.typ} used as array index\")\n\n subtype = parent.typ.value_type\n\n if parent.value == \"~empty\":\n if array_bounds_check:\n # this case was previously missing a bounds check. codegen\n # is a bit complicated when bounds check is required, so\n # block it. there is no reason to index into a literal empty\n # array anyways!\n raise TypeCheckFailure(\"indexing into zero array not allowed\")\n return IRnode.from_list(\"~empty\", subtype)\n\n if parent.value == \"multi\":\n assert isinstance(key.value, int)\n return parent.args[key.value]\n\n ix = unwrap_location(key)\n\n if array_bounds_check:\n is_darray = isinstance(parent.typ, DArrayT)\n bound = get_dyn_array_count(parent) if is_darray else parent.typ.count\n # uclamplt works, even for signed ints. 
since two's-complement\n # is used, if the index is negative, (unsigned) LT will interpret\n # it as a very large number, larger than any practical value for\n # an array index, and the clamp will throw an error.\n # NOTE: there are optimization rules for this when ix or bound is literal\n ix = clamp(\"lt\", ix, bound)\n\n if parent.encoding == Encoding.ABI:\n if parent.location == STORAGE:\n raise CompilerPanic(\"storage variables should not be abi encoded\") # pragma: notest\n\n member_abi_t = subtype.abi_type\n\n ofst = _mul(ix, member_abi_t.embedded_static_size())\n\n return _getelemptr_abi_helper(parent, subtype, ofst)\n\n if parent.location.word_addressable:\n element_size = subtype.storage_size_in_words\n elif parent.location.byte_addressable:\n element_size = subtype.memory_bytes_required\n else:\n raise CompilerPanic(\"unreachable\") # pragma: notest\n\n ofst = _mul(ix, element_size)\n\n if has_length_word(parent.typ):\n data_ptr = add_ofst(parent, parent.location.word_scale * DYNAMIC_ARRAY_OVERHEAD)\n else:\n data_ptr = parent\n\n return IRnode.from_list(add_ofst(data_ptr, ofst), typ=subtype, location=parent.location)\n\n\ndef _get_element_ptr_mapping(parent, key):\n assert isinstance(parent.typ, HashMapT)\n subtype = parent.typ.value_type\n key = unwrap_location(key)\n\n # TODO when is key None?\n if key is None or parent.location != STORAGE:\n raise TypeCheckFailure(f\"bad dereference on mapping {parent}[{key}]\")\n\n return IRnode.from_list([\"sha3_64\", parent, key], typ=subtype, location=STORAGE)\n\n\n# Take a value representing a memory or storage location, and descend down to\n# an element or member variable\n# This is analogous (but not necessarily equivalent to) getelementptr in LLVM.\ndef get_element_ptr(parent, key, array_bounds_check=True):\n with parent.cache_when_complex(\"val\") as (b, parent):\n typ = parent.typ\n\n if is_tuple_like(typ):\n ret = _get_element_ptr_tuplelike(parent, key)\n\n elif isinstance(typ, HashMapT):\n ret = _get_element_ptr_mapping(parent, key)\n\n elif is_array_like(typ):\n ret = _get_element_ptr_array(parent, key, array_bounds_check)\n\n else:\n raise CompilerPanic(f\"get_element_ptr cannot be called on {typ}\") # pragma: notest\n\n return b.resolve(ret)\n\n\ndef LOAD(ptr: IRnode) -> IRnode:\n if ptr.location is None:\n raise CompilerPanic(\"cannot dereference non-pointer type\")\n op = ptr.location.load_op\n if op is None:\n raise CompilerPanic(f\"unreachable {ptr.location}\") # pragma: notest\n return IRnode.from_list([op, ptr])\n\n\ndef eval_once_check(name):\n # an IRnode which enforces uniqueness. include with a side-effecting\n # operation to sanity check that the codegen pipeline only generates\n # the side-effecting operation once (otherwise, IR-to-assembly will\n # throw a duplicate label exception). 
there is no runtime overhead\n # since the jumpdest gets optimized out in the final stage of assembly.\n return IRnode.from_list([\"unique_symbol\", name])\n\n\ndef STORE(ptr: IRnode, val: IRnode) -> IRnode:\n if ptr.location is None:\n raise CompilerPanic(\"cannot dereference non-pointer type\")\n op = ptr.location.store_op\n if op is None:\n raise CompilerPanic(f\"unreachable {ptr.location}\") # pragma: notest\n\n _check = _freshname(f\"{op}_\")\n\n store = [op, ptr, val]\n # don't use eval_once_check for memory, immutables because it interferes\n # with optimizer\n if ptr.location in (MEMORY, IMMUTABLES):\n return IRnode.from_list(store)\n\n return IRnode.from_list([\"seq\", eval_once_check(_check), store])\n\n\n# Unwrap location\ndef unwrap_location(orig):\n if orig.location is not None:\n return IRnode.from_list(LOAD(orig), typ=orig.typ)\n else:\n # CMC 2022-03-24 TODO refactor so this branch can be removed\n if orig.value == \"~empty\":\n # must be word type\n return IRnode.from_list(0, typ=orig.typ)\n return orig\n\n\n# utility function, constructs an IR tuple out of a list of IR nodes\ndef ir_tuple_from_args(args):\n typ = TupleT([x.typ for x in args])\n return IRnode.from_list([\"multi\"] + [x for x in args], typ=typ)\n\n\ndef needs_external_call_wrap(typ):\n # for calls to ABI conforming contracts.\n # according to the ABI spec, return types are ALWAYS tuples even\n # if only one element is being returned.\n # https://solidity.readthedocs.io/en/latest/abi-spec.html#function-selector-and-argument-encoding\n # \"and the return values v_1, ..., v_k of f are encoded as\n #\n # enc((v_1, ..., v_k))\n # i.e. the values are combined into a tuple and encoded.\n # \"\n # therefore, wrap it in a tuple if it's not already a tuple.\n # for example, `bytes` is returned as abi-encoded (bytes,)\n # and `(bytes,)` is returned as abi-encoded ((bytes,),)\n # In general `-> X` gets returned as (X,)\n # including structs. MyStruct is returned as abi-encoded (MyStruct,).\n # (Sorry this is so confusing. 
I didn't make these rules.)\n\n return not (isinstance(typ, TupleT) and typ.length > 1)\n\n\ndef calculate_type_for_external_return(typ):\n if needs_external_call_wrap(typ):\n return TupleT([typ])\n return typ\n\n\ndef wrap_value_for_external_return(ir_val):\n # used for LHS promotion\n if needs_external_call_wrap(ir_val.typ):\n return ir_tuple_from_args([ir_val])\n else:\n return ir_val\n\n\ndef set_type_for_external_return(ir_val):\n # used for RHS promotion\n ir_val.typ = calculate_type_for_external_return(ir_val.typ)\n\n\n# return a dummy IRnode with the given type\ndef dummy_node_for_type(typ):\n return IRnode(\"fake_node\", typ=typ)\n\n\ndef _check_assign_bytes(left, right):\n if right.typ.maxlen > left.typ.maxlen:\n raise TypeMismatch(f\"Cannot cast from {right.typ} to {left.typ}\") # pragma: notest\n\n # stricter check for zeroing a byte array.\n if right.value == \"~empty\" and right.typ.maxlen != left.typ.maxlen:\n raise TypeMismatch(f\"Cannot cast from empty({right.typ}) to {left.typ}\") # pragma: notest\n\n\ndef _check_assign_list(left, right):\n def FAIL(): # pragma: no cover\n raise TypeCheckFailure(f\"assigning {right.typ} to {left.typ}\")\n\n if left.value == \"multi\":\n # Cannot do something like [a, b, c] = [1, 2, 3]\n FAIL() # pragma: notest\n\n if isinstance(left.typ, SArrayT):\n if not is_array_like(right.typ):\n FAIL() # pragma: notest\n if left.typ.count != right.typ.count:\n FAIL() # pragma: notest\n\n # TODO recurse into left, right if literals?\n check_assign(\n dummy_node_for_type(left.typ.value_type), dummy_node_for_type(right.typ.value_type)\n )\n\n if isinstance(left.typ, DArrayT):\n if not isinstance(right.typ, DArrayT):\n FAIL() # pragma: notest\n\n if left.typ.count < right.typ.count:\n FAIL() # pragma: notest\n\n # stricter check for zeroing\n if right.value == \"~empty\" and right.typ.count != left.typ.count:\n raise TypeCheckFailure(\n f\"Bad type for clearing bytes: expected {left.typ} but got {right.typ}\"\n ) # pragma: notest\n\n # TODO recurse into left, right if literals?\n check_assign(\n dummy_node_for_type(left.typ.value_type), dummy_node_for_type(right.typ.value_type)\n )\n\n\ndef _check_assign_tuple(left, right):\n def FAIL(): # pragma: no cover\n raise TypeCheckFailure(f\"assigning {right.typ} to {left.typ}\")\n\n if not isinstance(right.typ, left.typ.__class__):\n FAIL() # pragma: notest\n\n if isinstance(left.typ, StructT):\n for k in left.typ.member_types:\n if k not in right.typ.member_types:\n FAIL() # pragma: notest\n # TODO recurse into left, right if literals?\n check_assign(\n dummy_node_for_type(left.typ.member_types[k]),\n dummy_node_for_type(right.typ.member_types[k]),\n )\n\n for k in right.typ.member_types:\n if k not in left.typ.member_types:\n FAIL() # pragma: notest\n\n if left.typ.name != right.typ.name:\n FAIL() # pragma: notest\n\n else:\n if len(left.typ.member_types) != len(right.typ.member_types):\n FAIL() # pragma: notest\n for (l, r) in zip(left.typ.member_types, right.typ.member_types):\n # TODO recurse into left, right if literals?\n check_assign(dummy_node_for_type(l), dummy_node_for_type(r))\n\n\n# sanity check an assignment\n# typechecking source code is done at an earlier phase\n# this function is more of a sanity check for typechecking internally\n# generated assignments\n# TODO: do we still need this?\ndef check_assign(left, right):\n def FAIL(): # pragma: no cover\n raise TypeCheckFailure(f\"assigning {right.typ} to {left.typ} {left} {right}\")\n\n if isinstance(left.typ, _BytestringT):\n 
_check_assign_bytes(left, right)\n elif is_array_like(left.typ):\n _check_assign_list(left, right)\n elif is_tuple_like(left.typ):\n _check_assign_tuple(left, right)\n\n elif left.typ._is_prim_word:\n # TODO once we propagate types from typechecker, introduce this check:\n # if left.typ != right.typ:\n # FAIL() # pragma: notest\n pass\n\n else: # pragma: no cover\n FAIL()\n\n\n_label = 0\n\n\n# TODO might want to coalesce with Context.fresh_varname and compile_ir.mksymbol\ndef _freshname(name):\n global _label\n _label += 1\n return f\"{name}{_label}\"\n\n\ndef reset_names():\n global _label\n _label = 0\n\n\n# returns True if t is ABI encoded and is a type that needs any kind of\n# validation\ndef needs_clamp(t, encoding):\n if encoding == Encoding.VYPER:\n return False\n if encoding != Encoding.ABI:\n raise CompilerPanic(\"unreachable\") # pragma: notest\n if isinstance(t, (_BytestringT, DArrayT)):\n return True\n if isinstance(t, EnumT):\n return len(t._enum_members) < 256\n if isinstance(t, SArrayT):\n return needs_clamp(t.value_type, encoding)\n if is_tuple_like(t):\n return any(needs_clamp(m, encoding) for m in t.tuple_members())\n if t._is_prim_word:\n return t not in (INT256_T, UINT256_T, BYTES32_T)\n\n raise CompilerPanic(\"unreachable\") # pragma: notest\n\n\n# Create an x=y statement, where the types may be compound\ndef make_setter(left, right):\n check_assign(left, right)\n\n # For types which occupy just one word we can use single load/store\n if left.typ._is_prim_word:\n enc = right.encoding # unwrap_location butchers encoding\n right = unwrap_location(right)\n # TODO rethink/streamline the clamp_basetype logic\n if needs_clamp(right.typ, enc):\n right = clamp_basetype(right)\n\n return STORE(left, right)\n\n # Byte arrays\n elif isinstance(left.typ, _BytestringT):\n # TODO rethink/streamline the clamp_basetype logic\n if needs_clamp(right.typ, right.encoding):\n with right.cache_when_complex(\"bs_ptr\") as (b, right):\n copier = make_byte_array_copier(left, right)\n ret = b.resolve([\"seq\", clamp_bytestring(right), copier])\n else:\n ret = make_byte_array_copier(left, right)\n\n return IRnode.from_list(ret)\n\n elif isinstance(left.typ, DArrayT):\n # TODO should we enable this?\n # implicit conversion from sarray to darray\n # if isinstance(right.typ, SArrayType):\n # return _complex_make_setter(left, right)\n\n # TODO rethink/streamline the clamp_basetype logic\n if needs_clamp(right.typ, right.encoding):\n with right.cache_when_complex(\"arr_ptr\") as (b, right):\n copier = _dynarray_make_setter(left, right)\n ret = b.resolve([\"seq\", clamp_dyn_array(right), copier])\n else:\n ret = _dynarray_make_setter(left, right)\n\n return IRnode.from_list(ret)\n\n # Complex Types\n assert isinstance(left.typ, (SArrayT, TupleT, StructT))\n\n return _complex_make_setter(left, right)\n\n\ndef _complex_make_setter(left, right):\n if right.value == \"~empty\" and left.location == MEMORY:\n # optimized memzero\n return mzero(left, left.typ.memory_bytes_required)\n\n ret = [\"seq\"]\n\n if isinstance(left.typ, SArrayT):\n n_items = right.typ.count\n keys = [IRnode.from_list(i, typ=UINT256_T) for i in range(n_items)]\n\n else:\n assert is_tuple_like(left.typ)\n keys = left.typ.tuple_keys()\n\n # if len(keyz) == 0:\n # return IRnode.from_list([\"pass\"])\n\n # general case\n # TODO use copy_bytes when the generated code is above a certain size\n with left.cache_when_complex(\"_L\") as (b1, left), right.cache_when_complex(\"_R\") as (b2, right):\n\n for k in keys:\n l_i = 
get_element_ptr(left, k, array_bounds_check=False)\n r_i = get_element_ptr(right, k, array_bounds_check=False)\n ret.append(make_setter(l_i, r_i))\n\n return b1.resolve(b2.resolve(IRnode.from_list(ret)))\n\n\ndef ensure_in_memory(ir_var, context):\n \"\"\"Ensure a variable is in memory. This is useful for functions\n which expect to operate on memory variables.\n \"\"\"\n if ir_var.location == MEMORY:\n return ir_var\n\n typ = ir_var.typ\n buf = IRnode.from_list(context.new_internal_variable(typ), typ=typ, location=MEMORY)\n do_copy = make_setter(buf, ir_var)\n\n return IRnode.from_list([\"seq\", do_copy, buf], typ=typ, location=MEMORY)\n\n\ndef eval_seq(ir_node):\n \"\"\"Tries to find the \"return\" value of a `seq` statement, in order so\n that the value can be known without possibly evaluating side effects\n \"\"\"\n if ir_node.value in (\"seq\", \"with\") and len(ir_node.args) > 0:\n return eval_seq(ir_node.args[-1])\n if isinstance(ir_node.value, int):\n return IRnode.from_list(ir_node)\n return None\n\n\n# TODO move return checks to vyper/semantics/validation\ndef is_return_from_function(node):\n if isinstance(node, vy_ast.Expr) and node.get(\"value.func.id\") == \"selfdestruct\":\n return True\n if isinstance(node, (vy_ast.Return, vy_ast.Raise)):\n return True\n return False\n\n\ndef check_single_exit(fn_node):\n _check_return_body(fn_node, fn_node.body)\n for node in fn_node.get_descendants(vy_ast.If):\n _check_return_body(node, node.body)\n if node.orelse:\n _check_return_body(node, node.orelse)\n\n\ndef _check_return_body(node, node_list):\n return_count = len([n for n in node_list if is_return_from_function(n)])\n if return_count > 1:\n raise StructureException(\n \"Too too many exit statements (return, raise or selfdestruct).\", node\n )\n # Check for invalid code after returns.\n last_node_pos = len(node_list) - 1\n for idx, n in enumerate(node_list):\n if is_return_from_function(n) and idx < last_node_pos:\n # is not last statement in body.\n raise StructureException(\n \"Exit statement with succeeding code (that will not execute).\", node_list[idx + 1]\n )\n\n\ndef mzero(dst, nbytes):\n # calldatacopy from past-the-end gives zero bytes.\n # cf. YP H.2 (ops section) with CALLDATACOPY spec.\n return IRnode.from_list(\n # calldatacopy mempos calldatapos len\n [\"calldatacopy\", dst, \"calldatasize\", nbytes],\n annotation=\"mzero\",\n )\n\n\n# zero pad a bytearray according to the ABI spec. 
The last word\n# of the byte array needs to be right-padded with zeroes.\ndef zero_pad(bytez_placeholder):\n len_ = [\"mload\", bytez_placeholder]\n dst = [\"add\", [\"add\", bytez_placeholder, 32], \"len\"]\n # the runtime length of the data rounded up to nearest 32\n # from spec:\n # the actual value of X as a byte sequence,\n # followed by the *minimum* number of zero-bytes\n # such that len(enc(X)) is a multiple of 32.\n # optimized form of ceil32(len) - len:\n num_zero_bytes = [\"mod\", [\"sub\", 0, \"len\"], 32]\n return IRnode.from_list(\n [\"with\", \"len\", len_, [\"with\", \"dst\", dst, mzero(\"dst\", num_zero_bytes)]],\n annotation=\"Zero pad\",\n )\n\n\n# convenience rewrites for shr/sar/shl\ndef shr(bits, x):\n if version_check(begin=\"constantinople\"):\n return [\"shr\", bits, x]\n return [\"div\", x, [\"exp\", 2, bits]]\n\n\n# convenience rewrites for shr/sar/shl\ndef shl(bits, x):\n if version_check(begin=\"constantinople\"):\n return [\"shl\", bits, x]\n return [\"mul\", x, [\"exp\", 2, bits]]\n\n\ndef sar(bits, x):\n if version_check(begin=\"constantinople\"):\n return [\"sar\", bits, x]\n\n raise NotImplementedError(\"no SAR emulation for pre-constantinople EVM\")\n\n\ndef clamp_bytestring(ir_node):\n t = ir_node.typ\n if not isinstance(t, _BytestringT):\n raise CompilerPanic(f\"{t} passed to clamp_bytestring\") # pragma: notest\n ret = [\"assert\", [\"le\", get_bytearray_length(ir_node), t.maxlen]]\n return IRnode.from_list(ret, error_msg=f\"{ir_node.typ} bounds check\")\n\n\ndef clamp_dyn_array(ir_node):\n t = ir_node.typ\n assert isinstance(t, DArrayT)\n ret = [\"assert\", [\"le\", get_dyn_array_count(ir_node), t.count]]\n return IRnode.from_list(ret, error_msg=f\"{ir_node.typ} bounds check\")\n\n\n# clampers for basetype\ndef clamp_basetype(ir_node):\n t = ir_node.typ\n if not t._is_prim_word:\n raise CompilerPanic(f\"{t} passed to clamp_basetype\") # pragma: notest\n\n # copy of the input\n ir_node = unwrap_location(ir_node)\n\n if isinstance(t, EnumT):\n bits = len(t._enum_members)\n # assert x >> bits == 0\n ret = int_clamp(ir_node, bits, signed=False)\n\n elif isinstance(t, (IntegerT, DecimalT)):\n if t.bits == 256:\n ret = ir_node\n else:\n ret = int_clamp(ir_node, t.bits, signed=t.is_signed)\n\n elif isinstance(t, BytesM_T):\n if t.m == 32:\n ret = ir_node # special case, no clamp.\n else:\n ret = bytes_clamp(ir_node, t.m)\n\n elif isinstance(t, (AddressT, InterfaceT)):\n ret = int_clamp(ir_node, 160)\n elif t in (BoolT(),):\n ret = int_clamp(ir_node, 1)\n else: # pragma: no cover\n raise CompilerPanic(f\"{t} passed to clamp_basetype\")\n\n return IRnode.from_list(ret, typ=ir_node.typ, error_msg=f\"validate {t}\")\n\n\ndef int_clamp(ir_node, bits, signed=False):\n \"\"\"Generalized clamper for integer types. Takes the number of bits,\n whether it's signed, and returns an IR node which checks it is\n in bounds. (Consumers should use clamp_basetype instead which uses\n type-based dispatch and is a little safer.)\n \"\"\"\n if bits >= 256:\n raise CompilerPanic(f\"invalid clamp: {bits}>=256 ({ir_node})\") # pragma: notest\n\n u = \"u\" if not signed else \"\"\n msg = f\"{u}int{bits} bounds check\"\n with ir_node.cache_when_complex(\"val\") as (b, val):\n if signed:\n # example for bits==128:\n # promote_signed_int(val, bits) is the \"canonical\" version of val\n # if val is in bounds, the bits above bit 128 should be equal.\n # (this works for both val >= 0 and val < 0. 
in the first case,\n # all upper bits should be 0 if val is a valid int128,\n # in the latter case, all upper bits should be 1.)\n assertion = [\"assert\", [\"eq\", val, promote_signed_int(val, bits)]]\n else:\n assertion = [\"assert\", [\"iszero\", shr(bits, val)]]\n\n assertion = IRnode.from_list(assertion, error_msg=msg)\n\n ret = b.resolve([\"seq\", assertion, val])\n\n return IRnode.from_list(ret, annotation=msg)\n\n\ndef bytes_clamp(ir_node: IRnode, n_bytes: int) -> IRnode:\n if not (0 < n_bytes <= 32):\n raise CompilerPanic(f\"bad type: bytes{n_bytes}\")\n msg = f\"bytes{n_bytes} bounds check\"\n with ir_node.cache_when_complex(\"val\") as (b, val):\n assertion = IRnode.from_list([\"assert\", [\"iszero\", shl(n_bytes * 8, val)]], error_msg=msg)\n ret = b.resolve([\"seq\", assertion, val])\n\n return IRnode.from_list(ret, annotation=msg)\n\n\n# e.g. for int8, promote 255 to -1\ndef promote_signed_int(x, bits):\n assert bits % 8 == 0\n ret = [\"signextend\", bits // 8 - 1, x]\n return IRnode.from_list(ret, annotation=f\"promote int{bits}\")\n\n\n# general clamp function for all ops and numbers\ndef clamp(op, arg, bound):\n with IRnode.from_list(arg).cache_when_complex(\"clamp_arg\") as (b1, arg):\n check = IRnode.from_list([\"assert\", [op, arg, bound]], error_msg=f\"clamp {op} {bound}\")\n ret = [\"seq\", check, arg]\n return IRnode.from_list(b1.resolve(ret), typ=arg.typ)\n\n\ndef clamp_nonzero(arg):\n # TODO: use clamp(\"ne\", arg, 0) once optimizer rules can handle it\n with IRnode.from_list(arg).cache_when_complex(\"should_nonzero\") as (b1, arg):\n check = IRnode.from_list([\"assert\", arg], error_msg=\"check nonzero\")\n ret = [\"seq\", check, arg]\n return IRnode.from_list(b1.resolve(ret), typ=arg.typ)\n\n\ndef clamp2(lo, arg, hi, signed):\n with IRnode.from_list(arg).cache_when_complex(\"clamp2_arg\") as (b1, arg):\n GE = \"sge\" if signed else \"ge\"\n LE = \"sle\" if signed else \"le\"\n ret = [\"seq\", [\"assert\", [\"and\", [GE, arg, lo], [LE, arg, hi]]], arg]\n return IRnode.from_list(b1.resolve(ret), typ=arg.typ)\n",
"path": "vyper/codegen/core.py"
}
] | diff --git a/docs/built-in-functions.rst b/docs/built-in-functions.rst
index 5c3f406f77..8d531729f3 100644
--- a/docs/built-in-functions.rst
+++ b/docs/built-in-functions.rst
@@ -918,9 +918,9 @@ Utilities
def foo():
x: uint256[2][5] = empty(uint256[2][5])
-.. py:function:: len(b: Union[Bytes, String]) -> uint256
+.. py:function:: len(b: Union[Bytes, String, DynArray[_Type, _Integer]]) -> uint256
- Return the length of a given ``Bytes`` or ``String``.
+ Return the length of a given ``Bytes``, ``String`` or ``DynArray[_Type, _Integer]``.
.. code-block:: python
diff --git a/tests/parser/functions/test_length.py b/tests/parser/functions/test_length.py
index 4a1404a292..e127405b90 100644
--- a/tests/parser/functions/test_length.py
+++ b/tests/parser/functions/test_length.py
@@ -1,3 +1,6 @@
+import pytest
+
+
def test_test_length(get_contract_with_gas_estimation):
test_length = """
y: Bytes[10]
@@ -12,3 +15,16 @@ def foo(inp: Bytes[10]) -> uint256:
c = get_contract_with_gas_estimation(test_length)
assert c.foo(b"badminton") == 954, c.foo(b"badminton")
print("Passed length test")
+
+
[email protected]("typ", ["DynArray[uint256, 50]", "Bytes[50]", "String[50]"])
+def test_zero_length(get_contract_with_gas_estimation, typ):
+ code = f"""
+@external
+def boo() -> uint256:
+ e: uint256 = len(empty({typ}))
+ return e
+ """
+
+ c = get_contract_with_gas_estimation(code)
+ assert c.boo() == 0
diff --git a/vyper/codegen/core.py b/vyper/codegen/core.py
index c3ac489501..cde65adc9e 100644
--- a/vyper/codegen/core.py
+++ b/vyper/codegen/core.py
@@ -304,7 +304,10 @@ def copy_bytes(dst, src, length, length_bound):
def get_bytearray_length(arg):
typ = UINT256_T
- # TODO add "~empty" case to mirror get_dyn_array_count
+ # TODO: it would be nice to merge the implementations of get_bytearray_length and
+ # get_dynarray_count
+ if arg.value == "~empty":
+ return IRnode.from_list(0, typ=typ)
return IRnode.from_list(LOAD(arg), typ=typ)
|
sotetsuk__pgx-544 | [Visualizer, Hex] White and black flip every turn





As these show, white and black are flipped every time the turn changes. (They were visualized with a batch size of 10, so they are a bit hard to read. Sorry.)
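A plausible reading, consistent with the diff below: `_get_nth_state` rebuilds a single-board `HexState` from the batched state but drops the `turn` (and `size`) fields, so the renderer only sees turn-relative stone values and swaps the colors every ply. A minimal sketch of that failure mode, assuming a hypothetical turn-relative encoding (0 = empty, 1 = current player's stone, -1 = opponent's stone; the helper name is made up for illustration):

```python
def stone_colors(board, turn):
    """Map turn-relative stone values to absolute colors.

    Hypothetical encoding: 0 = empty, 1 = the current player's stone,
    -1 = the opponent's stone. `turn` is the absolute player to move.
    """
    colors = []
    for v in board:
        if v == 0:
            colors.append(None)  # empty cell
        elif (v == 1) == (turn == 0):
            colors.append("black")  # absolute player 0's stone
        else:
            colors.append("white")  # absolute player 1's stone
    return colors

# Without `turn`, the renderer can only guess (e.g. assume turn == 0),
# so every other ply the same position is drawn with colors swapped.
```

Passing `turn` through to the per-batch state, as the fix does, lets the drawing code resolve absolute colors.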
| [
{
"content": "# Copyright 2023 The Pgx Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nfrom dataclasses import dataclass\nfrom typing import Literal, Optional\n\nimport svgwrite # type: ignore\n\nfrom ._dwg.animalshogi import AnimalShogiState, _make_animalshogi_dwg\nfrom ._dwg.backgammon import BackgammonState, _make_backgammon_dwg\nfrom ._dwg.bridge_bidding import BridgeBiddingState, _make_bridge_dwg\nfrom ._dwg.chess import ChessState, _make_chess_dwg\nfrom ._dwg.connect_four import ConnectFourState, _make_connect_four_dwg\nfrom ._dwg.go import GoState, _make_go_dwg\nfrom ._dwg.hex import HexState, _make_hex_dwg\nfrom ._dwg.othello import OthelloState, _make_othello_dwg\nfrom ._dwg.shogi import ShogiState, _make_shogi_dwg\nfrom ._dwg.sparrowmahjong import SparrowMahjongState, _make_sparrowmahjong_dwg\nfrom ._dwg.tictactoe import TictactoeState, _make_tictactoe_dwg\n\nColorTheme = Literal[\"light\", \"dark\"]\n\n\n@dataclass\nclass Config:\n color_theme: ColorTheme = \"light\"\n scale: float = 1.0\n\n\nglobal_config = Config()\n\n\ndef set_visualization_config(\n *, color_theme: ColorTheme = \"light\", scale: float = 1.0\n):\n global_config.color_theme = color_theme\n global_config.scale = scale\n\n\n@dataclass\nclass ColorSet:\n p1_color: str = \"black\"\n p2_color: str = \"white\"\n p1_outline: str = \"black\"\n p2_outline: str = \"black\"\n background_color: str = \"white\"\n grid_color: str = \"black\"\n text_color: str = \"black\"\n\n\nclass Visualizer:\n \"\"\"The Pgx Visualizer\n\n color_theme: Default(None) is \"light\"\n scale: change image size. 
Default(None) is 1.0\n \"\"\"\n\n def __init__(\n self,\n *,\n color_theme: Optional[ColorTheme] = None,\n scale: Optional[float] = None,\n ) -> None:\n color_theme = (\n color_theme\n if color_theme is not None\n else global_config.color_theme\n )\n scale = scale if scale is not None else global_config.scale\n\n self.config = {\n \"GRID_SIZE\": -1,\n \"BOARD_WIDTH\": -1,\n \"BOARD_HEIGHT\": -1,\n \"COLOR_THEME\": color_theme,\n \"COLOR_SET\": ColorSet(),\n \"SCALE\": scale,\n }\n self._make_dwg_group = None\n\n \"\"\"\n notebook で可視化する際に、変数名のみで表示させる場合\n def _repr_html_(self) -> str:\n assert self.state is not None\n return self._to_dwg_from_states(states=self.state).tostring()\n \"\"\"\n\n def save_svg(\n self,\n state,\n filename=\"temp.svg\",\n ) -> None:\n assert filename.endswith(\".svg\")\n self.get_dwg(states=state).saveas(filename=filename)\n\n def get_dwg(\n self,\n states,\n ):\n try:\n SIZE = len(states.current_player)\n WIDTH = math.ceil(math.sqrt(SIZE - 0.1))\n if SIZE - (WIDTH - 1) ** 2 >= WIDTH:\n HEIGHT = WIDTH\n else:\n HEIGHT = WIDTH - 1\n except TypeError:\n SIZE = 1\n WIDTH = 1\n HEIGHT = 1\n\n self._set_config_by_state(states)\n assert self._make_dwg_group is not None\n\n GRID_SIZE = self.config[\"GRID_SIZE\"]\n BOARD_WIDTH = self.config[\"BOARD_WIDTH\"]\n BOARD_HEIGHT = self.config[\"BOARD_HEIGHT\"]\n SCALE = self.config[\"SCALE\"]\n\n dwg = svgwrite.Drawing(\n \"temp.svg\",\n (\n (BOARD_WIDTH + 1) * GRID_SIZE * WIDTH * SCALE,\n (BOARD_HEIGHT + 1) * GRID_SIZE * HEIGHT * SCALE,\n ),\n )\n group = dwg.g()\n\n # background\n group.add(\n dwg.rect(\n (0, 0),\n (\n (BOARD_WIDTH + 1) * GRID_SIZE * WIDTH,\n (BOARD_HEIGHT + 1) * GRID_SIZE * HEIGHT,\n ),\n fill=self.config[\"COLOR_SET\"].background_color,\n )\n )\n\n if SIZE == 1:\n g = self._make_dwg_group(dwg, states, self.config)\n g.translate(\n GRID_SIZE * 1 / 2,\n GRID_SIZE * 1 / 2,\n )\n group.add(g)\n group.scale(SCALE)\n dwg.add(group)\n return dwg\n\n for i in range(SIZE):\n x = i % WIDTH\n y = i // WIDTH\n _state = self._get_nth_state(states, i)\n g = self._make_dwg_group(\n dwg,\n _state, # type:ignore\n self.config,\n )\n\n g.translate(\n GRID_SIZE * 1 / 2 + (BOARD_WIDTH + 1) * GRID_SIZE * x,\n GRID_SIZE * 1 / 2 + (BOARD_HEIGHT + 1) * GRID_SIZE * y,\n )\n group.add(g)\n group.add(\n dwg.rect(\n (\n (BOARD_WIDTH + 1) * GRID_SIZE * x,\n (BOARD_HEIGHT + 1) * GRID_SIZE * y,\n ),\n (\n (BOARD_WIDTH + 1) * GRID_SIZE,\n (BOARD_HEIGHT + 1) * GRID_SIZE,\n ),\n fill=\"none\",\n stroke=\"gray\",\n )\n )\n group.scale(SCALE)\n dwg.add(group)\n return dwg\n\n def _set_config_by_state(self, _state): # noqa: C901\n if isinstance(_state, AnimalShogiState):\n self.config[\"GRID_SIZE\"] = 60\n self.config[\"BOARD_WIDTH\"] = 4\n self.config[\"BOARD_HEIGHT\"] = 4\n self._make_dwg_group = _make_animalshogi_dwg\n if (\n self.config[\"COLOR_THEME\"] is None\n and self.config[\"COLOR_THEME\"] == \"dark\"\n ) or self.config[\"COLOR_THEME\"] == \"dark\":\n self.config[\"COLOR_SET\"] = ColorSet(\n \"dimgray\",\n \"black\",\n \"whitesmoke\",\n \"whitesmoke\",\n \"#1e1e1e\",\n \"white\",\n \"\",\n )\n else:\n self.config[\"COLOR_SET\"] = ColorSet(\n \"white\",\n \"lightgray\",\n \"black\",\n \"black\",\n \"white\",\n \"black\",\n \"\",\n )\n elif isinstance(_state, BackgammonState):\n self.config[\"GRID_SIZE\"] = 25\n self.config[\"BOARD_WIDTH\"] = 17\n self.config[\"BOARD_HEIGHT\"] = 14\n self._make_dwg_group = _make_backgammon_dwg\n if (\n self.config[\"COLOR_THEME\"] is None\n and self.config[\"COLOR_THEME\"] == \"dark\"\n ) or 
self.config[\"COLOR_THEME\"] == \"dark\":\n self.config[\"COLOR_SET\"] = ColorSet(\n \"gray\",\n \"black\",\n \"black\",\n \"dimgray\",\n \"#1e1e1e\",\n \"gainsboro\",\n \"\",\n )\n else:\n self.config[\"COLOR_SET\"] = ColorSet(\n \"white\",\n \"black\",\n \"lightgray\",\n \"white\",\n \"white\",\n \"black\",\n \"\",\n )\n elif isinstance(_state, BridgeBiddingState):\n self.config[\"GRID_SIZE\"] = 50\n self.config[\"BOARD_WIDTH\"] = 14\n self.config[\"BOARD_HEIGHT\"] = 10\n self._make_dwg_group = _make_bridge_dwg\n if (\n self.config[\"COLOR_THEME\"] is None\n and self.config[\"COLOR_THEME\"] == \"dark\"\n ) or self.config[\"COLOR_THEME\"] == \"dark\":\n self.config[\"COLOR_SET\"] = ColorSet(\n \"gray\",\n \"black\",\n \"black\",\n \"dimgray\",\n \"#1e1e1e\",\n \"gainsboro\",\n \"white\",\n )\n else:\n self.config[\"COLOR_SET\"] = ColorSet(\n \"white\",\n \"black\",\n \"lightgray\",\n \"white\",\n \"white\",\n \"black\",\n \"black\",\n )\n elif isinstance(_state, ChessState):\n self.config[\"GRID_SIZE\"] = 50\n self.config[\"BOARD_WIDTH\"] = 8\n self.config[\"BOARD_HEIGHT\"] = 8\n self._make_dwg_group = _make_chess_dwg\n if (\n self.config[\"COLOR_THEME\"] is None\n and self.config[\"COLOR_THEME\"] == \"dark\"\n ) or self.config[\"COLOR_THEME\"] == \"dark\":\n self.config[\"COLOR_SET\"] = ColorSet(\n \"none\",\n \"none\",\n \"#404040\",\n \"gray\",\n \"#1e1e1e\",\n \"silver\",\n \"\",\n )\n else:\n self.config[\"COLOR_SET\"] = ColorSet(\n \"none\",\n \"none\",\n \"gray\",\n \"white\",\n \"white\",\n \"black\",\n \"\",\n )\n elif isinstance(_state, ConnectFourState):\n self.config[\"GRID_SIZE\"] = 35\n self.config[\"BOARD_WIDTH\"] = 7\n self.config[\"BOARD_HEIGHT\"] = 7\n self._make_dwg_group = _make_connect_four_dwg\n if (\n self.config[\"COLOR_THEME\"] is None\n and self.config[\"COLOR_THEME\"] == \"dark\"\n ) or self.config[\"COLOR_THEME\"] == \"dark\":\n self.config[\"COLOR_SET\"] = ColorSet(\n \"black\",\n \"lightgray\",\n \"white\",\n \"lightgray\",\n \"#1e1e1e\",\n \"lightgray\",\n \"gray\",\n )\n else:\n self.config[\"COLOR_SET\"] = ColorSet(\n \"black\",\n \"white\",\n \"black\",\n \"black\",\n \"white\",\n \"black\",\n \"gray\",\n )\n elif isinstance(_state, GoState):\n self.config[\"GRID_SIZE\"] = 25\n try:\n self.config[\"BOARD_WIDTH\"] = int(_state.size[0])\n self.config[\"BOARD_HEIGHT\"] = int(_state.size[0])\n except IndexError:\n self.config[\"BOARD_WIDTH\"] = int(_state.size)\n self.config[\"BOARD_HEIGHT\"] = int(_state.size)\n self._make_dwg_group = _make_go_dwg\n if (\n self.config[\"COLOR_THEME\"] is None\n and self.config[\"COLOR_THEME\"] == \"dark\"\n ) or self.config[\"COLOR_THEME\"] == \"dark\":\n self.config[\"COLOR_SET\"] = ColorSet(\n \"black\", \"gray\", \"white\", \"white\", \"#1e1e1e\", \"white\", \"\"\n )\n else:\n self.config[\"COLOR_SET\"] = ColorSet(\n \"black\",\n \"white\",\n \"black\",\n \"black\",\n \"white\",\n \"black\",\n \"\",\n )\n elif isinstance(_state, HexState):\n self.config[\"GRID_SIZE\"] = 30\n try:\n self.config[\"BOARD_WIDTH\"] = int(_state.size[0] * 1.3)\n self.config[\"BOARD_HEIGHT\"] = int(_state.size[0] * 0.8)\n except IndexError:\n self.config[\"BOARD_WIDTH\"] = int(_state.size * 1.3)\n self.config[\"BOARD_HEIGHT\"] = int(_state.size * 0.8)\n self._make_dwg_group = _make_hex_dwg\n if (\n self.config[\"COLOR_THEME\"] is None\n and self.config[\"COLOR_THEME\"] == \"dark\"\n ) or self.config[\"COLOR_THEME\"] == \"dark\":\n self.config[\"COLOR_SET\"] = ColorSet(\n \"black\",\n \"white\",\n \"white\",\n \"black\",\n \"#1e1e1e\",\n 
\"white\",\n \"dimgray\",\n )\n else:\n self.config[\"COLOR_SET\"] = ColorSet(\n \"black\",\n \"white\",\n \"black\",\n \"black\",\n \"white\",\n \"black\",\n \"lightgray\",\n )\n elif isinstance(_state, OthelloState):\n self.config[\"GRID_SIZE\"] = 30\n self.config[\"BOARD_WIDTH\"] = 8\n self.config[\"BOARD_HEIGHT\"] = 8\n self._make_dwg_group = _make_othello_dwg\n if (\n self.config[\"COLOR_THEME\"] is None\n and self.config[\"COLOR_THEME\"] == \"dark\"\n ) or self.config[\"COLOR_THEME\"] == \"dark\":\n self.config[\"COLOR_SET\"] = ColorSet(\n \"black\",\n \"lightgray\",\n \"white\",\n \"lightgray\",\n \"#1e1e1e\",\n \"lightgray\",\n \"\",\n )\n else:\n self.config[\"COLOR_SET\"] = ColorSet(\n \"black\",\n \"white\",\n \"black\",\n \"black\",\n \"white\",\n \"black\",\n \"\",\n )\n elif isinstance(_state, ShogiState):\n self.config[\"GRID_SIZE\"] = 50\n self.config[\"BOARD_WIDTH\"] = 10\n self.config[\"BOARD_HEIGHT\"] = 9\n self._make_dwg_group = _make_shogi_dwg\n if (\n self.config[\"COLOR_THEME\"] is None\n and self.config[\"COLOR_THEME\"] == \"dark\"\n ) or self.config[\"COLOR_THEME\"] == \"dark\":\n self.config[\"COLOR_SET\"] = ColorSet(\n \"gray\", \"black\", \"gray\", \"gray\", \"#1e1e1e\", \"gray\", \"\"\n )\n else:\n self.config[\"COLOR_SET\"] = ColorSet(\n \"white\",\n \"lightgray\",\n \"black\",\n \"black\",\n \"white\",\n \"black\",\n \"\",\n )\n elif isinstance(_state, SparrowMahjongState):\n self.config[\"GRID_SIZE\"] = 50\n self.config[\"BOARD_WIDTH\"] = 15\n self.config[\"BOARD_HEIGHT\"] = 10\n self._make_dwg_group = _make_sparrowmahjong_dwg\n if (\n self.config[\"COLOR_THEME\"] is None\n and self.config[\"COLOR_THEME\"] == \"dark\"\n ) or self.config[\"COLOR_THEME\"] == \"dark\":\n self.config[\"COLOR_SET\"] = ColorSet(\n \"lightgray\",\n \"dimgray\",\n \"#404040\",\n \"gray\",\n \"#1e1e1e\",\n \"darkgray\",\n \"whitesmoke\",\n )\n else:\n self.config[\"COLOR_SET\"] = ColorSet(\n \"white\",\n \"white\",\n \"gray\",\n \"white\",\n \"white\",\n \"silver\",\n \"black\",\n )\n elif isinstance(_state, TictactoeState):\n self.config[\"GRID_SIZE\"] = 60\n self.config[\"BOARD_WIDTH\"] = 3\n self.config[\"BOARD_HEIGHT\"] = 3\n self._make_dwg_group = _make_tictactoe_dwg\n if (\n self.config[\"COLOR_THEME\"] is None\n and self.config[\"COLOR_THEME\"] == \"dark\"\n ) or self.config[\"COLOR_THEME\"] == \"dark\":\n self.config[\"COLOR_SET\"] = ColorSet(\n \"gray\",\n \"black\",\n \"black\",\n \"dimgray\",\n \"#1e1e1e\",\n \"gainsboro\",\n )\n else:\n self.config[\"COLOR_SET\"] = ColorSet(\n \"white\", \"black\", \"lightgray\", \"white\", \"white\", \"black\"\n )\n else:\n assert False\n\n def _get_nth_state(self, _states, _i):\n if isinstance(_states, AnimalShogiState):\n return AnimalShogiState(\n turn=_states.turn[_i], # type:ignore\n board=_states.board[_i],\n hand=_states.hand[_i],\n )\n elif isinstance(_states, BackgammonState):\n return BackgammonState(\n turn=_states.turn[_i], # type:ignore\n board=_states.board[_i],\n )\n elif isinstance(_states, ChessState):\n return ChessState(\n turn=_states.turn[_i], # type:ignore\n board=_states.board[_i],\n )\n elif isinstance(_states, BridgeBiddingState):\n return BridgeBiddingState( # type:ignore\n turn=_states.turn[_i],\n current_player=_states.current_player[_i],\n hand=_states.hand[_i],\n bidding_history=_states.bidding_history[_i],\n vul_NS=_states.vul_NS[_i],\n vul_EW=_states.vul_EW[_i],\n )\n elif isinstance(_states, GoState):\n return GoState( # type:ignore\n size=_states.size[_i],\n chain_id_board=_states.chain_id_board[_i],\n 
turn=_states.turn[_i],\n )\n elif isinstance(_states, HexState):\n return HexState(\n board=_states.board[_i],\n )\n elif isinstance(_states, OthelloState):\n return OthelloState(\n board=_states.board[_i],\n )\n elif isinstance(_states, ShogiState):\n return ShogiState( # type:ignore\n turn=_states.turn[_i],\n piece_board=_states.piece_board[_i],\n hand=_states.hand[_i],\n )\n elif isinstance(_states, SparrowMahjongState):\n return SparrowMahjongState(\n current_player=_states.current_player[_i],\n turn=_states.turn[_i],\n rivers=_states.rivers[_i],\n hands=_states.hands[_i],\n n_red_in_hands=_states.n_red_in_hands[_i],\n is_red_in_river=_states.is_red_in_river[_i],\n wall=_states.wall[_i],\n draw_ix=_states.draw_ix[_i],\n shuffled_players=_states.shuffled_players[_i],\n dora=_states.dora[_i],\n )\n elif isinstance(_states, TictactoeState):\n return TictactoeState(\n current_player=_states.current_player[_i],\n legal_action_mask=_states.legal_action_mask[_i],\n terminated=_states.terminated[_i],\n turn=_states.turn[_i],\n board=_states.board[_i],\n )\n else:\n assert False\n",
"path": "pgx/_visualizer.py"
}
] | [
{
"content": "# Copyright 2023 The Pgx Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nfrom dataclasses import dataclass\nfrom typing import Literal, Optional\n\nimport svgwrite # type: ignore\n\nfrom ._dwg.animalshogi import AnimalShogiState, _make_animalshogi_dwg\nfrom ._dwg.backgammon import BackgammonState, _make_backgammon_dwg\nfrom ._dwg.bridge_bidding import BridgeBiddingState, _make_bridge_dwg\nfrom ._dwg.chess import ChessState, _make_chess_dwg\nfrom ._dwg.connect_four import ConnectFourState, _make_connect_four_dwg\nfrom ._dwg.go import GoState, _make_go_dwg\nfrom ._dwg.hex import HexState, _make_hex_dwg\nfrom ._dwg.othello import OthelloState, _make_othello_dwg\nfrom ._dwg.shogi import ShogiState, _make_shogi_dwg\nfrom ._dwg.sparrowmahjong import SparrowMahjongState, _make_sparrowmahjong_dwg\nfrom ._dwg.tictactoe import TictactoeState, _make_tictactoe_dwg\n\nColorTheme = Literal[\"light\", \"dark\"]\n\n\n@dataclass\nclass Config:\n color_theme: ColorTheme = \"light\"\n scale: float = 1.0\n\n\nglobal_config = Config()\n\n\ndef set_visualization_config(\n *, color_theme: ColorTheme = \"light\", scale: float = 1.0\n):\n global_config.color_theme = color_theme\n global_config.scale = scale\n\n\n@dataclass\nclass ColorSet:\n p1_color: str = \"black\"\n p2_color: str = \"white\"\n p1_outline: str = \"black\"\n p2_outline: str = \"black\"\n background_color: str = \"white\"\n grid_color: str = \"black\"\n text_color: str = \"black\"\n\n\nclass Visualizer:\n \"\"\"The Pgx Visualizer\n\n color_theme: Default(None) is \"light\"\n scale: change image size. 
Default(None) is 1.0\n \"\"\"\n\n def __init__(\n self,\n *,\n color_theme: Optional[ColorTheme] = None,\n scale: Optional[float] = None,\n ) -> None:\n color_theme = (\n color_theme\n if color_theme is not None\n else global_config.color_theme\n )\n scale = scale if scale is not None else global_config.scale\n\n self.config = {\n \"GRID_SIZE\": -1,\n \"BOARD_WIDTH\": -1,\n \"BOARD_HEIGHT\": -1,\n \"COLOR_THEME\": color_theme,\n \"COLOR_SET\": ColorSet(),\n \"SCALE\": scale,\n }\n self._make_dwg_group = None\n\n \"\"\"\n notebook で可視化する際に、変数名のみで表示させる場合\n def _repr_html_(self) -> str:\n assert self.state is not None\n return self._to_dwg_from_states(states=self.state).tostring()\n \"\"\"\n\n def save_svg(\n self,\n state,\n filename=\"temp.svg\",\n ) -> None:\n assert filename.endswith(\".svg\")\n self.get_dwg(states=state).saveas(filename=filename)\n\n def get_dwg(\n self,\n states,\n ):\n try:\n SIZE = len(states.current_player)\n WIDTH = math.ceil(math.sqrt(SIZE - 0.1))\n if SIZE - (WIDTH - 1) ** 2 >= WIDTH:\n HEIGHT = WIDTH\n else:\n HEIGHT = WIDTH - 1\n except TypeError:\n SIZE = 1\n WIDTH = 1\n HEIGHT = 1\n\n self._set_config_by_state(states)\n assert self._make_dwg_group is not None\n\n GRID_SIZE = self.config[\"GRID_SIZE\"]\n BOARD_WIDTH = self.config[\"BOARD_WIDTH\"]\n BOARD_HEIGHT = self.config[\"BOARD_HEIGHT\"]\n SCALE = self.config[\"SCALE\"]\n\n dwg = svgwrite.Drawing(\n \"temp.svg\",\n (\n (BOARD_WIDTH + 1) * GRID_SIZE * WIDTH * SCALE,\n (BOARD_HEIGHT + 1) * GRID_SIZE * HEIGHT * SCALE,\n ),\n )\n group = dwg.g()\n\n # background\n group.add(\n dwg.rect(\n (0, 0),\n (\n (BOARD_WIDTH + 1) * GRID_SIZE * WIDTH,\n (BOARD_HEIGHT + 1) * GRID_SIZE * HEIGHT,\n ),\n fill=self.config[\"COLOR_SET\"].background_color,\n )\n )\n\n if SIZE == 1:\n g = self._make_dwg_group(dwg, states, self.config)\n g.translate(\n GRID_SIZE * 1 / 2,\n GRID_SIZE * 1 / 2,\n )\n group.add(g)\n group.scale(SCALE)\n dwg.add(group)\n return dwg\n\n for i in range(SIZE):\n x = i % WIDTH\n y = i // WIDTH\n _state = self._get_nth_state(states, i)\n g = self._make_dwg_group(\n dwg,\n _state, # type:ignore\n self.config,\n )\n\n g.translate(\n GRID_SIZE * 1 / 2 + (BOARD_WIDTH + 1) * GRID_SIZE * x,\n GRID_SIZE * 1 / 2 + (BOARD_HEIGHT + 1) * GRID_SIZE * y,\n )\n group.add(g)\n group.add(\n dwg.rect(\n (\n (BOARD_WIDTH + 1) * GRID_SIZE * x,\n (BOARD_HEIGHT + 1) * GRID_SIZE * y,\n ),\n (\n (BOARD_WIDTH + 1) * GRID_SIZE,\n (BOARD_HEIGHT + 1) * GRID_SIZE,\n ),\n fill=\"none\",\n stroke=\"gray\",\n )\n )\n group.scale(SCALE)\n dwg.add(group)\n return dwg\n\n def _set_config_by_state(self, _state): # noqa: C901\n if isinstance(_state, AnimalShogiState):\n self.config[\"GRID_SIZE\"] = 60\n self.config[\"BOARD_WIDTH\"] = 4\n self.config[\"BOARD_HEIGHT\"] = 4\n self._make_dwg_group = _make_animalshogi_dwg\n if (\n self.config[\"COLOR_THEME\"] is None\n and self.config[\"COLOR_THEME\"] == \"dark\"\n ) or self.config[\"COLOR_THEME\"] == \"dark\":\n self.config[\"COLOR_SET\"] = ColorSet(\n \"dimgray\",\n \"black\",\n \"whitesmoke\",\n \"whitesmoke\",\n \"#1e1e1e\",\n \"white\",\n \"\",\n )\n else:\n self.config[\"COLOR_SET\"] = ColorSet(\n \"white\",\n \"lightgray\",\n \"black\",\n \"black\",\n \"white\",\n \"black\",\n \"\",\n )\n elif isinstance(_state, BackgammonState):\n self.config[\"GRID_SIZE\"] = 25\n self.config[\"BOARD_WIDTH\"] = 17\n self.config[\"BOARD_HEIGHT\"] = 14\n self._make_dwg_group = _make_backgammon_dwg\n if (\n self.config[\"COLOR_THEME\"] is None\n and self.config[\"COLOR_THEME\"] == \"dark\"\n ) or 
self.config[\"COLOR_THEME\"] == \"dark\":\n self.config[\"COLOR_SET\"] = ColorSet(\n \"gray\",\n \"black\",\n \"black\",\n \"dimgray\",\n \"#1e1e1e\",\n \"gainsboro\",\n \"\",\n )\n else:\n self.config[\"COLOR_SET\"] = ColorSet(\n \"white\",\n \"black\",\n \"lightgray\",\n \"white\",\n \"white\",\n \"black\",\n \"\",\n )\n elif isinstance(_state, BridgeBiddingState):\n self.config[\"GRID_SIZE\"] = 50\n self.config[\"BOARD_WIDTH\"] = 14\n self.config[\"BOARD_HEIGHT\"] = 10\n self._make_dwg_group = _make_bridge_dwg\n if (\n self.config[\"COLOR_THEME\"] is None\n and self.config[\"COLOR_THEME\"] == \"dark\"\n ) or self.config[\"COLOR_THEME\"] == \"dark\":\n self.config[\"COLOR_SET\"] = ColorSet(\n \"gray\",\n \"black\",\n \"black\",\n \"dimgray\",\n \"#1e1e1e\",\n \"gainsboro\",\n \"white\",\n )\n else:\n self.config[\"COLOR_SET\"] = ColorSet(\n \"white\",\n \"black\",\n \"lightgray\",\n \"white\",\n \"white\",\n \"black\",\n \"black\",\n )\n elif isinstance(_state, ChessState):\n self.config[\"GRID_SIZE\"] = 50\n self.config[\"BOARD_WIDTH\"] = 8\n self.config[\"BOARD_HEIGHT\"] = 8\n self._make_dwg_group = _make_chess_dwg\n if (\n self.config[\"COLOR_THEME\"] is None\n and self.config[\"COLOR_THEME\"] == \"dark\"\n ) or self.config[\"COLOR_THEME\"] == \"dark\":\n self.config[\"COLOR_SET\"] = ColorSet(\n \"none\",\n \"none\",\n \"#404040\",\n \"gray\",\n \"#1e1e1e\",\n \"silver\",\n \"\",\n )\n else:\n self.config[\"COLOR_SET\"] = ColorSet(\n \"none\",\n \"none\",\n \"gray\",\n \"white\",\n \"white\",\n \"black\",\n \"\",\n )\n elif isinstance(_state, ConnectFourState):\n self.config[\"GRID_SIZE\"] = 35\n self.config[\"BOARD_WIDTH\"] = 7\n self.config[\"BOARD_HEIGHT\"] = 7\n self._make_dwg_group = _make_connect_four_dwg\n if (\n self.config[\"COLOR_THEME\"] is None\n and self.config[\"COLOR_THEME\"] == \"dark\"\n ) or self.config[\"COLOR_THEME\"] == \"dark\":\n self.config[\"COLOR_SET\"] = ColorSet(\n \"black\",\n \"lightgray\",\n \"white\",\n \"lightgray\",\n \"#1e1e1e\",\n \"lightgray\",\n \"gray\",\n )\n else:\n self.config[\"COLOR_SET\"] = ColorSet(\n \"black\",\n \"white\",\n \"black\",\n \"black\",\n \"white\",\n \"black\",\n \"gray\",\n )\n elif isinstance(_state, GoState):\n self.config[\"GRID_SIZE\"] = 25\n try:\n self.config[\"BOARD_WIDTH\"] = int(_state.size[0])\n self.config[\"BOARD_HEIGHT\"] = int(_state.size[0])\n except IndexError:\n self.config[\"BOARD_WIDTH\"] = int(_state.size)\n self.config[\"BOARD_HEIGHT\"] = int(_state.size)\n self._make_dwg_group = _make_go_dwg\n if (\n self.config[\"COLOR_THEME\"] is None\n and self.config[\"COLOR_THEME\"] == \"dark\"\n ) or self.config[\"COLOR_THEME\"] == \"dark\":\n self.config[\"COLOR_SET\"] = ColorSet(\n \"black\", \"gray\", \"white\", \"white\", \"#1e1e1e\", \"white\", \"\"\n )\n else:\n self.config[\"COLOR_SET\"] = ColorSet(\n \"black\",\n \"white\",\n \"black\",\n \"black\",\n \"white\",\n \"black\",\n \"\",\n )\n elif isinstance(_state, HexState):\n self.config[\"GRID_SIZE\"] = 30\n try:\n self.config[\"BOARD_WIDTH\"] = int(_state.size[0] * 1.3)\n self.config[\"BOARD_HEIGHT\"] = int(_state.size[0] * 0.8)\n except IndexError:\n self.config[\"BOARD_WIDTH\"] = int(_state.size * 1.3)\n self.config[\"BOARD_HEIGHT\"] = int(_state.size * 0.8)\n self._make_dwg_group = _make_hex_dwg\n if (\n self.config[\"COLOR_THEME\"] is None\n and self.config[\"COLOR_THEME\"] == \"dark\"\n ) or self.config[\"COLOR_THEME\"] == \"dark\":\n self.config[\"COLOR_SET\"] = ColorSet(\n \"black\",\n \"white\",\n \"white\",\n \"black\",\n \"#1e1e1e\",\n 
\"white\",\n \"dimgray\",\n )\n else:\n self.config[\"COLOR_SET\"] = ColorSet(\n \"black\",\n \"white\",\n \"black\",\n \"black\",\n \"white\",\n \"black\",\n \"lightgray\",\n )\n elif isinstance(_state, OthelloState):\n self.config[\"GRID_SIZE\"] = 30\n self.config[\"BOARD_WIDTH\"] = 8\n self.config[\"BOARD_HEIGHT\"] = 8\n self._make_dwg_group = _make_othello_dwg\n if (\n self.config[\"COLOR_THEME\"] is None\n and self.config[\"COLOR_THEME\"] == \"dark\"\n ) or self.config[\"COLOR_THEME\"] == \"dark\":\n self.config[\"COLOR_SET\"] = ColorSet(\n \"black\",\n \"lightgray\",\n \"white\",\n \"lightgray\",\n \"#1e1e1e\",\n \"lightgray\",\n \"\",\n )\n else:\n self.config[\"COLOR_SET\"] = ColorSet(\n \"black\",\n \"white\",\n \"black\",\n \"black\",\n \"white\",\n \"black\",\n \"\",\n )\n elif isinstance(_state, ShogiState):\n self.config[\"GRID_SIZE\"] = 50\n self.config[\"BOARD_WIDTH\"] = 10\n self.config[\"BOARD_HEIGHT\"] = 9\n self._make_dwg_group = _make_shogi_dwg\n if (\n self.config[\"COLOR_THEME\"] is None\n and self.config[\"COLOR_THEME\"] == \"dark\"\n ) or self.config[\"COLOR_THEME\"] == \"dark\":\n self.config[\"COLOR_SET\"] = ColorSet(\n \"gray\", \"black\", \"gray\", \"gray\", \"#1e1e1e\", \"gray\", \"\"\n )\n else:\n self.config[\"COLOR_SET\"] = ColorSet(\n \"white\",\n \"lightgray\",\n \"black\",\n \"black\",\n \"white\",\n \"black\",\n \"\",\n )\n elif isinstance(_state, SparrowMahjongState):\n self.config[\"GRID_SIZE\"] = 50\n self.config[\"BOARD_WIDTH\"] = 15\n self.config[\"BOARD_HEIGHT\"] = 10\n self._make_dwg_group = _make_sparrowmahjong_dwg\n if (\n self.config[\"COLOR_THEME\"] is None\n and self.config[\"COLOR_THEME\"] == \"dark\"\n ) or self.config[\"COLOR_THEME\"] == \"dark\":\n self.config[\"COLOR_SET\"] = ColorSet(\n \"lightgray\",\n \"dimgray\",\n \"#404040\",\n \"gray\",\n \"#1e1e1e\",\n \"darkgray\",\n \"whitesmoke\",\n )\n else:\n self.config[\"COLOR_SET\"] = ColorSet(\n \"white\",\n \"white\",\n \"gray\",\n \"white\",\n \"white\",\n \"silver\",\n \"black\",\n )\n elif isinstance(_state, TictactoeState):\n self.config[\"GRID_SIZE\"] = 60\n self.config[\"BOARD_WIDTH\"] = 3\n self.config[\"BOARD_HEIGHT\"] = 3\n self._make_dwg_group = _make_tictactoe_dwg\n if (\n self.config[\"COLOR_THEME\"] is None\n and self.config[\"COLOR_THEME\"] == \"dark\"\n ) or self.config[\"COLOR_THEME\"] == \"dark\":\n self.config[\"COLOR_SET\"] = ColorSet(\n \"gray\",\n \"black\",\n \"black\",\n \"dimgray\",\n \"#1e1e1e\",\n \"gainsboro\",\n )\n else:\n self.config[\"COLOR_SET\"] = ColorSet(\n \"white\", \"black\", \"lightgray\", \"white\", \"white\", \"black\"\n )\n else:\n assert False\n\n def _get_nth_state(self, _states, _i):\n if isinstance(_states, AnimalShogiState):\n return AnimalShogiState(\n turn=_states.turn[_i], # type:ignore\n board=_states.board[_i],\n hand=_states.hand[_i],\n )\n elif isinstance(_states, BackgammonState):\n return BackgammonState(\n turn=_states.turn[_i], # type:ignore\n board=_states.board[_i],\n )\n elif isinstance(_states, ChessState):\n return ChessState(\n turn=_states.turn[_i], # type:ignore\n board=_states.board[_i],\n )\n elif isinstance(_states, BridgeBiddingState):\n return BridgeBiddingState( # type:ignore\n turn=_states.turn[_i],\n current_player=_states.current_player[_i],\n hand=_states.hand[_i],\n bidding_history=_states.bidding_history[_i],\n vul_NS=_states.vul_NS[_i],\n vul_EW=_states.vul_EW[_i],\n )\n elif isinstance(_states, GoState):\n return GoState( # type:ignore\n size=_states.size[_i],\n chain_id_board=_states.chain_id_board[_i],\n 
turn=_states.turn[_i],\n )\n elif isinstance(_states, HexState):\n return HexState(\n size=_states.size[_i],\n turn=_states.turn[_i],\n board=_states.board[_i],\n )\n elif isinstance(_states, OthelloState):\n return OthelloState(\n board=_states.board[_i],\n )\n elif isinstance(_states, ShogiState):\n return ShogiState( # type:ignore\n turn=_states.turn[_i],\n piece_board=_states.piece_board[_i],\n hand=_states.hand[_i],\n )\n elif isinstance(_states, SparrowMahjongState):\n return SparrowMahjongState(\n current_player=_states.current_player[_i],\n turn=_states.turn[_i],\n rivers=_states.rivers[_i],\n hands=_states.hands[_i],\n n_red_in_hands=_states.n_red_in_hands[_i],\n is_red_in_river=_states.is_red_in_river[_i],\n wall=_states.wall[_i],\n draw_ix=_states.draw_ix[_i],\n shuffled_players=_states.shuffled_players[_i],\n dora=_states.dora[_i],\n )\n elif isinstance(_states, TictactoeState):\n return TictactoeState(\n current_player=_states.current_player[_i],\n legal_action_mask=_states.legal_action_mask[_i],\n terminated=_states.terminated[_i],\n turn=_states.turn[_i],\n board=_states.board[_i],\n )\n else:\n assert False\n",
"path": "pgx/_visualizer.py"
}
] | diff --git a/pgx/_visualizer.py b/pgx/_visualizer.py
index 1bba63c60..def53c38a 100644
--- a/pgx/_visualizer.py
+++ b/pgx/_visualizer.py
@@ -529,6 +529,8 @@ def _get_nth_state(self, _states, _i):
)
elif isinstance(_states, HexState):
return HexState(
+ size=_states.size[_i],
+ turn=_states.turn[_i],
board=_states.board[_i],
)
elif isinstance(_states, OthelloState):
|
pallets__werkzeug-2073 | Debugger's library detection doesn't work with symlinks
The debugger's stack trace nicely colors lines differently depending on whether they come from your own code or from library code. However, this doesn't work correctly if the library path (virtualenv) is a symlink.
I am not 100% sure how to replicate this simply, because my environment is part of a fairly complex build system. But `sysconfig.get_paths()` returns paths like `/var/local/venv/...`, and that is a symlink to `/other/path/venv/`.
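For context, a minimal sketch of the failure mode and the usual remedy, assuming the detection works by prefix-matching a frame's filename against `sysconfig.get_paths()` (the helper name here is hypothetical, not Werkzeug's API): resolving both sides with `os.path.realpath` makes the comparison symlink-safe.

```python
import os
import sysconfig

def is_library_frame(filename: str) -> bool:
    """Return True if `filename` appears to live under an
    interpreter/site-packages directory.

    Naive prefix matching breaks when the virtualenv is reached
    through a symlink: sysconfig may report /var/local/venv/...
    while the frame's __file__ is the resolved /other/path/venv/...
    Resolving both sides normalizes that.
    """
    real_file = os.path.realpath(filename)
    for path in sysconfig.get_paths().values():
        if real_file.startswith(os.path.realpath(path)):
            return True
    return False
```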
Environment:
- Python version: 3.7
- Werkzeug version: 1.0.1
| [
{
"content": "import codecs\nimport inspect\nimport os\nimport re\nimport sys\nimport sysconfig\nimport traceback\nimport typing as t\nfrom html import escape\nfrom tokenize import TokenError\nfrom types import CodeType\nfrom types import TracebackType\n\nfrom .._internal import _to_str\nfrom ..filesystem import get_filesystem_encoding\nfrom ..utils import cached_property\nfrom .console import Console\n\n_coding_re = re.compile(br\"coding[:=]\\s*([-\\w.]+)\")\n_line_re = re.compile(br\"^(.*?)$\", re.MULTILINE)\n_funcdef_re = re.compile(r\"^(\\s*def\\s)|(.*(?<!\\w)lambda(:|\\s))|^(\\s*@)\")\n\nHEADER = \"\"\"\\\n<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\"\n \"http://www.w3.org/TR/html4/loose.dtd\">\n<html>\n <head>\n <title>%(title)s // Werkzeug Debugger</title>\n <link rel=\"stylesheet\" href=\"?__debugger__=yes&cmd=resource&f=style.css\"\n type=\"text/css\">\n <!-- We need to make sure this has a favicon so that the debugger does\n not accidentally trigger a request to /favicon.ico which might\n change the application's state. -->\n <link rel=\"shortcut icon\"\n href=\"?__debugger__=yes&cmd=resource&f=console.png\">\n <script src=\"?__debugger__=yes&cmd=resource&f=debugger.js\"></script>\n <script type=\"text/javascript\">\n var TRACEBACK = %(traceback_id)d,\n CONSOLE_MODE = %(console)s,\n EVALEX = %(evalex)s,\n EVALEX_TRUSTED = %(evalex_trusted)s,\n SECRET = \"%(secret)s\";\n </script>\n </head>\n <body style=\"background-color: #fff\">\n <div class=\"debugger\">\n\"\"\"\nFOOTER = \"\"\"\\\n <div class=\"footer\">\n Brought to you by <strong class=\"arthur\">DON'T PANIC</strong>, your\n friendly Werkzeug powered traceback interpreter.\n </div>\n </div>\n\n <div class=\"pin-prompt\">\n <div class=\"inner\">\n <h3>Console Locked</h3>\n <p>\n The console is locked and needs to be unlocked by entering the PIN.\n You can find the PIN printed out on the standard output of your\n shell that runs the server.\n <form>\n <p>PIN:\n <input type=text name=pin size=14>\n <input type=submit name=btn value=\"Confirm Pin\">\n </form>\n </div>\n </div>\n </body>\n</html>\n\"\"\"\n\nPAGE_HTML = (\n HEADER\n + \"\"\"\\\n<h1>%(exception_type)s</h1>\n<div class=\"detail\">\n <p class=\"errormsg\">%(exception)s</p>\n</div>\n<h2 class=\"traceback\">Traceback <em>(most recent call last)</em></h2>\n%(summary)s\n<div class=\"plain\">\n <p>\n This is the Copy/Paste friendly version of the traceback.\n </p>\n <textarea cols=\"50\" rows=\"10\" name=\"code\" readonly>%(plaintext)s</textarea>\n</div>\n<div class=\"explanation\">\n The debugger caught an exception in your WSGI application. You can now\n look at the traceback which led to the error. <span class=\"nojavascript\">\n If you enable JavaScript you can also use additional features such as code\n execution (if the evalex feature is enabled), automatic pasting of the\n exceptions and much more.</span>\n</div>\n\"\"\"\n + FOOTER\n + \"\"\"\n<!--\n\n%(plaintext_cs)s\n\n-->\n\"\"\"\n)\n\nCONSOLE_HTML = (\n HEADER\n + \"\"\"\\\n<h1>Interactive Console</h1>\n<div class=\"explanation\">\nIn this console you can execute Python expressions in the context of the\napplication. 
The initial namespace was created by the debugger automatically.\n</div>\n<div class=\"console\"><div class=\"inner\">The Console requires JavaScript.</div></div>\n\"\"\"\n + FOOTER\n)\n\nSUMMARY_HTML = \"\"\"\\\n<div class=\"%(classes)s\">\n %(title)s\n <ul>%(frames)s</ul>\n %(description)s\n</div>\n\"\"\"\n\nFRAME_HTML = \"\"\"\\\n<div class=\"frame\" id=\"frame-%(id)d\">\n <h4>File <cite class=\"filename\">\"%(filename)s\"</cite>,\n line <em class=\"line\">%(lineno)s</em>,\n in <code class=\"function\">%(function_name)s</code></h4>\n <div class=\"source %(library)s\">%(lines)s</div>\n</div>\n\"\"\"\n\nSOURCE_LINE_HTML = \"\"\"\\\n<tr class=\"%(classes)s\">\n <td class=lineno>%(lineno)s</td>\n <td>%(code)s</td>\n</tr>\n\"\"\"\n\n\ndef render_console_html(secret: str, evalex_trusted: bool = True) -> str:\n return CONSOLE_HTML % {\n \"evalex\": \"true\",\n \"evalex_trusted\": \"true\" if evalex_trusted else \"false\",\n \"console\": \"true\",\n \"title\": \"Console\",\n \"secret\": secret,\n \"traceback_id\": -1,\n }\n\n\ndef get_current_traceback(\n ignore_system_exceptions: bool = False,\n show_hidden_frames: bool = False,\n skip: int = 0,\n) -> \"Traceback\":\n \"\"\"Get the current exception info as `Traceback` object. Per default\n calling this method will reraise system exceptions such as generator exit,\n system exit or others. This behavior can be disabled by passing `False`\n to the function as first parameter.\n \"\"\"\n info = t.cast(\n t.Tuple[t.Type[BaseException], BaseException, TracebackType], sys.exc_info()\n )\n exc_type, exc_value, tb = info\n\n if ignore_system_exceptions and exc_type in {\n SystemExit,\n KeyboardInterrupt,\n GeneratorExit,\n }:\n raise\n for _ in range(skip):\n if tb.tb_next is None:\n break\n tb = tb.tb_next\n tb = Traceback(exc_type, exc_value, tb)\n if not show_hidden_frames:\n tb.filter_hidden_frames()\n return tb\n\n\nclass Line:\n \"\"\"Helper for the source renderer.\"\"\"\n\n __slots__ = (\"lineno\", \"code\", \"in_frame\", \"current\")\n\n def __init__(self, lineno: int, code: str) -> None:\n self.lineno = lineno\n self.code = code\n self.in_frame = False\n self.current = False\n\n @property\n def classes(self) -> t.List[str]:\n rv = [\"line\"]\n if self.in_frame:\n rv.append(\"in-frame\")\n if self.current:\n rv.append(\"current\")\n return rv\n\n def render(self) -> str:\n return SOURCE_LINE_HTML % {\n \"classes\": \" \".join(self.classes),\n \"lineno\": self.lineno,\n \"code\": escape(self.code),\n }\n\n\nclass Traceback:\n \"\"\"Wraps a traceback.\"\"\"\n\n def __init__(\n self,\n exc_type: t.Type[BaseException],\n exc_value: BaseException,\n tb: TracebackType,\n ) -> None:\n self.exc_type = exc_type\n self.exc_value = exc_value\n self.tb = tb\n\n exception_type = exc_type.__name__\n if exc_type.__module__ not in {\"builtins\", \"__builtin__\", \"exceptions\"}:\n exception_type = f\"{exc_type.__module__}.{exception_type}\"\n self.exception_type = exception_type\n\n self.groups = []\n memo = set()\n while True:\n self.groups.append(Group(exc_type, exc_value, tb))\n memo.add(id(exc_value))\n exc_value = exc_value.__cause__ or exc_value.__context__ # type: ignore\n if exc_value is None or id(exc_value) in memo:\n break\n exc_type = type(exc_value)\n tb = exc_value.__traceback__ # type: ignore\n self.groups.reverse()\n self.frames = [frame for group in self.groups for frame in group.frames]\n\n def filter_hidden_frames(self) -> None:\n \"\"\"Remove the frames according to the paste spec.\"\"\"\n for group in self.groups:\n 
group.filter_hidden_frames()\n\n self.frames[:] = [frame for group in self.groups for frame in group.frames]\n\n @property\n def is_syntax_error(self) -> bool:\n \"\"\"Is it a syntax error?\"\"\"\n return isinstance(self.exc_value, SyntaxError)\n\n @property\n def exception(self) -> str:\n \"\"\"String representation of the final exception.\"\"\"\n return self.groups[-1].exception\n\n def log(self, logfile: t.Optional[t.TextIO] = None) -> None:\n \"\"\"Log the ASCII traceback into a file object.\"\"\"\n if logfile is None:\n logfile = sys.stderr\n tb = f\"{self.plaintext.rstrip()}\\n\"\n logfile.write(tb)\n\n def render_summary(self, include_title: bool = True) -> str:\n \"\"\"Render the traceback for the interactive console.\"\"\"\n title = \"\"\n classes = [\"traceback\"]\n if not self.frames:\n classes.append(\"noframe-traceback\")\n frames = []\n else:\n library_frames = sum(frame.is_library for frame in self.frames)\n mark_lib = 0 < library_frames < len(self.frames)\n frames = [group.render(mark_lib=mark_lib) for group in self.groups]\n\n if include_title:\n if self.is_syntax_error:\n title = \"Syntax Error\"\n else:\n title = \"Traceback <em>(most recent call last)</em>:\"\n\n if self.is_syntax_error:\n description = f\"<pre class=syntaxerror>{escape(self.exception)}</pre>\"\n else:\n description = f\"<blockquote>{escape(self.exception)}</blockquote>\"\n\n return SUMMARY_HTML % {\n \"classes\": \" \".join(classes),\n \"title\": f\"<h3>{title if title else ''}</h3>\",\n \"frames\": \"\\n\".join(frames),\n \"description\": description,\n }\n\n def render_full(\n self,\n evalex: bool = False,\n secret: t.Optional[str] = None,\n evalex_trusted: bool = True,\n ) -> str:\n \"\"\"Render the Full HTML page with the traceback info.\"\"\"\n exc = escape(self.exception)\n return PAGE_HTML % {\n \"evalex\": \"true\" if evalex else \"false\",\n \"evalex_trusted\": \"true\" if evalex_trusted else \"false\",\n \"console\": \"false\",\n \"title\": exc,\n \"exception\": exc,\n \"exception_type\": escape(self.exception_type),\n \"summary\": self.render_summary(include_title=False),\n \"plaintext\": escape(self.plaintext),\n \"plaintext_cs\": re.sub(\"-{2,}\", \"-\", self.plaintext),\n \"traceback_id\": self.id,\n \"secret\": secret,\n }\n\n @cached_property\n def plaintext(self) -> str:\n return \"\\n\".join([group.render_text() for group in self.groups])\n\n @property\n def id(self) -> int:\n return id(self)\n\n\nclass Group:\n \"\"\"A group of frames for an exception in a traceback. 
If the\n exception has a ``__cause__`` or ``__context__``, there are multiple\n exception groups.\n \"\"\"\n\n def __init__(\n self,\n exc_type: t.Type[BaseException],\n exc_value: BaseException,\n tb: TracebackType,\n ) -> None:\n self.exc_type = exc_type\n self.exc_value = exc_value\n self.info = None\n if exc_value.__cause__ is not None:\n self.info = (\n \"The above exception was the direct cause of the following exception\"\n )\n elif exc_value.__context__ is not None:\n self.info = (\n \"During handling of the above exception, another exception occurred\"\n )\n\n self.frames = []\n while tb is not None:\n self.frames.append(Frame(exc_type, exc_value, tb))\n tb = tb.tb_next # type: ignore\n\n def filter_hidden_frames(self) -> None:\n new_frames: t.List[Frame] = []\n hidden = False\n\n for frame in self.frames:\n hide = frame.hide\n if hide in (\"before\", \"before_and_this\"):\n new_frames = []\n hidden = False\n if hide == \"before_and_this\":\n continue\n elif hide in (\"reset\", \"reset_and_this\"):\n hidden = False\n if hide == \"reset_and_this\":\n continue\n elif hide in (\"after\", \"after_and_this\"):\n hidden = True\n if hide == \"after_and_this\":\n continue\n elif hide or hidden:\n continue\n new_frames.append(frame)\n\n # if we only have one frame and that frame is from the codeop\n # module, remove it.\n if len(new_frames) == 1 and self.frames[0].module == \"codeop\":\n del self.frames[:]\n\n # if the last frame is missing something went terrible wrong :(\n elif self.frames[-1] in new_frames:\n self.frames[:] = new_frames\n\n @property\n def exception(self) -> str:\n \"\"\"String representation of the exception.\"\"\"\n buf = traceback.format_exception_only(self.exc_type, self.exc_value)\n rv = \"\".join(buf).strip()\n return _to_str(rv, \"utf-8\", \"replace\")\n\n def render(self, mark_lib: bool = True) -> str:\n out = []\n if self.info is not None:\n out.append(f'<li><div class=\"exc-divider\">{self.info}:</div>')\n for frame in self.frames:\n title = f' title=\"{escape(frame.info)}\"' if frame.info else \"\"\n out.append(f\"<li{title}>{frame.render(mark_lib=mark_lib)}\")\n return \"\\n\".join(out)\n\n def render_text(self) -> str:\n out = []\n if self.info is not None:\n out.append(f\"\\n{self.info}:\\n\")\n out.append(\"Traceback (most recent call last):\")\n for frame in self.frames:\n out.append(frame.render_text())\n out.append(self.exception)\n return \"\\n\".join(out)\n\n\nclass Frame:\n \"\"\"A single frame in a traceback.\"\"\"\n\n def __init__(\n self,\n exc_type: t.Type[BaseException],\n exc_value: BaseException,\n tb: TracebackType,\n ) -> None:\n self.lineno = tb.tb_lineno\n self.function_name = tb.tb_frame.f_code.co_name\n self.locals = tb.tb_frame.f_locals\n self.globals = tb.tb_frame.f_globals\n\n fn = inspect.getsourcefile(tb) or inspect.getfile(tb)\n if fn[-4:] in (\".pyo\", \".pyc\"):\n fn = fn[:-1]\n # if it's a file on the file system resolve the real filename.\n if os.path.isfile(fn):\n fn = os.path.realpath(fn)\n self.filename = _to_str(fn, get_filesystem_encoding())\n self.module = self.globals.get(\"__name__\", self.locals.get(\"__name__\"))\n self.loader = self.globals.get(\"__loader__\", self.locals.get(\"__loader__\"))\n self.code = tb.tb_frame.f_code\n\n # support for paste's traceback extensions\n self.hide = self.locals.get(\"__traceback_hide__\", False)\n info = self.locals.get(\"__traceback_info__\")\n if info is not None:\n info = _to_str(info, \"utf-8\", \"replace\")\n self.info = info\n\n def render(self, mark_lib: bool = True) -> 
str:\n \"\"\"Render a single frame in a traceback.\"\"\"\n return FRAME_HTML % {\n \"id\": self.id,\n \"filename\": escape(self.filename),\n \"lineno\": self.lineno,\n \"function_name\": escape(self.function_name),\n \"lines\": self.render_line_context(),\n \"library\": \"library\" if mark_lib and self.is_library else \"\",\n }\n\n @cached_property\n def is_library(self) -> bool:\n return any(\n self.filename.startswith(path) for path in sysconfig.get_paths().values()\n )\n\n def render_text(self) -> str:\n return (\n f' File \"{self.filename}\", line {self.lineno}, in {self.function_name}\\n'\n f\" {self.current_line.strip()}\"\n )\n\n def render_line_context(self) -> str:\n before, current, after = self.get_context_lines()\n rv = []\n\n def render_line(line: str, cls: str) -> None:\n line = line.expandtabs().rstrip()\n stripped_line = line.strip()\n prefix = len(line) - len(stripped_line)\n rv.append(\n f'<pre class=\"line {cls}\"><span class=\"ws\">{\" \" * prefix}</span>'\n f\"{escape(stripped_line) if stripped_line else ' '}</pre>\"\n )\n\n for line in before:\n render_line(line, \"before\")\n render_line(current, \"current\")\n for line in after:\n render_line(line, \"after\")\n\n return \"\\n\".join(rv)\n\n def get_annotated_lines(self) -> t.List[Line]:\n \"\"\"Helper function that returns lines with extra information.\"\"\"\n lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)]\n\n # find function definition and mark lines\n if hasattr(self.code, \"co_firstlineno\"):\n lineno = self.code.co_firstlineno - 1\n while lineno > 0:\n if _funcdef_re.match(lines[lineno].code):\n break\n lineno -= 1\n try:\n offset = len(inspect.getblock([f\"{x.code}\\n\" for x in lines[lineno:]]))\n except TokenError:\n offset = 0\n for line in lines[lineno : lineno + offset]:\n line.in_frame = True\n\n # mark current line\n try:\n lines[self.lineno - 1].current = True\n except IndexError:\n pass\n\n return lines\n\n def eval(self, code: t.Union[str, CodeType], mode: str = \"single\") -> t.Any:\n \"\"\"Evaluate code in the context of the frame.\"\"\"\n if isinstance(code, str):\n code = compile(code, \"<interactive>\", mode)\n return eval(code, self.globals, self.locals)\n\n @cached_property\n def sourcelines(self) -> t.List[str]:\n \"\"\"The sourcecode of the file as list of strings.\"\"\"\n # get sourcecode from loader or file\n source = None\n if self.loader is not None:\n try:\n if hasattr(self.loader, \"get_source\"):\n source = self.loader.get_source(self.module)\n elif hasattr(self.loader, \"get_source_by_code\"):\n source = self.loader.get_source_by_code(self.code)\n except Exception:\n # we munch the exception so that we don't cause troubles\n # if the loader is broken.\n pass\n\n if source is None:\n try:\n with open(self.filename, mode=\"rb\") as f:\n source = f.read()\n except OSError:\n return []\n\n # already str? 
return right away\n if isinstance(source, str):\n return source.splitlines()\n\n charset = \"utf-8\"\n if source.startswith(codecs.BOM_UTF8):\n source = source[3:]\n else:\n for idx, match in enumerate(_line_re.finditer(source)):\n coding_match = _coding_re.search(match.group())\n if coding_match is not None:\n charset = coding_match.group(1).decode(\"utf-8\")\n break\n if idx > 1:\n break\n\n # on broken cookies we fall back to utf-8 too\n charset = _to_str(charset)\n try:\n codecs.lookup(charset)\n except LookupError:\n charset = \"utf-8\"\n\n return source.decode(charset, \"replace\").splitlines()\n\n def get_context_lines(\n self, context: int = 5\n ) -> t.Tuple[t.List[str], str, t.List[str]]:\n before = self.sourcelines[self.lineno - context - 1 : self.lineno - 1]\n past = self.sourcelines[self.lineno : self.lineno + context]\n return (before, self.current_line, past)\n\n @property\n def current_line(self) -> str:\n try:\n return self.sourcelines[self.lineno - 1]\n except IndexError:\n return \"\"\n\n @cached_property\n def console(self) -> Console:\n return Console(self.globals, self.locals)\n\n @property\n def id(self) -> int:\n return id(self)\n",
"path": "src/werkzeug/debug/tbtools.py"
}
] | [
{
"content": "import codecs\nimport inspect\nimport os\nimport re\nimport sys\nimport sysconfig\nimport traceback\nimport typing as t\nfrom html import escape\nfrom tokenize import TokenError\nfrom types import CodeType\nfrom types import TracebackType\n\nfrom .._internal import _to_str\nfrom ..filesystem import get_filesystem_encoding\nfrom ..utils import cached_property\nfrom .console import Console\n\n_coding_re = re.compile(br\"coding[:=]\\s*([-\\w.]+)\")\n_line_re = re.compile(br\"^(.*?)$\", re.MULTILINE)\n_funcdef_re = re.compile(r\"^(\\s*def\\s)|(.*(?<!\\w)lambda(:|\\s))|^(\\s*@)\")\n\nHEADER = \"\"\"\\\n<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\"\n \"http://www.w3.org/TR/html4/loose.dtd\">\n<html>\n <head>\n <title>%(title)s // Werkzeug Debugger</title>\n <link rel=\"stylesheet\" href=\"?__debugger__=yes&cmd=resource&f=style.css\"\n type=\"text/css\">\n <!-- We need to make sure this has a favicon so that the debugger does\n not accidentally trigger a request to /favicon.ico which might\n change the application's state. -->\n <link rel=\"shortcut icon\"\n href=\"?__debugger__=yes&cmd=resource&f=console.png\">\n <script src=\"?__debugger__=yes&cmd=resource&f=debugger.js\"></script>\n <script type=\"text/javascript\">\n var TRACEBACK = %(traceback_id)d,\n CONSOLE_MODE = %(console)s,\n EVALEX = %(evalex)s,\n EVALEX_TRUSTED = %(evalex_trusted)s,\n SECRET = \"%(secret)s\";\n </script>\n </head>\n <body style=\"background-color: #fff\">\n <div class=\"debugger\">\n\"\"\"\nFOOTER = \"\"\"\\\n <div class=\"footer\">\n Brought to you by <strong class=\"arthur\">DON'T PANIC</strong>, your\n friendly Werkzeug powered traceback interpreter.\n </div>\n </div>\n\n <div class=\"pin-prompt\">\n <div class=\"inner\">\n <h3>Console Locked</h3>\n <p>\n The console is locked and needs to be unlocked by entering the PIN.\n You can find the PIN printed out on the standard output of your\n shell that runs the server.\n <form>\n <p>PIN:\n <input type=text name=pin size=14>\n <input type=submit name=btn value=\"Confirm Pin\">\n </form>\n </div>\n </div>\n </body>\n</html>\n\"\"\"\n\nPAGE_HTML = (\n HEADER\n + \"\"\"\\\n<h1>%(exception_type)s</h1>\n<div class=\"detail\">\n <p class=\"errormsg\">%(exception)s</p>\n</div>\n<h2 class=\"traceback\">Traceback <em>(most recent call last)</em></h2>\n%(summary)s\n<div class=\"plain\">\n <p>\n This is the Copy/Paste friendly version of the traceback.\n </p>\n <textarea cols=\"50\" rows=\"10\" name=\"code\" readonly>%(plaintext)s</textarea>\n</div>\n<div class=\"explanation\">\n The debugger caught an exception in your WSGI application. You can now\n look at the traceback which led to the error. <span class=\"nojavascript\">\n If you enable JavaScript you can also use additional features such as code\n execution (if the evalex feature is enabled), automatic pasting of the\n exceptions and much more.</span>\n</div>\n\"\"\"\n + FOOTER\n + \"\"\"\n<!--\n\n%(plaintext_cs)s\n\n-->\n\"\"\"\n)\n\nCONSOLE_HTML = (\n HEADER\n + \"\"\"\\\n<h1>Interactive Console</h1>\n<div class=\"explanation\">\nIn this console you can execute Python expressions in the context of the\napplication. 
The initial namespace was created by the debugger automatically.\n</div>\n<div class=\"console\"><div class=\"inner\">The Console requires JavaScript.</div></div>\n\"\"\"\n + FOOTER\n)\n\nSUMMARY_HTML = \"\"\"\\\n<div class=\"%(classes)s\">\n %(title)s\n <ul>%(frames)s</ul>\n %(description)s\n</div>\n\"\"\"\n\nFRAME_HTML = \"\"\"\\\n<div class=\"frame\" id=\"frame-%(id)d\">\n <h4>File <cite class=\"filename\">\"%(filename)s\"</cite>,\n line <em class=\"line\">%(lineno)s</em>,\n in <code class=\"function\">%(function_name)s</code></h4>\n <div class=\"source %(library)s\">%(lines)s</div>\n</div>\n\"\"\"\n\nSOURCE_LINE_HTML = \"\"\"\\\n<tr class=\"%(classes)s\">\n <td class=lineno>%(lineno)s</td>\n <td>%(code)s</td>\n</tr>\n\"\"\"\n\n\ndef render_console_html(secret: str, evalex_trusted: bool = True) -> str:\n return CONSOLE_HTML % {\n \"evalex\": \"true\",\n \"evalex_trusted\": \"true\" if evalex_trusted else \"false\",\n \"console\": \"true\",\n \"title\": \"Console\",\n \"secret\": secret,\n \"traceback_id\": -1,\n }\n\n\ndef get_current_traceback(\n ignore_system_exceptions: bool = False,\n show_hidden_frames: bool = False,\n skip: int = 0,\n) -> \"Traceback\":\n \"\"\"Get the current exception info as `Traceback` object. Per default\n calling this method will reraise system exceptions such as generator exit,\n system exit or others. This behavior can be disabled by passing `False`\n to the function as first parameter.\n \"\"\"\n info = t.cast(\n t.Tuple[t.Type[BaseException], BaseException, TracebackType], sys.exc_info()\n )\n exc_type, exc_value, tb = info\n\n if ignore_system_exceptions and exc_type in {\n SystemExit,\n KeyboardInterrupt,\n GeneratorExit,\n }:\n raise\n for _ in range(skip):\n if tb.tb_next is None:\n break\n tb = tb.tb_next\n tb = Traceback(exc_type, exc_value, tb)\n if not show_hidden_frames:\n tb.filter_hidden_frames()\n return tb\n\n\nclass Line:\n \"\"\"Helper for the source renderer.\"\"\"\n\n __slots__ = (\"lineno\", \"code\", \"in_frame\", \"current\")\n\n def __init__(self, lineno: int, code: str) -> None:\n self.lineno = lineno\n self.code = code\n self.in_frame = False\n self.current = False\n\n @property\n def classes(self) -> t.List[str]:\n rv = [\"line\"]\n if self.in_frame:\n rv.append(\"in-frame\")\n if self.current:\n rv.append(\"current\")\n return rv\n\n def render(self) -> str:\n return SOURCE_LINE_HTML % {\n \"classes\": \" \".join(self.classes),\n \"lineno\": self.lineno,\n \"code\": escape(self.code),\n }\n\n\nclass Traceback:\n \"\"\"Wraps a traceback.\"\"\"\n\n def __init__(\n self,\n exc_type: t.Type[BaseException],\n exc_value: BaseException,\n tb: TracebackType,\n ) -> None:\n self.exc_type = exc_type\n self.exc_value = exc_value\n self.tb = tb\n\n exception_type = exc_type.__name__\n if exc_type.__module__ not in {\"builtins\", \"__builtin__\", \"exceptions\"}:\n exception_type = f\"{exc_type.__module__}.{exception_type}\"\n self.exception_type = exception_type\n\n self.groups = []\n memo = set()\n while True:\n self.groups.append(Group(exc_type, exc_value, tb))\n memo.add(id(exc_value))\n exc_value = exc_value.__cause__ or exc_value.__context__ # type: ignore\n if exc_value is None or id(exc_value) in memo:\n break\n exc_type = type(exc_value)\n tb = exc_value.__traceback__ # type: ignore\n self.groups.reverse()\n self.frames = [frame for group in self.groups for frame in group.frames]\n\n def filter_hidden_frames(self) -> None:\n \"\"\"Remove the frames according to the paste spec.\"\"\"\n for group in self.groups:\n 
group.filter_hidden_frames()\n\n self.frames[:] = [frame for group in self.groups for frame in group.frames]\n\n @property\n def is_syntax_error(self) -> bool:\n \"\"\"Is it a syntax error?\"\"\"\n return isinstance(self.exc_value, SyntaxError)\n\n @property\n def exception(self) -> str:\n \"\"\"String representation of the final exception.\"\"\"\n return self.groups[-1].exception\n\n def log(self, logfile: t.Optional[t.TextIO] = None) -> None:\n \"\"\"Log the ASCII traceback into a file object.\"\"\"\n if logfile is None:\n logfile = sys.stderr\n tb = f\"{self.plaintext.rstrip()}\\n\"\n logfile.write(tb)\n\n def render_summary(self, include_title: bool = True) -> str:\n \"\"\"Render the traceback for the interactive console.\"\"\"\n title = \"\"\n classes = [\"traceback\"]\n if not self.frames:\n classes.append(\"noframe-traceback\")\n frames = []\n else:\n library_frames = sum(frame.is_library for frame in self.frames)\n mark_lib = 0 < library_frames < len(self.frames)\n frames = [group.render(mark_lib=mark_lib) for group in self.groups]\n\n if include_title:\n if self.is_syntax_error:\n title = \"Syntax Error\"\n else:\n title = \"Traceback <em>(most recent call last)</em>:\"\n\n if self.is_syntax_error:\n description = f\"<pre class=syntaxerror>{escape(self.exception)}</pre>\"\n else:\n description = f\"<blockquote>{escape(self.exception)}</blockquote>\"\n\n return SUMMARY_HTML % {\n \"classes\": \" \".join(classes),\n \"title\": f\"<h3>{title if title else ''}</h3>\",\n \"frames\": \"\\n\".join(frames),\n \"description\": description,\n }\n\n def render_full(\n self,\n evalex: bool = False,\n secret: t.Optional[str] = None,\n evalex_trusted: bool = True,\n ) -> str:\n \"\"\"Render the Full HTML page with the traceback info.\"\"\"\n exc = escape(self.exception)\n return PAGE_HTML % {\n \"evalex\": \"true\" if evalex else \"false\",\n \"evalex_trusted\": \"true\" if evalex_trusted else \"false\",\n \"console\": \"false\",\n \"title\": exc,\n \"exception\": exc,\n \"exception_type\": escape(self.exception_type),\n \"summary\": self.render_summary(include_title=False),\n \"plaintext\": escape(self.plaintext),\n \"plaintext_cs\": re.sub(\"-{2,}\", \"-\", self.plaintext),\n \"traceback_id\": self.id,\n \"secret\": secret,\n }\n\n @cached_property\n def plaintext(self) -> str:\n return \"\\n\".join([group.render_text() for group in self.groups])\n\n @property\n def id(self) -> int:\n return id(self)\n\n\nclass Group:\n \"\"\"A group of frames for an exception in a traceback. 
If the\n exception has a ``__cause__`` or ``__context__``, there are multiple\n exception groups.\n \"\"\"\n\n def __init__(\n self,\n exc_type: t.Type[BaseException],\n exc_value: BaseException,\n tb: TracebackType,\n ) -> None:\n self.exc_type = exc_type\n self.exc_value = exc_value\n self.info = None\n if exc_value.__cause__ is not None:\n self.info = (\n \"The above exception was the direct cause of the following exception\"\n )\n elif exc_value.__context__ is not None:\n self.info = (\n \"During handling of the above exception, another exception occurred\"\n )\n\n self.frames = []\n while tb is not None:\n self.frames.append(Frame(exc_type, exc_value, tb))\n tb = tb.tb_next # type: ignore\n\n def filter_hidden_frames(self) -> None:\n new_frames: t.List[Frame] = []\n hidden = False\n\n for frame in self.frames:\n hide = frame.hide\n if hide in (\"before\", \"before_and_this\"):\n new_frames = []\n hidden = False\n if hide == \"before_and_this\":\n continue\n elif hide in (\"reset\", \"reset_and_this\"):\n hidden = False\n if hide == \"reset_and_this\":\n continue\n elif hide in (\"after\", \"after_and_this\"):\n hidden = True\n if hide == \"after_and_this\":\n continue\n elif hide or hidden:\n continue\n new_frames.append(frame)\n\n # if we only have one frame and that frame is from the codeop\n # module, remove it.\n if len(new_frames) == 1 and self.frames[0].module == \"codeop\":\n del self.frames[:]\n\n # if the last frame is missing something went terrible wrong :(\n elif self.frames[-1] in new_frames:\n self.frames[:] = new_frames\n\n @property\n def exception(self) -> str:\n \"\"\"String representation of the exception.\"\"\"\n buf = traceback.format_exception_only(self.exc_type, self.exc_value)\n rv = \"\".join(buf).strip()\n return _to_str(rv, \"utf-8\", \"replace\")\n\n def render(self, mark_lib: bool = True) -> str:\n out = []\n if self.info is not None:\n out.append(f'<li><div class=\"exc-divider\">{self.info}:</div>')\n for frame in self.frames:\n title = f' title=\"{escape(frame.info)}\"' if frame.info else \"\"\n out.append(f\"<li{title}>{frame.render(mark_lib=mark_lib)}\")\n return \"\\n\".join(out)\n\n def render_text(self) -> str:\n out = []\n if self.info is not None:\n out.append(f\"\\n{self.info}:\\n\")\n out.append(\"Traceback (most recent call last):\")\n for frame in self.frames:\n out.append(frame.render_text())\n out.append(self.exception)\n return \"\\n\".join(out)\n\n\nclass Frame:\n \"\"\"A single frame in a traceback.\"\"\"\n\n def __init__(\n self,\n exc_type: t.Type[BaseException],\n exc_value: BaseException,\n tb: TracebackType,\n ) -> None:\n self.lineno = tb.tb_lineno\n self.function_name = tb.tb_frame.f_code.co_name\n self.locals = tb.tb_frame.f_locals\n self.globals = tb.tb_frame.f_globals\n\n fn = inspect.getsourcefile(tb) or inspect.getfile(tb)\n if fn[-4:] in (\".pyo\", \".pyc\"):\n fn = fn[:-1]\n # if it's a file on the file system resolve the real filename.\n if os.path.isfile(fn):\n fn = os.path.realpath(fn)\n self.filename = _to_str(fn, get_filesystem_encoding())\n self.module = self.globals.get(\"__name__\", self.locals.get(\"__name__\"))\n self.loader = self.globals.get(\"__loader__\", self.locals.get(\"__loader__\"))\n self.code = tb.tb_frame.f_code\n\n # support for paste's traceback extensions\n self.hide = self.locals.get(\"__traceback_hide__\", False)\n info = self.locals.get(\"__traceback_info__\")\n if info is not None:\n info = _to_str(info, \"utf-8\", \"replace\")\n self.info = info\n\n def render(self, mark_lib: bool = True) -> 
str:\n \"\"\"Render a single frame in a traceback.\"\"\"\n return FRAME_HTML % {\n \"id\": self.id,\n \"filename\": escape(self.filename),\n \"lineno\": self.lineno,\n \"function_name\": escape(self.function_name),\n \"lines\": self.render_line_context(),\n \"library\": \"library\" if mark_lib and self.is_library else \"\",\n }\n\n @cached_property\n def is_library(self) -> bool:\n return any(\n self.filename.startswith(os.path.realpath(path))\n for path in sysconfig.get_paths().values()\n )\n\n def render_text(self) -> str:\n return (\n f' File \"{self.filename}\", line {self.lineno}, in {self.function_name}\\n'\n f\" {self.current_line.strip()}\"\n )\n\n def render_line_context(self) -> str:\n before, current, after = self.get_context_lines()\n rv = []\n\n def render_line(line: str, cls: str) -> None:\n line = line.expandtabs().rstrip()\n stripped_line = line.strip()\n prefix = len(line) - len(stripped_line)\n rv.append(\n f'<pre class=\"line {cls}\"><span class=\"ws\">{\" \" * prefix}</span>'\n f\"{escape(stripped_line) if stripped_line else ' '}</pre>\"\n )\n\n for line in before:\n render_line(line, \"before\")\n render_line(current, \"current\")\n for line in after:\n render_line(line, \"after\")\n\n return \"\\n\".join(rv)\n\n def get_annotated_lines(self) -> t.List[Line]:\n \"\"\"Helper function that returns lines with extra information.\"\"\"\n lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)]\n\n # find function definition and mark lines\n if hasattr(self.code, \"co_firstlineno\"):\n lineno = self.code.co_firstlineno - 1\n while lineno > 0:\n if _funcdef_re.match(lines[lineno].code):\n break\n lineno -= 1\n try:\n offset = len(inspect.getblock([f\"{x.code}\\n\" for x in lines[lineno:]]))\n except TokenError:\n offset = 0\n for line in lines[lineno : lineno + offset]:\n line.in_frame = True\n\n # mark current line\n try:\n lines[self.lineno - 1].current = True\n except IndexError:\n pass\n\n return lines\n\n def eval(self, code: t.Union[str, CodeType], mode: str = \"single\") -> t.Any:\n \"\"\"Evaluate code in the context of the frame.\"\"\"\n if isinstance(code, str):\n code = compile(code, \"<interactive>\", mode)\n return eval(code, self.globals, self.locals)\n\n @cached_property\n def sourcelines(self) -> t.List[str]:\n \"\"\"The sourcecode of the file as list of strings.\"\"\"\n # get sourcecode from loader or file\n source = None\n if self.loader is not None:\n try:\n if hasattr(self.loader, \"get_source\"):\n source = self.loader.get_source(self.module)\n elif hasattr(self.loader, \"get_source_by_code\"):\n source = self.loader.get_source_by_code(self.code)\n except Exception:\n # we munch the exception so that we don't cause troubles\n # if the loader is broken.\n pass\n\n if source is None:\n try:\n with open(self.filename, mode=\"rb\") as f:\n source = f.read()\n except OSError:\n return []\n\n # already str? 
return right away\n if isinstance(source, str):\n return source.splitlines()\n\n charset = \"utf-8\"\n if source.startswith(codecs.BOM_UTF8):\n source = source[3:]\n else:\n for idx, match in enumerate(_line_re.finditer(source)):\n coding_match = _coding_re.search(match.group())\n if coding_match is not None:\n charset = coding_match.group(1).decode(\"utf-8\")\n break\n if idx > 1:\n break\n\n # on broken cookies we fall back to utf-8 too\n charset = _to_str(charset)\n try:\n codecs.lookup(charset)\n except LookupError:\n charset = \"utf-8\"\n\n return source.decode(charset, \"replace\").splitlines()\n\n def get_context_lines(\n self, context: int = 5\n ) -> t.Tuple[t.List[str], str, t.List[str]]:\n before = self.sourcelines[self.lineno - context - 1 : self.lineno - 1]\n past = self.sourcelines[self.lineno : self.lineno + context]\n return (before, self.current_line, past)\n\n @property\n def current_line(self) -> str:\n try:\n return self.sourcelines[self.lineno - 1]\n except IndexError:\n return \"\"\n\n @cached_property\n def console(self) -> Console:\n return Console(self.globals, self.locals)\n\n @property\n def id(self) -> int:\n return id(self)\n",
"path": "src/werkzeug/debug/tbtools.py"
}
] | diff --git a/src/werkzeug/debug/tbtools.py b/src/werkzeug/debug/tbtools.py
index ec9e7c6b1..75b4cd2c5 100644
--- a/src/werkzeug/debug/tbtools.py
+++ b/src/werkzeug/debug/tbtools.py
@@ -461,7 +461,8 @@ def render(self, mark_lib: bool = True) -> str:
@cached_property
def is_library(self) -> bool:
return any(
- self.filename.startswith(path) for path in sysconfig.get_paths().values()
+ self.filename.startswith(os.path.realpath(path))
+ for path in sysconfig.get_paths().values()
)
def render_text(self) -> str:
|
celery__celery-8152 | CLI help output: avoid click text rewrapping
# Checklist
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Enhancement%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical enhancement to an existing feature.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22Issue+Type%3A+Enhancement%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed enhancements.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/main)
to find out if the same enhancement was already implemented in the
main branch.
- [x] I have included all related issues and possible duplicate issues in this issue
(If there are none, check this box anyway).
# Brief Summary
The command `celery worker --help` gives:
```bash
Usage: celery worker [OPTIONS]
Start worker instance.
Examples
--------
$ celery --app=proj worker -l INFO $ celery -A proj worker -l INFO -Q
hipri,lopri $ celery -A proj worker --concurrency=4 $ celery -A proj worker
--concurrency=1000 -P eventlet $ celery worker --autoscale=10,0
```
This is caused by the [text rewrapping of `click`](https://click.palletsprojects.com/en/8.1.x/documentation/#preventing-rewrapping). The solution is to add `\b` before every paragraph, which results in:
```bash
Usage: celery worker [OPTIONS]
Start worker instance.
Examples
--------
$ celery --app=proj worker -l INFO
$ celery -A proj worker -l INFO -Q hipri,lopri
$ celery -A proj worker --concurrency=4
$ celery -A proj worker --concurrency=1000 -P eventlet
$ celery worker --autoscale=10,0
```
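
A minimal stand-alone sketch of the `\b` escape, using a plain `click` command rather than Celery's actual `CeleryDaemonCommand`: a line containing only `\b` tells click's help formatter not to rewrap the paragraph that follows it.

```python
import click


@click.command()
def worker():
    """Start worker instance.

    \b
    Examples
    --------

    \b
    $ celery --app=proj worker -l INFO
    $ celery -A proj worker -l INFO -Q hipri,lopri
    """


if __name__ == "__main__":
    worker()
```

Invoking the script with `--help` should then print each example on its own line instead of flowing them together.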
| [
{
"content": "\"\"\"Program used to start a Celery worker instance.\"\"\"\n\nimport os\nimport sys\n\nimport click\nfrom click import ParamType\nfrom click.types import StringParamType\n\nfrom celery import concurrency\nfrom celery.bin.base import (COMMA_SEPARATED_LIST, LOG_LEVEL, CeleryDaemonCommand, CeleryOption,\n handle_preload_options)\nfrom celery.concurrency.base import BasePool\nfrom celery.exceptions import SecurityError\nfrom celery.platforms import EX_FAILURE, EX_OK, detached, maybe_drop_privileges\nfrom celery.utils.log import get_logger\nfrom celery.utils.nodenames import default_nodename, host_format, node_format\n\nlogger = get_logger(__name__)\n\n\nclass CeleryBeat(ParamType):\n \"\"\"Celery Beat flag.\"\"\"\n\n name = \"beat\"\n\n def convert(self, value, param, ctx):\n if ctx.obj.app.IS_WINDOWS and value:\n self.fail('-B option does not work on Windows. '\n 'Please run celery beat as a separate service.')\n\n return value\n\n\nclass WorkersPool(click.Choice):\n \"\"\"Workers pool option.\"\"\"\n\n name = \"pool\"\n\n def __init__(self):\n \"\"\"Initialize the workers pool option with the relevant choices.\"\"\"\n super().__init__(concurrency.get_available_pool_names())\n\n def convert(self, value, param, ctx):\n # Pools like eventlet/gevent needs to patch libs as early\n # as possible.\n if isinstance(value, type) and issubclass(value, BasePool):\n return value\n\n value = super().convert(value, param, ctx)\n worker_pool = ctx.obj.app.conf.worker_pool\n if value == 'prefork' and worker_pool:\n # If we got the default pool through the CLI\n # we need to check if the worker pool was configured.\n # If the worker pool was configured, we shouldn't use the default.\n value = concurrency.get_implementation(worker_pool)\n else:\n value = concurrency.get_implementation(value)\n\n if not value:\n value = concurrency.get_implementation(worker_pool)\n\n return value\n\n\nclass Hostname(StringParamType):\n \"\"\"Hostname option.\"\"\"\n\n name = \"hostname\"\n\n def convert(self, value, param, ctx):\n return host_format(default_nodename(value))\n\n\nclass Autoscale(ParamType):\n \"\"\"Autoscaling parameter.\"\"\"\n\n name = \"<min workers>, <max workers>\"\n\n def convert(self, value, param, ctx):\n value = value.split(',')\n\n if len(value) > 2:\n self.fail(\"Expected two comma separated integers or one integer.\"\n f\"Got {len(value)} instead.\")\n\n if len(value) == 1:\n try:\n value = (int(value[0]), 0)\n except ValueError:\n self.fail(f\"Expected an integer. 
Got {value} instead.\")\n\n try:\n return tuple(reversed(sorted(map(int, value))))\n except ValueError:\n self.fail(\"Expected two comma separated integers.\"\n f\"Got {value.join(',')} instead.\")\n\n\nCELERY_BEAT = CeleryBeat()\nWORKERS_POOL = WorkersPool()\nHOSTNAME = Hostname()\nAUTOSCALE = Autoscale()\n\nC_FAKEFORK = os.environ.get('C_FAKEFORK')\n\n\ndef detach(path, argv, logfile=None, pidfile=None, uid=None,\n gid=None, umask=None, workdir=None, fake=False, app=None,\n executable=None, hostname=None):\n \"\"\"Detach program by argv.\"\"\"\n fake = 1 if C_FAKEFORK else fake\n # `detached()` will attempt to touch the logfile to confirm that error\n # messages won't be lost after detaching stdout/err, but this means we need\n # to pre-format it rather than relying on `setup_logging_subsystem()` like\n # we can elsewhere.\n logfile = node_format(logfile, hostname)\n with detached(logfile, pidfile, uid, gid, umask, workdir, fake,\n after_forkers=False):\n try:\n if executable is not None:\n path = executable\n os.execv(path, [path] + argv)\n return EX_OK\n except Exception: # pylint: disable=broad-except\n if app is None:\n from celery import current_app\n app = current_app\n app.log.setup_logging_subsystem(\n 'ERROR', logfile, hostname=hostname)\n logger.critical(\"Can't exec %r\", ' '.join([path] + argv),\n exc_info=True)\n return EX_FAILURE\n\n\[email protected](cls=CeleryDaemonCommand,\n context_settings={'allow_extra_args': True})\[email protected]('-n',\n '--hostname',\n default=host_format(default_nodename(None)),\n cls=CeleryOption,\n type=HOSTNAME,\n help_group=\"Worker Options\",\n help=\"Set custom hostname (e.g., 'w1@%%h'). \"\n \"Expands: %%h (hostname), %%n (name) and %%d, (domain).\")\[email protected]('-D',\n '--detach',\n cls=CeleryOption,\n is_flag=True,\n default=False,\n help_group=\"Worker Options\",\n help=\"Start worker as a background process.\")\[email protected]('-S',\n '--statedb',\n cls=CeleryOption,\n type=click.Path(),\n callback=lambda ctx, _,\n value: value or ctx.obj.app.conf.worker_state_db,\n help_group=\"Worker Options\",\n help=\"Path to the state database. The extension '.db' may be \"\n \"appended to the filename.\")\[email protected]('-l',\n '--loglevel',\n default='WARNING',\n cls=CeleryOption,\n type=LOG_LEVEL,\n help_group=\"Worker Options\",\n help=\"Logging level.\")\[email protected]('optimization',\n '-O',\n default='default',\n cls=CeleryOption,\n type=click.Choice(('default', 'fair')),\n help_group=\"Worker Options\",\n help=\"Apply optimization profile.\")\[email protected]('--prefetch-multiplier',\n type=int,\n metavar=\"<prefetch multiplier>\",\n callback=lambda ctx, _,\n value: value or ctx.obj.app.conf.worker_prefetch_multiplier,\n cls=CeleryOption,\n help_group=\"Worker Options\",\n help=\"Set custom prefetch multiplier value \"\n \"for this worker instance.\")\[email protected]('-c',\n '--concurrency',\n type=int,\n metavar=\"<concurrency>\",\n callback=lambda ctx, _,\n value: value or ctx.obj.app.conf.worker_concurrency,\n cls=CeleryOption,\n help_group=\"Pool Options\",\n help=\"Number of child processes processing the queue. 
\"\n \"The default is the number of CPUs available\"\n \" on your system.\")\[email protected]('-P',\n '--pool',\n default='prefork',\n type=WORKERS_POOL,\n cls=CeleryOption,\n help_group=\"Pool Options\",\n help=\"Pool implementation.\")\[email protected]('-E',\n '--task-events',\n '--events',\n is_flag=True,\n default=None,\n cls=CeleryOption,\n help_group=\"Pool Options\",\n help=\"Send task-related events that can be captured by monitors\"\n \" like celery events, celerymon, and others.\")\[email protected]('--time-limit',\n type=float,\n cls=CeleryOption,\n help_group=\"Pool Options\",\n help=\"Enables a hard time limit \"\n \"(in seconds int/float) for tasks.\")\[email protected]('--soft-time-limit',\n type=float,\n cls=CeleryOption,\n help_group=\"Pool Options\",\n help=\"Enables a soft time limit \"\n \"(in seconds int/float) for tasks.\")\[email protected]('--max-tasks-per-child',\n type=int,\n cls=CeleryOption,\n help_group=\"Pool Options\",\n help=\"Maximum number of tasks a pool worker can execute before \"\n \"it's terminated and replaced by a new worker.\")\[email protected]('--max-memory-per-child',\n type=int,\n cls=CeleryOption,\n help_group=\"Pool Options\",\n help=\"Maximum amount of resident memory, in KiB, that may be \"\n \"consumed by a child process before it will be replaced \"\n \"by a new one. If a single task causes a child process \"\n \"to exceed this limit, the task will be completed and \"\n \"the child process will be replaced afterwards.\\n\"\n \"Default: no limit.\")\[email protected]('--purge',\n '--discard',\n is_flag=True,\n cls=CeleryOption,\n help_group=\"Queue Options\")\[email protected]('--queues',\n '-Q',\n type=COMMA_SEPARATED_LIST,\n cls=CeleryOption,\n help_group=\"Queue Options\")\[email protected]('--exclude-queues',\n '-X',\n type=COMMA_SEPARATED_LIST,\n cls=CeleryOption,\n help_group=\"Queue Options\")\[email protected]('--include',\n '-I',\n type=COMMA_SEPARATED_LIST,\n cls=CeleryOption,\n help_group=\"Queue Options\")\[email protected]('--without-gossip',\n is_flag=True,\n cls=CeleryOption,\n help_group=\"Features\")\[email protected]('--without-mingle',\n is_flag=True,\n cls=CeleryOption,\n help_group=\"Features\")\[email protected]('--without-heartbeat',\n is_flag=True,\n cls=CeleryOption,\n help_group=\"Features\", )\[email protected]('--heartbeat-interval',\n type=int,\n cls=CeleryOption,\n help_group=\"Features\", )\[email protected]('--autoscale',\n type=AUTOSCALE,\n cls=CeleryOption,\n help_group=\"Features\", )\[email protected]('-B',\n '--beat',\n type=CELERY_BEAT,\n cls=CeleryOption,\n is_flag=True,\n help_group=\"Embedded Beat Options\")\[email protected]('-s',\n '--schedule-filename',\n '--schedule',\n callback=lambda ctx, _,\n value: value or ctx.obj.app.conf.beat_schedule_filename,\n cls=CeleryOption,\n help_group=\"Embedded Beat Options\")\[email protected]('--scheduler',\n cls=CeleryOption,\n help_group=\"Embedded Beat Options\")\[email protected]_context\n@handle_preload_options\ndef worker(ctx, hostname=None, pool_cls=None, app=None, uid=None, gid=None,\n loglevel=None, logfile=None, pidfile=None, statedb=None,\n **kwargs):\n \"\"\"Start worker instance.\n\n Examples\n --------\n $ celery --app=proj worker -l INFO\n $ celery -A proj worker -l INFO -Q hipri,lopri\n $ celery -A proj worker --concurrency=4\n $ celery -A proj worker --concurrency=1000 -P eventlet\n $ celery worker --autoscale=10,0\n\n \"\"\"\n try:\n app = ctx.obj.app\n if ctx.args:\n try:\n app.config_from_cmdline(ctx.args, namespace='worker')\n except 
(KeyError, ValueError) as e:\n # TODO: Improve the error messages\n raise click.UsageError(\n \"Unable to parse extra configuration from command line.\\n\"\n f\"Reason: {e}\", ctx=ctx)\n if kwargs.get('detach', False):\n argv = ['-m', 'celery'] + sys.argv[1:]\n if '--detach' in argv:\n argv.remove('--detach')\n if '-D' in argv:\n argv.remove('-D')\n if \"--uid\" in argv:\n argv.remove('--uid')\n if \"--gid\" in argv:\n argv.remove('--gid')\n\n return detach(sys.executable,\n argv,\n logfile=logfile,\n pidfile=pidfile,\n uid=uid, gid=gid,\n umask=kwargs.get('umask', None),\n workdir=kwargs.get('workdir', None),\n app=app,\n executable=kwargs.get('executable', None),\n hostname=hostname)\n\n maybe_drop_privileges(uid=uid, gid=gid)\n worker = app.Worker(\n hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,\n logfile=logfile, # node format handled by celery.app.log.setup\n pidfile=node_format(pidfile, hostname),\n statedb=node_format(statedb, hostname),\n no_color=ctx.obj.no_color,\n quiet=ctx.obj.quiet,\n **kwargs)\n worker.start()\n ctx.exit(worker.exitcode)\n except SecurityError as e:\n ctx.obj.error(e.args[0])\n ctx.exit(1)\n",
"path": "celery/bin/worker.py"
}
] | [
{
"content": "\"\"\"Program used to start a Celery worker instance.\"\"\"\n\nimport os\nimport sys\n\nimport click\nfrom click import ParamType\nfrom click.types import StringParamType\n\nfrom celery import concurrency\nfrom celery.bin.base import (COMMA_SEPARATED_LIST, LOG_LEVEL, CeleryDaemonCommand, CeleryOption,\n handle_preload_options)\nfrom celery.concurrency.base import BasePool\nfrom celery.exceptions import SecurityError\nfrom celery.platforms import EX_FAILURE, EX_OK, detached, maybe_drop_privileges\nfrom celery.utils.log import get_logger\nfrom celery.utils.nodenames import default_nodename, host_format, node_format\n\nlogger = get_logger(__name__)\n\n\nclass CeleryBeat(ParamType):\n \"\"\"Celery Beat flag.\"\"\"\n\n name = \"beat\"\n\n def convert(self, value, param, ctx):\n if ctx.obj.app.IS_WINDOWS and value:\n self.fail('-B option does not work on Windows. '\n 'Please run celery beat as a separate service.')\n\n return value\n\n\nclass WorkersPool(click.Choice):\n \"\"\"Workers pool option.\"\"\"\n\n name = \"pool\"\n\n def __init__(self):\n \"\"\"Initialize the workers pool option with the relevant choices.\"\"\"\n super().__init__(concurrency.get_available_pool_names())\n\n def convert(self, value, param, ctx):\n # Pools like eventlet/gevent needs to patch libs as early\n # as possible.\n if isinstance(value, type) and issubclass(value, BasePool):\n return value\n\n value = super().convert(value, param, ctx)\n worker_pool = ctx.obj.app.conf.worker_pool\n if value == 'prefork' and worker_pool:\n # If we got the default pool through the CLI\n # we need to check if the worker pool was configured.\n # If the worker pool was configured, we shouldn't use the default.\n value = concurrency.get_implementation(worker_pool)\n else:\n value = concurrency.get_implementation(value)\n\n if not value:\n value = concurrency.get_implementation(worker_pool)\n\n return value\n\n\nclass Hostname(StringParamType):\n \"\"\"Hostname option.\"\"\"\n\n name = \"hostname\"\n\n def convert(self, value, param, ctx):\n return host_format(default_nodename(value))\n\n\nclass Autoscale(ParamType):\n \"\"\"Autoscaling parameter.\"\"\"\n\n name = \"<min workers>, <max workers>\"\n\n def convert(self, value, param, ctx):\n value = value.split(',')\n\n if len(value) > 2:\n self.fail(\"Expected two comma separated integers or one integer.\"\n f\"Got {len(value)} instead.\")\n\n if len(value) == 1:\n try:\n value = (int(value[0]), 0)\n except ValueError:\n self.fail(f\"Expected an integer. 
Got {value} instead.\")\n\n try:\n return tuple(reversed(sorted(map(int, value))))\n except ValueError:\n self.fail(\"Expected two comma separated integers.\"\n f\"Got {value.join(',')} instead.\")\n\n\nCELERY_BEAT = CeleryBeat()\nWORKERS_POOL = WorkersPool()\nHOSTNAME = Hostname()\nAUTOSCALE = Autoscale()\n\nC_FAKEFORK = os.environ.get('C_FAKEFORK')\n\n\ndef detach(path, argv, logfile=None, pidfile=None, uid=None,\n gid=None, umask=None, workdir=None, fake=False, app=None,\n executable=None, hostname=None):\n \"\"\"Detach program by argv.\"\"\"\n fake = 1 if C_FAKEFORK else fake\n # `detached()` will attempt to touch the logfile to confirm that error\n # messages won't be lost after detaching stdout/err, but this means we need\n # to pre-format it rather than relying on `setup_logging_subsystem()` like\n # we can elsewhere.\n logfile = node_format(logfile, hostname)\n with detached(logfile, pidfile, uid, gid, umask, workdir, fake,\n after_forkers=False):\n try:\n if executable is not None:\n path = executable\n os.execv(path, [path] + argv)\n return EX_OK\n except Exception: # pylint: disable=broad-except\n if app is None:\n from celery import current_app\n app = current_app\n app.log.setup_logging_subsystem(\n 'ERROR', logfile, hostname=hostname)\n logger.critical(\"Can't exec %r\", ' '.join([path] + argv),\n exc_info=True)\n return EX_FAILURE\n\n\[email protected](cls=CeleryDaemonCommand,\n context_settings={'allow_extra_args': True})\[email protected]('-n',\n '--hostname',\n default=host_format(default_nodename(None)),\n cls=CeleryOption,\n type=HOSTNAME,\n help_group=\"Worker Options\",\n help=\"Set custom hostname (e.g., 'w1@%%h'). \"\n \"Expands: %%h (hostname), %%n (name) and %%d, (domain).\")\[email protected]('-D',\n '--detach',\n cls=CeleryOption,\n is_flag=True,\n default=False,\n help_group=\"Worker Options\",\n help=\"Start worker as a background process.\")\[email protected]('-S',\n '--statedb',\n cls=CeleryOption,\n type=click.Path(),\n callback=lambda ctx, _,\n value: value or ctx.obj.app.conf.worker_state_db,\n help_group=\"Worker Options\",\n help=\"Path to the state database. The extension '.db' may be \"\n \"appended to the filename.\")\[email protected]('-l',\n '--loglevel',\n default='WARNING',\n cls=CeleryOption,\n type=LOG_LEVEL,\n help_group=\"Worker Options\",\n help=\"Logging level.\")\[email protected]('optimization',\n '-O',\n default='default',\n cls=CeleryOption,\n type=click.Choice(('default', 'fair')),\n help_group=\"Worker Options\",\n help=\"Apply optimization profile.\")\[email protected]('--prefetch-multiplier',\n type=int,\n metavar=\"<prefetch multiplier>\",\n callback=lambda ctx, _,\n value: value or ctx.obj.app.conf.worker_prefetch_multiplier,\n cls=CeleryOption,\n help_group=\"Worker Options\",\n help=\"Set custom prefetch multiplier value \"\n \"for this worker instance.\")\[email protected]('-c',\n '--concurrency',\n type=int,\n metavar=\"<concurrency>\",\n callback=lambda ctx, _,\n value: value or ctx.obj.app.conf.worker_concurrency,\n cls=CeleryOption,\n help_group=\"Pool Options\",\n help=\"Number of child processes processing the queue. 
\"\n \"The default is the number of CPUs available\"\n \" on your system.\")\[email protected]('-P',\n '--pool',\n default='prefork',\n type=WORKERS_POOL,\n cls=CeleryOption,\n help_group=\"Pool Options\",\n help=\"Pool implementation.\")\[email protected]('-E',\n '--task-events',\n '--events',\n is_flag=True,\n default=None,\n cls=CeleryOption,\n help_group=\"Pool Options\",\n help=\"Send task-related events that can be captured by monitors\"\n \" like celery events, celerymon, and others.\")\[email protected]('--time-limit',\n type=float,\n cls=CeleryOption,\n help_group=\"Pool Options\",\n help=\"Enables a hard time limit \"\n \"(in seconds int/float) for tasks.\")\[email protected]('--soft-time-limit',\n type=float,\n cls=CeleryOption,\n help_group=\"Pool Options\",\n help=\"Enables a soft time limit \"\n \"(in seconds int/float) for tasks.\")\[email protected]('--max-tasks-per-child',\n type=int,\n cls=CeleryOption,\n help_group=\"Pool Options\",\n help=\"Maximum number of tasks a pool worker can execute before \"\n \"it's terminated and replaced by a new worker.\")\[email protected]('--max-memory-per-child',\n type=int,\n cls=CeleryOption,\n help_group=\"Pool Options\",\n help=\"Maximum amount of resident memory, in KiB, that may be \"\n \"consumed by a child process before it will be replaced \"\n \"by a new one. If a single task causes a child process \"\n \"to exceed this limit, the task will be completed and \"\n \"the child process will be replaced afterwards.\\n\"\n \"Default: no limit.\")\[email protected]('--purge',\n '--discard',\n is_flag=True,\n cls=CeleryOption,\n help_group=\"Queue Options\")\[email protected]('--queues',\n '-Q',\n type=COMMA_SEPARATED_LIST,\n cls=CeleryOption,\n help_group=\"Queue Options\")\[email protected]('--exclude-queues',\n '-X',\n type=COMMA_SEPARATED_LIST,\n cls=CeleryOption,\n help_group=\"Queue Options\")\[email protected]('--include',\n '-I',\n type=COMMA_SEPARATED_LIST,\n cls=CeleryOption,\n help_group=\"Queue Options\")\[email protected]('--without-gossip',\n is_flag=True,\n cls=CeleryOption,\n help_group=\"Features\")\[email protected]('--without-mingle',\n is_flag=True,\n cls=CeleryOption,\n help_group=\"Features\")\[email protected]('--without-heartbeat',\n is_flag=True,\n cls=CeleryOption,\n help_group=\"Features\", )\[email protected]('--heartbeat-interval',\n type=int,\n cls=CeleryOption,\n help_group=\"Features\", )\[email protected]('--autoscale',\n type=AUTOSCALE,\n cls=CeleryOption,\n help_group=\"Features\", )\[email protected]('-B',\n '--beat',\n type=CELERY_BEAT,\n cls=CeleryOption,\n is_flag=True,\n help_group=\"Embedded Beat Options\")\[email protected]('-s',\n '--schedule-filename',\n '--schedule',\n callback=lambda ctx, _,\n value: value or ctx.obj.app.conf.beat_schedule_filename,\n cls=CeleryOption,\n help_group=\"Embedded Beat Options\")\[email protected]('--scheduler',\n cls=CeleryOption,\n help_group=\"Embedded Beat Options\")\[email protected]_context\n@handle_preload_options\ndef worker(ctx, hostname=None, pool_cls=None, app=None, uid=None, gid=None,\n loglevel=None, logfile=None, pidfile=None, statedb=None,\n **kwargs):\n \"\"\"Start worker instance.\n\n \\b\n Examples\n --------\n\n \\b\n $ celery --app=proj worker -l INFO\n $ celery -A proj worker -l INFO -Q hipri,lopri\n $ celery -A proj worker --concurrency=4\n $ celery -A proj worker --concurrency=1000 -P eventlet\n $ celery worker --autoscale=10,0\n\n \"\"\"\n try:\n app = ctx.obj.app\n if ctx.args:\n try:\n app.config_from_cmdline(ctx.args, 
namespace='worker')\n except (KeyError, ValueError) as e:\n # TODO: Improve the error messages\n raise click.UsageError(\n \"Unable to parse extra configuration from command line.\\n\"\n f\"Reason: {e}\", ctx=ctx)\n if kwargs.get('detach', False):\n argv = ['-m', 'celery'] + sys.argv[1:]\n if '--detach' in argv:\n argv.remove('--detach')\n if '-D' in argv:\n argv.remove('-D')\n if \"--uid\" in argv:\n argv.remove('--uid')\n if \"--gid\" in argv:\n argv.remove('--gid')\n\n return detach(sys.executable,\n argv,\n logfile=logfile,\n pidfile=pidfile,\n uid=uid, gid=gid,\n umask=kwargs.get('umask', None),\n workdir=kwargs.get('workdir', None),\n app=app,\n executable=kwargs.get('executable', None),\n hostname=hostname)\n\n maybe_drop_privileges(uid=uid, gid=gid)\n worker = app.Worker(\n hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,\n logfile=logfile, # node format handled by celery.app.log.setup\n pidfile=node_format(pidfile, hostname),\n statedb=node_format(statedb, hostname),\n no_color=ctx.obj.no_color,\n quiet=ctx.obj.quiet,\n **kwargs)\n worker.start()\n ctx.exit(worker.exitcode)\n except SecurityError as e:\n ctx.obj.error(e.args[0])\n ctx.exit(1)\n",
"path": "celery/bin/worker.py"
}
] | diff --git a/celery/bin/worker.py b/celery/bin/worker.py
index 6a4b5533692..9dd1582030e 100644
--- a/celery/bin/worker.py
+++ b/celery/bin/worker.py
@@ -300,8 +300,11 @@ def worker(ctx, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
**kwargs):
"""Start worker instance.
+ \b
Examples
--------
+
+ \b
$ celery --app=proj worker -l INFO
$ celery -A proj worker -l INFO -Q hipri,lopri
$ celery -A proj worker --concurrency=4
|
mathesar-foundation__mathesar-3523 | Demo is broken for 0.1.6
## Description
The demo can't be deployed with the current `master` branch (0.1.6).
The reasons are:
- We didn't add the fix to the demo settings in #3499 (just to prod), and
- We have an extra dependency in the demo setup which we need to build into the image, but the current `Dockerfile` doesn't support that.
## Expected behavior
We should be able to demo version 0.1.6 on our demo site.
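
For the first point, the demo settings were missing the proxy header fix that #3499 added only to the production settings. A one-line sketch of that setting (it is confirmed by the diff below): it tells Django to trust the TLS-terminating proxy's forwarded-protocol header when deciding whether a request is secure.

```python
# demo/settings.py: treat requests as HTTPS when the upstream proxy says so.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
```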
| [
{
"content": "from config.settings.common_settings import * # noqa\nfrom decouple import config as decouple_config\n\nINSTALLED_APPS += [ # noqa\n \"demo\",\n \"health_check\",\n]\n\nMIDDLEWARE += [ # noqa\n \"demo.middleware.LiveDemoModeMiddleware\",\n]\n\nMATHESAR_MODE = 'PRODUCTION'\nMATHESAR_LIVE_DEMO = True\nMATHESAR_LIVE_DEMO_USERNAME = decouple_config('MATHESAR_LIVE_DEMO_USERNAME', default=None)\nMATHESAR_LIVE_DEMO_PASSWORD = decouple_config('MATHESAR_LIVE_DEMO_PASSWORD', default=None)\n\nMATHESAR_DEMO_TEMPLATE = 'mathesar_demo_template'\nMATHESAR_DEMO_ARXIV_LOG_PATH = decouple_config(\n 'MATHESAR_DEMO_ARXIV_LOG_PATH',\n default='/var/lib/mathesar/demo/arxiv_db_schema_log'\n)\nBASE_TEMPLATE_ADDITIONAL_SCRIPT_TEMPLATES += ['demo/analytics.html'] # noqa\nROOT_URLCONF = \"demo.urls\"\n",
"path": "demo/settings.py"
}
] | [
{
"content": "from config.settings.common_settings import * # noqa\nfrom decouple import config as decouple_config\n\nINSTALLED_APPS += [ # noqa\n \"demo\",\n \"health_check\",\n]\n\nMIDDLEWARE += [ # noqa\n \"demo.middleware.LiveDemoModeMiddleware\",\n]\n\n\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\nMATHESAR_MODE = 'PRODUCTION'\nMATHESAR_LIVE_DEMO = True\nMATHESAR_LIVE_DEMO_USERNAME = decouple_config('MATHESAR_LIVE_DEMO_USERNAME', default=None)\nMATHESAR_LIVE_DEMO_PASSWORD = decouple_config('MATHESAR_LIVE_DEMO_PASSWORD', default=None)\n\nMATHESAR_DEMO_TEMPLATE = 'mathesar_demo_template'\nMATHESAR_DEMO_ARXIV_LOG_PATH = decouple_config(\n 'MATHESAR_DEMO_ARXIV_LOG_PATH',\n default='/var/lib/mathesar/demo/arxiv_db_schema_log'\n)\nBASE_TEMPLATE_ADDITIONAL_SCRIPT_TEMPLATES += ['demo/analytics.html'] # noqa\nROOT_URLCONF = \"demo.urls\"\n",
"path": "demo/settings.py"
}
] | diff --git a/Dockerfile b/Dockerfile
index 84b2fb3a9f..49f667b82a 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -87,9 +87,9 @@ EXPOSE 8000 3000 6006
ENTRYPOINT ["./dev-run.sh"]
-#=========== STAGE: PRODUCTION ===============================================#
+#=========== STAGE: COMMON ===================================================#
-FROM base AS production
+from base as common
# Install prod requirements
RUN pip install --no-cache-dir -r requirements-prod.txt
@@ -105,6 +105,23 @@ RUN rm -rf ./mathesar_ui
RUN rm -rf ./mathesar/tests ./db/tests
RUN rm -rf ./docs
+
+#=========== STAGE: DEMO =====================================================#
+
+FROM common AS demo
+
+# Install prod requirements
+RUN pip install --no-cache-dir -r requirements-demo.txt
+
+EXPOSE 8000
+
+ENTRYPOINT ["./run.sh"]
+
+
+#=========== STAGE: PRODUCTION ===============================================#
+
+FROM common AS production
+
EXPOSE 8000
ENTRYPOINT ["./run.sh"]
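With the stages split out as above, the demo image would presumably be built by targeting the new stage, e.g. `docker build --target demo -t mathesar-demo .` (tag name hypothetical), while production builds keep targeting the `production` stage.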
diff --git a/demo/settings.py b/demo/settings.py
index 4ef3245d01..6487340f17 100644
--- a/demo/settings.py
+++ b/demo/settings.py
@@ -10,6 +10,9 @@
"demo.middleware.LiveDemoModeMiddleware",
]
+
+SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
+
MATHESAR_MODE = 'PRODUCTION'
MATHESAR_LIVE_DEMO = True
MATHESAR_LIVE_DEMO_USERNAME = decouple_config('MATHESAR_LIVE_DEMO_USERNAME', default=None)
|
django-helpdesk__django-helpdesk-475 | Unable to create new Queue because of multiple content_type
`get() returned more than one ContentType -- it returned 4!`
This can be reproduced when django-helpdesk is installed from source into an existing Django project that has other apps with one or more models named `queue`.
In my case:
`select * from django_content_type where model = 'queue';` returns 4 records.
This traces to ....../helpdesk/models.py, line 323: `content_type=ContentType.objects.get(model="queue"),`
**My Proposed Solution**
Replace line 323 with `content_type = ContentType.objects.get_for_model(self.__class__)`.
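To illustrate why this helps, a minimal sketch (assuming Django's contenttypes framework and the `Queue` model from this app):

```python
from django.contrib.contenttypes.models import ContentType
from helpdesk.models import Queue

# Ambiguous: looks up by model name only, so every installed app that
# defines a model named "queue" matches and get() raises
# MultipleObjectsReturned -- the error quoted above.
# ContentType.objects.get(model="queue")

# Unambiguous: get_for_model() filters on app_label *and* model (and
# caches the lookup), so it always resolves to helpdesk's own Queue.
content_type = ContentType.objects.get_for_model(Queue)
```

Inside `Queue.save()` the same call takes `self.__class__`, exactly as proposed above.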
| [
{
"content": "\"\"\"\ndjango-helpdesk - A Django powered ticket tracker for small enterprise.\n\n(c) Copyright 2008 Jutda. All Rights Reserved. See LICENSE for details.\n\nmodels.py - Model (and hence database) definitions. This is the core of the\n helpdesk structure.\n\"\"\"\n\nfrom __future__ import unicode_literals\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import models\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _, ugettext\nfrom django.utils.encoding import python_2_unicode_compatible\n\n\n@python_2_unicode_compatible\nclass Queue(models.Model):\n \"\"\"\n A queue is a collection of tickets into what would generally be business\n areas or departments.\n\n For example, a company may have a queue for each Product they provide, or\n a queue for each of Accounts, Pre-Sales, and Support.\n\n \"\"\"\n\n title = models.CharField(\n _('Title'),\n max_length=100,\n )\n\n slug = models.SlugField(\n _('Slug'),\n max_length=50,\n unique=True,\n help_text=_('This slug is used when building ticket ID\\'s. Once set, '\n 'try not to change it or e-mailing may get messy.'),\n )\n\n email_address = models.EmailField(\n _('E-Mail Address'),\n blank=True,\n null=True,\n help_text=_('All outgoing e-mails for this queue will use this e-mail '\n 'address. If you use IMAP or POP3, this should be the e-mail '\n 'address for that mailbox.'),\n )\n\n locale = models.CharField(\n _('Locale'),\n max_length=10,\n blank=True,\n null=True,\n help_text=_('Locale of this queue. All correspondence in this '\n 'queue will be in this language.'),\n )\n\n allow_public_submission = models.BooleanField(\n _('Allow Public Submission?'),\n blank=True,\n default=False,\n help_text=_('Should this queue be listed on the public submission form?'),\n )\n\n allow_email_submission = models.BooleanField(\n _('Allow E-Mail Submission?'),\n blank=True,\n default=False,\n help_text=_('Do you want to poll the e-mail box below for new '\n 'tickets?'),\n )\n\n escalate_days = models.IntegerField(\n _('Escalation Days'),\n blank=True,\n null=True,\n help_text=_('For tickets which are not held, how often do you wish to '\n 'increase their priority? Set to 0 for no escalation.'),\n )\n\n new_ticket_cc = models.CharField(\n _('New Ticket CC Address'),\n blank=True,\n null=True,\n max_length=200,\n help_text=_('If an e-mail address is entered here, then it will '\n 'receive notification of all new tickets created for this queue. '\n 'Enter a comma between multiple e-mail addresses.'),\n )\n\n updated_ticket_cc = models.CharField(\n _('Updated Ticket CC Address'),\n blank=True,\n null=True,\n max_length=200,\n help_text=_('If an e-mail address is entered here, then it will '\n 'receive notification of all activity (new tickets, closed '\n 'tickets, updates, reassignments, etc) for this queue. 
Separate '\n 'multiple addresses with a comma.'),\n )\n\n email_box_type = models.CharField(\n _('E-Mail Box Type'),\n max_length=5,\n choices=(('pop3', _('POP 3')), ('imap', _('IMAP')), ('local', _('Local Directory'))),\n blank=True,\n null=True,\n help_text=_('E-Mail server type for creating tickets automatically '\n 'from a mailbox - both POP3 and IMAP are supported, as well as '\n 'reading from a local directory.'),\n )\n\n email_box_host = models.CharField(\n _('E-Mail Hostname'),\n max_length=200,\n blank=True,\n null=True,\n help_text=_('Your e-mail server address - either the domain name or '\n 'IP address. May be \"localhost\".'),\n )\n\n email_box_port = models.IntegerField(\n _('E-Mail Port'),\n blank=True,\n null=True,\n help_text=_('Port number to use for accessing e-mail. Default for '\n 'POP3 is \"110\", and for IMAP is \"143\". This may differ on some '\n 'servers. Leave it blank to use the defaults.'),\n )\n\n email_box_ssl = models.BooleanField(\n _('Use SSL for E-Mail?'),\n blank=True,\n default=False,\n help_text=_('Whether to use SSL for IMAP or POP3 - the default ports '\n 'when using SSL are 993 for IMAP and 995 for POP3.'),\n )\n\n email_box_user = models.CharField(\n _('E-Mail Username'),\n max_length=200,\n blank=True,\n null=True,\n help_text=_('Username for accessing this mailbox.'),\n )\n\n email_box_pass = models.CharField(\n _('E-Mail Password'),\n max_length=200,\n blank=True,\n null=True,\n help_text=_('Password for the above username'),\n )\n\n email_box_imap_folder = models.CharField(\n _('IMAP Folder'),\n max_length=100,\n blank=True,\n null=True,\n help_text=_('If using IMAP, what folder do you wish to fetch messages '\n 'from? This allows you to use one IMAP account for multiple '\n 'queues, by filtering messages on your IMAP server into separate '\n 'folders. Default: INBOX.'),\n )\n\n email_box_local_dir = models.CharField(\n _('E-Mail Local Directory'),\n max_length=200,\n blank=True,\n null=True,\n help_text=_('If using a local directory, what directory path do you '\n 'wish to poll for new email? '\n 'Example: /var/lib/mail/helpdesk/'),\n )\n\n permission_name = models.CharField(\n _('Django auth permission name'),\n max_length=50,\n blank=True,\n null=True,\n editable=False,\n help_text=_('Name used in the django.contrib.auth permission system'),\n )\n\n email_box_interval = models.IntegerField(\n _('E-Mail Check Interval'),\n help_text=_('How often do you wish to check this mailbox? (in Minutes)'),\n blank=True,\n null=True,\n default='5',\n )\n\n email_box_last_check = models.DateTimeField(\n blank=True,\n null=True,\n editable=False,\n # This is updated by management/commands/get_mail.py.\n )\n\n socks_proxy_type = models.CharField(\n _('Socks Proxy Type'),\n max_length=8,\n choices=(('socks4', _('SOCKS4')), ('socks5', _('SOCKS5'))),\n blank=True,\n null=True,\n help_text=_('SOCKS4 or SOCKS5 allows you to proxy your connections through a SOCKS server.'),\n )\n\n socks_proxy_host = models.GenericIPAddressField(\n _('Socks Proxy Host'),\n blank=True,\n null=True,\n help_text=_('Socks proxy IP address. Default: 127.0.0.1'),\n )\n\n socks_proxy_port = models.IntegerField(\n _('Socks Proxy Port'),\n blank=True,\n null=True,\n help_text=_('Socks proxy port number. 
Default: 9150 (default TOR port)'),\n )\n\n logging_type = models.CharField(\n _('Logging Type'),\n max_length=5,\n choices=(\n ('none', _('None')),\n ('debug', _('Debug')),\n ('info', _('Information')),\n ('warn', _('Warning')),\n ('error', _('Error')),\n ('crit', _('Critical'))\n ),\n blank=True,\n null=True,\n help_text=_('Set the default logging level. All messages at that '\n 'level or above will be logged to the directory set '\n 'below. If no level is set, logging will be disabled.'),\n )\n\n logging_dir = models.CharField(\n _('Logging Directory'),\n max_length=200,\n blank=True,\n null=True,\n help_text=_('If logging is enabled, what directory should we use to '\n 'store log files for this queue? '\n 'If no directory is set, default to /var/log/helpdesk/'),\n )\n\n default_owner = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n related_name='default_owner',\n blank=True,\n null=True,\n verbose_name=_('Default owner'),\n )\n\n def __str__(self):\n return \"%s\" % self.title\n\n class Meta:\n ordering = ('title',)\n verbose_name = _('Queue')\n verbose_name_plural = _('Queues')\n\n def _from_address(self):\n \"\"\"\n Short property to provide a sender address in SMTP format,\n eg 'Name <email>'. We do this so we can put a simple error message\n in the sender name field, so hopefully the admin can see and fix it.\n \"\"\"\n if not self.email_address:\n return u'NO QUEUE EMAIL ADDRESS DEFINED <%s>' % settings.DEFAULT_FROM_EMAIL\n else:\n return u'%s <%s>' % (self.title, self.email_address)\n from_address = property(_from_address)\n\n def prepare_permission_name(self):\n \"\"\"Prepare internally the codename for the permission and store it in permission_name.\n :return: The codename that can be used to create a new Permission object.\n \"\"\"\n # Prepare the permission associated to this Queue\n basename = \"queue_access_%s\" % self.slug\n self.permission_name = \"helpdesk.%s\" % basename\n return basename\n\n def save(self, *args, **kwargs):\n if self.email_box_type == 'imap' and not self.email_box_imap_folder:\n self.email_box_imap_folder = 'INBOX'\n\n if self.socks_proxy_type:\n if not self.socks_proxy_host:\n self.socks_proxy_host = '127.0.0.1'\n if not self.socks_proxy_port:\n self.socks_proxy_port = 9150\n else:\n self.socks_proxy_host = None\n self.socks_proxy_port = None\n\n if not self.email_box_port:\n if self.email_box_type == 'imap' and self.email_box_ssl:\n self.email_box_port = 993\n elif self.email_box_type == 'imap' and not self.email_box_ssl:\n self.email_box_port = 143\n elif self.email_box_type == 'pop3' and self.email_box_ssl:\n self.email_box_port = 995\n elif self.email_box_type == 'pop3' and not self.email_box_ssl:\n self.email_box_port = 110\n\n if not self.id:\n # Prepare the permission codename and the permission\n # (even if they are not needed with the current configuration)\n basename = self.prepare_permission_name()\n\n Permission.objects.create(\n name=_(\"Permission for queue: \") + self.title,\n content_type=ContentType.objects.get(model=\"queue\"),\n codename=basename,\n )\n\n super(Queue, self).save(*args, **kwargs)\n\n def delete(self, *args, **kwargs):\n permission_name = self.permission_name\n super(Queue, self).delete(*args, **kwargs)\n\n # once the Queue is safely deleted, remove the permission (if exists)\n if permission_name:\n try:\n p = Permission.objects.get(codename=permission_name[9:])\n p.delete()\n except ObjectDoesNotExist:\n pass\n\n\nclass Ticket(models.Model):\n \"\"\"\n To allow a ticket to be entered as quickly as possible, only 
the\n bare minimum fields are required. These basically allow us to\n sort and manage the ticket. The user can always go back and\n enter more information later.\n\n A good example of this is when a customer is on the phone, and\n you want to give them a ticket ID as quickly as possible. You can\n enter some basic info, save the ticket, give the customer the ID\n and get off the phone, then add in further detail at a later time\n (once the customer is not on the line).\n\n Note that assigned_to is optional - unassigned tickets are displayed on\n the dashboard to prompt users to take ownership of them.\n \"\"\"\n\n OPEN_STATUS = 1\n REOPENED_STATUS = 2\n RESOLVED_STATUS = 3\n CLOSED_STATUS = 4\n DUPLICATE_STATUS = 5\n\n STATUS_CHOICES = (\n (OPEN_STATUS, _('Open')),\n (REOPENED_STATUS, _('Reopened')),\n (RESOLVED_STATUS, _('Resolved')),\n (CLOSED_STATUS, _('Closed')),\n (DUPLICATE_STATUS, _('Duplicate')),\n )\n\n PRIORITY_CHOICES = (\n (1, _('1. Critical')),\n (2, _('2. High')),\n (3, _('3. Normal')),\n (4, _('4. Low')),\n (5, _('5. Very Low')),\n )\n\n title = models.CharField(\n _('Title'),\n max_length=200,\n )\n\n queue = models.ForeignKey(\n Queue,\n verbose_name=_('Queue'),\n )\n\n created = models.DateTimeField(\n _('Created'),\n blank=True,\n help_text=_('Date this ticket was first created'),\n )\n\n modified = models.DateTimeField(\n _('Modified'),\n blank=True,\n help_text=_('Date this ticket was most recently changed.'),\n )\n\n submitter_email = models.EmailField(\n _('Submitter E-Mail'),\n blank=True,\n null=True,\n help_text=_('The submitter will receive an email for all public '\n 'follow-ups left for this task.'),\n )\n\n assigned_to = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n related_name='assigned_to',\n blank=True,\n null=True,\n verbose_name=_('Assigned to'),\n )\n\n status = models.IntegerField(\n _('Status'),\n choices=STATUS_CHOICES,\n default=OPEN_STATUS,\n )\n\n on_hold = models.BooleanField(\n _('On Hold'),\n blank=True,\n default=False,\n help_text=_('If a ticket is on hold, it will not automatically be escalated.'),\n )\n\n description = models.TextField(\n _('Description'),\n blank=True,\n null=True,\n help_text=_('The content of the customers query.'),\n )\n\n resolution = models.TextField(\n _('Resolution'),\n blank=True,\n null=True,\n help_text=_('The resolution provided to the customer by our staff.'),\n )\n\n priority = models.IntegerField(\n _('Priority'),\n choices=PRIORITY_CHOICES,\n default=3,\n blank=3,\n help_text=_('1 = Highest Priority, 5 = Low Priority'),\n )\n\n due_date = models.DateTimeField(\n _('Due on'),\n blank=True,\n null=True,\n )\n\n last_escalation = models.DateTimeField(\n blank=True,\n null=True,\n editable=False,\n help_text=_('The date this ticket was last escalated - updated '\n 'automatically by management/commands/escalate_tickets.py.'),\n )\n\n def _get_assigned_to(self):\n \"\"\" Custom property to allow us to easily print 'Unassigned' if a\n ticket has no owner, or the users name if it's assigned. If the user\n has a full name configured, we use that, otherwise their username. \"\"\"\n if not self.assigned_to:\n return _('Unassigned')\n else:\n if self.assigned_to.get_full_name():\n return self.assigned_to.get_full_name()\n else:\n return self.assigned_to.get_username()\n get_assigned_to = property(_get_assigned_to)\n\n def _get_ticket(self):\n \"\"\" A user-friendly ticket ID, which is a combination of ticket ID\n and queue slug. This is generally used in e-mail subjects. 
\"\"\"\n\n return u\"[%s]\" % self.ticket_for_url\n ticket = property(_get_ticket)\n\n def _get_ticket_for_url(self):\n \"\"\" A URL-friendly ticket ID, used in links. \"\"\"\n return u\"%s-%s\" % (self.queue.slug, self.id)\n ticket_for_url = property(_get_ticket_for_url)\n\n def _get_priority_css_class(self):\n \"\"\"\n Return the boostrap class corresponding to the priority.\n \"\"\"\n if self.priority == 2:\n return \"warning\"\n elif self.priority == 1:\n return \"danger\"\n elif self.priority == 5:\n return \"success\"\n else:\n return \"\"\n get_priority_css_class = property(_get_priority_css_class)\n\n def _get_status(self):\n \"\"\"\n Displays the ticket status, with an \"On Hold\" message if needed.\n \"\"\"\n held_msg = ''\n if self.on_hold:\n held_msg = _(' - On Hold')\n dep_msg = ''\n if not self.can_be_resolved:\n dep_msg = _(' - Open dependencies')\n return u'%s%s%s' % (self.get_status_display(), held_msg, dep_msg)\n get_status = property(_get_status)\n\n def _get_ticket_url(self):\n \"\"\"\n Returns a publicly-viewable URL for this ticket, used when giving\n a URL to the submitter of a ticket.\n \"\"\"\n from django.contrib.sites.models import Site\n from django.core.urlresolvers import reverse\n try:\n site = Site.objects.get_current()\n except:\n site = Site(domain='configure-django-sites.com')\n return u\"http://%s%s?ticket=%s&email=%s\" % (\n site.domain,\n reverse('helpdesk:public_view'),\n self.ticket_for_url,\n self.submitter_email\n )\n ticket_url = property(_get_ticket_url)\n\n def _get_staff_url(self):\n \"\"\"\n Returns a staff-only URL for this ticket, used when giving a URL to\n a staff member (in emails etc)\n \"\"\"\n from django.contrib.sites.models import Site\n from django.core.urlresolvers import reverse\n try:\n site = Site.objects.get_current()\n except:\n site = Site(domain='configure-django-sites.com')\n return u\"http://%s%s\" % (\n site.domain,\n reverse('helpdesk:view',\n args=[self.id])\n )\n staff_url = property(_get_staff_url)\n\n def _can_be_resolved(self):\n \"\"\"\n Returns a boolean.\n True = any dependencies are resolved\n False = There are non-resolved dependencies\n \"\"\"\n OPEN_STATUSES = (Ticket.OPEN_STATUS, Ticket.REOPENED_STATUS)\n return TicketDependency.objects.filter(ticket=self).filter(\n depends_on__status__in=OPEN_STATUSES).count() == 0\n can_be_resolved = property(_can_be_resolved)\n\n class Meta:\n get_latest_by = \"created\"\n ordering = ('id',)\n verbose_name = _('Ticket')\n verbose_name_plural = _('Tickets')\n\n def __str__(self):\n return '%s %s' % (self.id, self.title)\n\n def get_absolute_url(self):\n return 'helpdesk:view', (self.id,)\n get_absolute_url = models.permalink(get_absolute_url)\n\n def save(self, *args, **kwargs):\n if not self.id:\n # This is a new ticket as no ID yet exists.\n self.created = timezone.now()\n\n if not self.priority:\n self.priority = 3\n\n self.modified = timezone.now()\n\n super(Ticket, self).save(*args, **kwargs)\n\n @staticmethod\n def queue_and_id_from_query(query):\n # Apply the opposite logic here compared to self._get_ticket_for_url\n # Ensure that queues with '-' in them will work\n parts = query.split('-')\n queue = '-'.join(parts[0:-1])\n return queue, parts[-1]\n\n\nclass FollowUpManager(models.Manager):\n\n def private_followups(self):\n return self.filter(public=False)\n\n def public_followups(self):\n return self.filter(public=True)\n\n\n@python_2_unicode_compatible\nclass FollowUp(models.Model):\n \"\"\"\n A FollowUp is a comment and/or change to a ticket. 
We keep a simple\n title, the comment entered by the user, and the new status of a ticket\n to enable easy flagging of details on the view-ticket page.\n\n The title is automatically generated at save-time, based on what action\n the user took.\n\n Tickets that aren't public are never shown to or e-mailed to the submitter,\n although all staff can see them.\n \"\"\"\n\n ticket = models.ForeignKey(\n Ticket,\n verbose_name=_('Ticket'),\n )\n\n date = models.DateTimeField(\n _('Date'),\n default=timezone.now\n )\n\n title = models.CharField(\n _('Title'),\n max_length=200,\n blank=True,\n null=True,\n )\n\n comment = models.TextField(\n _('Comment'),\n blank=True,\n null=True,\n )\n\n public = models.BooleanField(\n _('Public'),\n blank=True,\n default=False,\n help_text=_('Public tickets are viewable by the submitter and all '\n 'staff, but non-public tickets can only be seen by staff.'),\n )\n\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n blank=True,\n null=True,\n verbose_name=_('User'),\n )\n\n new_status = models.IntegerField(\n _('New Status'),\n choices=Ticket.STATUS_CHOICES,\n blank=True,\n null=True,\n help_text=_('If the status was changed, what was it changed to?'),\n )\n\n objects = FollowUpManager()\n\n class Meta:\n ordering = ('date',)\n verbose_name = _('Follow-up')\n verbose_name_plural = _('Follow-ups')\n\n def __str__(self):\n return '%s' % self.title\n\n def get_absolute_url(self):\n return u\"%s#followup%s\" % (self.ticket.get_absolute_url(), self.id)\n\n def save(self, *args, **kwargs):\n t = self.ticket\n t.modified = timezone.now()\n t.save()\n super(FollowUp, self).save(*args, **kwargs)\n\n\n@python_2_unicode_compatible\nclass TicketChange(models.Model):\n \"\"\"\n For each FollowUp, any changes to the parent ticket (eg Title, Priority,\n etc) are tracked here for display purposes.\n \"\"\"\n\n followup = models.ForeignKey(\n FollowUp,\n verbose_name=_('Follow-up'),\n )\n\n field = models.CharField(\n _('Field'),\n max_length=100,\n )\n\n old_value = models.TextField(\n _('Old Value'),\n blank=True,\n null=True,\n )\n\n new_value = models.TextField(\n _('New Value'),\n blank=True,\n null=True,\n )\n\n def __str__(self):\n out = '%s ' % self.field\n if not self.new_value:\n out += ugettext('removed')\n elif not self.old_value:\n out += ugettext('set to %s') % self.new_value\n else:\n out += ugettext('changed from \"%(old_value)s\" to \"%(new_value)s\"') % {\n 'old_value': self.old_value,\n 'new_value': self.new_value\n }\n return out\n\n class Meta:\n verbose_name = _('Ticket change')\n verbose_name_plural = _('Ticket changes')\n\n\ndef attachment_path(instance, filename):\n \"\"\"\n Provide a file path that will help prevent files being overwritten, by\n putting attachments in a folder off attachments for ticket/followup_id/.\n \"\"\"\n import os\n os.umask(0)\n path = 'helpdesk/attachments/%s/%s' % (instance.followup.ticket.ticket_for_url, instance.followup.id)\n att_path = os.path.join(settings.MEDIA_ROOT, path)\n if settings.DEFAULT_FILE_STORAGE == \"django.core.files.storage.FileSystemStorage\":\n if not os.path.exists(att_path):\n os.makedirs(att_path, 0o777)\n return os.path.join(path, filename)\n\n\n@python_2_unicode_compatible\nclass Attachment(models.Model):\n \"\"\"\n Represents a file attached to a follow-up. 
This could come from an e-mail\n attachment, or it could be uploaded via the web interface.\n \"\"\"\n\n followup = models.ForeignKey(\n FollowUp,\n verbose_name=_('Follow-up'),\n )\n\n file = models.FileField(\n _('File'),\n upload_to=attachment_path,\n max_length=1000,\n )\n\n filename = models.CharField(\n _('Filename'),\n max_length=1000,\n )\n\n mime_type = models.CharField(\n _('MIME Type'),\n max_length=255,\n )\n\n size = models.IntegerField(\n _('Size'),\n help_text=_('Size of this file in bytes'),\n )\n\n def __str__(self):\n return '%s' % self.filename\n\n class Meta:\n ordering = ('filename',)\n verbose_name = _('Attachment')\n verbose_name_plural = _('Attachments')\n\n\n@python_2_unicode_compatible\nclass PreSetReply(models.Model):\n \"\"\"\n We can allow the admin to define a number of pre-set replies, used to\n simplify the sending of updates and resolutions. These are basically Django\n templates with a limited context - however if you wanted to get crafy it would\n be easy to write a reply that displays ALL updates in hierarchical order etc\n with use of for loops over {{ ticket.followup_set.all }} and friends.\n\n When replying to a ticket, the user can select any reply set for the current\n queue, and the body text is fetched via AJAX.\n \"\"\"\n class Meta:\n ordering = ('name',)\n verbose_name = _('Pre-set reply')\n verbose_name_plural = _('Pre-set replies')\n\n queues = models.ManyToManyField(\n Queue,\n blank=True,\n help_text=_('Leave blank to allow this reply to be used for all '\n 'queues, or select those queues you wish to limit this reply to.'),\n )\n\n name = models.CharField(\n _('Name'),\n max_length=100,\n help_text=_('Only used to assist users with selecting a reply - not '\n 'shown to the user.'),\n )\n\n body = models.TextField(\n _('Body'),\n help_text=_('Context available: {{ ticket }} - ticket object (eg '\n '{{ ticket.title }}); {{ queue }} - The queue; and {{ user }} '\n '- the current user.'),\n )\n\n def __str__(self):\n return '%s' % self.name\n\n\n@python_2_unicode_compatible\nclass EscalationExclusion(models.Model):\n \"\"\"\n An 'EscalationExclusion' lets us define a date on which escalation should\n not happen, for example a weekend or public holiday.\n\n You may also have a queue that is only used on one day per week.\n\n To create these on a regular basis, check out the README file for an\n example cronjob that runs 'create_escalation_exclusions.py'.\n \"\"\"\n\n queues = models.ManyToManyField(\n Queue,\n blank=True,\n help_text=_('Leave blank for this exclusion to be applied to all queues, '\n 'or select those queues you wish to exclude with this entry.'),\n )\n\n name = models.CharField(\n _('Name'),\n max_length=100,\n )\n\n date = models.DateField(\n _('Date'),\n help_text=_('Date on which escalation should not happen'),\n )\n\n def __str__(self):\n return '%s' % self.name\n\n class Meta:\n verbose_name = _('Escalation exclusion')\n verbose_name_plural = _('Escalation exclusions')\n\n\n@python_2_unicode_compatible\nclass EmailTemplate(models.Model):\n \"\"\"\n Since these are more likely to be changed than other templates, we store\n them in the database.\n\n This means that an admin can change email templates without having to have\n access to the filesystem.\n \"\"\"\n\n template_name = models.CharField(\n _('Template Name'),\n max_length=100,\n )\n\n subject = models.CharField(\n _('Subject'),\n max_length=100,\n help_text=_('This will be prefixed with \"[ticket.ticket] ticket.title\"'\n '. 
We recommend something simple such as \"(Updated\") or \"(Closed)\"'\n ' - the same context is available as in plain_text, below.'),\n )\n\n heading = models.CharField(\n _('Heading'),\n max_length=100,\n help_text=_('In HTML e-mails, this will be the heading at the top of '\n 'the email - the same context is available as in plain_text, '\n 'below.'),\n )\n\n plain_text = models.TextField(\n _('Plain Text'),\n help_text=_('The context available to you includes {{ ticket }}, '\n '{{ queue }}, and depending on the time of the call: '\n '{{ resolution }} or {{ comment }}.'),\n )\n\n html = models.TextField(\n _('HTML'),\n help_text=_('The same context is available here as in plain_text, above.'),\n )\n\n locale = models.CharField(\n _('Locale'),\n max_length=10,\n blank=True,\n null=True,\n help_text=_('Locale of this template.'),\n )\n\n def __str__(self):\n return '%s' % self.template_name\n\n class Meta:\n ordering = ('template_name', 'locale')\n verbose_name = _('e-mail template')\n verbose_name_plural = _('e-mail templates')\n\n\n@python_2_unicode_compatible\nclass KBCategory(models.Model):\n \"\"\"\n Lets help users help themselves: the Knowledge Base is a categorised\n listing of questions & answers.\n \"\"\"\n\n title = models.CharField(\n _('Title'),\n max_length=100,\n )\n\n slug = models.SlugField(\n _('Slug'),\n )\n\n description = models.TextField(\n _('Description'),\n )\n\n def __str__(self):\n return '%s' % self.title\n\n class Meta:\n ordering = ('title',)\n verbose_name = _('Knowledge base category')\n verbose_name_plural = _('Knowledge base categories')\n\n def get_absolute_url(self):\n return 'helpdesk:kb_category', (), {'slug': self.slug}\n get_absolute_url = models.permalink(get_absolute_url)\n\n\n@python_2_unicode_compatible\nclass KBItem(models.Model):\n \"\"\"\n An item within the knowledgebase. Very straightforward question/answer\n style system.\n \"\"\"\n category = models.ForeignKey(\n KBCategory,\n verbose_name=_('Category'),\n )\n\n title = models.CharField(\n _('Title'),\n max_length=100,\n )\n\n question = models.TextField(\n _('Question'),\n )\n\n answer = models.TextField(\n _('Answer'),\n )\n\n votes = models.IntegerField(\n _('Votes'),\n help_text=_('Total number of votes cast for this item'),\n default=0,\n )\n\n recommendations = models.IntegerField(\n _('Positive Votes'),\n help_text=_('Number of votes for this item which were POSITIVE.'),\n default=0,\n )\n\n last_updated = models.DateTimeField(\n _('Last Updated'),\n help_text=_('The date on which this question was most recently changed.'),\n blank=True,\n )\n\n def save(self, *args, **kwargs):\n if not self.last_updated:\n self.last_updated = timezone.now()\n return super(KBItem, self).save(*args, **kwargs)\n\n def _score(self):\n if self.votes > 0:\n return int(self.recommendations / self.votes)\n else:\n return _('Unrated')\n score = property(_score)\n\n def __str__(self):\n return '%s' % self.title\n\n class Meta:\n ordering = ('title',)\n verbose_name = _('Knowledge base item')\n verbose_name_plural = _('Knowledge base items')\n\n def get_absolute_url(self):\n return 'helpdesk:kb_item', (self.id,)\n get_absolute_url = models.permalink(get_absolute_url)\n\n\n@python_2_unicode_compatible\nclass SavedSearch(models.Model):\n \"\"\"\n Allow a user to save a ticket search, eg their filtering and sorting\n options, and optionally share it with other users. 
This lets people\n easily create a set of commonly-used filters, such as:\n * My tickets waiting on me\n * My tickets waiting on submitter\n * My tickets in 'Priority Support' queue with priority of 1\n * All tickets containing the word 'billing'.\n etc...\n \"\"\"\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n verbose_name=_('User'),\n )\n\n title = models.CharField(\n _('Query Name'),\n max_length=100,\n help_text=_('User-provided name for this query'),\n )\n\n shared = models.BooleanField(\n _('Shared With Other Users?'),\n blank=True,\n default=False,\n help_text=_('Should other users see this query?'),\n )\n\n query = models.TextField(\n _('Search Query'),\n help_text=_('Pickled query object. Be wary changing this.'),\n )\n\n def __str__(self):\n if self.shared:\n return '%s (*)' % self.title\n else:\n return '%s' % self.title\n\n class Meta:\n verbose_name = _('Saved search')\n verbose_name_plural = _('Saved searches')\n\n\n@python_2_unicode_compatible\nclass UserSettings(models.Model):\n \"\"\"\n A bunch of user-specific settings that we want to be able to define, such\n as notification preferences and other things that should probably be\n configurable.\n\n We should always refer to user.usersettings.settings['setting_name'].\n \"\"\"\n\n user = models.OneToOneField(settings.AUTH_USER_MODEL)\n\n settings_pickled = models.TextField(\n _('Settings Dictionary'),\n help_text=_('This is a base64-encoded representation of a pickled Python dictionary. '\n 'Do not change this field via the admin.'),\n blank=True,\n null=True,\n )\n\n def _set_settings(self, data):\n # data should always be a Python dictionary.\n try:\n import pickle\n except ImportError:\n import cPickle as pickle\n from helpdesk.lib import b64encode\n self.settings_pickled = b64encode(pickle.dumps(data))\n\n def _get_settings(self):\n # return a python dictionary representing the pickled data.\n try:\n import pickle\n except ImportError:\n import cPickle as pickle\n from helpdesk.lib import b64decode\n try:\n return pickle.loads(b64decode(str(self.settings_pickled)))\n except pickle.UnpicklingError:\n return {}\n\n settings = property(_get_settings, _set_settings)\n\n def __str__(self):\n return 'Preferences for %s' % self.user\n\n class Meta:\n verbose_name = _('User Setting')\n verbose_name_plural = _('User Settings')\n\n\ndef create_usersettings(sender, instance, created, **kwargs):\n \"\"\"\n Helper function to create UserSettings instances as\n required, eg when we first create the UserSettings database\n table via 'syncdb' or when we save a new user.\n\n If we end up with users with no UserSettings, then we get horrible\n 'DoesNotExist: UserSettings matching query does not exist.' 
errors.\n \"\"\"\n from helpdesk.settings import DEFAULT_USER_SETTINGS\n if created:\n UserSettings.objects.create(user=instance, settings=DEFAULT_USER_SETTINGS)\n\nmodels.signals.post_save.connect(create_usersettings, sender=settings.AUTH_USER_MODEL)\n\n\n@python_2_unicode_compatible\nclass IgnoreEmail(models.Model):\n \"\"\"\n This model lets us easily ignore e-mails from certain senders when\n processing IMAP and POP3 mailboxes, eg mails from postmaster or from\n known trouble-makers.\n \"\"\"\n class Meta:\n verbose_name = _('Ignored e-mail address')\n verbose_name_plural = _('Ignored e-mail addresses')\n\n queues = models.ManyToManyField(\n Queue,\n blank=True,\n help_text=_('Leave blank for this e-mail to be ignored on all queues, '\n 'or select those queues you wish to ignore this e-mail for.'),\n )\n\n name = models.CharField(\n _('Name'),\n max_length=100,\n )\n\n date = models.DateField(\n _('Date'),\n help_text=_('Date on which this e-mail address was added'),\n blank=True,\n editable=False\n )\n\n email_address = models.CharField(\n _('E-Mail Address'),\n max_length=150,\n help_text=_('Enter a full e-mail address, or portions with '\n 'wildcards, eg *@domain.com or postmaster@*.'),\n )\n\n keep_in_mailbox = models.BooleanField(\n _('Save Emails in Mailbox?'),\n blank=True,\n default=False,\n help_text=_('Do you want to save emails from this address in the mailbox? '\n 'If this is unticked, emails from this address will be deleted.'),\n )\n\n def __str__(self):\n return '%s' % self.name\n\n def save(self, *args, **kwargs):\n if not self.date:\n self.date = timezone.now()\n return super(IgnoreEmail, self).save(*args, **kwargs)\n\n def test(self, email):\n \"\"\"\n Possible situations:\n 1. Username & Domain both match\n 2. Username is wildcard, domain matches\n 3. Username matches, domain is wildcard\n 4. username & domain are both wildcards\n 5. Other (no match)\n\n 1-4 return True, 5 returns False.\n \"\"\"\n\n own_parts = self.email_address.split(\"@\")\n email_parts = email.split(\"@\")\n\n if self.email_address == email or \\\n own_parts[0] == \"*\" and own_parts[1] == email_parts[1] or \\\n own_parts[1] == \"*\" and own_parts[0] == email_parts[0] or \\\n own_parts[0] == \"*\" and own_parts[1] == \"*\":\n return True\n else:\n return False\n\n\n@python_2_unicode_compatible\nclass TicketCC(models.Model):\n \"\"\"\n Often, there are people who wish to follow a ticket who aren't the\n person who originally submitted it. 
This model provides a way for those\n people to follow a ticket.\n\n In this circumstance, a 'person' could be either an e-mail address or\n an existing system user.\n \"\"\"\n\n ticket = models.ForeignKey(\n Ticket,\n verbose_name=_('Ticket'),\n )\n\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n blank=True,\n null=True,\n help_text=_('User who wishes to receive updates for this ticket.'),\n verbose_name=_('User'),\n )\n\n email = models.EmailField(\n _('E-Mail Address'),\n blank=True,\n null=True,\n help_text=_('For non-user followers, enter their e-mail address'),\n )\n\n can_view = models.BooleanField(\n _('Can View Ticket?'),\n blank=True,\n default=False,\n help_text=_('Can this CC login to view the ticket details?'),\n )\n\n can_update = models.BooleanField(\n _('Can Update Ticket?'),\n blank=True,\n default=False,\n help_text=_('Can this CC login and update the ticket?'),\n )\n\n def _email_address(self):\n if self.user and self.user.email is not None:\n return self.user.email\n else:\n return self.email\n email_address = property(_email_address)\n\n def _display(self):\n if self.user:\n return self.user\n else:\n return self.email\n display = property(_display)\n\n def __str__(self):\n return '%s for %s' % (self.display, self.ticket.title)\n\n\nclass CustomFieldManager(models.Manager):\n\n def get_queryset(self):\n return super(CustomFieldManager, self).get_queryset().order_by('ordering')\n\n\n@python_2_unicode_compatible\nclass CustomField(models.Model):\n \"\"\"\n Definitions for custom fields that are glued onto each ticket.\n \"\"\"\n\n name = models.SlugField(\n _('Field Name'),\n help_text=_('As used in the database and behind the scenes. '\n 'Must be unique and consist of only lowercase letters with no punctuation.'),\n unique=True,\n )\n\n label = models.CharField(\n _('Label'),\n max_length=30,\n help_text=_('The display label for this field'),\n )\n\n help_text = models.TextField(\n _('Help Text'),\n help_text=_('Shown to the user when editing the ticket'),\n blank=True,\n null=True\n )\n\n DATA_TYPE_CHOICES = (\n ('varchar', _('Character (single line)')),\n ('text', _('Text (multi-line)')),\n ('integer', _('Integer')),\n ('decimal', _('Decimal')),\n ('list', _('List')),\n ('boolean', _('Boolean (checkbox yes/no)')),\n ('date', _('Date')),\n ('time', _('Time')),\n ('datetime', _('Date & Time')),\n ('email', _('E-Mail Address')),\n ('url', _('URL')),\n ('ipaddress', _('IP Address')),\n ('slug', _('Slug')),\n )\n\n data_type = models.CharField(\n _('Data Type'),\n max_length=100,\n help_text=_('Allows you to restrict the data entered into this field'),\n choices=DATA_TYPE_CHOICES,\n )\n\n max_length = models.IntegerField(\n _('Maximum Length (characters)'),\n blank=True,\n null=True,\n )\n\n decimal_places = models.IntegerField(\n _('Decimal Places'),\n help_text=_('Only used for decimal fields'),\n blank=True,\n null=True,\n )\n\n empty_selection_list = models.BooleanField(\n _('Add empty first choice to List?'),\n default=False,\n help_text=_('Only for List: adds an empty first entry to the choices list, '\n 'which enforces that the user makes an active choice.'),\n )\n\n list_values = models.TextField(\n _('List Values'),\n help_text=_('For list fields only. 
Enter one option per line.'),\n blank=True,\n null=True,\n )\n\n ordering = models.IntegerField(\n _('Ordering'),\n help_text=_('Lower numbers are displayed first; higher numbers are listed later'),\n blank=True,\n null=True,\n )\n\n def _choices_as_array(self):\n from django.utils.six import StringIO\n valuebuffer = StringIO(self.list_values)\n choices = [[item.strip(), item.strip()] for item in valuebuffer.readlines()]\n valuebuffer.close()\n return choices\n choices_as_array = property(_choices_as_array)\n\n required = models.BooleanField(\n _('Required?'),\n help_text=_('Does the user have to enter a value for this field?'),\n default=False,\n )\n\n staff_only = models.BooleanField(\n _('Staff Only?'),\n help_text=_('If this is ticked, then the public submission form '\n 'will NOT show this field'),\n default=False,\n )\n\n objects = CustomFieldManager()\n\n def __str__(self):\n return '%s' % self.name\n\n class Meta:\n verbose_name = _('Custom field')\n verbose_name_plural = _('Custom fields')\n\n\n@python_2_unicode_compatible\nclass TicketCustomFieldValue(models.Model):\n ticket = models.ForeignKey(\n Ticket,\n verbose_name=_('Ticket'),\n )\n\n field = models.ForeignKey(\n CustomField,\n verbose_name=_('Field'),\n )\n\n value = models.TextField(blank=True, null=True)\n\n def __str__(self):\n return '%s / %s' % (self.ticket, self.field)\n\n class Meta:\n unique_together = (('ticket', 'field'),)\n verbose_name = _('Ticket custom field value')\n verbose_name_plural = _('Ticket custom field values')\n\n\n@python_2_unicode_compatible\nclass TicketDependency(models.Model):\n \"\"\"\n The ticket identified by `ticket` cannot be resolved until the ticket in `depends_on` has been resolved.\n To help enforce this, a helper function `can_be_resolved` on each Ticket instance checks that\n these have all been resolved.\n \"\"\"\n class Meta:\n unique_together = (('ticket', 'depends_on'),)\n verbose_name = _('Ticket dependency')\n verbose_name_plural = _('Ticket dependencies')\n\n ticket = models.ForeignKey(\n Ticket,\n verbose_name=_('Ticket'),\n related_name='ticketdependency',\n )\n\n depends_on = models.ForeignKey(\n Ticket,\n verbose_name=_('Depends On Ticket'),\n related_name='depends_on',\n )\n\n def __str__(self):\n return '%s / %s' % (self.ticket, self.depends_on)\n",
"path": "helpdesk/models.py"
}
] | [
{
"content": "\"\"\"\ndjango-helpdesk - A Django powered ticket tracker for small enterprise.\n\n(c) Copyright 2008 Jutda. All Rights Reserved. See LICENSE for details.\n\nmodels.py - Model (and hence database) definitions. This is the core of the\n helpdesk structure.\n\"\"\"\n\nfrom __future__ import unicode_literals\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import models\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _, ugettext\nfrom django.utils.encoding import python_2_unicode_compatible\n\n\n@python_2_unicode_compatible\nclass Queue(models.Model):\n \"\"\"\n A queue is a collection of tickets into what would generally be business\n areas or departments.\n\n For example, a company may have a queue for each Product they provide, or\n a queue for each of Accounts, Pre-Sales, and Support.\n\n \"\"\"\n\n title = models.CharField(\n _('Title'),\n max_length=100,\n )\n\n slug = models.SlugField(\n _('Slug'),\n max_length=50,\n unique=True,\n help_text=_('This slug is used when building ticket ID\\'s. Once set, '\n 'try not to change it or e-mailing may get messy.'),\n )\n\n email_address = models.EmailField(\n _('E-Mail Address'),\n blank=True,\n null=True,\n help_text=_('All outgoing e-mails for this queue will use this e-mail '\n 'address. If you use IMAP or POP3, this should be the e-mail '\n 'address for that mailbox.'),\n )\n\n locale = models.CharField(\n _('Locale'),\n max_length=10,\n blank=True,\n null=True,\n help_text=_('Locale of this queue. All correspondence in this '\n 'queue will be in this language.'),\n )\n\n allow_public_submission = models.BooleanField(\n _('Allow Public Submission?'),\n blank=True,\n default=False,\n help_text=_('Should this queue be listed on the public submission form?'),\n )\n\n allow_email_submission = models.BooleanField(\n _('Allow E-Mail Submission?'),\n blank=True,\n default=False,\n help_text=_('Do you want to poll the e-mail box below for new '\n 'tickets?'),\n )\n\n escalate_days = models.IntegerField(\n _('Escalation Days'),\n blank=True,\n null=True,\n help_text=_('For tickets which are not held, how often do you wish to '\n 'increase their priority? Set to 0 for no escalation.'),\n )\n\n new_ticket_cc = models.CharField(\n _('New Ticket CC Address'),\n blank=True,\n null=True,\n max_length=200,\n help_text=_('If an e-mail address is entered here, then it will '\n 'receive notification of all new tickets created for this queue. '\n 'Enter a comma between multiple e-mail addresses.'),\n )\n\n updated_ticket_cc = models.CharField(\n _('Updated Ticket CC Address'),\n blank=True,\n null=True,\n max_length=200,\n help_text=_('If an e-mail address is entered here, then it will '\n 'receive notification of all activity (new tickets, closed '\n 'tickets, updates, reassignments, etc) for this queue. 
Separate '\n 'multiple addresses with a comma.'),\n )\n\n email_box_type = models.CharField(\n _('E-Mail Box Type'),\n max_length=5,\n choices=(('pop3', _('POP 3')), ('imap', _('IMAP')), ('local', _('Local Directory'))),\n blank=True,\n null=True,\n help_text=_('E-Mail server type for creating tickets automatically '\n 'from a mailbox - both POP3 and IMAP are supported, as well as '\n 'reading from a local directory.'),\n )\n\n email_box_host = models.CharField(\n _('E-Mail Hostname'),\n max_length=200,\n blank=True,\n null=True,\n help_text=_('Your e-mail server address - either the domain name or '\n 'IP address. May be \"localhost\".'),\n )\n\n email_box_port = models.IntegerField(\n _('E-Mail Port'),\n blank=True,\n null=True,\n help_text=_('Port number to use for accessing e-mail. Default for '\n 'POP3 is \"110\", and for IMAP is \"143\". This may differ on some '\n 'servers. Leave it blank to use the defaults.'),\n )\n\n email_box_ssl = models.BooleanField(\n _('Use SSL for E-Mail?'),\n blank=True,\n default=False,\n help_text=_('Whether to use SSL for IMAP or POP3 - the default ports '\n 'when using SSL are 993 for IMAP and 995 for POP3.'),\n )\n\n email_box_user = models.CharField(\n _('E-Mail Username'),\n max_length=200,\n blank=True,\n null=True,\n help_text=_('Username for accessing this mailbox.'),\n )\n\n email_box_pass = models.CharField(\n _('E-Mail Password'),\n max_length=200,\n blank=True,\n null=True,\n help_text=_('Password for the above username'),\n )\n\n email_box_imap_folder = models.CharField(\n _('IMAP Folder'),\n max_length=100,\n blank=True,\n null=True,\n help_text=_('If using IMAP, what folder do you wish to fetch messages '\n 'from? This allows you to use one IMAP account for multiple '\n 'queues, by filtering messages on your IMAP server into separate '\n 'folders. Default: INBOX.'),\n )\n\n email_box_local_dir = models.CharField(\n _('E-Mail Local Directory'),\n max_length=200,\n blank=True,\n null=True,\n help_text=_('If using a local directory, what directory path do you '\n 'wish to poll for new email? '\n 'Example: /var/lib/mail/helpdesk/'),\n )\n\n permission_name = models.CharField(\n _('Django auth permission name'),\n max_length=50,\n blank=True,\n null=True,\n editable=False,\n help_text=_('Name used in the django.contrib.auth permission system'),\n )\n\n email_box_interval = models.IntegerField(\n _('E-Mail Check Interval'),\n help_text=_('How often do you wish to check this mailbox? (in Minutes)'),\n blank=True,\n null=True,\n default='5',\n )\n\n email_box_last_check = models.DateTimeField(\n blank=True,\n null=True,\n editable=False,\n # This is updated by management/commands/get_mail.py.\n )\n\n socks_proxy_type = models.CharField(\n _('Socks Proxy Type'),\n max_length=8,\n choices=(('socks4', _('SOCKS4')), ('socks5', _('SOCKS5'))),\n blank=True,\n null=True,\n help_text=_('SOCKS4 or SOCKS5 allows you to proxy your connections through a SOCKS server.'),\n )\n\n socks_proxy_host = models.GenericIPAddressField(\n _('Socks Proxy Host'),\n blank=True,\n null=True,\n help_text=_('Socks proxy IP address. Default: 127.0.0.1'),\n )\n\n socks_proxy_port = models.IntegerField(\n _('Socks Proxy Port'),\n blank=True,\n null=True,\n help_text=_('Socks proxy port number. 
Default: 9150 (default TOR port)'),\n )\n\n logging_type = models.CharField(\n _('Logging Type'),\n max_length=5,\n choices=(\n ('none', _('None')),\n ('debug', _('Debug')),\n ('info', _('Information')),\n ('warn', _('Warning')),\n ('error', _('Error')),\n ('crit', _('Critical'))\n ),\n blank=True,\n null=True,\n help_text=_('Set the default logging level. All messages at that '\n 'level or above will be logged to the directory set '\n 'below. If no level is set, logging will be disabled.'),\n )\n\n logging_dir = models.CharField(\n _('Logging Directory'),\n max_length=200,\n blank=True,\n null=True,\n help_text=_('If logging is enabled, what directory should we use to '\n 'store log files for this queue? '\n 'If no directory is set, default to /var/log/helpdesk/'),\n )\n\n default_owner = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n related_name='default_owner',\n blank=True,\n null=True,\n verbose_name=_('Default owner'),\n )\n\n def __str__(self):\n return \"%s\" % self.title\n\n class Meta:\n ordering = ('title',)\n verbose_name = _('Queue')\n verbose_name_plural = _('Queues')\n\n def _from_address(self):\n \"\"\"\n Short property to provide a sender address in SMTP format,\n eg 'Name <email>'. We do this so we can put a simple error message\n in the sender name field, so hopefully the admin can see and fix it.\n \"\"\"\n if not self.email_address:\n return u'NO QUEUE EMAIL ADDRESS DEFINED <%s>' % settings.DEFAULT_FROM_EMAIL\n else:\n return u'%s <%s>' % (self.title, self.email_address)\n from_address = property(_from_address)\n\n def prepare_permission_name(self):\n \"\"\"Prepare internally the codename for the permission and store it in permission_name.\n :return: The codename that can be used to create a new Permission object.\n \"\"\"\n # Prepare the permission associated to this Queue\n basename = \"queue_access_%s\" % self.slug\n self.permission_name = \"helpdesk.%s\" % basename\n return basename\n\n def save(self, *args, **kwargs):\n if self.email_box_type == 'imap' and not self.email_box_imap_folder:\n self.email_box_imap_folder = 'INBOX'\n\n if self.socks_proxy_type:\n if not self.socks_proxy_host:\n self.socks_proxy_host = '127.0.0.1'\n if not self.socks_proxy_port:\n self.socks_proxy_port = 9150\n else:\n self.socks_proxy_host = None\n self.socks_proxy_port = None\n\n if not self.email_box_port:\n if self.email_box_type == 'imap' and self.email_box_ssl:\n self.email_box_port = 993\n elif self.email_box_type == 'imap' and not self.email_box_ssl:\n self.email_box_port = 143\n elif self.email_box_type == 'pop3' and self.email_box_ssl:\n self.email_box_port = 995\n elif self.email_box_type == 'pop3' and not self.email_box_ssl:\n self.email_box_port = 110\n\n if not self.id:\n # Prepare the permission codename and the permission\n # (even if they are not needed with the current configuration)\n basename = self.prepare_permission_name()\n\n Permission.objects.create(\n name=_(\"Permission for queue: \") + self.title,\n content_type=ContentType.objects.get_for_model(self.__class__),\n codename=basename,\n )\n\n super(Queue, self).save(*args, **kwargs)\n\n def delete(self, *args, **kwargs):\n permission_name = self.permission_name\n super(Queue, self).delete(*args, **kwargs)\n\n # once the Queue is safely deleted, remove the permission (if exists)\n if permission_name:\n try:\n p = Permission.objects.get(codename=permission_name[9:])\n p.delete()\n except ObjectDoesNotExist:\n pass\n\n\nclass Ticket(models.Model):\n \"\"\"\n To allow a ticket to be entered as quickly as 
possible, only the\n bare minimum fields are required. These basically allow us to\n sort and manage the ticket. The user can always go back and\n enter more information later.\n\n A good example of this is when a customer is on the phone, and\n you want to give them a ticket ID as quickly as possible. You can\n enter some basic info, save the ticket, give the customer the ID\n and get off the phone, then add in further detail at a later time\n (once the customer is not on the line).\n\n Note that assigned_to is optional - unassigned tickets are displayed on\n the dashboard to prompt users to take ownership of them.\n \"\"\"\n\n OPEN_STATUS = 1\n REOPENED_STATUS = 2\n RESOLVED_STATUS = 3\n CLOSED_STATUS = 4\n DUPLICATE_STATUS = 5\n\n STATUS_CHOICES = (\n (OPEN_STATUS, _('Open')),\n (REOPENED_STATUS, _('Reopened')),\n (RESOLVED_STATUS, _('Resolved')),\n (CLOSED_STATUS, _('Closed')),\n (DUPLICATE_STATUS, _('Duplicate')),\n )\n\n PRIORITY_CHOICES = (\n (1, _('1. Critical')),\n (2, _('2. High')),\n (3, _('3. Normal')),\n (4, _('4. Low')),\n (5, _('5. Very Low')),\n )\n\n title = models.CharField(\n _('Title'),\n max_length=200,\n )\n\n queue = models.ForeignKey(\n Queue,\n verbose_name=_('Queue'),\n )\n\n created = models.DateTimeField(\n _('Created'),\n blank=True,\n help_text=_('Date this ticket was first created'),\n )\n\n modified = models.DateTimeField(\n _('Modified'),\n blank=True,\n help_text=_('Date this ticket was most recently changed.'),\n )\n\n submitter_email = models.EmailField(\n _('Submitter E-Mail'),\n blank=True,\n null=True,\n help_text=_('The submitter will receive an email for all public '\n 'follow-ups left for this task.'),\n )\n\n assigned_to = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n related_name='assigned_to',\n blank=True,\n null=True,\n verbose_name=_('Assigned to'),\n )\n\n status = models.IntegerField(\n _('Status'),\n choices=STATUS_CHOICES,\n default=OPEN_STATUS,\n )\n\n on_hold = models.BooleanField(\n _('On Hold'),\n blank=True,\n default=False,\n help_text=_('If a ticket is on hold, it will not automatically be escalated.'),\n )\n\n description = models.TextField(\n _('Description'),\n blank=True,\n null=True,\n help_text=_('The content of the customers query.'),\n )\n\n resolution = models.TextField(\n _('Resolution'),\n blank=True,\n null=True,\n help_text=_('The resolution provided to the customer by our staff.'),\n )\n\n priority = models.IntegerField(\n _('Priority'),\n choices=PRIORITY_CHOICES,\n default=3,\n blank=3,\n help_text=_('1 = Highest Priority, 5 = Low Priority'),\n )\n\n due_date = models.DateTimeField(\n _('Due on'),\n blank=True,\n null=True,\n )\n\n last_escalation = models.DateTimeField(\n blank=True,\n null=True,\n editable=False,\n help_text=_('The date this ticket was last escalated - updated '\n 'automatically by management/commands/escalate_tickets.py.'),\n )\n\n def _get_assigned_to(self):\n \"\"\" Custom property to allow us to easily print 'Unassigned' if a\n ticket has no owner, or the users name if it's assigned. If the user\n has a full name configured, we use that, otherwise their username. \"\"\"\n if not self.assigned_to:\n return _('Unassigned')\n else:\n if self.assigned_to.get_full_name():\n return self.assigned_to.get_full_name()\n else:\n return self.assigned_to.get_username()\n get_assigned_to = property(_get_assigned_to)\n\n def _get_ticket(self):\n \"\"\" A user-friendly ticket ID, which is a combination of ticket ID\n and queue slug. This is generally used in e-mail subjects. 
\"\"\"\n\n return u\"[%s]\" % self.ticket_for_url\n ticket = property(_get_ticket)\n\n def _get_ticket_for_url(self):\n \"\"\" A URL-friendly ticket ID, used in links. \"\"\"\n return u\"%s-%s\" % (self.queue.slug, self.id)\n ticket_for_url = property(_get_ticket_for_url)\n\n def _get_priority_css_class(self):\n \"\"\"\n Return the boostrap class corresponding to the priority.\n \"\"\"\n if self.priority == 2:\n return \"warning\"\n elif self.priority == 1:\n return \"danger\"\n elif self.priority == 5:\n return \"success\"\n else:\n return \"\"\n get_priority_css_class = property(_get_priority_css_class)\n\n def _get_status(self):\n \"\"\"\n Displays the ticket status, with an \"On Hold\" message if needed.\n \"\"\"\n held_msg = ''\n if self.on_hold:\n held_msg = _(' - On Hold')\n dep_msg = ''\n if not self.can_be_resolved:\n dep_msg = _(' - Open dependencies')\n return u'%s%s%s' % (self.get_status_display(), held_msg, dep_msg)\n get_status = property(_get_status)\n\n def _get_ticket_url(self):\n \"\"\"\n Returns a publicly-viewable URL for this ticket, used when giving\n a URL to the submitter of a ticket.\n \"\"\"\n from django.contrib.sites.models import Site\n from django.core.urlresolvers import reverse\n try:\n site = Site.objects.get_current()\n except:\n site = Site(domain='configure-django-sites.com')\n return u\"http://%s%s?ticket=%s&email=%s\" % (\n site.domain,\n reverse('helpdesk:public_view'),\n self.ticket_for_url,\n self.submitter_email\n )\n ticket_url = property(_get_ticket_url)\n\n def _get_staff_url(self):\n \"\"\"\n Returns a staff-only URL for this ticket, used when giving a URL to\n a staff member (in emails etc)\n \"\"\"\n from django.contrib.sites.models import Site\n from django.core.urlresolvers import reverse\n try:\n site = Site.objects.get_current()\n except:\n site = Site(domain='configure-django-sites.com')\n return u\"http://%s%s\" % (\n site.domain,\n reverse('helpdesk:view',\n args=[self.id])\n )\n staff_url = property(_get_staff_url)\n\n def _can_be_resolved(self):\n \"\"\"\n Returns a boolean.\n True = any dependencies are resolved\n False = There are non-resolved dependencies\n \"\"\"\n OPEN_STATUSES = (Ticket.OPEN_STATUS, Ticket.REOPENED_STATUS)\n return TicketDependency.objects.filter(ticket=self).filter(\n depends_on__status__in=OPEN_STATUSES).count() == 0\n can_be_resolved = property(_can_be_resolved)\n\n class Meta:\n get_latest_by = \"created\"\n ordering = ('id',)\n verbose_name = _('Ticket')\n verbose_name_plural = _('Tickets')\n\n def __str__(self):\n return '%s %s' % (self.id, self.title)\n\n def get_absolute_url(self):\n return 'helpdesk:view', (self.id,)\n get_absolute_url = models.permalink(get_absolute_url)\n\n def save(self, *args, **kwargs):\n if not self.id:\n # This is a new ticket as no ID yet exists.\n self.created = timezone.now()\n\n if not self.priority:\n self.priority = 3\n\n self.modified = timezone.now()\n\n super(Ticket, self).save(*args, **kwargs)\n\n @staticmethod\n def queue_and_id_from_query(query):\n # Apply the opposite logic here compared to self._get_ticket_for_url\n # Ensure that queues with '-' in them will work\n parts = query.split('-')\n queue = '-'.join(parts[0:-1])\n return queue, parts[-1]\n\n\nclass FollowUpManager(models.Manager):\n\n def private_followups(self):\n return self.filter(public=False)\n\n def public_followups(self):\n return self.filter(public=True)\n\n\n@python_2_unicode_compatible\nclass FollowUp(models.Model):\n \"\"\"\n A FollowUp is a comment and/or change to a ticket. 
We keep a simple\n title, the comment entered by the user, and the new status of a ticket\n to enable easy flagging of details on the view-ticket page.\n\n The title is automatically generated at save-time, based on what action\n the user took.\n\n Tickets that aren't public are never shown to or e-mailed to the submitter,\n although all staff can see them.\n \"\"\"\n\n ticket = models.ForeignKey(\n Ticket,\n verbose_name=_('Ticket'),\n )\n\n date = models.DateTimeField(\n _('Date'),\n default=timezone.now\n )\n\n title = models.CharField(\n _('Title'),\n max_length=200,\n blank=True,\n null=True,\n )\n\n comment = models.TextField(\n _('Comment'),\n blank=True,\n null=True,\n )\n\n public = models.BooleanField(\n _('Public'),\n blank=True,\n default=False,\n help_text=_('Public tickets are viewable by the submitter and all '\n 'staff, but non-public tickets can only be seen by staff.'),\n )\n\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n blank=True,\n null=True,\n verbose_name=_('User'),\n )\n\n new_status = models.IntegerField(\n _('New Status'),\n choices=Ticket.STATUS_CHOICES,\n blank=True,\n null=True,\n help_text=_('If the status was changed, what was it changed to?'),\n )\n\n objects = FollowUpManager()\n\n class Meta:\n ordering = ('date',)\n verbose_name = _('Follow-up')\n verbose_name_plural = _('Follow-ups')\n\n def __str__(self):\n return '%s' % self.title\n\n def get_absolute_url(self):\n return u\"%s#followup%s\" % (self.ticket.get_absolute_url(), self.id)\n\n def save(self, *args, **kwargs):\n t = self.ticket\n t.modified = timezone.now()\n t.save()\n super(FollowUp, self).save(*args, **kwargs)\n\n\n@python_2_unicode_compatible\nclass TicketChange(models.Model):\n \"\"\"\n For each FollowUp, any changes to the parent ticket (eg Title, Priority,\n etc) are tracked here for display purposes.\n \"\"\"\n\n followup = models.ForeignKey(\n FollowUp,\n verbose_name=_('Follow-up'),\n )\n\n field = models.CharField(\n _('Field'),\n max_length=100,\n )\n\n old_value = models.TextField(\n _('Old Value'),\n blank=True,\n null=True,\n )\n\n new_value = models.TextField(\n _('New Value'),\n blank=True,\n null=True,\n )\n\n def __str__(self):\n out = '%s ' % self.field\n if not self.new_value:\n out += ugettext('removed')\n elif not self.old_value:\n out += ugettext('set to %s') % self.new_value\n else:\n out += ugettext('changed from \"%(old_value)s\" to \"%(new_value)s\"') % {\n 'old_value': self.old_value,\n 'new_value': self.new_value\n }\n return out\n\n class Meta:\n verbose_name = _('Ticket change')\n verbose_name_plural = _('Ticket changes')\n\n\ndef attachment_path(instance, filename):\n \"\"\"\n Provide a file path that will help prevent files being overwritten, by\n putting attachments in a folder off attachments for ticket/followup_id/.\n \"\"\"\n import os\n os.umask(0)\n path = 'helpdesk/attachments/%s/%s' % (instance.followup.ticket.ticket_for_url, instance.followup.id)\n att_path = os.path.join(settings.MEDIA_ROOT, path)\n if settings.DEFAULT_FILE_STORAGE == \"django.core.files.storage.FileSystemStorage\":\n if not os.path.exists(att_path):\n os.makedirs(att_path, 0o777)\n return os.path.join(path, filename)\n\n\n@python_2_unicode_compatible\nclass Attachment(models.Model):\n \"\"\"\n Represents a file attached to a follow-up. 
This could come from an e-mail\n attachment, or it could be uploaded via the web interface.\n \"\"\"\n\n followup = models.ForeignKey(\n FollowUp,\n verbose_name=_('Follow-up'),\n )\n\n file = models.FileField(\n _('File'),\n upload_to=attachment_path,\n max_length=1000,\n )\n\n filename = models.CharField(\n _('Filename'),\n max_length=1000,\n )\n\n mime_type = models.CharField(\n _('MIME Type'),\n max_length=255,\n )\n\n size = models.IntegerField(\n _('Size'),\n help_text=_('Size of this file in bytes'),\n )\n\n def __str__(self):\n return '%s' % self.filename\n\n class Meta:\n ordering = ('filename',)\n verbose_name = _('Attachment')\n verbose_name_plural = _('Attachments')\n\n\n@python_2_unicode_compatible\nclass PreSetReply(models.Model):\n \"\"\"\n We can allow the admin to define a number of pre-set replies, used to\n simplify the sending of updates and resolutions. These are basically Django\n templates with a limited context - however if you wanted to get crafy it would\n be easy to write a reply that displays ALL updates in hierarchical order etc\n with use of for loops over {{ ticket.followup_set.all }} and friends.\n\n When replying to a ticket, the user can select any reply set for the current\n queue, and the body text is fetched via AJAX.\n \"\"\"\n class Meta:\n ordering = ('name',)\n verbose_name = _('Pre-set reply')\n verbose_name_plural = _('Pre-set replies')\n\n queues = models.ManyToManyField(\n Queue,\n blank=True,\n help_text=_('Leave blank to allow this reply to be used for all '\n 'queues, or select those queues you wish to limit this reply to.'),\n )\n\n name = models.CharField(\n _('Name'),\n max_length=100,\n help_text=_('Only used to assist users with selecting a reply - not '\n 'shown to the user.'),\n )\n\n body = models.TextField(\n _('Body'),\n help_text=_('Context available: {{ ticket }} - ticket object (eg '\n '{{ ticket.title }}); {{ queue }} - The queue; and {{ user }} '\n '- the current user.'),\n )\n\n def __str__(self):\n return '%s' % self.name\n\n\n@python_2_unicode_compatible\nclass EscalationExclusion(models.Model):\n \"\"\"\n An 'EscalationExclusion' lets us define a date on which escalation should\n not happen, for example a weekend or public holiday.\n\n You may also have a queue that is only used on one day per week.\n\n To create these on a regular basis, check out the README file for an\n example cronjob that runs 'create_escalation_exclusions.py'.\n \"\"\"\n\n queues = models.ManyToManyField(\n Queue,\n blank=True,\n help_text=_('Leave blank for this exclusion to be applied to all queues, '\n 'or select those queues you wish to exclude with this entry.'),\n )\n\n name = models.CharField(\n _('Name'),\n max_length=100,\n )\n\n date = models.DateField(\n _('Date'),\n help_text=_('Date on which escalation should not happen'),\n )\n\n def __str__(self):\n return '%s' % self.name\n\n class Meta:\n verbose_name = _('Escalation exclusion')\n verbose_name_plural = _('Escalation exclusions')\n\n\n@python_2_unicode_compatible\nclass EmailTemplate(models.Model):\n \"\"\"\n Since these are more likely to be changed than other templates, we store\n them in the database.\n\n This means that an admin can change email templates without having to have\n access to the filesystem.\n \"\"\"\n\n template_name = models.CharField(\n _('Template Name'),\n max_length=100,\n )\n\n subject = models.CharField(\n _('Subject'),\n max_length=100,\n help_text=_('This will be prefixed with \"[ticket.ticket] ticket.title\"'\n '. 
We recommend something simple such as \"(Updated\") or \"(Closed)\"'\n ' - the same context is available as in plain_text, below.'),\n )\n\n heading = models.CharField(\n _('Heading'),\n max_length=100,\n help_text=_('In HTML e-mails, this will be the heading at the top of '\n 'the email - the same context is available as in plain_text, '\n 'below.'),\n )\n\n plain_text = models.TextField(\n _('Plain Text'),\n help_text=_('The context available to you includes {{ ticket }}, '\n '{{ queue }}, and depending on the time of the call: '\n '{{ resolution }} or {{ comment }}.'),\n )\n\n html = models.TextField(\n _('HTML'),\n help_text=_('The same context is available here as in plain_text, above.'),\n )\n\n locale = models.CharField(\n _('Locale'),\n max_length=10,\n blank=True,\n null=True,\n help_text=_('Locale of this template.'),\n )\n\n def __str__(self):\n return '%s' % self.template_name\n\n class Meta:\n ordering = ('template_name', 'locale')\n verbose_name = _('e-mail template')\n verbose_name_plural = _('e-mail templates')\n\n\n@python_2_unicode_compatible\nclass KBCategory(models.Model):\n \"\"\"\n Lets help users help themselves: the Knowledge Base is a categorised\n listing of questions & answers.\n \"\"\"\n\n title = models.CharField(\n _('Title'),\n max_length=100,\n )\n\n slug = models.SlugField(\n _('Slug'),\n )\n\n description = models.TextField(\n _('Description'),\n )\n\n def __str__(self):\n return '%s' % self.title\n\n class Meta:\n ordering = ('title',)\n verbose_name = _('Knowledge base category')\n verbose_name_plural = _('Knowledge base categories')\n\n def get_absolute_url(self):\n return 'helpdesk:kb_category', (), {'slug': self.slug}\n get_absolute_url = models.permalink(get_absolute_url)\n\n\n@python_2_unicode_compatible\nclass KBItem(models.Model):\n \"\"\"\n An item within the knowledgebase. Very straightforward question/answer\n style system.\n \"\"\"\n category = models.ForeignKey(\n KBCategory,\n verbose_name=_('Category'),\n )\n\n title = models.CharField(\n _('Title'),\n max_length=100,\n )\n\n question = models.TextField(\n _('Question'),\n )\n\n answer = models.TextField(\n _('Answer'),\n )\n\n votes = models.IntegerField(\n _('Votes'),\n help_text=_('Total number of votes cast for this item'),\n default=0,\n )\n\n recommendations = models.IntegerField(\n _('Positive Votes'),\n help_text=_('Number of votes for this item which were POSITIVE.'),\n default=0,\n )\n\n last_updated = models.DateTimeField(\n _('Last Updated'),\n help_text=_('The date on which this question was most recently changed.'),\n blank=True,\n )\n\n def save(self, *args, **kwargs):\n if not self.last_updated:\n self.last_updated = timezone.now()\n return super(KBItem, self).save(*args, **kwargs)\n\n def _score(self):\n if self.votes > 0:\n return int(self.recommendations / self.votes)\n else:\n return _('Unrated')\n score = property(_score)\n\n def __str__(self):\n return '%s' % self.title\n\n class Meta:\n ordering = ('title',)\n verbose_name = _('Knowledge base item')\n verbose_name_plural = _('Knowledge base items')\n\n def get_absolute_url(self):\n return 'helpdesk:kb_item', (self.id,)\n get_absolute_url = models.permalink(get_absolute_url)\n\n\n@python_2_unicode_compatible\nclass SavedSearch(models.Model):\n \"\"\"\n Allow a user to save a ticket search, eg their filtering and sorting\n options, and optionally share it with other users. 
This lets people\n easily create a set of commonly-used filters, such as:\n * My tickets waiting on me\n * My tickets waiting on submitter\n * My tickets in 'Priority Support' queue with priority of 1\n * All tickets containing the word 'billing'.\n etc...\n \"\"\"\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n verbose_name=_('User'),\n )\n\n title = models.CharField(\n _('Query Name'),\n max_length=100,\n help_text=_('User-provided name for this query'),\n )\n\n shared = models.BooleanField(\n _('Shared With Other Users?'),\n blank=True,\n default=False,\n help_text=_('Should other users see this query?'),\n )\n\n query = models.TextField(\n _('Search Query'),\n help_text=_('Pickled query object. Be wary changing this.'),\n )\n\n def __str__(self):\n if self.shared:\n return '%s (*)' % self.title\n else:\n return '%s' % self.title\n\n class Meta:\n verbose_name = _('Saved search')\n verbose_name_plural = _('Saved searches')\n\n\n@python_2_unicode_compatible\nclass UserSettings(models.Model):\n \"\"\"\n A bunch of user-specific settings that we want to be able to define, such\n as notification preferences and other things that should probably be\n configurable.\n\n We should always refer to user.usersettings.settings['setting_name'].\n \"\"\"\n\n user = models.OneToOneField(settings.AUTH_USER_MODEL)\n\n settings_pickled = models.TextField(\n _('Settings Dictionary'),\n help_text=_('This is a base64-encoded representation of a pickled Python dictionary. '\n 'Do not change this field via the admin.'),\n blank=True,\n null=True,\n )\n\n def _set_settings(self, data):\n # data should always be a Python dictionary.\n try:\n import pickle\n except ImportError:\n import cPickle as pickle\n from helpdesk.lib import b64encode\n self.settings_pickled = b64encode(pickle.dumps(data))\n\n def _get_settings(self):\n # return a python dictionary representing the pickled data.\n try:\n import pickle\n except ImportError:\n import cPickle as pickle\n from helpdesk.lib import b64decode\n try:\n return pickle.loads(b64decode(str(self.settings_pickled)))\n except pickle.UnpicklingError:\n return {}\n\n settings = property(_get_settings, _set_settings)\n\n def __str__(self):\n return 'Preferences for %s' % self.user\n\n class Meta:\n verbose_name = _('User Setting')\n verbose_name_plural = _('User Settings')\n\n\ndef create_usersettings(sender, instance, created, **kwargs):\n \"\"\"\n Helper function to create UserSettings instances as\n required, eg when we first create the UserSettings database\n table via 'syncdb' or when we save a new user.\n\n If we end up with users with no UserSettings, then we get horrible\n 'DoesNotExist: UserSettings matching query does not exist.' 
errors.\n \"\"\"\n from helpdesk.settings import DEFAULT_USER_SETTINGS\n if created:\n UserSettings.objects.create(user=instance, settings=DEFAULT_USER_SETTINGS)\n\nmodels.signals.post_save.connect(create_usersettings, sender=settings.AUTH_USER_MODEL)\n\n\n@python_2_unicode_compatible\nclass IgnoreEmail(models.Model):\n \"\"\"\n This model lets us easily ignore e-mails from certain senders when\n processing IMAP and POP3 mailboxes, eg mails from postmaster or from\n known trouble-makers.\n \"\"\"\n class Meta:\n verbose_name = _('Ignored e-mail address')\n verbose_name_plural = _('Ignored e-mail addresses')\n\n queues = models.ManyToManyField(\n Queue,\n blank=True,\n help_text=_('Leave blank for this e-mail to be ignored on all queues, '\n 'or select those queues you wish to ignore this e-mail for.'),\n )\n\n name = models.CharField(\n _('Name'),\n max_length=100,\n )\n\n date = models.DateField(\n _('Date'),\n help_text=_('Date on which this e-mail address was added'),\n blank=True,\n editable=False\n )\n\n email_address = models.CharField(\n _('E-Mail Address'),\n max_length=150,\n help_text=_('Enter a full e-mail address, or portions with '\n 'wildcards, eg *@domain.com or postmaster@*.'),\n )\n\n keep_in_mailbox = models.BooleanField(\n _('Save Emails in Mailbox?'),\n blank=True,\n default=False,\n help_text=_('Do you want to save emails from this address in the mailbox? '\n 'If this is unticked, emails from this address will be deleted.'),\n )\n\n def __str__(self):\n return '%s' % self.name\n\n def save(self, *args, **kwargs):\n if not self.date:\n self.date = timezone.now()\n return super(IgnoreEmail, self).save(*args, **kwargs)\n\n def test(self, email):\n \"\"\"\n Possible situations:\n 1. Username & Domain both match\n 2. Username is wildcard, domain matches\n 3. Username matches, domain is wildcard\n 4. username & domain are both wildcards\n 5. Other (no match)\n\n 1-4 return True, 5 returns False.\n \"\"\"\n\n own_parts = self.email_address.split(\"@\")\n email_parts = email.split(\"@\")\n\n if self.email_address == email or \\\n own_parts[0] == \"*\" and own_parts[1] == email_parts[1] or \\\n own_parts[1] == \"*\" and own_parts[0] == email_parts[0] or \\\n own_parts[0] == \"*\" and own_parts[1] == \"*\":\n return True\n else:\n return False\n\n\n@python_2_unicode_compatible\nclass TicketCC(models.Model):\n \"\"\"\n Often, there are people who wish to follow a ticket who aren't the\n person who originally submitted it. 
This model provides a way for those\n people to follow a ticket.\n\n In this circumstance, a 'person' could be either an e-mail address or\n an existing system user.\n \"\"\"\n\n ticket = models.ForeignKey(\n Ticket,\n verbose_name=_('Ticket'),\n )\n\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n blank=True,\n null=True,\n help_text=_('User who wishes to receive updates for this ticket.'),\n verbose_name=_('User'),\n )\n\n email = models.EmailField(\n _('E-Mail Address'),\n blank=True,\n null=True,\n help_text=_('For non-user followers, enter their e-mail address'),\n )\n\n can_view = models.BooleanField(\n _('Can View Ticket?'),\n blank=True,\n default=False,\n help_text=_('Can this CC login to view the ticket details?'),\n )\n\n can_update = models.BooleanField(\n _('Can Update Ticket?'),\n blank=True,\n default=False,\n help_text=_('Can this CC login and update the ticket?'),\n )\n\n def _email_address(self):\n if self.user and self.user.email is not None:\n return self.user.email\n else:\n return self.email\n email_address = property(_email_address)\n\n def _display(self):\n if self.user:\n return self.user\n else:\n return self.email\n display = property(_display)\n\n def __str__(self):\n return '%s for %s' % (self.display, self.ticket.title)\n\n\nclass CustomFieldManager(models.Manager):\n\n def get_queryset(self):\n return super(CustomFieldManager, self).get_queryset().order_by('ordering')\n\n\n@python_2_unicode_compatible\nclass CustomField(models.Model):\n \"\"\"\n Definitions for custom fields that are glued onto each ticket.\n \"\"\"\n\n name = models.SlugField(\n _('Field Name'),\n help_text=_('As used in the database and behind the scenes. '\n 'Must be unique and consist of only lowercase letters with no punctuation.'),\n unique=True,\n )\n\n label = models.CharField(\n _('Label'),\n max_length=30,\n help_text=_('The display label for this field'),\n )\n\n help_text = models.TextField(\n _('Help Text'),\n help_text=_('Shown to the user when editing the ticket'),\n blank=True,\n null=True\n )\n\n DATA_TYPE_CHOICES = (\n ('varchar', _('Character (single line)')),\n ('text', _('Text (multi-line)')),\n ('integer', _('Integer')),\n ('decimal', _('Decimal')),\n ('list', _('List')),\n ('boolean', _('Boolean (checkbox yes/no)')),\n ('date', _('Date')),\n ('time', _('Time')),\n ('datetime', _('Date & Time')),\n ('email', _('E-Mail Address')),\n ('url', _('URL')),\n ('ipaddress', _('IP Address')),\n ('slug', _('Slug')),\n )\n\n data_type = models.CharField(\n _('Data Type'),\n max_length=100,\n help_text=_('Allows you to restrict the data entered into this field'),\n choices=DATA_TYPE_CHOICES,\n )\n\n max_length = models.IntegerField(\n _('Maximum Length (characters)'),\n blank=True,\n null=True,\n )\n\n decimal_places = models.IntegerField(\n _('Decimal Places'),\n help_text=_('Only used for decimal fields'),\n blank=True,\n null=True,\n )\n\n empty_selection_list = models.BooleanField(\n _('Add empty first choice to List?'),\n default=False,\n help_text=_('Only for List: adds an empty first entry to the choices list, '\n 'which enforces that the user makes an active choice.'),\n )\n\n list_values = models.TextField(\n _('List Values'),\n help_text=_('For list fields only. 
Enter one option per line.'),\n blank=True,\n null=True,\n )\n\n ordering = models.IntegerField(\n _('Ordering'),\n help_text=_('Lower numbers are displayed first; higher numbers are listed later'),\n blank=True,\n null=True,\n )\n\n def _choices_as_array(self):\n from django.utils.six import StringIO\n valuebuffer = StringIO(self.list_values)\n choices = [[item.strip(), item.strip()] for item in valuebuffer.readlines()]\n valuebuffer.close()\n return choices\n choices_as_array = property(_choices_as_array)\n\n required = models.BooleanField(\n _('Required?'),\n help_text=_('Does the user have to enter a value for this field?'),\n default=False,\n )\n\n staff_only = models.BooleanField(\n _('Staff Only?'),\n help_text=_('If this is ticked, then the public submission form '\n 'will NOT show this field'),\n default=False,\n )\n\n objects = CustomFieldManager()\n\n def __str__(self):\n return '%s' % self.name\n\n class Meta:\n verbose_name = _('Custom field')\n verbose_name_plural = _('Custom fields')\n\n\n@python_2_unicode_compatible\nclass TicketCustomFieldValue(models.Model):\n ticket = models.ForeignKey(\n Ticket,\n verbose_name=_('Ticket'),\n )\n\n field = models.ForeignKey(\n CustomField,\n verbose_name=_('Field'),\n )\n\n value = models.TextField(blank=True, null=True)\n\n def __str__(self):\n return '%s / %s' % (self.ticket, self.field)\n\n class Meta:\n unique_together = (('ticket', 'field'),)\n verbose_name = _('Ticket custom field value')\n verbose_name_plural = _('Ticket custom field values')\n\n\n@python_2_unicode_compatible\nclass TicketDependency(models.Model):\n \"\"\"\n The ticket identified by `ticket` cannot be resolved until the ticket in `depends_on` has been resolved.\n To help enforce this, a helper function `can_be_resolved` on each Ticket instance checks that\n these have all been resolved.\n \"\"\"\n class Meta:\n unique_together = (('ticket', 'depends_on'),)\n verbose_name = _('Ticket dependency')\n verbose_name_plural = _('Ticket dependencies')\n\n ticket = models.ForeignKey(\n Ticket,\n verbose_name=_('Ticket'),\n related_name='ticketdependency',\n )\n\n depends_on = models.ForeignKey(\n Ticket,\n verbose_name=_('Depends On Ticket'),\n related_name='depends_on',\n )\n\n def __str__(self):\n return '%s / %s' % (self.ticket, self.depends_on)\n",
"path": "helpdesk/models.py"
}
] | diff --git a/helpdesk/models.py b/helpdesk/models.py
index c39e733be..4dc28a36e 100644
--- a/helpdesk/models.py
+++ b/helpdesk/models.py
@@ -320,7 +320,7 @@ def save(self, *args, **kwargs):
Permission.objects.create(
name=_("Permission for queue: ") + self.title,
- content_type=ContentType.objects.get(model="queue"),
+ content_type=ContentType.objects.get_for_model(self.__class__),
codename=basename,
)
|
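Aside on the patch above (my commentary, not part of the record): `ContentType.objects.get(model="queue")` filters on the lowercase model name alone, so if any other installed app also defines a model named `Queue`, the lookup can match more than one row and raise `MultipleObjectsReturned`. `get_for_model` keys on the model class itself (app label plus model name) and goes through Django's content-type cache, which is presumably why the diff switches to it. A minimal sketch of the difference, assuming the standard Django API and helpdesk's `Queue` model:
```python
from django.contrib.contenttypes.models import ContentType
from helpdesk.models import Queue

# Ambiguous: matches every ContentType whose model name is "queue",
# regardless of which app defined it -- can raise MultipleObjectsReturned.
ct_by_name = ContentType.objects.get(model="queue")

# Unambiguous: resolved from the class (app_label + model) via Django's
# per-process ContentType cache.
ct_by_model = ContentType.objects.get_for_model(Queue)
```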
tobymao__sqlglot-3401 | bug(mssql): incorrect syntax for creating quoted temporary table
```python
[ins] In [1]: import sqlglot as sg
[ins] In [2]: sg.__version__
Out[2]: '23.12.3.dev11'
[ins] In [3]: exp = sg.parse_one(
...: "CREATE TEMPORARY TABLE 'temptest' (name VARCHAR);", dialect="duckdb"
...: )
[ins] In [4]: exp
Out[4]:
Create(
this=Schema(
this=Table(
this=Identifier(this=temptest, quoted=True)),
expressions=[
ColumnDef(
this=Identifier(this=name, quoted=False),
kind=DataType(this=Type.VARCHAR, nested=False))]),
kind=TABLE,
properties=Properties(
expressions=[
TemporaryProperty()]))
[ins] In [5]: exp.sql(dialect="tsql")
Out[5]: 'CREATE TABLE #[temptest] (name VARCHAR)'
```
`#[temptest]` is invalid syntax. I am not sure how to use the `#` temp indicator with a quoted identifier -- it IS possible to create a temporary table whose name requires quoting, but the only way I have figured out how to do it is by specifying the catalog and database explicitly, e.g.
```sql
CREATE TABLE tempdb.dbo.[quoted@identifier] ([name] VARCHAR(max))
```
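
The same workaround can also be produced with sqlglot's expression builders instead of a raw SQL string. A minimal sketch (mine, not sqlglot's fix for this bug), assuming the public helpers `exp.table_`, `exp.to_identifier`, and `exp.DataType.build` behave as documented:
```python
from sqlglot import exp

# Build CREATE TABLE tempdb.dbo.[temptest] (...) explicitly, sidestepping
# the '#' temp-table prefix that breaks on quoted identifiers.
table = exp.table_("temptest", db="dbo", catalog="tempdb", quoted=True)
create = exp.Create(
    this=exp.Schema(
        this=table,
        expressions=[
            exp.ColumnDef(
                this=exp.to_identifier("name", quoted=True),
                kind=exp.DataType.build("varchar", dialect="tsql"),
            )
        ],
    ),
    kind="TABLE",
)
print(create.sql(dialect="tsql"))
# expected output, roughly:
# CREATE TABLE [tempdb].[dbo].[temptest] ([name] VARCHAR)
```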
**Official Documentation**
https://learn.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver16#syntax-options
https://learn.microsoft.com/en-us/sql/relational-databases/databases/database-identifiers?view=sql-server-ver16#classes-of-identifiers
xref: https://github.com/ibis-project/ibis/issues/9095
| [
{
"content": "from __future__ import annotations\n\nimport datetime\nimport re\nimport typing as t\n\nfrom sqlglot import exp, generator, parser, tokens, transforms\nfrom sqlglot.dialects.dialect import (\n Dialect,\n NormalizationStrategy,\n any_value_to_max_sql,\n date_delta_sql,\n datestrtodate_sql,\n generatedasidentitycolumnconstraint_sql,\n max_or_greatest,\n min_or_least,\n build_date_delta,\n rename_func,\n timestrtotime_sql,\n trim_sql,\n)\nfrom sqlglot.helper import seq_get\nfrom sqlglot.time import format_time\nfrom sqlglot.tokens import TokenType\n\nif t.TYPE_CHECKING:\n from sqlglot._typing import E\n\nFULL_FORMAT_TIME_MAPPING = {\n \"weekday\": \"%A\",\n \"dw\": \"%A\",\n \"w\": \"%A\",\n \"month\": \"%B\",\n \"mm\": \"%B\",\n \"m\": \"%B\",\n}\n\nDATE_DELTA_INTERVAL = {\n \"year\": \"year\",\n \"yyyy\": \"year\",\n \"yy\": \"year\",\n \"quarter\": \"quarter\",\n \"qq\": \"quarter\",\n \"q\": \"quarter\",\n \"month\": \"month\",\n \"mm\": \"month\",\n \"m\": \"month\",\n \"week\": \"week\",\n \"ww\": \"week\",\n \"wk\": \"week\",\n \"day\": \"day\",\n \"dd\": \"day\",\n \"d\": \"day\",\n}\n\n\nDATE_FMT_RE = re.compile(\"([dD]{1,2})|([mM]{1,2})|([yY]{1,4})|([hH]{1,2})|([sS]{1,2})\")\n\n# N = Numeric, C=Currency\nTRANSPILE_SAFE_NUMBER_FMT = {\"N\", \"C\"}\n\nDEFAULT_START_DATE = datetime.date(1900, 1, 1)\n\nBIT_TYPES = {exp.EQ, exp.NEQ, exp.Is, exp.In, exp.Select, exp.Alias}\n\n# Unsupported options:\n# - OPTIMIZE FOR ( @variable_name { UNKNOWN | = <literal_constant> } [ , ...n ] )\n# - TABLE HINT\nOPTIONS: parser.OPTIONS_TYPE = {\n **dict.fromkeys(\n (\n \"DISABLE_OPTIMIZED_PLAN_FORCING\",\n \"FAST\",\n \"IGNORE_NONCLUSTERED_COLUMNSTORE_INDEX\",\n \"LABEL\",\n \"MAXDOP\",\n \"MAXRECURSION\",\n \"MAX_GRANT_PERCENT\",\n \"MIN_GRANT_PERCENT\",\n \"NO_PERFORMANCE_SPOOL\",\n \"QUERYTRACEON\",\n \"RECOMPILE\",\n ),\n tuple(),\n ),\n \"CONCAT\": (\"UNION\",),\n \"DISABLE\": (\"EXTERNALPUSHDOWN\", \"SCALEOUTEXECUTION\"),\n \"EXPAND\": (\"VIEWS\",),\n \"FORCE\": (\"EXTERNALPUSHDOWN\", \"ORDER\", \"SCALEOUTEXECUTION\"),\n \"HASH\": (\"GROUP\", \"JOIN\", \"UNION\"),\n \"KEEP\": (\"PLAN\",),\n \"KEEPFIXED\": (\"PLAN\",),\n \"LOOP\": (\"JOIN\",),\n \"MERGE\": (\"JOIN\", \"UNION\"),\n \"OPTIMIZE\": ((\"FOR\", \"UNKNOWN\"),),\n \"ORDER\": (\"GROUP\",),\n \"PARAMETERIZATION\": (\"FORCED\", \"SIMPLE\"),\n \"ROBUST\": (\"PLAN\",),\n \"USE\": (\"PLAN\",),\n}\n\nOPTIONS_THAT_REQUIRE_EQUAL = (\"MAX_GRANT_PERCENT\", \"MIN_GRANT_PERCENT\", \"LABEL\")\n\n\ndef _build_formatted_time(\n exp_class: t.Type[E], full_format_mapping: t.Optional[bool] = None\n) -> t.Callable[[t.List], E]:\n def _builder(args: t.List) -> E:\n assert len(args) == 2\n\n return exp_class(\n this=exp.cast(args[1], exp.DataType.Type.DATETIME),\n format=exp.Literal.string(\n format_time(\n args[0].name.lower(),\n (\n {**TSQL.TIME_MAPPING, **FULL_FORMAT_TIME_MAPPING}\n if full_format_mapping\n else TSQL.TIME_MAPPING\n ),\n )\n ),\n )\n\n return _builder\n\n\ndef _build_format(args: t.List) -> exp.NumberToStr | exp.TimeToStr:\n this = seq_get(args, 0)\n fmt = seq_get(args, 1)\n culture = seq_get(args, 2)\n\n number_fmt = fmt and (fmt.name in TRANSPILE_SAFE_NUMBER_FMT or not DATE_FMT_RE.search(fmt.name))\n\n if number_fmt:\n return exp.NumberToStr(this=this, format=fmt, culture=culture)\n\n if fmt:\n fmt = exp.Literal.string(\n format_time(fmt.name, TSQL.FORMAT_TIME_MAPPING)\n if len(fmt.name) == 1\n else format_time(fmt.name, TSQL.TIME_MAPPING)\n )\n\n return exp.TimeToStr(this=this, format=fmt, culture=culture)\n\n\ndef 
_build_eomonth(args: t.List) -> exp.LastDay:\n date = exp.TsOrDsToDate(this=seq_get(args, 0))\n month_lag = seq_get(args, 1)\n\n if month_lag is None:\n this: exp.Expression = date\n else:\n unit = DATE_DELTA_INTERVAL.get(\"month\")\n this = exp.DateAdd(this=date, expression=month_lag, unit=unit and exp.var(unit))\n\n return exp.LastDay(this=this)\n\n\ndef _build_hashbytes(args: t.List) -> exp.Expression:\n kind, data = args\n kind = kind.name.upper() if kind.is_string else \"\"\n\n if kind == \"MD5\":\n args.pop(0)\n return exp.MD5(this=data)\n if kind in (\"SHA\", \"SHA1\"):\n args.pop(0)\n return exp.SHA(this=data)\n if kind == \"SHA2_256\":\n return exp.SHA2(this=data, length=exp.Literal.number(256))\n if kind == \"SHA2_512\":\n return exp.SHA2(this=data, length=exp.Literal.number(512))\n\n return exp.func(\"HASHBYTES\", *args)\n\n\nDATEPART_ONLY_FORMATS = {\"DW\", \"HOUR\", \"QUARTER\"}\n\n\ndef _format_sql(self: TSQL.Generator, expression: exp.NumberToStr | exp.TimeToStr) -> str:\n fmt = expression.args[\"format\"]\n\n if not isinstance(expression, exp.NumberToStr):\n if fmt.is_string:\n mapped_fmt = format_time(fmt.name, TSQL.INVERSE_TIME_MAPPING)\n\n name = (mapped_fmt or \"\").upper()\n if name in DATEPART_ONLY_FORMATS:\n return self.func(\"DATEPART\", name, expression.this)\n\n fmt_sql = self.sql(exp.Literal.string(mapped_fmt))\n else:\n fmt_sql = self.format_time(expression) or self.sql(fmt)\n else:\n fmt_sql = self.sql(fmt)\n\n return self.func(\"FORMAT\", expression.this, fmt_sql, expression.args.get(\"culture\"))\n\n\ndef _string_agg_sql(self: TSQL.Generator, expression: exp.GroupConcat) -> str:\n this = expression.this\n distinct = expression.find(exp.Distinct)\n if distinct:\n # exp.Distinct can appear below an exp.Order or an exp.GroupConcat expression\n self.unsupported(\"T-SQL STRING_AGG doesn't support DISTINCT.\")\n this = distinct.pop().expressions[0]\n\n order = \"\"\n if isinstance(expression.this, exp.Order):\n if expression.this.this:\n this = expression.this.this.pop()\n order = f\" WITHIN GROUP ({self.sql(expression.this)[1:]})\" # Order has a leading space\n\n separator = expression.args.get(\"separator\") or exp.Literal.string(\",\")\n return f\"STRING_AGG({self.format_args(this, separator)}){order}\"\n\n\ndef _build_date_delta(\n exp_class: t.Type[E], unit_mapping: t.Optional[t.Dict[str, str]] = None\n) -> t.Callable[[t.List], E]:\n def _builder(args: t.List) -> E:\n unit = seq_get(args, 0)\n if unit and unit_mapping:\n unit = exp.var(unit_mapping.get(unit.name.lower(), unit.name))\n\n start_date = seq_get(args, 1)\n if start_date and start_date.is_number:\n # Numeric types are valid DATETIME values\n if start_date.is_int:\n adds = DEFAULT_START_DATE + datetime.timedelta(days=int(start_date.this))\n start_date = exp.Literal.string(adds.strftime(\"%F\"))\n else:\n # We currently don't handle float values, i.e. 
they're not converted to equivalent DATETIMEs.\n # This is not a problem when generating T-SQL code, it is when transpiling to other dialects.\n return exp_class(this=seq_get(args, 2), expression=start_date, unit=unit)\n\n return exp_class(\n this=exp.TimeStrToTime(this=seq_get(args, 2)),\n expression=exp.TimeStrToTime(this=start_date),\n unit=unit,\n )\n\n return _builder\n\n\ndef qualify_derived_table_outputs(expression: exp.Expression) -> exp.Expression:\n \"\"\"Ensures all (unnamed) output columns are aliased for CTEs and Subqueries.\"\"\"\n alias = expression.args.get(\"alias\")\n\n if (\n isinstance(expression, (exp.CTE, exp.Subquery))\n and isinstance(alias, exp.TableAlias)\n and not alias.columns\n ):\n from sqlglot.optimizer.qualify_columns import qualify_outputs\n\n # We keep track of the unaliased column projection indexes instead of the expressions\n # themselves, because the latter are going to be replaced by new nodes when the aliases\n # are added and hence we won't be able to reach these newly added Alias parents\n query = expression.this\n unaliased_column_indexes = (\n i for i, c in enumerate(query.selects) if isinstance(c, exp.Column) and not c.alias\n )\n\n qualify_outputs(query)\n\n # Preserve the quoting information of columns for newly added Alias nodes\n query_selects = query.selects\n for select_index in unaliased_column_indexes:\n alias = query_selects[select_index]\n column = alias.this\n if isinstance(column.this, exp.Identifier):\n alias.args[\"alias\"].set(\"quoted\", column.this.quoted)\n\n return expression\n\n\n# https://learn.microsoft.com/en-us/sql/t-sql/functions/datetimefromparts-transact-sql?view=sql-server-ver16#syntax\ndef _build_datetimefromparts(args: t.List) -> exp.TimestampFromParts:\n return exp.TimestampFromParts(\n year=seq_get(args, 0),\n month=seq_get(args, 1),\n day=seq_get(args, 2),\n hour=seq_get(args, 3),\n min=seq_get(args, 4),\n sec=seq_get(args, 5),\n milli=seq_get(args, 6),\n )\n\n\n# https://learn.microsoft.com/en-us/sql/t-sql/functions/timefromparts-transact-sql?view=sql-server-ver16#syntax\ndef _build_timefromparts(args: t.List) -> exp.TimeFromParts:\n return exp.TimeFromParts(\n hour=seq_get(args, 0),\n min=seq_get(args, 1),\n sec=seq_get(args, 2),\n fractions=seq_get(args, 3),\n precision=seq_get(args, 4),\n )\n\n\ndef _build_with_arg_as_text(\n klass: t.Type[exp.Expression],\n) -> t.Callable[[t.List[exp.Expression]], exp.Expression]:\n def _parse(args: t.List[exp.Expression]) -> exp.Expression:\n this = seq_get(args, 0)\n\n if this and not this.is_string:\n this = exp.cast(this, exp.DataType.Type.TEXT)\n\n expression = seq_get(args, 1)\n kwargs = {\"this\": this}\n\n if expression:\n kwargs[\"expression\"] = expression\n\n return klass(**kwargs)\n\n return _parse\n\n\ndef _json_extract_sql(\n self: TSQL.Generator, expression: exp.JSONExtract | exp.JSONExtractScalar\n) -> str:\n json_query = self.func(\"JSON_QUERY\", expression.this, expression.expression)\n json_value = self.func(\"JSON_VALUE\", expression.this, expression.expression)\n return self.func(\"ISNULL\", json_query, json_value)\n\n\nclass TSQL(Dialect):\n NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE\n TIME_FORMAT = \"'yyyy-mm-dd hh:mm:ss'\"\n SUPPORTS_SEMI_ANTI_JOIN = False\n LOG_BASE_FIRST = False\n TYPED_DIVISION = True\n CONCAT_COALESCE = True\n\n TIME_MAPPING = {\n \"year\": \"%Y\",\n \"dayofyear\": \"%j\",\n \"day\": \"%d\",\n \"dy\": \"%d\",\n \"y\": \"%Y\",\n \"week\": \"%W\",\n \"ww\": \"%W\",\n \"wk\": \"%W\",\n \"hour\": \"%h\",\n \"hh\": 
\"%I\",\n \"minute\": \"%M\",\n \"mi\": \"%M\",\n \"n\": \"%M\",\n \"second\": \"%S\",\n \"ss\": \"%S\",\n \"s\": \"%-S\",\n \"millisecond\": \"%f\",\n \"ms\": \"%f\",\n \"weekday\": \"%W\",\n \"dw\": \"%W\",\n \"month\": \"%m\",\n \"mm\": \"%M\",\n \"m\": \"%-M\",\n \"Y\": \"%Y\",\n \"YYYY\": \"%Y\",\n \"YY\": \"%y\",\n \"MMMM\": \"%B\",\n \"MMM\": \"%b\",\n \"MM\": \"%m\",\n \"M\": \"%-m\",\n \"dddd\": \"%A\",\n \"dd\": \"%d\",\n \"d\": \"%-d\",\n \"HH\": \"%H\",\n \"H\": \"%-H\",\n \"h\": \"%-I\",\n \"S\": \"%f\",\n \"yyyy\": \"%Y\",\n \"yy\": \"%y\",\n }\n\n CONVERT_FORMAT_MAPPING = {\n \"0\": \"%b %d %Y %-I:%M%p\",\n \"1\": \"%m/%d/%y\",\n \"2\": \"%y.%m.%d\",\n \"3\": \"%d/%m/%y\",\n \"4\": \"%d.%m.%y\",\n \"5\": \"%d-%m-%y\",\n \"6\": \"%d %b %y\",\n \"7\": \"%b %d, %y\",\n \"8\": \"%H:%M:%S\",\n \"9\": \"%b %d %Y %-I:%M:%S:%f%p\",\n \"10\": \"mm-dd-yy\",\n \"11\": \"yy/mm/dd\",\n \"12\": \"yymmdd\",\n \"13\": \"%d %b %Y %H:%M:ss:%f\",\n \"14\": \"%H:%M:%S:%f\",\n \"20\": \"%Y-%m-%d %H:%M:%S\",\n \"21\": \"%Y-%m-%d %H:%M:%S.%f\",\n \"22\": \"%m/%d/%y %-I:%M:%S %p\",\n \"23\": \"%Y-%m-%d\",\n \"24\": \"%H:%M:%S\",\n \"25\": \"%Y-%m-%d %H:%M:%S.%f\",\n \"100\": \"%b %d %Y %-I:%M%p\",\n \"101\": \"%m/%d/%Y\",\n \"102\": \"%Y.%m.%d\",\n \"103\": \"%d/%m/%Y\",\n \"104\": \"%d.%m.%Y\",\n \"105\": \"%d-%m-%Y\",\n \"106\": \"%d %b %Y\",\n \"107\": \"%b %d, %Y\",\n \"108\": \"%H:%M:%S\",\n \"109\": \"%b %d %Y %-I:%M:%S:%f%p\",\n \"110\": \"%m-%d-%Y\",\n \"111\": \"%Y/%m/%d\",\n \"112\": \"%Y%m%d\",\n \"113\": \"%d %b %Y %H:%M:%S:%f\",\n \"114\": \"%H:%M:%S:%f\",\n \"120\": \"%Y-%m-%d %H:%M:%S\",\n \"121\": \"%Y-%m-%d %H:%M:%S.%f\",\n }\n\n FORMAT_TIME_MAPPING = {\n \"y\": \"%B %Y\",\n \"d\": \"%m/%d/%Y\",\n \"H\": \"%-H\",\n \"h\": \"%-I\",\n \"s\": \"%Y-%m-%d %H:%M:%S\",\n \"D\": \"%A,%B,%Y\",\n \"f\": \"%A,%B,%Y %-I:%M %p\",\n \"F\": \"%A,%B,%Y %-I:%M:%S %p\",\n \"g\": \"%m/%d/%Y %-I:%M %p\",\n \"G\": \"%m/%d/%Y %-I:%M:%S %p\",\n \"M\": \"%B %-d\",\n \"m\": \"%B %-d\",\n \"O\": \"%Y-%m-%dT%H:%M:%S\",\n \"u\": \"%Y-%M-%D %H:%M:%S%z\",\n \"U\": \"%A, %B %D, %Y %H:%M:%S%z\",\n \"T\": \"%-I:%M:%S %p\",\n \"t\": \"%-I:%M\",\n \"Y\": \"%a %Y\",\n }\n\n class Tokenizer(tokens.Tokenizer):\n IDENTIFIERS = [(\"[\", \"]\"), '\"']\n QUOTES = [\"'\", '\"']\n HEX_STRINGS = [(\"0x\", \"\"), (\"0X\", \"\")]\n VAR_SINGLE_TOKENS = {\"@\", \"$\", \"#\"}\n\n KEYWORDS = {\n **tokens.Tokenizer.KEYWORDS,\n \"DATETIME2\": TokenType.DATETIME,\n \"DATETIMEOFFSET\": TokenType.TIMESTAMPTZ,\n \"DECLARE\": TokenType.COMMAND,\n \"EXEC\": TokenType.COMMAND,\n \"IMAGE\": TokenType.IMAGE,\n \"MONEY\": TokenType.MONEY,\n \"NTEXT\": TokenType.TEXT,\n \"PRINT\": TokenType.COMMAND,\n \"PROC\": TokenType.PROCEDURE,\n \"REAL\": TokenType.FLOAT,\n \"ROWVERSION\": TokenType.ROWVERSION,\n \"SMALLDATETIME\": TokenType.DATETIME,\n \"SMALLMONEY\": TokenType.SMALLMONEY,\n \"SQL_VARIANT\": TokenType.VARIANT,\n \"TOP\": TokenType.TOP,\n \"TIMESTAMP\": TokenType.ROWVERSION,\n \"UNIQUEIDENTIFIER\": TokenType.UNIQUEIDENTIFIER,\n \"UPDATE STATISTICS\": TokenType.COMMAND,\n \"XML\": TokenType.XML,\n \"OUTPUT\": TokenType.RETURNING,\n \"SYSTEM_USER\": TokenType.CURRENT_USER,\n \"FOR SYSTEM_TIME\": TokenType.TIMESTAMP_SNAPSHOT,\n \"OPTION\": TokenType.OPTION,\n }\n\n class Parser(parser.Parser):\n SET_REQUIRES_ASSIGNMENT_DELIMITER = False\n LOG_DEFAULTS_TO_LN = True\n ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN = False\n STRING_ALIASES = True\n NO_PAREN_IF_COMMANDS = False\n\n QUERY_MODIFIER_PARSERS = {\n **parser.Parser.QUERY_MODIFIER_PARSERS,\n 
TokenType.OPTION: lambda self: (\"options\", self._parse_options()),\n }\n\n FUNCTIONS = {\n **parser.Parser.FUNCTIONS,\n \"CHARINDEX\": lambda args: exp.StrPosition(\n this=seq_get(args, 1),\n substr=seq_get(args, 0),\n position=seq_get(args, 2),\n ),\n \"DATEADD\": build_date_delta(exp.DateAdd, unit_mapping=DATE_DELTA_INTERVAL),\n \"DATEDIFF\": _build_date_delta(exp.DateDiff, unit_mapping=DATE_DELTA_INTERVAL),\n \"DATENAME\": _build_formatted_time(exp.TimeToStr, full_format_mapping=True),\n \"DATEPART\": _build_formatted_time(exp.TimeToStr),\n \"DATETIMEFROMPARTS\": _build_datetimefromparts,\n \"EOMONTH\": _build_eomonth,\n \"FORMAT\": _build_format,\n \"GETDATE\": exp.CurrentTimestamp.from_arg_list,\n \"HASHBYTES\": _build_hashbytes,\n \"ISNULL\": exp.Coalesce.from_arg_list,\n \"JSON_QUERY\": parser.build_extract_json_with_path(exp.JSONExtract),\n \"JSON_VALUE\": parser.build_extract_json_with_path(exp.JSONExtractScalar),\n \"LEN\": _build_with_arg_as_text(exp.Length),\n \"LEFT\": _build_with_arg_as_text(exp.Left),\n \"RIGHT\": _build_with_arg_as_text(exp.Right),\n \"REPLICATE\": exp.Repeat.from_arg_list,\n \"SQUARE\": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),\n \"SYSDATETIME\": exp.CurrentTimestamp.from_arg_list,\n \"SUSER_NAME\": exp.CurrentUser.from_arg_list,\n \"SUSER_SNAME\": exp.CurrentUser.from_arg_list,\n \"SYSTEM_USER\": exp.CurrentUser.from_arg_list,\n \"TIMEFROMPARTS\": _build_timefromparts,\n }\n\n JOIN_HINTS = {\"LOOP\", \"HASH\", \"MERGE\", \"REMOTE\"}\n\n RETURNS_TABLE_TOKENS = parser.Parser.ID_VAR_TOKENS - {\n TokenType.TABLE,\n *parser.Parser.TYPE_TOKENS,\n }\n\n STATEMENT_PARSERS = {\n **parser.Parser.STATEMENT_PARSERS,\n TokenType.END: lambda self: self._parse_command(),\n }\n\n def _parse_options(self) -> t.Optional[t.List[exp.Expression]]:\n if not self._match(TokenType.OPTION):\n return None\n\n def _parse_option() -> t.Optional[exp.Expression]:\n option = self._parse_var_from_options(OPTIONS)\n if not option:\n return None\n\n self._match(TokenType.EQ)\n return self.expression(\n exp.QueryOption, this=option, expression=self._parse_primary_or_var()\n )\n\n return self._parse_wrapped_csv(_parse_option)\n\n def _parse_projections(self) -> t.List[exp.Expression]:\n \"\"\"\n T-SQL supports the syntax alias = expression in the SELECT's projection list,\n so we transform all parsed Selects to convert their EQ projections into Aliases.\n\n See: https://learn.microsoft.com/en-us/sql/t-sql/queries/select-clause-transact-sql?view=sql-server-ver16#syntax\n \"\"\"\n return [\n (\n exp.alias_(projection.expression, projection.this.this, copy=False)\n if isinstance(projection, exp.EQ) and isinstance(projection.this, exp.Column)\n else projection\n )\n for projection in super()._parse_projections()\n ]\n\n def _parse_commit_or_rollback(self) -> exp.Commit | exp.Rollback:\n \"\"\"Applies to SQL Server and Azure SQL Database\n COMMIT [ { TRAN | TRANSACTION }\n [ transaction_name | @tran_name_variable ] ]\n [ WITH ( DELAYED_DURABILITY = { OFF | ON } ) ]\n\n ROLLBACK { TRAN | TRANSACTION }\n [ transaction_name | @tran_name_variable\n | savepoint_name | @savepoint_variable ]\n \"\"\"\n rollback = self._prev.token_type == TokenType.ROLLBACK\n\n self._match_texts((\"TRAN\", \"TRANSACTION\"))\n this = self._parse_id_var()\n\n if rollback:\n return self.expression(exp.Rollback, this=this)\n\n durability = None\n if self._match_pair(TokenType.WITH, TokenType.L_PAREN):\n self._match_text_seq(\"DELAYED_DURABILITY\")\n self._match(TokenType.EQ)\n\n if 
self._match_text_seq(\"OFF\"):\n durability = False\n else:\n self._match(TokenType.ON)\n durability = True\n\n self._match_r_paren()\n\n return self.expression(exp.Commit, this=this, durability=durability)\n\n def _parse_transaction(self) -> exp.Transaction | exp.Command:\n \"\"\"Applies to SQL Server and Azure SQL Database\n BEGIN { TRAN | TRANSACTION }\n [ { transaction_name | @tran_name_variable }\n [ WITH MARK [ 'description' ] ]\n ]\n \"\"\"\n if self._match_texts((\"TRAN\", \"TRANSACTION\")):\n transaction = self.expression(exp.Transaction, this=self._parse_id_var())\n if self._match_text_seq(\"WITH\", \"MARK\"):\n transaction.set(\"mark\", self._parse_string())\n\n return transaction\n\n return self._parse_as_command(self._prev)\n\n def _parse_returns(self) -> exp.ReturnsProperty:\n table = self._parse_id_var(any_token=False, tokens=self.RETURNS_TABLE_TOKENS)\n returns = super()._parse_returns()\n returns.set(\"table\", table)\n return returns\n\n def _parse_convert(\n self, strict: bool, safe: t.Optional[bool] = None\n ) -> t.Optional[exp.Expression]:\n this = self._parse_types()\n self._match(TokenType.COMMA)\n args = [this, *self._parse_csv(self._parse_conjunction)]\n convert = exp.Convert.from_arg_list(args)\n convert.set(\"safe\", safe)\n convert.set(\"strict\", strict)\n return convert\n\n def _parse_user_defined_function(\n self, kind: t.Optional[TokenType] = None\n ) -> t.Optional[exp.Expression]:\n this = super()._parse_user_defined_function(kind=kind)\n\n if (\n kind == TokenType.FUNCTION\n or isinstance(this, exp.UserDefinedFunction)\n or self._match(TokenType.ALIAS, advance=False)\n ):\n return this\n\n expressions = self._parse_csv(self._parse_function_parameter)\n return self.expression(exp.UserDefinedFunction, this=this, expressions=expressions)\n\n def _parse_id_var(\n self,\n any_token: bool = True,\n tokens: t.Optional[t.Collection[TokenType]] = None,\n ) -> t.Optional[exp.Expression]:\n is_temporary = self._match(TokenType.HASH)\n is_global = is_temporary and self._match(TokenType.HASH)\n\n this = super()._parse_id_var(any_token=any_token, tokens=tokens)\n if this:\n if is_global:\n this.set(\"global\", True)\n elif is_temporary:\n this.set(\"temporary\", True)\n\n return this\n\n def _parse_create(self) -> exp.Create | exp.Command:\n create = super()._parse_create()\n\n if isinstance(create, exp.Create):\n table = create.this.this if isinstance(create.this, exp.Schema) else create.this\n if isinstance(table, exp.Table) and table.this.args.get(\"temporary\"):\n if not create.args.get(\"properties\"):\n create.set(\"properties\", exp.Properties(expressions=[]))\n\n create.args[\"properties\"].append(\"expressions\", exp.TemporaryProperty())\n\n return create\n\n def _parse_if(self) -> t.Optional[exp.Expression]:\n index = self._index\n\n if self._match_text_seq(\"OBJECT_ID\"):\n self._parse_wrapped_csv(self._parse_string)\n if self._match_text_seq(\"IS\", \"NOT\", \"NULL\") and self._match(TokenType.DROP):\n return self._parse_drop(exists=True)\n self._retreat(index)\n\n return super()._parse_if()\n\n def _parse_unique(self) -> exp.UniqueColumnConstraint:\n if self._match_texts((\"CLUSTERED\", \"NONCLUSTERED\")):\n this = self.CONSTRAINT_PARSERS[self._prev.text.upper()](self)\n else:\n this = self._parse_schema(self._parse_id_var(any_token=False))\n\n return self.expression(exp.UniqueColumnConstraint, this=this)\n\n def _parse_partition(self) -> t.Optional[exp.Partition]:\n if not self._match_text_seq(\"WITH\", \"(\", \"PARTITIONS\"):\n return None\n\n def 
parse_range():\n low = self._parse_bitwise()\n high = self._parse_bitwise() if self._match_text_seq(\"TO\") else None\n\n return (\n self.expression(exp.PartitionRange, this=low, expression=high) if high else low\n )\n\n partition = self.expression(\n exp.Partition, expressions=self._parse_wrapped_csv(parse_range)\n )\n\n self._match_r_paren()\n\n return partition\n\n class Generator(generator.Generator):\n LIMIT_IS_TOP = True\n QUERY_HINTS = False\n RETURNING_END = False\n NVL2_SUPPORTED = False\n ALTER_TABLE_INCLUDE_COLUMN_KEYWORD = False\n LIMIT_FETCH = \"FETCH\"\n COMPUTED_COLUMN_WITH_TYPE = False\n CTE_RECURSIVE_KEYWORD_REQUIRED = False\n ENSURE_BOOLS = True\n NULL_ORDERING_SUPPORTED = None\n SUPPORTS_SINGLE_ARG_CONCAT = False\n TABLESAMPLE_SEED_KEYWORD = \"REPEATABLE\"\n SUPPORTS_SELECT_INTO = True\n JSON_PATH_BRACKETED_KEY_SUPPORTED = False\n SUPPORTS_TO_NUMBER = False\n OUTER_UNION_MODIFIERS = False\n COPY_PARAMS_EQ_REQUIRED = True\n\n EXPRESSIONS_WITHOUT_NESTED_CTES = {\n exp.Delete,\n exp.Insert,\n exp.Merge,\n exp.Select,\n exp.Subquery,\n exp.Union,\n exp.Update,\n }\n\n SUPPORTED_JSON_PATH_PARTS = {\n exp.JSONPathKey,\n exp.JSONPathRoot,\n exp.JSONPathSubscript,\n }\n\n TYPE_MAPPING = {\n **generator.Generator.TYPE_MAPPING,\n exp.DataType.Type.BOOLEAN: \"BIT\",\n exp.DataType.Type.DECIMAL: \"NUMERIC\",\n exp.DataType.Type.DATETIME: \"DATETIME2\",\n exp.DataType.Type.DOUBLE: \"FLOAT\",\n exp.DataType.Type.INT: \"INTEGER\",\n exp.DataType.Type.TEXT: \"VARCHAR(MAX)\",\n exp.DataType.Type.TIMESTAMP: \"DATETIME2\",\n exp.DataType.Type.TIMESTAMPTZ: \"DATETIMEOFFSET\",\n exp.DataType.Type.VARIANT: \"SQL_VARIANT\",\n exp.DataType.Type.ROWVERSION: \"ROWVERSION\",\n }\n\n TYPE_MAPPING.pop(exp.DataType.Type.NCHAR)\n TYPE_MAPPING.pop(exp.DataType.Type.NVARCHAR)\n\n TRANSFORMS = {\n **generator.Generator.TRANSFORMS,\n exp.AnyValue: any_value_to_max_sql,\n exp.ArrayToString: rename_func(\"STRING_AGG\"),\n exp.AutoIncrementColumnConstraint: lambda *_: \"IDENTITY\",\n exp.DateAdd: date_delta_sql(\"DATEADD\"),\n exp.DateDiff: date_delta_sql(\"DATEDIFF\"),\n exp.CTE: transforms.preprocess([qualify_derived_table_outputs]),\n exp.CurrentDate: rename_func(\"GETDATE\"),\n exp.CurrentTimestamp: rename_func(\"GETDATE\"),\n exp.DateStrToDate: datestrtodate_sql,\n exp.Extract: rename_func(\"DATEPART\"),\n exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,\n exp.GroupConcat: _string_agg_sql,\n exp.If: rename_func(\"IIF\"),\n exp.JSONExtract: _json_extract_sql,\n exp.JSONExtractScalar: _json_extract_sql,\n exp.LastDay: lambda self, e: self.func(\"EOMONTH\", e.this),\n exp.Max: max_or_greatest,\n exp.MD5: lambda self, e: self.func(\"HASHBYTES\", exp.Literal.string(\"MD5\"), e.this),\n exp.Min: min_or_least,\n exp.NumberToStr: _format_sql,\n exp.ParseJSON: lambda self, e: self.sql(e, \"this\"),\n exp.Select: transforms.preprocess(\n [\n transforms.eliminate_distinct_on,\n transforms.eliminate_semi_and_anti_joins,\n transforms.eliminate_qualify,\n ]\n ),\n exp.StrPosition: lambda self, e: self.func(\n \"CHARINDEX\", e.args.get(\"substr\"), e.this, e.args.get(\"position\")\n ),\n exp.Subquery: transforms.preprocess([qualify_derived_table_outputs]),\n exp.SHA: lambda self, e: self.func(\"HASHBYTES\", exp.Literal.string(\"SHA1\"), e.this),\n exp.SHA2: lambda self, e: self.func(\n \"HASHBYTES\", exp.Literal.string(f\"SHA2_{e.args.get('length', 256)}\"), e.this\n ),\n exp.TemporaryProperty: lambda self, e: \"\",\n exp.TimeStrToTime: timestrtotime_sql,\n exp.TimeToStr: 
_format_sql,\n exp.Trim: trim_sql,\n exp.TsOrDsAdd: date_delta_sql(\"DATEADD\", cast=True),\n exp.TsOrDsDiff: date_delta_sql(\"DATEDIFF\"),\n }\n\n TRANSFORMS.pop(exp.ReturnsProperty)\n\n PROPERTIES_LOCATION = {\n **generator.Generator.PROPERTIES_LOCATION,\n exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,\n }\n\n def select_sql(self, expression: exp.Select) -> str:\n if expression.args.get(\"offset\"):\n if not expression.args.get(\"order\"):\n # ORDER BY is required in order to use OFFSET in a query, so we use\n # a noop order by, since we don't really care about the order.\n # See: https://www.microsoftpressstore.com/articles/article.aspx?p=2314819\n expression.order_by(exp.select(exp.null()).subquery(), copy=False)\n\n limit = expression.args.get(\"limit\")\n if isinstance(limit, exp.Limit):\n # TOP and OFFSET can't be combined, we need use FETCH instead of TOP\n # we replace here because otherwise TOP would be generated in select_sql\n limit.replace(exp.Fetch(direction=\"FIRST\", count=limit.expression))\n\n return super().select_sql(expression)\n\n def convert_sql(self, expression: exp.Convert) -> str:\n name = \"TRY_CONVERT\" if expression.args.get(\"safe\") else \"CONVERT\"\n return self.func(\n name, expression.this, expression.expression, expression.args.get(\"style\")\n )\n\n def queryoption_sql(self, expression: exp.QueryOption) -> str:\n option = self.sql(expression, \"this\")\n value = self.sql(expression, \"expression\")\n if value:\n optional_equal_sign = \"= \" if option in OPTIONS_THAT_REQUIRE_EQUAL else \"\"\n return f\"{option} {optional_equal_sign}{value}\"\n return option\n\n def lateral_op(self, expression: exp.Lateral) -> str:\n cross_apply = expression.args.get(\"cross_apply\")\n if cross_apply is True:\n return \"CROSS APPLY\"\n if cross_apply is False:\n return \"OUTER APPLY\"\n\n # TODO: perhaps we can check if the parent is a Join and transpile it appropriately\n self.unsupported(\"LATERAL clause is not supported.\")\n return \"LATERAL\"\n\n def timefromparts_sql(self, expression: exp.TimeFromParts) -> str:\n nano = expression.args.get(\"nano\")\n if nano is not None:\n nano.pop()\n self.unsupported(\"Specifying nanoseconds is not supported in TIMEFROMPARTS.\")\n\n if expression.args.get(\"fractions\") is None:\n expression.set(\"fractions\", exp.Literal.number(0))\n if expression.args.get(\"precision\") is None:\n expression.set(\"precision\", exp.Literal.number(0))\n\n return rename_func(\"TIMEFROMPARTS\")(self, expression)\n\n def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:\n zone = expression.args.get(\"zone\")\n if zone is not None:\n zone.pop()\n self.unsupported(\"Time zone is not supported in DATETIMEFROMPARTS.\")\n\n nano = expression.args.get(\"nano\")\n if nano is not None:\n nano.pop()\n self.unsupported(\"Specifying nanoseconds is not supported in DATETIMEFROMPARTS.\")\n\n if expression.args.get(\"milli\") is None:\n expression.set(\"milli\", exp.Literal.number(0))\n\n return rename_func(\"DATETIMEFROMPARTS\")(self, expression)\n\n def setitem_sql(self, expression: exp.SetItem) -> str:\n this = expression.this\n if isinstance(this, exp.EQ) and not isinstance(this.left, exp.Parameter):\n # T-SQL does not use '=' in SET command, except when the LHS is a variable.\n return f\"{self.sql(this.left)} {self.sql(this.right)}\"\n\n return super().setitem_sql(expression)\n\n def boolean_sql(self, expression: exp.Boolean) -> str:\n if type(expression.parent) in BIT_TYPES:\n return \"1\" if expression.this else \"0\"\n\n 
return \"(1 = 1)\" if expression.this else \"(1 = 0)\"\n\n def is_sql(self, expression: exp.Is) -> str:\n if isinstance(expression.expression, exp.Boolean):\n return self.binary(expression, \"=\")\n return self.binary(expression, \"IS\")\n\n def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str:\n sql = self.sql(expression, \"this\")\n properties = expression.args.get(\"properties\")\n\n if sql[:1] != \"#\" and any(\n isinstance(prop, exp.TemporaryProperty)\n for prop in (properties.expressions if properties else [])\n ):\n sql = f\"#{sql}\"\n\n return sql\n\n def create_sql(self, expression: exp.Create) -> str:\n kind = expression.kind\n exists = expression.args.pop(\"exists\", None)\n sql = super().create_sql(expression)\n\n like_property = expression.find(exp.LikeProperty)\n if like_property:\n ctas_expression = like_property.this\n else:\n ctas_expression = expression.expression\n\n table = expression.find(exp.Table)\n\n # Convert CTAS statement to SELECT .. INTO ..\n if kind == \"TABLE\" and ctas_expression:\n ctas_with = ctas_expression.args.get(\"with\")\n if ctas_with:\n ctas_with = ctas_with.pop()\n\n if isinstance(ctas_expression, exp.UNWRAPPED_QUERIES):\n ctas_expression = ctas_expression.subquery()\n\n select_into = exp.select(\"*\").from_(exp.alias_(ctas_expression, \"temp\", table=True))\n select_into.set(\"into\", exp.Into(this=table))\n select_into.set(\"with\", ctas_with)\n\n if like_property:\n select_into.limit(0, copy=False)\n\n sql = self.sql(select_into)\n\n if exists:\n identifier = self.sql(exp.Literal.string(exp.table_name(table) if table else \"\"))\n sql = self.sql(exp.Literal.string(sql))\n if kind == \"SCHEMA\":\n sql = f\"\"\"IF NOT EXISTS (SELECT * FROM information_schema.schemata WHERE schema_name = {identifier}) EXEC({sql})\"\"\"\n elif kind == \"TABLE\":\n assert table\n where = exp.and_(\n exp.column(\"table_name\").eq(table.name),\n exp.column(\"table_schema\").eq(table.db) if table.db else None,\n exp.column(\"table_catalog\").eq(table.catalog) if table.catalog else None,\n )\n sql = f\"\"\"IF NOT EXISTS (SELECT * FROM information_schema.tables WHERE {where}) EXEC({sql})\"\"\"\n elif kind == \"INDEX\":\n index = self.sql(exp.Literal.string(expression.this.text(\"this\")))\n sql = f\"\"\"IF NOT EXISTS (SELECT * FROM sys.indexes WHERE object_id = object_id({identifier}) AND name = {index}) EXEC({sql})\"\"\"\n elif expression.args.get(\"replace\"):\n sql = sql.replace(\"CREATE OR REPLACE \", \"CREATE OR ALTER \", 1)\n\n return self.prepend_ctes(expression, sql)\n\n def offset_sql(self, expression: exp.Offset) -> str:\n return f\"{super().offset_sql(expression)} ROWS\"\n\n def version_sql(self, expression: exp.Version) -> str:\n name = \"SYSTEM_TIME\" if expression.name == \"TIMESTAMP\" else expression.name\n this = f\"FOR {name}\"\n expr = expression.expression\n kind = expression.text(\"kind\")\n if kind in (\"FROM\", \"BETWEEN\"):\n args = expr.expressions\n sep = \"TO\" if kind == \"FROM\" else \"AND\"\n expr_sql = f\"{self.sql(seq_get(args, 0))} {sep} {self.sql(seq_get(args, 1))}\"\n else:\n expr_sql = self.sql(expr)\n\n expr_sql = f\" {expr_sql}\" if expr_sql else \"\"\n return f\"{this} {kind}{expr_sql}\"\n\n def returnsproperty_sql(self, expression: exp.ReturnsProperty) -> str:\n table = expression.args.get(\"table\")\n table = f\"{table} \" if table else \"\"\n return f\"RETURNS {table}{self.sql(expression, 'this')}\"\n\n def returning_sql(self, expression: exp.Returning) -> str:\n into = self.sql(expression, \"into\")\n 
into = self.seg(f\"INTO {into}\") if into else \"\"\n return f\"{self.seg('OUTPUT')} {self.expressions(expression, flat=True)}{into}\"\n\n def transaction_sql(self, expression: exp.Transaction) -> str:\n this = self.sql(expression, \"this\")\n this = f\" {this}\" if this else \"\"\n mark = self.sql(expression, \"mark\")\n mark = f\" WITH MARK {mark}\" if mark else \"\"\n return f\"BEGIN TRANSACTION{this}{mark}\"\n\n def commit_sql(self, expression: exp.Commit) -> str:\n this = self.sql(expression, \"this\")\n this = f\" {this}\" if this else \"\"\n durability = expression.args.get(\"durability\")\n durability = (\n f\" WITH (DELAYED_DURABILITY = {'ON' if durability else 'OFF'})\"\n if durability is not None\n else \"\"\n )\n return f\"COMMIT TRANSACTION{this}{durability}\"\n\n def rollback_sql(self, expression: exp.Rollback) -> str:\n this = self.sql(expression, \"this\")\n this = f\" {this}\" if this else \"\"\n return f\"ROLLBACK TRANSACTION{this}\"\n\n def identifier_sql(self, expression: exp.Identifier) -> str:\n identifier = super().identifier_sql(expression)\n\n if expression.args.get(\"global\"):\n identifier = f\"##{identifier}\"\n elif expression.args.get(\"temporary\"):\n identifier = f\"#{identifier}\"\n\n return identifier\n\n def constraint_sql(self, expression: exp.Constraint) -> str:\n this = self.sql(expression, \"this\")\n expressions = self.expressions(expression, flat=True, sep=\" \")\n return f\"CONSTRAINT {this} {expressions}\"\n\n def length_sql(self, expression: exp.Length) -> str:\n return self._uncast_text(expression, \"LEN\")\n\n def right_sql(self, expression: exp.Right) -> str:\n return self._uncast_text(expression, \"RIGHT\")\n\n def left_sql(self, expression: exp.Left) -> str:\n return self._uncast_text(expression, \"LEFT\")\n\n def _uncast_text(self, expression: exp.Expression, name: str) -> str:\n this = expression.this\n if isinstance(this, exp.Cast) and this.is_type(exp.DataType.Type.TEXT):\n this_sql = self.sql(this, \"this\")\n else:\n this_sql = self.sql(this)\n expression_sql = self.sql(expression, \"expression\")\n return self.func(name, this_sql, expression_sql if expression_sql else None)\n\n def partition_sql(self, expression: exp.Partition) -> str:\n return f\"WITH (PARTITIONS({self.expressions(expression, flat=True)}))\"\n\n def altertable_sql(self, expression: exp.AlterTable) -> str:\n action = seq_get(expression.args.get(\"actions\") or [], 0)\n if isinstance(action, exp.RenameTable):\n return f\"EXEC sp_rename '{self.sql(expression.this)}', '{action.this.name}'\"\n return super().altertable_sql(expression)\n",
"path": "sqlglot/dialects/tsql.py"
}
] | [
{
"content": "from __future__ import annotations\n\nimport datetime\nimport re\nimport typing as t\n\nfrom sqlglot import exp, generator, parser, tokens, transforms\nfrom sqlglot.dialects.dialect import (\n Dialect,\n NormalizationStrategy,\n any_value_to_max_sql,\n date_delta_sql,\n datestrtodate_sql,\n generatedasidentitycolumnconstraint_sql,\n max_or_greatest,\n min_or_least,\n build_date_delta,\n rename_func,\n timestrtotime_sql,\n trim_sql,\n)\nfrom sqlglot.helper import seq_get\nfrom sqlglot.time import format_time\nfrom sqlglot.tokens import TokenType\n\nif t.TYPE_CHECKING:\n from sqlglot._typing import E\n\nFULL_FORMAT_TIME_MAPPING = {\n \"weekday\": \"%A\",\n \"dw\": \"%A\",\n \"w\": \"%A\",\n \"month\": \"%B\",\n \"mm\": \"%B\",\n \"m\": \"%B\",\n}\n\nDATE_DELTA_INTERVAL = {\n \"year\": \"year\",\n \"yyyy\": \"year\",\n \"yy\": \"year\",\n \"quarter\": \"quarter\",\n \"qq\": \"quarter\",\n \"q\": \"quarter\",\n \"month\": \"month\",\n \"mm\": \"month\",\n \"m\": \"month\",\n \"week\": \"week\",\n \"ww\": \"week\",\n \"wk\": \"week\",\n \"day\": \"day\",\n \"dd\": \"day\",\n \"d\": \"day\",\n}\n\n\nDATE_FMT_RE = re.compile(\"([dD]{1,2})|([mM]{1,2})|([yY]{1,4})|([hH]{1,2})|([sS]{1,2})\")\n\n# N = Numeric, C=Currency\nTRANSPILE_SAFE_NUMBER_FMT = {\"N\", \"C\"}\n\nDEFAULT_START_DATE = datetime.date(1900, 1, 1)\n\nBIT_TYPES = {exp.EQ, exp.NEQ, exp.Is, exp.In, exp.Select, exp.Alias}\n\n# Unsupported options:\n# - OPTIMIZE FOR ( @variable_name { UNKNOWN | = <literal_constant> } [ , ...n ] )\n# - TABLE HINT\nOPTIONS: parser.OPTIONS_TYPE = {\n **dict.fromkeys(\n (\n \"DISABLE_OPTIMIZED_PLAN_FORCING\",\n \"FAST\",\n \"IGNORE_NONCLUSTERED_COLUMNSTORE_INDEX\",\n \"LABEL\",\n \"MAXDOP\",\n \"MAXRECURSION\",\n \"MAX_GRANT_PERCENT\",\n \"MIN_GRANT_PERCENT\",\n \"NO_PERFORMANCE_SPOOL\",\n \"QUERYTRACEON\",\n \"RECOMPILE\",\n ),\n tuple(),\n ),\n \"CONCAT\": (\"UNION\",),\n \"DISABLE\": (\"EXTERNALPUSHDOWN\", \"SCALEOUTEXECUTION\"),\n \"EXPAND\": (\"VIEWS\",),\n \"FORCE\": (\"EXTERNALPUSHDOWN\", \"ORDER\", \"SCALEOUTEXECUTION\"),\n \"HASH\": (\"GROUP\", \"JOIN\", \"UNION\"),\n \"KEEP\": (\"PLAN\",),\n \"KEEPFIXED\": (\"PLAN\",),\n \"LOOP\": (\"JOIN\",),\n \"MERGE\": (\"JOIN\", \"UNION\"),\n \"OPTIMIZE\": ((\"FOR\", \"UNKNOWN\"),),\n \"ORDER\": (\"GROUP\",),\n \"PARAMETERIZATION\": (\"FORCED\", \"SIMPLE\"),\n \"ROBUST\": (\"PLAN\",),\n \"USE\": (\"PLAN\",),\n}\n\nOPTIONS_THAT_REQUIRE_EQUAL = (\"MAX_GRANT_PERCENT\", \"MIN_GRANT_PERCENT\", \"LABEL\")\n\n\ndef _build_formatted_time(\n exp_class: t.Type[E], full_format_mapping: t.Optional[bool] = None\n) -> t.Callable[[t.List], E]:\n def _builder(args: t.List) -> E:\n assert len(args) == 2\n\n return exp_class(\n this=exp.cast(args[1], exp.DataType.Type.DATETIME),\n format=exp.Literal.string(\n format_time(\n args[0].name.lower(),\n (\n {**TSQL.TIME_MAPPING, **FULL_FORMAT_TIME_MAPPING}\n if full_format_mapping\n else TSQL.TIME_MAPPING\n ),\n )\n ),\n )\n\n return _builder\n\n\ndef _build_format(args: t.List) -> exp.NumberToStr | exp.TimeToStr:\n this = seq_get(args, 0)\n fmt = seq_get(args, 1)\n culture = seq_get(args, 2)\n\n number_fmt = fmt and (fmt.name in TRANSPILE_SAFE_NUMBER_FMT or not DATE_FMT_RE.search(fmt.name))\n\n if number_fmt:\n return exp.NumberToStr(this=this, format=fmt, culture=culture)\n\n if fmt:\n fmt = exp.Literal.string(\n format_time(fmt.name, TSQL.FORMAT_TIME_MAPPING)\n if len(fmt.name) == 1\n else format_time(fmt.name, TSQL.TIME_MAPPING)\n )\n\n return exp.TimeToStr(this=this, format=fmt, culture=culture)\n\n\ndef 
_build_eomonth(args: t.List) -> exp.LastDay:\n date = exp.TsOrDsToDate(this=seq_get(args, 0))\n month_lag = seq_get(args, 1)\n\n if month_lag is None:\n this: exp.Expression = date\n else:\n unit = DATE_DELTA_INTERVAL.get(\"month\")\n this = exp.DateAdd(this=date, expression=month_lag, unit=unit and exp.var(unit))\n\n return exp.LastDay(this=this)\n\n\ndef _build_hashbytes(args: t.List) -> exp.Expression:\n kind, data = args\n kind = kind.name.upper() if kind.is_string else \"\"\n\n if kind == \"MD5\":\n args.pop(0)\n return exp.MD5(this=data)\n if kind in (\"SHA\", \"SHA1\"):\n args.pop(0)\n return exp.SHA(this=data)\n if kind == \"SHA2_256\":\n return exp.SHA2(this=data, length=exp.Literal.number(256))\n if kind == \"SHA2_512\":\n return exp.SHA2(this=data, length=exp.Literal.number(512))\n\n return exp.func(\"HASHBYTES\", *args)\n\n\nDATEPART_ONLY_FORMATS = {\"DW\", \"HOUR\", \"QUARTER\"}\n\n\ndef _format_sql(self: TSQL.Generator, expression: exp.NumberToStr | exp.TimeToStr) -> str:\n fmt = expression.args[\"format\"]\n\n if not isinstance(expression, exp.NumberToStr):\n if fmt.is_string:\n mapped_fmt = format_time(fmt.name, TSQL.INVERSE_TIME_MAPPING)\n\n name = (mapped_fmt or \"\").upper()\n if name in DATEPART_ONLY_FORMATS:\n return self.func(\"DATEPART\", name, expression.this)\n\n fmt_sql = self.sql(exp.Literal.string(mapped_fmt))\n else:\n fmt_sql = self.format_time(expression) or self.sql(fmt)\n else:\n fmt_sql = self.sql(fmt)\n\n return self.func(\"FORMAT\", expression.this, fmt_sql, expression.args.get(\"culture\"))\n\n\ndef _string_agg_sql(self: TSQL.Generator, expression: exp.GroupConcat) -> str:\n this = expression.this\n distinct = expression.find(exp.Distinct)\n if distinct:\n # exp.Distinct can appear below an exp.Order or an exp.GroupConcat expression\n self.unsupported(\"T-SQL STRING_AGG doesn't support DISTINCT.\")\n this = distinct.pop().expressions[0]\n\n order = \"\"\n if isinstance(expression.this, exp.Order):\n if expression.this.this:\n this = expression.this.this.pop()\n order = f\" WITHIN GROUP ({self.sql(expression.this)[1:]})\" # Order has a leading space\n\n separator = expression.args.get(\"separator\") or exp.Literal.string(\",\")\n return f\"STRING_AGG({self.format_args(this, separator)}){order}\"\n\n\ndef _build_date_delta(\n exp_class: t.Type[E], unit_mapping: t.Optional[t.Dict[str, str]] = None\n) -> t.Callable[[t.List], E]:\n def _builder(args: t.List) -> E:\n unit = seq_get(args, 0)\n if unit and unit_mapping:\n unit = exp.var(unit_mapping.get(unit.name.lower(), unit.name))\n\n start_date = seq_get(args, 1)\n if start_date and start_date.is_number:\n # Numeric types are valid DATETIME values\n if start_date.is_int:\n adds = DEFAULT_START_DATE + datetime.timedelta(days=int(start_date.this))\n start_date = exp.Literal.string(adds.strftime(\"%F\"))\n else:\n # We currently don't handle float values, i.e. 
they're not converted to equivalent DATETIMEs.\n # This is not a problem when generating T-SQL code, it is when transpiling to other dialects.\n return exp_class(this=seq_get(args, 2), expression=start_date, unit=unit)\n\n return exp_class(\n this=exp.TimeStrToTime(this=seq_get(args, 2)),\n expression=exp.TimeStrToTime(this=start_date),\n unit=unit,\n )\n\n return _builder\n\n\ndef qualify_derived_table_outputs(expression: exp.Expression) -> exp.Expression:\n \"\"\"Ensures all (unnamed) output columns are aliased for CTEs and Subqueries.\"\"\"\n alias = expression.args.get(\"alias\")\n\n if (\n isinstance(expression, (exp.CTE, exp.Subquery))\n and isinstance(alias, exp.TableAlias)\n and not alias.columns\n ):\n from sqlglot.optimizer.qualify_columns import qualify_outputs\n\n # We keep track of the unaliased column projection indexes instead of the expressions\n # themselves, because the latter are going to be replaced by new nodes when the aliases\n # are added and hence we won't be able to reach these newly added Alias parents\n query = expression.this\n unaliased_column_indexes = (\n i for i, c in enumerate(query.selects) if isinstance(c, exp.Column) and not c.alias\n )\n\n qualify_outputs(query)\n\n # Preserve the quoting information of columns for newly added Alias nodes\n query_selects = query.selects\n for select_index in unaliased_column_indexes:\n alias = query_selects[select_index]\n column = alias.this\n if isinstance(column.this, exp.Identifier):\n alias.args[\"alias\"].set(\"quoted\", column.this.quoted)\n\n return expression\n\n\n# https://learn.microsoft.com/en-us/sql/t-sql/functions/datetimefromparts-transact-sql?view=sql-server-ver16#syntax\ndef _build_datetimefromparts(args: t.List) -> exp.TimestampFromParts:\n return exp.TimestampFromParts(\n year=seq_get(args, 0),\n month=seq_get(args, 1),\n day=seq_get(args, 2),\n hour=seq_get(args, 3),\n min=seq_get(args, 4),\n sec=seq_get(args, 5),\n milli=seq_get(args, 6),\n )\n\n\n# https://learn.microsoft.com/en-us/sql/t-sql/functions/timefromparts-transact-sql?view=sql-server-ver16#syntax\ndef _build_timefromparts(args: t.List) -> exp.TimeFromParts:\n return exp.TimeFromParts(\n hour=seq_get(args, 0),\n min=seq_get(args, 1),\n sec=seq_get(args, 2),\n fractions=seq_get(args, 3),\n precision=seq_get(args, 4),\n )\n\n\ndef _build_with_arg_as_text(\n klass: t.Type[exp.Expression],\n) -> t.Callable[[t.List[exp.Expression]], exp.Expression]:\n def _parse(args: t.List[exp.Expression]) -> exp.Expression:\n this = seq_get(args, 0)\n\n if this and not this.is_string:\n this = exp.cast(this, exp.DataType.Type.TEXT)\n\n expression = seq_get(args, 1)\n kwargs = {\"this\": this}\n\n if expression:\n kwargs[\"expression\"] = expression\n\n return klass(**kwargs)\n\n return _parse\n\n\ndef _json_extract_sql(\n self: TSQL.Generator, expression: exp.JSONExtract | exp.JSONExtractScalar\n) -> str:\n json_query = self.func(\"JSON_QUERY\", expression.this, expression.expression)\n json_value = self.func(\"JSON_VALUE\", expression.this, expression.expression)\n return self.func(\"ISNULL\", json_query, json_value)\n\n\nclass TSQL(Dialect):\n NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE\n TIME_FORMAT = \"'yyyy-mm-dd hh:mm:ss'\"\n SUPPORTS_SEMI_ANTI_JOIN = False\n LOG_BASE_FIRST = False\n TYPED_DIVISION = True\n CONCAT_COALESCE = True\n\n TIME_MAPPING = {\n \"year\": \"%Y\",\n \"dayofyear\": \"%j\",\n \"day\": \"%d\",\n \"dy\": \"%d\",\n \"y\": \"%Y\",\n \"week\": \"%W\",\n \"ww\": \"%W\",\n \"wk\": \"%W\",\n \"hour\": \"%h\",\n \"hh\": 
\"%I\",\n \"minute\": \"%M\",\n \"mi\": \"%M\",\n \"n\": \"%M\",\n \"second\": \"%S\",\n \"ss\": \"%S\",\n \"s\": \"%-S\",\n \"millisecond\": \"%f\",\n \"ms\": \"%f\",\n \"weekday\": \"%W\",\n \"dw\": \"%W\",\n \"month\": \"%m\",\n \"mm\": \"%M\",\n \"m\": \"%-M\",\n \"Y\": \"%Y\",\n \"YYYY\": \"%Y\",\n \"YY\": \"%y\",\n \"MMMM\": \"%B\",\n \"MMM\": \"%b\",\n \"MM\": \"%m\",\n \"M\": \"%-m\",\n \"dddd\": \"%A\",\n \"dd\": \"%d\",\n \"d\": \"%-d\",\n \"HH\": \"%H\",\n \"H\": \"%-H\",\n \"h\": \"%-I\",\n \"S\": \"%f\",\n \"yyyy\": \"%Y\",\n \"yy\": \"%y\",\n }\n\n CONVERT_FORMAT_MAPPING = {\n \"0\": \"%b %d %Y %-I:%M%p\",\n \"1\": \"%m/%d/%y\",\n \"2\": \"%y.%m.%d\",\n \"3\": \"%d/%m/%y\",\n \"4\": \"%d.%m.%y\",\n \"5\": \"%d-%m-%y\",\n \"6\": \"%d %b %y\",\n \"7\": \"%b %d, %y\",\n \"8\": \"%H:%M:%S\",\n \"9\": \"%b %d %Y %-I:%M:%S:%f%p\",\n \"10\": \"mm-dd-yy\",\n \"11\": \"yy/mm/dd\",\n \"12\": \"yymmdd\",\n \"13\": \"%d %b %Y %H:%M:ss:%f\",\n \"14\": \"%H:%M:%S:%f\",\n \"20\": \"%Y-%m-%d %H:%M:%S\",\n \"21\": \"%Y-%m-%d %H:%M:%S.%f\",\n \"22\": \"%m/%d/%y %-I:%M:%S %p\",\n \"23\": \"%Y-%m-%d\",\n \"24\": \"%H:%M:%S\",\n \"25\": \"%Y-%m-%d %H:%M:%S.%f\",\n \"100\": \"%b %d %Y %-I:%M%p\",\n \"101\": \"%m/%d/%Y\",\n \"102\": \"%Y.%m.%d\",\n \"103\": \"%d/%m/%Y\",\n \"104\": \"%d.%m.%Y\",\n \"105\": \"%d-%m-%Y\",\n \"106\": \"%d %b %Y\",\n \"107\": \"%b %d, %Y\",\n \"108\": \"%H:%M:%S\",\n \"109\": \"%b %d %Y %-I:%M:%S:%f%p\",\n \"110\": \"%m-%d-%Y\",\n \"111\": \"%Y/%m/%d\",\n \"112\": \"%Y%m%d\",\n \"113\": \"%d %b %Y %H:%M:%S:%f\",\n \"114\": \"%H:%M:%S:%f\",\n \"120\": \"%Y-%m-%d %H:%M:%S\",\n \"121\": \"%Y-%m-%d %H:%M:%S.%f\",\n }\n\n FORMAT_TIME_MAPPING = {\n \"y\": \"%B %Y\",\n \"d\": \"%m/%d/%Y\",\n \"H\": \"%-H\",\n \"h\": \"%-I\",\n \"s\": \"%Y-%m-%d %H:%M:%S\",\n \"D\": \"%A,%B,%Y\",\n \"f\": \"%A,%B,%Y %-I:%M %p\",\n \"F\": \"%A,%B,%Y %-I:%M:%S %p\",\n \"g\": \"%m/%d/%Y %-I:%M %p\",\n \"G\": \"%m/%d/%Y %-I:%M:%S %p\",\n \"M\": \"%B %-d\",\n \"m\": \"%B %-d\",\n \"O\": \"%Y-%m-%dT%H:%M:%S\",\n \"u\": \"%Y-%M-%D %H:%M:%S%z\",\n \"U\": \"%A, %B %D, %Y %H:%M:%S%z\",\n \"T\": \"%-I:%M:%S %p\",\n \"t\": \"%-I:%M\",\n \"Y\": \"%a %Y\",\n }\n\n class Tokenizer(tokens.Tokenizer):\n IDENTIFIERS = [(\"[\", \"]\"), '\"']\n QUOTES = [\"'\", '\"']\n HEX_STRINGS = [(\"0x\", \"\"), (\"0X\", \"\")]\n VAR_SINGLE_TOKENS = {\"@\", \"$\", \"#\"}\n\n KEYWORDS = {\n **tokens.Tokenizer.KEYWORDS,\n \"DATETIME2\": TokenType.DATETIME,\n \"DATETIMEOFFSET\": TokenType.TIMESTAMPTZ,\n \"DECLARE\": TokenType.COMMAND,\n \"EXEC\": TokenType.COMMAND,\n \"IMAGE\": TokenType.IMAGE,\n \"MONEY\": TokenType.MONEY,\n \"NTEXT\": TokenType.TEXT,\n \"PRINT\": TokenType.COMMAND,\n \"PROC\": TokenType.PROCEDURE,\n \"REAL\": TokenType.FLOAT,\n \"ROWVERSION\": TokenType.ROWVERSION,\n \"SMALLDATETIME\": TokenType.DATETIME,\n \"SMALLMONEY\": TokenType.SMALLMONEY,\n \"SQL_VARIANT\": TokenType.VARIANT,\n \"TOP\": TokenType.TOP,\n \"TIMESTAMP\": TokenType.ROWVERSION,\n \"UNIQUEIDENTIFIER\": TokenType.UNIQUEIDENTIFIER,\n \"UPDATE STATISTICS\": TokenType.COMMAND,\n \"XML\": TokenType.XML,\n \"OUTPUT\": TokenType.RETURNING,\n \"SYSTEM_USER\": TokenType.CURRENT_USER,\n \"FOR SYSTEM_TIME\": TokenType.TIMESTAMP_SNAPSHOT,\n \"OPTION\": TokenType.OPTION,\n }\n\n class Parser(parser.Parser):\n SET_REQUIRES_ASSIGNMENT_DELIMITER = False\n LOG_DEFAULTS_TO_LN = True\n ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN = False\n STRING_ALIASES = True\n NO_PAREN_IF_COMMANDS = False\n\n QUERY_MODIFIER_PARSERS = {\n **parser.Parser.QUERY_MODIFIER_PARSERS,\n 
TokenType.OPTION: lambda self: (\"options\", self._parse_options()),\n }\n\n FUNCTIONS = {\n **parser.Parser.FUNCTIONS,\n \"CHARINDEX\": lambda args: exp.StrPosition(\n this=seq_get(args, 1),\n substr=seq_get(args, 0),\n position=seq_get(args, 2),\n ),\n \"DATEADD\": build_date_delta(exp.DateAdd, unit_mapping=DATE_DELTA_INTERVAL),\n \"DATEDIFF\": _build_date_delta(exp.DateDiff, unit_mapping=DATE_DELTA_INTERVAL),\n \"DATENAME\": _build_formatted_time(exp.TimeToStr, full_format_mapping=True),\n \"DATEPART\": _build_formatted_time(exp.TimeToStr),\n \"DATETIMEFROMPARTS\": _build_datetimefromparts,\n \"EOMONTH\": _build_eomonth,\n \"FORMAT\": _build_format,\n \"GETDATE\": exp.CurrentTimestamp.from_arg_list,\n \"HASHBYTES\": _build_hashbytes,\n \"ISNULL\": exp.Coalesce.from_arg_list,\n \"JSON_QUERY\": parser.build_extract_json_with_path(exp.JSONExtract),\n \"JSON_VALUE\": parser.build_extract_json_with_path(exp.JSONExtractScalar),\n \"LEN\": _build_with_arg_as_text(exp.Length),\n \"LEFT\": _build_with_arg_as_text(exp.Left),\n \"RIGHT\": _build_with_arg_as_text(exp.Right),\n \"REPLICATE\": exp.Repeat.from_arg_list,\n \"SQUARE\": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),\n \"SYSDATETIME\": exp.CurrentTimestamp.from_arg_list,\n \"SUSER_NAME\": exp.CurrentUser.from_arg_list,\n \"SUSER_SNAME\": exp.CurrentUser.from_arg_list,\n \"SYSTEM_USER\": exp.CurrentUser.from_arg_list,\n \"TIMEFROMPARTS\": _build_timefromparts,\n }\n\n JOIN_HINTS = {\"LOOP\", \"HASH\", \"MERGE\", \"REMOTE\"}\n\n RETURNS_TABLE_TOKENS = parser.Parser.ID_VAR_TOKENS - {\n TokenType.TABLE,\n *parser.Parser.TYPE_TOKENS,\n }\n\n STATEMENT_PARSERS = {\n **parser.Parser.STATEMENT_PARSERS,\n TokenType.END: lambda self: self._parse_command(),\n }\n\n def _parse_options(self) -> t.Optional[t.List[exp.Expression]]:\n if not self._match(TokenType.OPTION):\n return None\n\n def _parse_option() -> t.Optional[exp.Expression]:\n option = self._parse_var_from_options(OPTIONS)\n if not option:\n return None\n\n self._match(TokenType.EQ)\n return self.expression(\n exp.QueryOption, this=option, expression=self._parse_primary_or_var()\n )\n\n return self._parse_wrapped_csv(_parse_option)\n\n def _parse_projections(self) -> t.List[exp.Expression]:\n \"\"\"\n T-SQL supports the syntax alias = expression in the SELECT's projection list,\n so we transform all parsed Selects to convert their EQ projections into Aliases.\n\n See: https://learn.microsoft.com/en-us/sql/t-sql/queries/select-clause-transact-sql?view=sql-server-ver16#syntax\n \"\"\"\n return [\n (\n exp.alias_(projection.expression, projection.this.this, copy=False)\n if isinstance(projection, exp.EQ) and isinstance(projection.this, exp.Column)\n else projection\n )\n for projection in super()._parse_projections()\n ]\n\n def _parse_commit_or_rollback(self) -> exp.Commit | exp.Rollback:\n \"\"\"Applies to SQL Server and Azure SQL Database\n COMMIT [ { TRAN | TRANSACTION }\n [ transaction_name | @tran_name_variable ] ]\n [ WITH ( DELAYED_DURABILITY = { OFF | ON } ) ]\n\n ROLLBACK { TRAN | TRANSACTION }\n [ transaction_name | @tran_name_variable\n | savepoint_name | @savepoint_variable ]\n \"\"\"\n rollback = self._prev.token_type == TokenType.ROLLBACK\n\n self._match_texts((\"TRAN\", \"TRANSACTION\"))\n this = self._parse_id_var()\n\n if rollback:\n return self.expression(exp.Rollback, this=this)\n\n durability = None\n if self._match_pair(TokenType.WITH, TokenType.L_PAREN):\n self._match_text_seq(\"DELAYED_DURABILITY\")\n self._match(TokenType.EQ)\n\n if 
self._match_text_seq(\"OFF\"):\n durability = False\n else:\n self._match(TokenType.ON)\n durability = True\n\n self._match_r_paren()\n\n return self.expression(exp.Commit, this=this, durability=durability)\n\n def _parse_transaction(self) -> exp.Transaction | exp.Command:\n \"\"\"Applies to SQL Server and Azure SQL Database\n BEGIN { TRAN | TRANSACTION }\n [ { transaction_name | @tran_name_variable }\n [ WITH MARK [ 'description' ] ]\n ]\n \"\"\"\n if self._match_texts((\"TRAN\", \"TRANSACTION\")):\n transaction = self.expression(exp.Transaction, this=self._parse_id_var())\n if self._match_text_seq(\"WITH\", \"MARK\"):\n transaction.set(\"mark\", self._parse_string())\n\n return transaction\n\n return self._parse_as_command(self._prev)\n\n def _parse_returns(self) -> exp.ReturnsProperty:\n table = self._parse_id_var(any_token=False, tokens=self.RETURNS_TABLE_TOKENS)\n returns = super()._parse_returns()\n returns.set(\"table\", table)\n return returns\n\n def _parse_convert(\n self, strict: bool, safe: t.Optional[bool] = None\n ) -> t.Optional[exp.Expression]:\n this = self._parse_types()\n self._match(TokenType.COMMA)\n args = [this, *self._parse_csv(self._parse_conjunction)]\n convert = exp.Convert.from_arg_list(args)\n convert.set(\"safe\", safe)\n convert.set(\"strict\", strict)\n return convert\n\n def _parse_user_defined_function(\n self, kind: t.Optional[TokenType] = None\n ) -> t.Optional[exp.Expression]:\n this = super()._parse_user_defined_function(kind=kind)\n\n if (\n kind == TokenType.FUNCTION\n or isinstance(this, exp.UserDefinedFunction)\n or self._match(TokenType.ALIAS, advance=False)\n ):\n return this\n\n expressions = self._parse_csv(self._parse_function_parameter)\n return self.expression(exp.UserDefinedFunction, this=this, expressions=expressions)\n\n def _parse_id_var(\n self,\n any_token: bool = True,\n tokens: t.Optional[t.Collection[TokenType]] = None,\n ) -> t.Optional[exp.Expression]:\n is_temporary = self._match(TokenType.HASH)\n is_global = is_temporary and self._match(TokenType.HASH)\n\n this = super()._parse_id_var(any_token=any_token, tokens=tokens)\n if this:\n if is_global:\n this.set(\"global\", True)\n elif is_temporary:\n this.set(\"temporary\", True)\n\n return this\n\n def _parse_create(self) -> exp.Create | exp.Command:\n create = super()._parse_create()\n\n if isinstance(create, exp.Create):\n table = create.this.this if isinstance(create.this, exp.Schema) else create.this\n if isinstance(table, exp.Table) and table.this.args.get(\"temporary\"):\n if not create.args.get(\"properties\"):\n create.set(\"properties\", exp.Properties(expressions=[]))\n\n create.args[\"properties\"].append(\"expressions\", exp.TemporaryProperty())\n\n return create\n\n def _parse_if(self) -> t.Optional[exp.Expression]:\n index = self._index\n\n if self._match_text_seq(\"OBJECT_ID\"):\n self._parse_wrapped_csv(self._parse_string)\n if self._match_text_seq(\"IS\", \"NOT\", \"NULL\") and self._match(TokenType.DROP):\n return self._parse_drop(exists=True)\n self._retreat(index)\n\n return super()._parse_if()\n\n def _parse_unique(self) -> exp.UniqueColumnConstraint:\n if self._match_texts((\"CLUSTERED\", \"NONCLUSTERED\")):\n this = self.CONSTRAINT_PARSERS[self._prev.text.upper()](self)\n else:\n this = self._parse_schema(self._parse_id_var(any_token=False))\n\n return self.expression(exp.UniqueColumnConstraint, this=this)\n\n def _parse_partition(self) -> t.Optional[exp.Partition]:\n if not self._match_text_seq(\"WITH\", \"(\", \"PARTITIONS\"):\n return None\n\n def 
parse_range():\n low = self._parse_bitwise()\n high = self._parse_bitwise() if self._match_text_seq(\"TO\") else None\n\n return (\n self.expression(exp.PartitionRange, this=low, expression=high) if high else low\n )\n\n partition = self.expression(\n exp.Partition, expressions=self._parse_wrapped_csv(parse_range)\n )\n\n self._match_r_paren()\n\n return partition\n\n class Generator(generator.Generator):\n LIMIT_IS_TOP = True\n QUERY_HINTS = False\n RETURNING_END = False\n NVL2_SUPPORTED = False\n ALTER_TABLE_INCLUDE_COLUMN_KEYWORD = False\n LIMIT_FETCH = \"FETCH\"\n COMPUTED_COLUMN_WITH_TYPE = False\n CTE_RECURSIVE_KEYWORD_REQUIRED = False\n ENSURE_BOOLS = True\n NULL_ORDERING_SUPPORTED = None\n SUPPORTS_SINGLE_ARG_CONCAT = False\n TABLESAMPLE_SEED_KEYWORD = \"REPEATABLE\"\n SUPPORTS_SELECT_INTO = True\n JSON_PATH_BRACKETED_KEY_SUPPORTED = False\n SUPPORTS_TO_NUMBER = False\n OUTER_UNION_MODIFIERS = False\n COPY_PARAMS_EQ_REQUIRED = True\n\n EXPRESSIONS_WITHOUT_NESTED_CTES = {\n exp.Delete,\n exp.Insert,\n exp.Merge,\n exp.Select,\n exp.Subquery,\n exp.Union,\n exp.Update,\n }\n\n SUPPORTED_JSON_PATH_PARTS = {\n exp.JSONPathKey,\n exp.JSONPathRoot,\n exp.JSONPathSubscript,\n }\n\n TYPE_MAPPING = {\n **generator.Generator.TYPE_MAPPING,\n exp.DataType.Type.BOOLEAN: \"BIT\",\n exp.DataType.Type.DECIMAL: \"NUMERIC\",\n exp.DataType.Type.DATETIME: \"DATETIME2\",\n exp.DataType.Type.DOUBLE: \"FLOAT\",\n exp.DataType.Type.INT: \"INTEGER\",\n exp.DataType.Type.TEXT: \"VARCHAR(MAX)\",\n exp.DataType.Type.TIMESTAMP: \"DATETIME2\",\n exp.DataType.Type.TIMESTAMPTZ: \"DATETIMEOFFSET\",\n exp.DataType.Type.VARIANT: \"SQL_VARIANT\",\n exp.DataType.Type.ROWVERSION: \"ROWVERSION\",\n }\n\n TYPE_MAPPING.pop(exp.DataType.Type.NCHAR)\n TYPE_MAPPING.pop(exp.DataType.Type.NVARCHAR)\n\n TRANSFORMS = {\n **generator.Generator.TRANSFORMS,\n exp.AnyValue: any_value_to_max_sql,\n exp.ArrayToString: rename_func(\"STRING_AGG\"),\n exp.AutoIncrementColumnConstraint: lambda *_: \"IDENTITY\",\n exp.DateAdd: date_delta_sql(\"DATEADD\"),\n exp.DateDiff: date_delta_sql(\"DATEDIFF\"),\n exp.CTE: transforms.preprocess([qualify_derived_table_outputs]),\n exp.CurrentDate: rename_func(\"GETDATE\"),\n exp.CurrentTimestamp: rename_func(\"GETDATE\"),\n exp.DateStrToDate: datestrtodate_sql,\n exp.Extract: rename_func(\"DATEPART\"),\n exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,\n exp.GroupConcat: _string_agg_sql,\n exp.If: rename_func(\"IIF\"),\n exp.JSONExtract: _json_extract_sql,\n exp.JSONExtractScalar: _json_extract_sql,\n exp.LastDay: lambda self, e: self.func(\"EOMONTH\", e.this),\n exp.Max: max_or_greatest,\n exp.MD5: lambda self, e: self.func(\"HASHBYTES\", exp.Literal.string(\"MD5\"), e.this),\n exp.Min: min_or_least,\n exp.NumberToStr: _format_sql,\n exp.ParseJSON: lambda self, e: self.sql(e, \"this\"),\n exp.Select: transforms.preprocess(\n [\n transforms.eliminate_distinct_on,\n transforms.eliminate_semi_and_anti_joins,\n transforms.eliminate_qualify,\n ]\n ),\n exp.StrPosition: lambda self, e: self.func(\n \"CHARINDEX\", e.args.get(\"substr\"), e.this, e.args.get(\"position\")\n ),\n exp.Subquery: transforms.preprocess([qualify_derived_table_outputs]),\n exp.SHA: lambda self, e: self.func(\"HASHBYTES\", exp.Literal.string(\"SHA1\"), e.this),\n exp.SHA2: lambda self, e: self.func(\n \"HASHBYTES\", exp.Literal.string(f\"SHA2_{e.args.get('length', 256)}\"), e.this\n ),\n exp.TemporaryProperty: lambda self, e: \"\",\n exp.TimeStrToTime: timestrtotime_sql,\n exp.TimeToStr: 
_format_sql,\n exp.Trim: trim_sql,\n exp.TsOrDsAdd: date_delta_sql(\"DATEADD\", cast=True),\n exp.TsOrDsDiff: date_delta_sql(\"DATEDIFF\"),\n }\n\n TRANSFORMS.pop(exp.ReturnsProperty)\n\n PROPERTIES_LOCATION = {\n **generator.Generator.PROPERTIES_LOCATION,\n exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,\n }\n\n def select_sql(self, expression: exp.Select) -> str:\n if expression.args.get(\"offset\"):\n if not expression.args.get(\"order\"):\n # ORDER BY is required in order to use OFFSET in a query, so we use\n # a noop order by, since we don't really care about the order.\n # See: https://www.microsoftpressstore.com/articles/article.aspx?p=2314819\n expression.order_by(exp.select(exp.null()).subquery(), copy=False)\n\n limit = expression.args.get(\"limit\")\n if isinstance(limit, exp.Limit):\n # TOP and OFFSET can't be combined, we need use FETCH instead of TOP\n # we replace here because otherwise TOP would be generated in select_sql\n limit.replace(exp.Fetch(direction=\"FIRST\", count=limit.expression))\n\n return super().select_sql(expression)\n\n def convert_sql(self, expression: exp.Convert) -> str:\n name = \"TRY_CONVERT\" if expression.args.get(\"safe\") else \"CONVERT\"\n return self.func(\n name, expression.this, expression.expression, expression.args.get(\"style\")\n )\n\n def queryoption_sql(self, expression: exp.QueryOption) -> str:\n option = self.sql(expression, \"this\")\n value = self.sql(expression, \"expression\")\n if value:\n optional_equal_sign = \"= \" if option in OPTIONS_THAT_REQUIRE_EQUAL else \"\"\n return f\"{option} {optional_equal_sign}{value}\"\n return option\n\n def lateral_op(self, expression: exp.Lateral) -> str:\n cross_apply = expression.args.get(\"cross_apply\")\n if cross_apply is True:\n return \"CROSS APPLY\"\n if cross_apply is False:\n return \"OUTER APPLY\"\n\n # TODO: perhaps we can check if the parent is a Join and transpile it appropriately\n self.unsupported(\"LATERAL clause is not supported.\")\n return \"LATERAL\"\n\n def timefromparts_sql(self, expression: exp.TimeFromParts) -> str:\n nano = expression.args.get(\"nano\")\n if nano is not None:\n nano.pop()\n self.unsupported(\"Specifying nanoseconds is not supported in TIMEFROMPARTS.\")\n\n if expression.args.get(\"fractions\") is None:\n expression.set(\"fractions\", exp.Literal.number(0))\n if expression.args.get(\"precision\") is None:\n expression.set(\"precision\", exp.Literal.number(0))\n\n return rename_func(\"TIMEFROMPARTS\")(self, expression)\n\n def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:\n zone = expression.args.get(\"zone\")\n if zone is not None:\n zone.pop()\n self.unsupported(\"Time zone is not supported in DATETIMEFROMPARTS.\")\n\n nano = expression.args.get(\"nano\")\n if nano is not None:\n nano.pop()\n self.unsupported(\"Specifying nanoseconds is not supported in DATETIMEFROMPARTS.\")\n\n if expression.args.get(\"milli\") is None:\n expression.set(\"milli\", exp.Literal.number(0))\n\n return rename_func(\"DATETIMEFROMPARTS\")(self, expression)\n\n def setitem_sql(self, expression: exp.SetItem) -> str:\n this = expression.this\n if isinstance(this, exp.EQ) and not isinstance(this.left, exp.Parameter):\n # T-SQL does not use '=' in SET command, except when the LHS is a variable.\n return f\"{self.sql(this.left)} {self.sql(this.right)}\"\n\n return super().setitem_sql(expression)\n\n def boolean_sql(self, expression: exp.Boolean) -> str:\n if type(expression.parent) in BIT_TYPES:\n return \"1\" if expression.this else \"0\"\n\n 
return \"(1 = 1)\" if expression.this else \"(1 = 0)\"\n\n def is_sql(self, expression: exp.Is) -> str:\n if isinstance(expression.expression, exp.Boolean):\n return self.binary(expression, \"=\")\n return self.binary(expression, \"IS\")\n\n def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str:\n sql = self.sql(expression, \"this\")\n properties = expression.args.get(\"properties\")\n\n if sql[:1] != \"#\" and any(\n isinstance(prop, exp.TemporaryProperty)\n for prop in (properties.expressions if properties else [])\n ):\n sql = f\"[#{sql[1:]}\" if sql.startswith(\"[\") else f\"#{sql}\"\n\n return sql\n\n def create_sql(self, expression: exp.Create) -> str:\n kind = expression.kind\n exists = expression.args.pop(\"exists\", None)\n sql = super().create_sql(expression)\n\n like_property = expression.find(exp.LikeProperty)\n if like_property:\n ctas_expression = like_property.this\n else:\n ctas_expression = expression.expression\n\n table = expression.find(exp.Table)\n\n # Convert CTAS statement to SELECT .. INTO ..\n if kind == \"TABLE\" and ctas_expression:\n ctas_with = ctas_expression.args.get(\"with\")\n if ctas_with:\n ctas_with = ctas_with.pop()\n\n if isinstance(ctas_expression, exp.UNWRAPPED_QUERIES):\n ctas_expression = ctas_expression.subquery()\n\n select_into = exp.select(\"*\").from_(exp.alias_(ctas_expression, \"temp\", table=True))\n select_into.set(\"into\", exp.Into(this=table))\n select_into.set(\"with\", ctas_with)\n\n if like_property:\n select_into.limit(0, copy=False)\n\n sql = self.sql(select_into)\n\n if exists:\n identifier = self.sql(exp.Literal.string(exp.table_name(table) if table else \"\"))\n sql = self.sql(exp.Literal.string(sql))\n if kind == \"SCHEMA\":\n sql = f\"\"\"IF NOT EXISTS (SELECT * FROM information_schema.schemata WHERE schema_name = {identifier}) EXEC({sql})\"\"\"\n elif kind == \"TABLE\":\n assert table\n where = exp.and_(\n exp.column(\"table_name\").eq(table.name),\n exp.column(\"table_schema\").eq(table.db) if table.db else None,\n exp.column(\"table_catalog\").eq(table.catalog) if table.catalog else None,\n )\n sql = f\"\"\"IF NOT EXISTS (SELECT * FROM information_schema.tables WHERE {where}) EXEC({sql})\"\"\"\n elif kind == \"INDEX\":\n index = self.sql(exp.Literal.string(expression.this.text(\"this\")))\n sql = f\"\"\"IF NOT EXISTS (SELECT * FROM sys.indexes WHERE object_id = object_id({identifier}) AND name = {index}) EXEC({sql})\"\"\"\n elif expression.args.get(\"replace\"):\n sql = sql.replace(\"CREATE OR REPLACE \", \"CREATE OR ALTER \", 1)\n\n return self.prepend_ctes(expression, sql)\n\n def offset_sql(self, expression: exp.Offset) -> str:\n return f\"{super().offset_sql(expression)} ROWS\"\n\n def version_sql(self, expression: exp.Version) -> str:\n name = \"SYSTEM_TIME\" if expression.name == \"TIMESTAMP\" else expression.name\n this = f\"FOR {name}\"\n expr = expression.expression\n kind = expression.text(\"kind\")\n if kind in (\"FROM\", \"BETWEEN\"):\n args = expr.expressions\n sep = \"TO\" if kind == \"FROM\" else \"AND\"\n expr_sql = f\"{self.sql(seq_get(args, 0))} {sep} {self.sql(seq_get(args, 1))}\"\n else:\n expr_sql = self.sql(expr)\n\n expr_sql = f\" {expr_sql}\" if expr_sql else \"\"\n return f\"{this} {kind}{expr_sql}\"\n\n def returnsproperty_sql(self, expression: exp.ReturnsProperty) -> str:\n table = expression.args.get(\"table\")\n table = f\"{table} \" if table else \"\"\n return f\"RETURNS {table}{self.sql(expression, 'this')}\"\n\n def returning_sql(self, expression: exp.Returning) -> 
str:\n into = self.sql(expression, \"into\")\n into = self.seg(f\"INTO {into}\") if into else \"\"\n return f\"{self.seg('OUTPUT')} {self.expressions(expression, flat=True)}{into}\"\n\n def transaction_sql(self, expression: exp.Transaction) -> str:\n this = self.sql(expression, \"this\")\n this = f\" {this}\" if this else \"\"\n mark = self.sql(expression, \"mark\")\n mark = f\" WITH MARK {mark}\" if mark else \"\"\n return f\"BEGIN TRANSACTION{this}{mark}\"\n\n def commit_sql(self, expression: exp.Commit) -> str:\n this = self.sql(expression, \"this\")\n this = f\" {this}\" if this else \"\"\n durability = expression.args.get(\"durability\")\n durability = (\n f\" WITH (DELAYED_DURABILITY = {'ON' if durability else 'OFF'})\"\n if durability is not None\n else \"\"\n )\n return f\"COMMIT TRANSACTION{this}{durability}\"\n\n def rollback_sql(self, expression: exp.Rollback) -> str:\n this = self.sql(expression, \"this\")\n this = f\" {this}\" if this else \"\"\n return f\"ROLLBACK TRANSACTION{this}\"\n\n def identifier_sql(self, expression: exp.Identifier) -> str:\n identifier = super().identifier_sql(expression)\n\n if expression.args.get(\"global\"):\n identifier = f\"##{identifier}\"\n elif expression.args.get(\"temporary\"):\n identifier = f\"#{identifier}\"\n\n return identifier\n\n def constraint_sql(self, expression: exp.Constraint) -> str:\n this = self.sql(expression, \"this\")\n expressions = self.expressions(expression, flat=True, sep=\" \")\n return f\"CONSTRAINT {this} {expressions}\"\n\n def length_sql(self, expression: exp.Length) -> str:\n return self._uncast_text(expression, \"LEN\")\n\n def right_sql(self, expression: exp.Right) -> str:\n return self._uncast_text(expression, \"RIGHT\")\n\n def left_sql(self, expression: exp.Left) -> str:\n return self._uncast_text(expression, \"LEFT\")\n\n def _uncast_text(self, expression: exp.Expression, name: str) -> str:\n this = expression.this\n if isinstance(this, exp.Cast) and this.is_type(exp.DataType.Type.TEXT):\n this_sql = self.sql(this, \"this\")\n else:\n this_sql = self.sql(this)\n expression_sql = self.sql(expression, \"expression\")\n return self.func(name, this_sql, expression_sql if expression_sql else None)\n\n def partition_sql(self, expression: exp.Partition) -> str:\n return f\"WITH (PARTITIONS({self.expressions(expression, flat=True)}))\"\n\n def altertable_sql(self, expression: exp.AlterTable) -> str:\n action = seq_get(expression.args.get(\"actions\") or [], 0)\n if isinstance(action, exp.RenameTable):\n return f\"EXEC sp_rename '{self.sql(expression.this)}', '{action.this.name}'\"\n return super().altertable_sql(expression)\n",
"path": "sqlglot/dialects/tsql.py"
}
] | diff --git a/sqlglot/dialects/tsql.py b/sqlglot/dialects/tsql.py
index 54fc431874..2e8ddf21da 100644
--- a/sqlglot/dialects/tsql.py
+++ b/sqlglot/dialects/tsql.py
@@ -913,7 +913,7 @@ def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> st
isinstance(prop, exp.TemporaryProperty)
for prop in (properties.expressions if properties else [])
):
- sql = f"#{sql}"
+ sql = f"[#{sql[1:]}" if sql.startswith("[") else f"#{sql}"
return sql
diff --git a/tests/dialects/test_tsql.py b/tests/dialects/test_tsql.py
index 5550a40428..1538d47ae3 100644
--- a/tests/dialects/test_tsql.py
+++ b/tests/dialects/test_tsql.py
@@ -780,6 +780,14 @@ def test_ddl(self):
"CREATE PROCEDURE foo AS BEGIN DELETE FROM bla WHERE foo < CURRENT_TIMESTAMP - 7 END",
"CREATE PROCEDURE foo AS BEGIN DELETE FROM bla WHERE foo < GETDATE() - 7 END",
)
+
+ self.validate_all(
+ "CREATE TABLE [#temptest] (name VARCHAR)",
+ read={
+ "duckdb": "CREATE TEMPORARY TABLE 'temptest' (name VARCHAR)",
+ "tsql": "CREATE TABLE [#temptest] (name VARCHAR)",
+ },
+ )
self.validate_all(
"CREATE TABLE tbl (id INTEGER IDENTITY PRIMARY KEY)",
read={
|
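For illustration, the behavior that the new test above pins down (a hedged sketch, assuming a sqlglot build with this patch applied; it mirrors the added `validate_all` case rather than demonstrating a documented API guarantee):

```python
import sqlglot

# A quoted temporary-table identifier now keeps its brackets in T-SQL output:
# CREATE TABLE [#temptest] ... rather than the malformed #[temptest] produced
# by the old unconditional f"#{sql}" prefixing.
sql = sqlglot.transpile(
    "CREATE TEMPORARY TABLE 'temptest' (name VARCHAR)",
    read="duckdb",
    write="tsql",
)[0]
print(sql)  # CREATE TABLE [#temptest] (name VARCHAR), per the new test expectation
```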
cornellius-gp__gpytorch-2267 | [Bug] Runtime error for indices not on the same device when running VNNGP example
# 🐛 Bug
When running the VNNGP example, the call `output = model(x=None)` raises: `RuntimeError: indices should be either on cpu or on the same device as the indexed tensor (cpu)`.
## To reproduce
**Code snippet to reproduce**
Simply run 04_Variational_and_Approximate_GPs/VNNGP.ipynb
**Stack trace/error message**
```
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
Cell In[5], line 20
18 for i in minibatch_iter:
19 optimizer.zero_grad()
---> 20 output = model(x=None)
21 # Obtain the indices for mini-batch data
22 current_training_indices = model.variational_strategy.current_training_indices
Cell In[4], line 34, in GPModel.__call__(self, x, prior, **kwargs)
32 if x.dim() == 1:
33 x = x.unsqueeze(-1)
---> 34 return self.variational_strategy(x=x, prior=False, **kwargs)
File ~\AppData\Local\mambaforge\envs\torch\lib\site-packages\gpytorch\variational\nearest_neighbor_variational_strategy.py:129, in NNVariationalStrategy.__call__(self, x, prior, **kwargs)
127 if self.training:
128 self._clear_cache()
--> 129 return self.forward(x, self.inducing_points, None, None)
130 else:
131 # Ensure inducing_points and x are the same size
132 inducing_points = self.inducing_points
File ~\AppData\Local\mambaforge\envs\torch\lib\site-packages\gpytorch\variational\nearest_neighbor_variational_strategy.py:168, in NNVariationalStrategy.forward(self, x, inducing_points, inducing_values, variational_inducing_covar, **kwargs)
165 if torch.cuda.is_available():
166 kl_indices = kl_indices.cuda()
--> 168 kl = self._kl_divergence(kl_indices)
169 add_to_cache(self, "kl_divergence_memo", kl)
171 return MultivariateNormal(predictive_mean, DiagLinearOperator(predictive_var))
File ~\AppData\Local\mambaforge\envs\torch\lib\site-packages\gpytorch\variational\nearest_neighbor_variational_strategy.py:325, in NNVariationalStrategy._kl_divergence(self, kl_indices, compute_full, batch_size)
323 kl = self._firstk_kl_helper() * self.M / self.k
324 else:
--> 325 kl = self._stochastic_kl_helper(kl_indices) * self.M / len(kl_indices)
326 return kl
File ~\AppData\Local\mambaforge\envs\torch\lib\site-packages\gpytorch\variational\nearest_neighbor_variational_strategy.py:263, in NNVariationalStrategy._stochastic_kl_helper(self, kl_indices)
261 # Select a mini-batch of inducing points according to kl_indices, and their k-nearest neighbors
262 inducing_points = self.inducing_points[..., kl_indices, :]
--> 263 nearest_neighbor_indices = self.nn_xinduce_idx[..., kl_indices - self.k, :].to(inducing_points.device)
264 expanded_inducing_points_all = self.inducing_points.unsqueeze(-2).expand(
265 *self._inducing_batch_shape, self.M, self.k, self.D
266 )
267 expanded_nearest_neighbor_indices = nearest_neighbor_indices.unsqueeze(-1).expand(
268 *self._inducing_batch_shape, kl_bs, self.k, self.D
269 )
RuntimeError: indices should be either on cpu or on the same device as the indexed tensor (cpu)
```
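
The same device mismatch can be reproduced standalone, independent of GPyTorch (a minimal sketch, assuming a CUDA-capable machine):

```python
import torch

t = torch.arange(6).view(3, 2)             # indexed tensor stays on the CPU
idx = torch.tensor([0, 2], device="cuda")  # index tensor lives on the GPU

t[idx]  # RuntimeError: indices should be either on cpu or on the
        # same device as the indexed tensor (cpu)
```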
## Expected Behavior
No error
## System information
- GPyTorch Version: 1.9.1
- PyTorch Version: 1.13.1
- Computer OS: Windows 10 with GPU set up
## Additional context
The `.mat` data file used by the notebook must be downloaded manually via a web browser.
| [
{
"content": "import warnings\n\nimport torch\nfrom torch.nn import Module\n\n\nclass NNUtil(Module):\n r\"\"\"\n Utility for nearest neighbor search. It would first try to use `faiss`_ (requiring separate pacakge installment)\n as the backend for better computational performance. Otherwise, `scikit-learn` would be used as it is pre-installed\n with gpytorch.\n\n :param int k: number of nearest neighbors\n :param int dim: dimensionality of data\n :param torch.Size batch_shape: batch shape for train data\n :param str preferred_nnlib: currently supports `faiss` and `scikit-learn` (default: faiss).\n :param torch.device device: device that the NN search will be performed on.\n\n Example:\n >>> train_x = torch.randn(10, 5)\n >>> nn_util = NNUtil(k=3, dim=train_x.size(-1), device=train_x.device)\n >>> nn_util.set_nn_idx(train_x)\n >>> test_x = torch.randn(2, 5)\n >>> test_nn_indices = nn_util.find_nn_idx(test_x) # finding 3 nearest neighbors for test_x\n >>> test_nn_indices = nn_util.find_nn_idx(test_x, k=2) # finding 2 nearest neighbors for test_x\n >>> sequential_nn_idx = nn_util.build_sequential_nn_idx(train_x) # build up sequential nearest neighbor\n >>> # structure for train_x\n\n .. _faiss:\n https://github.com/facebookresearch/faiss\n \"\"\"\n\n def __init__(self, k, dim, batch_shape=torch.Size([]), preferred_nnlib=\"faiss\", device=\"cpu\"):\n super().__init__()\n assert k > 0, f\"k must be greater than 0, but got k = {k}.\"\n self.k = k\n self.dim = dim\n if not isinstance(batch_shape, torch.Size):\n raise RuntimeError(f\"batch_shape must be an instance of torch.Size, but got {type(batch_shape)}\")\n self.batch_shape = batch_shape\n\n self.train_n = None\n\n if preferred_nnlib == \"faiss\":\n try:\n import faiss\n import faiss.contrib.torch_utils # noqa F401\n\n self.nnlib = \"faiss\"\n self.cpu() # Initializes the index\n\n except ImportError:\n warnings.warn(\n \"Tried to import faiss, but failed. Falling back to scikit-learn nearest neighbor search.\",\n ImportWarning,\n )\n self.nnlib = \"sklearn\"\n self.train_neighbors = None\n\n else:\n self.nnlib = \"sklearn\"\n self.train_neighbors = None\n\n self.to(device)\n\n def cuda(self, device=None):\n super().cuda(device=device)\n if self.nnlib == \"faiss\":\n from faiss import GpuIndexFlatL2, StandardGpuResources\n\n self.res = StandardGpuResources()\n self.index = [GpuIndexFlatL2(self.res, self.dim) for _ in range(self.batch_shape.numel())]\n return self\n\n def cpu(self):\n super().cpu()\n if self.nnlib == \"faiss\":\n from faiss import IndexFlatL2\n\n self.res = None\n self.index = [IndexFlatL2(self.dim) for _ in range(self.batch_shape.numel())]\n return self\n\n def find_nn_idx(self, test_x, k=None):\n \"\"\"\n Find :math:`k` nearest neighbors for test data `test_x` among the training data stored in this utility\n\n :param test_x: test data, shape (... x N x D)\n :param int k: number of nearest neighbors. 
Default is the value used in utility initialization.\n :rtype: torch.LongTensor\n :return: the indices of nearest neighbors in the training data\n \"\"\"\n\n assert self.train_n is not None, \"Please initialize with training data first.\"\n if k is None:\n k = self.k\n else:\n assert k > 0, f\"k must be greater than 0, but got k = {k}.\"\n assert k <= self.train_n, (\n f\"k should be smaller than number of train data, \"\n f\"but got k = {k}, number of train data = {self.train_n}.\"\n )\n\n test_x = self._expand_and_check_shape(test_x)\n\n test_n = test_x.shape[-2]\n test_x = test_x.view(-1, test_n, self.dim)\n nn_idx = torch.empty(self.batch_shape.numel(), test_n, k, dtype=torch.int64, device=test_x.device)\n\n with torch.no_grad():\n if self.nnlib == \"sklearn\":\n if self.train_neighbors is None:\n raise RuntimeError(\"The nearest neighbor set has not been defined. First call `set_nn_idx`\")\n\n for i in range(self.batch_shape.numel()):\n nn_idx_i = torch.from_numpy(self.train_neighbors[i].kneighbors(test_x[i].cpu().numpy())[1][..., :k])\n nn_idx[i] = nn_idx_i.long().to(test_x.device)\n else:\n\n for i in range(self.batch_shape.numel()):\n nn_idx[i] = self.index[i].search(test_x[i], k)[1]\n\n nn_idx = nn_idx.view(*self.batch_shape, test_n, k)\n return nn_idx\n\n def set_nn_idx(self, train_x):\n \"\"\"\n Set the indices of training data to facilitate nearest neighbor search.\n This function needs to be called every time that the data changes.\n\n :param torch.Tensor train_x: training data points (... x N x D)\n \"\"\"\n train_x = self._expand_and_check_shape(train_x)\n self.train_n = train_x.shape[-2]\n\n with torch.no_grad():\n if self.nnlib == \"sklearn\":\n self.train_neighbors = []\n\n from sklearn.neighbors import NearestNeighbors\n\n train_x = train_x.view(-1, self.train_n, self.dim)\n\n for i in range(self.batch_shape.numel()):\n x = train_x[i].cpu().numpy()\n self.train_neighbors.append(NearestNeighbors(n_neighbors=self.k, algorithm=\"auto\").fit(x))\n elif self.nnlib == \"faiss\":\n train_x = train_x.view(-1, self.train_n, self.dim)\n for i in range(self.batch_shape.numel()):\n self.index[i].reset()\n self.index[i].add(train_x[i])\n\n def build_sequential_nn_idx(self, x):\n r\"\"\"\n Build the sequential :math:`k` nearest neighbor structure within training data in the following way:\n for the :math:`i`-th data point :math:`x_i`, find its :math:`k` nearest neighbors among preceding\n training data :math:`x_1, \\cdots, x_{i-1}`, for `i=k+1:N` where `N` is the size of training data.\n\n :param x: training data. Shape `(N, D)`\n :rtype: torch.LongTensor\n :return: indices of nearest neighbors. 
Shape: `(N-k, k)`\n \"\"\"\n x = self._expand_and_check_shape(x)\n N = x.shape[-2]\n assert self.k < N, f\"k should be smaller than number of data, but got k = {self.k}, number of data = {N}.\"\n\n nn_idx = torch.empty(self.batch_shape.numel(), N - self.k, self.k, dtype=torch.int64)\n x_np = x.view(-1, N, self.dim).data.float().cpu().numpy()\n\n if self.nnlib == \"faiss\":\n from faiss import IndexFlatL2\n\n # building nearest neighbor structure within inducing points\n index = IndexFlatL2(self.dim)\n with torch.no_grad():\n if self.res is not None:\n from faiss import index_cpu_to_gpu\n\n index = index_cpu_to_gpu(self.res, 0, index)\n\n for bi in range(self.batch_shape.numel()):\n index.reset()\n index.add(x_np[bi][: self.k])\n for i in range(self.k, N):\n row = x_np[bi][i][None, :]\n nn_idx[bi][i - self.k].copy_(\n torch.from_numpy(index.search(row, self.k)[1][..., 0, :]).long().to(x.device)\n )\n index.add(row)\n\n else:\n assert self.nnlib == \"sklearn\"\n from sklearn.neighbors import NearestNeighbors\n\n for bi in range(self.batch_shape.numel()):\n # finding k nearest neighbors in the first k\n for i in range(self.k, N):\n\n train_neighbors = NearestNeighbors(n_neighbors=self.k, algorithm=\"auto\").fit(x_np[bi][:i])\n nn_idx_i = torch.from_numpy(\n train_neighbors.kneighbors(\n x_np[bi][i][\n None,\n ]\n )[1]\n ).squeeze()\n\n nn_idx[bi][i - self.k].copy_(nn_idx_i)\n nn_idx = nn_idx.view(*self.batch_shape, N - self.k, self.k)\n return nn_idx\n\n def to(self, device):\n \"\"\"\n Put the utility to a cpu or gpu device.\n\n :param torch.device device: Target device.\n \"\"\"\n if str(device) == \"cpu\":\n return self.cpu()\n elif \"cuda\" in str(device):\n return self.cuda()\n else:\n raise ValueError(f\"Unknown device {device}\")\n\n def _expand_and_check_shape(self, x):\n if len(x.shape) == 1:\n x = x.unsqueeze(-1)\n assert x.shape[:-2] == self.batch_shape, (\n f\"x's batch shape must be equal to self.batch_shape, \"\n f\"but got x's batch shape={x.shape[:-2]}, self.batch_shape={self.batch_shape}.\"\n )\n assert x.shape[-1] == self.dim, (\n f\"x's dim must be equal to self.dim, \" f\"but got x's dim = {x.shape[-1]}, self.dim = {self.dim}\"\n )\n return x\n",
"path": "gpytorch/utils/nearest_neighbors.py"
}
] | [
{
"content": "import warnings\n\nimport torch\nfrom torch.nn import Module\n\n\nclass NNUtil(Module):\n r\"\"\"\n Utility for nearest neighbor search. It would first try to use `faiss`_ (requiring separate pacakge installment)\n as the backend for better computational performance. Otherwise, `scikit-learn` would be used as it is pre-installed\n with gpytorch.\n\n :param int k: number of nearest neighbors\n :param int dim: dimensionality of data\n :param torch.Size batch_shape: batch shape for train data\n :param str preferred_nnlib: currently supports `faiss` and `scikit-learn` (default: faiss).\n :param torch.device device: device that the NN search will be performed on.\n\n Example:\n >>> train_x = torch.randn(10, 5)\n >>> nn_util = NNUtil(k=3, dim=train_x.size(-1), device=train_x.device)\n >>> nn_util.set_nn_idx(train_x)\n >>> test_x = torch.randn(2, 5)\n >>> test_nn_indices = nn_util.find_nn_idx(test_x) # finding 3 nearest neighbors for test_x\n >>> test_nn_indices = nn_util.find_nn_idx(test_x, k=2) # finding 2 nearest neighbors for test_x\n >>> sequential_nn_idx = nn_util.build_sequential_nn_idx(train_x) # build up sequential nearest neighbor\n >>> # structure for train_x\n\n .. _faiss:\n https://github.com/facebookresearch/faiss\n \"\"\"\n\n def __init__(self, k, dim, batch_shape=torch.Size([]), preferred_nnlib=\"faiss\", device=\"cpu\"):\n super().__init__()\n assert k > 0, f\"k must be greater than 0, but got k = {k}.\"\n self.k = k\n self.dim = dim\n if not isinstance(batch_shape, torch.Size):\n raise RuntimeError(f\"batch_shape must be an instance of torch.Size, but got {type(batch_shape)}\")\n self.batch_shape = batch_shape\n\n self.train_n = None\n\n if preferred_nnlib == \"faiss\":\n try:\n import faiss\n import faiss.contrib.torch_utils # noqa F401\n\n self.nnlib = \"faiss\"\n self.cpu() # Initializes the index\n\n except ImportError:\n warnings.warn(\n \"Tried to import faiss, but failed. Falling back to scikit-learn nearest neighbor search.\",\n ImportWarning,\n )\n self.nnlib = \"sklearn\"\n self.train_neighbors = None\n\n else:\n self.nnlib = \"sklearn\"\n self.train_neighbors = None\n\n self.to(device)\n\n def cuda(self, device=None):\n super().cuda(device=device)\n if self.nnlib == \"faiss\":\n from faiss import GpuIndexFlatL2, StandardGpuResources\n\n self.res = StandardGpuResources()\n self.index = [GpuIndexFlatL2(self.res, self.dim) for _ in range(self.batch_shape.numel())]\n return self\n\n def cpu(self):\n super().cpu()\n if self.nnlib == \"faiss\":\n from faiss import IndexFlatL2\n\n self.res = None\n self.index = [IndexFlatL2(self.dim) for _ in range(self.batch_shape.numel())]\n return self\n\n def find_nn_idx(self, test_x, k=None):\n \"\"\"\n Find :math:`k` nearest neighbors for test data `test_x` among the training data stored in this utility\n\n :param test_x: test data, shape (... x N x D)\n :param int k: number of nearest neighbors. 
Default is the value used in utility initialization.\n :rtype: torch.LongTensor\n :return: the indices of nearest neighbors in the training data\n \"\"\"\n\n assert self.train_n is not None, \"Please initialize with training data first.\"\n if k is None:\n k = self.k\n else:\n assert k > 0, f\"k must be greater than 0, but got k = {k}.\"\n assert k <= self.train_n, (\n f\"k should be smaller than number of train data, \"\n f\"but got k = {k}, number of train data = {self.train_n}.\"\n )\n\n test_x = self._expand_and_check_shape(test_x)\n\n test_n = test_x.shape[-2]\n test_x = test_x.view(-1, test_n, self.dim)\n nn_idx = torch.empty(self.batch_shape.numel(), test_n, k, dtype=torch.int64, device=test_x.device)\n\n with torch.no_grad():\n if self.nnlib == \"sklearn\":\n if self.train_neighbors is None:\n raise RuntimeError(\"The nearest neighbor set has not been defined. First call `set_nn_idx`\")\n\n for i in range(self.batch_shape.numel()):\n nn_idx_i = torch.from_numpy(self.train_neighbors[i].kneighbors(test_x[i].cpu().numpy())[1][..., :k])\n nn_idx[i] = nn_idx_i.long().to(test_x.device)\n else:\n\n for i in range(self.batch_shape.numel()):\n nn_idx[i] = self.index[i].search(test_x[i], k)[1]\n\n nn_idx = nn_idx.view(*self.batch_shape, test_n, k)\n return nn_idx\n\n def set_nn_idx(self, train_x):\n \"\"\"\n Set the indices of training data to facilitate nearest neighbor search.\n This function needs to be called every time that the data changes.\n\n :param torch.Tensor train_x: training data points (... x N x D)\n \"\"\"\n train_x = self._expand_and_check_shape(train_x)\n self.train_n = train_x.shape[-2]\n\n with torch.no_grad():\n if self.nnlib == \"sklearn\":\n self.train_neighbors = []\n\n from sklearn.neighbors import NearestNeighbors\n\n train_x = train_x.view(-1, self.train_n, self.dim)\n\n for i in range(self.batch_shape.numel()):\n x = train_x[i].cpu().numpy()\n self.train_neighbors.append(NearestNeighbors(n_neighbors=self.k, algorithm=\"auto\").fit(x))\n elif self.nnlib == \"faiss\":\n train_x = train_x.view(-1, self.train_n, self.dim)\n for i in range(self.batch_shape.numel()):\n self.index[i].reset()\n self.index[i].add(train_x[i])\n\n def build_sequential_nn_idx(self, x):\n r\"\"\"\n Build the sequential :math:`k` nearest neighbor structure within training data in the following way:\n for the :math:`i`-th data point :math:`x_i`, find its :math:`k` nearest neighbors among preceding\n training data :math:`x_1, \\cdots, x_{i-1}`, for `i=k+1:N` where `N` is the size of training data.\n\n :param x: training data. Shape `(N, D)`\n :rtype: torch.LongTensor\n :return: indices of nearest neighbors. 
Shape: `(N-k, k)`\n \"\"\"\n x = self._expand_and_check_shape(x)\n N = x.shape[-2]\n assert self.k < N, f\"k should be smaller than number of data, but got k = {self.k}, number of data = {N}.\"\n\n nn_idx = torch.empty(self.batch_shape.numel(), N - self.k, self.k, dtype=torch.int64)\n x_np = x.view(-1, N, self.dim).data.float().cpu().numpy()\n\n if self.nnlib == \"faiss\":\n from faiss import IndexFlatL2\n\n # building nearest neighbor structure within inducing points\n index = IndexFlatL2(self.dim)\n with torch.no_grad():\n if self.res is not None:\n from faiss import index_cpu_to_gpu\n\n index = index_cpu_to_gpu(self.res, 0, index)\n\n for bi in range(self.batch_shape.numel()):\n index.reset()\n index.add(x_np[bi][: self.k])\n for i in range(self.k, N):\n row = x_np[bi][i][None, :]\n nn_idx[bi][i - self.k].copy_(\n torch.from_numpy(index.search(row, self.k)[1][..., 0, :]).long().to(x.device)\n )\n index.add(row)\n\n else:\n assert self.nnlib == \"sklearn\"\n from sklearn.neighbors import NearestNeighbors\n\n for bi in range(self.batch_shape.numel()):\n # finding k nearest neighbors in the first k\n for i in range(self.k, N):\n\n train_neighbors = NearestNeighbors(n_neighbors=self.k, algorithm=\"auto\").fit(x_np[bi][:i])\n nn_idx_i = torch.from_numpy(\n train_neighbors.kneighbors(\n x_np[bi][i][\n None,\n ]\n )[1]\n ).squeeze()\n\n nn_idx[bi][i - self.k].copy_(nn_idx_i)\n nn_idx = nn_idx.view(*self.batch_shape, N - self.k, self.k).to(device=x.device)\n return nn_idx\n\n def to(self, device):\n \"\"\"\n Put the utility to a cpu or gpu device.\n\n :param torch.device device: Target device.\n \"\"\"\n if str(device) == \"cpu\":\n return self.cpu()\n elif \"cuda\" in str(device):\n return self.cuda()\n else:\n raise ValueError(f\"Unknown device {device}\")\n\n def _expand_and_check_shape(self, x):\n if len(x.shape) == 1:\n x = x.unsqueeze(-1)\n assert x.shape[:-2] == self.batch_shape, (\n f\"x's batch shape must be equal to self.batch_shape, \"\n f\"but got x's batch shape={x.shape[:-2]}, self.batch_shape={self.batch_shape}.\"\n )\n assert x.shape[-1] == self.dim, (\n f\"x's dim must be equal to self.dim, \" f\"but got x's dim = {x.shape[-1]}, self.dim = {self.dim}\"\n )\n return x\n",
"path": "gpytorch/utils/nearest_neighbors.py"
}
] | diff --git a/gpytorch/utils/nearest_neighbors.py b/gpytorch/utils/nearest_neighbors.py
index 900d68614..e5d350394 100644
--- a/gpytorch/utils/nearest_neighbors.py
+++ b/gpytorch/utils/nearest_neighbors.py
@@ -206,7 +206,7 @@ def build_sequential_nn_idx(self, x):
).squeeze()
nn_idx[bi][i - self.k].copy_(nn_idx_i)
- nn_idx = nn_idx.view(*self.batch_shape, N - self.k, self.k)
+ nn_idx = nn_idx.view(*self.batch_shape, N - self.k, self.k).to(device=x.device)
return nn_idx
def to(self, device):
|
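For illustration, the device contract this patch restores (a sketch adapted from the `NNUtil` docstring example, assuming a CUDA device and a gpytorch build with this fix applied):

```python
import torch
from gpytorch.utils.nearest_neighbors import NNUtil

train_x = torch.randn(10, 5, device="cuda")
nn_util = NNUtil(k=3, dim=train_x.size(-1), device=train_x.device)
nn_util.set_nn_idx(train_x)

seq_idx = nn_util.build_sequential_nn_idx(train_x)
# Previously the sequential indices came back on the CPU regardless of
# x's device, triggering the mixed-device indexing error during training.
assert seq_idx.device == train_x.device
```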
pantsbuild__pants-6037 | contrib go plugin not able to recognize meta tag if meta ends with />
The regex only recognizes `<meta xxxxxxxxxxxxx >` but not the self-closing form `<meta xxxxxxxxxx />`.
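For illustration, a simplified stand-in for the pattern (not the plugin's exact regex, which also captures the root/vcs/url fields) showing that allowing an optional `/` before the closing `>` accepts both forms:

```python
import re

# Simplified stand-in; the trailing /?> is the essence of the fix.
META_RE = re.compile(r"""<meta\s+name=['"]go-import['"]\s+content=['"][^'"]+['"]\s*/?>""")

assert META_RE.search('<meta name="go-import" content="example.org git https://example.org/r" >')
assert META_RE.search('<meta name="go-import" content="example.org git https://example.org/r" />')
```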
| [
{
"content": "# coding=utf-8\n# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport re\n\nimport requests\nfrom pants.subsystem.subsystem import Subsystem\nfrom pants.util.memo import memoized_method\n\nfrom pants.contrib.go.subsystems.imported_repo import ImportedRepo\n\n\nclass GoImportMetaTagReader(Subsystem):\n \"\"\"Implements a reader for the <meta name=\"go-import\"> protocol.\n\n See https://golang.org/cmd/go/#hdr-Remote_import_paths .\n \"\"\"\n options_scope = 'go-import-metatag-reader'\n\n @classmethod\n def register_options(cls, register):\n super(GoImportMetaTagReader, cls).register_options(register)\n register('--retries', type=int, default=1, advanced=True,\n help='How many times to retry when fetching meta tags.')\n\n _META_IMPORT_REGEX = re.compile(r\"\"\"\n <meta\n \\s+\n name=['\"]go-import['\"]\n \\s+\n content=['\"](?P<root>[^\\s]+)\\s+(?P<vcs>[^\\s]+)\\s+(?P<url>[^\\s]+)['\"]\n \\s*\n >\"\"\", flags=re.VERBOSE)\n\n @classmethod\n def find_meta_tags(cls, page_html):\n \"\"\"Returns the content of the meta tag if found inside of the provided HTML.\"\"\"\n\n return cls._META_IMPORT_REGEX.findall(page_html)\n\n @memoized_method\n def get_imported_repo(self, import_path):\n \"\"\"Looks for a go-import meta tag for the provided import_path.\n\n Returns an ImportedRepo instance with the information in the meta tag,\n or None if no go-import meta tag is found.\n \"\"\"\n try:\n session = requests.session()\n # TODO: Support https with (optional) fallback to http, as Go does.\n # See https://github.com/pantsbuild/pants/issues/3503.\n session.mount(\"http://\",\n requests.adapters.HTTPAdapter(max_retries=self.get_options().retries))\n page_data = session.get('http://{import_path}?go-get=1'.format(import_path=import_path))\n except requests.ConnectionError:\n return None\n\n if not page_data:\n return None\n\n # Return the first match, rather than doing some kind of longest prefix search.\n # Hopefully no one returns multiple valid go-import meta tags.\n for (root, vcs, url) in self.find_meta_tags(page_data.text):\n if root and vcs and url:\n # Check to make sure returned root is an exact match to the provided import path. If it is\n # not then run a recursive check on the returned and return the values provided by that call.\n if root == import_path:\n return ImportedRepo(root, vcs, url)\n elif import_path.startswith(root):\n return self.get_imported_repo(root)\n\n return None\n",
"path": "contrib/go/src/python/pants/contrib/go/subsystems/go_import_meta_tag_reader.py"
}
] | [
{
"content": "# coding=utf-8\n# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport re\n\nimport requests\nfrom pants.subsystem.subsystem import Subsystem\nfrom pants.util.memo import memoized_method\n\nfrom pants.contrib.go.subsystems.imported_repo import ImportedRepo\n\n\nclass GoImportMetaTagReader(Subsystem):\n \"\"\"Implements a reader for the <meta name=\"go-import\"> protocol.\n\n See https://golang.org/cmd/go/#hdr-Remote_import_paths .\n \"\"\"\n options_scope = 'go-import-metatag-reader'\n\n @classmethod\n def register_options(cls, register):\n super(GoImportMetaTagReader, cls).register_options(register)\n register('--retries', type=int, default=1, advanced=True,\n help='How many times to retry when fetching meta tags.')\n\n _META_IMPORT_REGEX = re.compile(r\"\"\"\n <meta\n \\s+\n name=['\"]go-import['\"]\n \\s+\n content=['\"](?P<root>[^\\s]+)\\s+(?P<vcs>[^\\s]+)\\s+(?P<url>[^\\s]+)['\"]\n \\s*\n /?>\"\"\", flags=re.VERBOSE)\n\n @classmethod\n def find_meta_tags(cls, page_html):\n \"\"\"Returns the content of the meta tag if found inside of the provided HTML.\"\"\"\n\n return cls._META_IMPORT_REGEX.findall(page_html)\n\n @memoized_method\n def get_imported_repo(self, import_path):\n \"\"\"Looks for a go-import meta tag for the provided import_path.\n\n Returns an ImportedRepo instance with the information in the meta tag,\n or None if no go-import meta tag is found.\n \"\"\"\n try:\n session = requests.session()\n # TODO: Support https with (optional) fallback to http, as Go does.\n # See https://github.com/pantsbuild/pants/issues/3503.\n session.mount(\"http://\",\n requests.adapters.HTTPAdapter(max_retries=self.get_options().retries))\n page_data = session.get('http://{import_path}?go-get=1'.format(import_path=import_path))\n except requests.ConnectionError:\n return None\n\n if not page_data:\n return None\n\n # Return the first match, rather than doing some kind of longest prefix search.\n # Hopefully no one returns multiple valid go-import meta tags.\n for (root, vcs, url) in self.find_meta_tags(page_data.text):\n if root and vcs and url:\n # Check to make sure returned root is an exact match to the provided import path. If it is\n # not then run a recursive check on the returned and return the values provided by that call.\n if root == import_path:\n return ImportedRepo(root, vcs, url)\n elif import_path.startswith(root):\n return self.get_imported_repo(root)\n\n return None\n",
"path": "contrib/go/src/python/pants/contrib/go/subsystems/go_import_meta_tag_reader.py"
}
] | diff --git a/contrib/go/src/python/pants/contrib/go/subsystems/go_import_meta_tag_reader.py b/contrib/go/src/python/pants/contrib/go/subsystems/go_import_meta_tag_reader.py
index 82259bf25d6..f50188f1357 100644
--- a/contrib/go/src/python/pants/contrib/go/subsystems/go_import_meta_tag_reader.py
+++ b/contrib/go/src/python/pants/contrib/go/subsystems/go_import_meta_tag_reader.py
@@ -34,7 +34,7 @@ def register_options(cls, register):
\s+
content=['"](?P<root>[^\s]+)\s+(?P<vcs>[^\s]+)\s+(?P<url>[^\s]+)['"]
\s*
- >""", flags=re.VERBOSE)
+ /?>""", flags=re.VERBOSE)
@classmethod
def find_meta_tags(cls, page_html):
diff --git a/contrib/go/tests/python/pants_test/contrib/go/subsystems/test_go_import_meta_tag_reader.py b/contrib/go/tests/python/pants_test/contrib/go/subsystems/test_go_import_meta_tag_reader.py
index a6fc0ec17c8..440478e7089 100644
--- a/contrib/go/tests/python/pants_test/contrib/go/subsystems/test_go_import_meta_tag_reader.py
+++ b/contrib/go/tests/python/pants_test/contrib/go/subsystems/test_go_import_meta_tag_reader.py
@@ -105,3 +105,24 @@ def test_no_meta_tag(self):
meta_tag_content = GoImportMetaTagReader.find_meta_tags(test_html)
self.assertEqual(meta_tag_content, [])
+
+ def test_meta_tag_end_with_forward_slash(self):
+ test_html = """
+ <!DOCTYPE html>
+ <html>
+ <head>
+ <meta name="go-import"
+ content="google.golang.org/notapi
+ git
+ https://code.googlesource.com/google-notapi-go-client" />
+ </head>
+ <body>
+ Nothing to see here.
+ Please <a href="https://godoc.org/google.golang.org/api/googleapi">move along</a>.
+ </body>
+ </html>
+ """
+
+ meta_tag_content = GoImportMetaTagReader.find_meta_tags(test_html)
+ self.assertEqual(meta_tag_content, [('google.golang.org/notapi', 'git',
+ 'https://code.googlesource.com/google-notapi-go-client')])
|
hydroshare__hydroshare-2616 | [1.15 HOTFIX] User identifier data is not getting copied over to the first creator of the resource
| [
{
"content": "from __future__ import absolute_import\n\nimport mimetypes\nimport os\nimport tempfile\nimport logging\nimport shutil\nimport string\nimport copy\nfrom uuid import uuid4\nimport errno\n\nfrom django.apps import apps\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.timezone import now\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError\nfrom django.contrib.auth.models import User, Group\nfrom django.core.files import File\nfrom django.core.files.uploadedfile import UploadedFile\nfrom django.core.files.storage import DefaultStorage\nfrom django.core.validators import validate_email\n\nfrom mezzanine.conf import settings\n\nfrom hs_core.signals import pre_create_resource, post_create_resource, pre_add_files_to_resource, \\\n post_add_files_to_resource\nfrom hs_core.models import AbstractResource, BaseResource, ResourceFile\nfrom hs_core.hydroshare.hs_bagit import create_bag_files\n\nfrom django_irods.icommands import SessionException\nfrom django_irods.storage import IrodsStorage\nfrom theme.models import QuotaMessage\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ResourceFileSizeException(Exception):\n pass\n\n\nclass ResourceFileValidationException(Exception):\n pass\n\n\nclass QuotaException(Exception):\n pass\n\n\ndef get_resource_types():\n resource_types = []\n for model in apps.get_models():\n if issubclass(model, AbstractResource) and model != BaseResource:\n if not getattr(model, 'archived_model', False):\n resource_types.append(model)\n return resource_types\n\n\ndef get_resource_instance(app, model_name, pk, or_404=True):\n model = apps.get_model(app, model_name)\n if or_404:\n return get_object_or_404(model, pk=pk)\n else:\n return model.objects.get(pk=pk)\n\n\ndef get_resource_by_shortkey(shortkey, or_404=True):\n try:\n res = BaseResource.objects.get(short_id=shortkey)\n except BaseResource.DoesNotExist:\n if or_404:\n raise Http404(shortkey)\n else:\n raise\n content = res.get_content_model()\n assert content, (res, res.content_model)\n return content\n\n\ndef get_resource_by_doi(doi, or_404=True):\n try:\n res = BaseResource.objects.get(doi=doi)\n except BaseResource.DoesNotExist:\n if or_404:\n raise Http404(doi)\n else:\n raise\n content = res.get_content_model()\n assert content, (res, res.content_model)\n return content\n\n\ndef user_from_id(user, raise404=True):\n if isinstance(user, User):\n return user\n\n try:\n tgt = User.objects.get(username=user)\n except ObjectDoesNotExist:\n try:\n tgt = User.objects.get(email=user)\n except ObjectDoesNotExist:\n try:\n tgt = User.objects.get(pk=int(user))\n except ValueError:\n if raise404:\n raise Http404('User not found')\n else:\n raise User.DoesNotExist\n except ObjectDoesNotExist:\n if raise404:\n raise Http404('User not found')\n else:\n raise\n return tgt\n\n\ndef group_from_id(grp):\n if isinstance(grp, Group):\n return grp\n\n try:\n tgt = Group.objects.get(name=grp)\n except ObjectDoesNotExist:\n try:\n tgt = Group.objects.get(pk=int(grp))\n except TypeError:\n raise Http404('Group not found')\n except ObjectDoesNotExist:\n raise Http404('Group not found')\n return tgt\n\n\ndef get_user_zone_status_info(user):\n \"\"\"\n This function should be called to determine whether the site is in production and whether user\n zone functionality should be enabled or not on the web site front end\n Args:\n user: the requesting user\n Returns:\n in_production, enable_user_zone where both are boolean indicating whether the site is\n in production 
and whether user zone functionality should be enabled or not on the web site\n front end\n \"\"\"\n if user is None:\n return None, None\n if not hasattr(user, 'userprofile') or user.userprofile is None:\n return None, None\n\n in_production = True if settings.IRODS_USERNAME == settings.HS_WWW_IRODS_PROXY_USER else False\n enable_user_zone = user.userprofile.create_irods_user_account\n if not in_production and enable_user_zone:\n # if these settings are not empty, for example, in users' local\n # development environment for testing, user_zone selection is shown\n if (not settings.HS_WWW_IRODS_PROXY_USER_PWD or\n not settings.HS_WWW_IRODS_HOST or not settings.HS_WWW_IRODS_ZONE):\n enable_user_zone = False\n return in_production, enable_user_zone\n\n\ndef is_federated(homepath):\n \"\"\"\n Check if the selected file via the iRODS browser is from a federated zone or not\n Args:\n homepath: the logical iRODS file name with full logical path, e.g., selected from\n iRODS browser\n\n Returns:\n True is the selected file indicated by homepath is from a federated zone, False if otherwise\n \"\"\"\n homepath = homepath.strip()\n homepath_list = homepath.split('/')\n # homepath is an iRODS logical path in the format of\n # /irods_zone/home/irods_account_username/collection_relative_path, so homepath_list[1]\n # is the irods_zone which we can use to form the fed_proxy_path to check whether\n # fed_proxy_path exists to hold hydroshare resources in a federated zone\n if homepath_list[1]:\n fed_proxy_path = os.path.join(homepath_list[1], 'home',\n settings.HS_LOCAL_PROXY_USER_IN_FED_ZONE)\n fed_proxy_path = '/' + fed_proxy_path\n else:\n # the test path input is invalid, return False meaning it is not federated\n return False\n if settings.REMOTE_USE_IRODS:\n irods_storage = IrodsStorage('federated')\n else:\n irods_storage = IrodsStorage()\n\n # if the iRODS proxy user in hydroshare zone can list homepath and the federation zone proxy\n # user path, it is federated; otherwise, it is not federated\n return irods_storage.exists(homepath) and irods_storage.exists(fed_proxy_path)\n\n\ndef get_federated_zone_home_path(filepath):\n \"\"\"\n Args:\n filepath: the iRODS data object file path that included zone name in the format of\n /zone_name/home/user_name/file_path\n\n Returns:\n the zone name extracted from filepath\n \"\"\"\n if filepath and filepath.startswith('/'):\n split_path_strs = filepath.split('/')\n # the Zone name should follow the first slash\n zone = split_path_strs[1]\n return '/{zone}/home/{local_proxy_user}'.format(\n zone=zone, local_proxy_user=settings.HS_LOCAL_PROXY_USER_IN_FED_ZONE)\n else:\n return ''\n\n\n# TODO: replace with a cache facility that has automatic cleanup\n# TODO: pass a list rather than a string to allow commas in filenames.\ndef get_fed_zone_files(irods_fnames):\n \"\"\"\n Get the files from iRODS federated zone to Django server for metadata extraction on-demand\n for specific resource types\n Args:\n irods_fnames: the logical iRODS file names with full logical path separated by comma\n\n Returns:\n a list of the named temp files which have been copied over to local Django server\n or raise exceptions if input parameter is wrong or iRODS operations fail\n\n Note: application must delete these files after use.\n \"\"\"\n ret_file_list = []\n if isinstance(irods_fnames, basestring):\n ifnames = string.split(irods_fnames, ',')\n elif isinstance(irods_fnames, list):\n ifnames = irods_fnames\n else:\n raise ValueError(\"Input parameter to get_fed_zone_files() must be String 
or List\")\n irods_storage = IrodsStorage('federated')\n for ifname in ifnames:\n fname = os.path.basename(ifname.rstrip(os.sep))\n # TODO: this is statistically unique but not guaranteed to be unique.\n tmpdir = os.path.join(settings.TEMP_FILE_DIR, uuid4().hex)\n tmpfile = os.path.join(tmpdir, fname)\n try:\n os.makedirs(tmpdir)\n except OSError as ex:\n if ex.errno == errno.EEXIST:\n shutil.rmtree(tmpdir)\n os.makedirs(tmpdir)\n else:\n raise Exception(ex.message)\n irods_storage.getFile(ifname, tmpfile)\n ret_file_list.append(tmpfile)\n return ret_file_list\n\n\n# TODO: make the local cache file (and cleanup) part of ResourceFile state?\ndef get_file_from_irods(res_file):\n \"\"\"\n Copy the file (res_file) from iRODS (local or federated zone)\n over to django (temp directory) which is\n necessary for manipulating the file (e.g. metadata extraction).\n Note: The caller is responsible for cleaning the temp directory\n\n :param res_file: an instance of ResourceFile\n :return: location of the copied file\n \"\"\"\n res = res_file.resource\n istorage = res.get_irods_storage()\n res_file_path = res_file.storage_path\n file_name = os.path.basename(res_file_path)\n\n tmpdir = os.path.join(settings.TEMP_FILE_DIR, uuid4().hex)\n tmpfile = os.path.join(tmpdir, file_name)\n\n # TODO: If collisions occur, really bad things happen.\n # TODO: Directories are never cleaned up when unused. need cache management.\n try:\n os.makedirs(tmpdir)\n except OSError as ex:\n if ex.errno == errno.EEXIST:\n shutil.rmtree(tmpdir)\n os.makedirs(tmpdir)\n else:\n raise Exception(ex.message)\n\n istorage.getFile(res_file_path, tmpfile)\n copied_file = tmpfile\n return copied_file\n\n\n# TODO: should be ResourceFile.replace\ndef replace_resource_file_on_irods(new_file, original_resource_file, user):\n \"\"\"\n Replaces the specified resource file with file (new_file) by copying to iRODS\n (local or federated zone)\n :param new_file: file path for the file to be copied to iRODS\n :param original_resource_file: an instance of ResourceFile that is to be replaced\n :param user: user who is replacing the resource file.\n :return:\n \"\"\"\n ori_res = original_resource_file.resource\n istorage = ori_res.get_irods_storage()\n ori_storage_path = original_resource_file.storage_path\n\n # Note: this doesn't update metadata at all.\n istorage.saveFile(new_file, ori_storage_path, True)\n\n # do this so that the bag will be regenerated prior to download of the bag\n resource_modified(ori_res, by_user=user, overwrite_bag=False)\n\n\n# TODO: should be inside ResourceFile, and federation logic should be transparent.\ndef get_resource_file_name_and_extension(res_file):\n \"\"\"\n Gets the full file name with path, file base name, and extension of the specified resource file\n :param res_file: an instance of ResourceFile for which file extension to be retrieved\n :return: (full filename with path, full file base name, file extension)\n ex: \"/my_path_to/ABC.nc\" --> (\"/my_path_to/ABC.nc\", \"ABC.nc\", \".nc\")\n \"\"\"\n f_fullname = res_file.storage_path\n f_basename = os.path.basename(f_fullname)\n _, file_ext = os.path.splitext(f_fullname)\n\n return f_fullname, f_basename, file_ext\n\n\n# TODO: should be ResourceFile.url\ndef get_resource_file_url(res_file):\n \"\"\"\n Gets the download url of the specified resource file\n :param res_file: an instance of ResourceFile for which download url is to be retrieved\n :return: download url for the resource file\n \"\"\"\n\n if res_file.resource_file:\n f_url = res_file.resource_file.url\n 
elif res_file.fed_resource_file:\n f_url = res_file.fed_resource_file.url\n else:\n f_url = ''\n return f_url\n\n\n# TODO: should be classmethod of ResourceFile\ndef get_resource_files_by_extension(resource, file_extension):\n matching_files = []\n for res_file in resource.files.all():\n _, _, file_ext = get_resource_file_name_and_extension(res_file)\n if file_ext == file_extension:\n matching_files.append(res_file)\n return matching_files\n\n\ndef get_resource_file_by_name(resource, file_name):\n for res_file in resource.files.all():\n _, fl_name, _ = get_resource_file_name_and_extension(res_file)\n if fl_name == file_name:\n return res_file\n return None\n\n\ndef get_resource_file_by_id(resource, file_id):\n return resource.files.filter(id=file_id).first()\n\n\ndef replicate_resource_bag_to_user_zone(user, res_id):\n \"\"\"\n Replicate resource bag to iRODS user zone\n Args:\n user: the requesting user\n res_id: the resource id with its bag to be replicated to iRODS user zone\n\n Returns:\n None, but exceptions will be raised if there is an issue with iRODS operation\n \"\"\"\n # do on-demand bag creation\n res = get_resource_by_shortkey(res_id)\n res_coll = res.root_path\n istorage = res.get_irods_storage()\n bag_modified = \"false\"\n # needs to check whether res_id collection exists before getting/setting AVU on it to\n # accommodate the case where the very same resource gets deleted by another request when\n # it is getting downloaded\n # TODO: why would we want to do anything at all if the resource does not exist???\n if istorage.exists(res_coll):\n bag_modified = istorage.getAVU(res_coll, 'bag_modified')\n if bag_modified.lower() == \"true\":\n # import here to avoid circular import issue\n from hs_core.tasks import create_bag_by_irods\n create_bag_by_irods(res_id)\n\n # do replication of the resource bag to irods user zone\n if not res.resource_federation_path:\n istorage.set_fed_zone_session()\n src_file = res.bag_path\n # TODO: allow setting destination path\n tgt_file = '/{userzone}/home/{username}/{resid}.zip'.format(\n userzone=settings.HS_USER_IRODS_ZONE, username=user.username, resid=res_id)\n fsize = istorage.size(src_file)\n validate_user_quota(user, fsize)\n istorage.copyFiles(src_file, tgt_file)\n else:\n raise ValidationError(\"Resource {} does not exist in iRODS\".format(res.short_id))\n\n\ndef copy_resource_files_and_AVUs(src_res_id, dest_res_id):\n \"\"\"\n Copy resource files and AVUs from source resource to target resource including both\n on iRODS storage and on Django database\n :param src_res_id: source resource uuid\n :param dest_res_id: target resource uuid\n :return:\n \"\"\"\n avu_list = ['bag_modified', 'metadata_dirty', 'isPublic', 'resourceType']\n src_res = get_resource_by_shortkey(src_res_id)\n tgt_res = get_resource_by_shortkey(dest_res_id)\n\n # This makes the assumption that the destination is in the same exact zone.\n # Also, bags and similar attached files are not copied.\n istorage = src_res.get_irods_storage()\n\n # This makes an exact copy of all physical files.\n src_files = os.path.join(src_res.root_path, 'data')\n # This has to be one segment short of the source because it is a target directory.\n dest_files = tgt_res.root_path\n istorage.copyFiles(src_files, dest_files)\n\n src_coll = src_res.root_path\n tgt_coll = tgt_res.root_path\n for avu_name in avu_list:\n value = istorage.getAVU(src_coll, avu_name)\n\n # make formerly public things private\n if avu_name == 'isPublic':\n istorage.setAVU(tgt_coll, avu_name, 'false')\n\n # bag_modified 
AVU needs to be set to true for copied resource\n elif avu_name == 'bag_modified':\n istorage.setAVU(tgt_coll, avu_name, 'true')\n\n # everything else gets copied literally\n else:\n istorage.setAVU(tgt_coll, avu_name, value)\n\n # link copied resource files to Django resource model\n files = src_res.files.all()\n\n # if resource files are part of logical files, then logical files also need copying\n src_logical_files = list(set([f.logical_file for f in files if f.has_logical_file]))\n map_logical_files = {}\n for src_logical_file in src_logical_files:\n map_logical_files[src_logical_file] = src_logical_file.get_copy()\n\n for n, f in enumerate(files):\n folder, base = os.path.split(f.short_path) # strips object information.\n new_resource_file = ResourceFile.create(tgt_res, base, folder=folder)\n\n # if the original file is part of a logical file, then\n # add the corresponding new resource file to the copy of that logical file\n if f.has_logical_file:\n tgt_logical_file = map_logical_files[f.logical_file]\n tgt_logical_file.add_resource_file(new_resource_file)\n\n if src_res.resource_type.lower() == \"collectionresource\":\n # clone contained_res list of original collection and add to new collection\n # note that new collection resource will not contain \"deleted resources\"\n tgt_res.resources = src_res.resources.all()\n\n\ndef copy_and_create_metadata(src_res, dest_res):\n \"\"\"\n Copy metadata from source resource to target resource except identifier, publisher, and date\n which need to be created for the target resource as appropriate. This method is used for\n resource copying and versioning.\n :param src_res: source resource\n :param dest_res: target resource\n :return:\n \"\"\"\n # copy metadata from source resource to target resource except three elements\n exclude_elements = ['identifier', 'publisher', 'date']\n dest_res.metadata.copy_all_elements_from(src_res.metadata, exclude_elements)\n\n # create Identifier element that is specific to the new resource\n dest_res.metadata.create_element('identifier', name='hydroShareIdentifier',\n url='{0}/resource/{1}'.format(current_site_url(),\n dest_res.short_id))\n\n # create date element that is specific to the new resource\n dest_res.metadata.create_element('date', type='created', start_date=dest_res.created)\n dest_res.metadata.create_element('date', type='modified', start_date=dest_res.updated)\n\n # copy date element to the new resource if exists\n src_res_valid_date_filter = src_res.metadata.dates.all().filter(type='valid')\n if src_res_valid_date_filter:\n res_valid_date = src_res_valid_date_filter[0]\n dest_res.metadata.create_element('date', type='valid', start_date=res_valid_date.start_date,\n end_date=res_valid_date.end_date)\n\n src_res_avail_date_filter = src_res.metadata.dates.all().filter(type='available')\n if src_res_avail_date_filter:\n res_avail_date = src_res_avail_date_filter[0]\n dest_res.metadata.create_element('date', type='available',\n start_date=res_avail_date.start_date,\n end_date=res_avail_date.end_date)\n # create the key/value metadata\n dest_res.extra_metadata = copy.deepcopy(src_res.extra_metadata)\n dest_res.save()\n\n\n# TODO: should be BaseResource.mark_as_modified.\ndef resource_modified(resource, by_user=None, overwrite_bag=True):\n \"\"\"\n Set an AVU flag that forces the bag to be recreated before fetch.\n\n This indicates that some content of the bag has been edited.\n\n \"\"\"\n\n resource.last_changed_by = by_user\n\n resource.updated = now().isoformat()\n # seems this is the best place to 
sync resource title with metadata title\n resource.title = resource.metadata.title.value\n resource.save()\n if resource.metadata.dates.all().filter(type='modified'):\n res_modified_date = resource.metadata.dates.all().filter(type='modified')[0]\n resource.metadata.update_element('date', res_modified_date.id)\n\n if overwrite_bag:\n create_bag_files(resource)\n\n # set bag_modified-true AVU pair for the modified resource in iRODS to indicate\n # the resource is modified for on-demand bagging.\n set_dirty_bag_flag(resource)\n\n\n# TODO: should be part of BaseResource\ndef set_dirty_bag_flag(resource):\n \"\"\"\n Set bag_modified=true AVU pair for the modified resource in iRODS\n to indicate that the resource is modified for on-demand bagging.\n\n set metadata_dirty (AVU) to 'true' to indicate that metadata has been modified for the\n resource so that xml metadata files need to be generated on-demand\n\n This is done so that the bag creation can be \"lazy\", in the sense that the\n bag is recreated only after multiple changes to the bag files, rather than\n after each change. It is created when someone attempts to download it.\n \"\"\"\n res_coll = resource.root_path\n\n istorage = resource.get_irods_storage()\n res_coll = resource.root_path\n istorage.setAVU(res_coll, \"bag_modified\", \"true\")\n istorage.setAVU(res_coll, \"metadata_dirty\", \"true\")\n\n\ndef _validate_email(email):\n try:\n validate_email(email)\n return True\n except ValidationError:\n return False\n\n\ndef get_profile(user):\n return user.userprofile\n\n\ndef current_site_url():\n \"\"\"Returns fully qualified URL (no trailing slash) for the current site.\"\"\"\n from django.contrib.sites.models import Site\n current_site = Site.objects.get_current()\n protocol = getattr(settings, 'MY_SITE_PROTOCOL', 'http')\n port = getattr(settings, 'MY_SITE_PORT', '')\n url = '%s://%s' % (protocol, current_site.domain)\n if port:\n url += ':%s' % port\n return url\n\n\ndef get_file_mime_type(file_name):\n # TODO: looks like the mimetypes module can't find all mime types\n # We may need to user the python magic module instead\n file_name = u\"{}\".format(file_name)\n file_format_type = mimetypes.guess_type(file_name)[0]\n if not file_format_type:\n # TODO: this is probably not the right way to get the mime type\n file_format_type = 'application/%s' % os.path.splitext(file_name)[1][1:]\n\n return file_format_type\n\n\ndef check_file_dict_for_error(file_validation_dict):\n if 'are_files_valid' in file_validation_dict:\n if not file_validation_dict['are_files_valid']:\n error_message = file_validation_dict.get('message',\n \"Uploaded file(s) failed validation.\")\n raise ResourceFileValidationException(error_message)\n\n\ndef raise_file_size_exception():\n from .resource import FILE_SIZE_LIMIT_FOR_DISPLAY\n error_msg = 'The resource file is larger than the supported size limit: %s.' 
\\\n % FILE_SIZE_LIMIT_FOR_DISPLAY\n raise ResourceFileSizeException(error_msg)\n\n\ndef validate_resource_file_size(resource_files):\n from .resource import check_resource_files\n valid, size = check_resource_files(resource_files)\n if not valid:\n raise_file_size_exception()\n # if no exception, return the total size of all files\n return size\n\n\ndef validate_resource_file_type(resource_cls, files):\n supported_file_types = resource_cls.get_supported_upload_file_types()\n # see if file type checking is needed\n if '.*' in supported_file_types:\n # all file types are supported\n return\n\n supported_file_types = [x.lower() for x in supported_file_types]\n for f in files:\n file_ext = os.path.splitext(f.name)[1]\n if file_ext.lower() not in supported_file_types:\n err_msg = \"{file_name} is not a supported file type for {res_type} resource\"\n err_msg = err_msg.format(file_name=f.name, res_type=resource_cls)\n raise ResourceFileValidationException(err_msg)\n\n\ndef validate_resource_file_count(resource_cls, files, resource=None):\n if len(files) > 0:\n if len(resource_cls.get_supported_upload_file_types()) == 0:\n err_msg = \"Content files are not allowed in {res_type} resource\"\n err_msg = err_msg.format(res_type=resource_cls)\n raise ResourceFileValidationException(err_msg)\n\n err_msg = \"Multiple content files are not supported in {res_type} resource\"\n err_msg = err_msg.format(res_type=resource_cls)\n if len(files) > 1:\n if not resource_cls.allow_multiple_file_upload():\n raise ResourceFileValidationException(err_msg)\n\n if resource is not None and resource.files.all().count() > 0:\n if not resource_cls.can_have_multiple_files():\n raise ResourceFileValidationException(err_msg)\n\n\ndef convert_file_size_to_unit(size, unit):\n \"\"\"\n Convert file size to unit for quota comparison\n :param size: in byte unit\n :param unit: should be one of the four: 'KB', 'MB', 'GB', or 'TB'\n :return: the size converted to the pass-in unit\n \"\"\"\n unit = unit.lower()\n if unit not in ('kb', 'mb', 'gb', 'tb'):\n raise ValidationError('Pass-in unit for file size conversion must be one of KB, MB, GB, '\n 'or TB')\n factor = 1024.0\n kbsize = size / factor\n if unit == 'kb':\n return kbsize\n mbsize = kbsize / factor\n if unit == 'mb':\n return mbsize\n gbsize = mbsize / factor\n if unit == 'gb':\n return gbsize\n tbsize = gbsize / factor\n if unit == 'tb':\n return tbsize\n\n\ndef validate_user_quota(user, size):\n \"\"\"\n validate to make sure the user is not over quota with the newly added size\n :param user: the user to be validated\n :param size: the newly added file size to add on top of the user's used quota to be validated.\n size input parameter should be in byte unit\n :return: raise exception for the over quota case\n \"\"\"\n if user:\n # validate it is within quota hard limit\n uq = user.quotas.filter(zone='hydroshare').first()\n if uq:\n if not QuotaMessage.objects.exists():\n QuotaMessage.objects.create()\n qmsg = QuotaMessage.objects.first()\n enforce_flag = qmsg.enforce_quota\n if enforce_flag:\n hard_limit = qmsg.hard_limit_percent\n used_size = uq.add_to_used_value(size)\n used_percent = uq.used_percent\n rounded_percent = round(used_percent, 2)\n rounded_used_val = round(used_size, 4)\n if used_percent >= hard_limit or uq.remaining_grace_period == 0:\n msg_template_str = '{}{}\\n\\n'.format(qmsg.enforce_content_prepend,\n qmsg.content)\n msg_str = msg_template_str.format(used=rounded_used_val,\n unit=uq.unit,\n allocated=uq.allocated_value,\n zone=uq.zone,\n 
percent=rounded_percent)\n raise QuotaException(msg_str)\n\n\ndef resource_pre_create_actions(resource_type, resource_title, page_redirect_url_key,\n files=(), source_names=[], metadata=None,\n requesting_user=None, **kwargs):\n from.resource import check_resource_type\n from hs_core.views.utils import validate_metadata\n\n if __debug__:\n assert(isinstance(source_names, list))\n\n if not resource_title:\n resource_title = 'Untitled resource'\n else:\n resource_title = resource_title.strip()\n if len(resource_title) == 0:\n resource_title = 'Untitled resource'\n\n resource_cls = check_resource_type(resource_type)\n if len(files) > 0:\n size = validate_resource_file_size(files)\n validate_resource_file_count(resource_cls, files)\n validate_resource_file_type(resource_cls, files)\n # validate it is within quota hard limit\n validate_user_quota(requesting_user, size)\n\n if not metadata:\n metadata = []\n else:\n validate_metadata(metadata, resource_type)\n\n page_url_dict = {}\n # this is needed since raster and feature resource types allows to upload a zip file,\n # then replace zip file with exploded files. If the zip file is loaded from hydroshare\n # federation zone, the original zip file encoded in source_names gets deleted\n # in this case and fed_res_path is used to keep the federation path, so that the resource\n # will be stored in the federated zone rather than the hydroshare zone\n fed_res_path = []\n # receivers need to change the values of this dict if file validation fails\n file_validation_dict = {'are_files_valid': True, 'message': 'Files are valid'}\n\n # Send pre-create resource signal - let any other app populate the empty metadata list object\n # also pass title to other apps, and give other apps a chance to populate page_redirect_url\n # if they want to redirect to their own page for resource creation rather than use core\n # resource creation code\n pre_create_resource.send(sender=resource_cls, metadata=metadata, files=files,\n title=resource_title,\n url_key=page_redirect_url_key, page_url_dict=page_url_dict,\n validate_files=file_validation_dict,\n source_names=source_names,\n user=requesting_user, fed_res_path=fed_res_path, **kwargs)\n\n if len(files) > 0:\n check_file_dict_for_error(file_validation_dict)\n\n return page_url_dict, resource_title, metadata, fed_res_path\n\n\ndef resource_post_create_actions(resource, user, metadata, **kwargs):\n # receivers need to change the values of this dict if file validation fails\n file_validation_dict = {'are_files_valid': True, 'message': 'Files are valid'}\n # Send post-create resource signal\n post_create_resource.send(sender=type(resource), resource=resource, user=user,\n metadata=metadata,\n validate_files=file_validation_dict, **kwargs)\n\n check_file_dict_for_error(file_validation_dict)\n\n\ndef prepare_resource_default_metadata(resource, metadata, res_title):\n add_title = True\n for element in metadata:\n if 'title' in element:\n if 'value' in element['title']:\n res_title = element['title']['value']\n add_title = False\n else:\n metadata.remove(element)\n break\n\n if add_title:\n metadata.append({'title': {'value': res_title}})\n\n add_language = True\n for element in metadata:\n if 'language' in element:\n if 'code' in element['language']:\n add_language = False\n else:\n metadata.remove(element)\n break\n\n if add_language:\n metadata.append({'language': {'code': 'eng'}})\n\n add_rights = True\n for element in metadata:\n if 'rights' in element:\n if 'statement' in element['rights'] and 'url' in 
element['rights']:\n add_rights = False\n else:\n metadata.remove(element)\n break\n\n if add_rights:\n # add the default rights/license element\n statement = 'This resource is shared under the Creative Commons Attribution CC BY.'\n url = 'http://creativecommons.org/licenses/by/4.0/'\n metadata.append({'rights': {'statement': statement, 'url': url}})\n\n metadata.append({'identifier': {'name': 'hydroShareIdentifier',\n 'url': '{0}/resource/{1}'.format(current_site_url(),\n resource.short_id)}})\n\n # remove if there exists the 'type' element as system generates this element\n # remove if there exists 'format' elements - since format elements are system generated based\n # on resource content files\n # remove any 'date' element which is not of type 'valid'. All other date elements are\n # system generated\n for element in list(metadata):\n if 'type' in element or 'format' in element:\n metadata.remove(element)\n if 'date' in element:\n if 'type' in element['date']:\n if element['date']['type'] != 'valid':\n metadata.remove(element)\n\n metadata.append({'type': {'url': '{0}/terms/{1}'.format(current_site_url(),\n resource.__class__.__name__)}})\n\n metadata.append({'date': {'type': 'created', 'start_date': resource.created}})\n metadata.append({'date': {'type': 'modified', 'start_date': resource.updated}})\n\n # only add the resource creator as the creator for metadata if there is not already\n # creator data in the metadata object\n metadata_keys = [element.keys()[0].lower() for element in metadata]\n if 'creator' not in metadata_keys:\n creator_data = get_party_data_from_user(resource.creator)\n metadata.append({'creator': creator_data})\n\n\ndef get_party_data_from_user(user):\n party_data = {}\n user_profile = get_profile(user)\n user_full_name = user.get_full_name()\n if user_full_name:\n party_name = user_full_name\n else:\n party_name = user.username\n\n party_data['name'] = party_name\n party_data['email'] = user.email\n party_data['description'] = '/user/{uid}/'.format(uid=user.pk)\n party_data['phone'] = user_profile.phone_1\n party_data['organization'] = user_profile.organization\n return party_data\n\n\n# TODO: make this part of resource api. resource --> self.\ndef resource_file_add_pre_process(resource, files, user, extract_metadata=False,\n source_names=[], **kwargs):\n if __debug__:\n assert(isinstance(source_names, list))\n resource_cls = resource.__class__\n if len(files) > 0:\n size = validate_resource_file_size(files)\n validate_user_quota(resource.get_quota_holder(), size)\n validate_resource_file_type(resource_cls, files)\n validate_resource_file_count(resource_cls, files, resource)\n\n file_validation_dict = {'are_files_valid': True, 'message': 'Files are valid'}\n pre_add_files_to_resource.send(sender=resource_cls, files=files, resource=resource, user=user,\n source_names=source_names,\n validate_files=file_validation_dict,\n extract_metadata=extract_metadata, **kwargs)\n\n check_file_dict_for_error(file_validation_dict)\n\n\n# TODO: make this part of resource api. 
resource --> self.\ndef resource_file_add_process(resource, files, user, extract_metadata=False,\n source_names=[], **kwargs):\n\n from .resource import add_resource_files\n if __debug__:\n assert(isinstance(source_names, list))\n folder = kwargs.pop('folder', None)\n resource_file_objects = add_resource_files(resource.short_id, *files, folder=folder,\n source_names=source_names)\n\n # receivers need to change the values of this dict if file validation fails\n # in case of file validation failure it is assumed the resource type also deleted the file\n file_validation_dict = {'are_files_valid': True, 'message': 'Files are valid'}\n post_add_files_to_resource.send(sender=resource.__class__, files=files,\n source_names=source_names,\n resource=resource, user=user,\n validate_files=file_validation_dict,\n extract_metadata=extract_metadata,\n res_files=resource_file_objects, **kwargs)\n\n check_file_dict_for_error(file_validation_dict)\n\n resource_modified(resource, user, overwrite_bag=False)\n return resource_file_objects\n\n\n# TODO: move this to BaseResource\ndef create_empty_contents_directory(resource):\n res_contents_dir = resource.file_path\n istorage = resource.get_irods_storage()\n if not istorage.exists(res_contents_dir):\n istorage.session.run(\"imkdir\", None, '-p', res_contents_dir)\n\n\ndef add_file_to_resource(resource, f, folder=None, source_name='',\n move=False):\n \"\"\"\n Add a ResourceFile to a Resource. Adds the 'format' metadata element to the resource.\n :param resource: Resource to which file should be added\n :param f: File-like object to add to a resource\n :param source_name: the logical file name of the resource content file for\n federated iRODS resource or the federated zone name;\n By default, it is empty. A non-empty value indicates\n the file needs to be added into the federated zone, either\n from local disk where f holds the uploaded file from local\n disk, or from the federated zone directly where f is empty\n but source_name has the whole data object\n iRODS path in the federated zone\n :param move: indicate whether the file should be copied or moved from private user\n account to proxy user account in federated zone; A value of False\n indicates copy is needed, a value of True indicates no copy, but\n the file will be moved from private user account to proxy user account.\n The default value is False.\n\n :return: The identifier of the ResourceFile added.\n \"\"\"\n\n # importing here to avoid circular import\n from hs_file_types.models import GenericLogicalFile\n\n if f:\n openfile = File(f) if not isinstance(f, UploadedFile) else f\n ret = ResourceFile.create(resource, openfile, folder=folder, source=None, move=False)\n\n # add format metadata element if necessary\n file_format_type = get_file_mime_type(f.name)\n\n elif source_name:\n try:\n # create from existing iRODS file\n ret = ResourceFile.create(resource, None, folder=folder, source=source_name, move=move)\n except SessionException as ex:\n try:\n ret.delete()\n except Exception:\n pass\n # raise the exception for the calling function to inform the error on the page interface\n raise SessionException(ex.exitcode, ex.stdout, ex.stderr)\n\n # add format metadata element if necessary\n file_format_type = get_file_mime_type(source_name)\n\n else:\n raise ValueError('Invalid input parameter is passed into this add_file_to_resource() '\n 'function')\n\n # TODO: generate this from data in ResourceFile rather than extension\n if file_format_type not in [mime.value for mime in 
resource.metadata.formats.all()]:\n resource.metadata.create_element('format', value=file_format_type)\n\n # if a file gets added successfully to composite resource, then better to set the generic\n # logical file here\n if resource.resource_type == \"CompositeResource\":\n logical_file = GenericLogicalFile.create()\n ret.logical_file_content_object = logical_file\n ret.save()\n\n return ret\n\n\ndef add_metadata_element_to_xml(root, md_element, md_fields):\n \"\"\"\n helper function to generate xml elements for a given metadata element that belongs to\n 'hsterms' namespace\n\n :param root: the xml document root element to which xml elements for the specified\n metadata element needs to be added\n :param md_element: the metadata element object. The term attribute of the metadata\n element object is used for naming the root xml element for this metadata element.\n If the root xml element needs to be named differently, then this needs to be a tuple\n with first element being the metadata element object and the second being the name\n for the root element.\n Example:\n md_element=self.Creator # the term attribute of the Creator object will be used\n md_element=(self.Creator, 'Author') # 'Author' will be used\n\n :param md_fields: a list of attribute names of the metadata element (if the name to be used\n in generating the xml element name is same as the attribute name then include the\n attribute name as a list item. if xml element name needs to be different from the\n attribute name then the list item must be a tuple with first element of the tuple being\n the attribute name and the second element being what will be used in naming the xml\n element)\n Example:\n [('first_name', 'firstName'), 'phone', 'email']\n # xml sub-elements names: firstName, phone, email\n \"\"\"\n from lxml import etree\n from hs_core.models import CoreMetaData\n\n name_spaces = CoreMetaData.NAMESPACES\n if isinstance(md_element, tuple):\n element_name = md_element[1]\n md_element = md_element[0]\n else:\n element_name = md_element.term\n\n hsterms_newElem = etree.SubElement(root,\n \"{{{ns}}}{new_element}\".format(\n ns=name_spaces['hsterms'],\n new_element=element_name))\n hsterms_newElem_rdf_Desc = etree.SubElement(\n hsterms_newElem, \"{{{ns}}}Description\".format(ns=name_spaces['rdf']))\n for md_field in md_fields:\n if isinstance(md_field, tuple):\n field_name = md_field[0]\n xml_element_name = md_field[1]\n else:\n field_name = md_field\n xml_element_name = md_field\n\n if hasattr(md_element, field_name):\n attr = getattr(md_element, field_name)\n if attr:\n field = etree.SubElement(hsterms_newElem_rdf_Desc,\n \"{{{ns}}}{field}\".format(ns=name_spaces['hsterms'],\n field=xml_element_name))\n field.text = str(attr)\n\n\nclass ZipContents(object):\n \"\"\"\n Extract the contents of a zip file one file at a time\n using a generator.\n \"\"\"\n def __init__(self, zip_file):\n self.zip_file = zip_file\n\n def black_list_path(self, file_path):\n return file_path.startswith('__MACOSX/')\n\n def black_list_name(self, file_name):\n return file_name == '.DS_Store'\n\n def get_files(self):\n temp_dir = tempfile.mkdtemp()\n try:\n file_path = None\n for name_path in self.zip_file.namelist():\n if not self.black_list_path(name_path):\n name = os.path.basename(name_path)\n if name != '':\n if not self.black_list_name(name):\n self.zip_file.extract(name_path, temp_dir)\n file_path = os.path.join(temp_dir, name_path)\n logger.debug(\"Opening {0} as File with name {1}\".format(file_path,\n name_path))\n f = 
File(file=open(file_path, 'rb'),\n name=name_path)\n f.size = os.stat(file_path).st_size\n yield f\n finally:\n shutil.rmtree(temp_dir)\n\n\ndef get_file_storage():\n return IrodsStorage() if getattr(settings, 'USE_IRODS', False) else DefaultStorage()\n\n\ndef resolve_request(request):\n if request.POST:\n return request.POST\n\n if request.data:\n return request.data\n\n return {}\n",
"path": "hs_core/hydroshare/utils.py"
}
] | [
{
"content": "from __future__ import absolute_import\n\nimport mimetypes\nimport os\nimport tempfile\nimport logging\nimport shutil\nimport string\nimport copy\nfrom uuid import uuid4\nimport errno\n\nfrom django.apps import apps\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.timezone import now\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError\nfrom django.contrib.auth.models import User, Group\nfrom django.core.files import File\nfrom django.core.files.uploadedfile import UploadedFile\nfrom django.core.files.storage import DefaultStorage\nfrom django.core.validators import validate_email\n\nfrom mezzanine.conf import settings\n\nfrom hs_core.signals import pre_create_resource, post_create_resource, pre_add_files_to_resource, \\\n post_add_files_to_resource\nfrom hs_core.models import AbstractResource, BaseResource, ResourceFile\nfrom hs_core.hydroshare.hs_bagit import create_bag_files\n\nfrom django_irods.icommands import SessionException\nfrom django_irods.storage import IrodsStorage\nfrom theme.models import QuotaMessage\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ResourceFileSizeException(Exception):\n pass\n\n\nclass ResourceFileValidationException(Exception):\n pass\n\n\nclass QuotaException(Exception):\n pass\n\n\ndef get_resource_types():\n resource_types = []\n for model in apps.get_models():\n if issubclass(model, AbstractResource) and model != BaseResource:\n if not getattr(model, 'archived_model', False):\n resource_types.append(model)\n return resource_types\n\n\ndef get_resource_instance(app, model_name, pk, or_404=True):\n model = apps.get_model(app, model_name)\n if or_404:\n return get_object_or_404(model, pk=pk)\n else:\n return model.objects.get(pk=pk)\n\n\ndef get_resource_by_shortkey(shortkey, or_404=True):\n try:\n res = BaseResource.objects.get(short_id=shortkey)\n except BaseResource.DoesNotExist:\n if or_404:\n raise Http404(shortkey)\n else:\n raise\n content = res.get_content_model()\n assert content, (res, res.content_model)\n return content\n\n\ndef get_resource_by_doi(doi, or_404=True):\n try:\n res = BaseResource.objects.get(doi=doi)\n except BaseResource.DoesNotExist:\n if or_404:\n raise Http404(doi)\n else:\n raise\n content = res.get_content_model()\n assert content, (res, res.content_model)\n return content\n\n\ndef user_from_id(user, raise404=True):\n if isinstance(user, User):\n return user\n\n try:\n tgt = User.objects.get(username=user)\n except ObjectDoesNotExist:\n try:\n tgt = User.objects.get(email=user)\n except ObjectDoesNotExist:\n try:\n tgt = User.objects.get(pk=int(user))\n except ValueError:\n if raise404:\n raise Http404('User not found')\n else:\n raise User.DoesNotExist\n except ObjectDoesNotExist:\n if raise404:\n raise Http404('User not found')\n else:\n raise\n return tgt\n\n\ndef group_from_id(grp):\n if isinstance(grp, Group):\n return grp\n\n try:\n tgt = Group.objects.get(name=grp)\n except ObjectDoesNotExist:\n try:\n tgt = Group.objects.get(pk=int(grp))\n except TypeError:\n raise Http404('Group not found')\n except ObjectDoesNotExist:\n raise Http404('Group not found')\n return tgt\n\n\ndef get_user_zone_status_info(user):\n \"\"\"\n This function should be called to determine whether the site is in production and whether user\n zone functionality should be enabled or not on the web site front end\n Args:\n user: the requesting user\n Returns:\n in_production, enable_user_zone where both are boolean indicating whether the site is\n in production 
and whether user zone functionality should be enabled or not on the web site\n front end\n \"\"\"\n if user is None:\n return None, None\n if not hasattr(user, 'userprofile') or user.userprofile is None:\n return None, None\n\n in_production = True if settings.IRODS_USERNAME == settings.HS_WWW_IRODS_PROXY_USER else False\n enable_user_zone = user.userprofile.create_irods_user_account\n if not in_production and enable_user_zone:\n # if these settings are not empty, for example, in users' local\n # development environment for testing, user_zone selection is shown\n if (not settings.HS_WWW_IRODS_PROXY_USER_PWD or\n not settings.HS_WWW_IRODS_HOST or not settings.HS_WWW_IRODS_ZONE):\n enable_user_zone = False\n return in_production, enable_user_zone\n\n\ndef is_federated(homepath):\n \"\"\"\n Check if the selected file via the iRODS browser is from a federated zone or not\n Args:\n homepath: the logical iRODS file name with full logical path, e.g., selected from\n iRODS browser\n\n Returns:\n True is the selected file indicated by homepath is from a federated zone, False if otherwise\n \"\"\"\n homepath = homepath.strip()\n homepath_list = homepath.split('/')\n # homepath is an iRODS logical path in the format of\n # /irods_zone/home/irods_account_username/collection_relative_path, so homepath_list[1]\n # is the irods_zone which we can use to form the fed_proxy_path to check whether\n # fed_proxy_path exists to hold hydroshare resources in a federated zone\n if homepath_list[1]:\n fed_proxy_path = os.path.join(homepath_list[1], 'home',\n settings.HS_LOCAL_PROXY_USER_IN_FED_ZONE)\n fed_proxy_path = '/' + fed_proxy_path\n else:\n # the test path input is invalid, return False meaning it is not federated\n return False\n if settings.REMOTE_USE_IRODS:\n irods_storage = IrodsStorage('federated')\n else:\n irods_storage = IrodsStorage()\n\n # if the iRODS proxy user in hydroshare zone can list homepath and the federation zone proxy\n # user path, it is federated; otherwise, it is not federated\n return irods_storage.exists(homepath) and irods_storage.exists(fed_proxy_path)\n\n\ndef get_federated_zone_home_path(filepath):\n \"\"\"\n Args:\n filepath: the iRODS data object file path that included zone name in the format of\n /zone_name/home/user_name/file_path\n\n Returns:\n the zone name extracted from filepath\n \"\"\"\n if filepath and filepath.startswith('/'):\n split_path_strs = filepath.split('/')\n # the Zone name should follow the first slash\n zone = split_path_strs[1]\n return '/{zone}/home/{local_proxy_user}'.format(\n zone=zone, local_proxy_user=settings.HS_LOCAL_PROXY_USER_IN_FED_ZONE)\n else:\n return ''\n\n\n# TODO: replace with a cache facility that has automatic cleanup\n# TODO: pass a list rather than a string to allow commas in filenames.\ndef get_fed_zone_files(irods_fnames):\n \"\"\"\n Get the files from iRODS federated zone to Django server for metadata extraction on-demand\n for specific resource types\n Args:\n irods_fnames: the logical iRODS file names with full logical path separated by comma\n\n Returns:\n a list of the named temp files which have been copied over to local Django server\n or raise exceptions if input parameter is wrong or iRODS operations fail\n\n Note: application must delete these files after use.\n \"\"\"\n ret_file_list = []\n if isinstance(irods_fnames, basestring):\n ifnames = string.split(irods_fnames, ',')\n elif isinstance(irods_fnames, list):\n ifnames = irods_fnames\n else:\n raise ValueError(\"Input parameter to get_fed_zone_files() must be String 
or List\")\n irods_storage = IrodsStorage('federated')\n for ifname in ifnames:\n fname = os.path.basename(ifname.rstrip(os.sep))\n # TODO: this is statistically unique but not guaranteed to be unique.\n tmpdir = os.path.join(settings.TEMP_FILE_DIR, uuid4().hex)\n tmpfile = os.path.join(tmpdir, fname)\n try:\n os.makedirs(tmpdir)\n except OSError as ex:\n if ex.errno == errno.EEXIST:\n shutil.rmtree(tmpdir)\n os.makedirs(tmpdir)\n else:\n raise Exception(ex.message)\n irods_storage.getFile(ifname, tmpfile)\n ret_file_list.append(tmpfile)\n return ret_file_list\n\n\n# TODO: make the local cache file (and cleanup) part of ResourceFile state?\ndef get_file_from_irods(res_file):\n \"\"\"\n Copy the file (res_file) from iRODS (local or federated zone)\n over to django (temp directory) which is\n necessary for manipulating the file (e.g. metadata extraction).\n Note: The caller is responsible for cleaning the temp directory\n\n :param res_file: an instance of ResourceFile\n :return: location of the copied file\n \"\"\"\n res = res_file.resource\n istorage = res.get_irods_storage()\n res_file_path = res_file.storage_path\n file_name = os.path.basename(res_file_path)\n\n tmpdir = os.path.join(settings.TEMP_FILE_DIR, uuid4().hex)\n tmpfile = os.path.join(tmpdir, file_name)\n\n # TODO: If collisions occur, really bad things happen.\n # TODO: Directories are never cleaned up when unused. need cache management.\n try:\n os.makedirs(tmpdir)\n except OSError as ex:\n if ex.errno == errno.EEXIST:\n shutil.rmtree(tmpdir)\n os.makedirs(tmpdir)\n else:\n raise Exception(ex.message)\n\n istorage.getFile(res_file_path, tmpfile)\n copied_file = tmpfile\n return copied_file\n\n\n# TODO: should be ResourceFile.replace\ndef replace_resource_file_on_irods(new_file, original_resource_file, user):\n \"\"\"\n Replaces the specified resource file with file (new_file) by copying to iRODS\n (local or federated zone)\n :param new_file: file path for the file to be copied to iRODS\n :param original_resource_file: an instance of ResourceFile that is to be replaced\n :param user: user who is replacing the resource file.\n :return:\n \"\"\"\n ori_res = original_resource_file.resource\n istorage = ori_res.get_irods_storage()\n ori_storage_path = original_resource_file.storage_path\n\n # Note: this doesn't update metadata at all.\n istorage.saveFile(new_file, ori_storage_path, True)\n\n # do this so that the bag will be regenerated prior to download of the bag\n resource_modified(ori_res, by_user=user, overwrite_bag=False)\n\n\n# TODO: should be inside ResourceFile, and federation logic should be transparent.\ndef get_resource_file_name_and_extension(res_file):\n \"\"\"\n Gets the full file name with path, file base name, and extension of the specified resource file\n :param res_file: an instance of ResourceFile for which file extension to be retrieved\n :return: (full filename with path, full file base name, file extension)\n ex: \"/my_path_to/ABC.nc\" --> (\"/my_path_to/ABC.nc\", \"ABC.nc\", \".nc\")\n \"\"\"\n f_fullname = res_file.storage_path\n f_basename = os.path.basename(f_fullname)\n _, file_ext = os.path.splitext(f_fullname)\n\n return f_fullname, f_basename, file_ext\n\n\n# TODO: should be ResourceFile.url\ndef get_resource_file_url(res_file):\n \"\"\"\n Gets the download url of the specified resource file\n :param res_file: an instance of ResourceFile for which download url is to be retrieved\n :return: download url for the resource file\n \"\"\"\n\n if res_file.resource_file:\n f_url = res_file.resource_file.url\n 
elif res_file.fed_resource_file:\n f_url = res_file.fed_resource_file.url\n else:\n f_url = ''\n return f_url\n\n\n# TODO: should be classmethod of ResourceFile\ndef get_resource_files_by_extension(resource, file_extension):\n matching_files = []\n for res_file in resource.files.all():\n _, _, file_ext = get_resource_file_name_and_extension(res_file)\n if file_ext == file_extension:\n matching_files.append(res_file)\n return matching_files\n\n\ndef get_resource_file_by_name(resource, file_name):\n for res_file in resource.files.all():\n _, fl_name, _ = get_resource_file_name_and_extension(res_file)\n if fl_name == file_name:\n return res_file\n return None\n\n\ndef get_resource_file_by_id(resource, file_id):\n return resource.files.filter(id=file_id).first()\n\n\ndef replicate_resource_bag_to_user_zone(user, res_id):\n \"\"\"\n Replicate resource bag to iRODS user zone\n Args:\n user: the requesting user\n res_id: the resource id with its bag to be replicated to iRODS user zone\n\n Returns:\n None, but exceptions will be raised if there is an issue with iRODS operation\n \"\"\"\n # do on-demand bag creation\n res = get_resource_by_shortkey(res_id)\n res_coll = res.root_path\n istorage = res.get_irods_storage()\n bag_modified = \"false\"\n # needs to check whether res_id collection exists before getting/setting AVU on it to\n # accommodate the case where the very same resource gets deleted by another request when\n # it is getting downloaded\n # TODO: why would we want to do anything at all if the resource does not exist???\n if istorage.exists(res_coll):\n bag_modified = istorage.getAVU(res_coll, 'bag_modified')\n if bag_modified.lower() == \"true\":\n # import here to avoid circular import issue\n from hs_core.tasks import create_bag_by_irods\n create_bag_by_irods(res_id)\n\n # do replication of the resource bag to irods user zone\n if not res.resource_federation_path:\n istorage.set_fed_zone_session()\n src_file = res.bag_path\n # TODO: allow setting destination path\n tgt_file = '/{userzone}/home/{username}/{resid}.zip'.format(\n userzone=settings.HS_USER_IRODS_ZONE, username=user.username, resid=res_id)\n fsize = istorage.size(src_file)\n validate_user_quota(user, fsize)\n istorage.copyFiles(src_file, tgt_file)\n else:\n raise ValidationError(\"Resource {} does not exist in iRODS\".format(res.short_id))\n\n\ndef copy_resource_files_and_AVUs(src_res_id, dest_res_id):\n \"\"\"\n Copy resource files and AVUs from source resource to target resource including both\n on iRODS storage and on Django database\n :param src_res_id: source resource uuid\n :param dest_res_id: target resource uuid\n :return:\n \"\"\"\n avu_list = ['bag_modified', 'metadata_dirty', 'isPublic', 'resourceType']\n src_res = get_resource_by_shortkey(src_res_id)\n tgt_res = get_resource_by_shortkey(dest_res_id)\n\n # This makes the assumption that the destination is in the same exact zone.\n # Also, bags and similar attached files are not copied.\n istorage = src_res.get_irods_storage()\n\n # This makes an exact copy of all physical files.\n src_files = os.path.join(src_res.root_path, 'data')\n # This has to be one segment short of the source because it is a target directory.\n dest_files = tgt_res.root_path\n istorage.copyFiles(src_files, dest_files)\n\n src_coll = src_res.root_path\n tgt_coll = tgt_res.root_path\n for avu_name in avu_list:\n value = istorage.getAVU(src_coll, avu_name)\n\n # make formerly public things private\n if avu_name == 'isPublic':\n istorage.setAVU(tgt_coll, avu_name, 'false')\n\n # bag_modified 
AVU needs to be set to true for copied resource\n elif avu_name == 'bag_modified':\n istorage.setAVU(tgt_coll, avu_name, 'true')\n\n # everything else gets copied literally\n else:\n istorage.setAVU(tgt_coll, avu_name, value)\n\n # link copied resource files to Django resource model\n files = src_res.files.all()\n\n # if resource files are part of logical files, then logical files also need copying\n src_logical_files = list(set([f.logical_file for f in files if f.has_logical_file]))\n map_logical_files = {}\n for src_logical_file in src_logical_files:\n map_logical_files[src_logical_file] = src_logical_file.get_copy()\n\n for n, f in enumerate(files):\n folder, base = os.path.split(f.short_path) # strips object information.\n new_resource_file = ResourceFile.create(tgt_res, base, folder=folder)\n\n # if the original file is part of a logical file, then\n # add the corresponding new resource file to the copy of that logical file\n if f.has_logical_file:\n tgt_logical_file = map_logical_files[f.logical_file]\n tgt_logical_file.add_resource_file(new_resource_file)\n\n if src_res.resource_type.lower() == \"collectionresource\":\n # clone contained_res list of original collection and add to new collection\n # note that new collection resource will not contain \"deleted resources\"\n tgt_res.resources = src_res.resources.all()\n\n\ndef copy_and_create_metadata(src_res, dest_res):\n \"\"\"\n Copy metadata from source resource to target resource except identifier, publisher, and date\n which need to be created for the target resource as appropriate. This method is used for\n resource copying and versioning.\n :param src_res: source resource\n :param dest_res: target resource\n :return:\n \"\"\"\n # copy metadata from source resource to target resource except three elements\n exclude_elements = ['identifier', 'publisher', 'date']\n dest_res.metadata.copy_all_elements_from(src_res.metadata, exclude_elements)\n\n # create Identifier element that is specific to the new resource\n dest_res.metadata.create_element('identifier', name='hydroShareIdentifier',\n url='{0}/resource/{1}'.format(current_site_url(),\n dest_res.short_id))\n\n # create date element that is specific to the new resource\n dest_res.metadata.create_element('date', type='created', start_date=dest_res.created)\n dest_res.metadata.create_element('date', type='modified', start_date=dest_res.updated)\n\n # copy date element to the new resource if exists\n src_res_valid_date_filter = src_res.metadata.dates.all().filter(type='valid')\n if src_res_valid_date_filter:\n res_valid_date = src_res_valid_date_filter[0]\n dest_res.metadata.create_element('date', type='valid', start_date=res_valid_date.start_date,\n end_date=res_valid_date.end_date)\n\n src_res_avail_date_filter = src_res.metadata.dates.all().filter(type='available')\n if src_res_avail_date_filter:\n res_avail_date = src_res_avail_date_filter[0]\n dest_res.metadata.create_element('date', type='available',\n start_date=res_avail_date.start_date,\n end_date=res_avail_date.end_date)\n # create the key/value metadata\n dest_res.extra_metadata = copy.deepcopy(src_res.extra_metadata)\n dest_res.save()\n\n\n# TODO: should be BaseResource.mark_as_modified.\ndef resource_modified(resource, by_user=None, overwrite_bag=True):\n \"\"\"\n Set an AVU flag that forces the bag to be recreated before fetch.\n\n This indicates that some content of the bag has been edited.\n\n \"\"\"\n\n resource.last_changed_by = by_user\n\n resource.updated = now().isoformat()\n # seems this is the best place to 
sync resource title with metadata title\n resource.title = resource.metadata.title.value\n resource.save()\n if resource.metadata.dates.all().filter(type='modified'):\n res_modified_date = resource.metadata.dates.all().filter(type='modified')[0]\n resource.metadata.update_element('date', res_modified_date.id)\n\n if overwrite_bag:\n create_bag_files(resource)\n\n # set bag_modified-true AVU pair for the modified resource in iRODS to indicate\n # the resource is modified for on-demand bagging.\n set_dirty_bag_flag(resource)\n\n\n# TODO: should be part of BaseResource\ndef set_dirty_bag_flag(resource):\n \"\"\"\n Set bag_modified=true AVU pair for the modified resource in iRODS\n to indicate that the resource is modified for on-demand bagging.\n\n set metadata_dirty (AVU) to 'true' to indicate that metadata has been modified for the\n resource so that xml metadata files need to be generated on-demand\n\n This is done so that the bag creation can be \"lazy\", in the sense that the\n bag is recreated only after multiple changes to the bag files, rather than\n after each change. It is created when someone attempts to download it.\n \"\"\"\n res_coll = resource.root_path\n\n istorage = resource.get_irods_storage()\n res_coll = resource.root_path\n istorage.setAVU(res_coll, \"bag_modified\", \"true\")\n istorage.setAVU(res_coll, \"metadata_dirty\", \"true\")\n\n\ndef _validate_email(email):\n try:\n validate_email(email)\n return True\n except ValidationError:\n return False\n\n\ndef get_profile(user):\n return user.userprofile\n\n\ndef current_site_url():\n \"\"\"Returns fully qualified URL (no trailing slash) for the current site.\"\"\"\n from django.contrib.sites.models import Site\n current_site = Site.objects.get_current()\n protocol = getattr(settings, 'MY_SITE_PROTOCOL', 'http')\n port = getattr(settings, 'MY_SITE_PORT', '')\n url = '%s://%s' % (protocol, current_site.domain)\n if port:\n url += ':%s' % port\n return url\n\n\ndef get_file_mime_type(file_name):\n # TODO: looks like the mimetypes module can't find all mime types\n # We may need to user the python magic module instead\n file_name = u\"{}\".format(file_name)\n file_format_type = mimetypes.guess_type(file_name)[0]\n if not file_format_type:\n # TODO: this is probably not the right way to get the mime type\n file_format_type = 'application/%s' % os.path.splitext(file_name)[1][1:]\n\n return file_format_type\n\n\ndef check_file_dict_for_error(file_validation_dict):\n if 'are_files_valid' in file_validation_dict:\n if not file_validation_dict['are_files_valid']:\n error_message = file_validation_dict.get('message',\n \"Uploaded file(s) failed validation.\")\n raise ResourceFileValidationException(error_message)\n\n\ndef raise_file_size_exception():\n from .resource import FILE_SIZE_LIMIT_FOR_DISPLAY\n error_msg = 'The resource file is larger than the supported size limit: %s.' 
\\\n % FILE_SIZE_LIMIT_FOR_DISPLAY\n raise ResourceFileSizeException(error_msg)\n\n\ndef validate_resource_file_size(resource_files):\n from .resource import check_resource_files\n valid, size = check_resource_files(resource_files)\n if not valid:\n raise_file_size_exception()\n # if no exception, return the total size of all files\n return size\n\n\ndef validate_resource_file_type(resource_cls, files):\n supported_file_types = resource_cls.get_supported_upload_file_types()\n # see if file type checking is needed\n if '.*' in supported_file_types:\n # all file types are supported\n return\n\n supported_file_types = [x.lower() for x in supported_file_types]\n for f in files:\n file_ext = os.path.splitext(f.name)[1]\n if file_ext.lower() not in supported_file_types:\n err_msg = \"{file_name} is not a supported file type for {res_type} resource\"\n err_msg = err_msg.format(file_name=f.name, res_type=resource_cls)\n raise ResourceFileValidationException(err_msg)\n\n\ndef validate_resource_file_count(resource_cls, files, resource=None):\n if len(files) > 0:\n if len(resource_cls.get_supported_upload_file_types()) == 0:\n err_msg = \"Content files are not allowed in {res_type} resource\"\n err_msg = err_msg.format(res_type=resource_cls)\n raise ResourceFileValidationException(err_msg)\n\n err_msg = \"Multiple content files are not supported in {res_type} resource\"\n err_msg = err_msg.format(res_type=resource_cls)\n if len(files) > 1:\n if not resource_cls.allow_multiple_file_upload():\n raise ResourceFileValidationException(err_msg)\n\n if resource is not None and resource.files.all().count() > 0:\n if not resource_cls.can_have_multiple_files():\n raise ResourceFileValidationException(err_msg)\n\n\ndef convert_file_size_to_unit(size, unit):\n \"\"\"\n Convert file size to unit for quota comparison\n :param size: in byte unit\n :param unit: should be one of the four: 'KB', 'MB', 'GB', or 'TB'\n :return: the size converted to the pass-in unit\n \"\"\"\n unit = unit.lower()\n if unit not in ('kb', 'mb', 'gb', 'tb'):\n raise ValidationError('Pass-in unit for file size conversion must be one of KB, MB, GB, '\n 'or TB')\n factor = 1024.0\n kbsize = size / factor\n if unit == 'kb':\n return kbsize\n mbsize = kbsize / factor\n if unit == 'mb':\n return mbsize\n gbsize = mbsize / factor\n if unit == 'gb':\n return gbsize\n tbsize = gbsize / factor\n if unit == 'tb':\n return tbsize\n\n\ndef validate_user_quota(user, size):\n \"\"\"\n validate to make sure the user is not over quota with the newly added size\n :param user: the user to be validated\n :param size: the newly added file size to add on top of the user's used quota to be validated.\n size input parameter should be in byte unit\n :return: raise exception for the over quota case\n \"\"\"\n if user:\n # validate it is within quota hard limit\n uq = user.quotas.filter(zone='hydroshare').first()\n if uq:\n if not QuotaMessage.objects.exists():\n QuotaMessage.objects.create()\n qmsg = QuotaMessage.objects.first()\n enforce_flag = qmsg.enforce_quota\n if enforce_flag:\n hard_limit = qmsg.hard_limit_percent\n used_size = uq.add_to_used_value(size)\n used_percent = uq.used_percent\n rounded_percent = round(used_percent, 2)\n rounded_used_val = round(used_size, 4)\n if used_percent >= hard_limit or uq.remaining_grace_period == 0:\n msg_template_str = '{}{}\\n\\n'.format(qmsg.enforce_content_prepend,\n qmsg.content)\n msg_str = msg_template_str.format(used=rounded_used_val,\n unit=uq.unit,\n allocated=uq.allocated_value,\n zone=uq.zone,\n 
percent=rounded_percent)\n raise QuotaException(msg_str)\n\n\ndef resource_pre_create_actions(resource_type, resource_title, page_redirect_url_key,\n files=(), source_names=[], metadata=None,\n requesting_user=None, **kwargs):\n from.resource import check_resource_type\n from hs_core.views.utils import validate_metadata\n\n if __debug__:\n assert(isinstance(source_names, list))\n\n if not resource_title:\n resource_title = 'Untitled resource'\n else:\n resource_title = resource_title.strip()\n if len(resource_title) == 0:\n resource_title = 'Untitled resource'\n\n resource_cls = check_resource_type(resource_type)\n if len(files) > 0:\n size = validate_resource_file_size(files)\n validate_resource_file_count(resource_cls, files)\n validate_resource_file_type(resource_cls, files)\n # validate it is within quota hard limit\n validate_user_quota(requesting_user, size)\n\n if not metadata:\n metadata = []\n else:\n validate_metadata(metadata, resource_type)\n\n page_url_dict = {}\n # this is needed since raster and feature resource types allows to upload a zip file,\n # then replace zip file with exploded files. If the zip file is loaded from hydroshare\n # federation zone, the original zip file encoded in source_names gets deleted\n # in this case and fed_res_path is used to keep the federation path, so that the resource\n # will be stored in the federated zone rather than the hydroshare zone\n fed_res_path = []\n # receivers need to change the values of this dict if file validation fails\n file_validation_dict = {'are_files_valid': True, 'message': 'Files are valid'}\n\n # Send pre-create resource signal - let any other app populate the empty metadata list object\n # also pass title to other apps, and give other apps a chance to populate page_redirect_url\n # if they want to redirect to their own page for resource creation rather than use core\n # resource creation code\n pre_create_resource.send(sender=resource_cls, metadata=metadata, files=files,\n title=resource_title,\n url_key=page_redirect_url_key, page_url_dict=page_url_dict,\n validate_files=file_validation_dict,\n source_names=source_names,\n user=requesting_user, fed_res_path=fed_res_path, **kwargs)\n\n if len(files) > 0:\n check_file_dict_for_error(file_validation_dict)\n\n return page_url_dict, resource_title, metadata, fed_res_path\n\n\ndef resource_post_create_actions(resource, user, metadata, **kwargs):\n # receivers need to change the values of this dict if file validation fails\n file_validation_dict = {'are_files_valid': True, 'message': 'Files are valid'}\n # Send post-create resource signal\n post_create_resource.send(sender=type(resource), resource=resource, user=user,\n metadata=metadata,\n validate_files=file_validation_dict, **kwargs)\n\n check_file_dict_for_error(file_validation_dict)\n\n\ndef prepare_resource_default_metadata(resource, metadata, res_title):\n add_title = True\n for element in metadata:\n if 'title' in element:\n if 'value' in element['title']:\n res_title = element['title']['value']\n add_title = False\n else:\n metadata.remove(element)\n break\n\n if add_title:\n metadata.append({'title': {'value': res_title}})\n\n add_language = True\n for element in metadata:\n if 'language' in element:\n if 'code' in element['language']:\n add_language = False\n else:\n metadata.remove(element)\n break\n\n if add_language:\n metadata.append({'language': {'code': 'eng'}})\n\n add_rights = True\n for element in metadata:\n if 'rights' in element:\n if 'statement' in element['rights'] and 'url' in 
element['rights']:\n add_rights = False\n else:\n metadata.remove(element)\n break\n\n if add_rights:\n # add the default rights/license element\n statement = 'This resource is shared under the Creative Commons Attribution CC BY.'\n url = 'http://creativecommons.org/licenses/by/4.0/'\n metadata.append({'rights': {'statement': statement, 'url': url}})\n\n metadata.append({'identifier': {'name': 'hydroShareIdentifier',\n 'url': '{0}/resource/{1}'.format(current_site_url(),\n resource.short_id)}})\n\n # remove if there exists the 'type' element as system generates this element\n # remove if there exists 'format' elements - since format elements are system generated based\n # on resource content files\n # remove any 'date' element which is not of type 'valid'. All other date elements are\n # system generated\n for element in list(metadata):\n if 'type' in element or 'format' in element:\n metadata.remove(element)\n if 'date' in element:\n if 'type' in element['date']:\n if element['date']['type'] != 'valid':\n metadata.remove(element)\n\n metadata.append({'type': {'url': '{0}/terms/{1}'.format(current_site_url(),\n resource.__class__.__name__)}})\n\n metadata.append({'date': {'type': 'created', 'start_date': resource.created}})\n metadata.append({'date': {'type': 'modified', 'start_date': resource.updated}})\n\n # only add the resource creator as the creator for metadata if there is not already\n # creator data in the metadata object\n metadata_keys = [element.keys()[0].lower() for element in metadata]\n if 'creator' not in metadata_keys:\n creator_data = get_party_data_from_user(resource.creator)\n metadata.append({'creator': creator_data})\n\n\ndef get_party_data_from_user(user):\n party_data = {}\n user_profile = get_profile(user)\n user_full_name = user.get_full_name()\n if user_full_name:\n party_name = user_full_name\n else:\n party_name = user.username\n\n party_data['name'] = party_name\n party_data['email'] = user.email\n party_data['description'] = '/user/{uid}/'.format(uid=user.pk)\n party_data['phone'] = user_profile.phone_1\n party_data['organization'] = user_profile.organization\n party_data['identifiers'] = user_profile.identifiers\n return party_data\n\n\n# TODO: make this part of resource api. resource --> self.\ndef resource_file_add_pre_process(resource, files, user, extract_metadata=False,\n source_names=[], **kwargs):\n if __debug__:\n assert(isinstance(source_names, list))\n resource_cls = resource.__class__\n if len(files) > 0:\n size = validate_resource_file_size(files)\n validate_user_quota(resource.get_quota_holder(), size)\n validate_resource_file_type(resource_cls, files)\n validate_resource_file_count(resource_cls, files, resource)\n\n file_validation_dict = {'are_files_valid': True, 'message': 'Files are valid'}\n pre_add_files_to_resource.send(sender=resource_cls, files=files, resource=resource, user=user,\n source_names=source_names,\n validate_files=file_validation_dict,\n extract_metadata=extract_metadata, **kwargs)\n\n check_file_dict_for_error(file_validation_dict)\n\n\n# TODO: make this part of resource api. 
resource --> self.\ndef resource_file_add_process(resource, files, user, extract_metadata=False,\n source_names=[], **kwargs):\n\n from .resource import add_resource_files\n if __debug__:\n assert(isinstance(source_names, list))\n folder = kwargs.pop('folder', None)\n resource_file_objects = add_resource_files(resource.short_id, *files, folder=folder,\n source_names=source_names)\n\n # receivers need to change the values of this dict if file validation fails\n # in case of file validation failure it is assumed the resource type also deleted the file\n file_validation_dict = {'are_files_valid': True, 'message': 'Files are valid'}\n post_add_files_to_resource.send(sender=resource.__class__, files=files,\n source_names=source_names,\n resource=resource, user=user,\n validate_files=file_validation_dict,\n extract_metadata=extract_metadata,\n res_files=resource_file_objects, **kwargs)\n\n check_file_dict_for_error(file_validation_dict)\n\n resource_modified(resource, user, overwrite_bag=False)\n return resource_file_objects\n\n\n# TODO: move this to BaseResource\ndef create_empty_contents_directory(resource):\n res_contents_dir = resource.file_path\n istorage = resource.get_irods_storage()\n if not istorage.exists(res_contents_dir):\n istorage.session.run(\"imkdir\", None, '-p', res_contents_dir)\n\n\ndef add_file_to_resource(resource, f, folder=None, source_name='',\n move=False):\n \"\"\"\n Add a ResourceFile to a Resource. Adds the 'format' metadata element to the resource.\n :param resource: Resource to which file should be added\n :param f: File-like object to add to a resource\n :param source_name: the logical file name of the resource content file for\n federated iRODS resource or the federated zone name;\n By default, it is empty. A non-empty value indicates\n the file needs to be added into the federated zone, either\n from local disk where f holds the uploaded file from local\n disk, or from the federated zone directly where f is empty\n but source_name has the whole data object\n iRODS path in the federated zone\n :param move: indicate whether the file should be copied or moved from private user\n account to proxy user account in federated zone; A value of False\n indicates copy is needed, a value of True indicates no copy, but\n the file will be moved from private user account to proxy user account.\n The default value is False.\n\n :return: The identifier of the ResourceFile added.\n \"\"\"\n\n # importing here to avoid circular import\n from hs_file_types.models import GenericLogicalFile\n\n if f:\n openfile = File(f) if not isinstance(f, UploadedFile) else f\n ret = ResourceFile.create(resource, openfile, folder=folder, source=None, move=False)\n\n # add format metadata element if necessary\n file_format_type = get_file_mime_type(f.name)\n\n elif source_name:\n try:\n # create from existing iRODS file\n ret = ResourceFile.create(resource, None, folder=folder, source=source_name, move=move)\n except SessionException as ex:\n try:\n ret.delete()\n except Exception:\n pass\n # raise the exception for the calling function to inform the error on the page interface\n raise SessionException(ex.exitcode, ex.stdout, ex.stderr)\n\n # add format metadata element if necessary\n file_format_type = get_file_mime_type(source_name)\n\n else:\n raise ValueError('Invalid input parameter is passed into this add_file_to_resource() '\n 'function')\n\n # TODO: generate this from data in ResourceFile rather than extension\n if file_format_type not in [mime.value for mime in 
resource.metadata.formats.all()]:\n resource.metadata.create_element('format', value=file_format_type)\n\n # if a file gets added successfully to composite resource, then better to set the generic\n # logical file here\n if resource.resource_type == \"CompositeResource\":\n logical_file = GenericLogicalFile.create()\n ret.logical_file_content_object = logical_file\n ret.save()\n\n return ret\n\n\ndef add_metadata_element_to_xml(root, md_element, md_fields):\n \"\"\"\n helper function to generate xml elements for a given metadata element that belongs to\n 'hsterms' namespace\n\n :param root: the xml document root element to which xml elements for the specified\n metadata element needs to be added\n :param md_element: the metadata element object. The term attribute of the metadata\n element object is used for naming the root xml element for this metadata element.\n If the root xml element needs to be named differently, then this needs to be a tuple\n with first element being the metadata element object and the second being the name\n for the root element.\n Example:\n md_element=self.Creator # the term attribute of the Creator object will be used\n md_element=(self.Creator, 'Author') # 'Author' will be used\n\n :param md_fields: a list of attribute names of the metadata element (if the name to be used\n in generating the xml element name is same as the attribute name then include the\n attribute name as a list item. if xml element name needs to be different from the\n attribute name then the list item must be a tuple with first element of the tuple being\n the attribute name and the second element being what will be used in naming the xml\n element)\n Example:\n [('first_name', 'firstName'), 'phone', 'email']\n # xml sub-elements names: firstName, phone, email\n \"\"\"\n from lxml import etree\n from hs_core.models import CoreMetaData\n\n name_spaces = CoreMetaData.NAMESPACES\n if isinstance(md_element, tuple):\n element_name = md_element[1]\n md_element = md_element[0]\n else:\n element_name = md_element.term\n\n hsterms_newElem = etree.SubElement(root,\n \"{{{ns}}}{new_element}\".format(\n ns=name_spaces['hsterms'],\n new_element=element_name))\n hsterms_newElem_rdf_Desc = etree.SubElement(\n hsterms_newElem, \"{{{ns}}}Description\".format(ns=name_spaces['rdf']))\n for md_field in md_fields:\n if isinstance(md_field, tuple):\n field_name = md_field[0]\n xml_element_name = md_field[1]\n else:\n field_name = md_field\n xml_element_name = md_field\n\n if hasattr(md_element, field_name):\n attr = getattr(md_element, field_name)\n if attr:\n field = etree.SubElement(hsterms_newElem_rdf_Desc,\n \"{{{ns}}}{field}\".format(ns=name_spaces['hsterms'],\n field=xml_element_name))\n field.text = str(attr)\n\n\nclass ZipContents(object):\n \"\"\"\n Extract the contents of a zip file one file at a time\n using a generator.\n \"\"\"\n def __init__(self, zip_file):\n self.zip_file = zip_file\n\n def black_list_path(self, file_path):\n return file_path.startswith('__MACOSX/')\n\n def black_list_name(self, file_name):\n return file_name == '.DS_Store'\n\n def get_files(self):\n temp_dir = tempfile.mkdtemp()\n try:\n file_path = None\n for name_path in self.zip_file.namelist():\n if not self.black_list_path(name_path):\n name = os.path.basename(name_path)\n if name != '':\n if not self.black_list_name(name):\n self.zip_file.extract(name_path, temp_dir)\n file_path = os.path.join(temp_dir, name_path)\n logger.debug(\"Opening {0} as File with name {1}\".format(file_path,\n name_path))\n f = 
File(file=open(file_path, 'rb'),\n name=name_path)\n f.size = os.stat(file_path).st_size\n yield f\n finally:\n shutil.rmtree(temp_dir)\n\n\ndef get_file_storage():\n return IrodsStorage() if getattr(settings, 'USE_IRODS', False) else DefaultStorage()\n\n\ndef resolve_request(request):\n if request.POST:\n return request.POST\n\n if request.data:\n return request.data\n\n return {}\n",
"path": "hs_core/hydroshare/utils.py"
}
] | diff --git a/hs_core/hydroshare/utils.py b/hs_core/hydroshare/utils.py
index 4f9f4b4c3f..dbd9f521d1 100755
--- a/hs_core/hydroshare/utils.py
+++ b/hs_core/hydroshare/utils.py
@@ -860,6 +860,7 @@ def get_party_data_from_user(user):
party_data['description'] = '/user/{uid}/'.format(uid=user.pk)
party_data['phone'] = user_profile.phone_1
party_data['organization'] = user_profile.organization
+ party_data['identifiers'] = user_profile.identifiers
return party_data
|
DataDog__dd-agent-1776 | Couchbase service check does not send tags on OK status
The OK-status `service_check` call is missing `tags=service_check_tags`, unlike the CRITICAL branches just above it:
https://github.com/DataDog/dd-agent/blob/master/checks.d/couchbase.py#L104
Pretty small fix.
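A minimal sketch of the change in the `else` branch of `get_data()` (the names come straight from the check above; only the `tags` argument is new):

```python
# Sketch: pass the same tags the CRITICAL paths already send, so the
# OK status is tagged with instance:<server> as well.
else:
    self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
                       tags=service_check_tags)
```

With this, both outcomes of the service check can be filtered by the same `instance:` tag.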
| [
{
"content": "# stdlib\nimport re\n\n# 3rd party\nimport requests\n\n# project\nfrom checks import AgentCheck\nfrom util import headers\n\n# Constants\nCOUCHBASE_STATS_PATH = '/pools/default'\nDEFAULT_TIMEOUT = 10\n\n\nclass Couchbase(AgentCheck):\n \"\"\"Extracts stats from Couchbase via its REST API\n http://docs.couchbase.com/couchbase-manual-2.0/#using-the-rest-api\n \"\"\"\n SERVICE_CHECK_NAME = 'couchbase.can_connect'\n\n def _create_metrics(self, data, tags=None):\n storage_totals = data['stats']['storageTotals']\n for key, storage_type in storage_totals.items():\n for metric_name, val in storage_type.items():\n if val is not None:\n metric_name = '.'.join(['couchbase', key, self.camel_case_to_joined_lower(metric_name)])\n self.gauge(metric_name, val, tags=tags)\n\n for bucket_name, bucket_stats in data['buckets'].items():\n for metric_name, val in bucket_stats.items():\n if val is not None:\n metric_name = '.'.join(['couchbase', 'by_bucket', self.camel_case_to_joined_lower(metric_name)])\n metric_tags = list(tags)\n metric_tags.append('bucket:%s' % bucket_name)\n self.gauge(metric_name, val[0], tags=metric_tags, device_name=bucket_name)\n\n for node_name, node_stats in data['nodes'].items():\n for metric_name, val in node_stats['interestingStats'].items():\n if val is not None:\n metric_name = '.'.join(['couchbase', 'by_node', self.camel_case_to_joined_lower(metric_name)])\n metric_tags = list(tags)\n metric_tags.append('node:%s' % node_name)\n self.gauge(metric_name, val, tags=metric_tags, device_name=node_name)\n\n\n def _get_stats(self, url, instance):\n \"\"\" Hit a given URL and return the parsed json. \"\"\"\n self.log.debug('Fetching Couchbase stats at url: %s' % url)\n\n timeout = float(instance.get('timeout', DEFAULT_TIMEOUT))\n\n auth = None\n if 'user' in instance and 'password' in instance:\n auth = (instance['user'], instance['password'])\n\n r = requests.get(url, auth=auth, headers=headers(self.agentConfig),\n timeout=timeout)\n r.raise_for_status()\n return r.json()\n\n def check(self, instance):\n server = instance.get('server', None)\n if server is None:\n raise Exception(\"The server must be specified\")\n tags = instance.get('tags', [])\n # Clean up tags in case there was a None entry in the instance\n # e.g. if the yaml contains tags: but no actual tags\n if tags is None:\n tags = []\n else:\n tags = list(set(tags))\n tags.append('instance:%s' % server)\n data = self.get_data(server, instance)\n self._create_metrics(data, tags=list(set(tags)))\n\n def get_data(self, server, instance):\n # The dictionary to be returned.\n couchbase = {\n 'stats': None,\n 'buckets': {},\n 'nodes': {}\n }\n\n # build couchbase stats entry point\n url = '%s%s' % (server, COUCHBASE_STATS_PATH)\n\n # Fetch initial stats and capture a service check based on response.\n service_check_tags = ['instance:%s' % server]\n try:\n overall_stats = self._get_stats(url, instance)\n # No overall stats? 
bail out now\n if overall_stats is None:\n raise Exception(\"No data returned from couchbase endpoint: %s\" % url)\n except requests.exceptions.HTTPError as e:\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,\n tags=service_check_tags, message=str(e.message))\n raise\n except Exception as e:\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,\n tags=service_check_tags, message=str(e))\n raise\n else:\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK)\n\n couchbase['stats'] = overall_stats\n\n nodes = overall_stats['nodes']\n\n # Next, get all the nodes\n if nodes is not None:\n for node in nodes:\n couchbase['nodes'][node['hostname']] = node\n\n # Next, get all buckets .\n endpoint = overall_stats['buckets']['uri']\n\n url = '%s%s' % (server, endpoint)\n buckets = self._get_stats(url, instance)\n\n if buckets is not None:\n for bucket in buckets:\n bucket_name = bucket['name']\n\n # Fetch URI for the stats bucket\n endpoint = bucket['stats']['uri']\n url = '%s%s' % (server, endpoint)\n\n try:\n bucket_stats = self._get_stats(url, instance)\n except requests.exceptions.HTTPError:\n url_backup = '%s/pools/nodes/buckets/%s/stats' % (server, bucket_name)\n bucket_stats = self._get_stats(url_backup, instance)\n\n bucket_samples = bucket_stats['op']['samples']\n if bucket_samples is not None:\n couchbase['buckets'][bucket['name']] = bucket_samples\n\n return couchbase\n\n # Takes a camelCased variable and returns a joined_lower equivalent.\n # Returns input if non-camelCase variable is detected.\n def camel_case_to_joined_lower(self, variable):\n # replace non-word with _\n converted_variable = re.sub('\\W+', '_', variable)\n\n # insert _ in front of capital letters and lowercase the string\n converted_variable = re.sub('([A-Z])', '_\\g<1>', converted_variable).lower()\n\n # remove duplicate _\n converted_variable = re.sub('_+', '_', converted_variable)\n\n # handle special case of starting/ending underscores\n converted_variable = re.sub('^_|_$', '', converted_variable)\n\n return converted_variable\n",
"path": "checks.d/couchbase.py"
}
] | [
{
"content": "# stdlib\nimport re\n\n# 3rd party\nimport requests\n\n# project\nfrom checks import AgentCheck\nfrom util import headers\n\n# Constants\nCOUCHBASE_STATS_PATH = '/pools/default'\nDEFAULT_TIMEOUT = 10\n\n\nclass Couchbase(AgentCheck):\n \"\"\"Extracts stats from Couchbase via its REST API\n http://docs.couchbase.com/couchbase-manual-2.0/#using-the-rest-api\n \"\"\"\n SERVICE_CHECK_NAME = 'couchbase.can_connect'\n\n def _create_metrics(self, data, tags=None):\n storage_totals = data['stats']['storageTotals']\n for key, storage_type in storage_totals.items():\n for metric_name, val in storage_type.items():\n if val is not None:\n metric_name = '.'.join(['couchbase', key, self.camel_case_to_joined_lower(metric_name)])\n self.gauge(metric_name, val, tags=tags)\n\n for bucket_name, bucket_stats in data['buckets'].items():\n for metric_name, val in bucket_stats.items():\n if val is not None:\n metric_name = '.'.join(['couchbase', 'by_bucket', self.camel_case_to_joined_lower(metric_name)])\n metric_tags = list(tags)\n metric_tags.append('bucket:%s' % bucket_name)\n self.gauge(metric_name, val[0], tags=metric_tags, device_name=bucket_name)\n\n for node_name, node_stats in data['nodes'].items():\n for metric_name, val in node_stats['interestingStats'].items():\n if val is not None:\n metric_name = '.'.join(['couchbase', 'by_node', self.camel_case_to_joined_lower(metric_name)])\n metric_tags = list(tags)\n metric_tags.append('node:%s' % node_name)\n self.gauge(metric_name, val, tags=metric_tags, device_name=node_name)\n\n\n def _get_stats(self, url, instance):\n \"\"\" Hit a given URL and return the parsed json. \"\"\"\n self.log.debug('Fetching Couchbase stats at url: %s' % url)\n\n timeout = float(instance.get('timeout', DEFAULT_TIMEOUT))\n\n auth = None\n if 'user' in instance and 'password' in instance:\n auth = (instance['user'], instance['password'])\n\n r = requests.get(url, auth=auth, headers=headers(self.agentConfig),\n timeout=timeout)\n r.raise_for_status()\n return r.json()\n\n def check(self, instance):\n server = instance.get('server', None)\n if server is None:\n raise Exception(\"The server must be specified\")\n tags = instance.get('tags', [])\n # Clean up tags in case there was a None entry in the instance\n # e.g. if the yaml contains tags: but no actual tags\n if tags is None:\n tags = []\n else:\n tags = list(set(tags))\n tags.append('instance:%s' % server)\n data = self.get_data(server, instance)\n self._create_metrics(data, tags=list(set(tags)))\n\n def get_data(self, server, instance):\n # The dictionary to be returned.\n couchbase = {\n 'stats': None,\n 'buckets': {},\n 'nodes': {}\n }\n\n # build couchbase stats entry point\n url = '%s%s' % (server, COUCHBASE_STATS_PATH)\n\n # Fetch initial stats and capture a service check based on response.\n service_check_tags = ['instance:%s' % server]\n try:\n overall_stats = self._get_stats(url, instance)\n # No overall stats? 
bail out now\n if overall_stats is None:\n raise Exception(\"No data returned from couchbase endpoint: %s\" % url)\n except requests.exceptions.HTTPError as e:\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,\n tags=service_check_tags, message=str(e.message))\n raise\n except Exception as e:\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,\n tags=service_check_tags, message=str(e))\n raise\n else:\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,\n tags=service_check_tags)\n\n couchbase['stats'] = overall_stats\n\n nodes = overall_stats['nodes']\n\n # Next, get all the nodes\n if nodes is not None:\n for node in nodes:\n couchbase['nodes'][node['hostname']] = node\n\n # Next, get all buckets .\n endpoint = overall_stats['buckets']['uri']\n\n url = '%s%s' % (server, endpoint)\n buckets = self._get_stats(url, instance)\n\n if buckets is not None:\n for bucket in buckets:\n bucket_name = bucket['name']\n\n # Fetch URI for the stats bucket\n endpoint = bucket['stats']['uri']\n url = '%s%s' % (server, endpoint)\n\n try:\n bucket_stats = self._get_stats(url, instance)\n except requests.exceptions.HTTPError:\n url_backup = '%s/pools/nodes/buckets/%s/stats' % (server, bucket_name)\n bucket_stats = self._get_stats(url_backup, instance)\n\n bucket_samples = bucket_stats['op']['samples']\n if bucket_samples is not None:\n couchbase['buckets'][bucket['name']] = bucket_samples\n\n return couchbase\n\n # Takes a camelCased variable and returns a joined_lower equivalent.\n # Returns input if non-camelCase variable is detected.\n def camel_case_to_joined_lower(self, variable):\n # replace non-word with _\n converted_variable = re.sub('\\W+', '_', variable)\n\n # insert _ in front of capital letters and lowercase the string\n converted_variable = re.sub('([A-Z])', '_\\g<1>', converted_variable).lower()\n\n # remove duplicate _\n converted_variable = re.sub('_+', '_', converted_variable)\n\n # handle special case of starting/ending underscores\n converted_variable = re.sub('^_|_$', '', converted_variable)\n\n return converted_variable\n",
"path": "checks.d/couchbase.py"
}
] | diff --git a/checks.d/couchbase.py b/checks.d/couchbase.py
index 8ede71b3f6..9f8f2e2f88 100644
--- a/checks.d/couchbase.py
+++ b/checks.d/couchbase.py
@@ -101,7 +101,8 @@ def get_data(self, server, instance):
tags=service_check_tags, message=str(e))
raise
else:
- self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK)
+ self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
+ tags=service_check_tags)
couchbase['stats'] = overall_stats
|
mdn__kuma-6501 | E - Noindex on /**/docs/MDN/Jobs?
Similar to https://github.com/mdn/kuma/issues/6422
but this time, instead of `/**/docs/MDN/Doc_status` it's `/**/docs/MDN/Jobs`.
There are only 9 such documents in my month-old clone of the production DB:
```
mysql> select locale, slug from wiki_document where slug like 'MDN/Jobs%';
+--------+-------------------------------------------------------+
| locale | slug |
+--------+-------------------------------------------------------+
| en-US | MDN/Jobs |
| ja | MDN/Jobs |
| zh-CN | MDN/Jobs |
| en-US | MDN/Jobs/Firefox_developer_content_manager_contractor |
| en-US | MDN/Jobs/MDN_Developer_Researcher |
| ja | MDN/Jobs/MDN_Developer_Researcher |
| zh-CN | MDN/Jobs/MDN_Developer_Researcher |
| en-US | MDN/Jobs/MDN_technical_writer_contractor |
| ja | MDN/Jobs/MDN_technical_writer_contractor |
+--------+-------------------------------------------------------+
9 rows in set (0.00 sec)
```
So can I add it to [`NOINDEX_SLUG_PREFIXES`](https://github.com/mdn/kuma/blob/3df115a9ba8d3130094a9007f4e9d668a6f274fe/kuma/wiki/constants.py#L668)?
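
If so, the change is presumably the same one-liner as for `Doc_status` — a sketch against the tuple in `kuma/wiki/constants.py`, assuming no other prefixes have been added in the meantime:

```python
# Sketch: extend the case-*sensitive* prefix tuple so MDN/Jobs pages get the
# <meta name="robots" content="noindex, nofollow"> header and are excluded
# from the sitemap XML files.
NOINDEX_SLUG_PREFIXES = ("MDN/Doc_status", "MDN/Jobs")
```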
| [
{
"content": "import re\nfrom urllib.parse import urlparse, urlunparse\n\nimport bleach\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\n\nALLOWED_TAGS = bleach.ALLOWED_TAGS + [\n \"div\",\n \"span\",\n \"p\",\n \"br\",\n \"h1\",\n \"h2\",\n \"h3\",\n \"h4\",\n \"h5\",\n \"h6\",\n \"pre\",\n \"code\",\n \"cite\",\n \"dl\",\n \"dt\",\n \"dd\",\n \"small\",\n \"sub\",\n \"sup\",\n \"u\",\n \"strike\",\n \"samp\",\n \"abbr\",\n \"ul\",\n \"ol\",\n \"li\",\n \"nobr\",\n \"dfn\",\n \"caption\",\n \"var\",\n \"s\",\n \"i\",\n \"img\",\n \"hr\",\n \"input\",\n \"label\",\n \"select\",\n \"option\",\n \"textarea\",\n # Note: <iframe> is allowed, but src=\"\" is filtered after bleach\n \"iframe\",\n \"table\",\n \"tbody\",\n \"thead\",\n \"tfoot\",\n \"tr\",\n \"th\",\n \"td\",\n \"colgroup\",\n \"col\",\n \"section\",\n \"header\",\n \"footer\",\n \"nav\",\n \"article\",\n \"aside\",\n \"figure\",\n \"figcaption\",\n \"dialog\",\n \"hgroup\",\n \"mark\",\n \"time\",\n \"meter\",\n \"command\",\n \"output\",\n \"progress\",\n \"audio\",\n \"video\",\n \"details\",\n \"summary\",\n \"datagrid\",\n \"datalist\",\n \"table\",\n \"address\",\n \"font\",\n \"bdi\",\n \"bdo\",\n \"del\",\n \"ins\",\n \"kbd\",\n \"samp\",\n \"var\",\n \"ruby\",\n \"rp\",\n \"rt\",\n \"q\",\n # MathML\n \"math\",\n \"maction\",\n \"menclose\",\n \"merror\",\n \"mfenced\",\n \"mfrac\",\n \"mglyph\",\n \"mi\",\n \"mlabeledtr\",\n \"mmultiscripts\",\n \"mn\",\n \"mo\",\n \"mover\",\n \"mpadded\",\n \"mphantom\",\n \"mroot\",\n \"mrow\",\n \"ms\",\n \"mspace\",\n \"msqrt\",\n \"mstyle\",\n \"msub\",\n \"msup\",\n \"msubsup\",\n \"mtable\",\n \"mtd\",\n \"mtext\",\n \"mtr\",\n \"munder\",\n \"munderover\",\n \"none\",\n \"mprescripts\",\n \"semantics\",\n \"annotation\",\n \"annotation-xml\",\n]\nALLOWED_ATTRIBUTES = bleach.ALLOWED_ATTRIBUTES\n\nALLOWED_ATTRIBUTES[\"*\"] = [\"lang\"]\n# Note: <iframe> is allowed, but src=\"\" is pre-filtered before bleach\nALLOWED_ATTRIBUTES[\"iframe\"] = [\n \"id\",\n \"src\",\n \"sandbox\",\n \"seamless\",\n \"frameborder\",\n \"width\",\n \"height\",\n \"class\",\n \"allow\",\n \"title\",\n]\nALLOWED_ATTRIBUTES[\"p\"] = [\"style\", \"class\", \"id\", \"align\", \"lang\", \"dir\"]\nALLOWED_ATTRIBUTES[\"span\"] = [\"style\", \"class\", \"id\", \"title\", \"lang\", \"dir\"]\nALLOWED_ATTRIBUTES[\"abbr\"] = [\"style\", \"class\", \"id\", \"title\", \"lang\", \"dir\"]\nALLOWED_ATTRIBUTES[\"img\"] = [\n \"src\",\n \"id\",\n \"align\",\n \"alt\",\n \"class\",\n \"is\",\n \"title\",\n \"style\",\n \"lang\",\n \"dir\",\n \"width\",\n \"height\",\n]\nALLOWED_ATTRIBUTES[\"a\"] = [\n \"style\",\n \"id\",\n \"class\",\n \"href\",\n \"title\",\n \"lang\",\n \"name\",\n \"dir\",\n \"hreflang\",\n \"rel\",\n]\nALLOWED_ATTRIBUTES[\"i\"] = [\"class\"]\nALLOWED_ATTRIBUTES[\"td\"] = [\"style\", \"id\", \"class\", \"colspan\", \"rowspan\", \"lang\", \"dir\"]\nALLOWED_ATTRIBUTES[\"th\"] = [\n \"style\",\n \"id\",\n \"class\",\n \"colspan\",\n \"rowspan\",\n \"scope\",\n \"lang\",\n \"dir\",\n]\nALLOWED_ATTRIBUTES[\"video\"] = [\"style\", \"id\", \"class\", \"lang\", \"src\", \"controls\", \"dir\"]\nALLOWED_ATTRIBUTES[\"font\"] = [\"color\", \"face\", \"size\", \"dir\"]\nALLOWED_ATTRIBUTES[\"select\"] = [\"name\", \"dir\"]\nALLOWED_ATTRIBUTES[\"option\"] = [\"value\", \"selected\", \"dir\"]\nALLOWED_ATTRIBUTES[\"ol\"] = [\"style\", \"class\", \"id\", \"lang\", \"start\", \"dir\"]\nALLOWED_ATTRIBUTES.update(\n dict(\n (x, [\"style\", \"class\", \"id\", \"name\", \"lang\", 
\"dir\"])\n for x in (\"h1\", \"h2\", \"h3\", \"h4\", \"h5\", \"h6\")\n )\n)\nALLOWED_ATTRIBUTES.update(\n dict(\n (x, [\"style\", \"class\", \"id\", \"lang\", \"dir\", \"title\"])\n for x in (\n \"div\",\n \"pre\",\n \"ul\",\n \"li\",\n \"code\",\n \"dl\",\n \"dt\",\n \"dd\",\n \"section\",\n \"header\",\n \"footer\",\n \"nav\",\n \"article\",\n \"aside\",\n \"figure\",\n \"dialog\",\n \"hgroup\",\n \"mark\",\n \"time\",\n \"meter\",\n \"command\",\n \"output\",\n \"progress\",\n \"audio\",\n \"details\",\n \"datagrid\",\n \"datalist\",\n \"table\",\n \"tr\",\n \"address\",\n \"col\",\n \"s\",\n \"strong\",\n )\n )\n)\nALLOWED_ATTRIBUTES.update(\n dict((x, [\"cite\"]) for x in (\"blockquote\", \"del\", \"ins\", \"q\"))\n)\nALLOWED_ATTRIBUTES[\"li\"] += [\"data-default-state\"]\nALLOWED_ATTRIBUTES[\"time\"] += [\"datetime\"]\nALLOWED_ATTRIBUTES[\"ins\"] = [\"datetime\"]\nALLOWED_ATTRIBUTES[\"del\"] = [\"datetime\"]\nALLOWED_ATTRIBUTES[\"meter\"] += [\"max\", \"min\", \"value\", \"low\", \"high\", \"optimum\", \"form\"]\nALLOWED_ATTRIBUTES[\"details\"] += [\"open\"]\n# MathML\nALLOWED_ATTRIBUTES.update(\n dict((x, [\"encoding\", \"src\"]) for x in (\"annotation\", \"annotation-xml\"))\n)\nALLOWED_ATTRIBUTES.update(\n dict(\n (x, [\"href\", \"mathbackground\", \"mathcolor\", \"id\", \"class\", \"style\"])\n for x in (\n \"math\",\n \"maction\",\n \"menclose\",\n \"merror\",\n \"mfenced\",\n \"mfrac\",\n \"mglyph\",\n \"mi\",\n \"mlabeledtr\",\n \"mmultiscripts\",\n \"mn\",\n \"mo\",\n \"mover\",\n \"mpadded\",\n \"mphantom\",\n \"mroot\",\n \"mrow\",\n \"ms\",\n \"mspace\",\n \"msqrt\",\n \"mstyle\",\n \"msub\",\n \"msup\",\n \"msubsup\",\n \"mtable\",\n \"mtd\",\n \"mtext\",\n \"mtr\",\n \"munder\",\n \"munderover\",\n \"none\",\n \"mprescripts\",\n )\n )\n)\nALLOWED_ATTRIBUTES[\"math\"] += [\n \"display\",\n \"dir\",\n \"selection\",\n \"notation\",\n \"close\",\n \"open\",\n \"separators\",\n \"bevelled\",\n \"denomalign\",\n \"linethickness\",\n \"numalign\",\n \"largeop\",\n \"maxsize\",\n \"minsize\",\n \"movablelimits\",\n \"rspace\",\n \"separator\",\n \"stretchy\",\n \"symmetric\",\n \"depth\",\n \"lquote\",\n \"rquote\",\n \"align\",\n \"columnlines\",\n \"frame\",\n \"rowalign\",\n \"rowspacing\",\n \"rowspan\",\n \"columnspan\",\n \"accent\",\n \"accentunder\",\n \"dir\",\n \"mathsize\",\n \"mathvariant\",\n \"subscriptshift\",\n \"supscriptshift\",\n \"scriptlevel\",\n \"displaystyle\",\n \"scriptsizemultiplier\",\n \"scriptminsize\",\n \"altimg\",\n \"altimg-width\",\n \"altimg-height\",\n \"altimg-valign\",\n \"alttext\",\n]\nALLOWED_ATTRIBUTES[\"maction\"] += [\"actiontype\", \"selection\"]\nALLOWED_ATTRIBUTES[\"menclose\"] += [\"notation\"]\nALLOWED_ATTRIBUTES[\"mfenced\"] += [\"close\", \"open\", \"separators\"]\nALLOWED_ATTRIBUTES[\"mfrac\"] += [\"bevelled\", \"denomalign\", \"linethickness\", \"numalign\"]\nALLOWED_ATTRIBUTES[\"mi\"] += [\"dir\", \"mathsize\", \"mathvariant\"]\nALLOWED_ATTRIBUTES[\"mn\"] += [\"dir\", \"mathsize\", \"mathvariant\"]\nALLOWED_ATTRIBUTES[\"mmultiscripts\"] += [\"subscriptshift\", \"superscriptshift\"]\nALLOWED_ATTRIBUTES[\"mo\"] += [\n \"largeop\",\n \"lspace\",\n \"maxsize\",\n \"minsize\",\n \"movablelimits\",\n \"rspace\",\n \"separator\",\n \"stretchy\",\n \"symmetric\",\n \"accent\",\n \"dir\",\n \"mathsize\",\n \"mathvariant\",\n]\nALLOWED_ATTRIBUTES[\"mover\"] += [\"accent\"]\nALLOWED_ATTRIBUTES[\"mpadded\"] += [\"lspace\", \"voffset\", \"depth\"]\nALLOWED_ATTRIBUTES[\"mrow\"] += [\"dir\"]\nALLOWED_ATTRIBUTES[\"ms\"] += [\"lquote\", 
\"rquote\", \"dir\", \"mathsize\", \"mathvariant\"]\nALLOWED_ATTRIBUTES[\"mspace\"] += [\"depth\", \"height\", \"width\"]\nALLOWED_ATTRIBUTES[\"mstyle\"] += [\n \"display\",\n \"dir\",\n \"selection\",\n \"notation\",\n \"close\",\n \"open\",\n \"separators\",\n \"bevelled\",\n \"denomalign\",\n \"linethickness\",\n \"numalign\",\n \"largeop\",\n \"maxsize\",\n \"minsize\",\n \"movablelimits\",\n \"rspace\",\n \"separator\",\n \"stretchy\",\n \"symmetric\",\n \"depth\",\n \"lquote\",\n \"rquote\",\n \"align\",\n \"columnlines\",\n \"frame\",\n \"rowalign\",\n \"rowspacing\",\n \"rowspan\",\n \"columnspan\",\n \"accent\",\n \"accentunder\",\n \"dir\",\n \"mathsize\",\n \"mathvariant\",\n \"subscriptshift\",\n \"supscriptshift\",\n \"scriptlevel\",\n \"displaystyle\",\n \"scriptsizemultiplier\",\n \"scriptminsize\",\n]\nALLOWED_ATTRIBUTES[\"msub\"] += [\"subscriptshift\"]\nALLOWED_ATTRIBUTES[\"msubsup\"] += [\"subscriptshift\", \"superscriptshift\"]\nALLOWED_ATTRIBUTES[\"msup\"] += [\"superscriptshift\"]\nALLOWED_ATTRIBUTES[\"mtable\"] += [\n \"align\",\n \"columnalign\",\n \"columnlines\",\n \"frame\",\n \"rowalign\",\n \"rowspacing\",\n \"rowlines\",\n]\nALLOWED_ATTRIBUTES[\"mtd\"] += [\"columnalign\", \"columnspan\", \"rowalign\", \"rowspan\"]\nALLOWED_ATTRIBUTES[\"mtext\"] += [\"dir\", \"mathsize\", \"mathvariant\"]\nALLOWED_ATTRIBUTES[\"mtr\"] += [\"columnalign\", \"rowalign\"]\nALLOWED_ATTRIBUTES[\"munder\"] += [\"accentunder\"]\nALLOWED_ATTRIBUTES[\"mundermover\"] = [\"accent\", \"accentunder\"]\n# CSS\nALLOWED_STYLES = [\n \"border\",\n \"border-top\",\n \"border-right\",\n \"border-bottom\",\n \"border-left\",\n \"float\",\n \"overflow\",\n \"min-height\",\n \"vertical-align\",\n \"white-space\",\n \"color\",\n \"border-radius\",\n \"-webkit-border-radius\",\n \"-moz-border-radius, -o-border-radius\",\n \"margin\",\n \"margin-left\",\n \"margin-top\",\n \"margin-bottom\",\n \"margin-right\",\n \"padding\",\n \"padding-left\",\n \"padding-top\",\n \"padding-bottom\",\n \"padding-right\",\n \"position\",\n \"top\",\n \"height\",\n \"left\",\n \"right\",\n \"background\", # TODO: Maybe not this one, it can load URLs\n \"background-color\",\n \"font\",\n \"font-size\",\n \"font-weight\",\n \"font-family\",\n \"font-variant\",\n \"text-align\",\n \"text-transform\",\n \"-moz-column-width\",\n \"-webkit-columns\",\n \"columns\",\n \"width\",\n \"list-style-type\",\n \"line-height\",\n # CSS properties needed for live examples (pending proper solution):\n \"backface-visibility\",\n \"-moz-backface-visibility\",\n \"-webkit-backface-visibility\",\n \"-o-backface-visibility\",\n \"perspective\",\n \"-moz-perspective\",\n \"-webkit-perspective\",\n \"-o-perspective\",\n \"perspective-origin\",\n \"-moz-perspective-origin\",\n \"-webkit-perspective-origin\",\n \"-o-perspective-origin\",\n \"transform\",\n \"-moz-transform\",\n \"-webkit-transform\",\n \"-o-transform\",\n \"transform-style\",\n \"-moz-transform-style\",\n \"-webkit-transform-style\",\n \"-o-transform-style\",\n \"columns\",\n \"-moz-columns\",\n \"-webkit-columns\",\n \"column-rule\",\n \"-moz-column-rule\",\n \"-webkit-column-rule\",\n \"column-width\",\n \"-moz-column-width\",\n \"-webkit-column-width\",\n \"image-rendering\",\n \"-ms-interpolation-mode\",\n \"position\",\n \"border-style\",\n \"background-clip\",\n \"border-bottom-right-radius\",\n \"border-bottom-left-radius\",\n \"border-top-right-radius\",\n \"border-top-left-radius\",\n \"border-bottom-style\",\n \"border-left-style\",\n \"border-right-style\",\n 
\"border-top-style\",\n \"border-bottom-width\",\n \"border-left-width\",\n \"border-right-width\",\n \"border-top-width\",\n \"vertical-align\",\n \"border-collapse\",\n \"border-width\",\n \"border-color\",\n \"border-left\",\n \"border-right\",\n \"border-bottom\",\n \"border-top\",\n \"clip\",\n \"cursor\",\n \"filter\",\n \"float\",\n \"max-width\",\n \"font-style\",\n \"letter-spacing\",\n \"opacity\",\n \"zoom\",\n \"text-overflow\",\n \"text-indent\",\n \"text-rendering\",\n \"text-shadow\",\n \"transition\",\n \"transition\",\n \"transition\",\n \"transition\",\n \"transition-delay\",\n \"-moz-transition-delay\",\n \"-webkit-transition-delay\",\n \"-o-transition-delay\",\n \"transition-duration\",\n \"-moz-transition-duration\",\n \"-webkit-transition-duration\",\n \"-o-transition-duration\",\n \"transition-property\",\n \"-moz-transition-property\",\n \"-webkit-transition-property\",\n \"-o-transition-property\",\n \"transition-timing-function\",\n \"-moz-transition-timing-function\",\n \"-webkit-transition-timing-function\",\n \"-o-transition-timing-function\",\n \"color\",\n \"display\",\n \"position\",\n \"outline-color\",\n \"outline\",\n \"outline-offset\",\n \"box-shadow\",\n \"-moz-box-shadow\",\n \"-webkit-box-shadow\",\n \"-o-box-shadow\",\n \"linear-gradient\",\n \"-moz-linear-gradient\",\n \"-webkit-linear-gradient\",\n \"radial-gradient\",\n \"-moz-radial-gradient\",\n \"-webkit-radial-gradient\",\n \"text-decoration-style\",\n \"-moz-text-decoration-style\",\n \"text-decoration\",\n \"direction\",\n \"white-space\",\n \"unicode-bidi\",\n \"word-wrap\",\n]\nALLOWED_PROTOCOLS = [\"http\", \"https\", \"mailto\", \"irc\", \"news\", \"ftp\", \"ssh\", \"nntp\"]\n\nDIFF_WRAP_COLUMN = 65\nEXPERIMENT_TITLE_PREFIX = \"Experiment:\"\nLEGACY_MINDTOUCH_NAMESPACES = (\n \"Help\",\n \"Help_talk\",\n \"Project\",\n \"Project_talk\",\n \"Special\",\n \"Talk\",\n \"Template\",\n \"Template_talk\",\n \"User\",\n \"User_talk\",\n)\n\nDOCUMENTS_PER_PAGE = 100\n_ks_urlbits = urlparse(settings.KUMASCRIPT_URL_TEMPLATE)\nKUMASCRIPT_BASE_URL = urlunparse(\n (_ks_urlbits.scheme, _ks_urlbits.netloc, \"\", \"\", \"\", \"\")\n)\n\n# TODO: Put this under the control of Constance / Waffle?\n# Flags used to signify revisions in need of review\nREVIEW_FLAG_TAGS = (\n (\"technical\", _(\"Technical - code samples, APIs, or technologies\")),\n (\"editorial\", _(\"Editorial - prose, grammar, or content\")),\n)\nREVIEW_FLAG_TAGS_DEFAULT = [\"technical\", \"editorial\"]\n\nLOCALIZATION_FLAG_TAGS = (\n (\"inprogress\", _(\"Localization in progress - not completely translated yet.\")),\n)\n\nSLUG_CLEANSING_RE = re.compile(r\"^\\/?(([A-z-]+)?\\/?docs\\/)?\")\n# ?, whitespace, percentage, quote disallowed in slugs altogether\nINVALID_DOC_SLUG_CHARS_RE = re.compile(r\"\"\"[\\s'\"%%\\?\\$]+\"\"\")\nINVALID_REV_SLUG_CHARS_RE = re.compile(r\"\"\"[\\s\\?\\/%%]+\"\"\")\nDOCUMENT_PATH_RE = re.compile(r\"[^\\$]+\")\n\n# how a redirect looks as rendered HTML\nREDIRECT_HTML = \"REDIRECT <a \"\nREDIRECT_CONTENT = 'REDIRECT <a class=\"redirect\" href=\"%(href)s\">%(title)s</a>'\n\nDEKI_FILE_URL = re.compile(r\"@api/deki/files/(?P<file_id>\\d+)/=\")\nKUMA_FILE_URL = re.compile(\n r\"%s%s/files/(?P<file_id>\\d+)/\"\n % (re.escape(settings.PROTOCOL), re.escape(settings.ATTACHMENT_HOST))\n)\n\nSPAM_TRAINING_SWITCH = \"wiki_spam_training\"\nSPAM_SUBMISSION_REVISION_FIELDS = [\n \"title\",\n \"slug\",\n \"summary\",\n \"content\",\n \"comment\",\n \"tags\",\n \"keywords\",\n]\nSPAM_OTHER_HEADERS = ( # Header to send that don't 
start with HTTP\n \"REMOTE_ADDR\",\n \"REQUEST_URI\",\n \"DOCUMENT_URI\",\n)\n\nCODE_SAMPLE_MACROS = [\n \"LiveSampleURL\",\n \"EmbedDistLiveSample\",\n \"EmbedLiveSample\",\n \"LiveSampleLink\",\n \"FXOSUXLiveSampleEmbed\",\n]\n\nDEV_DOC_REQUEST_FORM = \"https://github.com/mdn/sprints/issues/new?template=issue-template.md&projects=mdn/sprints/2&labels=user-report\"\n\nWIKI_ONLY_DOCUMENT_QUERY_PARAMS = frozenset(\n (\"edit_links\", \"include\", \"macros\", \"nomacros\", \"raw\", \"section\", \"summary\",)\n)\n\n# Any slug that, case *sensitively*, matches this list get the\n# <meta name=\"robots\" content=\"noindex, nofollow\"> HTML header and they get\n# excluded from the sitemap XML files.\nNOINDEX_SLUG_PREFIXES = (\"MDN/Doc_status\",)\n",
"path": "kuma/wiki/constants.py"
}
] | [
{
"content": "import re\nfrom urllib.parse import urlparse, urlunparse\n\nimport bleach\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\n\nALLOWED_TAGS = bleach.ALLOWED_TAGS + [\n \"div\",\n \"span\",\n \"p\",\n \"br\",\n \"h1\",\n \"h2\",\n \"h3\",\n \"h4\",\n \"h5\",\n \"h6\",\n \"pre\",\n \"code\",\n \"cite\",\n \"dl\",\n \"dt\",\n \"dd\",\n \"small\",\n \"sub\",\n \"sup\",\n \"u\",\n \"strike\",\n \"samp\",\n \"abbr\",\n \"ul\",\n \"ol\",\n \"li\",\n \"nobr\",\n \"dfn\",\n \"caption\",\n \"var\",\n \"s\",\n \"i\",\n \"img\",\n \"hr\",\n \"input\",\n \"label\",\n \"select\",\n \"option\",\n \"textarea\",\n # Note: <iframe> is allowed, but src=\"\" is filtered after bleach\n \"iframe\",\n \"table\",\n \"tbody\",\n \"thead\",\n \"tfoot\",\n \"tr\",\n \"th\",\n \"td\",\n \"colgroup\",\n \"col\",\n \"section\",\n \"header\",\n \"footer\",\n \"nav\",\n \"article\",\n \"aside\",\n \"figure\",\n \"figcaption\",\n \"dialog\",\n \"hgroup\",\n \"mark\",\n \"time\",\n \"meter\",\n \"command\",\n \"output\",\n \"progress\",\n \"audio\",\n \"video\",\n \"details\",\n \"summary\",\n \"datagrid\",\n \"datalist\",\n \"table\",\n \"address\",\n \"font\",\n \"bdi\",\n \"bdo\",\n \"del\",\n \"ins\",\n \"kbd\",\n \"samp\",\n \"var\",\n \"ruby\",\n \"rp\",\n \"rt\",\n \"q\",\n # MathML\n \"math\",\n \"maction\",\n \"menclose\",\n \"merror\",\n \"mfenced\",\n \"mfrac\",\n \"mglyph\",\n \"mi\",\n \"mlabeledtr\",\n \"mmultiscripts\",\n \"mn\",\n \"mo\",\n \"mover\",\n \"mpadded\",\n \"mphantom\",\n \"mroot\",\n \"mrow\",\n \"ms\",\n \"mspace\",\n \"msqrt\",\n \"mstyle\",\n \"msub\",\n \"msup\",\n \"msubsup\",\n \"mtable\",\n \"mtd\",\n \"mtext\",\n \"mtr\",\n \"munder\",\n \"munderover\",\n \"none\",\n \"mprescripts\",\n \"semantics\",\n \"annotation\",\n \"annotation-xml\",\n]\nALLOWED_ATTRIBUTES = bleach.ALLOWED_ATTRIBUTES\n\nALLOWED_ATTRIBUTES[\"*\"] = [\"lang\"]\n# Note: <iframe> is allowed, but src=\"\" is pre-filtered before bleach\nALLOWED_ATTRIBUTES[\"iframe\"] = [\n \"id\",\n \"src\",\n \"sandbox\",\n \"seamless\",\n \"frameborder\",\n \"width\",\n \"height\",\n \"class\",\n \"allow\",\n \"title\",\n]\nALLOWED_ATTRIBUTES[\"p\"] = [\"style\", \"class\", \"id\", \"align\", \"lang\", \"dir\"]\nALLOWED_ATTRIBUTES[\"span\"] = [\"style\", \"class\", \"id\", \"title\", \"lang\", \"dir\"]\nALLOWED_ATTRIBUTES[\"abbr\"] = [\"style\", \"class\", \"id\", \"title\", \"lang\", \"dir\"]\nALLOWED_ATTRIBUTES[\"img\"] = [\n \"src\",\n \"id\",\n \"align\",\n \"alt\",\n \"class\",\n \"is\",\n \"title\",\n \"style\",\n \"lang\",\n \"dir\",\n \"width\",\n \"height\",\n]\nALLOWED_ATTRIBUTES[\"a\"] = [\n \"style\",\n \"id\",\n \"class\",\n \"href\",\n \"title\",\n \"lang\",\n \"name\",\n \"dir\",\n \"hreflang\",\n \"rel\",\n]\nALLOWED_ATTRIBUTES[\"i\"] = [\"class\"]\nALLOWED_ATTRIBUTES[\"td\"] = [\"style\", \"id\", \"class\", \"colspan\", \"rowspan\", \"lang\", \"dir\"]\nALLOWED_ATTRIBUTES[\"th\"] = [\n \"style\",\n \"id\",\n \"class\",\n \"colspan\",\n \"rowspan\",\n \"scope\",\n \"lang\",\n \"dir\",\n]\nALLOWED_ATTRIBUTES[\"video\"] = [\"style\", \"id\", \"class\", \"lang\", \"src\", \"controls\", \"dir\"]\nALLOWED_ATTRIBUTES[\"font\"] = [\"color\", \"face\", \"size\", \"dir\"]\nALLOWED_ATTRIBUTES[\"select\"] = [\"name\", \"dir\"]\nALLOWED_ATTRIBUTES[\"option\"] = [\"value\", \"selected\", \"dir\"]\nALLOWED_ATTRIBUTES[\"ol\"] = [\"style\", \"class\", \"id\", \"lang\", \"start\", \"dir\"]\nALLOWED_ATTRIBUTES.update(\n dict(\n (x, [\"style\", \"class\", \"id\", \"name\", \"lang\", 
\"dir\"])\n for x in (\"h1\", \"h2\", \"h3\", \"h4\", \"h5\", \"h6\")\n )\n)\nALLOWED_ATTRIBUTES.update(\n dict(\n (x, [\"style\", \"class\", \"id\", \"lang\", \"dir\", \"title\"])\n for x in (\n \"div\",\n \"pre\",\n \"ul\",\n \"li\",\n \"code\",\n \"dl\",\n \"dt\",\n \"dd\",\n \"section\",\n \"header\",\n \"footer\",\n \"nav\",\n \"article\",\n \"aside\",\n \"figure\",\n \"dialog\",\n \"hgroup\",\n \"mark\",\n \"time\",\n \"meter\",\n \"command\",\n \"output\",\n \"progress\",\n \"audio\",\n \"details\",\n \"datagrid\",\n \"datalist\",\n \"table\",\n \"tr\",\n \"address\",\n \"col\",\n \"s\",\n \"strong\",\n )\n )\n)\nALLOWED_ATTRIBUTES.update(\n dict((x, [\"cite\"]) for x in (\"blockquote\", \"del\", \"ins\", \"q\"))\n)\nALLOWED_ATTRIBUTES[\"li\"] += [\"data-default-state\"]\nALLOWED_ATTRIBUTES[\"time\"] += [\"datetime\"]\nALLOWED_ATTRIBUTES[\"ins\"] = [\"datetime\"]\nALLOWED_ATTRIBUTES[\"del\"] = [\"datetime\"]\nALLOWED_ATTRIBUTES[\"meter\"] += [\"max\", \"min\", \"value\", \"low\", \"high\", \"optimum\", \"form\"]\nALLOWED_ATTRIBUTES[\"details\"] += [\"open\"]\n# MathML\nALLOWED_ATTRIBUTES.update(\n dict((x, [\"encoding\", \"src\"]) for x in (\"annotation\", \"annotation-xml\"))\n)\nALLOWED_ATTRIBUTES.update(\n dict(\n (x, [\"href\", \"mathbackground\", \"mathcolor\", \"id\", \"class\", \"style\"])\n for x in (\n \"math\",\n \"maction\",\n \"menclose\",\n \"merror\",\n \"mfenced\",\n \"mfrac\",\n \"mglyph\",\n \"mi\",\n \"mlabeledtr\",\n \"mmultiscripts\",\n \"mn\",\n \"mo\",\n \"mover\",\n \"mpadded\",\n \"mphantom\",\n \"mroot\",\n \"mrow\",\n \"ms\",\n \"mspace\",\n \"msqrt\",\n \"mstyle\",\n \"msub\",\n \"msup\",\n \"msubsup\",\n \"mtable\",\n \"mtd\",\n \"mtext\",\n \"mtr\",\n \"munder\",\n \"munderover\",\n \"none\",\n \"mprescripts\",\n )\n )\n)\nALLOWED_ATTRIBUTES[\"math\"] += [\n \"display\",\n \"dir\",\n \"selection\",\n \"notation\",\n \"close\",\n \"open\",\n \"separators\",\n \"bevelled\",\n \"denomalign\",\n \"linethickness\",\n \"numalign\",\n \"largeop\",\n \"maxsize\",\n \"minsize\",\n \"movablelimits\",\n \"rspace\",\n \"separator\",\n \"stretchy\",\n \"symmetric\",\n \"depth\",\n \"lquote\",\n \"rquote\",\n \"align\",\n \"columnlines\",\n \"frame\",\n \"rowalign\",\n \"rowspacing\",\n \"rowspan\",\n \"columnspan\",\n \"accent\",\n \"accentunder\",\n \"dir\",\n \"mathsize\",\n \"mathvariant\",\n \"subscriptshift\",\n \"supscriptshift\",\n \"scriptlevel\",\n \"displaystyle\",\n \"scriptsizemultiplier\",\n \"scriptminsize\",\n \"altimg\",\n \"altimg-width\",\n \"altimg-height\",\n \"altimg-valign\",\n \"alttext\",\n]\nALLOWED_ATTRIBUTES[\"maction\"] += [\"actiontype\", \"selection\"]\nALLOWED_ATTRIBUTES[\"menclose\"] += [\"notation\"]\nALLOWED_ATTRIBUTES[\"mfenced\"] += [\"close\", \"open\", \"separators\"]\nALLOWED_ATTRIBUTES[\"mfrac\"] += [\"bevelled\", \"denomalign\", \"linethickness\", \"numalign\"]\nALLOWED_ATTRIBUTES[\"mi\"] += [\"dir\", \"mathsize\", \"mathvariant\"]\nALLOWED_ATTRIBUTES[\"mn\"] += [\"dir\", \"mathsize\", \"mathvariant\"]\nALLOWED_ATTRIBUTES[\"mmultiscripts\"] += [\"subscriptshift\", \"superscriptshift\"]\nALLOWED_ATTRIBUTES[\"mo\"] += [\n \"largeop\",\n \"lspace\",\n \"maxsize\",\n \"minsize\",\n \"movablelimits\",\n \"rspace\",\n \"separator\",\n \"stretchy\",\n \"symmetric\",\n \"accent\",\n \"dir\",\n \"mathsize\",\n \"mathvariant\",\n]\nALLOWED_ATTRIBUTES[\"mover\"] += [\"accent\"]\nALLOWED_ATTRIBUTES[\"mpadded\"] += [\"lspace\", \"voffset\", \"depth\"]\nALLOWED_ATTRIBUTES[\"mrow\"] += [\"dir\"]\nALLOWED_ATTRIBUTES[\"ms\"] += [\"lquote\", 
\"rquote\", \"dir\", \"mathsize\", \"mathvariant\"]\nALLOWED_ATTRIBUTES[\"mspace\"] += [\"depth\", \"height\", \"width\"]\nALLOWED_ATTRIBUTES[\"mstyle\"] += [\n \"display\",\n \"dir\",\n \"selection\",\n \"notation\",\n \"close\",\n \"open\",\n \"separators\",\n \"bevelled\",\n \"denomalign\",\n \"linethickness\",\n \"numalign\",\n \"largeop\",\n \"maxsize\",\n \"minsize\",\n \"movablelimits\",\n \"rspace\",\n \"separator\",\n \"stretchy\",\n \"symmetric\",\n \"depth\",\n \"lquote\",\n \"rquote\",\n \"align\",\n \"columnlines\",\n \"frame\",\n \"rowalign\",\n \"rowspacing\",\n \"rowspan\",\n \"columnspan\",\n \"accent\",\n \"accentunder\",\n \"dir\",\n \"mathsize\",\n \"mathvariant\",\n \"subscriptshift\",\n \"supscriptshift\",\n \"scriptlevel\",\n \"displaystyle\",\n \"scriptsizemultiplier\",\n \"scriptminsize\",\n]\nALLOWED_ATTRIBUTES[\"msub\"] += [\"subscriptshift\"]\nALLOWED_ATTRIBUTES[\"msubsup\"] += [\"subscriptshift\", \"superscriptshift\"]\nALLOWED_ATTRIBUTES[\"msup\"] += [\"superscriptshift\"]\nALLOWED_ATTRIBUTES[\"mtable\"] += [\n \"align\",\n \"columnalign\",\n \"columnlines\",\n \"frame\",\n \"rowalign\",\n \"rowspacing\",\n \"rowlines\",\n]\nALLOWED_ATTRIBUTES[\"mtd\"] += [\"columnalign\", \"columnspan\", \"rowalign\", \"rowspan\"]\nALLOWED_ATTRIBUTES[\"mtext\"] += [\"dir\", \"mathsize\", \"mathvariant\"]\nALLOWED_ATTRIBUTES[\"mtr\"] += [\"columnalign\", \"rowalign\"]\nALLOWED_ATTRIBUTES[\"munder\"] += [\"accentunder\"]\nALLOWED_ATTRIBUTES[\"mundermover\"] = [\"accent\", \"accentunder\"]\n# CSS\nALLOWED_STYLES = [\n \"border\",\n \"border-top\",\n \"border-right\",\n \"border-bottom\",\n \"border-left\",\n \"float\",\n \"overflow\",\n \"min-height\",\n \"vertical-align\",\n \"white-space\",\n \"color\",\n \"border-radius\",\n \"-webkit-border-radius\",\n \"-moz-border-radius, -o-border-radius\",\n \"margin\",\n \"margin-left\",\n \"margin-top\",\n \"margin-bottom\",\n \"margin-right\",\n \"padding\",\n \"padding-left\",\n \"padding-top\",\n \"padding-bottom\",\n \"padding-right\",\n \"position\",\n \"top\",\n \"height\",\n \"left\",\n \"right\",\n \"background\", # TODO: Maybe not this one, it can load URLs\n \"background-color\",\n \"font\",\n \"font-size\",\n \"font-weight\",\n \"font-family\",\n \"font-variant\",\n \"text-align\",\n \"text-transform\",\n \"-moz-column-width\",\n \"-webkit-columns\",\n \"columns\",\n \"width\",\n \"list-style-type\",\n \"line-height\",\n # CSS properties needed for live examples (pending proper solution):\n \"backface-visibility\",\n \"-moz-backface-visibility\",\n \"-webkit-backface-visibility\",\n \"-o-backface-visibility\",\n \"perspective\",\n \"-moz-perspective\",\n \"-webkit-perspective\",\n \"-o-perspective\",\n \"perspective-origin\",\n \"-moz-perspective-origin\",\n \"-webkit-perspective-origin\",\n \"-o-perspective-origin\",\n \"transform\",\n \"-moz-transform\",\n \"-webkit-transform\",\n \"-o-transform\",\n \"transform-style\",\n \"-moz-transform-style\",\n \"-webkit-transform-style\",\n \"-o-transform-style\",\n \"columns\",\n \"-moz-columns\",\n \"-webkit-columns\",\n \"column-rule\",\n \"-moz-column-rule\",\n \"-webkit-column-rule\",\n \"column-width\",\n \"-moz-column-width\",\n \"-webkit-column-width\",\n \"image-rendering\",\n \"-ms-interpolation-mode\",\n \"position\",\n \"border-style\",\n \"background-clip\",\n \"border-bottom-right-radius\",\n \"border-bottom-left-radius\",\n \"border-top-right-radius\",\n \"border-top-left-radius\",\n \"border-bottom-style\",\n \"border-left-style\",\n \"border-right-style\",\n 
\"border-top-style\",\n \"border-bottom-width\",\n \"border-left-width\",\n \"border-right-width\",\n \"border-top-width\",\n \"vertical-align\",\n \"border-collapse\",\n \"border-width\",\n \"border-color\",\n \"border-left\",\n \"border-right\",\n \"border-bottom\",\n \"border-top\",\n \"clip\",\n \"cursor\",\n \"filter\",\n \"float\",\n \"max-width\",\n \"font-style\",\n \"letter-spacing\",\n \"opacity\",\n \"zoom\",\n \"text-overflow\",\n \"text-indent\",\n \"text-rendering\",\n \"text-shadow\",\n \"transition\",\n \"transition\",\n \"transition\",\n \"transition\",\n \"transition-delay\",\n \"-moz-transition-delay\",\n \"-webkit-transition-delay\",\n \"-o-transition-delay\",\n \"transition-duration\",\n \"-moz-transition-duration\",\n \"-webkit-transition-duration\",\n \"-o-transition-duration\",\n \"transition-property\",\n \"-moz-transition-property\",\n \"-webkit-transition-property\",\n \"-o-transition-property\",\n \"transition-timing-function\",\n \"-moz-transition-timing-function\",\n \"-webkit-transition-timing-function\",\n \"-o-transition-timing-function\",\n \"color\",\n \"display\",\n \"position\",\n \"outline-color\",\n \"outline\",\n \"outline-offset\",\n \"box-shadow\",\n \"-moz-box-shadow\",\n \"-webkit-box-shadow\",\n \"-o-box-shadow\",\n \"linear-gradient\",\n \"-moz-linear-gradient\",\n \"-webkit-linear-gradient\",\n \"radial-gradient\",\n \"-moz-radial-gradient\",\n \"-webkit-radial-gradient\",\n \"text-decoration-style\",\n \"-moz-text-decoration-style\",\n \"text-decoration\",\n \"direction\",\n \"white-space\",\n \"unicode-bidi\",\n \"word-wrap\",\n]\nALLOWED_PROTOCOLS = [\"http\", \"https\", \"mailto\", \"irc\", \"news\", \"ftp\", \"ssh\", \"nntp\"]\n\nDIFF_WRAP_COLUMN = 65\nEXPERIMENT_TITLE_PREFIX = \"Experiment:\"\nLEGACY_MINDTOUCH_NAMESPACES = (\n \"Help\",\n \"Help_talk\",\n \"Project\",\n \"Project_talk\",\n \"Special\",\n \"Talk\",\n \"Template\",\n \"Template_talk\",\n \"User\",\n \"User_talk\",\n)\n\nDOCUMENTS_PER_PAGE = 100\n_ks_urlbits = urlparse(settings.KUMASCRIPT_URL_TEMPLATE)\nKUMASCRIPT_BASE_URL = urlunparse(\n (_ks_urlbits.scheme, _ks_urlbits.netloc, \"\", \"\", \"\", \"\")\n)\n\n# TODO: Put this under the control of Constance / Waffle?\n# Flags used to signify revisions in need of review\nREVIEW_FLAG_TAGS = (\n (\"technical\", _(\"Technical - code samples, APIs, or technologies\")),\n (\"editorial\", _(\"Editorial - prose, grammar, or content\")),\n)\nREVIEW_FLAG_TAGS_DEFAULT = [\"technical\", \"editorial\"]\n\nLOCALIZATION_FLAG_TAGS = (\n (\"inprogress\", _(\"Localization in progress - not completely translated yet.\")),\n)\n\nSLUG_CLEANSING_RE = re.compile(r\"^\\/?(([A-z-]+)?\\/?docs\\/)?\")\n# ?, whitespace, percentage, quote disallowed in slugs altogether\nINVALID_DOC_SLUG_CHARS_RE = re.compile(r\"\"\"[\\s'\"%%\\?\\$]+\"\"\")\nINVALID_REV_SLUG_CHARS_RE = re.compile(r\"\"\"[\\s\\?\\/%%]+\"\"\")\nDOCUMENT_PATH_RE = re.compile(r\"[^\\$]+\")\n\n# how a redirect looks as rendered HTML\nREDIRECT_HTML = \"REDIRECT <a \"\nREDIRECT_CONTENT = 'REDIRECT <a class=\"redirect\" href=\"%(href)s\">%(title)s</a>'\n\nDEKI_FILE_URL = re.compile(r\"@api/deki/files/(?P<file_id>\\d+)/=\")\nKUMA_FILE_URL = re.compile(\n r\"%s%s/files/(?P<file_id>\\d+)/\"\n % (re.escape(settings.PROTOCOL), re.escape(settings.ATTACHMENT_HOST))\n)\n\nSPAM_TRAINING_SWITCH = \"wiki_spam_training\"\nSPAM_SUBMISSION_REVISION_FIELDS = [\n \"title\",\n \"slug\",\n \"summary\",\n \"content\",\n \"comment\",\n \"tags\",\n \"keywords\",\n]\nSPAM_OTHER_HEADERS = ( # Header to send that don't 
start with HTTP\n \"REMOTE_ADDR\",\n \"REQUEST_URI\",\n \"DOCUMENT_URI\",\n)\n\nCODE_SAMPLE_MACROS = [\n \"LiveSampleURL\",\n \"EmbedDistLiveSample\",\n \"EmbedLiveSample\",\n \"LiveSampleLink\",\n \"FXOSUXLiveSampleEmbed\",\n]\n\nDEV_DOC_REQUEST_FORM = \"https://github.com/mdn/sprints/issues/new?template=issue-template.md&projects=mdn/sprints/2&labels=user-report\"\n\nWIKI_ONLY_DOCUMENT_QUERY_PARAMS = frozenset(\n (\"edit_links\", \"include\", \"macros\", \"nomacros\", \"raw\", \"section\", \"summary\",)\n)\n\n# Any slug that, case *sensitively*, matches this list get the\n# <meta name=\"robots\" content=\"noindex, nofollow\"> HTML header and they get\n# excluded from the sitemap XML files.\nNOINDEX_SLUG_PREFIXES = (\n \"MDN/Doc_status\",\n \"MDN/Jobs\",\n)\n",
"path": "kuma/wiki/constants.py"
}
] | diff --git a/kuma/wiki/constants.py b/kuma/wiki/constants.py
index d0f383a4ba6..f73dd5c8ea3 100644
--- a/kuma/wiki/constants.py
+++ b/kuma/wiki/constants.py
@@ -665,4 +665,7 @@
# Any slug that, case *sensitively*, matches this list get the
# <meta name="robots" content="noindex, nofollow"> HTML header and they get
# excluded from the sitemap XML files.
-NOINDEX_SLUG_PREFIXES = ("MDN/Doc_status",)
+NOINDEX_SLUG_PREFIXES = (
+ "MDN/Doc_status",
+ "MDN/Jobs",
+)
|
xonsh__xonsh-1514 | vox deactivate no longer resets my path
`which python3` returns the same thing before and after `vox deactivate` now. `vox activate` correctly changes it, but `vox deactivate` no longer puts it back. I am on the newest version of xonsh.
| [
{
"content": "\"\"\"API for Vox, the Python virtual environment manager for xonsh.\"\"\"\nimport os\nimport venv\nimport shutil\nimport builtins\nimport collections.abc\n\nfrom xonsh.platform import ON_POSIX, ON_WINDOWS, scandir\n\nVirtualEnvironment = collections.namedtuple('VirtualEnvironment', ['env', 'bin'])\n\n\nclass EnvironmentInUse(Exception):\n pass\n\n\nclass NoEnvironmentActive(Exception):\n pass\n\n\nclass Vox(collections.abc.Mapping):\n \"\"\"API access to Vox and virtual environments, in a dict-like format.\n\n Makes use of the VirtualEnvironment namedtuple:\n\n 1. ``env``: The full path to the environment\n 2. ``bin``: The full path to the bin/Scripts directory of the environment\n \"\"\"\n\n def __init__(self):\n if not builtins.__xonsh_env__.get('VIRTUALENV_HOME'):\n home_path = os.path.expanduser('~')\n self.venvdir = os.path.join(home_path, '.virtualenvs')\n builtins.__xonsh_env__['VIRTUALENV_HOME'] = self.venvdir\n else:\n self.venvdir = builtins.__xonsh_env__['VIRTUALENV_HOME']\n\n def create(self, name, *, system_site_packages=False, symlinks=False,\n with_pip=True):\n \"\"\"Create a virtual environment in $VIRTUALENV_HOME with python3's ``venv``.\n\n Parameters\n ----------\n name : str\n Virtual environment name\n system_site_packages : bool\n If True, the system (global) site-packages dir is available to\n created environments.\n symlinks : bool\n If True, attempt to symlink rather than copy files into virtual\n environment.\n with_pip : bool\n If True, ensure pip is installed in the virtual environment. (Default is True)\n \"\"\"\n # NOTE: clear=True is the same as delete then create.\n # NOTE: upgrade=True is its own method\n env_path = os.path.join(self.venvdir, name)\n venv.create(\n env_path,\n system_site_packages=system_site_packages, symlinks=symlinks,\n with_pip=with_pip)\n\n def upgrade(self, name, *, symlinks=False, with_pip=True):\n \"\"\"Create a virtual environment in $VIRTUALENV_HOME with python3's ``venv``.\n\n WARNING: If a virtual environment was created with symlinks or without PIP, you must\n specify these options again on upgrade.\n\n Parameters\n ----------\n name : str\n Virtual environment name\n symlinks : bool\n If True, attempt to symlink rather than copy files into virtual\n environment.\n with_pip : bool\n If True, ensure pip is installed in the virtual environment.\n \"\"\"\n # venv doesn't reload this, so we have to do it ourselves.\n # Is there a bug for this in Python? There should be.\n env_path, bin_path = self[name]\n cfgfile = os.path.join(env_path, 'pyvenv.cfg')\n cfgops = {}\n with open(cfgfile) as cfgfile:\n for l in cfgfile:\n l = l.strip()\n if '=' not in l:\n continue\n k, v = l.split('=', 1)\n cfgops[k.strip()] = v.strip()\n flags = {\n 'system_site_packages': cfgops['include-system-site-packages'] == 'true',\n 'symlinks': symlinks,\n 'with_pip': with_pip,\n }\n # END things we shouldn't be doing.\n\n # Ok, do what we came here to do.\n venv.create(env_path, upgrade=True, **flags)\n\n @staticmethod\n def _binname():\n if ON_WINDOWS:\n return 'Scripts'\n elif ON_POSIX:\n return 'bin'\n else:\n raise OSError('This OS is not supported.')\n\n def __getitem__(self, name):\n \"\"\"Get information about a virtual environment.\n\n Parameters\n ----------\n name : str or Ellipsis\n Virtual environment name or absolute path. If ... 
is given, return\n the current one (throws a KeyError if there isn't one).\n \"\"\"\n if name is ...:\n env_path = builtins.__xonsh_env__['VIRTUAL_ENV']\n elif os.path.isabs(name):\n env_path = name\n else:\n env_path = os.path.join(self.venvdir, name)\n bin_dir = self._binname()\n bin_path = os.path.join(env_path, bin_dir)\n # Actually check if this is an actual venv or just a organizational directory\n # eg, if 'spam/eggs' is a venv, reject 'spam'\n if not os.path.exists(bin_path):\n raise KeyError()\n return VirtualEnvironment(env_path, bin_path)\n\n def __iter__(self):\n \"\"\"List available virtual environments found in $VIRTUALENV_HOME.\n \"\"\"\n # FIXME: Handle subdirs--this won't discover eg ``spam/eggs``\n for x in scandir(self.venvdir):\n if x.is_dir():\n yield x.name\n\n def __len__(self):\n \"\"\"Counts known virtual environments, using the same rules as iter().\n \"\"\"\n l = 0\n for _ in self:\n l += 1\n return l\n\n def active(self):\n \"\"\"Get the name of the active virtual environment.\n\n You can use this as a key to get further information.\n\n Returns None if no environment is active.\n \"\"\"\n if 'VIRTUAL_ENV' not in builtins.__xonsh_env__:\n return\n env_path = builtins.__xonsh_env__['VIRTUAL_ENV']\n if env_path.startswith(self.venvdir):\n name = env_path[len(self.venvdir):]\n if name[0] == '/':\n name = name[1:]\n return name\n else:\n return env_path\n\n def activate(self, name):\n \"\"\"\n Activate a virtual environment.\n\n Parameters\n ----------\n name : str\n Virtual environment name or absolute path.\n \"\"\"\n env = builtins.__xonsh_env__\n env_path, bin_path = self[name]\n if 'VIRTUAL_ENV' in env:\n self.deactivate()\n\n type(self).oldvars = {'PATH': env['PATH']}\n env['PATH'].insert(0, bin_path)\n env['VIRTUAL_ENV'] = env_path\n if 'PYTHONHOME' in env:\n type(self).oldvars['PYTHONHOME'] = env.pop('PYTHONHOME')\n\n def deactivate(self):\n \"\"\"\n Deactive the active virtual environment. Returns the name of it.\n \"\"\"\n env = builtins.__xonsh_env__\n if 'VIRTUAL_ENV' not in env:\n raise NoEnvironmentActive('No environment currently active.')\n\n env_path, bin_path = self[...]\n env_name = self.active()\n\n if hasattr(type(self), 'oldvars'):\n for k, v in type(self).oldvars.items():\n env[k] = v\n del type(self).oldvars\n\n env.pop('VIRTUAL_ENV')\n\n return env_name\n\n def __delitem__(self, name):\n \"\"\"\n Permanently deletes a virtual environment.\n\n Parameters\n ----------\n name : str\n Virtual environment name or absolute path.\n \"\"\"\n env_path = self[name].env\n try:\n if self[...].env == env_path:\n raise EnvironmentInUse('The \"%s\" environment is currently active.' % name)\n except KeyError:\n # No current venv, ... fails\n pass\n shutil.rmtree(env_path)\n",
"path": "xontrib/voxapi.py"
}
] | [
{
"content": "\"\"\"API for Vox, the Python virtual environment manager for xonsh.\"\"\"\nimport os\nimport venv\nimport shutil\nimport builtins\nimport collections.abc\n\nfrom xonsh.platform import ON_POSIX, ON_WINDOWS, scandir\n\nVirtualEnvironment = collections.namedtuple('VirtualEnvironment', ['env', 'bin'])\n\n\nclass EnvironmentInUse(Exception):\n pass\n\n\nclass NoEnvironmentActive(Exception):\n pass\n\n\nclass Vox(collections.abc.Mapping):\n \"\"\"API access to Vox and virtual environments, in a dict-like format.\n\n Makes use of the VirtualEnvironment namedtuple:\n\n 1. ``env``: The full path to the environment\n 2. ``bin``: The full path to the bin/Scripts directory of the environment\n \"\"\"\n\n def __init__(self):\n if not builtins.__xonsh_env__.get('VIRTUALENV_HOME'):\n home_path = os.path.expanduser('~')\n self.venvdir = os.path.join(home_path, '.virtualenvs')\n builtins.__xonsh_env__['VIRTUALENV_HOME'] = self.venvdir\n else:\n self.venvdir = builtins.__xonsh_env__['VIRTUALENV_HOME']\n\n def create(self, name, *, system_site_packages=False, symlinks=False,\n with_pip=True):\n \"\"\"Create a virtual environment in $VIRTUALENV_HOME with python3's ``venv``.\n\n Parameters\n ----------\n name : str\n Virtual environment name\n system_site_packages : bool\n If True, the system (global) site-packages dir is available to\n created environments.\n symlinks : bool\n If True, attempt to symlink rather than copy files into virtual\n environment.\n with_pip : bool\n If True, ensure pip is installed in the virtual environment. (Default is True)\n \"\"\"\n # NOTE: clear=True is the same as delete then create.\n # NOTE: upgrade=True is its own method\n env_path = os.path.join(self.venvdir, name)\n venv.create(\n env_path,\n system_site_packages=system_site_packages, symlinks=symlinks,\n with_pip=with_pip)\n\n def upgrade(self, name, *, symlinks=False, with_pip=True):\n \"\"\"Create a virtual environment in $VIRTUALENV_HOME with python3's ``venv``.\n\n WARNING: If a virtual environment was created with symlinks or without PIP, you must\n specify these options again on upgrade.\n\n Parameters\n ----------\n name : str\n Virtual environment name\n symlinks : bool\n If True, attempt to symlink rather than copy files into virtual\n environment.\n with_pip : bool\n If True, ensure pip is installed in the virtual environment.\n \"\"\"\n # venv doesn't reload this, so we have to do it ourselves.\n # Is there a bug for this in Python? There should be.\n env_path, bin_path = self[name]\n cfgfile = os.path.join(env_path, 'pyvenv.cfg')\n cfgops = {}\n with open(cfgfile) as cfgfile:\n for l in cfgfile:\n l = l.strip()\n if '=' not in l:\n continue\n k, v = l.split('=', 1)\n cfgops[k.strip()] = v.strip()\n flags = {\n 'system_site_packages': cfgops['include-system-site-packages'] == 'true',\n 'symlinks': symlinks,\n 'with_pip': with_pip,\n }\n # END things we shouldn't be doing.\n\n # Ok, do what we came here to do.\n venv.create(env_path, upgrade=True, **flags)\n\n @staticmethod\n def _binname():\n if ON_WINDOWS:\n return 'Scripts'\n elif ON_POSIX:\n return 'bin'\n else:\n raise OSError('This OS is not supported.')\n\n def __getitem__(self, name):\n \"\"\"Get information about a virtual environment.\n\n Parameters\n ----------\n name : str or Ellipsis\n Virtual environment name or absolute path. If ... 
is given, return\n the current one (throws a KeyError if there isn't one).\n \"\"\"\n if name is ...:\n env_path = builtins.__xonsh_env__['VIRTUAL_ENV']\n elif os.path.isabs(name):\n env_path = name\n else:\n env_path = os.path.join(self.venvdir, name)\n bin_dir = self._binname()\n bin_path = os.path.join(env_path, bin_dir)\n # Actually check if this is an actual venv or just a organizational directory\n # eg, if 'spam/eggs' is a venv, reject 'spam'\n if not os.path.exists(bin_path):\n raise KeyError()\n return VirtualEnvironment(env_path, bin_path)\n\n def __iter__(self):\n \"\"\"List available virtual environments found in $VIRTUALENV_HOME.\n \"\"\"\n # FIXME: Handle subdirs--this won't discover eg ``spam/eggs``\n for x in scandir(self.venvdir):\n if x.is_dir():\n yield x.name\n\n def __len__(self):\n \"\"\"Counts known virtual environments, using the same rules as iter().\n \"\"\"\n l = 0\n for _ in self:\n l += 1\n return l\n\n def active(self):\n \"\"\"Get the name of the active virtual environment.\n\n You can use this as a key to get further information.\n\n Returns None if no environment is active.\n \"\"\"\n if 'VIRTUAL_ENV' not in builtins.__xonsh_env__:\n return\n env_path = builtins.__xonsh_env__['VIRTUAL_ENV']\n if env_path.startswith(self.venvdir):\n name = env_path[len(self.venvdir):]\n if name[0] == '/':\n name = name[1:]\n return name\n else:\n return env_path\n\n def activate(self, name):\n \"\"\"\n Activate a virtual environment.\n\n Parameters\n ----------\n name : str\n Virtual environment name or absolute path.\n \"\"\"\n env = builtins.__xonsh_env__\n env_path, bin_path = self[name]\n if 'VIRTUAL_ENV' in env:\n self.deactivate()\n\n type(self).oldvars = {'PATH': list(env['PATH'])}\n env['PATH'].insert(0, bin_path)\n env['VIRTUAL_ENV'] = env_path\n if 'PYTHONHOME' in env:\n type(self).oldvars['PYTHONHOME'] = env.pop('PYTHONHOME')\n\n def deactivate(self):\n \"\"\"\n Deactive the active virtual environment. Returns the name of it.\n \"\"\"\n env = builtins.__xonsh_env__\n if 'VIRTUAL_ENV' not in env:\n raise NoEnvironmentActive('No environment currently active.')\n\n env_path, bin_path = self[...]\n env_name = self.active()\n\n if hasattr(type(self), 'oldvars'):\n for k, v in type(self).oldvars.items():\n env[k] = v\n del type(self).oldvars\n\n env.pop('VIRTUAL_ENV')\n\n return env_name\n\n def __delitem__(self, name):\n \"\"\"\n Permanently deletes a virtual environment.\n\n Parameters\n ----------\n name : str\n Virtual environment name or absolute path.\n \"\"\"\n env_path = self[name].env\n try:\n if self[...].env == env_path:\n raise EnvironmentInUse('The \"%s\" environment is currently active.' % name)\n except KeyError:\n # No current venv, ... fails\n pass\n shutil.rmtree(env_path)\n",
"path": "xontrib/voxapi.py"
}
] | diff --git a/tests/test_vox.py b/tests/test_vox.py
index e03ad2c31c..18d7dc4f83 100644
--- a/tests/test_vox.py
+++ b/tests/test_vox.py
@@ -43,3 +43,25 @@ def test_activate(xonsh_builtins, tmpdir):
assert xonsh_builtins.__xonsh_env__['VIRTUAL_ENV'] == vox['spam'].env
vox.deactivate()
assert 'VIRTUAL_ENV' not in xonsh_builtins.__xonsh_env__
+
+
+@skip_if_on_conda
+def test_path(xonsh_builtins, tmpdir):
+ """
+ Test to make sure Vox properly activates and deactivates by examining $PATH
+ """
+ xonsh_builtins.__xonsh_env__['VIRTUALENV_HOME'] = str(tmpdir)
+ # I consider the case that the user doesn't have a PATH set to be unreasonable
+ xonsh_builtins.__xonsh_env__.setdefault('PATH', [])
+
+ oldpath = list(xonsh_builtins.__xonsh_env__['PATH'])
+ vox = Vox()
+ vox.create('eggs')
+
+ vox.activate('eggs')
+
+ assert oldpath != xonsh_builtins.__xonsh_env__['PATH']
+
+ vox.deactivate()
+
+ assert oldpath == xonsh_builtins.__xonsh_env__['PATH']
diff --git a/xontrib/voxapi.py b/xontrib/voxapi.py
index 6a3b49bede..301f4b7f3c 100644
--- a/xontrib/voxapi.py
+++ b/xontrib/voxapi.py
@@ -178,7 +178,7 @@ def activate(self, name):
if 'VIRTUAL_ENV' in env:
         self.deactivate()

- type(self).oldvars = {'PATH': env['PATH']}
+ type(self).oldvars = {'PATH': list(env['PATH'])}
env['PATH'].insert(0, bin_path)
env['VIRTUAL_ENV'] = env_path
if 'PYTHONHOME' in env:
|
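The one-line fix above addresses a classic Python aliasing bug: `activate()` saved a *reference* to the live `PATH` list in `oldvars`, so the subsequent `insert(0, bin_path)` mutated the saved "old" value too, and `deactivate()` restored an already-modified path. Wrapping it in `list(...)` snapshots the value instead. A minimal sketch of both behaviours, independent of xonsh internals (the plain `env` dict stands in for `builtins.__xonsh_env__`, and the venv path is illustrative):

~~~~
# Sketch: saving PATH by reference vs. by copy.
env = {'PATH': ['/usr/bin', '/bin']}

# Buggy: oldvars['PATH'] aliases the very list that activate() mutates.
oldvars = {'PATH': env['PATH']}
env['PATH'].insert(0, '/home/me/.virtualenvs/spam/bin')
env['PATH'] = oldvars['PATH']   # "restore": it is the same object, a no-op
assert env['PATH'][0] == '/home/me/.virtualenvs/spam/bin'  # still active!

# Fixed: list(...) snapshots the current value, so the restore works.
env = {'PATH': ['/usr/bin', '/bin']}
oldvars = {'PATH': list(env['PATH'])}
env['PATH'].insert(0, '/home/me/.virtualenvs/spam/bin')
env['PATH'] = oldvars['PATH']
assert env['PATH'] == ['/usr/bin', '/bin']  # deactivate resets PATH
~~~~

The `test_path` test added in the same diff guards exactly this round-trip: `$PATH` must differ while the environment is active and compare equal to the original after `deactivate()`.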
pyinstaller__pyinstaller-3662 | Changing from -F to -D on consecutive runs causes crash
macOS (not tried on Windows or Linux; it may well fail there too), PyInstaller 3.3.1. Demonstrated as follows.
~~~~
$ pyinstaller --version
3.3.1
$ cat > hello.py
import sys
print( 'hello',sys.argv[1] )
^D
$ python hello.py world
hello world
$ pyinstaller --clean -y -F hello.py
...normal INFO messages...
$ dist/hello world
hello world
$ pyinstaller --clean -y -D hello.py
... normal INFO messages down to...
3576 INFO: Building COLLECT because out00-COLLECT.toc is non existent
Traceback (most recent call last):
File "/Developer/VENV-3.6/bin/pyinstaller", line 11, in <module>
load_entry_point('PyInstaller==3.3.1', 'console_scripts', 'pyinstaller')()
File "/Developer/VENV-3.6/lib/python3.6/site-packages/PyInstaller/__main__.py", line 94, in run
run_build(pyi_config, spec_file, **vars(args))
File "/Developer/VENV-3.6/lib/python3.6/site-packages/PyInstaller/__main__.py", line 46, in run_build
PyInstaller.building.build_main.main(pyi_config, spec_file, **kwargs)
File "/Developer/VENV-3.6/lib/python3.6/site-packages/PyInstaller/building/build_main.py", line 791, in main
build(specfile, kw.get('distpath'), kw.get('workpath'), kw.get('clean_build'))
File "/Developer/VENV-3.6/lib/python3.6/site-packages/PyInstaller/building/build_main.py", line 737, in build
exec(text, spec_namespace)
File "<string>", line 33, in <module>
File "/Developer/VENV-3.6/lib/python3.6/site-packages/PyInstaller/building/api.py", line 661, in __init__
self.__postinit__()
File "/Developer/VENV-3.6/lib/python3.6/site-packages/PyInstaller/building/datastruct.py", line 161, in __postinit__
self.assemble()
File "/Developer/VENV-3.6/lib/python3.6/site-packages/PyInstaller/building/api.py", line 674, in assemble
_make_clean_directory(self.name)
File "/Developer/VENV-3.6/lib/python3.6/site-packages/PyInstaller/building/utils.py", line 388, in _make_clean_directory
os.makedirs(path)
File "/usr/local/Cellar/python3/3.6.2/bin/../Frameworks/Python.framework/Versions/3.6/lib/python3.6/os.py", line 220, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/Developer/VENV-3.6/dist/hello'
~~~~
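The traceback bottoms out in `_make_clean_directory` (`PyInstaller/building/utils.py`): after the earlier `-F` run, `dist/hello` is a *file* (the onefile executable), the cleanup guard only checked `os.path.isdir(path)`, so nothing was removed and `os.makedirs(path)` hit the leftover file. The fix extends the guard with `os.path.isfile(path)`. A minimal sketch of the failure mode and the fixed guard (the temp paths and the `fixed` flag are illustrative, not PyInstaller API):

~~~~
import os, shutil, tempfile

def make_clean_directory(path, fixed=True):
    # The original guard was only os.path.isdir(path); a stale onefile
    # executable sitting at the target path was therefore never removed.
    if os.path.isdir(path) or (fixed and os.path.isfile(path)):
        try:
            os.remove(path)          # handles the leftover-file case
        except OSError:
            shutil.rmtree(path)      # handles the directory case
    os.makedirs(path)                # raised FileExistsError before the fix

root = tempfile.mkdtemp()
path = os.path.join(root, 'hello')
open(path, 'w').close()             # simulate dist/hello left by the -F run
make_clean_directory(path)          # with fixed=False this reproduces the crash
assert os.path.isdir(path)
shutil.rmtree(root)
~~~~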
| [
{
"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2018, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n\n\n#--- functions for checking guts ---\n# NOTE: By GUTS it is meant intermediate files and data structures that\n# PyInstaller creates for bundling files and creating final executable.\nimport glob\nimport hashlib\nimport os\nimport os.path\nimport pkgutil\nimport platform\nimport shutil\nimport sys\n\nimport struct\n\nfrom PyInstaller.config import CONF\nfrom .. import compat\nfrom ..compat import is_darwin, is_win, EXTENSION_SUFFIXES, \\\n open_file, is_py3, is_py37\nfrom ..depend import dylib\nfrom ..depend.bindepend import match_binding_redirect\nfrom ..utils import misc\nfrom ..utils.misc import load_py_data_struct, save_py_data_struct\nfrom .. import log as logging\n\nif is_win:\n from ..utils.win32 import winmanifest, winresource\n\nlogger = logging.getLogger(__name__)\n\n\n#-- Helpers for checking guts.\n#\n# NOTE: By _GUTS it is meant intermediate files and data structures that\n# PyInstaller creates for bundling files and creating final executable.\n\ndef _check_guts_eq(attr, old, new, last_build):\n \"\"\"\n rebuild is required if values differ\n \"\"\"\n if old != new:\n logger.info(\"Building because %s changed\", attr)\n return True\n return False\n\n\ndef _check_guts_toc_mtime(attr, old, toc, last_build, pyc=0):\n \"\"\"\n rebuild is required if mtimes of files listed in old toc are newer\n than last_build\n\n if pyc=1, check for .py files, too\n\n Use this for calculated/analysed values read from cache.\n \"\"\"\n for (nm, fnm, typ) in old:\n if misc.mtime(fnm) > last_build:\n logger.info(\"Building because %s changed\", fnm)\n return True\n elif pyc and misc.mtime(fnm[:-1]) > last_build:\n logger.info(\"Building because %s changed\", fnm[:-1])\n return True\n return False\n\n\ndef _check_guts_toc(attr, old, toc, last_build, pyc=0):\n \"\"\"\n rebuild is required if either toc content changed or mtimes of\n files listed in old toc are newer than last_build\n\n if pyc=1, check for .py files, too\n\n Use this for input parameters.\n \"\"\"\n return (_check_guts_eq(attr, old, toc, last_build)\n or _check_guts_toc_mtime(attr, old, toc, last_build, pyc=pyc))\n\n\n#---\n\ndef add_suffix_to_extensions(toc):\n \"\"\"\n Returns a new TOC with proper library suffix for EXTENSION items.\n \"\"\"\n # TODO: Fix this recursive import\n from .datastruct import TOC\n new_toc = TOC()\n for inm, fnm, typ in toc:\n if typ == 'EXTENSION':\n if is_py3:\n # Change the dotted name into a relative path. This places C\n # extensions in the Python-standard location. This only works\n # in Python 3; see comments above\n # ``sys.meta_path.append(CExtensionImporter())`` in\n # ``pyimod03_importers``.\n inm = inm.replace('.', os.sep)\n # In some rare cases extension might already contain a suffix.\n # Skip it in this case.\n if os.path.splitext(inm)[1] not in EXTENSION_SUFFIXES:\n # Determine the base name of the file.\n if is_py3:\n base_name = os.path.basename(inm)\n else:\n base_name = inm.rsplit('.')[-1]\n assert '.' not in base_name\n # Use this file's existing extension. 
For extensions such as\n # ``libzmq.cp36-win_amd64.pyd``, we can't use\n # ``os.path.splitext``, which would give only the ```.pyd`` part\n # of the extension.\n inm = inm + os.path.basename(fnm)[len(base_name):]\n\n elif typ == 'DEPENDENCY':\n # Use the suffix from the filename.\n # TODO Verify what extensions are by DEPENDENCIES.\n binext = os.path.splitext(fnm)[1]\n if not os.path.splitext(inm)[1] == binext:\n inm = inm + binext\n new_toc.append((inm, fnm, typ))\n return new_toc\n\ndef applyRedirects(manifest, redirects):\n \"\"\"\n Apply the binding redirects specified by 'redirects' to the dependent assemblies\n of 'manifest'.\n\n :param manifest:\n :type manifest:\n :param redirects:\n :type redirects:\n :return:\n :rtype:\n \"\"\"\n redirecting = False\n for binding in redirects:\n for dep in manifest.dependentAssemblies:\n if match_binding_redirect(dep, binding):\n logger.info(\"Redirecting %s version %s -> %s\",\n binding.name, dep.version, binding.newVersion)\n dep.version = binding.newVersion\n redirecting = True\n return redirecting\n\ndef checkCache(fnm, strip=False, upx=False, dist_nm=None):\n \"\"\"\n Cache prevents preprocessing binary files again and again.\n\n 'dist_nm' Filename relative to dist directory. We need it on Mac\n to determine level of paths for @loader_path like\n '@loader_path/../../' for qt4 plugins.\n \"\"\"\n from ..config import CONF\n # On darwin a cache is required anyway to keep the libaries\n # with relative install names. Caching on darwin does not work\n # since we need to modify binary headers to use relative paths\n # to dll depencies and starting with '@loader_path'.\n if not strip and not upx and not is_darwin and not is_win:\n return fnm\n\n if dist_nm is not None and \":\" in dist_nm:\n # A file embedded in another pyinstaller build via multipackage\n # No actual file exists to process\n return fnm\n\n if strip:\n strip = True\n else:\n strip = False\n if upx:\n upx = True\n else:\n upx = False\n\n # Load cache index\n # Make cachedir per Python major/minor version.\n # This allows parallel building of executables with different\n # Python versions as one user.\n pyver = ('py%d%s') % (sys.version_info[0], sys.version_info[1])\n arch = platform.architecture()[0]\n cachedir = os.path.join(CONF['cachedir'], 'bincache%d%d_%s_%s' % (strip, upx, pyver, arch))\n if not os.path.exists(cachedir):\n os.makedirs(cachedir)\n cacheindexfn = os.path.join(cachedir, \"index.dat\")\n if os.path.exists(cacheindexfn):\n try:\n cache_index = load_py_data_struct(cacheindexfn)\n except Exception as e:\n # tell the user they may want to fix their cache\n # .. however, don't delete it for them; if it keeps getting\n # corrupted, we'll never find out\n logger.warn(\"pyinstaller bincache may be corrupted; \"\n \"use pyinstaller --clean to fix\")\n raise\n else:\n cache_index = {}\n\n # Verify if the file we're looking for is present in the cache.\n # Use the dist_mn if given to avoid different extension modules\n # sharing the same basename get corrupted.\n if dist_nm:\n basenm = os.path.normcase(dist_nm)\n else:\n basenm = os.path.normcase(os.path.basename(fnm))\n\n # Binding redirects should be taken into account to see if the file\n # needs to be reprocessed. 
The redirects may change if the versions of dependent\n # manifests change due to system updates.\n redirects = CONF.get('binding_redirects', [])\n digest = cacheDigest(fnm, redirects)\n cachedfile = os.path.join(cachedir, basenm)\n cmd = None\n if basenm in cache_index:\n if digest != cache_index[basenm]:\n os.remove(cachedfile)\n else:\n # On Mac OS X we need relative paths to dll dependencies\n # starting with @executable_path\n if is_darwin:\n dylib.mac_set_relative_dylib_deps(cachedfile, dist_nm)\n return cachedfile\n\n\n # Optionally change manifest and its deps to private assemblies\n if fnm.lower().endswith(\".manifest\"):\n manifest = winmanifest.Manifest()\n manifest.filename = fnm\n with open(fnm, \"rb\") as f:\n manifest.parse_string(f.read())\n if CONF.get('win_private_assemblies', False):\n if manifest.publicKeyToken:\n logger.info(\"Changing %s into private assembly\", os.path.basename(fnm))\n manifest.publicKeyToken = None\n for dep in manifest.dependentAssemblies:\n # Exclude common-controls which is not bundled\n if dep.name != \"Microsoft.Windows.Common-Controls\":\n dep.publicKeyToken = None\n\n applyRedirects(manifest, redirects)\n\n manifest.writeprettyxml(cachedfile)\n return cachedfile\n\n if upx:\n if strip:\n fnm = checkCache(fnm, strip=True, upx=False)\n bestopt = \"--best\"\n # FIXME: Linux builds of UPX do not seem to contain LZMA (they assert out)\n # A better configure-time check is due.\n if CONF[\"hasUPX\"] >= (3,) and os.name == \"nt\":\n bestopt = \"--lzma\"\n\n upx_executable = \"upx\"\n if CONF.get('upx_dir'):\n upx_executable = os.path.join(CONF['upx_dir'], upx_executable)\n cmd = [upx_executable, bestopt, \"-q\", cachedfile]\n else:\n if strip:\n strip_options = []\n if is_darwin:\n # The default strip behaviour breaks some shared libraries\n # under Mac OSX.\n # -S = strip only debug symbols.\n strip_options = [\"-S\"]\n cmd = [\"strip\"] + strip_options + [cachedfile]\n\n if not os.path.exists(os.path.dirname(cachedfile)):\n os.makedirs(os.path.dirname(cachedfile))\n # There are known some issues with 'shutil.copy2' on Mac OS X 10.11\n # with copying st_flags. 
Issue #1650.\n # 'shutil.copy' copies also permission bits and it should be sufficient for\n # PyInstalle purposes.\n shutil.copy(fnm, cachedfile)\n # TODO find out if this is still necessary when no longer using shutil.copy2()\n if hasattr(os, 'chflags'):\n # Some libraries on FreeBSD have immunable flag (libthr.so.3, for example)\n # If flags still remains, os.chmod will failed with:\n # OSError: [Errno 1] Operation not permitted.\n try:\n os.chflags(cachedfile, 0)\n except OSError:\n pass\n os.chmod(cachedfile, 0o755)\n\n if os.path.splitext(fnm.lower())[1] in (\".pyd\", \".dll\"):\n # When shared assemblies are bundled into the app, they may optionally be\n # changed into private assemblies.\n try:\n res = winmanifest.GetManifestResources(os.path.abspath(cachedfile))\n except winresource.pywintypes.error as e:\n if e.args[0] == winresource.ERROR_BAD_EXE_FORMAT:\n # Not a win32 PE file\n pass\n else:\n logger.error(os.path.abspath(cachedfile))\n raise\n else:\n if winmanifest.RT_MANIFEST in res and len(res[winmanifest.RT_MANIFEST]):\n for name in res[winmanifest.RT_MANIFEST]:\n for language in res[winmanifest.RT_MANIFEST][name]:\n try:\n manifest = winmanifest.Manifest()\n manifest.filename = \":\".join([cachedfile,\n str(winmanifest.RT_MANIFEST),\n str(name),\n str(language)])\n manifest.parse_string(res[winmanifest.RT_MANIFEST][name][language],\n False)\n except Exception as exc:\n logger.error(\"Cannot parse manifest resource %s, \"\n \"%s\", name, language)\n logger.error(\"From file %s\", cachedfile, exc_info=1)\n else:\n # optionally change manifest to private assembly\n private = CONF.get('win_private_assemblies', False)\n if private:\n if manifest.publicKeyToken:\n logger.info(\"Changing %s into a private assembly\",\n os.path.basename(fnm))\n manifest.publicKeyToken = None\n\n # Change dep to private assembly\n for dep in manifest.dependentAssemblies:\n # Exclude common-controls which is not bundled\n if dep.name != \"Microsoft.Windows.Common-Controls\":\n dep.publicKeyToken = None\n redirecting = applyRedirects(manifest, redirects)\n if redirecting or private:\n try:\n manifest.update_resources(os.path.abspath(cachedfile),\n [name],\n [language])\n except Exception as e:\n logger.error(os.path.abspath(cachedfile))\n raise\n\n if cmd:\n try:\n logger.info(\"Executing - \" + ' '.join(cmd))\n compat.exec_command(*cmd)\n except OSError as e:\n raise SystemExit(\"Execution failed: %s\" % e)\n\n # update cache index\n cache_index[basenm] = digest\n save_py_data_struct(cacheindexfn, cache_index)\n\n # On Mac OS X we need relative paths to dll dependencies\n # starting with @executable_path\n if is_darwin:\n dylib.mac_set_relative_dylib_deps(cachedfile, dist_nm)\n return cachedfile\n\n\ndef cacheDigest(fnm, redirects):\n hasher = hashlib.md5()\n with open(fnm, \"rb\") as f:\n for chunk in iter(lambda: f.read(16 * 1024), b\"\"):\n hasher.update(chunk)\n if redirects:\n redirects = str(redirects)\n if is_py3:\n redirects = redirects.encode('utf-8')\n hasher.update(redirects)\n digest = bytearray(hasher.digest())\n return digest\n\n\ndef _check_path_overlap(path):\n \"\"\"\n Check that path does not overlap with WORKPATH or SPECPATH (i.e.\n WORKPATH and SPECPATH may not start with path, which could be\n caused by a faulty hand-edited specfile)\n\n Raise SystemExit if there is overlap, return True otherwise\n \"\"\"\n from ..config import CONF\n specerr = 0\n if CONF['workpath'].startswith(path):\n logger.error('Specfile error: The output path \"%s\" contains '\n 'WORKPATH (%s)', path, 
CONF['workpath'])\n specerr += 1\n if CONF['specpath'].startswith(path):\n logger.error('Specfile error: The output path \"%s\" contains '\n 'SPECPATH (%s)', path, CONF['specpath'])\n specerr += 1\n if specerr:\n raise SystemExit('Error: Please edit/recreate the specfile (%s) '\n 'and set a different output name (e.g. \"dist\").'\n % CONF['spec'])\n return True\n\n\ndef _make_clean_directory(path):\n \"\"\"\n Create a clean directory from the given directory name\n \"\"\"\n if _check_path_overlap(path):\n if os.path.isdir(path):\n try:\n os.remove(path)\n except OSError:\n _rmtree(path)\n\n os.makedirs(path)\n\n\ndef _rmtree(path):\n \"\"\"\n Remove directory and all its contents, but only after user confirmation,\n or if the -y option is set\n \"\"\"\n from ..config import CONF\n if CONF['noconfirm']:\n choice = 'y'\n elif sys.stdout.isatty():\n choice = compat.stdin_input('WARNING: The output directory \"%s\" and ALL ITS '\n 'CONTENTS will be REMOVED! Continue? (y/n)' % path)\n else:\n raise SystemExit('Error: The output directory \"%s\" is not empty. '\n 'Please remove all its contents or use the '\n '-y option (remove output directory without '\n 'confirmation).' % path)\n if choice.strip().lower() == 'y':\n logger.info('Removing dir %s', path)\n shutil.rmtree(path)\n else:\n raise SystemExit('User aborted')\n\n\n# TODO Refactor to prohibit empty target directories. As the docstring\n#below documents, this function currently permits the second item of each\n#2-tuple in \"hook.datas\" to be the empty string, in which case the target\n#directory defaults to the source directory's basename. However, this\n#functionality is very fragile and hence bad. Instead:\n#\n#* An exception should be raised if such item is empty.\n#* All hooks currently passing the empty string for such item (e.g.,\n# \"hooks/hook-babel.py\", \"hooks/hook-matplotlib.py\") should be refactored\n# to instead pass such basename.\ndef format_binaries_and_datas(binaries_or_datas, workingdir=None):\n \"\"\"\n Convert the passed list of hook-style 2-tuples into a returned set of\n `TOC`-style 2-tuples.\n\n Elements of the passed list are 2-tuples `(source_dir_or_glob, target_dir)`.\n Elements of the returned set are 2-tuples `(target_file, source_file)`.\n For backwards compatibility, the order of elements in the former tuples are\n the reverse of the order of elements in the latter tuples!\n\n Parameters\n ----------\n binaries_or_datas : list\n List of hook-style 2-tuples (e.g., the top-level `binaries` and `datas`\n attributes defined by hooks) whose:\n * The first element is either:\n * A glob matching only the absolute or relative paths of source\n non-Python data files.\n * The absolute or relative path of a source directory containing only\n source non-Python data files.\n * The second element ist he relative path of the target directory\n into which these source files will be recursively copied.\n\n If the optional `workingdir` parameter is passed, source paths may be\n either absolute or relative; else, source paths _must_ be absolute.\n workingdir : str\n Optional absolute path of the directory to which all relative source\n paths in the `binaries_or_datas` parameter will be prepended by (and\n hence converted into absolute paths) _or_ `None` if these paths are to\n be preserved as relative. 
Defaults to `None`.\n\n Returns\n ----------\n set\n Set of `TOC`-style 2-tuples whose:\n * First element is the absolute or relative path of a target file.\n * Second element is the absolute or relative path of the corresponding\n source file to be copied to this target file.\n \"\"\"\n toc_datas = set()\n\n for src_root_path_or_glob, trg_root_dir in binaries_or_datas:\n if not trg_root_dir:\n raise SystemExit(\"Empty DEST not allowed when adding binary \"\n \"and data files. \"\n \"Maybe you want to used %r.\\nCaused by %r.\" %\n (os.curdir, src_root_path_or_glob))\n # Convert relative to absolute paths if required.\n if workingdir and not os.path.isabs(src_root_path_or_glob):\n src_root_path_or_glob = os.path.join(\n workingdir, src_root_path_or_glob)\n\n # Normalize paths.\n src_root_path_or_glob = os.path.normpath(src_root_path_or_glob)\n if os.path.isfile(src_root_path_or_glob):\n src_root_paths = [src_root_path_or_glob]\n else:\n # List of the absolute paths of all source paths matching the\n # current glob.\n src_root_paths = glob.glob(src_root_path_or_glob)\n\n if not src_root_paths:\n msg = 'Unable to find \"%s\" when adding binary and data files.' % (\n src_root_path_or_glob)\n # on Debian/Ubuntu, missing pyconfig.h files can be fixed with\n # installing python-dev\n if src_root_path_or_glob.endswith(\"pyconfig.h\"):\n msg += \"\"\"This would mean your Python installation doesn't\ncome with proper library files. This usually happens by missing development\npackage, or unsuitable build parameters of Python installation.\n* On Debian/Ubuntu, you would need to install Python development packages\n * apt-get install python3-dev\n * apt-get install python-dev\n* If you're building Python by yourself, please rebuild your Python with\n`--enable-shared` (or, `--enable-framework` on Darwin)\n\"\"\"\n raise SystemExit(msg)\n\n for src_root_path in src_root_paths:\n if os.path.isfile(src_root_path):\n # Normalizing the result to remove redundant relative\n # paths (e.g., removing \"./\" from \"trg/./file\").\n toc_datas.add((\n os.path.normpath(os.path.join(\n trg_root_dir, os.path.basename(src_root_path))),\n os.path.normpath(src_root_path)))\n elif os.path.isdir(src_root_path):\n for src_dir, src_subdir_basenames, src_file_basenames in \\\n os.walk(src_root_path):\n # Ensure the current source directory is a subdirectory\n # of the passed top-level source directory. Since\n # os.walk() does *NOT* follow symlinks by default, this\n # should be the case. 
(But let's make sure.)\n assert src_dir.startswith(src_root_path)\n\n # Relative path of the current target directory,\n # obtained by:\n #\n # * Stripping the top-level source directory from the\n # current source directory (e.g., removing \"/top\" from\n # \"/top/dir\").\n # * Normalizing the result to remove redundant relative\n # paths (e.g., removing \"./\" from \"trg/./file\").\n trg_dir = os.path.normpath(os.path.join(\n trg_root_dir,\n os.path.relpath(src_dir, src_root_path)))\n\n for src_file_basename in src_file_basenames:\n src_file = os.path.join(src_dir, src_file_basename)\n if os.path.isfile(src_file):\n # Normalize the result to remove redundant relative\n # paths (e.g., removing \"./\" from \"trg/./file\").\n toc_datas.add((\n os.path.normpath(\n os.path.join(trg_dir, src_file_basename)),\n os.path.normpath(src_file)))\n\n return toc_datas\n\n\ndef _load_code(modname, filename):\n path_item = os.path.dirname(filename)\n if os.path.basename(filename).startswith('__init__.py'):\n # this is a package\n path_item = os.path.dirname(path_item)\n if os.path.basename(path_item) == '__pycache__':\n path_item = os.path.dirname(path_item)\n importer = pkgutil.get_importer(path_item)\n package, _, modname = modname.rpartition('.')\n\n if sys.version_info >= (3, 3) and hasattr(importer, 'find_loader'):\n loader, portions = importer.find_loader(modname)\n else:\n loader = importer.find_module(modname)\n\n logger.debug('Compiling %s', filename)\n if loader and hasattr(loader, 'get_code'):\n return loader.get_code(modname)\n else:\n # Just as ``python foo.bar`` will read and execute statements in\n # ``foo.bar``, even though it lacks the ``.py`` extension, so\n # ``pyinstaller foo.bar`` should also work. However, Python's import\n # machinery doesn't load files without a ``.py`` extension. So, use\n # ``compile`` instead.\n #\n # On a side note, neither the Python 2 nor Python 3 calls to\n # ``pkgutil`` and ``find_module`` above handle modules ending in\n # ``.pyw``, even though ``imp.find_module`` and ``import <name>`` both\n # work. This code supports ``.pyw`` files.\n\n # Open the source file in binary mode and allow the `compile()` call to\n # detect the source encoding.\n with open_file(filename, 'rb') as f:\n source = f.read()\n return compile(source, filename, 'exec')\n\ndef get_code_object(modname, filename):\n \"\"\"\n Get the code-object for a module.\n\n This is a extra-simple version for compiling a module. It's\n not worth spending more effort here, as it is only used in the\n rare case if outXX-Analysis.toc exists, but outXX-PYZ.toc does\n not.\n \"\"\"\n\n try:\n if filename in ('-', None):\n # This is a NamespacePackage, modulegraph marks them\n # by using the filename '-'. 
(But wants to use None,\n # so check for None, too, to be forward-compatible.)\n logger.debug('Compiling namespace package %s', modname)\n txt = '#\\n'\n return compile(txt, filename, 'exec')\n else:\n logger.debug('Compiling %s', filename)\n co = _load_code(modname, filename)\n if not co:\n raise ValueError(\"Module file %s is missing\" % filename)\n return co\n except SyntaxError as e:\n print(\"Syntax error in \", filename)\n print(e.args)\n raise\n\n\ndef strip_paths_in_code(co, new_filename=None):\n\n # Paths to remove from filenames embedded in code objects\n replace_paths = sys.path + CONF['pathex']\n # Make sure paths end with os.sep\n replace_paths = [os.path.join(f, '') for f in replace_paths]\n\n if new_filename is None:\n original_filename = os.path.normpath(co.co_filename)\n for f in replace_paths:\n if original_filename.startswith(f):\n new_filename = original_filename[len(f):]\n break\n\n else:\n return co\n\n code_func = type(co)\n\n consts = tuple(\n strip_paths_in_code(const_co, new_filename)\n if isinstance(const_co, code_func) else const_co\n for const_co in co.co_consts\n )\n\n # co_kwonlyargcount added in some version of Python 3\n if hasattr(co, 'co_kwonlyargcount'):\n return code_func(co.co_argcount, co.co_kwonlyargcount, co.co_nlocals, co.co_stacksize,\n co.co_flags, co.co_code, consts, co.co_names,\n co.co_varnames, new_filename, co.co_name,\n co.co_firstlineno, co.co_lnotab,\n co.co_freevars, co.co_cellvars)\n else:\n return code_func(co.co_argcount, co.co_nlocals, co.co_stacksize,\n co.co_flags, co.co_code, consts, co.co_names,\n co.co_varnames, new_filename, co.co_name,\n co.co_firstlineno, co.co_lnotab,\n co.co_freevars, co.co_cellvars)\n\n\ndef fake_pyc_timestamp(buf):\n \"\"\"\n Reset the timestamp from a .pyc-file header to a fixed value.\n\n This enables deterministic builds without having to set pyinstaller\n source metadata (mtime) since that changes the pyc-file contents.\n\n _buf_ must at least contain the full pyc-file header.\n \"\"\"\n assert buf[:4] == compat.BYTECODE_MAGIC, \\\n \"Expected pyc magic {}, got {}\".format(compat.BYTECODE_MAGIC, buf[:4])\n start, end = 4, 8\n if is_py37:\n # see https://www.python.org/dev/peps/pep-0552/\n (flags,) = struct.unpack_from(\">I\", buf, 4)\n if flags & 1:\n # We are in the future and hash-based pyc-files are used, so\n # clear \"check_source\" flag, since there is no source\n buf[4:8] = struct.pack(\">I\", flags ^ 2)\n return buf\n else:\n # no hash-based pyc-file, timestamp is the next field\n start, end = 8, 12\n\n ts = b'pyi0' # So people know where this comes from\n return buf[:start] + ts + buf[end:]\n",
"path": "PyInstaller/building/utils.py"
}
] | [
{
"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2018, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n\n\n#--- functions for checking guts ---\n# NOTE: By GUTS it is meant intermediate files and data structures that\n# PyInstaller creates for bundling files and creating final executable.\nimport glob\nimport hashlib\nimport os\nimport os.path\nimport pkgutil\nimport platform\nimport shutil\nimport sys\n\nimport struct\n\nfrom PyInstaller.config import CONF\nfrom .. import compat\nfrom ..compat import is_darwin, is_win, EXTENSION_SUFFIXES, \\\n open_file, is_py3, is_py37\nfrom ..depend import dylib\nfrom ..depend.bindepend import match_binding_redirect\nfrom ..utils import misc\nfrom ..utils.misc import load_py_data_struct, save_py_data_struct\nfrom .. import log as logging\n\nif is_win:\n from ..utils.win32 import winmanifest, winresource\n\nlogger = logging.getLogger(__name__)\n\n\n#-- Helpers for checking guts.\n#\n# NOTE: By _GUTS it is meant intermediate files and data structures that\n# PyInstaller creates for bundling files and creating final executable.\n\ndef _check_guts_eq(attr, old, new, last_build):\n \"\"\"\n rebuild is required if values differ\n \"\"\"\n if old != new:\n logger.info(\"Building because %s changed\", attr)\n return True\n return False\n\n\ndef _check_guts_toc_mtime(attr, old, toc, last_build, pyc=0):\n \"\"\"\n rebuild is required if mtimes of files listed in old toc are newer\n than last_build\n\n if pyc=1, check for .py files, too\n\n Use this for calculated/analysed values read from cache.\n \"\"\"\n for (nm, fnm, typ) in old:\n if misc.mtime(fnm) > last_build:\n logger.info(\"Building because %s changed\", fnm)\n return True\n elif pyc and misc.mtime(fnm[:-1]) > last_build:\n logger.info(\"Building because %s changed\", fnm[:-1])\n return True\n return False\n\n\ndef _check_guts_toc(attr, old, toc, last_build, pyc=0):\n \"\"\"\n rebuild is required if either toc content changed or mtimes of\n files listed in old toc are newer than last_build\n\n if pyc=1, check for .py files, too\n\n Use this for input parameters.\n \"\"\"\n return (_check_guts_eq(attr, old, toc, last_build)\n or _check_guts_toc_mtime(attr, old, toc, last_build, pyc=pyc))\n\n\n#---\n\ndef add_suffix_to_extensions(toc):\n \"\"\"\n Returns a new TOC with proper library suffix for EXTENSION items.\n \"\"\"\n # TODO: Fix this recursive import\n from .datastruct import TOC\n new_toc = TOC()\n for inm, fnm, typ in toc:\n if typ == 'EXTENSION':\n if is_py3:\n # Change the dotted name into a relative path. This places C\n # extensions in the Python-standard location. This only works\n # in Python 3; see comments above\n # ``sys.meta_path.append(CExtensionImporter())`` in\n # ``pyimod03_importers``.\n inm = inm.replace('.', os.sep)\n # In some rare cases extension might already contain a suffix.\n # Skip it in this case.\n if os.path.splitext(inm)[1] not in EXTENSION_SUFFIXES:\n # Determine the base name of the file.\n if is_py3:\n base_name = os.path.basename(inm)\n else:\n base_name = inm.rsplit('.')[-1]\n assert '.' not in base_name\n # Use this file's existing extension. 
For extensions such as\n # ``libzmq.cp36-win_amd64.pyd``, we can't use\n # ``os.path.splitext``, which would give only the ```.pyd`` part\n # of the extension.\n inm = inm + os.path.basename(fnm)[len(base_name):]\n\n elif typ == 'DEPENDENCY':\n # Use the suffix from the filename.\n # TODO Verify what extensions are by DEPENDENCIES.\n binext = os.path.splitext(fnm)[1]\n if not os.path.splitext(inm)[1] == binext:\n inm = inm + binext\n new_toc.append((inm, fnm, typ))\n return new_toc\n\ndef applyRedirects(manifest, redirects):\n \"\"\"\n Apply the binding redirects specified by 'redirects' to the dependent assemblies\n of 'manifest'.\n\n :param manifest:\n :type manifest:\n :param redirects:\n :type redirects:\n :return:\n :rtype:\n \"\"\"\n redirecting = False\n for binding in redirects:\n for dep in manifest.dependentAssemblies:\n if match_binding_redirect(dep, binding):\n logger.info(\"Redirecting %s version %s -> %s\",\n binding.name, dep.version, binding.newVersion)\n dep.version = binding.newVersion\n redirecting = True\n return redirecting\n\ndef checkCache(fnm, strip=False, upx=False, dist_nm=None):\n \"\"\"\n Cache prevents preprocessing binary files again and again.\n\n 'dist_nm' Filename relative to dist directory. We need it on Mac\n to determine level of paths for @loader_path like\n '@loader_path/../../' for qt4 plugins.\n \"\"\"\n from ..config import CONF\n # On darwin a cache is required anyway to keep the libaries\n # with relative install names. Caching on darwin does not work\n # since we need to modify binary headers to use relative paths\n # to dll depencies and starting with '@loader_path'.\n if not strip and not upx and not is_darwin and not is_win:\n return fnm\n\n if dist_nm is not None and \":\" in dist_nm:\n # A file embedded in another pyinstaller build via multipackage\n # No actual file exists to process\n return fnm\n\n if strip:\n strip = True\n else:\n strip = False\n if upx:\n upx = True\n else:\n upx = False\n\n # Load cache index\n # Make cachedir per Python major/minor version.\n # This allows parallel building of executables with different\n # Python versions as one user.\n pyver = ('py%d%s') % (sys.version_info[0], sys.version_info[1])\n arch = platform.architecture()[0]\n cachedir = os.path.join(CONF['cachedir'], 'bincache%d%d_%s_%s' % (strip, upx, pyver, arch))\n if not os.path.exists(cachedir):\n os.makedirs(cachedir)\n cacheindexfn = os.path.join(cachedir, \"index.dat\")\n if os.path.exists(cacheindexfn):\n try:\n cache_index = load_py_data_struct(cacheindexfn)\n except Exception as e:\n # tell the user they may want to fix their cache\n # .. however, don't delete it for them; if it keeps getting\n # corrupted, we'll never find out\n logger.warn(\"pyinstaller bincache may be corrupted; \"\n \"use pyinstaller --clean to fix\")\n raise\n else:\n cache_index = {}\n\n # Verify if the file we're looking for is present in the cache.\n # Use the dist_mn if given to avoid different extension modules\n # sharing the same basename get corrupted.\n if dist_nm:\n basenm = os.path.normcase(dist_nm)\n else:\n basenm = os.path.normcase(os.path.basename(fnm))\n\n # Binding redirects should be taken into account to see if the file\n # needs to be reprocessed. 
The redirects may change if the versions of dependent\n # manifests change due to system updates.\n redirects = CONF.get('binding_redirects', [])\n digest = cacheDigest(fnm, redirects)\n cachedfile = os.path.join(cachedir, basenm)\n cmd = None\n if basenm in cache_index:\n if digest != cache_index[basenm]:\n os.remove(cachedfile)\n else:\n # On Mac OS X we need relative paths to dll dependencies\n # starting with @executable_path\n if is_darwin:\n dylib.mac_set_relative_dylib_deps(cachedfile, dist_nm)\n return cachedfile\n\n\n # Optionally change manifest and its deps to private assemblies\n if fnm.lower().endswith(\".manifest\"):\n manifest = winmanifest.Manifest()\n manifest.filename = fnm\n with open(fnm, \"rb\") as f:\n manifest.parse_string(f.read())\n if CONF.get('win_private_assemblies', False):\n if manifest.publicKeyToken:\n logger.info(\"Changing %s into private assembly\", os.path.basename(fnm))\n manifest.publicKeyToken = None\n for dep in manifest.dependentAssemblies:\n # Exclude common-controls which is not bundled\n if dep.name != \"Microsoft.Windows.Common-Controls\":\n dep.publicKeyToken = None\n\n applyRedirects(manifest, redirects)\n\n manifest.writeprettyxml(cachedfile)\n return cachedfile\n\n if upx:\n if strip:\n fnm = checkCache(fnm, strip=True, upx=False)\n bestopt = \"--best\"\n # FIXME: Linux builds of UPX do not seem to contain LZMA (they assert out)\n # A better configure-time check is due.\n if CONF[\"hasUPX\"] >= (3,) and os.name == \"nt\":\n bestopt = \"--lzma\"\n\n upx_executable = \"upx\"\n if CONF.get('upx_dir'):\n upx_executable = os.path.join(CONF['upx_dir'], upx_executable)\n cmd = [upx_executable, bestopt, \"-q\", cachedfile]\n else:\n if strip:\n strip_options = []\n if is_darwin:\n # The default strip behaviour breaks some shared libraries\n # under Mac OSX.\n # -S = strip only debug symbols.\n strip_options = [\"-S\"]\n cmd = [\"strip\"] + strip_options + [cachedfile]\n\n if not os.path.exists(os.path.dirname(cachedfile)):\n os.makedirs(os.path.dirname(cachedfile))\n # There are some known issues with 'shutil.copy2' on Mac OS X 10.11\n # with copying st_flags.
Issue #1650.\n # 'shutil.copy' also copies permission bits and it should be sufficient for\n # PyInstaller purposes.\n shutil.copy(fnm, cachedfile)\n # TODO find out if this is still necessary when no longer using shutil.copy2()\n if hasattr(os, 'chflags'):\n # Some libraries on FreeBSD have the immutable flag set (libthr.so.3, for example).\n # If the flag remains set, os.chmod will fail with:\n # OSError: [Errno 1] Operation not permitted.\n try:\n os.chflags(cachedfile, 0)\n except OSError:\n pass\n os.chmod(cachedfile, 0o755)\n\n if os.path.splitext(fnm.lower())[1] in (\".pyd\", \".dll\"):\n # When shared assemblies are bundled into the app, they may optionally be\n # changed into private assemblies.\n try:\n res = winmanifest.GetManifestResources(os.path.abspath(cachedfile))\n except winresource.pywintypes.error as e:\n if e.args[0] == winresource.ERROR_BAD_EXE_FORMAT:\n # Not a win32 PE file\n pass\n else:\n logger.error(os.path.abspath(cachedfile))\n raise\n else:\n if winmanifest.RT_MANIFEST in res and len(res[winmanifest.RT_MANIFEST]):\n for name in res[winmanifest.RT_MANIFEST]:\n for language in res[winmanifest.RT_MANIFEST][name]:\n try:\n manifest = winmanifest.Manifest()\n manifest.filename = \":\".join([cachedfile,\n str(winmanifest.RT_MANIFEST),\n str(name),\n str(language)])\n manifest.parse_string(res[winmanifest.RT_MANIFEST][name][language],\n False)\n except Exception as exc:\n logger.error(\"Cannot parse manifest resource %s, \"\n \"%s\", name, language)\n logger.error(\"From file %s\", cachedfile, exc_info=1)\n else:\n # optionally change manifest to private assembly\n private = CONF.get('win_private_assemblies', False)\n if private:\n if manifest.publicKeyToken:\n logger.info(\"Changing %s into a private assembly\",\n os.path.basename(fnm))\n manifest.publicKeyToken = None\n\n # Change dep to private assembly\n for dep in manifest.dependentAssemblies:\n # Exclude common-controls which is not bundled\n if dep.name != \"Microsoft.Windows.Common-Controls\":\n dep.publicKeyToken = None\n redirecting = applyRedirects(manifest, redirects)\n if redirecting or private:\n try:\n manifest.update_resources(os.path.abspath(cachedfile),\n [name],\n [language])\n except Exception as e:\n logger.error(os.path.abspath(cachedfile))\n raise\n\n if cmd:\n try:\n logger.info(\"Executing - \" + ' '.join(cmd))\n compat.exec_command(*cmd)\n except OSError as e:\n raise SystemExit(\"Execution failed: %s\" % e)\n\n # update cache index\n cache_index[basenm] = digest\n save_py_data_struct(cacheindexfn, cache_index)\n\n # On Mac OS X we need relative paths to dll dependencies\n # starting with @executable_path\n if is_darwin:\n dylib.mac_set_relative_dylib_deps(cachedfile, dist_nm)\n return cachedfile\n\n\ndef cacheDigest(fnm, redirects):\n hasher = hashlib.md5()\n with open(fnm, \"rb\") as f:\n for chunk in iter(lambda: f.read(16 * 1024), b\"\"):\n hasher.update(chunk)\n if redirects:\n redirects = str(redirects)\n if is_py3:\n redirects = redirects.encode('utf-8')\n hasher.update(redirects)\n digest = bytearray(hasher.digest())\n return digest\n\n\ndef _check_path_overlap(path):\n \"\"\"\n Check that path does not overlap with WORKPATH or SPECPATH (i.e.\n WORKPATH and SPECPATH may not start with path, which could be\n caused by a faulty hand-edited specfile)\n\n Raise SystemExit if there is overlap, return True otherwise\n \"\"\"\n from ..config import CONF\n specerr = 0\n if CONF['workpath'].startswith(path):\n logger.error('Specfile error: The output path \"%s\" contains '\n 'WORKPATH (%s)', path,
CONF['workpath'])\n specerr += 1\n if CONF['specpath'].startswith(path):\n logger.error('Specfile error: The output path \"%s\" contains '\n 'SPECPATH (%s)', path, CONF['specpath'])\n specerr += 1\n if specerr:\n raise SystemExit('Error: Please edit/recreate the specfile (%s) '\n 'and set a different output name (e.g. \"dist\").'\n % CONF['spec'])\n return True\n\n\ndef _make_clean_directory(path):\n \"\"\"\n Create a clean directory from the given directory name\n \"\"\"\n if _check_path_overlap(path):\n if os.path.isdir(path) or os.path.isfile(path):\n try:\n os.remove(path)\n except OSError:\n _rmtree(path)\n\n os.makedirs(path)\n\n\ndef _rmtree(path):\n \"\"\"\n Remove directory and all its contents, but only after user confirmation,\n or if the -y option is set\n \"\"\"\n from ..config import CONF\n if CONF['noconfirm']:\n choice = 'y'\n elif sys.stdout.isatty():\n choice = compat.stdin_input('WARNING: The output directory \"%s\" and ALL ITS '\n 'CONTENTS will be REMOVED! Continue? (y/n)' % path)\n else:\n raise SystemExit('Error: The output directory \"%s\" is not empty. '\n 'Please remove all its contents or use the '\n '-y option (remove output directory without '\n 'confirmation).' % path)\n if choice.strip().lower() == 'y':\n logger.info('Removing dir %s', path)\n shutil.rmtree(path)\n else:\n raise SystemExit('User aborted')\n\n\n# TODO Refactor to prohibit empty target directories. As the docstring\n# below documents, this function currently permits the second item of each\n# 2-tuple in \"hook.datas\" to be the empty string, in which case the target\n# directory defaults to the source directory's basename. However, this\n# functionality is very fragile and hence bad. Instead:\n#\n# * An exception should be raised if such item is empty.\n# * All hooks currently passing the empty string for such item (e.g.,\n# \"hooks/hook-babel.py\", \"hooks/hook-matplotlib.py\") should be refactored\n# to instead pass such basename.\ndef format_binaries_and_datas(binaries_or_datas, workingdir=None):\n \"\"\"\n Convert the passed list of hook-style 2-tuples into a returned set of\n `TOC`-style 2-tuples.\n\n Elements of the passed list are 2-tuples `(source_dir_or_glob, target_dir)`.\n Elements of the returned set are 2-tuples `(target_file, source_file)`.\n For backwards compatibility, the order of elements in the former tuples is\n the reverse of the order of elements in the latter tuples!\n\n Parameters\n ----------\n binaries_or_datas : list\n List of hook-style 2-tuples (e.g., the top-level `binaries` and `datas`\n attributes defined by hooks) whose:\n * The first element is either:\n * A glob matching only the absolute or relative paths of source\n non-Python data files.\n * The absolute or relative path of a source directory containing only\n source non-Python data files.\n * The second element is the relative path of the target directory\n into which these source files will be recursively copied.\n\n If the optional `workingdir` parameter is passed, source paths may be\n either absolute or relative; else, source paths _must_ be absolute.\n workingdir : str\n Optional absolute path of the directory to which all relative source\n paths in the `binaries_or_datas` parameter will be prepended by (and\n hence converted into absolute paths) _or_ `None` if these paths are to\n be preserved as relative.
Defaults to `None`.\n\n Returns\n ----------\n set\n Set of `TOC`-style 2-tuples whose:\n * First element is the absolute or relative path of a target file.\n * Second element is the absolute or relative path of the corresponding\n source file to be copied to this target file.\n \"\"\"\n toc_datas = set()\n\n for src_root_path_or_glob, trg_root_dir in binaries_or_datas:\n if not trg_root_dir:\n raise SystemExit(\"Empty DEST not allowed when adding binary \"\n \"and data files. \"\n \"Maybe you want to use %r.\\nCaused by %r.\" %\n (os.curdir, src_root_path_or_glob))\n # Convert relative to absolute paths if required.\n if workingdir and not os.path.isabs(src_root_path_or_glob):\n src_root_path_or_glob = os.path.join(\n workingdir, src_root_path_or_glob)\n\n # Normalize paths.\n src_root_path_or_glob = os.path.normpath(src_root_path_or_glob)\n if os.path.isfile(src_root_path_or_glob):\n src_root_paths = [src_root_path_or_glob]\n else:\n # List of the absolute paths of all source paths matching the\n # current glob.\n src_root_paths = glob.glob(src_root_path_or_glob)\n\n if not src_root_paths:\n msg = 'Unable to find \"%s\" when adding binary and data files.' % (\n src_root_path_or_glob)\n # on Debian/Ubuntu, missing pyconfig.h files can be fixed by\n # installing python-dev\n if src_root_path_or_glob.endswith(\"pyconfig.h\"):\n msg += \"\"\"This would mean your Python installation doesn't\ncome with proper library files. This usually happens due to a missing development\npackage, or unsuitable build parameters of the Python installation.\n* On Debian/Ubuntu, you would need to install Python development packages\n * apt-get install python3-dev\n * apt-get install python-dev\n* If you're building Python by yourself, please rebuild your Python with\n`--enable-shared` (or, `--enable-framework` on Darwin)\n\"\"\"\n raise SystemExit(msg)\n\n for src_root_path in src_root_paths:\n if os.path.isfile(src_root_path):\n # Normalizing the result to remove redundant relative\n # paths (e.g., removing \"./\" from \"trg/./file\").\n toc_datas.add((\n os.path.normpath(os.path.join(\n trg_root_dir, os.path.basename(src_root_path))),\n os.path.normpath(src_root_path)))\n elif os.path.isdir(src_root_path):\n for src_dir, src_subdir_basenames, src_file_basenames in \\\n os.walk(src_root_path):\n # Ensure the current source directory is a subdirectory\n # of the passed top-level source directory. Since\n # os.walk() does *NOT* follow symlinks by default, this\n # should be the case.
(But let's make sure.)\n assert src_dir.startswith(src_root_path)\n\n # Relative path of the current target directory,\n # obtained by:\n #\n # * Stripping the top-level source directory from the\n # current source directory (e.g., removing \"/top\" from\n # \"/top/dir\").\n # * Normalizing the result to remove redundant relative\n # paths (e.g., removing \"./\" from \"trg/./file\").\n trg_dir = os.path.normpath(os.path.join(\n trg_root_dir,\n os.path.relpath(src_dir, src_root_path)))\n\n for src_file_basename in src_file_basenames:\n src_file = os.path.join(src_dir, src_file_basename)\n if os.path.isfile(src_file):\n # Normalize the result to remove redundant relative\n # paths (e.g., removing \"./\" from \"trg/./file\").\n toc_datas.add((\n os.path.normpath(\n os.path.join(trg_dir, src_file_basename)),\n os.path.normpath(src_file)))\n\n return toc_datas\n\n\ndef _load_code(modname, filename):\n path_item = os.path.dirname(filename)\n if os.path.basename(filename).startswith('__init__.py'):\n # this is a package\n path_item = os.path.dirname(path_item)\n if os.path.basename(path_item) == '__pycache__':\n path_item = os.path.dirname(path_item)\n importer = pkgutil.get_importer(path_item)\n package, _, modname = modname.rpartition('.')\n\n if sys.version_info >= (3, 3) and hasattr(importer, 'find_loader'):\n loader, portions = importer.find_loader(modname)\n else:\n loader = importer.find_module(modname)\n\n logger.debug('Compiling %s', filename)\n if loader and hasattr(loader, 'get_code'):\n return loader.get_code(modname)\n else:\n # Just as ``python foo.bar`` will read and execute statements in\n # ``foo.bar``, even though it lacks the ``.py`` extension, so\n # ``pyinstaller foo.bar`` should also work. However, Python's import\n # machinery doesn't load files without a ``.py`` extension. So, use\n # ``compile`` instead.\n #\n # On a side note, neither the Python 2 nor Python 3 calls to\n # ``pkgutil`` and ``find_module`` above handle modules ending in\n # ``.pyw``, even though ``imp.find_module`` and ``import <name>`` both\n # work. This code supports ``.pyw`` files.\n\n # Open the source file in binary mode and allow the `compile()` call to\n # detect the source encoding.\n with open_file(filename, 'rb') as f:\n source = f.read()\n return compile(source, filename, 'exec')\n\n\ndef get_code_object(modname, filename):\n \"\"\"\n Get the code-object for a module.\n\n This is an extra-simple version for compiling a module. It's\n not worth spending more effort here, as it is only used in the\n rare case where outXX-Analysis.toc exists, but outXX-PYZ.toc does\n not.\n \"\"\"\n\n try:\n if filename in ('-', None):\n # This is a NamespacePackage, modulegraph marks them\n # by using the filename '-'.
(But wants to use None,\n # so check for None, too, to be forward-compatible.)\n logger.debug('Compiling namespace package %s', modname)\n txt = '#\\n'\n return compile(txt, filename, 'exec')\n else:\n logger.debug('Compiling %s', filename)\n co = _load_code(modname, filename)\n if not co:\n raise ValueError(\"Module file %s is missing\" % filename)\n return co\n except SyntaxError as e:\n print(\"Syntax error in \", filename)\n print(e.args)\n raise\n\n\ndef strip_paths_in_code(co, new_filename=None):\n\n # Paths to remove from filenames embedded in code objects\n replace_paths = sys.path + CONF['pathex']\n # Make sure paths end with os.sep\n replace_paths = [os.path.join(f, '') for f in replace_paths]\n\n if new_filename is None:\n original_filename = os.path.normpath(co.co_filename)\n for f in replace_paths:\n if original_filename.startswith(f):\n new_filename = original_filename[len(f):]\n break\n\n else:\n return co\n\n code_func = type(co)\n\n consts = tuple(\n strip_paths_in_code(const_co, new_filename)\n if isinstance(const_co, code_func) else const_co\n for const_co in co.co_consts\n )\n\n # co_kwonlyargcount added in some version of Python 3\n if hasattr(co, 'co_kwonlyargcount'):\n return code_func(co.co_argcount, co.co_kwonlyargcount, co.co_nlocals, co.co_stacksize,\n co.co_flags, co.co_code, consts, co.co_names,\n co.co_varnames, new_filename, co.co_name,\n co.co_firstlineno, co.co_lnotab,\n co.co_freevars, co.co_cellvars)\n else:\n return code_func(co.co_argcount, co.co_nlocals, co.co_stacksize,\n co.co_flags, co.co_code, consts, co.co_names,\n co.co_varnames, new_filename, co.co_name,\n co.co_firstlineno, co.co_lnotab,\n co.co_freevars, co.co_cellvars)\n\n\ndef fake_pyc_timestamp(buf):\n \"\"\"\n Reset the timestamp from a .pyc-file header to a fixed value.\n\n This enables deterministic builds without having to set pyinstaller\n source metadata (mtime) since that changes the pyc-file contents.\n\n _buf_ must at least contain the full pyc-file header.\n \"\"\"\n assert buf[:4] == compat.BYTECODE_MAGIC, \\\n \"Expected pyc magic {}, got {}\".format(compat.BYTECODE_MAGIC, buf[:4])\n start, end = 4, 8\n if is_py37:\n # see https://www.python.org/dev/peps/pep-0552/\n (flags,) = struct.unpack_from(\">I\", buf, 4)\n if flags & 1:\n # We are in the future and hash-based pyc-files are used, so\n # clear \"check_source\" flag, since there is no source\n buf[4:8] = struct.pack(\">I\", flags ^ 2)\n return buf\n else:\n # no hash-based pyc-file, timestamp is the next field\n start, end = 8, 12\n\n ts = b'pyi0' # So people know where this comes from\n return buf[:start] + ts + buf[end:]\n",
"path": "PyInstaller/building/utils.py"
}
] | diff --git a/PyInstaller/building/utils.py b/PyInstaller/building/utils.py
index e8dcf58d55..39a8d4b634 100644
--- a/PyInstaller/building/utils.py
+++ b/PyInstaller/building/utils.py
@@ -404,7 +404,7 @@ def _make_clean_directory(path):
Create a clean directory from the given directory name
"""
if _check_path_overlap(path):
- if os.path.isdir(path):
+ if os.path.isdir(path) or os.path.isfile(path):
try:
os.remove(path)
except OSError:
diff --git a/news/3662.bugfix.rst b/news/3662.bugfix.rst
new file mode 100644
index 0000000000..de05448c44
--- /dev/null
+++ b/news/3662.bugfix.rst
@@ -0,0 +1 @@
+Fix crash when changing from ``--onefile`` to ``--onedir`` on consecutive runs.
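For context, here is a minimal self-contained sketch of the behaviour this one-line fix restores. The helper below is a simplified stand-in for PyInstaller's `_make_clean_directory` (it omits the `_check_path_overlap` guard), and the `app_output` path is purely illustrative:

```python
import os
import shutil

def make_clean_directory(path):
    # Simplified stand-in for _make_clean_directory, omitting the
    # _check_path_overlap() guard. The `or os.path.isfile(path)` clause
    # is the fix: a previous --onefile build leaves `path` behind as a
    # plain file, which os.makedirs() would otherwise trip over.
    if os.path.isdir(path) or os.path.isfile(path):
        try:
            os.remove(path)       # succeeds when `path` is a file
        except OSError:
            shutil.rmtree(path)   # fallback when `path` is a directory
    os.makedirs(path)

# Hypothetical repro of the crash scenario: simulate the single-file
# binary left behind by --onefile, then "build" again as --onedir.
open("app_output", "wb").close()
make_clean_directory("app_output")
assert os.path.isdir("app_output")
```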
|
microsoft__botbuilder-python-1231 | [PORT] [Authentication] updates to support Arlington
> Port this change from botbuilder-dotnet/master branch:
https://github.com/microsoft/botbuilder-dotnet/pull/3734
# Changed projects
* Microsoft.Bot.Connector
* Microsoft.Bot.Connector.Tests
| [
{
"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\nfrom abc import ABC\n\n\nclass GovernmentConstants(ABC):\n\n \"\"\"\n Government Channel Service property value\n \"\"\"\n\n CHANNEL_SERVICE = \"https://botframework.azure.us\"\n\n \"\"\"\n TO CHANNEL FROM BOT: Login URL\n \"\"\"\n TO_CHANNEL_FROM_BOT_LOGIN_URL = (\n \"https://login.microsoftonline.us/\"\n \"cab8a31a-1906-4287-a0d8-4eef66b95f6e/\"\n \"oauth2/v2.0/token\"\n )\n\n \"\"\"\n TO CHANNEL FROM BOT: OAuth scope to request\n \"\"\"\n TO_CHANNEL_FROM_BOT_OAUTH_SCOPE = \"https://api.botframework.us/.default\"\n\n \"\"\"\n TO BOT FROM CHANNEL: Token issuer\n \"\"\"\n TO_BOT_FROM_CHANNEL_TOKEN_ISSUER = \"https://api.botframework.us\"\n\n \"\"\"\n TO BOT FROM CHANNEL: OpenID metadata document for tokens coming from MSA\n \"\"\"\n TO_BOT_FROM_CHANNEL_OPEN_ID_METADATA_URL = (\n \"https://login.botframework.azure.us/v1/.well-known/openidconfiguration\"\n )\n\n \"\"\"\n TO BOT FROM GOV EMULATOR: OpenID metadata document for tokens coming from MSA\n \"\"\"\n TO_BOT_FROM_EMULATOR_OPEN_ID_METADATA_URL = (\n \"https://login.microsoftonline.us/\"\n \"cab8a31a-1906-4287-a0d8-4eef66b95f6e/v2.0/\"\n \".well-known/openid-configuration\"\n )\n",
"path": "libraries/botframework-connector/botframework/connector/auth/government_constants.py"
}
] | [
{
"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\nfrom abc import ABC\n\n\nclass GovernmentConstants(ABC):\n\n \"\"\"\n Government Channel Service property value\n \"\"\"\n\n CHANNEL_SERVICE = \"https://botframework.azure.us\"\n\n \"\"\"\n TO CHANNEL FROM BOT: Login URL\n \"\"\"\n TO_CHANNEL_FROM_BOT_LOGIN_URL = (\n \"https://login.microsoftonline.us/MicrosoftServices.onmicrosoft.us\"\n )\n\n \"\"\"\n TO CHANNEL FROM BOT: OAuth scope to request\n \"\"\"\n TO_CHANNEL_FROM_BOT_OAUTH_SCOPE = \"https://api.botframework.us/.default\"\n\n \"\"\"\n TO BOT FROM CHANNEL: Token issuer\n \"\"\"\n TO_BOT_FROM_CHANNEL_TOKEN_ISSUER = \"https://api.botframework.us\"\n\n \"\"\"\n TO BOT FROM CHANNEL: OpenID metadata document for tokens coming from MSA\n \"\"\"\n TO_BOT_FROM_CHANNEL_OPEN_ID_METADATA_URL = (\n \"https://login.botframework.azure.us/v1/.well-known/openidconfiguration\"\n )\n\n \"\"\"\n TO BOT FROM GOV EMULATOR: OpenID metadata document for tokens coming from MSA\n \"\"\"\n TO_BOT_FROM_EMULATOR_OPEN_ID_METADATA_URL = (\n \"https://login.microsoftonline.us/\"\n \"cab8a31a-1906-4287-a0d8-4eef66b95f6e/v2.0/\"\n \".well-known/openid-configuration\"\n )\n",
"path": "libraries/botframework-connector/botframework/connector/auth/government_constants.py"
}
] | diff --git a/libraries/botframework-connector/botframework/connector/auth/government_constants.py b/libraries/botframework-connector/botframework/connector/auth/government_constants.py
index 8dcb19b34..0d768397a 100644
--- a/libraries/botframework-connector/botframework/connector/auth/government_constants.py
+++ b/libraries/botframework-connector/botframework/connector/auth/government_constants.py
@@ -15,9 +15,7 @@ class GovernmentConstants(ABC):
TO CHANNEL FROM BOT: Login URL
"""
TO_CHANNEL_FROM_BOT_LOGIN_URL = (
- "https://login.microsoftonline.us/"
- "cab8a31a-1906-4287-a0d8-4eef66b95f6e/"
- "oauth2/v2.0/token"
+ "https://login.microsoftonline.us/MicrosoftServices.onmicrosoft.us"
)
"""
|
UTNkar__moore-53 | Confirmation e-mails are not sent
For some reason the confirmation e-mails are no longer being sent.
| [
{
"content": "import datetime\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.db.models.signals import pre_save\nfrom django.dispatch import receiver\nfrom django.template import loader\nfrom django.utils import timezone\nfrom simple_email_confirmation import unconfirmed_email_created\n\nfrom members.models import Member\n\n\n@receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')\ndef send_confirmation_email(sender, email, user=None, **kwargs):\n if user is not None:\n context = {\n 'email': email,\n 'domain': settings.BASE_URL,\n 'site_name': settings.WAGTAIL_SITE_NAME,\n 'token': user.get_confirmation_key(email),\n }\n\n subject = loader.render_to_string(\n 'members/email_change_subject.txt', context)\n # Email subject *must not* contain newlines\n subject = ''.join(subject.splitlines())\n body = loader.render_to_string('members/email_change_email.html',\n context)\n\n email_message = EmailMultiAlternatives(subject, body, None, [email])\n email_message.send()\n\n\n@receiver(pre_save, sender=Member, dispatch_uid='member_check_membership')\ndef check_membership(sender, instance, **kwargs):\n if timezone.now() - instance.status_changed > datetime.timedelta(1):\n instance.update_status()\n",
"path": "website/members/signals.py"
}
] | [
{
"content": "import datetime\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.db.models.signals import pre_save\nfrom django.dispatch import receiver\nfrom django.template import loader\nfrom django.utils import timezone\nfrom simple_email_confirmation import unconfirmed_email_created\n\nfrom members.models import Member\n\n\n@receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')\ndef send_confirmation_email(sender, email, user=None, **kwargs):\n user = user or sender\n if user is not None:\n context = {\n 'email': email,\n 'domain': settings.BASE_URL,\n 'site_name': settings.WAGTAIL_SITE_NAME,\n 'token': user.get_confirmation_key(email),\n }\n\n subject = loader.render_to_string(\n 'members/email_change_subject.txt', context)\n # Email subject *must not* contain newlines\n subject = ''.join(subject.splitlines())\n body = loader.render_to_string('members/email_change_email.html',\n context)\n\n email_message = EmailMultiAlternatives(subject, body, None, [email])\n email_message.send()\n\n\n@receiver(pre_save, sender=Member, dispatch_uid='member_check_membership')\ndef check_membership(sender, instance, **kwargs):\n if timezone.now() - instance.status_changed > datetime.timedelta(1):\n instance.update_status()\n",
"path": "website/members/signals.py"
}
] | diff --git a/CHANGELOG.md b/CHANGELOG.md
index e3c55585..753dc74f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
## [Unreleased]
+### Fixed
+- Confirmation e-mails not being sent.
## [0.1.1] - 2017-03-30
### Added
diff --git a/website/members/signals.py b/website/members/signals.py
index c55021da..280ed8fa 100644
--- a/website/members/signals.py
+++ b/website/members/signals.py
@@ -12,6 +12,7 @@
@receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')
def send_confirmation_email(sender, email, user=None, **kwargs):
+ user = user or sender
if user is not None:
context = {
'email': email,
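The one-line fix works because django-simple-email-confirmation dispatches `unconfirmed_email_created` with the user object as the signal `sender`, while the receiver only looked at the optional `user` keyword. A minimal sketch of that dispatch pattern follows (the `send()` call is an assumption about how the library fires the signal, shown for illustration only):

```python
# Minimal sketch; the send() call below is an assumption about how
# simple_email_confirmation fires the signal, for illustration only.
import django.dispatch

unconfirmed_email_created = django.dispatch.Signal()

def send_confirmation_email(sender, email, user=None, **kwargs):
    user = user or sender  # the user arrives as `sender`, not `user`
    if user is not None:
        print("would send a confirmation for", email, "to user", user)

unconfirmed_email_created.connect(send_confirmation_email)

# Fired with the user as sender and no `user` kwarg: before the fix the
# receiver saw user=None and silently skipped sending the e-mail.
unconfirmed_email_created.send(sender="member-object", email="new@example.com")
```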
|
piskvorky__gensim-3252 | Use coveralls
While contributing to gensim I felt the need for an automated system that can confirm whether I have written tests covering **every line of code** I wrote. I propose the immediate use of https://coveralls.io/ for ensuring this.
Here is Coveralls in action on my own repository: https://coveralls.io/github/jellAIfish/omega?branch=master
| [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2014 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nRun with::\n\n python ./setup.py install\n\"\"\"\n\nimport distutils.cmd\nimport distutils.log\nimport itertools\nimport os\nimport platform\nimport shutil\nimport sys\n\nfrom setuptools import Extension, find_packages, setup\nfrom setuptools.command.build_ext import build_ext\n\nc_extensions = {\n 'gensim.models.word2vec_inner': 'gensim/models/word2vec_inner.c',\n 'gensim.corpora._mmreader': 'gensim/corpora/_mmreader.c',\n 'gensim.models.fasttext_inner': 'gensim/models/fasttext_inner.c',\n 'gensim._matutils': 'gensim/_matutils.c',\n 'gensim.models.nmf_pgd': 'gensim/models/nmf_pgd.c',\n 'gensim.similarities.fastss': 'gensim/similarities/fastss.c',\n}\n\ncpp_extensions = {\n 'gensim.models.doc2vec_inner': 'gensim/models/doc2vec_inner.cpp',\n 'gensim.models.word2vec_corpusfile': 'gensim/models/word2vec_corpusfile.cpp',\n 'gensim.models.fasttext_corpusfile': 'gensim/models/fasttext_corpusfile.cpp',\n 'gensim.models.doc2vec_corpusfile': 'gensim/models/doc2vec_corpusfile.cpp',\n}\n\n\ndef need_cython():\n \"\"\"Return True if we need Cython to translate any of the extensions.\n\n If the extensions have already been translated to C/C++, then we don't need\n to install Cython and perform the translation.\n\n \"\"\"\n expected = list(c_extensions.values()) + list(cpp_extensions.values())\n return any([not os.path.isfile(f) for f in expected])\n\n\ndef make_c_ext(use_cython=False):\n for module, source in c_extensions.items():\n if use_cython:\n source = source.replace('.c', '.pyx')\n extra_args = []\n# extra_args.extend(['-g', '-O0']) # uncomment if optimization limiting crash info\n yield Extension(\n module,\n sources=[source],\n language='c',\n extra_compile_args=extra_args,\n )\n\n\ndef make_cpp_ext(use_cython=False):\n extra_args = []\n system = platform.system()\n\n if system == 'Linux':\n extra_args.append('-std=c++11')\n elif system == 'Darwin':\n extra_args.extend(['-stdlib=libc++', '-std=c++11'])\n# extra_args.extend(['-g', '-O0']) # uncomment if optimization limiting crash info\n for module, source in cpp_extensions.items():\n if use_cython:\n source = source.replace('.cpp', '.pyx')\n yield Extension(\n module,\n sources=[source],\n language='c++',\n extra_compile_args=extra_args,\n extra_link_args=extra_args,\n )\n\n\n#\n# We use use_cython=False here for two reasons:\n#\n# 1. Cython may not be available at this stage\n# 2. 
The actual translation from Cython to C/C++ happens inside CustomBuildExt\n#\next_modules = list(itertools.chain(make_c_ext(use_cython=False), make_cpp_ext(use_cython=False)))\n\n\nclass CustomBuildExt(build_ext):\n \"\"\"Custom build_ext action with bootstrapping.\n\n We need this in order to use numpy and Cython in this script without\n importing them at module level, because they may not be available yet.\n \"\"\"\n #\n # http://stackoverflow.com/questions/19919905/how-to-bootstrap-numpy-installation-in-setup-py\n #\n def finalize_options(self):\n build_ext.finalize_options(self)\n # Prevent numpy from thinking it is still in its setup process:\n # https://docs.python.org/2/library/__builtin__.html#module-__builtin__\n __builtins__.__NUMPY_SETUP__ = False\n\n import numpy\n self.include_dirs.append(numpy.get_include())\n\n if need_cython():\n import Cython.Build\n Cython.Build.cythonize(list(make_c_ext(use_cython=True)))\n Cython.Build.cythonize(list(make_cpp_ext(use_cython=True)))\n\n\nclass CleanExt(distutils.cmd.Command):\n description = 'Remove C sources, C++ sources and binaries for gensim extensions'\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n for root, dirs, files in os.walk('gensim'):\n files = [\n os.path.join(root, f)\n for f in files\n if os.path.splitext(f)[1] in ('.c', '.cpp', '.so')\n ]\n for f in files:\n self.announce('removing %s' % f, level=distutils.log.INFO)\n os.unlink(f)\n\n if os.path.isdir('build'):\n self.announce('recursively removing build', level=distutils.log.INFO)\n shutil.rmtree('build')\n\n\ncmdclass = {'build_ext': CustomBuildExt, 'clean_ext': CleanExt}\n\nWHEELHOUSE_UPLOADER_COMMANDS = {'fetch_artifacts', 'upload_all'}\nif WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):\n import wheelhouse_uploader.cmd\n cmdclass.update(vars(wheelhouse_uploader.cmd))\n\n\nLONG_DESCRIPTION = u\"\"\"\n==============================================\ngensim -- Topic Modelling in Python\n==============================================\n\n|GA|_\n|Wheel|_\n\n.. |GA| image:: https://github.com/RaRe-Technologies/gensim/actions/workflows/tests.yml/badge.svg?branch=develop\n.. |Wheel| image:: https://img.shields.io/pypi/wheel/gensim.svg\n\n.. _GA: https://github.com/RaRe-Technologies/gensim/actions\n.. _Downloads: https://pypi.python.org/pypi/gensim\n.. _License: http://radimrehurek.com/gensim/about.html\n.. _Wheel: https://pypi.python.org/pypi/gensim\n\nGensim is a Python library for *topic modelling*, *document indexing* and *similarity retrieval* with large corpora.\nTarget audience is the *natural language processing* (NLP) and *information retrieval* (IR) community.\n\nFeatures\n---------\n\n* All algorithms are **memory-independent** w.r.t. 
the corpus size (can process input larger than RAM, streamed, out-of-core)\n* **Intuitive interfaces**\n\n * easy to plug in your own input corpus/datastream (simple streaming API)\n * easy to extend with other Vector Space algorithms (simple transformation API)\n\n* Efficient multicore implementations of popular algorithms, such as online **Latent Semantic Analysis (LSA/LSI/SVD)**,\n **Latent Dirichlet Allocation (LDA)**, **Random Projections (RP)**, **Hierarchical Dirichlet Process (HDP)** or **word2vec deep learning**.\n* **Distributed computing**: can run *Latent Semantic Analysis* and *Latent Dirichlet Allocation* on a cluster of computers.\n* Extensive `documentation and Jupyter Notebook tutorials <https://github.com/RaRe-Technologies/gensim/#documentation>`_.\n\n\nIf this feature list left you scratching your head, you can first read more about the `Vector\nSpace Model <http://en.wikipedia.org/wiki/Vector_space_model>`_ and `unsupervised\ndocument analysis <http://en.wikipedia.org/wiki/Latent_semantic_indexing>`_ on Wikipedia.\n\nInstallation\n------------\n\nThis software depends on `NumPy and Scipy <http://www.scipy.org/Download>`_, two Python packages for scientific computing.\nYou must have them installed prior to installing `gensim`.\n\nIt is also recommended you install a fast BLAS library before installing NumPy. This is optional, but using an optimized BLAS such as MKL, `ATLAS <http://math-atlas.sourceforge.net/>`_ or `OpenBLAS <http://xianyi.github.io/OpenBLAS/>`_ is known to improve performance by as much as an order of magnitude. On OSX, NumPy picks up its vecLib BLAS automatically, so you don't need to do anything special.\n\nInstall the latest version of gensim::\n\n pip install --upgrade gensim\n\nOr, if you have instead downloaded and unzipped the `source tar.gz <http://pypi.python.org/pypi/gensim>`_ package::\n\n python setup.py install\n\n\nFor alternative modes of installation, see the `documentation <http://radimrehurek.com/gensim/#install>`_.\n\nGensim is being `continuously tested <http://radimrehurek.com/gensim/#testing>`_ under all `supported Python versions <https://github.com/RaRe-Technologies/gensim/wiki/Gensim-And-Compatibility>`_.\nSupport for Python 2.7 was dropped in gensim 4.0.0 – install gensim 3.8.3 if you must use Python 2.7.\n\n\nHow come gensim is so fast and memory efficient? Isn't it pure Python, and isn't Python slow and greedy?\n--------------------------------------------------------------------------------------------------------\n\nMany scientific algorithms can be expressed in terms of large matrix operations (see the BLAS note above). Gensim taps into these low-level BLAS libraries, by means of its dependency on NumPy. So while gensim-the-top-level-code is pure Python, it actually executes highly optimized Fortran/C under the hood, including multithreading (if your BLAS is so configured).\n\nMemory-wise, gensim makes heavy use of Python's built-in generators and iterators for streamed data processing. 
Memory efficiency was one of gensim's `design goals <http://radimrehurek.com/gensim/about.html>`_, and is a central feature of gensim, rather than something bolted on as an afterthought.\n\nDocumentation\n-------------\n* `QuickStart`_\n* `Tutorials`_\n* `Tutorial Videos`_\n* `Official Documentation and Walkthrough`_\n\nCiting gensim\n-------------\n\nWhen `citing gensim in academic papers and theses <https://scholar.google.cz/citations?view_op=view_citation&hl=en&user=9vG_kV0AAAAJ&citation_for_view=9vG_kV0AAAAJ:u-x6o8ySG0sC>`_, please use this BibTeX entry::\n\n @inproceedings{rehurek_lrec,\n title = {{Software Framework for Topic Modelling with Large Corpora}},\n author = {Radim {\\\\v R}eh{\\\\r u}{\\\\v r}ek and Petr Sojka},\n booktitle = {{Proceedings of the LREC 2010 Workshop on New\n Challenges for NLP Frameworks}},\n pages = {45--50},\n year = 2010,\n month = May,\n day = 22,\n publisher = {ELRA},\n address = {Valletta, Malta},\n language={English}\n }\n\n----------------\n\nGensim is open source software released under the `GNU LGPLv2.1 license <http://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html>`_.\nCopyright (c) 2009-now Radim Rehurek\n\n|Analytics|_\n\n.. |Analytics| image:: https://ga-beacon.appspot.com/UA-24066335-5/your-repo/page-name\n.. _Analytics: https://github.com/igrigorik/ga-beacon\n.. _Official Documentation and Walkthrough: http://radimrehurek.com/gensim/\n.. _Tutorials: https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#tutorials\n.. _Tutorial Videos: https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#videos\n.. _QuickStart: https://radimrehurek.com/gensim/gensim_numfocus/auto_examples/core/run_core_concepts.html\n\n\"\"\"\n\ndistributed_env = ['Pyro4 >= 4.27']\n\nvisdom_req = ['visdom >= 0.1.8, != 0.1.8.7']\n\n# packages included for build-testing everywhere\ncore_testenv = [\n 'pytest',\n# 'pytest-rerunfailures', # disabled 2020-08-28 for <https://github.com/pytest-dev/pytest-rerunfailures/issues/128>\n 'mock',\n 'cython',\n 'testfixtures',\n 'Morfessor>=2.0.2a4',\n]\n\nif not (sys.platform.lower().startswith(\"win\") and sys.version_info[:2] >= (3, 9)):\n core_testenv.extend([\n 'pyemd',\n 'nmslib',\n ])\n\n# Add additional requirements for testing on Linux that are skipped on Windows.\nlinux_testenv = core_testenv[:] + visdom_req\n\n# Skip problematic/uninstallable packages (& thus related conditional tests) in Windows builds.\n# We still test them in Linux via Travis, see linux_testenv above.\n# See https://github.com/RaRe-Technologies/gensim/pull/2814\nwin_testenv = core_testenv[:]\n\n#\n# This list partially duplicates requirements_docs.txt.\n# The main difference is that we don't include version pins here unless\n# absolutely necessary, whereas requirements_docs.txt includes pins for\n# everything, by design.\n#\n# For more info about the difference between the two:\n#\n# https://packaging.python.org/discussions/install-requires-vs-requirements/\n#\n\ndocs_testenv = core_testenv + distributed_env + visdom_req + [\n 'sphinx',\n 'sphinx-gallery',\n 'sphinxcontrib.programoutput',\n 'sphinxcontrib-napoleon',\n 'matplotlib', # expected by sphinx-gallery\n 'memory_profiler',\n 'annoy',\n 'Pyro4',\n 'nltk',\n 'testfixtures',\n 'statsmodels',\n 'pandas',\n]\n\nNUMPY_STR = 'numpy >= 1.17.0'\n#\n# We pin the Cython version for reproducibility. 
We expect our extensions\n# to build with any sane version of Cython, so we should update this pin\n# periodically.\n#\nCYTHON_STR = 'Cython==0.29.23'\n\ninstall_requires = [\n NUMPY_STR,\n 'scipy >= 0.18.1',\n 'smart_open >= 1.8.1',\n \"dataclasses; python_version < '3.7'\", # pre-py3.7 needs `dataclasses` backport for use of `dataclass` in doc2vec.py\n]\n\nsetup_requires = [NUMPY_STR]\n\nif need_cython():\n install_requires.append(CYTHON_STR)\n setup_requires.append(CYTHON_STR)\n\nsetup(\n name='gensim',\n version='4.1.3.dev0',\n description='Python framework for fast Vector Space Modelling',\n long_description=LONG_DESCRIPTION,\n\n ext_modules=ext_modules,\n cmdclass=cmdclass,\n packages=find_packages(),\n\n author=u'Radim Rehurek',\n author_email='[email protected]',\n\n url='http://radimrehurek.com/gensim',\n download_url='http://pypi.python.org/pypi/gensim',\n\n license='LGPL-2.1-only',\n\n keywords='Singular Value Decomposition, SVD, Latent Semantic Indexing, '\n 'LSA, LSI, Latent Dirichlet Allocation, LDA, '\n 'Hierarchical Dirichlet Process, HDP, Random Projections, '\n 'TFIDF, word2vec',\n\n platforms='any',\n\n zip_safe=False,\n\n classifiers=[ # from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Text Processing :: Linguistic',\n ],\n\n test_suite=\"gensim.test\",\n python_requires='>=3.6',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=linux_testenv,\n extras_require={\n 'distributed': distributed_env,\n 'test-win': win_testenv,\n 'test': linux_testenv,\n 'docs': docs_testenv,\n },\n\n include_package_data=True,\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2014 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nRun with::\n\n python ./setup.py install\n\"\"\"\n\nimport distutils.cmd\nimport distutils.log\nimport itertools\nimport os\nimport platform\nimport shutil\nimport sys\n\nfrom setuptools import Extension, find_packages, setup\nfrom setuptools.command.build_ext import build_ext\n\nc_extensions = {\n 'gensim.models.word2vec_inner': 'gensim/models/word2vec_inner.c',\n 'gensim.corpora._mmreader': 'gensim/corpora/_mmreader.c',\n 'gensim.models.fasttext_inner': 'gensim/models/fasttext_inner.c',\n 'gensim._matutils': 'gensim/_matutils.c',\n 'gensim.models.nmf_pgd': 'gensim/models/nmf_pgd.c',\n 'gensim.similarities.fastss': 'gensim/similarities/fastss.c',\n}\n\ncpp_extensions = {\n 'gensim.models.doc2vec_inner': 'gensim/models/doc2vec_inner.cpp',\n 'gensim.models.word2vec_corpusfile': 'gensim/models/word2vec_corpusfile.cpp',\n 'gensim.models.fasttext_corpusfile': 'gensim/models/fasttext_corpusfile.cpp',\n 'gensim.models.doc2vec_corpusfile': 'gensim/models/doc2vec_corpusfile.cpp',\n}\n\n\ndef need_cython():\n \"\"\"Return True if we need Cython to translate any of the extensions.\n\n If the extensions have already been translated to C/C++, then we don't need\n to install Cython and perform the translation.\n\n \"\"\"\n expected = list(c_extensions.values()) + list(cpp_extensions.values())\n return any([not os.path.isfile(f) for f in expected])\n\n\ndef make_c_ext(use_cython=False):\n for module, source in c_extensions.items():\n if use_cython:\n source = source.replace('.c', '.pyx')\n extra_args = []\n# extra_args.extend(['-g', '-O0']) # uncomment if optimization limiting crash info\n yield Extension(\n module,\n sources=[source],\n language='c',\n extra_compile_args=extra_args,\n )\n\n\ndef make_cpp_ext(use_cython=False):\n extra_args = []\n system = platform.system()\n\n if system == 'Linux':\n extra_args.append('-std=c++11')\n elif system == 'Darwin':\n extra_args.extend(['-stdlib=libc++', '-std=c++11'])\n# extra_args.extend(['-g', '-O0']) # uncomment if optimization limiting crash info\n for module, source in cpp_extensions.items():\n if use_cython:\n source = source.replace('.cpp', '.pyx')\n yield Extension(\n module,\n sources=[source],\n language='c++',\n extra_compile_args=extra_args,\n extra_link_args=extra_args,\n )\n\n\n#\n# We use use_cython=False here for two reasons:\n#\n# 1. Cython may not be available at this stage\n# 2. 
The actual translation from Cython to C/C++ happens inside CustomBuildExt\n#\next_modules = list(itertools.chain(make_c_ext(use_cython=False), make_cpp_ext(use_cython=False)))\n\n\nclass CustomBuildExt(build_ext):\n \"\"\"Custom build_ext action with bootstrapping.\n\n We need this in order to use numpy and Cython in this script without\n importing them at module level, because they may not be available yet.\n \"\"\"\n #\n # http://stackoverflow.com/questions/19919905/how-to-bootstrap-numpy-installation-in-setup-py\n #\n def finalize_options(self):\n build_ext.finalize_options(self)\n # Prevent numpy from thinking it is still in its setup process:\n # https://docs.python.org/2/library/__builtin__.html#module-__builtin__\n __builtins__.__NUMPY_SETUP__ = False\n\n import numpy\n self.include_dirs.append(numpy.get_include())\n\n if need_cython():\n import Cython.Build\n Cython.Build.cythonize(list(make_c_ext(use_cython=True)))\n Cython.Build.cythonize(list(make_cpp_ext(use_cython=True)))\n\n\nclass CleanExt(distutils.cmd.Command):\n description = 'Remove C sources, C++ sources and binaries for gensim extensions'\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n for root, dirs, files in os.walk('gensim'):\n files = [\n os.path.join(root, f)\n for f in files\n if os.path.splitext(f)[1] in ('.c', '.cpp', '.so')\n ]\n for f in files:\n self.announce('removing %s' % f, level=distutils.log.INFO)\n os.unlink(f)\n\n if os.path.isdir('build'):\n self.announce('recursively removing build', level=distutils.log.INFO)\n shutil.rmtree('build')\n\n\ncmdclass = {'build_ext': CustomBuildExt, 'clean_ext': CleanExt}\n\nWHEELHOUSE_UPLOADER_COMMANDS = {'fetch_artifacts', 'upload_all'}\nif WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):\n import wheelhouse_uploader.cmd\n cmdclass.update(vars(wheelhouse_uploader.cmd))\n\n\nLONG_DESCRIPTION = u\"\"\"\n==============================================\ngensim -- Topic Modelling in Python\n==============================================\n\n|GA|_\n|Wheel|_\n\n.. |GA| image:: https://github.com/RaRe-Technologies/gensim/actions/workflows/tests.yml/badge.svg?branch=develop\n.. |Wheel| image:: https://img.shields.io/pypi/wheel/gensim.svg\n\n.. _GA: https://github.com/RaRe-Technologies/gensim/actions\n.. _Downloads: https://pypi.python.org/pypi/gensim\n.. _License: http://radimrehurek.com/gensim/about.html\n.. _Wheel: https://pypi.python.org/pypi/gensim\n\nGensim is a Python library for *topic modelling*, *document indexing* and *similarity retrieval* with large corpora.\nTarget audience is the *natural language processing* (NLP) and *information retrieval* (IR) community.\n\nFeatures\n---------\n\n* All algorithms are **memory-independent** w.r.t. 
the corpus size (can process input larger than RAM, streamed, out-of-core)\n* **Intuitive interfaces**\n\n * easy to plug in your own input corpus/datastream (simple streaming API)\n * easy to extend with other Vector Space algorithms (simple transformation API)\n\n* Efficient multicore implementations of popular algorithms, such as online **Latent Semantic Analysis (LSA/LSI/SVD)**,\n **Latent Dirichlet Allocation (LDA)**, **Random Projections (RP)**, **Hierarchical Dirichlet Process (HDP)** or **word2vec deep learning**.\n* **Distributed computing**: can run *Latent Semantic Analysis* and *Latent Dirichlet Allocation* on a cluster of computers.\n* Extensive `documentation and Jupyter Notebook tutorials <https://github.com/RaRe-Technologies/gensim/#documentation>`_.\n\n\nIf this feature list left you scratching your head, you can first read more about the `Vector\nSpace Model <http://en.wikipedia.org/wiki/Vector_space_model>`_ and `unsupervised\ndocument analysis <http://en.wikipedia.org/wiki/Latent_semantic_indexing>`_ on Wikipedia.\n\nInstallation\n------------\n\nThis software depends on `NumPy and Scipy <http://www.scipy.org/Download>`_, two Python packages for scientific computing.\nYou must have them installed prior to installing `gensim`.\n\nIt is also recommended you install a fast BLAS library before installing NumPy. This is optional, but using an optimized BLAS such as MKL, `ATLAS <http://math-atlas.sourceforge.net/>`_ or `OpenBLAS <http://xianyi.github.io/OpenBLAS/>`_ is known to improve performance by as much as an order of magnitude. On OSX, NumPy picks up its vecLib BLAS automatically, so you don't need to do anything special.\n\nInstall the latest version of gensim::\n\n pip install --upgrade gensim\n\nOr, if you have instead downloaded and unzipped the `source tar.gz <http://pypi.python.org/pypi/gensim>`_ package::\n\n python setup.py install\n\n\nFor alternative modes of installation, see the `documentation <http://radimrehurek.com/gensim/#install>`_.\n\nGensim is being `continuously tested <http://radimrehurek.com/gensim/#testing>`_ under all `supported Python versions <https://github.com/RaRe-Technologies/gensim/wiki/Gensim-And-Compatibility>`_.\nSupport for Python 2.7 was dropped in gensim 4.0.0 – install gensim 3.8.3 if you must use Python 2.7.\n\n\nHow come gensim is so fast and memory efficient? Isn't it pure Python, and isn't Python slow and greedy?\n--------------------------------------------------------------------------------------------------------\n\nMany scientific algorithms can be expressed in terms of large matrix operations (see the BLAS note above). Gensim taps into these low-level BLAS libraries, by means of its dependency on NumPy. So while gensim-the-top-level-code is pure Python, it actually executes highly optimized Fortran/C under the hood, including multithreading (if your BLAS is so configured).\n\nMemory-wise, gensim makes heavy use of Python's built-in generators and iterators for streamed data processing. 
Memory efficiency was one of gensim's `design goals <http://radimrehurek.com/gensim/about.html>`_, and is a central feature of gensim, rather than something bolted on as an afterthought.\n\nDocumentation\n-------------\n* `QuickStart`_\n* `Tutorials`_\n* `Tutorial Videos`_\n* `Official Documentation and Walkthrough`_\n\nCiting gensim\n-------------\n\nWhen `citing gensim in academic papers and theses <https://scholar.google.cz/citations?view_op=view_citation&hl=en&user=9vG_kV0AAAAJ&citation_for_view=9vG_kV0AAAAJ:u-x6o8ySG0sC>`_, please use this BibTeX entry::\n\n @inproceedings{rehurek_lrec,\n title = {{Software Framework for Topic Modelling with Large Corpora}},\n author = {Radim {\\\\v R}eh{\\\\r u}{\\\\v r}ek and Petr Sojka},\n booktitle = {{Proceedings of the LREC 2010 Workshop on New\n Challenges for NLP Frameworks}},\n pages = {45--50},\n year = 2010,\n month = May,\n day = 22,\n publisher = {ELRA},\n address = {Valletta, Malta},\n language={English}\n }\n\n----------------\n\nGensim is open source software released under the `GNU LGPLv2.1 license <http://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html>`_.\nCopyright (c) 2009-now Radim Rehurek\n\n|Analytics|_\n\n.. |Analytics| image:: https://ga-beacon.appspot.com/UA-24066335-5/your-repo/page-name\n.. _Analytics: https://github.com/igrigorik/ga-beacon\n.. _Official Documentation and Walkthrough: http://radimrehurek.com/gensim/\n.. _Tutorials: https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#tutorials\n.. _Tutorial Videos: https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#videos\n.. _QuickStart: https://radimrehurek.com/gensim/gensim_numfocus/auto_examples/core/run_core_concepts.html\n\n\"\"\"\n\ndistributed_env = ['Pyro4 >= 4.27']\n\nvisdom_req = ['visdom >= 0.1.8, != 0.1.8.7']\n\n# packages included for build-testing everywhere\ncore_testenv = [\n 'pytest',\n 'pytest-cov',\n# 'pytest-rerunfailures', # disabled 2020-08-28 for <https://github.com/pytest-dev/pytest-rerunfailures/issues/128>\n 'mock',\n 'cython',\n 'testfixtures',\n 'Morfessor>=2.0.2a4',\n]\n\nif not (sys.platform.lower().startswith(\"win\") and sys.version_info[:2] >= (3, 9)):\n core_testenv.extend([\n 'pyemd',\n 'nmslib',\n ])\n\n# Add additional requirements for testing on Linux that are skipped on Windows.\nlinux_testenv = core_testenv[:] + visdom_req\n\n# Skip problematic/uninstallable packages (& thus related conditional tests) in Windows builds.\n# We still test them in Linux via Travis, see linux_testenv above.\n# See https://github.com/RaRe-Technologies/gensim/pull/2814\nwin_testenv = core_testenv[:]\n\n#\n# This list partially duplicates requirements_docs.txt.\n# The main difference is that we don't include version pins here unless\n# absolutely necessary, whereas requirements_docs.txt includes pins for\n# everything, by design.\n#\n# For more info about the difference between the two:\n#\n# https://packaging.python.org/discussions/install-requires-vs-requirements/\n#\n\ndocs_testenv = core_testenv + distributed_env + visdom_req + [\n 'sphinx',\n 'sphinx-gallery',\n 'sphinxcontrib.programoutput',\n 'sphinxcontrib-napoleon',\n 'matplotlib', # expected by sphinx-gallery\n 'memory_profiler',\n 'annoy',\n 'Pyro4',\n 'nltk',\n 'testfixtures',\n 'statsmodels',\n 'pandas',\n]\n\nNUMPY_STR = 'numpy >= 1.17.0'\n#\n# We pin the Cython version for reproducibility. 
We expect our extensions\n# to build with any sane version of Cython, so we should update this pin\n# periodically.\n#\nCYTHON_STR = 'Cython==0.29.23'\n\ninstall_requires = [\n NUMPY_STR,\n 'scipy >= 0.18.1',\n 'smart_open >= 1.8.1',\n \"dataclasses; python_version < '3.7'\", # pre-py3.7 needs `dataclasses` backport for use of `dataclass` in doc2vec.py\n]\n\nsetup_requires = [NUMPY_STR]\n\nif need_cython():\n install_requires.append(CYTHON_STR)\n setup_requires.append(CYTHON_STR)\n\nsetup(\n name='gensim',\n version='4.1.3.dev0',\n description='Python framework for fast Vector Space Modelling',\n long_description=LONG_DESCRIPTION,\n\n ext_modules=ext_modules,\n cmdclass=cmdclass,\n packages=find_packages(),\n\n author=u'Radim Rehurek',\n author_email='[email protected]',\n\n url='http://radimrehurek.com/gensim',\n download_url='http://pypi.python.org/pypi/gensim',\n\n license='LGPL-2.1-only',\n\n keywords='Singular Value Decomposition, SVD, Latent Semantic Indexing, '\n 'LSA, LSI, Latent Dirichlet Allocation, LDA, '\n 'Hierarchical Dirichlet Process, HDP, Random Projections, '\n 'TFIDF, word2vec',\n\n platforms='any',\n\n zip_safe=False,\n\n classifiers=[ # from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Text Processing :: Linguistic',\n ],\n\n test_suite=\"gensim.test\",\n python_requires='>=3.6',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=linux_testenv,\n extras_require={\n 'distributed': distributed_env,\n 'test-win': win_testenv,\n 'test': linux_testenv,\n 'docs': docs_testenv,\n },\n\n include_package_data=True,\n)\n",
"path": "setup.py"
}
] | diff --git a/.github/workflows/build-wheels.yml b/.github/workflows/build-wheels.yml
index 85e8637b86..ebe2201a6d 100644
--- a/.github/workflows/build-wheels.yml
+++ b/.github/workflows/build-wheels.yml
@@ -114,7 +114,7 @@ jobs:
PLAT: x86_64
UNICODE_WIDTH: 32
MB_PYTHON_VERSION: ${{ matrix.python-version }} # MB_PYTHON_VERSION is needed by Multibuild
- TEST_DEPENDS: Morfessor==2.0.2a4 python-levenshtein==0.12.0 visdom==0.1.8.9 pytest mock cython nmslib pyemd testfixtures scikit-learn pyemd
+ TEST_DEPENDS: Morfessor==2.0.2a4 python-levenshtein==0.12.0 visdom==0.1.8.9 pytest pytest-cov mock cython nmslib pyemd testfixtures scikit-learn pyemd
DOCKER_TEST_IMAGE: multibuild/xenial_x86_64
TRAVIS_OS_NAME: ${{ matrix.travis-os-name }}
SKIP_NETWORK_TESTS: 1
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 41a608ef90..0117fb2ea1 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -53,9 +53,18 @@ jobs:
run: ulimit -c unlimited -S # enable core dumps
- name: Run tox tests
run: tox -e ${{ matrix.tox }}
+ - name: Upload coverage to Codecov
+ if: matrix.os == 'ubuntu-20.04' && matrix.python == '3.8'
+ uses: codecov/codecov-action@v2
+ with:
+ fail_ci_if_error: true
+ files: ./coverage.xml
+ verbose: true
+
+
- name: Collect corefile
if: ${{ failure() }}
run: |
pwd
COREFILE=$(find . -maxdepth 1 -name "core*" | head -n 1)
- if [[ -f "$COREFILE" ]]; then EXECFILE=$(gdb -c "$COREFILE" -batch | grep "Core was generated" | tr -d "\`" | cut -d' ' -f5); file "$COREFILE"; gdb -c "$COREFILE" "$EXECFILE" -x continuous_integration/debug.gdb -batch; fi
+ if [[ -f "$COREFILE" ]]; then EXECFILE=$(gdb -c "$COREFILE" -batch | grep "Core was generated" | tr -d "\`" | cut -d' ' -f5); file "$COREFILE"; gdb -c "$COREFILE" "$EXECFILE" -x continuous_integration/debug.gdb -batch; fi
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 81c9a81ccc..ccb1142fb6 100644
--- a/setup.py
+++ b/setup.py
@@ -268,6 +268,7 @@ def run(self):
# packages included for build-testing everywhere
core_testenv = [
'pytest',
+ 'pytest-cov',
# 'pytest-rerunfailures', # disabled 2020-08-28 for <https://github.com/pytest-dev/pytest-rerunfailures/issues/128>
'mock',
'cython',
diff --git a/tox.ini b/tox.ini
index 12811b8ba5..d3feeccca0 100644
--- a/tox.ini
+++ b/tox.ini
@@ -24,8 +24,26 @@ ignore = E203, # space before :
exclude = .venv, .git, .tox, dist, doc, build, gensim/models/deprecated
+[coverage:run]
+source=gensim
+
+[coverage:report]
+omit =
+ gensim/test/*
+ */__init__.py
+
+exclude_lines =
+ pragma: no cover
+ def __repr__
+ def __str__
+ raise AssertionError
+ raise NotImplementedError
+ if __name__ == .__main__.:
+
+ignore_errors = True
+
[pytest]
-addopts = -rfxEXs --durations=20 --showlocals
+addopts = -rfxEXs --durations=20 --showlocals --cov=gensim/ --cov-report=xml
[testenv]
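With `pytest-cov` in the test environment and the `--cov` flags in `addopts`, any tox or plain pytest run now emits a Cobertura-style `coverage.xml` that the Codecov step uploads. A small sketch of checking that report locally (the 80% threshold is an arbitrary example, not a project policy):

```python
# Sketch: run the suite with the flags from tox.ini, then inspect the
# generated coverage.xml. The 0.80 threshold is an arbitrary example.
import xml.etree.ElementTree as ET
import pytest

exit_code = pytest.main(["--cov=gensim/", "--cov-report=xml", "gensim/test"])

root = ET.parse("coverage.xml").getroot()    # Cobertura-style report
line_rate = float(root.attrib["line-rate"])  # fraction of lines covered
print("line coverage: %.1f%%" % (100 * line_rate))
if exit_code != 0 or line_rate < 0.80:
    raise SystemExit("tests failed or coverage below the example threshold")
```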
|