| field | type | min | max |
| --- | --- | --- | --- |
| in_source_id | stringlengths | 13 | 58 |
| issue | stringlengths | 3 | 241k |
| before_files | listlengths | 0 | 3 |
| after_files | listlengths | 0 | 3 |
| pr_diff | stringlengths | 109 | 107M |
RedHatInsights__insights-core-1641
RedhatRelease parser failed to parse minor release version in some scenarios

In a few cases where the redhat_release content is similar to the example below, the RedhatRelease parser fails to extract the minor version from it.

Run:

```
>>> from insights.parsers.redhat_release import RedhatRelease
>>> from insights.tests import context_wrap
>>> RedhatRelease(context_wrap("Red Hat Enterprise Linux release 7.5-0.14")).major
7
>>> RedhatRelease(context_wrap("Red Hat Enterprise Linux release 7.5-0.14")).minor
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/opt/insights/insights-core/insights/parsers/redhat_release.py", line 59, in minor
    return int(s[1])
ValueError: invalid literal for int() with base 10: '5-0'
>>> RedhatRelease(context_wrap("Red Hat Enterprise Linux release 7.5-0.14")).version
'7.5-0.14'
```
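To make the failure concrete: splitting the raw version string on `.` alone leaves the `-0` release suffix fused to the minor component, which is what `int()` then rejects. A minimal standalone illustration in plain Python, independent of insights-core (the patch further down applies exactly the second split order):

```python
version = "7.5-0.14"

# Splitting on "." alone leaves the "-0" suffix on the minor component:
print(version.split("."))                    # ['7', '5-0', '14'] -> int('5-0') raises ValueError

# Dropping the "-<release>" suffix first recovers a clean minor version:
print(version.split("-", 1)[0].split("."))   # ['7', '5'] -> minor == 5
```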
[ { "content": "\"\"\"\nredhat-release - File ``/etc/redhat-release``\n=============================================\n\nThis module provides plugins access to file ``/etc/redhat-release``\n\nTypical content of file ``/etc/redhat-release`` is::\n\n Red Hat Enterprise Linux Server release 7.2 (Maipo)\n\nThis module parses the file content and stores data in the dict ``self.parsed``.\nThe version info can also be get via ``obj.major`` and ``obj.minor``.\nProperty ``is_rhel`` and ``is_hypervisor`` specifies the host type.\n\nExamples:\n >>> rh_rls_content = '''\n ... Red Hat Enterprise Linux Server release 7.2 (Maipo)\n ... '''.strip()\n >>> from insights.tests import context_wrap\n >>> shared = {RedhatRelease: RedhatRelease(context_wrap(rh_rls_content))}\n >>> release = shared[RedhatRelease]\n >>> assert release.raw == rh_rls_content\n >>> assert release.major == 7\n >>> assert release.minor == 2\n >>> assert release.version == \"7.2\"\n >>> assert release.is_rhel\n >>> assert release.product == \"Red Hat Enterprise Linux Server\"\n\"\"\"\nfrom .. import Parser, parser\nfrom ..specs import Specs\n\n\n@parser(Specs.redhat_release)\nclass RedhatRelease(Parser):\n \"\"\"Parses the content of file ``/etc/redhat-release``.\"\"\"\n\n def parse_content(self, content):\n self.raw = content[0]\n product, _, version_name = [v.strip() for v in content[0].partition(\"release\")]\n version_name_split = [v.strip() for v in version_name.split(None, 1)]\n code_name = (version_name_split[1].strip(\"()\")\n if len(version_name_split) > 1 else None)\n self.parsed = {\n \"product\": product,\n \"version\": version_name_split[0],\n \"code_name\": code_name\n }\n\n @property\n def major(self):\n \"\"\"int: the major version of this OS.\"\"\"\n return int(self.parsed[\"version\"].split(\".\")[0])\n\n @property\n def minor(self):\n \"\"\"int: the minor version of this OS.\"\"\"\n s = self.parsed[\"version\"].split(\".\")\n if len(s) > 1:\n return int(s[1])\n\n @property\n def version(self):\n \"\"\"string: version of this OS.\"\"\"\n return self.parsed[\"version\"]\n\n @property\n def is_rhel(self):\n \"\"\"bool: True if this OS belong to RHEL, else False.\"\"\"\n return \"Red Hat Enterprise Linux\" in self.parsed[\"product\"]\n\n @property\n def product(self):\n \"\"\"string: product of this OS.\"\"\"\n return self.parsed[\"product\"]\n", "path": "insights/parsers/redhat_release.py" } ]
[ { "content": "\"\"\"\nredhat-release - File ``/etc/redhat-release``\n=============================================\n\nThis module provides plugins access to file ``/etc/redhat-release``\n\nTypical content of file ``/etc/redhat-release`` is::\n\n Red Hat Enterprise Linux Server release 7.2 (Maipo)\n\nThis module parses the file content and stores data in the dict ``self.parsed``.\nThe version info can also be get via ``obj.major`` and ``obj.minor``.\nProperty ``is_rhel`` and ``is_hypervisor`` specifies the host type.\n\nExamples:\n >>> rh_rls_content = '''\n ... Red Hat Enterprise Linux Server release 7.2 (Maipo)\n ... '''.strip()\n >>> from insights.tests import context_wrap\n >>> shared = {RedhatRelease: RedhatRelease(context_wrap(rh_rls_content))}\n >>> release = shared[RedhatRelease]\n >>> assert release.raw == rh_rls_content\n >>> assert release.major == 7\n >>> assert release.minor == 2\n >>> assert release.version == \"7.2\"\n >>> assert release.is_rhel\n >>> assert release.product == \"Red Hat Enterprise Linux Server\"\n\"\"\"\nfrom .. import Parser, parser\nfrom ..specs import Specs\n\n\n@parser(Specs.redhat_release)\nclass RedhatRelease(Parser):\n \"\"\"Parses the content of file ``/etc/redhat-release``.\"\"\"\n\n def parse_content(self, content):\n self.raw = content[0]\n product, _, version_name = [v.strip() for v in content[0].partition(\"release\")]\n version_name_split = [v.strip() for v in version_name.split(None, 1)]\n code_name = (version_name_split[1].strip(\"()\")\n if len(version_name_split) > 1 else None)\n self.parsed = {\n \"product\": product,\n \"version\": version_name_split[0],\n \"code_name\": code_name\n }\n\n @property\n def major(self):\n \"\"\"int: the major version of this OS.\"\"\"\n return int(self.parsed[\"version\"].split(\".\")[0])\n\n @property\n def minor(self):\n \"\"\"int: the minor version of this OS.\"\"\"\n s = self.parsed[\"version\"].split(\"-\", 1)[0].split(\".\")\n if len(s) > 1:\n return int(s[1])\n\n @property\n def version(self):\n \"\"\"string: version of this OS.\"\"\"\n return self.parsed[\"version\"]\n\n @property\n def is_rhel(self):\n \"\"\"bool: True if this OS belong to RHEL, else False.\"\"\"\n return \"Red Hat Enterprise Linux\" in self.parsed[\"product\"]\n\n @property\n def product(self):\n \"\"\"string: product of this OS.\"\"\"\n return self.parsed[\"product\"]\n", "path": "insights/parsers/redhat_release.py" } ]
diff --git a/insights/parsers/redhat_release.py b/insights/parsers/redhat_release.py index a0eccf7e1c..5bc134c8b6 100644 --- a/insights/parsers/redhat_release.py +++ b/insights/parsers/redhat_release.py @@ -54,7 +54,7 @@ def major(self): @property def minor(self): """int: the minor version of this OS.""" - s = self.parsed["version"].split(".") + s = self.parsed["version"].split("-", 1)[0].split(".") if len(s) > 1: return int(s[1]) diff --git a/insights/parsers/tests/test_redhat_release.py b/insights/parsers/tests/test_redhat_release.py index f91ed08dde..8da2bb10c0 100644 --- a/insights/parsers/tests/test_redhat_release.py +++ b/insights/parsers/tests/test_redhat_release.py @@ -10,6 +10,10 @@ Red Hat Enterprise Linux Server release 7.2 (Maipo) """.strip() +REDHAT_RELEASE3 = """ +Red Hat Enterprise Linux release 7.5-0.14 +""".strip() + RHVH_RHV40 = """ Red Hat Enterprise Linux release 7.3 """.strip() @@ -43,6 +47,16 @@ def test_rhe7(): assert release.product == "Red Hat Enterprise Linux Server" +def test_rhe75_0_14(): + release = RedhatRelease(context_wrap(REDHAT_RELEASE3)) + assert release.raw == REDHAT_RELEASE3 + assert release.major == 7 + assert release.minor == 5 + assert release.version == "7.5-0.14" + assert release.is_rhel + assert release.product == "Red Hat Enterprise Linux" + + def test_rhevh35(): release = RedhatRelease(context_wrap(RHEVH_RHEV35)) assert release.raw == RHEVH_RHEV35
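With the patch applied, the original reproduction passes. A quick check mirroring the new `test_rhe75_0_14` test case, assuming a patched insights-core checkout is on the path:

```python
from insights.parsers.redhat_release import RedhatRelease
from insights.tests import context_wrap

release = RedhatRelease(context_wrap("Red Hat Enterprise Linux release 7.5-0.14"))
assert release.major == 7
assert release.minor == 5            # previously raised ValueError: '5-0'
assert release.version == "7.5-0.14"
assert release.product == "Red Hat Enterprise Linux"
```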
ipython__ipython-11978
7.10 breaking tests with exception in publish

The new 7.10 release is breaking Bokeh unit tests with an exception coming from within ipython:

```
self = <IPython.core.displaypub.DisplayPublisher object at 0x11883d7f0>
data = {'text/html': '\n    <div class="bk-root">\n        <a href="https://bokeh.org" target="_blank" class="bk-logo bk-logo...version \'1.0\' from Bokeh development version \'1.0-1-abc\'. This configuration is unsupported and may not work!</p>'}
metadata = None, source = None, transient = None, update = False, kwargs = {}
handlers = {}

<< omitted >>

        handlers = {}
        if self.shell is not None:
>           handlers = self.shell.mime_renderers
E           AttributeError: 'InteractiveShell' object has no attribute 'mime_renderers'

../miniconda/envs/testenv/lib/python3.6/site-packages/IPython/core/displaypub.py:108: AttributeError
```

Is this an intentional change (documented anywhere?) or a regression/bug?

cc @Carreau
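The traceback boils down to an unguarded attribute access on a shell object that may not define `mime_renderers`. A defensive lookup with `getattr` and an empty default, which is the pattern the fix below adopts, sidesteps the `AttributeError`. In this sketch, `get_mime_handlers` and `MinimalShell` are illustrative helpers, not IPython API:

```python
def get_mime_handlers(shell):
    """Return the shell's MIME renderers, or {} when the shell lacks them."""
    if shell is None:
        return {}
    return getattr(shell, "mime_renderers", {})

class MinimalShell:  # hypothetical stand-in for a shell without the attribute
    pass

assert get_mime_handlers(MinimalShell()) == {}
assert get_mime_handlers(None) == {}
```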
[ { "content": "\"\"\"An interface for publishing rich data to frontends.\n\nThere are two components of the display system:\n\n* Display formatters, which take a Python object and compute the\n representation of the object in various formats (text, HTML, SVG, etc.).\n* The display publisher that is used to send the representation data to the\n various frontends.\n\nThis module defines the logic display publishing. The display publisher uses\nthe ``display_data`` message type that is defined in the IPython messaging\nspec.\n\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n\nimport sys\n\nfrom traitlets.config.configurable import Configurable\nfrom traitlets import List, Dict\n\n# This used to be defined here - it is imported for backwards compatibility\nfrom .display import publish_display_data\n\n#-----------------------------------------------------------------------------\n# Main payload class\n#-----------------------------------------------------------------------------\n\n\nclass DisplayPublisher(Configurable):\n \"\"\"A traited class that publishes display data to frontends.\n\n Instances of this class are created by the main IPython object and should\n be accessed there.\n \"\"\"\n\n def __init__(self, shell=None, *args, **kwargs):\n self.shell = shell\n super().__init__(*args, **kwargs)\n\n def _validate_data(self, data, metadata=None):\n \"\"\"Validate the display data.\n\n Parameters\n ----------\n data : dict\n The formata data dictionary.\n metadata : dict\n Any metadata for the data.\n \"\"\"\n\n if not isinstance(data, dict):\n raise TypeError('data must be a dict, got: %r' % data)\n if metadata is not None:\n if not isinstance(metadata, dict):\n raise TypeError('metadata must be a dict, got: %r' % data)\n\n # use * to indicate transient, update are keyword-only\n def publish(self, data, metadata=None, source=None, *, transient=None, update=False, **kwargs) -> None:\n \"\"\"Publish data and metadata to all frontends.\n\n See the ``display_data`` message in the messaging documentation for\n more details about this message type.\n\n The following MIME types are currently implemented:\n\n * text/plain\n * text/html\n * text/markdown\n * text/latex\n * application/json\n * application/javascript\n * image/png\n * image/jpeg\n * image/svg+xml\n\n Parameters\n ----------\n data : dict\n A dictionary having keys that are valid MIME types (like\n 'text/plain' or 'image/svg+xml') and values that are the data for\n that MIME type. The data itself must be a JSON'able data\n structure. Minimally all data should have the 'text/plain' data,\n which can be displayed by all frontends. If more than the plain\n text is given, it is up to the frontend to decide which\n representation to use.\n metadata : dict\n A dictionary for metadata related to the data. This can contain\n arbitrary key, value pairs that frontends can use to interpret\n the data. 
Metadata specific to each mime-type can be specified\n in the metadata dict with the same mime-type keys as\n the data itself.\n source : str, deprecated\n Unused.\n transient: dict, keyword-only\n A dictionary for transient data.\n Data in this dictionary should not be persisted as part of saving this output.\n Examples include 'display_id'.\n update: bool, keyword-only, default: False\n If True, only update existing outputs with the same display_id,\n rather than creating a new output.\n \"\"\"\n\n handlers = {}\n if self.shell is not None:\n handlers = self.shell.mime_renderers\n\n for mime, handler in handlers.items():\n if mime in data:\n handler(data[mime], metadata.get(mime, None))\n return\n\n if 'text/plain' in data:\n print(data['text/plain'])\n\n def clear_output(self, wait=False):\n \"\"\"Clear the output of the cell receiving output.\"\"\"\n print('\\033[2K\\r', end='')\n sys.stdout.flush()\n print('\\033[2K\\r', end='')\n sys.stderr.flush()\n\n\nclass CapturingDisplayPublisher(DisplayPublisher):\n \"\"\"A DisplayPublisher that stores\"\"\"\n outputs = List()\n\n def publish(self, data, metadata=None, source=None, *, transient=None, update=False):\n self.outputs.append({'data':data, 'metadata':metadata,\n 'transient':transient, 'update':update})\n\n def clear_output(self, wait=False):\n super(CapturingDisplayPublisher, self).clear_output(wait)\n\n # empty the list, *do not* reassign a new list\n self.outputs.clear()\n", "path": "IPython/core/displaypub.py" } ]
[ { "content": "\"\"\"An interface for publishing rich data to frontends.\n\nThere are two components of the display system:\n\n* Display formatters, which take a Python object and compute the\n representation of the object in various formats (text, HTML, SVG, etc.).\n* The display publisher that is used to send the representation data to the\n various frontends.\n\nThis module defines the logic display publishing. The display publisher uses\nthe ``display_data`` message type that is defined in the IPython messaging\nspec.\n\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n\nimport sys\n\nfrom traitlets.config.configurable import Configurable\nfrom traitlets import List, Dict\n\n# This used to be defined here - it is imported for backwards compatibility\nfrom .display import publish_display_data\n\n#-----------------------------------------------------------------------------\n# Main payload class\n#-----------------------------------------------------------------------------\n\n\nclass DisplayPublisher(Configurable):\n \"\"\"A traited class that publishes display data to frontends.\n\n Instances of this class are created by the main IPython object and should\n be accessed there.\n \"\"\"\n\n def __init__(self, shell=None, *args, **kwargs):\n self.shell = shell\n super().__init__(*args, **kwargs)\n\n def _validate_data(self, data, metadata=None):\n \"\"\"Validate the display data.\n\n Parameters\n ----------\n data : dict\n The formata data dictionary.\n metadata : dict\n Any metadata for the data.\n \"\"\"\n\n if not isinstance(data, dict):\n raise TypeError('data must be a dict, got: %r' % data)\n if metadata is not None:\n if not isinstance(metadata, dict):\n raise TypeError('metadata must be a dict, got: %r' % data)\n\n # use * to indicate transient, update are keyword-only\n def publish(self, data, metadata=None, source=None, *, transient=None, update=False, **kwargs) -> None:\n \"\"\"Publish data and metadata to all frontends.\n\n See the ``display_data`` message in the messaging documentation for\n more details about this message type.\n\n The following MIME types are currently implemented:\n\n * text/plain\n * text/html\n * text/markdown\n * text/latex\n * application/json\n * application/javascript\n * image/png\n * image/jpeg\n * image/svg+xml\n\n Parameters\n ----------\n data : dict\n A dictionary having keys that are valid MIME types (like\n 'text/plain' or 'image/svg+xml') and values that are the data for\n that MIME type. The data itself must be a JSON'able data\n structure. Minimally all data should have the 'text/plain' data,\n which can be displayed by all frontends. If more than the plain\n text is given, it is up to the frontend to decide which\n representation to use.\n metadata : dict\n A dictionary for metadata related to the data. This can contain\n arbitrary key, value pairs that frontends can use to interpret\n the data. 
Metadata specific to each mime-type can be specified\n in the metadata dict with the same mime-type keys as\n the data itself.\n source : str, deprecated\n Unused.\n transient: dict, keyword-only\n A dictionary for transient data.\n Data in this dictionary should not be persisted as part of saving this output.\n Examples include 'display_id'.\n update: bool, keyword-only, default: False\n If True, only update existing outputs with the same display_id,\n rather than creating a new output.\n \"\"\"\n\n handlers = {}\n if self.shell is not None:\n handlers = getattr(self.shell, 'mime_renderers', {})\n\n for mime, handler in handlers.items():\n if mime in data:\n handler(data[mime], metadata.get(mime, None))\n return\n\n if 'text/plain' in data:\n print(data['text/plain'])\n\n def clear_output(self, wait=False):\n \"\"\"Clear the output of the cell receiving output.\"\"\"\n print('\\033[2K\\r', end='')\n sys.stdout.flush()\n print('\\033[2K\\r', end='')\n sys.stderr.flush()\n\n\nclass CapturingDisplayPublisher(DisplayPublisher):\n \"\"\"A DisplayPublisher that stores\"\"\"\n outputs = List()\n\n def publish(self, data, metadata=None, source=None, *, transient=None, update=False):\n self.outputs.append({'data':data, 'metadata':metadata,\n 'transient':transient, 'update':update})\n\n def clear_output(self, wait=False):\n super(CapturingDisplayPublisher, self).clear_output(wait)\n\n # empty the list, *do not* reassign a new list\n self.outputs.clear()\n", "path": "IPython/core/displaypub.py" } ]
diff --git a/IPython/core/displaypub.py b/IPython/core/displaypub.py index d769692e969..f651a2a0cf6 100644 --- a/IPython/core/displaypub.py +++ b/IPython/core/displaypub.py @@ -105,7 +105,7 @@ def publish(self, data, metadata=None, source=None, *, transient=None, update=Fa handlers = {} if self.shell is not None: - handlers = self.shell.mime_renderers + handlers = getattr(self.shell, 'mime_renderers', {}) for mime, handler in handlers.items(): if mime in data:
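With the `getattr` fallback in place, publishing against a shell that lacks `mime_renderers` degrades to the `text/plain` path instead of raising. A small smoke test against the patched module; `BareShell` is a hypothetical stand-in for a shell object like the one in the Bokeh failure:

```python
from IPython.core.displaypub import DisplayPublisher

class BareShell:  # hypothetical shell object without mime_renderers
    pass

pub = DisplayPublisher(shell=BareShell())
pub.publish({"text/plain": "hello"})  # prints "hello" instead of raising AttributeError
```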
pyodide__pyodide-3959
pyodide-build test suite fails locally

I'm trying to run the pyodide-build test suite locally inside Docker,

```
source pyodide_env.sh
pytest pyodide-build
```

and so far it has had multiple failures,

```
FAILED pyodide-build/pyodide_build/tests/test_build_env.py::TestOutOfTree::get_build_environment_vars[node] - PermissionError: [Errno 13] Permission denied: '/packages'
FAILED pyodide-build/pyodide_build/tests/test_build_env.py::TestOutOfTree::get_build_flag[node] - PermissionError: [Errno 13] Permission denied: '/packages'
FAILED pyodide-build/pyodide_build/tests/test_build_env.py::TestOutOfTree::init_environment[node] - PermissionError: [Errno 13] Permission denied: '/packages'
FAILED pyodide-build/pyodide_build/tests/test_build_env.py::TestOutOfTree::get_pyodide_root[node] - PermissionError: [Errno 13] Permission denied: '/packages'
FAILED pyodide-build/pyodide_build/tests/test_build_env.py::TestOutOfTree::in_xbuildenv[node] - PermissionError: [Errno 13] Permission denied: '/packages'
FAILED pyodide-build/pyodide_build/tests/test_pypi.py::fetch_or_build_pypi[node] - AssertionError: * Creating virtualenv isolated environment...
FAILED pyodide-build/pyodide_build/tests/test_pypi.py::fetch_or_build_pypi_with_deps_and_extras[node] - AssertionError: Successfully fetched: eth_hash-0.5.2-py3-none-any.whl
FAILED pyodide-build/pyodide_build/tests/test_pypi.py::fake_pypi_succeed[node] - AssertionError: 127.0.0.1 - - [23/Jun/2023 14:06:56] "GET /simple/resolves-package/ HTTP/1.1" 200 -
FAILED pyodide-build/pyodide_build/tests/test_pypi.py::fake_pypi_extras_build[node] - AssertionError: 127.0.0.1 - - [23/Jun/2023 14:07:02] "GET /simple/pkg-b/ HTTP/1.1" 200 -
```

Maybe I'm doing something wrong, or I should spend time reading through what we are doing in the CircleCI config, but IMO the above should have worked; otherwise we need to update the [developer instructions](https://pyodide.org/en/stable/development/testing.html#testing-and-benchmarking). Anyway, I can probably figure it out by spending enough time on it, but we probably need to improve the docs, as this is not very developer friendly.
[ { "content": "# This file contains functions for managing the Pyodide build environment.\n\nimport functools\nimport os\nimport re\nimport subprocess\nimport sys\nfrom collections.abc import Iterator\nfrom contextlib import nullcontext, redirect_stdout\nfrom io import StringIO\nfrom pathlib import Path\n\nif sys.version_info < (3, 11, 0):\n import tomli as tomllib\nelse:\n import tomllib\n\nfrom packaging.tags import Tag, compatible_tags, cpython_tags\n\nfrom .common import exit_with_stdio\nfrom .logger import logger\nfrom .recipe import load_all_recipes\n\nRUST_BUILD_PRELUDE = \"\"\"\nrustup toolchain install ${RUST_TOOLCHAIN} && rustup default ${RUST_TOOLCHAIN}\nrustup target add wasm32-unknown-emscripten --toolchain ${RUST_TOOLCHAIN}\n\"\"\"\n\n\nBUILD_VARS: set[str] = {\n \"PATH\",\n \"PYTHONPATH\",\n \"PYODIDE_JOBS\",\n \"PYODIDE_ROOT\",\n \"PYTHONINCLUDE\",\n \"NUMPY_LIB\",\n \"PYODIDE_PACKAGE_ABI\",\n \"HOME\",\n \"HOSTINSTALLDIR\",\n \"TARGETINSTALLDIR\",\n \"SYSCONFIG_NAME\",\n \"HOSTSITEPACKAGES\",\n \"PYTHON_ARCHIVE_URL\",\n \"PYTHON_ARCHIVE_SHA256\",\n \"PYVERSION\",\n \"PYMAJOR\",\n \"PYMINOR\",\n \"PYMICRO\",\n \"SIDE_MODULE_CFLAGS\",\n \"SIDE_MODULE_CXXFLAGS\",\n \"SIDE_MODULE_LDFLAGS\",\n \"STDLIB_MODULE_CFLAGS\",\n \"WASM_LIBRARY_DIR\",\n \"PKG_CONFIG_PATH\",\n \"CARGO_BUILD_TARGET\",\n \"CARGO_TARGET_WASM32_UNKNOWN_EMSCRIPTEN_LINKER\",\n \"RUSTFLAGS\",\n \"PYO3_CROSS_LIB_DIR\",\n \"PYO3_CROSS_INCLUDE_DIR\",\n \"PYODIDE_EMSCRIPTEN_VERSION\",\n \"PLATFORM_TRIPLET\",\n \"SYSCONFIGDATA_DIR\",\n \"RUST_TOOLCHAIN\",\n}\n\n\ndef init_environment(*, quiet: bool = False) -> None:\n \"\"\"\n Initialize Pyodide build environment.\n This function needs to be called before any other Pyodide build functions.\n\n Parameters\n ----------\n quiet\n If True, do not print any messages\n \"\"\"\n\n # Already initialized\n if \"PYODIDE_ROOT\" in os.environ:\n return\n\n try:\n root = search_pyodide_root(Path.cwd())\n except FileNotFoundError: # Not in Pyodide tree\n root = _init_xbuild_env(quiet=quiet)\n\n os.environ[\"PYODIDE_ROOT\"] = str(root)\n\n\ndef _init_xbuild_env(*, quiet: bool = False) -> Path:\n \"\"\"\n Initialize the build environment for out-of-tree builds.\n\n Parameters\n ----------\n quiet\n If True, do not print any messages\n\n Returns\n -------\n The path to the Pyodide root directory inside the xbuild environment\n \"\"\"\n from . 
import install_xbuildenv # avoid circular import\n\n # TODO: Do not hardcode the path\n # TODO: Add version numbers to the path\n xbuildenv_path = Path(\".pyodide-xbuildenv\").resolve()\n\n context = redirect_stdout(StringIO()) if quiet else nullcontext()\n with context:\n return install_xbuildenv.install(xbuildenv_path, download=True)\n\n\[email protected]\ndef get_pyodide_root() -> Path:\n init_environment()\n return Path(os.environ[\"PYODIDE_ROOT\"])\n\n\ndef search_pyodide_root(curdir: str | Path, *, max_depth: int = 5) -> Path:\n \"\"\"\n Recursively search for the root of the Pyodide repository,\n by looking for the pyproject.toml file in the parent directories\n which contains [tool.pyodide] section.\n \"\"\"\n\n # We want to include \"curdir\" in parent_dirs, so add a garbage suffix\n parent_dirs = (Path(curdir) / \"garbage\").parents[:max_depth]\n\n for base in parent_dirs:\n pyproject_file = base / \"pyproject.toml\"\n\n if not pyproject_file.is_file():\n continue\n\n try:\n with pyproject_file.open(\"rb\") as f:\n configs = tomllib.load(f)\n except tomllib.TOMLDecodeError as e:\n raise ValueError(f\"Could not parse {pyproject_file}.\") from e\n\n if \"tool\" in configs and \"pyodide\" in configs[\"tool\"]:\n return base\n\n raise FileNotFoundError(\n \"Could not find Pyodide root directory. If you are not in the Pyodide directory, set `PYODIDE_ROOT=<pyodide-root-directory>`.\"\n )\n\n\ndef in_xbuildenv() -> bool:\n pyodide_root = get_pyodide_root()\n return pyodide_root.name == \"pyodide-root\"\n\n\[email protected]\ndef get_build_environment_vars() -> dict[str, str]:\n \"\"\"\n Get common environment variables for the in-tree and out-of-tree build.\n \"\"\"\n env = _get_make_environment_vars().copy()\n\n # Allow users to overwrite the build environment variables by setting\n # host environment variables.\n # TODO: Add modifiable configuration file instead.\n # (https://github.com/pyodide/pyodide/pull/3737/files#r1161247201)\n env.update({key: os.environ[key] for key in BUILD_VARS if key in os.environ})\n env[\"PYODIDE\"] = \"1\"\n\n tools_dir = Path(__file__).parent / \"tools\"\n\n env[\"CMAKE_TOOLCHAIN_FILE\"] = str(\n tools_dir / \"cmake/Modules/Platform/Emscripten.cmake\"\n )\n env[\"PYO3_CONFIG_FILE\"] = str(tools_dir / \"pyo3_config.ini\")\n\n hostsitepackages = env[\"HOSTSITEPACKAGES\"]\n pythonpath = [\n hostsitepackages,\n ]\n env[\"PYTHONPATH\"] = \":\".join(pythonpath)\n\n return env\n\n\ndef _get_make_environment_vars(*, pyodide_root: Path | None = None) -> dict[str, str]:\n \"\"\"Load environment variables from Makefile.envs\n\n This allows us to set all build vars in one place\n\n Parameters\n ----------\n pyodide_root\n The root directory of the Pyodide repository. 
If None, this will be inferred.\n \"\"\"\n\n PYODIDE_ROOT = get_pyodide_root() if pyodide_root is None else pyodide_root\n environment = {}\n result = subprocess.run(\n [\"make\", \"-f\", str(PYODIDE_ROOT / \"Makefile.envs\"), \".output_vars\"],\n capture_output=True,\n text=True,\n )\n\n if result.returncode != 0:\n logger.error(\"ERROR: Failed to load environment variables from Makefile.envs\")\n exit_with_stdio(result)\n\n for line in result.stdout.splitlines():\n equalPos = line.find(\"=\")\n if equalPos != -1:\n varname = line[0:equalPos]\n\n if varname not in BUILD_VARS:\n continue\n\n value = line[equalPos + 1 :]\n value = value.strip(\"'\").strip()\n environment[varname] = value\n return environment\n\n\ndef get_build_flag(name: str) -> str:\n \"\"\"\n Get a value of a build flag.\n \"\"\"\n build_vars = get_build_environment_vars()\n if name not in build_vars:\n raise ValueError(f\"Unknown build flag: {name}\")\n\n return build_vars[name]\n\n\ndef get_pyversion_major() -> str:\n return get_build_flag(\"PYMAJOR\")\n\n\ndef get_pyversion_minor() -> str:\n return get_build_flag(\"PYMINOR\")\n\n\ndef get_pyversion_major_minor() -> str:\n return f\"{get_pyversion_major()}.{get_pyversion_minor()}\"\n\n\ndef get_pyversion() -> str:\n return f\"python{get_pyversion_major_minor()}\"\n\n\ndef get_hostsitepackages() -> str:\n return get_build_flag(\"HOSTSITEPACKAGES\")\n\n\[email protected]\ndef get_unisolated_packages() -> list[str]:\n PYODIDE_ROOT = get_pyodide_root()\n\n unisolated_file = PYODIDE_ROOT / \"unisolated.txt\"\n if unisolated_file.exists():\n # in xbuild env, read from file\n unisolated_packages = unisolated_file.read_text().splitlines()\n else:\n unisolated_packages = []\n recipe_dir = PYODIDE_ROOT / \"packages\"\n recipes = load_all_recipes(recipe_dir)\n for name, config in recipes.items():\n if config.build.cross_build_env:\n unisolated_packages.append(name)\n\n return unisolated_packages\n\n\ndef platform() -> str:\n emscripten_version = get_build_flag(\"PYODIDE_EMSCRIPTEN_VERSION\")\n version = emscripten_version.replace(\".\", \"_\")\n return f\"emscripten_{version}_wasm32\"\n\n\ndef pyodide_tags() -> Iterator[Tag]:\n \"\"\"\n Returns the sequence of tag triples for the Pyodide interpreter.\n\n The sequence is ordered in decreasing specificity.\n \"\"\"\n PYMAJOR = get_pyversion_major()\n PYMINOR = get_pyversion_minor()\n PLATFORM = platform()\n python_version = (int(PYMAJOR), int(PYMINOR))\n yield from cpython_tags(platforms=[PLATFORM], python_version=python_version)\n yield from compatible_tags(platforms=[PLATFORM], python_version=python_version)\n # Following line can be removed once packaging 22.0 is released and we update to it.\n yield Tag(interpreter=f\"cp{PYMAJOR}{PYMINOR}\", abi=\"none\", platform=\"any\")\n\n\ndef replace_so_abi_tags(wheel_dir: Path) -> None:\n \"\"\"Replace native abi tag with emscripten abi tag in .so file names\"\"\"\n import sysconfig\n\n build_soabi = sysconfig.get_config_var(\"SOABI\")\n assert build_soabi\n ext_suffix = sysconfig.get_config_var(\"EXT_SUFFIX\")\n assert ext_suffix\n build_triplet = \"-\".join(build_soabi.split(\"-\")[2:])\n host_triplet = get_build_flag(\"PLATFORM_TRIPLET\")\n for file in wheel_dir.glob(f\"**/*{ext_suffix}\"):\n file.rename(file.with_name(file.name.replace(build_triplet, host_triplet)))\n\n\ndef emscripten_version() -> str:\n return get_build_flag(\"PYODIDE_EMSCRIPTEN_VERSION\")\n\n\ndef get_emscripten_version_info() -> str:\n \"\"\"Extracted for testing purposes.\"\"\"\n return subprocess.run([\"emcc\", 
\"-v\"], capture_output=True, encoding=\"utf8\").stderr\n\n\ndef check_emscripten_version() -> None:\n needed_version = emscripten_version()\n try:\n version_info = get_emscripten_version_info()\n except FileNotFoundError:\n raise RuntimeError(\n f\"No Emscripten compiler found. Need Emscripten version {needed_version}\"\n ) from None\n installed_version = None\n try:\n for x in reversed(version_info.partition(\"\\n\")[0].split(\" \")):\n if re.match(r\"[0-9]+\\.[0-9]+\\.[0-9]+\", x):\n installed_version = x\n break\n except Exception:\n raise RuntimeError(\"Failed to determine Emscripten version.\") from None\n if installed_version is None:\n raise RuntimeError(\"Failed to determine Emscripten version.\")\n if installed_version != needed_version:\n raise RuntimeError(\n f\"Incorrect Emscripten version {installed_version}. Need Emscripten version {needed_version}\"\n )\n", "path": "pyodide-build/pyodide_build/build_env.py" } ]
[ { "content": "# This file contains functions for managing the Pyodide build environment.\n\nimport functools\nimport os\nimport re\nimport subprocess\nimport sys\nfrom collections.abc import Iterator\nfrom contextlib import nullcontext, redirect_stdout\nfrom io import StringIO\nfrom pathlib import Path\n\nif sys.version_info < (3, 11, 0):\n import tomli as tomllib\nelse:\n import tomllib\n\nfrom packaging.tags import Tag, compatible_tags, cpython_tags\n\nfrom .common import exit_with_stdio\nfrom .logger import logger\nfrom .recipe import load_all_recipes\n\nRUST_BUILD_PRELUDE = \"\"\"\nrustup toolchain install ${RUST_TOOLCHAIN} && rustup default ${RUST_TOOLCHAIN}\nrustup target add wasm32-unknown-emscripten --toolchain ${RUST_TOOLCHAIN}\n\"\"\"\n\n\nBUILD_VARS: set[str] = {\n \"PATH\",\n \"PYTHONPATH\",\n \"PYODIDE_JOBS\",\n \"PYODIDE_ROOT\",\n \"PYTHONINCLUDE\",\n \"NUMPY_LIB\",\n \"PYODIDE_PACKAGE_ABI\",\n \"HOME\",\n \"HOSTINSTALLDIR\",\n \"TARGETINSTALLDIR\",\n \"SYSCONFIG_NAME\",\n \"HOSTSITEPACKAGES\",\n \"PYTHON_ARCHIVE_URL\",\n \"PYTHON_ARCHIVE_SHA256\",\n \"PYVERSION\",\n \"PYMAJOR\",\n \"PYMINOR\",\n \"PYMICRO\",\n \"SIDE_MODULE_CFLAGS\",\n \"SIDE_MODULE_CXXFLAGS\",\n \"SIDE_MODULE_LDFLAGS\",\n \"STDLIB_MODULE_CFLAGS\",\n \"WASM_LIBRARY_DIR\",\n \"PKG_CONFIG_PATH\",\n \"CARGO_BUILD_TARGET\",\n \"CARGO_TARGET_WASM32_UNKNOWN_EMSCRIPTEN_LINKER\",\n \"RUSTFLAGS\",\n \"PYO3_CROSS_LIB_DIR\",\n \"PYO3_CROSS_INCLUDE_DIR\",\n \"PYODIDE_EMSCRIPTEN_VERSION\",\n \"PLATFORM_TRIPLET\",\n \"SYSCONFIGDATA_DIR\",\n \"RUST_TOOLCHAIN\",\n}\n\n\ndef init_environment(*, quiet: bool = False) -> None:\n \"\"\"\n Initialize Pyodide build environment.\n This function needs to be called before any other Pyodide build functions.\n\n Parameters\n ----------\n quiet\n If True, do not print any messages\n \"\"\"\n\n # Already initialized\n if \"PYODIDE_ROOT\" in os.environ:\n return\n\n try:\n root = search_pyodide_root(Path.cwd())\n except FileNotFoundError: # Not in Pyodide tree\n root = _init_xbuild_env(quiet=quiet)\n\n os.environ[\"PYODIDE_ROOT\"] = str(root)\n\n\ndef _init_xbuild_env(*, quiet: bool = False) -> Path:\n \"\"\"\n Initialize the build environment for out-of-tree builds.\n\n Parameters\n ----------\n quiet\n If True, do not print any messages\n\n Returns\n -------\n The path to the Pyodide root directory inside the xbuild environment\n \"\"\"\n from . 
import install_xbuildenv # avoid circular import\n\n # TODO: Do not hardcode the path\n # TODO: Add version numbers to the path\n xbuildenv_path = Path(\".pyodide-xbuildenv\").resolve()\n\n context = redirect_stdout(StringIO()) if quiet else nullcontext()\n with context:\n return install_xbuildenv.install(xbuildenv_path, download=True)\n\n\[email protected]\ndef get_pyodide_root() -> Path:\n init_environment()\n return Path(os.environ[\"PYODIDE_ROOT\"])\n\n\ndef search_pyodide_root(curdir: str | Path, *, max_depth: int = 5) -> Path:\n \"\"\"\n Recursively search for the root of the Pyodide repository,\n by looking for the pyproject.toml file in the parent directories\n which contains [tool.pyodide] section.\n \"\"\"\n\n # We want to include \"curdir\" in parent_dirs, so add a garbage suffix\n parent_dirs = (Path(curdir) / \"garbage\").parents[:max_depth]\n\n for base in parent_dirs:\n pyproject_file = base / \"pyproject.toml\"\n\n if not pyproject_file.is_file():\n continue\n\n try:\n with pyproject_file.open(\"rb\") as f:\n configs = tomllib.load(f)\n except tomllib.TOMLDecodeError as e:\n raise ValueError(f\"Could not parse {pyproject_file}.\") from e\n\n if \"tool\" in configs and \"pyodide\" in configs[\"tool\"]:\n return base\n\n raise FileNotFoundError(\n \"Could not find Pyodide root directory. If you are not in the Pyodide directory, set `PYODIDE_ROOT=<pyodide-root-directory>`.\"\n )\n\n\ndef in_xbuildenv() -> bool:\n pyodide_root = get_pyodide_root()\n return pyodide_root.name == \"pyodide-root\"\n\n\[email protected]\ndef get_build_environment_vars() -> dict[str, str]:\n \"\"\"\n Get common environment variables for the in-tree and out-of-tree build.\n \"\"\"\n env = _get_make_environment_vars().copy()\n\n # Allow users to overwrite the build environment variables by setting\n # host environment variables.\n # TODO: Add modifiable configuration file instead.\n # (https://github.com/pyodide/pyodide/pull/3737/files#r1161247201)\n env.update({key: os.environ[key] for key in BUILD_VARS if key in os.environ})\n env[\"PYODIDE\"] = \"1\"\n\n tools_dir = Path(__file__).parent / \"tools\"\n\n env[\"CMAKE_TOOLCHAIN_FILE\"] = str(\n tools_dir / \"cmake/Modules/Platform/Emscripten.cmake\"\n )\n env[\"PYO3_CONFIG_FILE\"] = str(tools_dir / \"pyo3_config.ini\")\n\n hostsitepackages = env[\"HOSTSITEPACKAGES\"]\n pythonpath = [\n hostsitepackages,\n ]\n env[\"PYTHONPATH\"] = \":\".join(pythonpath)\n\n return env\n\n\ndef _get_make_environment_vars(*, pyodide_root: Path | None = None) -> dict[str, str]:\n \"\"\"Load environment variables from Makefile.envs\n\n This allows us to set all build vars in one place\n\n Parameters\n ----------\n pyodide_root\n The root directory of the Pyodide repository. 
If None, this will be inferred.\n \"\"\"\n\n PYODIDE_ROOT = get_pyodide_root() if pyodide_root is None else pyodide_root\n environment = {}\n result = subprocess.run(\n [\"make\", \"-f\", str(PYODIDE_ROOT / \"Makefile.envs\"), \".output_vars\"],\n capture_output=True,\n text=True,\n env={\"PYODIDE_ROOT\": str(PYODIDE_ROOT)},\n )\n\n if result.returncode != 0:\n logger.error(\"ERROR: Failed to load environment variables from Makefile.envs\")\n exit_with_stdio(result)\n\n for line in result.stdout.splitlines():\n equalPos = line.find(\"=\")\n if equalPos != -1:\n varname = line[0:equalPos]\n\n if varname not in BUILD_VARS:\n continue\n\n value = line[equalPos + 1 :]\n value = value.strip(\"'\").strip()\n environment[varname] = value\n return environment\n\n\ndef get_build_flag(name: str) -> str:\n \"\"\"\n Get a value of a build flag.\n \"\"\"\n build_vars = get_build_environment_vars()\n if name not in build_vars:\n raise ValueError(f\"Unknown build flag: {name}\")\n\n return build_vars[name]\n\n\ndef get_pyversion_major() -> str:\n return get_build_flag(\"PYMAJOR\")\n\n\ndef get_pyversion_minor() -> str:\n return get_build_flag(\"PYMINOR\")\n\n\ndef get_pyversion_major_minor() -> str:\n return f\"{get_pyversion_major()}.{get_pyversion_minor()}\"\n\n\ndef get_pyversion() -> str:\n return f\"python{get_pyversion_major_minor()}\"\n\n\ndef get_hostsitepackages() -> str:\n return get_build_flag(\"HOSTSITEPACKAGES\")\n\n\[email protected]\ndef get_unisolated_packages() -> list[str]:\n PYODIDE_ROOT = get_pyodide_root()\n\n unisolated_file = PYODIDE_ROOT / \"unisolated.txt\"\n if unisolated_file.exists():\n # in xbuild env, read from file\n unisolated_packages = unisolated_file.read_text().splitlines()\n else:\n unisolated_packages = []\n recipe_dir = PYODIDE_ROOT / \"packages\"\n recipes = load_all_recipes(recipe_dir)\n for name, config in recipes.items():\n if config.build.cross_build_env:\n unisolated_packages.append(name)\n\n return unisolated_packages\n\n\ndef platform() -> str:\n emscripten_version = get_build_flag(\"PYODIDE_EMSCRIPTEN_VERSION\")\n version = emscripten_version.replace(\".\", \"_\")\n return f\"emscripten_{version}_wasm32\"\n\n\ndef pyodide_tags() -> Iterator[Tag]:\n \"\"\"\n Returns the sequence of tag triples for the Pyodide interpreter.\n\n The sequence is ordered in decreasing specificity.\n \"\"\"\n PYMAJOR = get_pyversion_major()\n PYMINOR = get_pyversion_minor()\n PLATFORM = platform()\n python_version = (int(PYMAJOR), int(PYMINOR))\n yield from cpython_tags(platforms=[PLATFORM], python_version=python_version)\n yield from compatible_tags(platforms=[PLATFORM], python_version=python_version)\n # Following line can be removed once packaging 22.0 is released and we update to it.\n yield Tag(interpreter=f\"cp{PYMAJOR}{PYMINOR}\", abi=\"none\", platform=\"any\")\n\n\ndef replace_so_abi_tags(wheel_dir: Path) -> None:\n \"\"\"Replace native abi tag with emscripten abi tag in .so file names\"\"\"\n import sysconfig\n\n build_soabi = sysconfig.get_config_var(\"SOABI\")\n assert build_soabi\n ext_suffix = sysconfig.get_config_var(\"EXT_SUFFIX\")\n assert ext_suffix\n build_triplet = \"-\".join(build_soabi.split(\"-\")[2:])\n host_triplet = get_build_flag(\"PLATFORM_TRIPLET\")\n for file in wheel_dir.glob(f\"**/*{ext_suffix}\"):\n file.rename(file.with_name(file.name.replace(build_triplet, host_triplet)))\n\n\ndef emscripten_version() -> str:\n return get_build_flag(\"PYODIDE_EMSCRIPTEN_VERSION\")\n\n\ndef get_emscripten_version_info() -> str:\n \"\"\"Extracted for testing 
purposes.\"\"\"\n return subprocess.run([\"emcc\", \"-v\"], capture_output=True, encoding=\"utf8\").stderr\n\n\ndef check_emscripten_version() -> None:\n needed_version = emscripten_version()\n try:\n version_info = get_emscripten_version_info()\n except FileNotFoundError:\n raise RuntimeError(\n f\"No Emscripten compiler found. Need Emscripten version {needed_version}\"\n ) from None\n installed_version = None\n try:\n for x in reversed(version_info.partition(\"\\n\")[0].split(\" \")):\n if re.match(r\"[0-9]+\\.[0-9]+\\.[0-9]+\", x):\n installed_version = x\n break\n except Exception:\n raise RuntimeError(\"Failed to determine Emscripten version.\") from None\n if installed_version is None:\n raise RuntimeError(\"Failed to determine Emscripten version.\")\n if installed_version != needed_version:\n raise RuntimeError(\n f\"Incorrect Emscripten version {installed_version}. Need Emscripten version {needed_version}\"\n )\n", "path": "pyodide-build/pyodide_build/build_env.py" } ]
diff --git a/pyodide-build/pyodide_build/build_env.py b/pyodide-build/pyodide_build/build_env.py index d37a8473e7f..7155033678c 100644 --- a/pyodide-build/pyodide_build/build_env.py +++ b/pyodide-build/pyodide_build/build_env.py @@ -199,6 +199,7 @@ def _get_make_environment_vars(*, pyodide_root: Path | None = None) -> dict[str, ["make", "-f", str(PYODIDE_ROOT / "Makefile.envs"), ".output_vars"], capture_output=True, text=True, + env={"PYODIDE_ROOT": str(PYODIDE_ROOT)}, ) if result.returncode != 0: diff --git a/pyodide-build/pyodide_build/tests/fixture.py b/pyodide-build/pyodide_build/tests/fixture.py index ef123f33c8f..060dbdf91f4 100644 --- a/pyodide-build/pyodide_build/tests/fixture.py +++ b/pyodide-build/pyodide_build/tests/fixture.py @@ -1,11 +1,14 @@ import json +import os import shutil from pathlib import Path from typing import Any import pytest -from ..common import chdir +from conftest import ROOT_PATH +from pyodide_build import build_env +from pyodide_build.common import chdir @pytest.fixture(scope="module") @@ -89,3 +92,59 @@ def temp_xbuildenv(tmp_path_factory): archive_name = shutil.make_archive("xbuildenv", "tar") yield base, archive_name + + [email protected](scope="function") +def reset_env_vars(): + # Will reset the environment variables to their original values after each test. + + os.environ.pop("PYODIDE_ROOT", None) + old_environ = dict(os.environ) + + try: + yield + finally: + os.environ.clear() + os.environ.update(old_environ) + + [email protected](scope="function") +def reset_cache(): + # Will remove all caches before each test. + + build_env.get_pyodide_root.cache_clear() + build_env.get_build_environment_vars.cache_clear() + build_env.get_unisolated_packages.cache_clear() + + yield + + [email protected](scope="function") +def xbuildenv(selenium, tmp_path, reset_env_vars, reset_cache): + import subprocess as sp + + assert "PYODIDE_ROOT" not in os.environ + + envpath = Path(tmp_path) / ".pyodide-xbuildenv" + result = sp.run( + [ + "pyodide", + "xbuildenv", + "create", + str(envpath), + "--root", + ROOT_PATH, + "--skip-missing-files", + ] + ) + + assert result.returncode == 0 + + cur_dir = os.getcwd() + + os.chdir(tmp_path) + + try: + yield tmp_path + finally: + os.chdir(cur_dir) diff --git a/pyodide-build/pyodide_build/tests/test_build_env.py b/pyodide-build/pyodide_build/tests/test_build_env.py index 73eee7f83ef..5545b0291f8 100644 --- a/pyodide-build/pyodide_build/tests/test_build_env.py +++ b/pyodide-build/pyodide_build/tests/test_build_env.py @@ -1,40 +1,15 @@ +# flake8: noqa # This file contains tests that ensure build environment is properly initialized in # both in-tree and out-of-tree builds. -# TODO: move functions that are tested here to a separate module - import os -from pathlib import Path import pytest from conftest import ROOT_PATH from pyodide_build import build_env, common - [email protected](scope="function") -def reset_env_vars(): - # Will reset the environment variables to their original values after each test. - - os.environ.pop("PYODIDE_ROOT", None) - old_environ = dict(os.environ) - - try: - yield - finally: - os.environ.clear() - os.environ.update(old_environ) - - [email protected](scope="function") -def reset_cache(): - # Will remove all caches before each test. 
- - build_env.get_pyodide_root.cache_clear() - build_env.get_build_environment_vars.cache_clear() - build_env.get_unisolated_packages.cache_clear() - - yield +from .fixture import reset_cache, reset_env_vars, xbuildenv class TestInTree: @@ -96,6 +71,13 @@ def test_get_build_environment_vars(self, reset_env_vars, reset_cache): for var in extra_vars: assert var in build_vars, f"Missing {var}" + def test_get_make_environment_vars(self, reset_env_vars, reset_cache): + make_vars = build_env._get_make_environment_vars() + assert make_vars["PYODIDE_ROOT"] == str(ROOT_PATH) + + make_vars = build_env._get_make_environment_vars(pyodide_root=ROOT_PATH) + assert make_vars["PYODIDE_ROOT"] == str(ROOT_PATH) + def test_get_build_flag(self, reset_env_vars, reset_cache): for key, val in build_env.get_build_environment_vars().items(): assert build_env.get_build_flag(key) == val @@ -141,41 +123,6 @@ def test_get_build_environment_vars_host_env( class TestOutOfTree(TestInTree): - # TODO: selenium fixture is a hack to make these tests run only after building Pyodide. - @pytest.fixture(scope="function", autouse=True) - def xbuildenv(self, selenium, tmp_path, reset_env_vars, reset_cache): - import subprocess as sp - - assert "PYODIDE_ROOT" not in os.environ - - envpath = Path(tmp_path) / ".pyodide-xbuildenv" - result = sp.run( - [ - "pyodide", - "xbuildenv", - "create", - str(envpath), - "--root", - ROOT_PATH, - "--skip-missing-files", - ] - ) - - assert result.returncode == 0 - - yield tmp_path - - @pytest.fixture(scope="function", autouse=True) - def chdir_xbuildenv(self, xbuildenv): - cur_dir = os.getcwd() - - os.chdir(xbuildenv) - - try: - yield - finally: - os.chdir(cur_dir) - # Note: other tests are inherited from TestInTree def test_init_environment(self, xbuildenv, reset_env_vars, reset_cache): @@ -196,9 +143,17 @@ def test_get_pyodide_root(self, xbuildenv, reset_env_vars, reset_cache): == xbuildenv / ".pyodide-xbuildenv/xbuildenv/pyodide-root" ) - def test_in_xbuildenv(self, reset_env_vars, reset_cache): + def test_in_xbuildenv(self, xbuildenv, reset_env_vars, reset_cache): assert build_env.in_xbuildenv() + def test_get_make_environment_vars(self, xbuildenv, reset_env_vars, reset_cache): + xbuildenv_root = xbuildenv / ".pyodide-xbuildenv/xbuildenv/pyodide-root" + make_vars = build_env._get_make_environment_vars() + assert make_vars["PYODIDE_ROOT"] == str(xbuildenv_root) + + make_vars = build_env._get_make_environment_vars(pyodide_root=xbuildenv_root) + assert make_vars["PYODIDE_ROOT"] == str(xbuildenv_root) + def test_check_emscripten_version(monkeypatch): s = None diff --git a/pyodide-build/pyodide_build/tests/test_pypi.py b/pyodide-build/pyodide_build/tests/test_pypi.py index 1ae181db4ed..559246edeef 100644 --- a/pyodide-build/pyodide_build/tests/test_pypi.py +++ b/pyodide-build/pyodide_build/tests/test_pypi.py @@ -1,4 +1,5 @@ -import os +# flake8: noqa + import re import subprocess import sys @@ -14,7 +15,7 @@ from typer.testing import CliRunner from pyodide_build.cli import build -from pyodide_build.common import chdir +from .fixture import reset_cache, reset_env_vars, xbuildenv runner = CliRunner() @@ -213,16 +214,15 @@ def fake_pypi_url(fake_pypi_server): pyodide_build.out_of_tree.pypi._PYPI_INDEX = pypi_old -def test_fetch_or_build_pypi(selenium, tmp_path): +def test_fetch_or_build_pypi(xbuildenv): # TODO: - make test run without pyodide - output_dir = tmp_path / "dist" + output_dir = xbuildenv / "dist" # one pure-python package (doesn't need building) and one sdist package (needs building) pkgs 
= ["pytest-pyodide", "pycryptodome==3.15.0"] app = typer.Typer() app.command()(build.main) - os.chdir(tmp_path) for p in pkgs: result = runner.invoke( app, @@ -234,16 +234,15 @@ def test_fetch_or_build_pypi(selenium, tmp_path): assert len(built_wheels) == len(pkgs) -def test_fetch_or_build_pypi_with_deps_and_extras(selenium, tmp_path): +def test_fetch_or_build_pypi_with_deps_and_extras(xbuildenv): # TODO: - make test run without pyodide - output_dir = tmp_path / "dist" + output_dir = xbuildenv / "dist" # one pure-python package (doesn't need building) which depends on one sdist package (needs building) pkgs = ["eth-hash[pycryptodome]==0.5.1", "safe-pysha3 (>=1.0.0)"] app = typer.Typer() app.command()(build.main) - os.chdir(tmp_path) for p in pkgs: result = runner.invoke( app, @@ -255,38 +254,36 @@ def test_fetch_or_build_pypi_with_deps_and_extras(selenium, tmp_path): assert len(built_wheels) == 3 -def test_fake_pypi_succeed(selenium, tmp_path, fake_pypi_url): +def test_fake_pypi_succeed(xbuildenv, fake_pypi_url): # TODO: - make test run without pyodide - output_dir = tmp_path / "dist" + output_dir = xbuildenv / "dist" # build package that resolves right app = typer.Typer() app.command()(build.main) - with chdir(tmp_path): - result = runner.invoke( - app, - ["resolves-package", "--build-dependencies"], - ) + result = runner.invoke( + app, + ["resolves-package", "--build-dependencies"], + ) - assert result.exit_code == 0, str(result.stdout) + str(result) + assert result.exit_code == 0, str(result.stdout) + str(result) built_wheels = set(output_dir.glob("*.whl")) assert len(built_wheels) == 5 -def test_fake_pypi_resolve_fail(selenium, tmp_path, fake_pypi_url): - # TODO: - make test run without pyodide - output_dir = tmp_path / "dist" +def test_fake_pypi_resolve_fail(xbuildenv, fake_pypi_url): + output_dir = xbuildenv / "dist" + # build package that resolves right app = typer.Typer() app.command()(build.main) - with chdir(tmp_path): - result = runner.invoke( - app, - ["fails-package", "--build-dependencies"], - ) + result = runner.invoke( + app, + ["fails-package", "--build-dependencies"], + ) # this should fail and should not build any wheels assert result.exit_code != 0, result.stdout @@ -294,18 +291,17 @@ def test_fake_pypi_resolve_fail(selenium, tmp_path, fake_pypi_url): assert len(built_wheels) == 0 -def test_fake_pypi_extras_build(selenium, tmp_path, fake_pypi_url): +def test_fake_pypi_extras_build(xbuildenv, fake_pypi_url): # TODO: - make test run without pyodide - output_dir = tmp_path / "dist" + output_dir = xbuildenv / "dist" # build package that resolves right app = typer.Typer() app.command()(build.main) - with chdir(tmp_path): - result = runner.invoke( - app, - ["pkg-b[docs]", "--build-dependencies"], - ) + result = runner.invoke( + app, + ["pkg-b[docs]", "--build-dependencies"], + ) # this should work assert result.exit_code == 0, result.stdout @@ -313,16 +309,16 @@ def test_fake_pypi_extras_build(selenium, tmp_path, fake_pypi_url): assert len(built_wheels) == 2 -def test_fake_pypi_repeatable_build(selenium, tmp_path, fake_pypi_url): - # TODO: - make test run without pyodide - output_dir = tmp_path / "dist" +def test_fake_pypi_repeatable_build(xbuildenv, fake_pypi_url): + output_dir = xbuildenv / "dist" + # build package that resolves right app = typer.Typer() app.command()(build.main) # override a dependency version and build # pkg-a - with open(tmp_path / "requirements.txt", "w") as req_file: + with open("requirements.txt", "w") as req_file: req_file.write( """ # Whole line 
comment @@ -330,17 +326,17 @@ def test_fake_pypi_repeatable_build(selenium, tmp_path, fake_pypi_url): pkg-a """ ) - with chdir(tmp_path): - result = runner.invoke( - app, - [ - "-r", - "requirements.txt", - "--build-dependencies", - "--output-lockfile", - "lockfile.txt", - ], - ) + + result = runner.invoke( + app, + [ + "-r", + "requirements.txt", + "--build-dependencies", + "--output-lockfile", + "lockfile.txt", + ], + ) # this should work assert result.exit_code == 0, result.stdout built_wheels = list(output_dir.glob("*.whl")) @@ -354,11 +350,10 @@ def test_fake_pypi_repeatable_build(selenium, tmp_path, fake_pypi_url): # rebuild from package-versions lockfile and # check it outputs the same version number - with chdir(tmp_path): - result = runner.invoke( - app, - ["-r", str(tmp_path / "lockfile.txt")], - ) + result = runner.invoke( + app, + ["-r", "lockfile.txt"], + ) # should still have built 1.0.0 of pkg-c built_wheels = list(output_dir.glob("*.whl")) @@ -369,7 +364,7 @@ def test_fake_pypi_repeatable_build(selenium, tmp_path, fake_pypi_url): assert len(built_wheels) == 2, result.stdout -def test_bad_requirements_text(selenium, tmp_path): +def test_bad_requirements_text(xbuildenv): app = typer.Typer() app.command()(build.main) # test 1 - error on URL location in requirements @@ -377,11 +372,11 @@ def test_bad_requirements_text(selenium, tmp_path): # test 3 - error on editable install of package bad_lines = [" pkg-c@http://www.pkg-c.org", " -r bob.txt", " -e pkg-c"] for line in bad_lines: - with open(tmp_path / "requirements.txt", "w") as req_file: + with open("requirements.txt", "w") as req_file: req_file.write(line + "\n") - with chdir(tmp_path): - result = runner.invoke( - app, - ["-r", "requirements.txt"], - ) - assert result.exit_code != 0 and line.strip() in str(result) + + result = runner.invoke( + app, + ["-r", "requirements.txt"], + ) + assert result.exit_code != 0 and line.strip() in str(result)
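The core of the fix is the one-line change to `_get_make_environment_vars`: the `make` subprocess now receives an explicit, minimal environment, so variables inherited from the developer's shell (presumably how out-of-tree test runs ended up probing paths like `/packages` in the wrong tree) can no longer leak into the Makefile evaluation. A standalone restatement of that pattern, with the surrounding logging and parsing omitted:

```python
import subprocess
from pathlib import Path

def load_make_vars(pyodide_root: Path) -> str:
    # Pass only the variable the Makefile actually needs; anything
    # inherited from the caller's shell (e.g. a stale PYODIDE_ROOT
    # pointing at an in-tree checkout) is deliberately excluded.
    result = subprocess.run(
        ["make", "-f", str(pyodide_root / "Makefile.envs"), ".output_vars"],
        capture_output=True,
        text=True,
        env={"PYODIDE_ROOT": str(pyodide_root)},
    )
    return result.stdout
```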
PrefectHQ__prefect-2609
Consider promoting `case` to the top level

## Current behavior
*Please describe how the feature works today*

Currently, the `case` context manager must be imported from `prefect.tasks.control_flow.case`.

## Proposed behavior
*Please describe your proposed change to the current behavior*

I think we should consider promoting `case` to being importable as `prefect.case`, since it forms a fundamental part of the Python API. Other control flow utilities have "task-like" semantics (even if they are called as functions), and it's more appropriate for them to live in a `tasks` submodule. However, like `task`, `Flow`, `tags`, and `unmapped`, I believe `case` represents a significant component of Prefect's Python syntax and warrants top-level availability.

## Example
*Please give an example of how the enhancement would be useful*

```
from prefect import Flow, case

with Flow("example"):
    with case(is_this_easy, True):
        do_stuff()

    with prefect.tasks.control_flow.case(is_this_easy, False):
        do_other_stuff()
```
[ { "content": "import prefect.utilities\nfrom prefect.configuration import config\n\nfrom prefect.utilities.context import context\n\nfrom prefect.client import Client\nimport prefect.schedules\nimport prefect.triggers\nimport prefect.environments\n\nfrom prefect.core import Task, Flow, Parameter\nimport prefect.engine\nimport prefect.tasks\nfrom prefect.utilities.tasks import task, tags, unmapped\n\nimport prefect.serialization\n\nimport prefect.agent\n\nfrom ._version import get_versions\n\n__version__ = get_versions()[\"version\"] # type: ignore\ndel get_versions\n\ntry:\n import signal as _signal\n from ._siginfo import sig_handler as _sig_handler\n\n _signal.signal(29, _sig_handler)\nexcept:\n pass\n", "path": "src/prefect/__init__.py" } ]
[ { "content": "import prefect.utilities\nfrom prefect.configuration import config\n\nfrom prefect.utilities.context import context\n\nfrom prefect.client import Client\nimport prefect.schedules\nimport prefect.triggers\nimport prefect.environments\n\nfrom prefect.core import Task, Flow, Parameter\nimport prefect.engine\nimport prefect.tasks\nfrom prefect.tasks.control_flow import case\nfrom prefect.utilities.tasks import task, tags, unmapped\n\nimport prefect.serialization\n\nimport prefect.agent\n\nfrom ._version import get_versions\n\n__version__ = get_versions()[\"version\"] # type: ignore\ndel get_versions\n\ntry:\n import signal as _signal\n from ._siginfo import sig_handler as _sig_handler\n\n _signal.signal(29, _sig_handler)\nexcept:\n pass\n", "path": "src/prefect/__init__.py" } ]
diff --git a/changes/issue2568.yaml b/changes/issue2568.yaml new file mode 100644 index 000000000000..40b94cf8076f --- /dev/null +++ b/changes/issue2568.yaml @@ -0,0 +1,2 @@ +enhancement: + - Add `case` to top-level namespace - [#2609](https://github.com/PrefectHQ/prefect/pull/2609) diff --git a/src/prefect/__init__.py b/src/prefect/__init__.py index 0ec3a15778d7..2bcc5d016d24 100644 --- a/src/prefect/__init__.py +++ b/src/prefect/__init__.py @@ -11,6 +11,7 @@ from prefect.core import Task, Flow, Parameter import prefect.engine import prefect.tasks +from prefect.tasks.control_flow import case from prefect.utilities.tasks import task, tags, unmapped import prefect.serialization
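After this change, the issue's example works with a single top-level import. A runnable sketch under Prefect 1.x semantics; the task names are placeholders:

```python
from prefect import Flow, case, task

@task
def is_this_easy():
    return True

@task
def do_stuff():
    print("easy path")

@task
def do_other_stuff():
    print("hard path")

with Flow("example") as flow:
    cond = is_this_easy()
    # `case` opens a conditional branch that only runs when the
    # condition task's result matches the given value.
    with case(cond, True):
        do_stuff()
    with case(cond, False):
        do_other_stuff()
```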
vacanza__python-holidays-754
Diwali in Singapore is on the 24th of October, not the 24th of November

Holidays returns (datetime.date(2022, 11, 24), 'Deepavali'), not the actual day, which is this coming Monday, the 24th of October.

Full list here: https://www.mom.gov.sg/employment-practices/public-holidays
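Once the 2022 entry in the Deepavali `dates_fixed_obs` table below is corrected from `(NOV, 24)` to `(OCT, 24)`, the library should agree with MOM. A quick verification sketch, assuming a build with the corrected data:

```python
from datetime import date
import holidays

sg = holidays.Singapore(years=2022)
# Per MOM, Deepavali 2022 falls on Monday, 24 October.
assert date(2022, 10, 24) in sg
assert sg[date(2022, 10, 24)] == "Deepavali"
assert date(2022, 11, 24) not in sg
```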
[ { "content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2022\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import date\nfrom typing import Dict, Iterable, List, Optional, Tuple, Union\n\nfrom dateutil.easter import easter\nfrom dateutil.relativedelta import FR, MO, SA\nfrom dateutil.relativedelta import relativedelta as rd\n\nfrom holidays.constants import (\n APR,\n AUG,\n DEC,\n FEB,\n JAN,\n JUL,\n JUN,\n MAR,\n MAY,\n NOV,\n OCT,\n SEP,\n SUN,\n)\nfrom holidays.holiday_base import HolidayBase\nfrom holidays.utils import _ChineseLuniSolar, _islamic_to_gre\n\n\nclass Singapore(HolidayBase):\n country = \"SG\"\n\n def __init__(\n self,\n years: Union[int, Iterable[int]] = None,\n expand: bool = True,\n observed: bool = True,\n subdiv: Optional[str] = None,\n prov: Optional[str] = None,\n state: Optional[str] = None,\n ) -> None:\n \"\"\"\n A subclass of :py:class:`HolidayBase` representing public holidays in\n Singapore.\n\n Limitations:\n\n - Prior to 1969: holidays are estimated.\n - Prior to 2000: holidays may not be accurate.\n - 2024 and later: the following four moving date holidays (whose exact\n date is announced yearly) are estimated, and so denoted:\n\n - Hari Raya Puasa\n - Hari Raya Haji\n - Vesak Day\n - Deepavali\n\n Sources:\n\n - `Holidays Act <https://sso.agc.gov.sg/Act/HA1998>`__ (Act 24 of\n 1968—Holidays (Amendment) Act 1968)\n - `Ministry of Manpower\n <https://www.mom.gov.sg/employment-practices/public-holidays>`__\n\n References:\n\n - `Wikipedia\n <https://en.wikipedia.org/wiki/Public_holidays_in_Singapore>`__\n\n Country created and maintained by: `Mike Borsetti\n <https://github.com/mborsetti>`__\n\n See parameters and usage in :py:class:`HolidayBase`.\n \"\"\"\n\n self.cnls = _ChineseLuniSolar()\n super().__init__(years, expand, observed, subdiv, prov, state)\n\n def _populate(self, year: int) -> None:\n\n # New Year's Day\n self[date(year, JAN, 1)] = \"New Year's Day\"\n\n # Chinese New Year (two days)\n hol_date = self.cnls.lunar_n_y_date(year)\n self[hol_date] = \"Chinese New Year\"\n self[hol_date + rd(days=+1)] = \"Chinese New Year\"\n\n # Hari Raya Puasa\n # aka Eid al-Fitr\n # Date of observance is announced yearly.\n # An Islamic holiday could fall twice in the same Gregorian year.\n dates_fixed_multiple_obs: Dict[int, Tuple[Tuple[int, int], ...]] = {\n 2001: ((DEC, 16),),\n 2002: ((DEC, 6),),\n 2003: ((NOV, 25),),\n 2004: ((NOV, 14),),\n 2005: ((NOV, 3),),\n 2006: ((OCT, 24),),\n 2007: ((OCT, 13),),\n 2008: ((OCT, 1),),\n 2009: ((SEP, 20),),\n 2010: ((SEP, 10),),\n 2011: ((AUG, 30),),\n 2012: ((AUG, 19),),\n 2013: ((AUG, 8),),\n 2014: ((JUL, 28),),\n 2015: ((JUL, 17),),\n 2016: ((JUL, 6),),\n 2017: ((JUN, 25),),\n 2018: ((JUN, 15),),\n 2019: ((JUN, 5),),\n 2020: ((MAY, 24),),\n 2021: ((MAY, 13),),\n 2022: ((MAY, 2),),\n 2023: ((APR, 22),),\n }\n if year in dates_fixed_multiple_obs:\n for month_day in dates_fixed_multiple_obs[year]:\n hol_date = date(year, *month_day)\n self[hol_date] = \"Hari Raya Puasa\"\n # Second day of Hari Raya Puasa (up to and including 1968)\n # Removed since we don't have Hari Raya Puasa dates for the\n # the years <= 1968:\n # if year <= 1968:\n # 
self[hol_date + rd(days=+1),\n # \"Second day of Hari Raya Puasa\")\n else:\n for date_obs in _islamic_to_gre(year, 10, 1):\n hol_date = date_obs\n self[hol_date] = \"Hari Raya Puasa* (*estimated)\"\n # Second day of Hari Raya Puasa (up to and including 1968)\n if year <= 1968:\n hol_date += rd(days=+1)\n self[hol_date] = (\n \"Second day of Hari Raya Puasa*\" \" (*estimated)\"\n )\n\n # Hari Raya Haji\n # aka Eid al-Adha\n # Date of observance is announced yearly.\n # An Islamic holiday could fall twice in the same Gregorian year.\n dates_fixed_multiple_obs = {\n 2001: ((MAR, 6),),\n 2002: ((FEB, 23),),\n 2003: ((FEB, 12),),\n 2004: ((FEB, 1),),\n 2005: ((JAN, 21),),\n 2006: ((JAN, 10),),\n 2007: ((DEC, 20),),\n 2008: ((DEC, 8),),\n 2009: ((NOV, 27),),\n 2010: ((NOV, 17),),\n 2011: ((NOV, 6),),\n 2012: ((OCT, 26),),\n 2013: ((OCT, 15),),\n 2014: ((OCT, 5),),\n 2015: ((SEP, 24),),\n 2016: ((SEP, 12),),\n 2017: ((SEP, 1),),\n 2018: ((AUG, 22),),\n 2019: ((AUG, 11),),\n 2020: ((JUL, 31),),\n 2021: ((JUL, 20),),\n 2022: ((JUL, 9),),\n 2023: ((JUN, 29),),\n }\n if year in dates_fixed_multiple_obs:\n for month_day in dates_fixed_multiple_obs[year]:\n hol_date = date(year, *month_day)\n self[hol_date] = \"Hari Raya Haji\"\n else:\n for date_obs in _islamic_to_gre(year, 12, 10):\n hol_date = date_obs\n self[hol_date] = \"Hari Raya Haji* (*estimated)\"\n\n # Holy Saturday (up to and including 1968)\n if year <= 1968:\n self[easter(year) + rd(weekday=SA(-1))] = \"Holy Saturday\"\n\n # Good Friday\n self[easter(year) + rd(weekday=FR(-1))] = \"Good Friday\"\n\n # Easter Monday\n if year <= 1968:\n self[easter(year) + rd(weekday=MO(1))] = \"Easter Monday\"\n\n # Labour Day\n self[date(year, MAY, 1)] = \"Labour Day\"\n\n # Vesak Day\n # date of observance is announced yearly\n # https://en.wikipedia.org/wiki/Vesak#Dates_of_observance\n dates_fixed_obs: Dict[int, Tuple[int, int]] = {\n 2001: (MAY, 7),\n 2002: (MAY, 27),\n 2003: (MAY, 15),\n 2004: (JUN, 2),\n 2005: (MAY, 23),\n 2006: (MAY, 12),\n 2007: (MAY, 31),\n 2008: (MAY, 19),\n 2009: (MAY, 9),\n 2010: (MAY, 28),\n 2011: (MAY, 17),\n 2012: (MAY, 5),\n 2013: (MAY, 24),\n 2014: (MAY, 13),\n 2015: (JUN, 1),\n 2016: (MAY, 20),\n 2017: (MAY, 10),\n 2018: (MAY, 29),\n 2019: (MAY, 19),\n 2020: (MAY, 7),\n 2021: (MAY, 26),\n 2022: (MAY, 15),\n # 2023 date revised by MOM on 29-sep-22\n # https://www.mom.gov.sg/newsroom/press-releases/2022/0929-revised-date-for-vesak-day-2023\n 2023: (JUN, 2),\n }\n if year in dates_fixed_obs:\n hol_date = date(year, *dates_fixed_obs[year])\n self[hol_date] = \"Vesak Day\"\n else:\n hol_date = self.cnls.vesak_date(year)\n self[hol_date] = \"Vesak Day* (*estimated; ~10% chance +/- 1 day)\"\n\n # National Day\n self[date(year, AUG, 9)] = \"National Day\"\n\n # Deepavali\n # aka Diwali\n # date of observance is announced yearly\n dates_fixed_obs = {\n 2001: (NOV, 14),\n 2002: (NOV, 3),\n 2003: (OCT, 23),\n 2004: (NOV, 11),\n 2005: (NOV, 1),\n 2006: (OCT, 21),\n 2007: (NOV, 8),\n 2008: (OCT, 27),\n 2009: (OCT, 17),\n 2010: (NOV, 5),\n 2011: (OCT, 26),\n 2012: (NOV, 13),\n 2013: (NOV, 2),\n 2014: (OCT, 22),\n 2015: (NOV, 10),\n 2016: (OCT, 29),\n 2017: (OCT, 18),\n 2018: (NOV, 6),\n 2019: (OCT, 27),\n 2020: (NOV, 14),\n 2021: (NOV, 4),\n 2022: (NOV, 24),\n 2023: (NOV, 12),\n }\n if year in dates_fixed_obs:\n hol_date = date(year, *dates_fixed_obs[year])\n self[hol_date] = \"Deepavali\"\n else:\n hol_date = self.cnls.s_diwali_date(year)\n self[hol_date] = \"Deepavali* (*estimated; rarely on day after)\"\n\n # Christmas Day\n 
self[date(year, DEC, 25)] = \"Christmas Day\"\n\n # Boxing day (up to and including 1968)\n if year <= 1968:\n self[date(year, DEC, 26)] = \"Boxing Day\"\n\n # Polling Day\n dates_fixed_obs = {\n 2001: (NOV, 3),\n 2006: (MAY, 6),\n 2011: (MAY, 7),\n 2015: (SEP, 11),\n 2020: (JUL, 10),\n }\n if year in dates_fixed_obs:\n self[date(year, *dates_fixed_obs[year])] = \"Polling Day\"\n\n # SG50 Public holiday\n # Announced on 14 March 2015\n # https://www.mom.gov.sg/newsroom/press-releases/2015/sg50-public-holiday-on-7-august-2015\n if year == 2015:\n self[date(2015, AUG, 7)] = \"SG50 Public Holiday\"\n\n # Check for holidays that fall on a Sunday and implement Section 4(2)\n # of the Holidays Act: \"if any day specified in the Schedule falls on\n # a Sunday, the day next following not being itself a public holiday\n # is declared a public holiday in Singapore.\"\n for (hol_date, hol_name) in list(self.items()):\n if hol_date.year == year and hol_date.weekday() == SUN:\n self[hol_date] += \" [Sunday]\"\n in_lieu_date = hol_date + rd(days=+1)\n while in_lieu_date in self:\n in_lieu_date += rd(days=+1)\n self[in_lieu_date] = hol_name + \" [In lieu]\"\n\n\nclass SG(Singapore):\n\n # __init__ required for IDE typing and inheritance of docstring.\n def __init__(\n self,\n years: Union[int, Iterable[int]] = None,\n expand: bool = True,\n observed: bool = True,\n subdiv: Optional[str] = None,\n prov: Optional[str] = None,\n state: Optional[str] = None,\n ) -> None:\n super().__init__(years, expand, observed, subdiv, prov, state)\n\n\nclass SGP(Singapore):\n\n # __init__ required for IDE typing and inheritance of docstring.\n def __init__(\n self,\n years: Union[int, Iterable[int]] = None,\n expand: bool = True,\n observed: bool = True,\n subdiv: Optional[str] = None,\n prov: Optional[str] = None,\n state: Optional[str] = None,\n ) -> None:\n super().__init__(years, expand, observed, subdiv, prov, state)\n", "path": "holidays/countries/singapore.py" } ]
[ { "content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2022\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import date\nfrom typing import Dict, Iterable, List, Optional, Tuple, Union\n\nfrom dateutil.easter import easter\nfrom dateutil.relativedelta import FR, MO, SA\nfrom dateutil.relativedelta import relativedelta as rd\n\nfrom holidays.constants import (\n APR,\n AUG,\n DEC,\n FEB,\n JAN,\n JUL,\n JUN,\n MAR,\n MAY,\n NOV,\n OCT,\n SEP,\n SUN,\n)\nfrom holidays.holiday_base import HolidayBase\nfrom holidays.utils import _ChineseLuniSolar, _islamic_to_gre\n\n\nclass Singapore(HolidayBase):\n country = \"SG\"\n\n def __init__(\n self,\n years: Union[int, Iterable[int]] = None,\n expand: bool = True,\n observed: bool = True,\n subdiv: Optional[str] = None,\n prov: Optional[str] = None,\n state: Optional[str] = None,\n ) -> None:\n \"\"\"\n A subclass of :py:class:`HolidayBase` representing public holidays in\n Singapore.\n\n Limitations:\n\n - Prior to 1969: holidays are estimated.\n - Prior to 2000: holidays may not be accurate.\n - 2024 and later: the following four moving date holidays (whose exact\n date is announced yearly) are estimated, and so denoted:\n\n - Hari Raya Puasa\n - Hari Raya Haji\n - Vesak Day\n - Deepavali\n\n Sources:\n\n - `Holidays Act <https://sso.agc.gov.sg/Act/HA1998>`__ (Act 24 of\n 1968—Holidays (Amendment) Act 1968)\n - `Ministry of Manpower\n <https://www.mom.gov.sg/employment-practices/public-holidays>`__\n\n References:\n\n - `Wikipedia\n <https://en.wikipedia.org/wiki/Public_holidays_in_Singapore>`__\n\n Country created and maintained by: `Mike Borsetti\n <https://github.com/mborsetti>`__\n\n See parameters and usage in :py:class:`HolidayBase`.\n \"\"\"\n\n self.cnls = _ChineseLuniSolar()\n super().__init__(years, expand, observed, subdiv, prov, state)\n\n def _populate(self, year: int) -> None:\n\n # New Year's Day\n self[date(year, JAN, 1)] = \"New Year's Day\"\n\n # Chinese New Year (two days)\n hol_date = self.cnls.lunar_n_y_date(year)\n self[hol_date] = \"Chinese New Year\"\n self[hol_date + rd(days=+1)] = \"Chinese New Year\"\n\n # Hari Raya Puasa\n # aka Eid al-Fitr\n # Date of observance is announced yearly.\n # An Islamic holiday could fall twice in the same Gregorian year.\n dates_fixed_multiple_obs: Dict[int, Tuple[Tuple[int, int], ...]] = {\n 2001: ((DEC, 16),),\n 2002: ((DEC, 6),),\n 2003: ((NOV, 25),),\n 2004: ((NOV, 14),),\n 2005: ((NOV, 3),),\n 2006: ((OCT, 24),),\n 2007: ((OCT, 13),),\n 2008: ((OCT, 1),),\n 2009: ((SEP, 20),),\n 2010: ((SEP, 10),),\n 2011: ((AUG, 30),),\n 2012: ((AUG, 19),),\n 2013: ((AUG, 8),),\n 2014: ((JUL, 28),),\n 2015: ((JUL, 17),),\n 2016: ((JUL, 6),),\n 2017: ((JUN, 25),),\n 2018: ((JUN, 15),),\n 2019: ((JUN, 5),),\n 2020: ((MAY, 24),),\n 2021: ((MAY, 13),),\n 2022: ((MAY, 2),),\n 2023: ((APR, 22),),\n }\n if year in dates_fixed_multiple_obs:\n for month_day in dates_fixed_multiple_obs[year]:\n hol_date = date(year, *month_day)\n self[hol_date] = \"Hari Raya Puasa\"\n # Second day of Hari Raya Puasa (up to and including 1968)\n # Removed since we don't have Hari Raya Puasa dates for the\n # the years <= 1968:\n # if year <= 1968:\n # 
self[hol_date + rd(days=+1),\n # \"Second day of Hari Raya Puasa\")\n else:\n for date_obs in _islamic_to_gre(year, 10, 1):\n hol_date = date_obs\n self[hol_date] = \"Hari Raya Puasa* (*estimated)\"\n # Second day of Hari Raya Puasa (up to and including 1968)\n if year <= 1968:\n hol_date += rd(days=+1)\n self[hol_date] = (\n \"Second day of Hari Raya Puasa*\" \" (*estimated)\"\n )\n\n # Hari Raya Haji\n # aka Eid al-Adha\n # Date of observance is announced yearly.\n # An Islamic holiday could fall twice in the same Gregorian year.\n dates_fixed_multiple_obs = {\n 2001: ((MAR, 6),),\n 2002: ((FEB, 23),),\n 2003: ((FEB, 12),),\n 2004: ((FEB, 1),),\n 2005: ((JAN, 21),),\n 2006: ((JAN, 10),),\n 2007: ((DEC, 20),),\n 2008: ((DEC, 8),),\n 2009: ((NOV, 27),),\n 2010: ((NOV, 17),),\n 2011: ((NOV, 6),),\n 2012: ((OCT, 26),),\n 2013: ((OCT, 15),),\n 2014: ((OCT, 5),),\n 2015: ((SEP, 24),),\n 2016: ((SEP, 12),),\n 2017: ((SEP, 1),),\n 2018: ((AUG, 22),),\n 2019: ((AUG, 11),),\n 2020: ((JUL, 31),),\n 2021: ((JUL, 20),),\n 2022: ((JUL, 9),),\n 2023: ((JUN, 29),),\n }\n if year in dates_fixed_multiple_obs:\n for month_day in dates_fixed_multiple_obs[year]:\n hol_date = date(year, *month_day)\n self[hol_date] = \"Hari Raya Haji\"\n else:\n for date_obs in _islamic_to_gre(year, 12, 10):\n hol_date = date_obs\n self[hol_date] = \"Hari Raya Haji* (*estimated)\"\n\n # Holy Saturday (up to and including 1968)\n if year <= 1968:\n self[easter(year) + rd(weekday=SA(-1))] = \"Holy Saturday\"\n\n # Good Friday\n self[easter(year) + rd(weekday=FR(-1))] = \"Good Friday\"\n\n # Easter Monday\n if year <= 1968:\n self[easter(year) + rd(weekday=MO(1))] = \"Easter Monday\"\n\n # Labour Day\n self[date(year, MAY, 1)] = \"Labour Day\"\n\n # Vesak Day\n # date of observance is announced yearly\n # https://en.wikipedia.org/wiki/Vesak#Dates_of_observance\n dates_fixed_obs: Dict[int, Tuple[int, int]] = {\n 2001: (MAY, 7),\n 2002: (MAY, 27),\n 2003: (MAY, 15),\n 2004: (JUN, 2),\n 2005: (MAY, 23),\n 2006: (MAY, 12),\n 2007: (MAY, 31),\n 2008: (MAY, 19),\n 2009: (MAY, 9),\n 2010: (MAY, 28),\n 2011: (MAY, 17),\n 2012: (MAY, 5),\n 2013: (MAY, 24),\n 2014: (MAY, 13),\n 2015: (JUN, 1),\n 2016: (MAY, 20),\n 2017: (MAY, 10),\n 2018: (MAY, 29),\n 2019: (MAY, 19),\n 2020: (MAY, 7),\n 2021: (MAY, 26),\n 2022: (MAY, 15),\n # 2023 date revised by MOM on 29-sep-22\n # https://www.mom.gov.sg/newsroom/press-releases/2022/0929-revised-date-for-vesak-day-2023\n 2023: (JUN, 2),\n }\n if year in dates_fixed_obs:\n hol_date = date(year, *dates_fixed_obs[year])\n self[hol_date] = \"Vesak Day\"\n else:\n hol_date = self.cnls.vesak_date(year)\n self[hol_date] = \"Vesak Day* (*estimated; ~10% chance +/- 1 day)\"\n\n # National Day\n self[date(year, AUG, 9)] = \"National Day\"\n\n # Deepavali\n # aka Diwali\n # date of observance is announced yearly\n dates_fixed_obs = {\n 2001: (NOV, 14),\n 2002: (NOV, 3),\n 2003: (OCT, 23),\n 2004: (NOV, 11),\n 2005: (NOV, 1),\n 2006: (OCT, 21),\n 2007: (NOV, 8),\n 2008: (OCT, 27),\n 2009: (OCT, 17),\n 2010: (NOV, 5),\n 2011: (OCT, 26),\n 2012: (NOV, 13),\n 2013: (NOV, 2),\n 2014: (OCT, 22),\n 2015: (NOV, 10),\n 2016: (OCT, 29),\n 2017: (OCT, 18),\n 2018: (NOV, 6),\n 2019: (OCT, 27),\n 2020: (NOV, 14),\n 2021: (NOV, 4),\n 2022: (OCT, 24),\n 2023: (NOV, 12),\n }\n if year in dates_fixed_obs:\n hol_date = date(year, *dates_fixed_obs[year])\n self[hol_date] = \"Deepavali\"\n else:\n hol_date = self.cnls.s_diwali_date(year)\n self[hol_date] = \"Deepavali* (*estimated; rarely on day after)\"\n\n # Christmas Day\n 
self[date(year, DEC, 25)] = \"Christmas Day\"\n\n # Boxing day (up to and including 1968)\n if year <= 1968:\n self[date(year, DEC, 26)] = \"Boxing Day\"\n\n # Polling Day\n dates_fixed_obs = {\n 2001: (NOV, 3),\n 2006: (MAY, 6),\n 2011: (MAY, 7),\n 2015: (SEP, 11),\n 2020: (JUL, 10),\n }\n if year in dates_fixed_obs:\n self[date(year, *dates_fixed_obs[year])] = \"Polling Day\"\n\n # SG50 Public holiday\n # Announced on 14 March 2015\n # https://www.mom.gov.sg/newsroom/press-releases/2015/sg50-public-holiday-on-7-august-2015\n if year == 2015:\n self[date(2015, AUG, 7)] = \"SG50 Public Holiday\"\n\n # Check for holidays that fall on a Sunday and implement Section 4(2)\n # of the Holidays Act: \"if any day specified in the Schedule falls on\n # a Sunday, the day next following not being itself a public holiday\n # is declared a public holiday in Singapore.\"\n for (hol_date, hol_name) in list(self.items()):\n if hol_date.year == year and hol_date.weekday() == SUN:\n self[hol_date] += \" [Sunday]\"\n in_lieu_date = hol_date + rd(days=+1)\n while in_lieu_date in self:\n in_lieu_date += rd(days=+1)\n self[in_lieu_date] = hol_name + \" [In lieu]\"\n\n\nclass SG(Singapore):\n\n # __init__ required for IDE typing and inheritance of docstring.\n def __init__(\n self,\n years: Union[int, Iterable[int]] = None,\n expand: bool = True,\n observed: bool = True,\n subdiv: Optional[str] = None,\n prov: Optional[str] = None,\n state: Optional[str] = None,\n ) -> None:\n super().__init__(years, expand, observed, subdiv, prov, state)\n\n\nclass SGP(Singapore):\n\n # __init__ required for IDE typing and inheritance of docstring.\n def __init__(\n self,\n years: Union[int, Iterable[int]] = None,\n expand: bool = True,\n observed: bool = True,\n subdiv: Optional[str] = None,\n prov: Optional[str] = None,\n state: Optional[str] = None,\n ) -> None:\n super().__init__(years, expand, observed, subdiv, prov, state)\n", "path": "holidays/countries/singapore.py" } ]
diff --git a/holidays/countries/singapore.py b/holidays/countries/singapore.py index b055f77d2..da1f9d2be 100644 --- a/holidays/countries/singapore.py +++ b/holidays/countries/singapore.py @@ -261,7 +261,7 @@ def _populate(self, year: int) -> None: 2019: (OCT, 27), 2020: (NOV, 14), 2021: (NOV, 4), - 2022: (NOV, 24), + 2022: (OCT, 24), 2023: (NOV, 12), } if year in dates_fixed_obs: diff --git a/test/countries/test_singapore.py b/test/countries/test_singapore.py index 3113c0c16..6e25c3465 100644 --- a/test/countries/test_singapore.py +++ b/test/countries/test_singapore.py @@ -60,7 +60,7 @@ def test_Singapore(self): self.assertIn(date(2022, 5, 16), self.holidays) self.assertIn(date(2022, 7, 9), self.holidays) self.assertIn(date(2022, 8, 9), self.holidays) - self.assertIn(date(2022, 11, 24), self.holidays) + self.assertIn(date(2022, 10, 24), self.holidays) self.assertIn(date(2022, 12, 25), self.holidays) self.assertIn(date(2022, 12, 26), self.holidays) # 2022: total holidays (11 + 3 falling on a Sunday)
automl__auto-sklearn-190
Add warning if dependencies are not met

There should be a warning if one of the following dependencies is not met:

- scikit-learn==0.17
- smac==0.0.1
- lockfile>=0.10
- ConfigSpace>=0.2.1
- pyrfr==0.2.1
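For illustration, a minimal sketch of how the checker in the file below could be invoked with the requirement list above (note that, as written, it raises exceptions rather than emitting warnings):

```python
from autosklearn.util.dependencies import verify_packages

# Raises MissingPackageError or IncorrectPackageVersionError when a
# requirement is absent or installed at the wrong version.
verify_packages(
    "scikit-learn==0.17\n"
    "smac==0.0.1\n"
    "lockfile>=0.10\n"
    "ConfigSpace>=0.2.1\n"
    "pyrfr==0.2.1"
)
```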
[ { "content": "from warnings import warn\n\nimport pkg_resources\nimport re\n\nfrom distutils.version import LooseVersion\n\n\nRE_PATTERN = re.compile('^(?P<name>[\\w\\-]+)((?P<operation>==|>=|>)(?P<version>(\\d+\\.)?(\\d+\\.)?(\\d+)))?$')\n\n\ndef verify_packages(packages):\n if not packages:\n return\n if isinstance(packages, str):\n packages = packages.splitlines()\n\n for package in packages:\n if not package:\n continue\n\n match = RE_PATTERN.match(package)\n if match:\n name = match.group('name')\n operation = match.group('operation')\n version = match.group('version')\n _verify_package(name, operation, version)\n else:\n raise ValueError('Unable to read requirement: %s' % package)\n\n\ndef _verify_package(name, operation, version):\n try:\n module = pkg_resources.get_distribution(name)\n except pkg_resources.DistributionNotFound:\n raise MissingPackageError(name) from None\n\n if not operation:\n return\n\n required_version = LooseVersion(version)\n installed_version = LooseVersion(module.version)\n\n if operation == '==':\n check = required_version == installed_version\n elif operation == '>':\n check = installed_version > required_version\n elif operation == '>=':\n check = installed_version > required_version or \\\n installed_version == required_version\n else:\n raise NotImplementedError('operation \\'%s\\' is not supported' % operation)\n if not check:\n raise IncorrectPackageVersionError(name, installed_version, operation, required_version)\n\n\nclass MissingPackageError(Exception):\n\n error_message = 'mandatory package \\'{name}\\' not found'\n\n def __init__(self, package_name):\n self.package_name = package_name\n super(MissingPackageError, self).__init__(self.error_message.format(name=package_name))\n\n\nclass IncorrectPackageVersionError(Exception):\n\n error_message = '\\'{name} {installed_version}\\' version mismatch ({operation}{required_version})'\n\n def __init__(self, package_name, installed_version, operation, required_version):\n self.package_name = package_name\n self.installed_version = installed_version\n self.operation = operation\n self.required_version = required_version\n message = self.error_message.format(name=package_name,\n installed_version=installed_version,\n operation=operation,\n required_version=required_version)\n super(IncorrectPackageVersionError, self).__init__(message)\n", "path": "autosklearn/util/dependencies.py" } ]
[ { "content": "from warnings import warn\n\nimport pkg_resources\nimport re\n\nfrom distutils.version import LooseVersion\n\n\nRE_PATTERN = re.compile('^(?P<name>[\\w\\-]+)((?P<operation>==|>=|>)(?P<version>(\\d+\\.)?(\\d+\\.)?(\\d+)))?$')\n\n\ndef verify_packages(packages):\n if not packages:\n return\n if isinstance(packages, str):\n packages = packages.splitlines()\n\n for package in packages:\n if not package:\n continue\n\n match = RE_PATTERN.match(package)\n if match:\n name = match.group('name')\n operation = match.group('operation')\n version = match.group('version')\n _verify_package(name, operation, version)\n else:\n raise ValueError('Unable to read requirement: %s' % package)\n\n\ndef _verify_package(name, operation, version):\n try:\n module = pkg_resources.get_distribution(name)\n except pkg_resources.DistributionNotFound:\n raise MissingPackageError(name)\n\n if not operation:\n return\n\n required_version = LooseVersion(version)\n installed_version = LooseVersion(module.version)\n\n if operation == '==':\n check = required_version == installed_version\n elif operation == '>':\n check = installed_version > required_version\n elif operation == '>=':\n check = installed_version > required_version or \\\n installed_version == required_version\n else:\n raise NotImplementedError('operation \\'%s\\' is not supported' % operation)\n if not check:\n raise IncorrectPackageVersionError(name, installed_version, operation, required_version)\n\n\nclass MissingPackageError(Exception):\n\n error_message = 'mandatory package \\'{name}\\' not found'\n\n def __init__(self, package_name):\n self.package_name = package_name\n super(MissingPackageError, self).__init__(self.error_message.format(name=package_name))\n\n\nclass IncorrectPackageVersionError(Exception):\n\n error_message = '\\'{name} {installed_version}\\' version mismatch ({operation}{required_version})'\n\n def __init__(self, package_name, installed_version, operation, required_version):\n self.package_name = package_name\n self.installed_version = installed_version\n self.operation = operation\n self.required_version = required_version\n message = self.error_message.format(name=package_name,\n installed_version=installed_version,\n operation=operation,\n required_version=required_version)\n super(IncorrectPackageVersionError, self).__init__(message)\n", "path": "autosklearn/util/dependencies.py" } ]
diff --git a/autosklearn/util/dependencies.py b/autosklearn/util/dependencies.py index 61ed76adf5..d36ad7e5c8 100644 --- a/autosklearn/util/dependencies.py +++ b/autosklearn/util/dependencies.py @@ -33,7 +33,7 @@ def _verify_package(name, operation, version): try: module = pkg_resources.get_distribution(name) except pkg_resources.DistributionNotFound: - raise MissingPackageError(name) from None + raise MissingPackageError(name) if not operation: return
pre-commit__pre-commit-948
Support GIT_SSH_COMMAND

Currently, `GIT_SSH` is supported for overriding the ssh command used by git, but it is slightly less ergonomic than `GIT_SSH_COMMAND` since it requires a file. Adding support for passing `GIT_SSH_COMMAND` to git commands would obviate the need for creating a new file when you want to change the ssh command.
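For illustration, a minimal sketch of the allow-list filtering in `no_git_env()` below, which decides which `GIT_*` variables survive into git subprocess calls (the values set here are hypothetical):

```python
import os

os.environ["GIT_SSH_COMMAND"] = "ssh -i ~/.ssh/deploy_key"  # hypothetical key path
os.environ["GIT_DIR"] = "/tmp/elsewhere"  # known to break clones if passed through

env = {
    k: v for k, v in os.environ.items()
    if not k.startswith("GIT_") or k in {"GIT_SSH", "GIT_SSH_COMMAND"}
}
assert "GIT_SSH_COMMAND" in env  # kept, so git honours the custom ssh command
assert "GIT_DIR" not in env      # still stripped, as before
```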
[ { "content": "from __future__ import unicode_literals\n\nimport logging\nimport os.path\nimport sys\n\nfrom pre_commit.util import cmd_output\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef zsplit(s):\n s = s.strip('\\0')\n if s:\n return s.split('\\0')\n else:\n return []\n\n\ndef no_git_env():\n # Too many bugs dealing with environment variables and GIT:\n # https://github.com/pre-commit/pre-commit/issues/300\n # In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running\n # pre-commit hooks\n # In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE\n # while running pre-commit hooks in submodules.\n # GIT_DIR: Causes git clone to clone wrong thing\n # GIT_INDEX_FILE: Causes 'error invalid object ...' during commit\n return {\n k: v for k, v in os.environ.items()\n if not k.startswith('GIT_') or k in {'GIT_SSH'}\n }\n\n\ndef get_root():\n return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()\n\n\ndef get_git_dir(git_root='.'):\n opts = ('--git-common-dir', '--git-dir')\n _, out, _ = cmd_output('git', 'rev-parse', *opts, cwd=git_root)\n for line, opt in zip(out.splitlines(), opts):\n if line != opt: # pragma: no branch (git < 2.5)\n return os.path.normpath(os.path.join(git_root, line))\n else:\n raise AssertionError('unreachable: no git dir')\n\n\ndef get_remote_url(git_root):\n ret = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)[1]\n return ret.strip()\n\n\ndef is_in_merge_conflict():\n git_dir = get_git_dir('.')\n return (\n os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and\n os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))\n )\n\n\ndef parse_merge_msg_for_conflicts(merge_msg):\n # Conflicted files start with tabs\n return [\n line.lstrip(b'#').strip().decode('UTF-8')\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n if line.startswith((b'\\t', b'#\\t'))\n ]\n\n\ndef get_conflicted_files():\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n with open(os.path.join(get_git_dir('.'), 'MERGE_MSG'), 'rb') as f:\n merge_msg = f.read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n\n # This will get the rest of the changes made after the merge.\n # If they resolved the merge conflict by choosing a mesh of both sides\n # this will also include the conflicted files\n tree_hash = cmd_output('git', 'write-tree')[1].strip()\n merge_diff_filenames = zsplit(cmd_output(\n 'git', 'diff', '--name-only', '--no-ext-diff', '-z',\n '-m', tree_hash, 'HEAD', 'MERGE_HEAD',\n )[1])\n return set(merge_conflict_filenames) | set(merge_diff_filenames)\n\n\ndef get_staged_files():\n return zsplit(cmd_output(\n 'git', 'diff', '--staged', '--name-only', '--no-ext-diff', '-z',\n # Everything except for D\n '--diff-filter=ACMRTUXB',\n )[1])\n\n\ndef intent_to_add_files():\n _, stdout_binary, _ = cmd_output('git', 'status', '--porcelain', '-z')\n parts = list(reversed(zsplit(stdout_binary)))\n intent_to_add = []\n while parts:\n line = parts.pop()\n status, filename = line[:3], line[3:]\n if status[0] in {'C', 'R'}: # renames / moves have an additional arg\n parts.pop()\n if status[1] == 'A':\n intent_to_add.append(filename)\n return intent_to_add\n\n\ndef get_all_files():\n return zsplit(cmd_output('git', 'ls-files', '-z')[1])\n\n\ndef get_changed_files(new, old):\n return zsplit(cmd_output(\n 'git', 'diff', '--name-only', '--no-ext-diff', '-z',\n '{}...{}'.format(old, 
new),\n )[1])\n\n\ndef head_rev(remote):\n _, out, _ = cmd_output('git', 'ls-remote', '--exit-code', remote, 'HEAD')\n return out.split()[0]\n\n\ndef has_diff(*args, **kwargs):\n repo = kwargs.pop('repo', '.')\n assert not kwargs, kwargs\n cmd = ('git', 'diff', '--quiet', '--no-ext-diff') + args\n return cmd_output(*cmd, cwd=repo, retcode=None)[0]\n\n\ndef commit(repo='.'):\n env = no_git_env()\n name, email = 'pre-commit', '[email protected]'\n env['GIT_AUTHOR_NAME'] = env['GIT_COMMITTER_NAME'] = name\n env['GIT_AUTHOR_EMAIL'] = env['GIT_COMMITTER_EMAIL'] = email\n cmd = ('git', 'commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')\n cmd_output(*cmd, cwd=repo, env=env)\n\n\ndef git_path(name, repo='.'):\n _, out, _ = cmd_output('git', 'rev-parse', '--git-path', name, cwd=repo)\n return os.path.join(repo, out.strip())\n\n\ndef check_for_cygwin_mismatch():\n \"\"\"See https://github.com/pre-commit/pre-commit/issues/354\"\"\"\n if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)\n is_cygwin_python = sys.platform == 'cygwin'\n toplevel = cmd_output('git', 'rev-parse', '--show-toplevel')[1]\n is_cygwin_git = toplevel.startswith('/')\n\n if is_cygwin_python ^ is_cygwin_git:\n exe_type = {True: '(cygwin)', False: '(windows)'}\n logger.warn(\n 'pre-commit has detected a mix of cygwin python / git\\n'\n 'This combination is not supported, it is likely you will '\n 'receive an error later in the program.\\n'\n 'Make sure to use cygwin git+python while using cygwin\\n'\n 'These can be installed through the cygwin installer.\\n'\n ' - python {}\\n'\n ' - git {}\\n'.format(\n exe_type[is_cygwin_python], exe_type[is_cygwin_git],\n ),\n )\n", "path": "pre_commit/git.py" } ]
[ { "content": "from __future__ import unicode_literals\n\nimport logging\nimport os.path\nimport sys\n\nfrom pre_commit.util import cmd_output\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef zsplit(s):\n s = s.strip('\\0')\n if s:\n return s.split('\\0')\n else:\n return []\n\n\ndef no_git_env():\n # Too many bugs dealing with environment variables and GIT:\n # https://github.com/pre-commit/pre-commit/issues/300\n # In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running\n # pre-commit hooks\n # In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE\n # while running pre-commit hooks in submodules.\n # GIT_DIR: Causes git clone to clone wrong thing\n # GIT_INDEX_FILE: Causes 'error invalid object ...' during commit\n return {\n k: v for k, v in os.environ.items()\n if not k.startswith('GIT_') or k in {'GIT_SSH', 'GIT_SSH_COMMAND'}\n }\n\n\ndef get_root():\n return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()\n\n\ndef get_git_dir(git_root='.'):\n opts = ('--git-common-dir', '--git-dir')\n _, out, _ = cmd_output('git', 'rev-parse', *opts, cwd=git_root)\n for line, opt in zip(out.splitlines(), opts):\n if line != opt: # pragma: no branch (git < 2.5)\n return os.path.normpath(os.path.join(git_root, line))\n else:\n raise AssertionError('unreachable: no git dir')\n\n\ndef get_remote_url(git_root):\n ret = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)[1]\n return ret.strip()\n\n\ndef is_in_merge_conflict():\n git_dir = get_git_dir('.')\n return (\n os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and\n os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))\n )\n\n\ndef parse_merge_msg_for_conflicts(merge_msg):\n # Conflicted files start with tabs\n return [\n line.lstrip(b'#').strip().decode('UTF-8')\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n if line.startswith((b'\\t', b'#\\t'))\n ]\n\n\ndef get_conflicted_files():\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n with open(os.path.join(get_git_dir('.'), 'MERGE_MSG'), 'rb') as f:\n merge_msg = f.read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n\n # This will get the rest of the changes made after the merge.\n # If they resolved the merge conflict by choosing a mesh of both sides\n # this will also include the conflicted files\n tree_hash = cmd_output('git', 'write-tree')[1].strip()\n merge_diff_filenames = zsplit(cmd_output(\n 'git', 'diff', '--name-only', '--no-ext-diff', '-z',\n '-m', tree_hash, 'HEAD', 'MERGE_HEAD',\n )[1])\n return set(merge_conflict_filenames) | set(merge_diff_filenames)\n\n\ndef get_staged_files():\n return zsplit(cmd_output(\n 'git', 'diff', '--staged', '--name-only', '--no-ext-diff', '-z',\n # Everything except for D\n '--diff-filter=ACMRTUXB',\n )[1])\n\n\ndef intent_to_add_files():\n _, stdout_binary, _ = cmd_output('git', 'status', '--porcelain', '-z')\n parts = list(reversed(zsplit(stdout_binary)))\n intent_to_add = []\n while parts:\n line = parts.pop()\n status, filename = line[:3], line[3:]\n if status[0] in {'C', 'R'}: # renames / moves have an additional arg\n parts.pop()\n if status[1] == 'A':\n intent_to_add.append(filename)\n return intent_to_add\n\n\ndef get_all_files():\n return zsplit(cmd_output('git', 'ls-files', '-z')[1])\n\n\ndef get_changed_files(new, old):\n return zsplit(cmd_output(\n 'git', 'diff', '--name-only', '--no-ext-diff', '-z',\n 
'{}...{}'.format(old, new),\n )[1])\n\n\ndef head_rev(remote):\n _, out, _ = cmd_output('git', 'ls-remote', '--exit-code', remote, 'HEAD')\n return out.split()[0]\n\n\ndef has_diff(*args, **kwargs):\n repo = kwargs.pop('repo', '.')\n assert not kwargs, kwargs\n cmd = ('git', 'diff', '--quiet', '--no-ext-diff') + args\n return cmd_output(*cmd, cwd=repo, retcode=None)[0]\n\n\ndef commit(repo='.'):\n env = no_git_env()\n name, email = 'pre-commit', '[email protected]'\n env['GIT_AUTHOR_NAME'] = env['GIT_COMMITTER_NAME'] = name\n env['GIT_AUTHOR_EMAIL'] = env['GIT_COMMITTER_EMAIL'] = email\n cmd = ('git', 'commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')\n cmd_output(*cmd, cwd=repo, env=env)\n\n\ndef git_path(name, repo='.'):\n _, out, _ = cmd_output('git', 'rev-parse', '--git-path', name, cwd=repo)\n return os.path.join(repo, out.strip())\n\n\ndef check_for_cygwin_mismatch():\n \"\"\"See https://github.com/pre-commit/pre-commit/issues/354\"\"\"\n if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)\n is_cygwin_python = sys.platform == 'cygwin'\n toplevel = cmd_output('git', 'rev-parse', '--show-toplevel')[1]\n is_cygwin_git = toplevel.startswith('/')\n\n if is_cygwin_python ^ is_cygwin_git:\n exe_type = {True: '(cygwin)', False: '(windows)'}\n logger.warn(\n 'pre-commit has detected a mix of cygwin python / git\\n'\n 'This combination is not supported, it is likely you will '\n 'receive an error later in the program.\\n'\n 'Make sure to use cygwin git+python while using cygwin\\n'\n 'These can be installed through the cygwin installer.\\n'\n ' - python {}\\n'\n ' - git {}\\n'.format(\n exe_type[is_cygwin_python], exe_type[is_cygwin_git],\n ),\n )\n", "path": "pre_commit/git.py" } ]
diff --git a/pre_commit/git.py b/pre_commit/git.py index f0b504043..4849d7c64 100644 --- a/pre_commit/git.py +++ b/pre_commit/git.py @@ -29,7 +29,7 @@ def no_git_env(): # GIT_INDEX_FILE: Causes 'error invalid object ...' during commit return { k: v for k, v in os.environ.items() - if not k.startswith('GIT_') or k in {'GIT_SSH'} + if not k.startswith('GIT_') or k in {'GIT_SSH', 'GIT_SSH_COMMAND'} }
uclapi__uclapi-4023
[Feature Request] Add /authorize OAuth route

**Is your feature request related to a problem? Please describe.**

I have been attempting to use 'auth0-react' to implement OAuth with UCL API; however, this requires a fair bit of tinkering, as the defaults of this and many other auth libraries are to redirect to a "/authorize?client_id..." endpoint, which the UCL API does not support. While this can be avoided through customisation, would it be possible to add a "/authorize" route? I believe this could make it easier to use some of the "plug and play" Americanized auth libraries available.

**Describe the solution you'd like**

Edit uclapi/backend/uclapi/oauth/urls.py as below:

```
urlpatterns = [
    url(r'authorise/$', views.authorise),
    url(r'authorize/$', views.authorise),  # <===== Including views.authorise for the 'authorize/$' route.
    url(r'shibcallback', views.shibcallback),
    url(r'token$', views.token),
    url(r'tokens/scopes$', views.scope_map),
    url(r'tokens/test$', views.token_test),
    url(r'user/allow$', views.userallow),
    url(r'user/deny$', views.userdeny),
    url(r'user/data$', views.userdata),
    url(r'user/studentnumber$', views.get_student_number),
    url(r'deauthorise$', views.deauthorise_app),
    url(r'user/settings$', views.get_settings)
]
```

![image](https://user-images.githubusercontent.com/57833957/219857932-86c371d5-c0f0-444b-a179-6c312b48c9c2.png)

![image](https://user-images.githubusercontent.com/57833957/219857962-7da7e11e-475b-4d6c-bb71-1c6eefcf0354.png)
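For illustration, a minimal sketch of how the aliasing could be verified once both patterns point at the same view (the `/oauth/` mount prefix is an assumption about how this URLconf is included, and a configured Django settings module is required):

```python
from django.urls import resolve

# Both spellings should resolve to the same view function.
assert resolve("/oauth/authorise/").func is resolve("/oauth/authorize/").func
```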
[ { "content": "from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'authorise/$', views.authorise),\n url(r'adcallback', views.adcallback),\n url(r'token$', views.token),\n url(r'tokens/scopes$', views.scope_map),\n url(r'tokens/test$', views.token_test),\n url(r'user/allow$', views.userallow),\n url(r'user/deny$', views.userdeny),\n url(r'user/data$', views.userdata),\n url(r'user/studentnumber$', views.get_student_number),\n url(r'deauthorise$', views.deauthorise_app),\n url(r'user/settings$', views.get_settings)\n]\n", "path": "backend/uclapi/oauth/urls.py" } ]
[ { "content": "from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'authorise/$', views.authorise),\n url(r'authorize/$', views.authorise),\n url(r'adcallback', views.adcallback),\n url(r'token$', views.token),\n url(r'tokens/scopes$', views.scope_map),\n url(r'tokens/test$', views.token_test),\n url(r'user/allow$', views.userallow),\n url(r'user/deny$', views.userdeny),\n url(r'user/data$', views.userdata),\n url(r'user/studentnumber$', views.get_student_number),\n url(r'deauthorise$', views.deauthorise_app),\n url(r'user/settings$', views.get_settings)\n]\n", "path": "backend/uclapi/oauth/urls.py" } ]
diff --git a/backend/uclapi/oauth/urls.py b/backend/uclapi/oauth/urls.py index 0f9a33847..7564f4d21 100644 --- a/backend/uclapi/oauth/urls.py +++ b/backend/uclapi/oauth/urls.py @@ -4,6 +4,7 @@ urlpatterns = [ url(r'authorise/$', views.authorise), + url(r'authorize/$', views.authorise), url(r'adcallback', views.adcallback), url(r'token$', views.token), url(r'tokens/scopes$', views.scope_map), diff --git a/uclapi.openapi.json b/uclapi.openapi.json index eb0b13fe1..7fe18e8e8 100644 --- a/uclapi.openapi.json +++ b/uclapi.openapi.json @@ -2170,7 +2170,7 @@ }, "OAuthToken": { "type": "apiKey", - "description": "This API requires you to pass your OAuth2 token as a query parameter called 'token'. Use the /authorize and /oauth/token endpoints to authorize a user and get this token.", + "description": "This API requires you to pass your OAuth2 token as a query parameter called 'token'. Use the /authorise and /oauth/token endpoints to authorize a user and get this token.", "name": "token", "in": "query" },
googleapis__google-cloud-python-9604
Release google-cloud-storage

Hi @tseaver, could you help cut a release for google-cloud-storage?

cc: @JesseLovelace
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-storage\"\ndescription = \"Google Cloud Storage API client library\"\nversion = \"1.21.0\"\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-auth >= 1.2.0\",\n \"google-cloud-core >= 1.0.3, < 2.0dev\",\n \"google-resumable-media >= 0.3.1, != 0.4.0, < 0.5dev\",\n]\nextras = {}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages() if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/GoogleCloudPlatform/google-cloud-python\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "storage/setup.py" } ]
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-storage\"\ndescription = \"Google Cloud Storage API client library\"\nversion = \"1.22.0\"\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-auth >= 1.2.0\",\n \"google-cloud-core >= 1.0.3, < 2.0dev\",\n \"google-resumable-media >= 0.3.1, != 0.4.0, < 0.5dev\",\n]\nextras = {}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages() if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/GoogleCloudPlatform/google-cloud-python\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "storage/setup.py" } ]
diff --git a/storage/CHANGELOG.md b/storage/CHANGELOG.md index ff61b97c9cfe..55169d4ed82e 100644 --- a/storage/CHANGELOG.md +++ b/storage/CHANGELOG.md @@ -4,6 +4,14 @@ [1]: https://pypi.org/project/google-cloud-storage/#history +## 1.22.0 + +11-05-2019 10:22 PST + + +### New Features +- Add UBLA attrs to IAMConfiguration. ([#9475](https://github.com/googleapis/google-cloud-python/pull/9475)) + ## 1.21.0 10-28-2019 21:52 PDT diff --git a/storage/setup.py b/storage/setup.py index bbf6890bb7c4..5b3ddfa70f09 100644 --- a/storage/setup.py +++ b/storage/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-storage" description = "Google Cloud Storage API client library" -version = "1.21.0" +version = "1.22.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta'
wagtail__wagtail-9369
Wagtail version number in admin has an extra dot

This is tagged as a [**good first issue**](https://github.com/wagtail/wagtail/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22good+first+issue%22) – if you want to take it on, please leave a comment below to say so, no need to ask for permission, first come first serve! State your intent and share any questions / plans you have on how to approach the task.

### Issue Summary

In the Settings menu, we display Wagtail’s version number at the bottom:

<img width="409" alt="dot-menu" src="https://user-images.githubusercontent.com/877585/195653500-98c8c982-f39f-4f24-a8df-3ebbc7401af1.png">

This has been there for a while and I don’t think there is a specific reason for it. It’d be nice to display the version numbers the same as what we do in the docs. So rather than `v.2.16.1` – `v2.16.1`.

### Steps to Reproduce

1. Open the Wagtail admin
2. Open the Settings menu and look at the bottom

### Technical details

- Wagtail version: tested in v.2.16.1

### Proposed solution

I believe this could be as simple as removing the "." here: https://github.com/wagtail/wagtail/blob/main/wagtail/admin/wagtail_hooks.py#L105

```diff
-footer_text="Wagtail v." + __version__,
+footer_text="Wagtail v" + __version__,
```

We’d need to test that this doesn’t affect our other code relying on version numbers.
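For illustration, a minimal sketch of the one-character change (the version value here is hypothetical):

```python
__version__ = "2.16.1"  # hypothetical value for illustration

assert "Wagtail v." + __version__ == "Wagtail v.2.16.1"  # current footer text, extra dot
assert "Wagtail v" + __version__ == "Wagtail v2.16.1"    # proposed, matching the docs style
```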
[ { "content": "from django.conf import settings\nfrom django.contrib.auth.models import Permission\nfrom django.urls import reverse\nfrom django.utils.http import urlencode\nfrom django.utils.translation import gettext\nfrom django.utils.translation import gettext_lazy as _\nfrom draftjs_exporter.dom import DOM\n\nimport wagtail.admin.rich_text.editors.draftail.features as draftail_features\nfrom wagtail import __version__, hooks\nfrom wagtail.admin.admin_url_finder import (\n ModelAdminURLFinder,\n register_admin_url_finder,\n)\nfrom wagtail.admin.auth import user_has_any_page_permission\nfrom wagtail.admin.forms.collections import GroupCollectionManagementPermissionFormSet\nfrom wagtail.admin.menu import MenuItem, SubmenuMenuItem, reports_menu, settings_menu\nfrom wagtail.admin.navigation import get_explorable_root_page\nfrom wagtail.admin.rich_text.converters.contentstate import link_entity\nfrom wagtail.admin.rich_text.converters.editor_html import (\n LinkTypeRule,\n PageLinkHandler,\n WhitelistRule,\n)\nfrom wagtail.admin.rich_text.converters.html_to_contentstate import (\n BlockElementHandler,\n ExternalLinkElementHandler,\n HorizontalRuleHandler,\n InlineStyleElementHandler,\n ListElementHandler,\n ListItemElementHandler,\n PageLinkElementHandler,\n)\nfrom wagtail.admin.search import SearchArea\nfrom wagtail.admin.site_summary import PagesSummaryItem\nfrom wagtail.admin.ui.sidebar import (\n PageExplorerMenuItem as PageExplorerMenuItemComponent,\n)\nfrom wagtail.admin.ui.sidebar import SubMenuItem as SubMenuItemComponent\nfrom wagtail.admin.views.pages.bulk_actions import (\n DeleteBulkAction,\n MoveBulkAction,\n PublishBulkAction,\n UnpublishBulkAction,\n)\nfrom wagtail.admin.viewsets import viewsets\nfrom wagtail.admin.widgets import Button, ButtonWithDropdownFromHook, PageListingButton\nfrom wagtail.models import Collection, Page, Task, UserPagePermissionsProxy, Workflow\nfrom wagtail.permissions import (\n collection_permission_policy,\n task_permission_policy,\n workflow_permission_policy,\n)\nfrom wagtail.whitelist import allow_without_attributes, attribute_rule, check_url\n\n\nclass ExplorerMenuItem(MenuItem):\n def is_shown(self, request):\n return user_has_any_page_permission(request.user)\n\n def get_context(self, request):\n context = super().get_context(request)\n start_page = get_explorable_root_page(request.user)\n\n if start_page:\n context[\"start_page_id\"] = start_page.id\n\n return context\n\n def render_component(self, request):\n start_page = get_explorable_root_page(request.user)\n\n if start_page:\n return PageExplorerMenuItemComponent(\n self.name,\n self.label,\n self.url,\n start_page.id,\n icon_name=self.icon_name,\n classnames=self.classnames,\n )\n else:\n return super().render_component(request)\n\n\[email protected](\"register_admin_menu_item\")\ndef register_explorer_menu_item():\n return ExplorerMenuItem(\n _(\"Pages\"),\n reverse(\"wagtailadmin_explore_root\"),\n name=\"explorer\",\n icon_name=\"folder-open-inverse\",\n order=100,\n )\n\n\nclass SettingsMenuItem(SubmenuMenuItem):\n def render_component(self, request):\n return SubMenuItemComponent(\n self.name,\n self.label,\n self.menu.render_component(request),\n icon_name=self.icon_name,\n classnames=self.classnames,\n footer_text=\"Wagtail v.\" + __version__,\n )\n\n\[email protected](\"register_admin_menu_item\")\ndef register_settings_menu():\n return SettingsMenuItem(_(\"Settings\"), settings_menu, icon_name=\"cogs\", order=10000)\n\n\[email protected](\"register_permissions\")\ndef 
register_permissions():\n return Permission.objects.filter(\n content_type__app_label=\"wagtailadmin\", codename=\"access_admin\"\n )\n\n\nclass PageSearchArea(SearchArea):\n def __init__(self):\n super().__init__(\n _(\"Pages\"),\n reverse(\"wagtailadmin_pages:search\"),\n name=\"pages\",\n icon_name=\"folder-open-inverse\",\n order=100,\n )\n\n def is_shown(self, request):\n return user_has_any_page_permission(request.user)\n\n\[email protected](\"register_admin_search_area\")\ndef register_pages_search_area():\n return PageSearchArea()\n\n\[email protected](\"register_group_permission_panel\")\ndef register_collection_permissions_panel():\n return GroupCollectionManagementPermissionFormSet\n\n\nclass CollectionsMenuItem(MenuItem):\n def is_shown(self, request):\n return collection_permission_policy.user_has_any_permission(\n request.user, [\"add\", \"change\", \"delete\"]\n )\n\n\[email protected](\"register_settings_menu_item\")\ndef register_collections_menu_item():\n return CollectionsMenuItem(\n _(\"Collections\"),\n reverse(\"wagtailadmin_collections:index\"),\n icon_name=\"folder-open-1\",\n order=700,\n )\n\n\nclass WorkflowsMenuItem(MenuItem):\n def is_shown(self, request):\n if not getattr(settings, \"WAGTAIL_WORKFLOW_ENABLED\", True):\n return False\n\n return workflow_permission_policy.user_has_any_permission(\n request.user, [\"add\", \"change\", \"delete\"]\n )\n\n\nclass WorkflowTasksMenuItem(MenuItem):\n def is_shown(self, request):\n if not getattr(settings, \"WAGTAIL_WORKFLOW_ENABLED\", True):\n return False\n\n return task_permission_policy.user_has_any_permission(\n request.user, [\"add\", \"change\", \"delete\"]\n )\n\n\[email protected](\"register_settings_menu_item\")\ndef register_workflows_menu_item():\n return WorkflowsMenuItem(\n _(\"Workflows\"),\n reverse(\"wagtailadmin_workflows:index\"),\n icon_name=\"tasks\",\n order=100,\n )\n\n\[email protected](\"register_settings_menu_item\")\ndef register_workflow_tasks_menu_item():\n return WorkflowTasksMenuItem(\n _(\"Workflow tasks\"),\n reverse(\"wagtailadmin_workflows:task_index\"),\n icon_name=\"thumbtack\",\n order=150,\n )\n\n\[email protected](\"register_page_listing_buttons\")\ndef page_listing_buttons(page, page_perms, next_url=None):\n if page_perms.can_edit():\n yield PageListingButton(\n _(\"Edit\"),\n reverse(\"wagtailadmin_pages:edit\", args=[page.id]),\n attrs={\n \"aria-label\": _(\"Edit '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=10,\n )\n if page.has_unpublished_changes and page.is_previewable():\n yield PageListingButton(\n _(\"View draft\"),\n reverse(\"wagtailadmin_pages:view_draft\", args=[page.id]),\n attrs={\n \"aria-label\": _(\"Preview draft version of '%(title)s'\")\n % {\"title\": page.get_admin_display_title()},\n \"rel\": \"noreferrer\",\n },\n priority=20,\n )\n if page.live and page.url:\n yield PageListingButton(\n _(\"View live\"),\n page.url,\n attrs={\n \"rel\": \"noreferrer\",\n \"aria-label\": _(\"View live version of '%(title)s'\")\n % {\"title\": page.get_admin_display_title()},\n },\n priority=30,\n )\n if page_perms.can_add_subpage():\n yield PageListingButton(\n _(\"Add child page\"),\n reverse(\"wagtailadmin_pages:add_subpage\", args=[page.id]),\n attrs={\n \"aria-label\": _(\"Add a child page to '%(title)s' \")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=40,\n )\n\n yield ButtonWithDropdownFromHook(\n _(\"More\"),\n hook_name=\"register_page_listing_more_buttons\",\n page=page,\n page_perms=page_perms,\n 
next_url=next_url,\n attrs={\n \"target\": \"_blank\",\n \"rel\": \"noreferrer\",\n \"title\": _(\"View more options for '%(title)s'\")\n % {\"title\": page.get_admin_display_title()},\n },\n priority=50,\n )\n\n\[email protected](\"register_page_listing_more_buttons\")\ndef page_listing_more_buttons(page, page_perms, next_url=None):\n if page_perms.can_move():\n yield Button(\n _(\"Move\"),\n reverse(\"wagtailadmin_pages:move\", args=[page.id]),\n attrs={\n \"title\": _(\"Move page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=10,\n )\n if page_perms.can_copy():\n url = reverse(\"wagtailadmin_pages:copy\", args=[page.id])\n if next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Copy\"),\n url,\n attrs={\n \"title\": _(\"Copy page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=20,\n )\n if page_perms.can_delete():\n url = reverse(\"wagtailadmin_pages:delete\", args=[page.id])\n include_next_url = True\n\n # After deleting the page, it is impossible to redirect to it.\n if next_url == reverse(\"wagtailadmin_explore\", args=[page.id]):\n include_next_url = False\n\n if next_url and include_next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Delete\"),\n url,\n attrs={\n \"title\": _(\"Delete page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=30,\n )\n if page_perms.can_unpublish():\n url = reverse(\"wagtailadmin_pages:unpublish\", args=[page.id])\n if next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Unpublish\"),\n url,\n attrs={\n \"title\": _(\"Unpublish page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=40,\n )\n if page_perms.can_view_revisions():\n yield Button(\n _(\"History\"),\n reverse(\"wagtailadmin_pages:history\", args=[page.id]),\n attrs={\n \"title\": _(\"View page history for '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=50,\n )\n\n if page_perms.can_reorder_children():\n yield Button(\n _(\"Sort menu order\"),\n \"?ordering=ord\",\n attrs={\n \"title\": _(\"Change ordering of child pages of '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=60,\n )\n\n\[email protected](\"register_page_header_buttons\")\ndef page_header_buttons(page, page_perms, next_url=None):\n if page_perms.can_edit():\n yield Button(\n _(\"Edit\"),\n reverse(\"wagtailadmin_pages:edit\", args=[page.id]),\n icon_name=\"edit\",\n attrs={\n \"title\": _(\"Edit '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=10,\n )\n if page_perms.can_move():\n yield Button(\n _(\"Move\"),\n reverse(\"wagtailadmin_pages:move\", args=[page.id]),\n icon_name=\"arrow-right-full\",\n attrs={\n \"title\": _(\"Move page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=20,\n )\n if page_perms.can_copy():\n url = reverse(\"wagtailadmin_pages:copy\", args=[page.id])\n if next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Copy\"),\n url,\n icon_name=\"copy\",\n attrs={\n \"title\": _(\"Copy page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=30,\n )\n if page_perms.can_add_subpage():\n yield Button(\n _(\"Add child page\"),\n reverse(\"wagtailadmin_pages:add_subpage\", args=[page.id]),\n icon_name=\"circle-plus\",\n attrs={\n \"aria-label\": _(\"Add a child page to '%(title)s' \")\n % {\"title\": page.get_admin_display_title()},\n 
},\n priority=40,\n )\n if page_perms.can_delete():\n url = reverse(\"wagtailadmin_pages:delete\", args=[page.id])\n\n include_next_url = True\n\n # After deleting the page, it is impossible to redirect to it.\n if next_url == reverse(\"wagtailadmin_explore\", args=[page.id]):\n include_next_url = False\n\n if next_url == reverse(\"wagtailadmin_pages:edit\", args=[page.id]):\n include_next_url = False\n\n if next_url and include_next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Delete\"),\n url,\n icon_name=\"bin\",\n attrs={\n \"title\": _(\"Delete page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=50,\n )\n if page_perms.can_unpublish():\n url = reverse(\"wagtailadmin_pages:unpublish\", args=[page.id])\n if next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Unpublish\"),\n url,\n icon_name=\"download-alt\",\n attrs={\n \"title\": _(\"Unpublish page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=60,\n )\n if page_perms.can_reorder_children():\n url = reverse(\"wagtailadmin_explore\", args=[page.id])\n url += \"?ordering=ord\"\n yield Button(\n _(\"Sort menu order\"),\n url,\n icon_name=\"list-ul\",\n attrs={\n \"title\": _(\"Change ordering of child pages of '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=70,\n )\n\n\[email protected](\"register_admin_urls\")\ndef register_viewsets_urls():\n viewsets.populate()\n return viewsets.get_urlpatterns()\n\n\[email protected](\"register_rich_text_features\")\ndef register_core_features(features):\n features.register_converter_rule(\n \"editorhtml\",\n \"link\",\n [\n WhitelistRule(\"a\", attribute_rule({\"href\": check_url})),\n LinkTypeRule(\"page\", PageLinkHandler),\n ],\n )\n\n features.register_converter_rule(\n \"editorhtml\",\n \"bold\",\n [\n WhitelistRule(\"b\", allow_without_attributes),\n WhitelistRule(\"strong\", allow_without_attributes),\n ],\n )\n\n features.register_converter_rule(\n \"editorhtml\",\n \"italic\",\n [\n WhitelistRule(\"i\", allow_without_attributes),\n WhitelistRule(\"em\", allow_without_attributes),\n ],\n )\n\n headings_elements = [\"h1\", \"h2\", \"h3\", \"h4\", \"h5\", \"h6\"]\n for order, element in enumerate(headings_elements):\n features.register_converter_rule(\n \"editorhtml\", element, [WhitelistRule(element, allow_without_attributes)]\n )\n\n features.register_converter_rule(\n \"editorhtml\",\n \"ol\",\n [\n WhitelistRule(\"ol\", allow_without_attributes),\n WhitelistRule(\"li\", allow_without_attributes),\n ],\n )\n\n features.register_converter_rule(\n \"editorhtml\",\n \"ul\",\n [\n WhitelistRule(\"ul\", allow_without_attributes),\n WhitelistRule(\"li\", allow_without_attributes),\n ],\n )\n\n # Draftail\n features.register_editor_plugin(\n \"draftail\", \"hr\", draftail_features.BooleanFeature(\"enableHorizontalRule\")\n )\n features.register_converter_rule(\n \"contentstate\",\n \"hr\",\n {\n \"from_database_format\": {\n \"hr\": HorizontalRuleHandler(),\n },\n \"to_database_format\": {\n \"entity_decorators\": {\n \"HORIZONTAL_RULE\": lambda props: DOM.create_element(\"hr\")\n }\n },\n },\n )\n\n features.register_editor_plugin(\n \"draftail\",\n \"h1\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h1\",\n \"type\": \"header-one\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 1},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h1\",\n {\n \"from_database_format\": {\n \"h1\": 
BlockElementHandler(\"header-one\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-one\": \"h1\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"h2\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h2\",\n \"type\": \"header-two\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 2},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h2\",\n {\n \"from_database_format\": {\n \"h2\": BlockElementHandler(\"header-two\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-two\": \"h2\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"h3\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h3\",\n \"type\": \"header-three\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 3},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h3\",\n {\n \"from_database_format\": {\n \"h3\": BlockElementHandler(\"header-three\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-three\": \"h3\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"h4\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h4\",\n \"type\": \"header-four\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 4},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h4\",\n {\n \"from_database_format\": {\n \"h4\": BlockElementHandler(\"header-four\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-four\": \"h4\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"h5\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h5\",\n \"type\": \"header-five\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 5},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h5\",\n {\n \"from_database_format\": {\n \"h5\": BlockElementHandler(\"header-five\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-five\": \"h5\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"h6\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h6\",\n \"type\": \"header-six\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 6},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h6\",\n {\n \"from_database_format\": {\n \"h6\": BlockElementHandler(\"header-six\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-six\": \"h6\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"ul\",\n draftail_features.BlockFeature(\n {\n \"type\": \"unordered-list-item\",\n \"icon\": \"list-ul\",\n \"description\": gettext(\"Bulleted list\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"ul\",\n {\n \"from_database_format\": {\n \"ul\": ListElementHandler(\"unordered-list-item\"),\n \"li\": ListItemElementHandler(),\n },\n \"to_database_format\": {\n \"block_map\": {\"unordered-list-item\": {\"element\": \"li\", \"wrapper\": \"ul\"}}\n },\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"ol\",\n draftail_features.BlockFeature(\n {\n \"type\": \"ordered-list-item\",\n \"icon\": \"list-ol\",\n \"description\": gettext(\"Numbered list\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"ol\",\n {\n \"from_database_format\": {\n \"ol\": ListElementHandler(\"ordered-list-item\"),\n \"li\": ListItemElementHandler(),\n },\n \"to_database_format\": {\n \"block_map\": {\"ordered-list-item\": {\"element\": \"li\", \"wrapper\": \"ol\"}}\n },\n },\n )\n features.register_editor_plugin(\n 
\"draftail\",\n \"blockquote\",\n draftail_features.BlockFeature(\n {\n \"type\": \"blockquote\",\n \"icon\": \"openquote\",\n \"description\": gettext(\"Blockquote\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"blockquote\",\n {\n \"from_database_format\": {\n \"blockquote\": BlockElementHandler(\"blockquote\"),\n },\n \"to_database_format\": {\"block_map\": {\"blockquote\": \"blockquote\"}},\n },\n )\n\n features.register_editor_plugin(\n \"draftail\",\n \"bold\",\n draftail_features.InlineStyleFeature(\n {\n \"type\": \"BOLD\",\n \"icon\": \"bold\",\n \"description\": gettext(\"Bold\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"bold\",\n {\n \"from_database_format\": {\n \"b\": InlineStyleElementHandler(\"BOLD\"),\n \"strong\": InlineStyleElementHandler(\"BOLD\"),\n },\n \"to_database_format\": {\"style_map\": {\"BOLD\": \"b\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"italic\",\n draftail_features.InlineStyleFeature(\n {\n \"type\": \"ITALIC\",\n \"icon\": \"italic\",\n \"description\": gettext(\"Italic\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"italic\",\n {\n \"from_database_format\": {\n \"i\": InlineStyleElementHandler(\"ITALIC\"),\n \"em\": InlineStyleElementHandler(\"ITALIC\"),\n },\n \"to_database_format\": {\"style_map\": {\"ITALIC\": \"i\"}},\n },\n )\n\n features.register_editor_plugin(\n \"draftail\",\n \"link\",\n draftail_features.EntityFeature(\n {\n \"type\": \"LINK\",\n \"icon\": \"link\",\n \"description\": gettext(\"Link\"),\n # We want to enforce constraints on which links can be pasted into rich text.\n # Keep only the attributes Wagtail needs.\n \"attributes\": [\"url\", \"id\", \"parentId\"],\n \"allowlist\": {\n # Keep pasted links with http/https protocol, and not-pasted links (href = undefined).\n \"href\": \"^(http:|https:|undefined$)\",\n },\n },\n js=[\n \"wagtailadmin/js/page-chooser-modal.js\",\n ],\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"link\",\n {\n \"from_database_format\": {\n \"a[href]\": ExternalLinkElementHandler(\"LINK\"),\n 'a[linktype=\"page\"]': PageLinkElementHandler(\"LINK\"),\n },\n \"to_database_format\": {\"entity_decorators\": {\"LINK\": link_entity}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"superscript\",\n draftail_features.InlineStyleFeature(\n {\n \"type\": \"SUPERSCRIPT\",\n \"icon\": \"superscript\",\n \"description\": gettext(\"Superscript\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"superscript\",\n {\n \"from_database_format\": {\n \"sup\": InlineStyleElementHandler(\"SUPERSCRIPT\"),\n },\n \"to_database_format\": {\"style_map\": {\"SUPERSCRIPT\": \"sup\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"subscript\",\n draftail_features.InlineStyleFeature(\n {\n \"type\": \"SUBSCRIPT\",\n \"icon\": \"subscript\",\n \"description\": gettext(\"Subscript\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"subscript\",\n {\n \"from_database_format\": {\n \"sub\": InlineStyleElementHandler(\"SUBSCRIPT\"),\n },\n \"to_database_format\": {\"style_map\": {\"SUBSCRIPT\": \"sub\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"strikethrough\",\n draftail_features.InlineStyleFeature(\n {\n \"type\": \"STRIKETHROUGH\",\n \"icon\": \"strikethrough\",\n \"description\": gettext(\"Strikethrough\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n 
\"strikethrough\",\n {\n \"from_database_format\": {\n \"s\": InlineStyleElementHandler(\"STRIKETHROUGH\"),\n },\n \"to_database_format\": {\"style_map\": {\"STRIKETHROUGH\": \"s\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"code\",\n draftail_features.InlineStyleFeature(\n {\n \"type\": \"CODE\",\n \"icon\": \"code\",\n \"description\": gettext(\"Code\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"code\",\n {\n \"from_database_format\": {\n \"code\": InlineStyleElementHandler(\"CODE\"),\n },\n \"to_database_format\": {\"style_map\": {\"CODE\": \"code\"}},\n },\n )\n\n\nclass LockedPagesMenuItem(MenuItem):\n def is_shown(self, request):\n return UserPagePermissionsProxy(request.user).can_remove_locks()\n\n\nclass WorkflowReportMenuItem(MenuItem):\n def is_shown(self, request):\n return getattr(settings, \"WAGTAIL_WORKFLOW_ENABLED\", True)\n\n\nclass SiteHistoryReportMenuItem(MenuItem):\n def is_shown(self, request):\n return UserPagePermissionsProxy(request.user).explorable_pages().exists()\n\n\nclass AgingPagesReportMenuItem(MenuItem):\n def is_shown(self, request):\n return getattr(settings, \"WAGTAIL_AGING_PAGES_ENABLED\", True)\n\n\[email protected](\"register_reports_menu_item\")\ndef register_locked_pages_menu_item():\n return LockedPagesMenuItem(\n _(\"Locked pages\"),\n reverse(\"wagtailadmin_reports:locked_pages\"),\n icon_name=\"lock\",\n order=700,\n )\n\n\[email protected](\"register_reports_menu_item\")\ndef register_workflow_report_menu_item():\n return WorkflowReportMenuItem(\n _(\"Workflows\"),\n reverse(\"wagtailadmin_reports:workflow\"),\n icon_name=\"tasks\",\n order=800,\n )\n\n\[email protected](\"register_reports_menu_item\")\ndef register_workflow_tasks_report_menu_item():\n return WorkflowReportMenuItem(\n _(\"Workflow tasks\"),\n reverse(\"wagtailadmin_reports:workflow_tasks\"),\n icon_name=\"thumbtack\",\n order=900,\n )\n\n\[email protected](\"register_reports_menu_item\")\ndef register_site_history_report_menu_item():\n return SiteHistoryReportMenuItem(\n _(\"Site history\"),\n reverse(\"wagtailadmin_reports:site_history\"),\n icon_name=\"history\",\n order=1000,\n )\n\n\[email protected](\"register_reports_menu_item\")\ndef register_aging_pages_report_menu_item():\n return AgingPagesReportMenuItem(\n _(\"Aging pages\"),\n reverse(\"wagtailadmin_reports:aging_pages\"),\n icon_name=\"time\",\n order=1100,\n )\n\n\[email protected](\"register_admin_menu_item\")\ndef register_reports_menu():\n return SubmenuMenuItem(_(\"Reports\"), reports_menu, icon_name=\"site\", order=9000)\n\n\[email protected](\"register_icons\")\ndef register_icons(icons):\n for icon in [\n \"angle-double-left.svg\",\n \"angle-double-right.svg\",\n \"arrow-down-big.svg\",\n \"arrow-down.svg\",\n \"arrow-right-full.svg\",\n \"arrow-left.svg\",\n \"arrow-right.svg\",\n \"arrow-up-big.svg\",\n \"arrow-up.svg\",\n \"arrows-up-down.svg\",\n \"bars.svg\",\n \"bin.svg\",\n \"bold.svg\",\n \"breadcrumb-expand.svg\",\n \"calendar.svg\",\n \"calendar-alt.svg\",\n \"calendar-check.svg\",\n \"chain-broken.svg\",\n \"check.svg\",\n \"chevron-down.svg\",\n \"circle-check.svg\",\n \"circle-plus.svg\",\n \"clipboard-list.svg\",\n \"code.svg\",\n \"cog.svg\",\n \"cogs.svg\",\n \"copy.svg\",\n \"collapse-down.svg\",\n \"collapse-up.svg\",\n \"comment.svg\",\n \"comment-add.svg\",\n \"comment-add-reversed.svg\",\n \"cross.svg\",\n \"cut.svg\",\n \"date.svg\",\n \"desktop.svg\",\n \"doc-empty-inverse.svg\",\n \"doc-empty.svg\",\n \"doc-full-inverse.svg\",\n 
\"doc-full.svg\", # aka file-text-alt\n \"dots-vertical.svg\",\n \"dots-horizontal.svg\",\n \"download-alt.svg\",\n \"download.svg\",\n \"draft.svg\",\n \"duplicate.svg\",\n \"edit.svg\",\n \"ellipsis-v.svg\",\n \"expand-right.svg\",\n \"error.svg\",\n \"folder-inverse.svg\",\n \"folder-open-1.svg\",\n \"folder-open-inverse.svg\",\n \"folder.svg\",\n \"form.svg\",\n \"globe.svg\",\n \"grip.svg\",\n \"group.svg\",\n \"h1.svg\",\n \"h2.svg\",\n \"h3.svg\",\n \"h4.svg\",\n \"h5.svg\",\n \"h6.svg\",\n \"help.svg\",\n \"history.svg\",\n \"home.svg\",\n \"horizontalrule.svg\",\n \"image.svg\", # aka picture\n \"info-circle.svg\",\n \"italic.svg\",\n \"link.svg\",\n \"link-external.svg\",\n \"list-ol.svg\",\n \"list-ul.svg\",\n \"lock-open.svg\",\n \"lock.svg\",\n \"login.svg\",\n \"logout.svg\",\n \"mail.svg\",\n \"media.svg\",\n \"mobile-alt.svg\",\n \"no-view.svg\",\n \"openquote.svg\",\n \"order-down.svg\",\n \"order-up.svg\",\n \"order.svg\",\n \"password.svg\",\n \"pick.svg\",\n \"pilcrow.svg\",\n \"placeholder.svg\", # aka marquee\n \"plus-inverse.svg\",\n \"plus.svg\",\n \"radio-empty.svg\",\n \"radio-full.svg\",\n \"redirect.svg\",\n \"repeat.svg\",\n \"reset.svg\",\n \"resubmit.svg\",\n \"rotate.svg\",\n \"search.svg\",\n \"site.svg\",\n \"snippet.svg\",\n \"spinner.svg\",\n \"strikethrough.svg\",\n \"success.svg\",\n \"subscript.svg\",\n \"superscript.svg\",\n \"table.svg\",\n \"tablet-alt.svg\",\n \"tag.svg\",\n \"tasks.svg\",\n \"thumbtack.svg\",\n \"tick-inverse.svg\",\n \"tick.svg\",\n \"time.svg\",\n \"title.svg\",\n \"undo.svg\",\n \"uni52.svg\", # Is this a redundant icon?\n \"upload.svg\",\n \"user.svg\",\n \"view.svg\",\n \"wagtail-inverse.svg\",\n \"wagtail.svg\",\n \"warning.svg\",\n ]:\n icons.append(\"wagtailadmin/icons/{}\".format(icon))\n return icons\n\n\[email protected](\"construct_homepage_summary_items\")\ndef add_pages_summary_item(request, items):\n items.insert(0, PagesSummaryItem(request))\n\n\nclass PageAdminURLFinder:\n def __init__(self, user):\n self.page_perms = user and UserPagePermissionsProxy(user)\n\n def get_edit_url(self, instance):\n if self.page_perms and not self.page_perms.for_page(instance).can_edit():\n return None\n else:\n return reverse(\"wagtailadmin_pages:edit\", args=(instance.pk,))\n\n\nregister_admin_url_finder(Page, PageAdminURLFinder)\n\n\nclass CollectionAdminURLFinder(ModelAdminURLFinder):\n permission_policy = collection_permission_policy\n edit_url_name = \"wagtailadmin_collections:edit\"\n\n\nregister_admin_url_finder(Collection, CollectionAdminURLFinder)\n\n\nclass WorkflowAdminURLFinder(ModelAdminURLFinder):\n permission_policy = workflow_permission_policy\n edit_url_name = \"wagtailadmin_workflows:edit\"\n\n\nregister_admin_url_finder(Workflow, WorkflowAdminURLFinder)\n\n\nclass WorkflowTaskAdminURLFinder(ModelAdminURLFinder):\n permission_policy = task_permission_policy\n edit_url_name = \"wagtailadmin_workflows:edit_task\"\n\n\nregister_admin_url_finder(Task, WorkflowTaskAdminURLFinder)\n\n\nfor action_class in [\n DeleteBulkAction,\n MoveBulkAction,\n PublishBulkAction,\n UnpublishBulkAction,\n]:\n hooks.register(\"register_bulk_action\", action_class)\n", "path": "wagtail/admin/wagtail_hooks.py" } ]
[ { "content": "from django.conf import settings\nfrom django.contrib.auth.models import Permission\nfrom django.urls import reverse\nfrom django.utils.http import urlencode\nfrom django.utils.translation import gettext\nfrom django.utils.translation import gettext_lazy as _\nfrom draftjs_exporter.dom import DOM\n\nimport wagtail.admin.rich_text.editors.draftail.features as draftail_features\nfrom wagtail import __version__, hooks\nfrom wagtail.admin.admin_url_finder import (\n ModelAdminURLFinder,\n register_admin_url_finder,\n)\nfrom wagtail.admin.auth import user_has_any_page_permission\nfrom wagtail.admin.forms.collections import GroupCollectionManagementPermissionFormSet\nfrom wagtail.admin.menu import MenuItem, SubmenuMenuItem, reports_menu, settings_menu\nfrom wagtail.admin.navigation import get_explorable_root_page\nfrom wagtail.admin.rich_text.converters.contentstate import link_entity\nfrom wagtail.admin.rich_text.converters.editor_html import (\n LinkTypeRule,\n PageLinkHandler,\n WhitelistRule,\n)\nfrom wagtail.admin.rich_text.converters.html_to_contentstate import (\n BlockElementHandler,\n ExternalLinkElementHandler,\n HorizontalRuleHandler,\n InlineStyleElementHandler,\n ListElementHandler,\n ListItemElementHandler,\n PageLinkElementHandler,\n)\nfrom wagtail.admin.search import SearchArea\nfrom wagtail.admin.site_summary import PagesSummaryItem\nfrom wagtail.admin.ui.sidebar import (\n PageExplorerMenuItem as PageExplorerMenuItemComponent,\n)\nfrom wagtail.admin.ui.sidebar import SubMenuItem as SubMenuItemComponent\nfrom wagtail.admin.views.pages.bulk_actions import (\n DeleteBulkAction,\n MoveBulkAction,\n PublishBulkAction,\n UnpublishBulkAction,\n)\nfrom wagtail.admin.viewsets import viewsets\nfrom wagtail.admin.widgets import Button, ButtonWithDropdownFromHook, PageListingButton\nfrom wagtail.models import Collection, Page, Task, UserPagePermissionsProxy, Workflow\nfrom wagtail.permissions import (\n collection_permission_policy,\n task_permission_policy,\n workflow_permission_policy,\n)\nfrom wagtail.whitelist import allow_without_attributes, attribute_rule, check_url\n\n\nclass ExplorerMenuItem(MenuItem):\n def is_shown(self, request):\n return user_has_any_page_permission(request.user)\n\n def get_context(self, request):\n context = super().get_context(request)\n start_page = get_explorable_root_page(request.user)\n\n if start_page:\n context[\"start_page_id\"] = start_page.id\n\n return context\n\n def render_component(self, request):\n start_page = get_explorable_root_page(request.user)\n\n if start_page:\n return PageExplorerMenuItemComponent(\n self.name,\n self.label,\n self.url,\n start_page.id,\n icon_name=self.icon_name,\n classnames=self.classnames,\n )\n else:\n return super().render_component(request)\n\n\[email protected](\"register_admin_menu_item\")\ndef register_explorer_menu_item():\n return ExplorerMenuItem(\n _(\"Pages\"),\n reverse(\"wagtailadmin_explore_root\"),\n name=\"explorer\",\n icon_name=\"folder-open-inverse\",\n order=100,\n )\n\n\nclass SettingsMenuItem(SubmenuMenuItem):\n def render_component(self, request):\n return SubMenuItemComponent(\n self.name,\n self.label,\n self.menu.render_component(request),\n icon_name=self.icon_name,\n classnames=self.classnames,\n footer_text=\"Wagtail v\" + __version__,\n )\n\n\[email protected](\"register_admin_menu_item\")\ndef register_settings_menu():\n return SettingsMenuItem(_(\"Settings\"), settings_menu, icon_name=\"cogs\", order=10000)\n\n\[email protected](\"register_permissions\")\ndef 
register_permissions():\n return Permission.objects.filter(\n content_type__app_label=\"wagtailadmin\", codename=\"access_admin\"\n )\n\n\nclass PageSearchArea(SearchArea):\n def __init__(self):\n super().__init__(\n _(\"Pages\"),\n reverse(\"wagtailadmin_pages:search\"),\n name=\"pages\",\n icon_name=\"folder-open-inverse\",\n order=100,\n )\n\n def is_shown(self, request):\n return user_has_any_page_permission(request.user)\n\n\[email protected](\"register_admin_search_area\")\ndef register_pages_search_area():\n return PageSearchArea()\n\n\[email protected](\"register_group_permission_panel\")\ndef register_collection_permissions_panel():\n return GroupCollectionManagementPermissionFormSet\n\n\nclass CollectionsMenuItem(MenuItem):\n def is_shown(self, request):\n return collection_permission_policy.user_has_any_permission(\n request.user, [\"add\", \"change\", \"delete\"]\n )\n\n\[email protected](\"register_settings_menu_item\")\ndef register_collections_menu_item():\n return CollectionsMenuItem(\n _(\"Collections\"),\n reverse(\"wagtailadmin_collections:index\"),\n icon_name=\"folder-open-1\",\n order=700,\n )\n\n\nclass WorkflowsMenuItem(MenuItem):\n def is_shown(self, request):\n if not getattr(settings, \"WAGTAIL_WORKFLOW_ENABLED\", True):\n return False\n\n return workflow_permission_policy.user_has_any_permission(\n request.user, [\"add\", \"change\", \"delete\"]\n )\n\n\nclass WorkflowTasksMenuItem(MenuItem):\n def is_shown(self, request):\n if not getattr(settings, \"WAGTAIL_WORKFLOW_ENABLED\", True):\n return False\n\n return task_permission_policy.user_has_any_permission(\n request.user, [\"add\", \"change\", \"delete\"]\n )\n\n\[email protected](\"register_settings_menu_item\")\ndef register_workflows_menu_item():\n return WorkflowsMenuItem(\n _(\"Workflows\"),\n reverse(\"wagtailadmin_workflows:index\"),\n icon_name=\"tasks\",\n order=100,\n )\n\n\[email protected](\"register_settings_menu_item\")\ndef register_workflow_tasks_menu_item():\n return WorkflowTasksMenuItem(\n _(\"Workflow tasks\"),\n reverse(\"wagtailadmin_workflows:task_index\"),\n icon_name=\"thumbtack\",\n order=150,\n )\n\n\[email protected](\"register_page_listing_buttons\")\ndef page_listing_buttons(page, page_perms, next_url=None):\n if page_perms.can_edit():\n yield PageListingButton(\n _(\"Edit\"),\n reverse(\"wagtailadmin_pages:edit\", args=[page.id]),\n attrs={\n \"aria-label\": _(\"Edit '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=10,\n )\n if page.has_unpublished_changes and page.is_previewable():\n yield PageListingButton(\n _(\"View draft\"),\n reverse(\"wagtailadmin_pages:view_draft\", args=[page.id]),\n attrs={\n \"aria-label\": _(\"Preview draft version of '%(title)s'\")\n % {\"title\": page.get_admin_display_title()},\n \"rel\": \"noreferrer\",\n },\n priority=20,\n )\n if page.live and page.url:\n yield PageListingButton(\n _(\"View live\"),\n page.url,\n attrs={\n \"rel\": \"noreferrer\",\n \"aria-label\": _(\"View live version of '%(title)s'\")\n % {\"title\": page.get_admin_display_title()},\n },\n priority=30,\n )\n if page_perms.can_add_subpage():\n yield PageListingButton(\n _(\"Add child page\"),\n reverse(\"wagtailadmin_pages:add_subpage\", args=[page.id]),\n attrs={\n \"aria-label\": _(\"Add a child page to '%(title)s' \")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=40,\n )\n\n yield ButtonWithDropdownFromHook(\n _(\"More\"),\n hook_name=\"register_page_listing_more_buttons\",\n page=page,\n page_perms=page_perms,\n 
next_url=next_url,\n attrs={\n \"target\": \"_blank\",\n \"rel\": \"noreferrer\",\n \"title\": _(\"View more options for '%(title)s'\")\n % {\"title\": page.get_admin_display_title()},\n },\n priority=50,\n )\n\n\[email protected](\"register_page_listing_more_buttons\")\ndef page_listing_more_buttons(page, page_perms, next_url=None):\n if page_perms.can_move():\n yield Button(\n _(\"Move\"),\n reverse(\"wagtailadmin_pages:move\", args=[page.id]),\n attrs={\n \"title\": _(\"Move page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=10,\n )\n if page_perms.can_copy():\n url = reverse(\"wagtailadmin_pages:copy\", args=[page.id])\n if next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Copy\"),\n url,\n attrs={\n \"title\": _(\"Copy page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=20,\n )\n if page_perms.can_delete():\n url = reverse(\"wagtailadmin_pages:delete\", args=[page.id])\n include_next_url = True\n\n # After deleting the page, it is impossible to redirect to it.\n if next_url == reverse(\"wagtailadmin_explore\", args=[page.id]):\n include_next_url = False\n\n if next_url and include_next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Delete\"),\n url,\n attrs={\n \"title\": _(\"Delete page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=30,\n )\n if page_perms.can_unpublish():\n url = reverse(\"wagtailadmin_pages:unpublish\", args=[page.id])\n if next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Unpublish\"),\n url,\n attrs={\n \"title\": _(\"Unpublish page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=40,\n )\n if page_perms.can_view_revisions():\n yield Button(\n _(\"History\"),\n reverse(\"wagtailadmin_pages:history\", args=[page.id]),\n attrs={\n \"title\": _(\"View page history for '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=50,\n )\n\n if page_perms.can_reorder_children():\n yield Button(\n _(\"Sort menu order\"),\n \"?ordering=ord\",\n attrs={\n \"title\": _(\"Change ordering of child pages of '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=60,\n )\n\n\[email protected](\"register_page_header_buttons\")\ndef page_header_buttons(page, page_perms, next_url=None):\n if page_perms.can_edit():\n yield Button(\n _(\"Edit\"),\n reverse(\"wagtailadmin_pages:edit\", args=[page.id]),\n icon_name=\"edit\",\n attrs={\n \"title\": _(\"Edit '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=10,\n )\n if page_perms.can_move():\n yield Button(\n _(\"Move\"),\n reverse(\"wagtailadmin_pages:move\", args=[page.id]),\n icon_name=\"arrow-right-full\",\n attrs={\n \"title\": _(\"Move page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=20,\n )\n if page_perms.can_copy():\n url = reverse(\"wagtailadmin_pages:copy\", args=[page.id])\n if next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Copy\"),\n url,\n icon_name=\"copy\",\n attrs={\n \"title\": _(\"Copy page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=30,\n )\n if page_perms.can_add_subpage():\n yield Button(\n _(\"Add child page\"),\n reverse(\"wagtailadmin_pages:add_subpage\", args=[page.id]),\n icon_name=\"circle-plus\",\n attrs={\n \"aria-label\": _(\"Add a child page to '%(title)s' \")\n % {\"title\": page.get_admin_display_title()},\n 
},\n priority=40,\n )\n if page_perms.can_delete():\n url = reverse(\"wagtailadmin_pages:delete\", args=[page.id])\n\n include_next_url = True\n\n # After deleting the page, it is impossible to redirect to it.\n if next_url == reverse(\"wagtailadmin_explore\", args=[page.id]):\n include_next_url = False\n\n if next_url == reverse(\"wagtailadmin_pages:edit\", args=[page.id]):\n include_next_url = False\n\n if next_url and include_next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Delete\"),\n url,\n icon_name=\"bin\",\n attrs={\n \"title\": _(\"Delete page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=50,\n )\n if page_perms.can_unpublish():\n url = reverse(\"wagtailadmin_pages:unpublish\", args=[page.id])\n if next_url:\n url += \"?\" + urlencode({\"next\": next_url})\n\n yield Button(\n _(\"Unpublish\"),\n url,\n icon_name=\"download-alt\",\n attrs={\n \"title\": _(\"Unpublish page '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=60,\n )\n if page_perms.can_reorder_children():\n url = reverse(\"wagtailadmin_explore\", args=[page.id])\n url += \"?ordering=ord\"\n yield Button(\n _(\"Sort menu order\"),\n url,\n icon_name=\"list-ul\",\n attrs={\n \"title\": _(\"Change ordering of child pages of '%(title)s'\")\n % {\"title\": page.get_admin_display_title()}\n },\n priority=70,\n )\n\n\[email protected](\"register_admin_urls\")\ndef register_viewsets_urls():\n viewsets.populate()\n return viewsets.get_urlpatterns()\n\n\[email protected](\"register_rich_text_features\")\ndef register_core_features(features):\n features.register_converter_rule(\n \"editorhtml\",\n \"link\",\n [\n WhitelistRule(\"a\", attribute_rule({\"href\": check_url})),\n LinkTypeRule(\"page\", PageLinkHandler),\n ],\n )\n\n features.register_converter_rule(\n \"editorhtml\",\n \"bold\",\n [\n WhitelistRule(\"b\", allow_without_attributes),\n WhitelistRule(\"strong\", allow_without_attributes),\n ],\n )\n\n features.register_converter_rule(\n \"editorhtml\",\n \"italic\",\n [\n WhitelistRule(\"i\", allow_without_attributes),\n WhitelistRule(\"em\", allow_without_attributes),\n ],\n )\n\n headings_elements = [\"h1\", \"h2\", \"h3\", \"h4\", \"h5\", \"h6\"]\n for order, element in enumerate(headings_elements):\n features.register_converter_rule(\n \"editorhtml\", element, [WhitelistRule(element, allow_without_attributes)]\n )\n\n features.register_converter_rule(\n \"editorhtml\",\n \"ol\",\n [\n WhitelistRule(\"ol\", allow_without_attributes),\n WhitelistRule(\"li\", allow_without_attributes),\n ],\n )\n\n features.register_converter_rule(\n \"editorhtml\",\n \"ul\",\n [\n WhitelistRule(\"ul\", allow_without_attributes),\n WhitelistRule(\"li\", allow_without_attributes),\n ],\n )\n\n # Draftail\n features.register_editor_plugin(\n \"draftail\", \"hr\", draftail_features.BooleanFeature(\"enableHorizontalRule\")\n )\n features.register_converter_rule(\n \"contentstate\",\n \"hr\",\n {\n \"from_database_format\": {\n \"hr\": HorizontalRuleHandler(),\n },\n \"to_database_format\": {\n \"entity_decorators\": {\n \"HORIZONTAL_RULE\": lambda props: DOM.create_element(\"hr\")\n }\n },\n },\n )\n\n features.register_editor_plugin(\n \"draftail\",\n \"h1\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h1\",\n \"type\": \"header-one\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 1},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h1\",\n {\n \"from_database_format\": {\n \"h1\": 
BlockElementHandler(\"header-one\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-one\": \"h1\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"h2\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h2\",\n \"type\": \"header-two\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 2},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h2\",\n {\n \"from_database_format\": {\n \"h2\": BlockElementHandler(\"header-two\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-two\": \"h2\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"h3\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h3\",\n \"type\": \"header-three\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 3},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h3\",\n {\n \"from_database_format\": {\n \"h3\": BlockElementHandler(\"header-three\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-three\": \"h3\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"h4\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h4\",\n \"type\": \"header-four\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 4},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h4\",\n {\n \"from_database_format\": {\n \"h4\": BlockElementHandler(\"header-four\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-four\": \"h4\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"h5\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h5\",\n \"type\": \"header-five\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 5},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h5\",\n {\n \"from_database_format\": {\n \"h5\": BlockElementHandler(\"header-five\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-five\": \"h5\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"h6\",\n draftail_features.BlockFeature(\n {\n \"icon\": \"h6\",\n \"type\": \"header-six\",\n \"description\": gettext(\"Heading %(level)d\") % {\"level\": 6},\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"h6\",\n {\n \"from_database_format\": {\n \"h6\": BlockElementHandler(\"header-six\"),\n },\n \"to_database_format\": {\"block_map\": {\"header-six\": \"h6\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"ul\",\n draftail_features.BlockFeature(\n {\n \"type\": \"unordered-list-item\",\n \"icon\": \"list-ul\",\n \"description\": gettext(\"Bulleted list\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"ul\",\n {\n \"from_database_format\": {\n \"ul\": ListElementHandler(\"unordered-list-item\"),\n \"li\": ListItemElementHandler(),\n },\n \"to_database_format\": {\n \"block_map\": {\"unordered-list-item\": {\"element\": \"li\", \"wrapper\": \"ul\"}}\n },\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"ol\",\n draftail_features.BlockFeature(\n {\n \"type\": \"ordered-list-item\",\n \"icon\": \"list-ol\",\n \"description\": gettext(\"Numbered list\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"ol\",\n {\n \"from_database_format\": {\n \"ol\": ListElementHandler(\"ordered-list-item\"),\n \"li\": ListItemElementHandler(),\n },\n \"to_database_format\": {\n \"block_map\": {\"ordered-list-item\": {\"element\": \"li\", \"wrapper\": \"ol\"}}\n },\n },\n )\n features.register_editor_plugin(\n 
\"draftail\",\n \"blockquote\",\n draftail_features.BlockFeature(\n {\n \"type\": \"blockquote\",\n \"icon\": \"openquote\",\n \"description\": gettext(\"Blockquote\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"blockquote\",\n {\n \"from_database_format\": {\n \"blockquote\": BlockElementHandler(\"blockquote\"),\n },\n \"to_database_format\": {\"block_map\": {\"blockquote\": \"blockquote\"}},\n },\n )\n\n features.register_editor_plugin(\n \"draftail\",\n \"bold\",\n draftail_features.InlineStyleFeature(\n {\n \"type\": \"BOLD\",\n \"icon\": \"bold\",\n \"description\": gettext(\"Bold\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"bold\",\n {\n \"from_database_format\": {\n \"b\": InlineStyleElementHandler(\"BOLD\"),\n \"strong\": InlineStyleElementHandler(\"BOLD\"),\n },\n \"to_database_format\": {\"style_map\": {\"BOLD\": \"b\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"italic\",\n draftail_features.InlineStyleFeature(\n {\n \"type\": \"ITALIC\",\n \"icon\": \"italic\",\n \"description\": gettext(\"Italic\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"italic\",\n {\n \"from_database_format\": {\n \"i\": InlineStyleElementHandler(\"ITALIC\"),\n \"em\": InlineStyleElementHandler(\"ITALIC\"),\n },\n \"to_database_format\": {\"style_map\": {\"ITALIC\": \"i\"}},\n },\n )\n\n features.register_editor_plugin(\n \"draftail\",\n \"link\",\n draftail_features.EntityFeature(\n {\n \"type\": \"LINK\",\n \"icon\": \"link\",\n \"description\": gettext(\"Link\"),\n # We want to enforce constraints on which links can be pasted into rich text.\n # Keep only the attributes Wagtail needs.\n \"attributes\": [\"url\", \"id\", \"parentId\"],\n \"allowlist\": {\n # Keep pasted links with http/https protocol, and not-pasted links (href = undefined).\n \"href\": \"^(http:|https:|undefined$)\",\n },\n },\n js=[\n \"wagtailadmin/js/page-chooser-modal.js\",\n ],\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"link\",\n {\n \"from_database_format\": {\n \"a[href]\": ExternalLinkElementHandler(\"LINK\"),\n 'a[linktype=\"page\"]': PageLinkElementHandler(\"LINK\"),\n },\n \"to_database_format\": {\"entity_decorators\": {\"LINK\": link_entity}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"superscript\",\n draftail_features.InlineStyleFeature(\n {\n \"type\": \"SUPERSCRIPT\",\n \"icon\": \"superscript\",\n \"description\": gettext(\"Superscript\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"superscript\",\n {\n \"from_database_format\": {\n \"sup\": InlineStyleElementHandler(\"SUPERSCRIPT\"),\n },\n \"to_database_format\": {\"style_map\": {\"SUPERSCRIPT\": \"sup\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"subscript\",\n draftail_features.InlineStyleFeature(\n {\n \"type\": \"SUBSCRIPT\",\n \"icon\": \"subscript\",\n \"description\": gettext(\"Subscript\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"subscript\",\n {\n \"from_database_format\": {\n \"sub\": InlineStyleElementHandler(\"SUBSCRIPT\"),\n },\n \"to_database_format\": {\"style_map\": {\"SUBSCRIPT\": \"sub\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"strikethrough\",\n draftail_features.InlineStyleFeature(\n {\n \"type\": \"STRIKETHROUGH\",\n \"icon\": \"strikethrough\",\n \"description\": gettext(\"Strikethrough\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n 
\"strikethrough\",\n {\n \"from_database_format\": {\n \"s\": InlineStyleElementHandler(\"STRIKETHROUGH\"),\n },\n \"to_database_format\": {\"style_map\": {\"STRIKETHROUGH\": \"s\"}},\n },\n )\n features.register_editor_plugin(\n \"draftail\",\n \"code\",\n draftail_features.InlineStyleFeature(\n {\n \"type\": \"CODE\",\n \"icon\": \"code\",\n \"description\": gettext(\"Code\"),\n }\n ),\n )\n features.register_converter_rule(\n \"contentstate\",\n \"code\",\n {\n \"from_database_format\": {\n \"code\": InlineStyleElementHandler(\"CODE\"),\n },\n \"to_database_format\": {\"style_map\": {\"CODE\": \"code\"}},\n },\n )\n\n\nclass LockedPagesMenuItem(MenuItem):\n def is_shown(self, request):\n return UserPagePermissionsProxy(request.user).can_remove_locks()\n\n\nclass WorkflowReportMenuItem(MenuItem):\n def is_shown(self, request):\n return getattr(settings, \"WAGTAIL_WORKFLOW_ENABLED\", True)\n\n\nclass SiteHistoryReportMenuItem(MenuItem):\n def is_shown(self, request):\n return UserPagePermissionsProxy(request.user).explorable_pages().exists()\n\n\nclass AgingPagesReportMenuItem(MenuItem):\n def is_shown(self, request):\n return getattr(settings, \"WAGTAIL_AGING_PAGES_ENABLED\", True)\n\n\[email protected](\"register_reports_menu_item\")\ndef register_locked_pages_menu_item():\n return LockedPagesMenuItem(\n _(\"Locked pages\"),\n reverse(\"wagtailadmin_reports:locked_pages\"),\n icon_name=\"lock\",\n order=700,\n )\n\n\[email protected](\"register_reports_menu_item\")\ndef register_workflow_report_menu_item():\n return WorkflowReportMenuItem(\n _(\"Workflows\"),\n reverse(\"wagtailadmin_reports:workflow\"),\n icon_name=\"tasks\",\n order=800,\n )\n\n\[email protected](\"register_reports_menu_item\")\ndef register_workflow_tasks_report_menu_item():\n return WorkflowReportMenuItem(\n _(\"Workflow tasks\"),\n reverse(\"wagtailadmin_reports:workflow_tasks\"),\n icon_name=\"thumbtack\",\n order=900,\n )\n\n\[email protected](\"register_reports_menu_item\")\ndef register_site_history_report_menu_item():\n return SiteHistoryReportMenuItem(\n _(\"Site history\"),\n reverse(\"wagtailadmin_reports:site_history\"),\n icon_name=\"history\",\n order=1000,\n )\n\n\[email protected](\"register_reports_menu_item\")\ndef register_aging_pages_report_menu_item():\n return AgingPagesReportMenuItem(\n _(\"Aging pages\"),\n reverse(\"wagtailadmin_reports:aging_pages\"),\n icon_name=\"time\",\n order=1100,\n )\n\n\[email protected](\"register_admin_menu_item\")\ndef register_reports_menu():\n return SubmenuMenuItem(_(\"Reports\"), reports_menu, icon_name=\"site\", order=9000)\n\n\[email protected](\"register_icons\")\ndef register_icons(icons):\n for icon in [\n \"angle-double-left.svg\",\n \"angle-double-right.svg\",\n \"arrow-down-big.svg\",\n \"arrow-down.svg\",\n \"arrow-right-full.svg\",\n \"arrow-left.svg\",\n \"arrow-right.svg\",\n \"arrow-up-big.svg\",\n \"arrow-up.svg\",\n \"arrows-up-down.svg\",\n \"bars.svg\",\n \"bin.svg\",\n \"bold.svg\",\n \"breadcrumb-expand.svg\",\n \"calendar.svg\",\n \"calendar-alt.svg\",\n \"calendar-check.svg\",\n \"chain-broken.svg\",\n \"check.svg\",\n \"chevron-down.svg\",\n \"circle-check.svg\",\n \"circle-plus.svg\",\n \"clipboard-list.svg\",\n \"code.svg\",\n \"cog.svg\",\n \"cogs.svg\",\n \"copy.svg\",\n \"collapse-down.svg\",\n \"collapse-up.svg\",\n \"comment.svg\",\n \"comment-add.svg\",\n \"comment-add-reversed.svg\",\n \"cross.svg\",\n \"cut.svg\",\n \"date.svg\",\n \"desktop.svg\",\n \"doc-empty-inverse.svg\",\n \"doc-empty.svg\",\n \"doc-full-inverse.svg\",\n 
\"doc-full.svg\", # aka file-text-alt\n \"dots-vertical.svg\",\n \"dots-horizontal.svg\",\n \"download-alt.svg\",\n \"download.svg\",\n \"draft.svg\",\n \"duplicate.svg\",\n \"edit.svg\",\n \"ellipsis-v.svg\",\n \"expand-right.svg\",\n \"error.svg\",\n \"folder-inverse.svg\",\n \"folder-open-1.svg\",\n \"folder-open-inverse.svg\",\n \"folder.svg\",\n \"form.svg\",\n \"globe.svg\",\n \"grip.svg\",\n \"group.svg\",\n \"h1.svg\",\n \"h2.svg\",\n \"h3.svg\",\n \"h4.svg\",\n \"h5.svg\",\n \"h6.svg\",\n \"help.svg\",\n \"history.svg\",\n \"home.svg\",\n \"horizontalrule.svg\",\n \"image.svg\", # aka picture\n \"info-circle.svg\",\n \"italic.svg\",\n \"link.svg\",\n \"link-external.svg\",\n \"list-ol.svg\",\n \"list-ul.svg\",\n \"lock-open.svg\",\n \"lock.svg\",\n \"login.svg\",\n \"logout.svg\",\n \"mail.svg\",\n \"media.svg\",\n \"mobile-alt.svg\",\n \"no-view.svg\",\n \"openquote.svg\",\n \"order-down.svg\",\n \"order-up.svg\",\n \"order.svg\",\n \"password.svg\",\n \"pick.svg\",\n \"pilcrow.svg\",\n \"placeholder.svg\", # aka marquee\n \"plus-inverse.svg\",\n \"plus.svg\",\n \"radio-empty.svg\",\n \"radio-full.svg\",\n \"redirect.svg\",\n \"repeat.svg\",\n \"reset.svg\",\n \"resubmit.svg\",\n \"rotate.svg\",\n \"search.svg\",\n \"site.svg\",\n \"snippet.svg\",\n \"spinner.svg\",\n \"strikethrough.svg\",\n \"success.svg\",\n \"subscript.svg\",\n \"superscript.svg\",\n \"table.svg\",\n \"tablet-alt.svg\",\n \"tag.svg\",\n \"tasks.svg\",\n \"thumbtack.svg\",\n \"tick-inverse.svg\",\n \"tick.svg\",\n \"time.svg\",\n \"title.svg\",\n \"undo.svg\",\n \"uni52.svg\", # Is this a redundant icon?\n \"upload.svg\",\n \"user.svg\",\n \"view.svg\",\n \"wagtail-inverse.svg\",\n \"wagtail.svg\",\n \"warning.svg\",\n ]:\n icons.append(\"wagtailadmin/icons/{}\".format(icon))\n return icons\n\n\[email protected](\"construct_homepage_summary_items\")\ndef add_pages_summary_item(request, items):\n items.insert(0, PagesSummaryItem(request))\n\n\nclass PageAdminURLFinder:\n def __init__(self, user):\n self.page_perms = user and UserPagePermissionsProxy(user)\n\n def get_edit_url(self, instance):\n if self.page_perms and not self.page_perms.for_page(instance).can_edit():\n return None\n else:\n return reverse(\"wagtailadmin_pages:edit\", args=(instance.pk,))\n\n\nregister_admin_url_finder(Page, PageAdminURLFinder)\n\n\nclass CollectionAdminURLFinder(ModelAdminURLFinder):\n permission_policy = collection_permission_policy\n edit_url_name = \"wagtailadmin_collections:edit\"\n\n\nregister_admin_url_finder(Collection, CollectionAdminURLFinder)\n\n\nclass WorkflowAdminURLFinder(ModelAdminURLFinder):\n permission_policy = workflow_permission_policy\n edit_url_name = \"wagtailadmin_workflows:edit\"\n\n\nregister_admin_url_finder(Workflow, WorkflowAdminURLFinder)\n\n\nclass WorkflowTaskAdminURLFinder(ModelAdminURLFinder):\n permission_policy = task_permission_policy\n edit_url_name = \"wagtailadmin_workflows:edit_task\"\n\n\nregister_admin_url_finder(Task, WorkflowTaskAdminURLFinder)\n\n\nfor action_class in [\n DeleteBulkAction,\n MoveBulkAction,\n PublishBulkAction,\n UnpublishBulkAction,\n]:\n hooks.register(\"register_bulk_action\", action_class)\n", "path": "wagtail/admin/wagtail_hooks.py" } ]
diff --git a/CHANGELOG.txt b/CHANGELOG.txt index 9c028178586a..abe90a8ad4be 100644 --- a/CHANGELOG.txt +++ b/CHANGELOG.txt @@ -72,6 +72,7 @@ Changelog * Fix: Ensure that buttons on custom chooser widgets are correctly shown on hover (Thibaud Colas) * Fix: Add missing asterisk to title field placeholder (Seremba Patrick) * Fix: Avoid creating an extra rich text block when inserting a new block at the end of the content (Matt Westcott) + * Fix: Removed the extra dot in the Wagtail version shown within the admin settings menu item (Loveth Omokaro) 4.0.3 (xx.xx.xxxx) - IN DEVELOPMENT diff --git a/CONTRIBUTORS.rst b/CONTRIBUTORS.rst index b7e7e19f38c2..040fe941b3d0 100644 --- a/CONTRIBUTORS.rst +++ b/CONTRIBUTORS.rst @@ -639,6 +639,7 @@ Contributors * Chizoba Nweke * Seremba Patrick * Ruqouyyah Muhammad +* Loveth Omokaro Translators diff --git a/docs/releases/4.1.md b/docs/releases/4.1.md index 8a45664103a0..c8b5fdf4244a 100644 --- a/docs/releases/4.1.md +++ b/docs/releases/4.1.md @@ -97,6 +97,7 @@ This feature was developed by Karl Hobley and Matt Westcott. * Ensure that buttons on custom chooser widgets are correctly shown on hover (Thibaud Colas) * Add missing asterisk to title field placeholder (Seremba Patrick) * Avoid creating an extra rich text block when inserting a new block at the end of the content (Matt Westcott) + * Removed the extra dot in the Wagtail version shown within the admin settings menu item (Loveth Omokaro) ## Upgrade considerations diff --git a/wagtail/admin/wagtail_hooks.py b/wagtail/admin/wagtail_hooks.py index eb358295182a..0e2d658ad131 100644 --- a/wagtail/admin/wagtail_hooks.py +++ b/wagtail/admin/wagtail_hooks.py @@ -102,7 +102,7 @@ def render_component(self, request): self.menu.render_component(request), icon_name=self.icon_name, classnames=self.classnames, - footer_text="Wagtail v." + __version__, + footer_text="Wagtail v" + __version__, )
spack__spack-19482
Installation issue: gcc

### Steps to reproduce the issue

```console
$ spack install gcc@master
...
==> No binary for gcc found: installing from source
Reversed (or previously applied) patch detected!  Assume -R? [n]
Apply anyway? [n]
2 out of 2 hunks ignored -- saving rejects to file gcc/Makefile.in.rej
Reversed (or previously applied) patch detected!  Assume -R? [n]
Apply anyway? [n]
1 out of 1 hunk ignored -- saving rejects to file gcc/configure.ac.rej
==> Patch /lustre/home/ca-tgreen/Work/git/spack/var/spack/repos/builtin/packages/gcc/zstd.patch failed.
==> Error: ProcessError: Command exited with status 1: '/usr/bin/patch' '-s' '-p' '1' '-i' '/lustre/home/ca-tgreen/Work/git/spack/var/spack/repos/builtin/packages/gcc/zstd.patch' '-d' '.'
```

### Information on your system

* **Spack:** 0.15.4-979-ee1725828
* **Python:** 3.6.8
* **Platform:** cray-rhel8-aarch64

### Additional information

* [spack-build-out.txt]()
* [spack-build-env.txt]()

These files are not present in the staging directory.

Maintainer: @michaelkuhn

### General information

- [x] I have run `spack debug report` and reported the version of Spack/Python/Platform
- [x] I have run `spack maintainers <name-of-the-package>` and @mentioned any maintainers
- [ ] I have uploaded the build log and environment files
- [x] I have searched the issues of this repo and believe this is not a duplicate
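The `Reversed (or previously applied) patch detected!` messages mean `/usr/bin/patch` found the `zstd.patch` hunks already present in the sources: the upstream change for GCC bug 95005 has landed on the `master` branch, so the recipe's `patch('zstd.patch', when='@10:')` directive (visible in the package file below) re-applies it and fails. A minimal sketch of one possible remedy, assuming the change is present on `master` but still missing from the released 10.x tarballs, is to tighten the `when=` constraint so that `@master` no longer matches; the fragment belongs inside the `Gcc` package class (Spack injects the `patch` directive into `package.py`, so no extra import is needed), and the exact upper bound shown here is illustrative rather than the merged fix:

```python
# Sketch for var/spack/repos/builtin/packages/gcc/package.py, inside class Gcc.
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95005
# Apply the zstd patch only to released 10.x versions; on @master the change
# is already upstream, so re-patching is detected as a reversed patch.
# The '10.2.0' upper bound is an assumption -- it would need widening for any
# future release that still lacks the upstream fix.
patch('zstd.patch', when='@10.1.0:10.2.0')
```

Either way, the essential point is that `@master` must stop satisfying the patch's `when=` spec.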
[ { "content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\nimport glob\nimport itertools\nimport os\nimport re\nimport sys\n\nimport llnl.util.tty as tty\nimport spack.architecture\nimport spack.util.executable\n\nfrom spack.operating_systems.mac_os import macos_version, macos_sdk_path\n\n\nclass Gcc(AutotoolsPackage, GNUMirrorPackage):\n \"\"\"The GNU Compiler Collection includes front ends for C, C++, Objective-C,\n Fortran, Ada, and Go, as well as libraries for these languages.\"\"\"\n\n homepage = 'https://gcc.gnu.org'\n gnu_mirror_path = 'gcc/gcc-9.2.0/gcc-9.2.0.tar.xz'\n git = 'git://gcc.gnu.org/git/gcc.git'\n list_url = 'http://ftp.gnu.org/gnu/gcc/'\n list_depth = 1\n\n maintainers = ['michaelkuhn']\n\n version('master', branch='master')\n\n version('10.2.0', sha256='b8dd4368bb9c7f0b98188317ee0254dd8cc99d1e3a18d0ff146c855fe16c1d8c')\n version('10.1.0', sha256='b6898a23844b656f1b68691c5c012036c2e694ac4b53a8918d4712ad876e7ea2')\n\n version('9.3.0', sha256='71e197867611f6054aa1119b13a0c0abac12834765fe2d81f35ac57f84f742d1')\n version('9.2.0', sha256='ea6ef08f121239da5695f76c9b33637a118dcf63e24164422231917fa61fb206')\n version('9.1.0', sha256='79a66834e96a6050d8fe78db2c3b32fb285b230b855d0a66288235bc04b327a0')\n\n version('8.4.0', sha256='e30a6e52d10e1f27ed55104ad233c30bd1e99cfb5ff98ab022dc941edd1b2dd4')\n version('8.3.0', sha256='64baadfe6cc0f4947a84cb12d7f0dfaf45bb58b7e92461639596c21e02d97d2c')\n version('8.2.0', sha256='196c3c04ba2613f893283977e6011b2345d1cd1af9abeac58e916b1aab3e0080')\n version('8.1.0', sha256='1d1866f992626e61349a1ccd0b8d5253816222cdc13390dcfaa74b093aa2b153')\n\n version('7.5.0', sha256='b81946e7f01f90528a1f7352ab08cc602b9ccc05d4e44da4bd501c5a189ee661')\n version('7.4.0', sha256='eddde28d04f334aec1604456e536416549e9b1aa137fc69204e65eb0c009fe51')\n version('7.3.0', sha256='832ca6ae04636adbb430e865a1451adf6979ab44ca1c8374f61fba65645ce15c')\n version('7.2.0', sha256='1cf7adf8ff4b5aa49041c8734bbcf1ad18cc4c94d0029aae0f4e48841088479a')\n version('7.1.0', sha256='8a8136c235f64c6fef69cac0d73a46a1a09bb250776a050aec8f9fc880bebc17')\n\n version('6.5.0', sha256='7ef1796ce497e89479183702635b14bb7a46b53249209a5e0f999bebf4740945')\n version('6.4.0', sha256='850bf21eafdfe5cd5f6827148184c08c4a0852a37ccf36ce69855334d2c914d4')\n version('6.3.0', sha256='f06ae7f3f790fbf0f018f6d40e844451e6bc3b7bc96e128e63b09825c1f8b29f')\n version('6.2.0', sha256='9944589fc722d3e66308c0ce5257788ebd7872982a718aa2516123940671b7c5')\n version('6.1.0', sha256='09c4c85cabebb971b1de732a0219609f93fc0af5f86f6e437fd8d7f832f1a351')\n\n version('5.5.0', sha256='530cea139d82fe542b358961130c69cfde8b3d14556370b65823d2f91f0ced87')\n version('5.4.0', sha256='608df76dec2d34de6558249d8af4cbee21eceddbcb580d666f7a5a583ca3303a')\n version('5.3.0', sha256='b84f5592e9218b73dbae612b5253035a7b34a9a1f7688d2e1bfaaf7267d5c4db')\n version('5.2.0', sha256='5f835b04b5f7dd4f4d2dc96190ec1621b8d89f2dc6f638f9f8bc1b1014ba8cad')\n version('5.1.0', sha256='b7dafdf89cbb0e20333dbf5b5349319ae06e3d1a30bf3515b5488f7e89dca5ad')\n\n version('4.9.4', sha256='6c11d292cd01b294f9f84c9a59c230d80e9e4a47e5c6355f046bb36d4f358092')\n version('4.9.3', sha256='2332b2a5a321b57508b9031354a8503af6fdfb868b8c1748d33028d100a8b67e')\n version('4.9.2', sha256='2020c98295856aa13fda0f2f3a4794490757fc24bcca918d52cc8b4917b972dd')\n version('4.9.1', 
sha256='d334781a124ada6f38e63b545e2a3b8c2183049515a1abab6d513f109f1d717e')\n version('4.8.5', sha256='22fb1e7e0f68a63cee631d85b20461d1ea6bda162f03096350e38c8d427ecf23')\n version('4.8.4', sha256='4a80aa23798b8e9b5793494b8c976b39b8d9aa2e53cd5ed5534aff662a7f8695')\n version('4.7.4', sha256='92e61c6dc3a0a449e62d72a38185fda550168a86702dea07125ebd3ec3996282')\n version('4.6.4', sha256='35af16afa0b67af9b8eb15cafb76d2bc5f568540552522f5dc2c88dd45d977e8')\n version('4.5.4', sha256='eef3f0456db8c3d992cbb51d5d32558190bc14f3bc19383dd93acc27acc6befc')\n\n # We specifically do not add 'all' variant here because:\n # (i) Ada, Go, Jit, and Objective-C++ are not default languages.\n # In that respect, the name 'all' is rather misleading.\n # (ii) Languages other than c,c++,fortran are prone to configure bug in GCC\n # For example, 'java' appears to ignore custom location of zlib\n # (iii) meaning of 'all' changes with GCC version, i.e. 'java' is not part\n # of gcc7. Correctly specifying conflicts() and depends_on() in such a\n # case is a PITA.\n variant('languages',\n default='c,c++,fortran',\n values=('ada', 'brig', 'c', 'c++', 'fortran',\n 'go', 'java', 'jit', 'lto', 'objc', 'obj-c++'),\n multi=True,\n description='Compilers and runtime libraries to build')\n variant('binutils',\n default=False,\n description='Build via binutils')\n variant('piclibs',\n default=False,\n description='Build PIC versions of libgfortran.a and libstdc++.a')\n variant('strip',\n default=False,\n description='Strip executables to reduce installation size')\n variant('nvptx',\n default=False,\n description='Target nvptx offloading to NVIDIA GPUs')\n variant('bootstrap',\n default=False,\n description='add --enable-bootstrap flag for stage3 build')\n\n depends_on('flex', type='build', when='@master')\n\n # https://gcc.gnu.org/install/prerequisites.html\n depends_on('[email protected]:')\n # GCC 7.3 does not compile with newer releases on some platforms, see\n # https://github.com/spack/spack/issues/6902#issuecomment-433030376\n depends_on('[email protected]:3.1.6', when='@:9.9')\n depends_on('[email protected]:', when='@10:')\n depends_on('[email protected]:', when='@4.5:')\n # Already released GCC versions do not support any newer version of ISL\n # GCC 5.4 https://github.com/spack/spack/issues/6902#issuecomment-433072097\n # GCC 7.3 https://github.com/spack/spack/issues/6902#issuecomment-433030376\n # GCC 9+ https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86724\n depends_on('[email protected]', when='@5.0:5.2')\n depends_on('[email protected]', when='@5.3:5.9')\n depends_on('[email protected]:0.18', when='@6:8.9')\n depends_on('[email protected]:0.20', when='@9:9.9')\n depends_on('[email protected]:', when='@10:')\n depends_on('zlib', when='@6:')\n depends_on('zstd', when='@10:')\n depends_on('iconv', when='platform=darwin')\n depends_on('gnat', when='languages=ada')\n depends_on('binutils~libiberty', when='+binutils', type=('build', 'link', 'run'))\n depends_on('zip', type='build', when='languages=java')\n depends_on('cuda@:10', when='+nvptx')\n\n # The server is sometimes a bit slow to respond\n timeout = {'timeout': 60}\n\n resource(name='newlib',\n url='ftp://sourceware.org/pub/newlib/newlib-3.0.0.20180831.tar.gz',\n sha256='3ad3664f227357df15ff34e954bfd9f501009a647667cd307bf0658aefd6eb5b',\n destination='newlibsource',\n when='+nvptx',\n fetch_options=timeout)\n\n # nvptx-tools does not seem to work as a dependency,\n # but does fine when the source is inside the gcc build directory\n # nvptx-tools doesn't have any 
releases, so grabbing the last commit\n resource(name='nvptx-tools',\n git='https://github.com/MentorEmbedded/nvptx-tools',\n commit='5f6f343a302d620b0868edab376c00b15741e39e',\n when='+nvptx')\n\n # TODO: integrate these libraries.\n # depends_on('ppl')\n # depends_on('cloog')\n\n # https://gcc.gnu.org/install/test.html\n depends_on('[email protected]', type='test')\n depends_on('expect', type='test')\n depends_on('tcl', type='test')\n depends_on('[email protected]:', type='test')\n depends_on('[email protected]:', type='test')\n\n # See https://golang.org/doc/install/gccgo#Releases\n provides('golang', when='languages=go @4.6:')\n provides('golang@:1', when='languages=go @4.7.1:')\n provides('golang@:1.1', when='languages=go @4.8:')\n provides('golang@:1.1.2', when='languages=go @4.8.2:')\n provides('golang@:1.2', when='languages=go @4.9:')\n provides('golang@:1.4', when='languages=go @5:')\n provides('golang@:1.6.1', when='languages=go @6:')\n provides('golang@:1.8', when='languages=go @7:')\n\n # For a list of valid languages for a specific release,\n # run the following command in the GCC source directory:\n # $ grep ^language= gcc/*/config-lang.in\n # See https://gcc.gnu.org/install/configure.html\n\n # Support for processing BRIG 1.0 files was added in GCC 7\n # BRIG is a binary format for HSAIL:\n # (Heterogeneous System Architecture Intermediate Language).\n # See https://gcc.gnu.org/gcc-7/changes.html\n conflicts('languages=brig', when='@:6')\n\n # BRIG does not seem to be supported on macOS\n conflicts('languages=brig', when='platform=darwin')\n\n # GCC 4.8 added a 'c' language. I'm sure C was always built,\n # but this is the first version that accepts 'c' as a valid language.\n conflicts('languages=c', when='@:4.7')\n\n # GCC 4.6 added support for the Go programming language.\n # See https://gcc.gnu.org/gcc-4.6/changes.html\n conflicts('languages=go', when='@:4.5')\n\n # Go is not supported on macOS\n conflicts('languages=go', when='platform=darwin')\n\n # The GCC Java frontend and associated libjava runtime library\n # have been removed from GCC as of GCC 7.\n # See https://gcc.gnu.org/gcc-7/changes.html\n conflicts('languages=java', when='@7:')\n\n # GCC 5 added the ability to build GCC as a Just-In-Time compiler.\n # See https://gcc.gnu.org/gcc-5/changes.html\n conflicts('languages=jit', when='@:4')\n\n # NVPTX offloading supported in 7 and later by limited languages\n conflicts('+nvptx', when='@:6', msg='NVPTX only supported in gcc 7 and above')\n conflicts('languages=ada', when='+nvptx')\n conflicts('languages=brig', when='+nvptx')\n conflicts('languages=go', when='+nvptx')\n conflicts('languages=java', when='+nvptx')\n conflicts('languages=jit', when='+nvptx')\n conflicts('languages=objc', when='+nvptx')\n conflicts('languages=obj-c++', when='+nvptx')\n # NVPTX build disables bootstrap\n conflicts('+binutils', when='+nvptx')\n\n # Binutils can't build ld on macOS\n conflicts('+binutils', when='platform=darwin')\n\n # Newer binutils than RHEL's is required to run `as` on some instructions\n # generated by new GCC (see https://github.com/spack/spack/issues/12235)\n conflicts('~binutils', when='@7: os=rhel6',\n msg='New GCC cannot use system assembler on RHEL6')\n\n if sys.platform == 'darwin':\n # Fix parallel build on APFS filesystem\n # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81797\n if macos_version() >= Version('10.13'):\n patch('darwin/apfs.patch', when='@5.5.0,6.1:6.4,7.1:7.3')\n # from homebrew via macports\n # https://trac.macports.org/ticket/56502#no1\n # see 
also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83531\n patch('darwin/headers-10.13-fix.patch', when='@5.5.0')\n if macos_version() >= Version('10.14'):\n # Fix system headers for Mojave SDK:\n # https://github.com/Homebrew/homebrew-core/pull/39041\n patch('https://raw.githubusercontent.com/Homebrew/formula-patches/master/gcc/8.3.0-xcode-bug-_Atomic-fix.patch',\n sha256='33ee92bf678586357ee8ab9d2faddf807e671ad37b97afdd102d5d153d03ca84',\n when='@6:8')\n if macos_version() >= Version('10.15'):\n # Fix system headers for Catalina SDK\n # (otherwise __OSX_AVAILABLE_STARTING ends up undefined)\n patch('https://raw.githubusercontent.com/Homebrew/formula-patches/b8b8e65e/gcc/9.2.0-catalina.patch',\n sha256='0b8d14a7f3c6a2f0d2498526e86e088926671b5da50a554ffa6b7f73ac4f132b', when='@9.2.0')\n # Use -headerpad_max_install_names in the build,\n # otherwise updated load commands won't fit in the Mach-O header.\n # This is needed because `gcc` avoids the superenv shim.\n patch('darwin/gcc-7.1.0-headerpad.patch', when='@5:')\n patch('darwin/gcc-6.1.0-jit.patch', when='@5:7')\n patch('darwin/gcc-4.9.patch1', when='@4.9.0:4.9.3')\n patch('darwin/gcc-4.9.patch2', when='@4.9.0:4.9.3')\n\n patch('piclibs.patch', when='+piclibs')\n patch('gcc-backport.patch', when='@4.7:4.9.2,5:5.3')\n\n # Backport libsanitizer patch for glibc >= 2.31 and 5.3.0 <= gcc <= 9.2.0\n # https://bugs.gentoo.org/708346\n patch('glibc-2.31-libsanitizer-1.patch', when='@7.1.0:7.5.0,8.1.0:8.3.0,9.0.0:9.2.0')\n patch('glibc-2.31-libsanitizer-1-gcc-6.patch', when='@5.3.0:5.5.0,6.1.0:6.5.0')\n patch('glibc-2.31-libsanitizer-2.patch', when='@8.1.0:8.3.0,9.0.0:9.2.0')\n patch('glibc-2.31-libsanitizer-2-gcc-6.patch', when='@5.3.0:5.5.0,6.1.0:6.5.0')\n patch('glibc-2.31-libsanitizer-2-gcc-7.patch', when='@7.1.0:7.5.0')\n # Older versions do not compile with newer versions of glibc\n # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81712\n patch('ucontext_t.patch', when='@4.9,5.1:5.4,6.1:6.4,7.1')\n patch('ucontext_t-java.patch', when='@4.9,5.1:5.4,6.1:6.4 languages=java')\n # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81066\n patch('stack_t-4.9.patch', when='@4.9')\n patch('stack_t.patch', when='@5.1:5.4,6.1:6.4,7.1')\n # https://bugs.busybox.net/show_bug.cgi?id=10061\n patch('signal.patch', when='@4.9,5.1:5.4')\n # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85835\n patch('sys_ustat.h.patch', when='@5.0:6.4,7.0:7.3,8.1')\n patch('sys_ustat-4.9.patch', when='@4.9')\n\n # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95005\n patch('zstd.patch', when='@10:')\n\n build_directory = 'spack-build'\n\n @property\n def executables(self):\n names = [r'gcc', r'[^\\w]?g\\+\\+', r'gfortran']\n suffixes = [r'', r'-mp-\\d+\\.\\d', r'-\\d+\\.\\d', r'-\\d+', r'\\d\\d']\n return [r''.join(x) for x in itertools.product(names, suffixes)]\n\n @classmethod\n def filter_detected_exes(cls, prefix, exes_in_prefix):\n result = []\n for exe in exes_in_prefix:\n # On systems like Ubuntu we might get multiple executables\n # with the string \"gcc\" in them. 
See:\n # https://helpmanual.io/packages/apt/gcc/\n basename = os.path.basename(exe)\n substring_to_be_filtered = [\n 'c99-gcc',\n 'c89-gcc',\n '-nm',\n '-ar',\n 'ranlib',\n 'clang' # clang++ matches g++ -> clan[g++]\n ]\n if any(x in basename for x in substring_to_be_filtered):\n continue\n # Filter out links in favor of real executables on\n # all systems but Cray\n host_platform = str(spack.architecture.platform())\n if os.path.islink(exe) and host_platform != 'cray':\n continue\n\n result.append(exe)\n\n return result\n\n @classmethod\n def determine_version(cls, exe):\n try:\n output = spack.compiler.get_compiler_version_output(\n exe, '--version'\n )\n except Exception:\n output = ''\n # Apple's gcc is actually apple clang, so skip it.\n # Users can add it manually to compilers.yaml at their own risk.\n if 'Apple' in output:\n return None\n\n version_regex = re.compile(r'([\\d\\.]+)')\n for vargs in ('-dumpfullversion', '-dumpversion'):\n try:\n output = spack.compiler.get_compiler_version_output(exe, vargs)\n match = version_regex.search(output)\n if match:\n return match.group(1)\n except spack.util.executable.ProcessError:\n pass\n except Exception as e:\n tty.debug(e)\n\n return None\n\n @classmethod\n def determine_variants(cls, exes, version_str):\n languages, compilers = set(), {}\n for exe in exes:\n basename = os.path.basename(exe)\n if 'g++' in basename:\n languages.add('c++')\n compilers['cxx'] = exe\n elif 'gfortran' in basename:\n languages.add('fortran')\n compilers['fortran'] = exe\n elif 'gcc' in basename:\n languages.add('c')\n compilers['c'] = exe\n variant_str = 'languages={0}'.format(','.join(languages))\n return variant_str, {'compilers': compilers}\n\n @classmethod\n def validate_detected_spec(cls, spec, extra_attributes):\n # For GCC 'compilers' is a mandatory attribute\n msg = ('the extra attribute \"compilers\" must be set for '\n 'the detected spec \"{0}\"'.format(spec))\n assert 'compilers' in extra_attributes, msg\n\n compilers = extra_attributes['compilers']\n for constraint, key in {\n 'languages=c': 'c',\n 'languages=c++': 'cxx',\n 'languages=fortran': 'fortran'\n }.items():\n if spec.satisfies(constraint, strict=True):\n msg = '{0} not in {1}'\n assert key in compilers, msg.format(key, spec)\n\n @property\n def cc(self):\n msg = \"cannot retrieve C compiler [spec is not concrete]\"\n assert self.spec.concrete, msg\n if self.spec.external:\n return self.spec.extra_attributes['compilers'].get('c', None)\n result = None\n if 'languages=c' in self.spec:\n result = str(self.spec.prefix.bin.gcc)\n return result\n\n @property\n def cxx(self):\n msg = \"cannot retrieve C++ compiler [spec is not concrete]\"\n assert self.spec.concrete, msg\n if self.spec.external:\n return self.spec.extra_attributes['compilers'].get('cxx', None)\n result = None\n if 'languages=c++' in self.spec:\n result = os.path.join(self.spec.prefix.bin, 'g++')\n return result\n\n @property\n def fortran(self):\n msg = \"cannot retrieve Fortran compiler [spec is not concrete]\"\n assert self.spec.concrete, msg\n if self.spec.external:\n return self.spec.extra_attributes['compilers'].get('fortran', None)\n result = None\n if 'languages=fortran' in self.spec:\n result = str(self.spec.prefix.bin.gfortran)\n return result\n\n def url_for_version(self, version):\n # This function will be called when trying to fetch from url, before\n # mirrors are tried. 
It takes care of modifying the suffix of gnu\n # mirror path so that Spack will also look for the correct file in\n # the mirrors\n if (version < Version('6.4.0') and version != Version('5.5.0')) \\\n or version == Version('7.1.0'):\n self.gnu_mirror_path = self.gnu_mirror_path.replace('xz', 'bz2')\n return super(Gcc, self).url_for_version(version)\n\n def patch(self):\n spec = self.spec\n prefix = self.spec.prefix\n\n # Fix a standard header file for OS X Yosemite that\n # is GCC incompatible by replacing non-GCC compliant macros\n if 'yosemite' in spec.architecture:\n if os.path.isfile('/usr/include/dispatch/object.h'):\n new_dispatch_dir = join_path(prefix, 'include', 'dispatch')\n mkdirp(new_dispatch_dir)\n new_header = join_path(new_dispatch_dir, 'object.h')\n install('/usr/include/dispatch/object.h', new_header)\n filter_file(r'typedef void \\(\\^dispatch_block_t\\)\\(void\\)',\n 'typedef void* dispatch_block_t',\n new_header)\n\n # Use installed libz\n if self.version >= Version('6'):\n filter_file('@zlibdir@',\n '-L{0}'.format(spec['zlib'].prefix.lib),\n 'gcc/Makefile.in')\n filter_file('@zlibinc@',\n '-I{0}'.format(spec['zlib'].prefix.include),\n 'gcc/Makefile.in')\n\n # https://gcc.gnu.org/install/configure.html\n def configure_args(self):\n spec = self.spec\n\n # Generic options to compile GCC\n options = [\n # Distributor options\n '--with-pkgversion=Spack GCC',\n '--with-bugurl=https://github.com/spack/spack/issues',\n # Xcode 10 dropped 32-bit support\n '--disable-multilib',\n '--enable-languages={0}'.format(\n ','.join(spec.variants['languages'].value)),\n # Drop gettext dependency\n '--disable-nls'\n ]\n\n # Use installed libz\n if self.version >= Version('6'):\n options.append('--with-system-zlib')\n\n if 'zstd' in spec:\n options.append('--with-zstd={0}'.format(spec['zstd'].prefix))\n\n # Enabling language \"jit\" requires --enable-host-shared.\n if 'languages=jit' in spec:\n options.append('--enable-host-shared')\n\n # Binutils\n if spec.satisfies('+binutils'):\n binutils = spec['binutils'].prefix.bin\n options.extend([\n '--with-gnu-ld',\n '--with-ld=' + binutils.ld,\n '--with-gnu-as',\n '--with-as=' + binutils.join('as'),\n ])\n\n # enable_bootstrap\n if spec.satisfies('+bootstrap'):\n options.extend([\n '--enable-bootstrap',\n ])\n\n # Configure include and lib directories explicitly for these\n # dependencies since the short GCC option assumes that libraries\n # are installed in \"/lib\" which might not be true on all OS\n # (see #10842)\n #\n # More info at: https://gcc.gnu.org/install/configure.html\n for dep_str in ('mpfr', 'gmp', 'mpc', 'isl'):\n if dep_str not in spec:\n continue\n\n dep_spec = spec[dep_str]\n include_dir = dep_spec.headers.directories[0]\n lib_dir = dep_spec.libs.directories[0]\n options.extend([\n '--with-{0}-include={1}'.format(dep_str, include_dir),\n '--with-{0}-lib={1}'.format(dep_str, lib_dir)\n ])\n\n # nvptx-none offloading for host compiler\n if spec.satisfies('+nvptx'):\n options.extend(['--enable-offload-targets=nvptx-none',\n '--with-cuda-driver-include={0}'.format(\n spec['cuda'].prefix.include),\n '--with-cuda-driver-lib={0}'.format(\n spec['cuda'].libs.directories[0]),\n '--disable-bootstrap',\n '--disable-multilib'])\n\n if sys.platform == 'darwin':\n options.extend([\n '--with-native-system-header-dir=/usr/include',\n '--with-sysroot={0}'.format(macos_sdk_path()),\n '--with-libiconv-prefix={0}'.format(spec['iconv'].prefix)\n ])\n\n # enable appropriate bootstrapping flags\n stage1_ldflags = str(self.rpath_args)\n boot_ldflags = 
stage1_ldflags + ' -static-libstdc++ -static-libgcc'\n options.append('--with-stage1-ldflags=' + stage1_ldflags)\n options.append('--with-boot-ldflags=' + boot_ldflags)\n\n return options\n\n # run configure/make/make(install) for the nvptx-none target\n # before running the host compiler phases\n @run_before('configure')\n def nvptx_install(self):\n spec = self.spec\n prefix = self.prefix\n\n if not spec.satisfies('+nvptx'):\n return\n\n # config.guess returns the host triple, e.g. \"x86_64-pc-linux-gnu\"\n guess = Executable('./config.guess')\n targetguess = guess(output=str).rstrip('\\n')\n\n options = getattr(self, 'configure_flag_args', [])\n options += ['--prefix={0}'.format(prefix)]\n\n options += [\n '--with-cuda-driver-include={0}'.format(\n spec['cuda'].prefix.include),\n '--with-cuda-driver-lib={0}'.format(\n spec['cuda'].libs.directories[0]),\n ]\n\n with working_dir('nvptx-tools'):\n configure = Executable(\"./configure\")\n configure(*options)\n make()\n make('install')\n\n pattern = join_path(self.stage.source_path, 'newlibsource', '*')\n files = glob.glob(pattern)\n\n if files:\n symlink(join_path(files[0], 'newlib'), 'newlib')\n\n # self.build_directory = 'spack-build-nvptx'\n with working_dir('spack-build-nvptx', create=True):\n\n options = ['--prefix={0}'.format(prefix),\n '--enable-languages={0}'.format(\n ','.join(spec.variants['languages'].value)),\n '--with-mpfr={0}'.format(spec['mpfr'].prefix),\n '--with-gmp={0}'.format(spec['gmp'].prefix),\n '--target=nvptx-none',\n '--with-build-time-tools={0}'.format(\n join_path(prefix,\n 'nvptx-none', 'bin')),\n '--enable-as-accelerator-for={0}'.format(\n targetguess),\n '--disable-sjlj-exceptions',\n '--enable-newlib-io-long-long',\n ]\n\n configure = Executable(\"../configure\")\n configure(*options)\n make()\n make('install')\n\n @property\n def install_targets(self):\n if '+strip' in self.spec:\n return ['install-strip']\n return ['install']\n\n @property\n def spec_dir(self):\n # e.g. 
lib/gcc/x86_64-unknown-linux-gnu/4.9.2\n spec_dir = glob.glob('{0}/gcc/*/*'.format(self.prefix.lib))\n return spec_dir[0] if spec_dir else None\n\n @run_after('install')\n def write_rpath_specs(self):\n \"\"\"Generate a spec file so the linker adds a rpath to the libs\n the compiler used to build the executable.\"\"\"\n if not self.spec_dir:\n tty.warn('Could not install specs for {0}.'.format(\n self.spec.format('{name}{@version}')))\n return\n\n gcc = self.spec['gcc'].command\n lines = gcc('-dumpspecs', output=str).strip().split('\\n')\n specs_file = join_path(self.spec_dir, 'specs')\n with open(specs_file, 'w') as out:\n for line in lines:\n out.write(line + '\\n')\n if line.startswith('*link:'):\n out.write('-rpath {0}:{1} '.format(\n self.prefix.lib, self.prefix.lib64))\n set_install_permissions(specs_file)\n\n def setup_run_environment(self, env):\n # Search prefix directory for possibly modified compiler names\n from spack.compilers.gcc import Gcc as Compiler\n\n # Get the contents of the installed binary directory\n bin_path = self.spec.prefix.bin\n\n if not os.path.isdir(bin_path):\n return\n\n bin_contents = os.listdir(bin_path)\n\n # Find the first non-symlink compiler binary present for each language\n for lang in ['cc', 'cxx', 'fc', 'f77']:\n for filename, regexp in itertools.product(\n bin_contents,\n Compiler.search_regexps(lang)\n ):\n if not regexp.match(filename):\n continue\n\n abspath = os.path.join(bin_path, filename)\n if os.path.islink(abspath):\n continue\n\n # Set the proper environment variable\n env.set(lang.upper(), abspath)\n # Stop searching filename/regex combos for this language\n break\n", "path": "var/spack/repos/builtin/packages/gcc/package.py" } ]
[ { "content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\nimport glob\nimport itertools\nimport os\nimport re\nimport sys\n\nimport llnl.util.tty as tty\nimport spack.architecture\nimport spack.util.executable\n\nfrom spack.operating_systems.mac_os import macos_version, macos_sdk_path\n\n\nclass Gcc(AutotoolsPackage, GNUMirrorPackage):\n \"\"\"The GNU Compiler Collection includes front ends for C, C++, Objective-C,\n Fortran, Ada, and Go, as well as libraries for these languages.\"\"\"\n\n homepage = 'https://gcc.gnu.org'\n gnu_mirror_path = 'gcc/gcc-9.2.0/gcc-9.2.0.tar.xz'\n git = 'git://gcc.gnu.org/git/gcc.git'\n list_url = 'http://ftp.gnu.org/gnu/gcc/'\n list_depth = 1\n\n maintainers = ['michaelkuhn']\n\n version('master', branch='master')\n\n version('10.2.0', sha256='b8dd4368bb9c7f0b98188317ee0254dd8cc99d1e3a18d0ff146c855fe16c1d8c')\n version('10.1.0', sha256='b6898a23844b656f1b68691c5c012036c2e694ac4b53a8918d4712ad876e7ea2')\n\n version('9.3.0', sha256='71e197867611f6054aa1119b13a0c0abac12834765fe2d81f35ac57f84f742d1')\n version('9.2.0', sha256='ea6ef08f121239da5695f76c9b33637a118dcf63e24164422231917fa61fb206')\n version('9.1.0', sha256='79a66834e96a6050d8fe78db2c3b32fb285b230b855d0a66288235bc04b327a0')\n\n version('8.4.0', sha256='e30a6e52d10e1f27ed55104ad233c30bd1e99cfb5ff98ab022dc941edd1b2dd4')\n version('8.3.0', sha256='64baadfe6cc0f4947a84cb12d7f0dfaf45bb58b7e92461639596c21e02d97d2c')\n version('8.2.0', sha256='196c3c04ba2613f893283977e6011b2345d1cd1af9abeac58e916b1aab3e0080')\n version('8.1.0', sha256='1d1866f992626e61349a1ccd0b8d5253816222cdc13390dcfaa74b093aa2b153')\n\n version('7.5.0', sha256='b81946e7f01f90528a1f7352ab08cc602b9ccc05d4e44da4bd501c5a189ee661')\n version('7.4.0', sha256='eddde28d04f334aec1604456e536416549e9b1aa137fc69204e65eb0c009fe51')\n version('7.3.0', sha256='832ca6ae04636adbb430e865a1451adf6979ab44ca1c8374f61fba65645ce15c')\n version('7.2.0', sha256='1cf7adf8ff4b5aa49041c8734bbcf1ad18cc4c94d0029aae0f4e48841088479a')\n version('7.1.0', sha256='8a8136c235f64c6fef69cac0d73a46a1a09bb250776a050aec8f9fc880bebc17')\n\n version('6.5.0', sha256='7ef1796ce497e89479183702635b14bb7a46b53249209a5e0f999bebf4740945')\n version('6.4.0', sha256='850bf21eafdfe5cd5f6827148184c08c4a0852a37ccf36ce69855334d2c914d4')\n version('6.3.0', sha256='f06ae7f3f790fbf0f018f6d40e844451e6bc3b7bc96e128e63b09825c1f8b29f')\n version('6.2.0', sha256='9944589fc722d3e66308c0ce5257788ebd7872982a718aa2516123940671b7c5')\n version('6.1.0', sha256='09c4c85cabebb971b1de732a0219609f93fc0af5f86f6e437fd8d7f832f1a351')\n\n version('5.5.0', sha256='530cea139d82fe542b358961130c69cfde8b3d14556370b65823d2f91f0ced87')\n version('5.4.0', sha256='608df76dec2d34de6558249d8af4cbee21eceddbcb580d666f7a5a583ca3303a')\n version('5.3.0', sha256='b84f5592e9218b73dbae612b5253035a7b34a9a1f7688d2e1bfaaf7267d5c4db')\n version('5.2.0', sha256='5f835b04b5f7dd4f4d2dc96190ec1621b8d89f2dc6f638f9f8bc1b1014ba8cad')\n version('5.1.0', sha256='b7dafdf89cbb0e20333dbf5b5349319ae06e3d1a30bf3515b5488f7e89dca5ad')\n\n version('4.9.4', sha256='6c11d292cd01b294f9f84c9a59c230d80e9e4a47e5c6355f046bb36d4f358092')\n version('4.9.3', sha256='2332b2a5a321b57508b9031354a8503af6fdfb868b8c1748d33028d100a8b67e')\n version('4.9.2', sha256='2020c98295856aa13fda0f2f3a4794490757fc24bcca918d52cc8b4917b972dd')\n version('4.9.1', 
sha256='d334781a124ada6f38e63b545e2a3b8c2183049515a1abab6d513f109f1d717e')\n version('4.8.5', sha256='22fb1e7e0f68a63cee631d85b20461d1ea6bda162f03096350e38c8d427ecf23')\n version('4.8.4', sha256='4a80aa23798b8e9b5793494b8c976b39b8d9aa2e53cd5ed5534aff662a7f8695')\n version('4.7.4', sha256='92e61c6dc3a0a449e62d72a38185fda550168a86702dea07125ebd3ec3996282')\n version('4.6.4', sha256='35af16afa0b67af9b8eb15cafb76d2bc5f568540552522f5dc2c88dd45d977e8')\n version('4.5.4', sha256='eef3f0456db8c3d992cbb51d5d32558190bc14f3bc19383dd93acc27acc6befc')\n\n # We specifically do not add 'all' variant here because:\n # (i) Ada, Go, Jit, and Objective-C++ are not default languages.\n # In that respect, the name 'all' is rather misleading.\n # (ii) Languages other than c,c++,fortran are prone to configure bug in GCC\n # For example, 'java' appears to ignore custom location of zlib\n # (iii) meaning of 'all' changes with GCC version, i.e. 'java' is not part\n # of gcc7. Correctly specifying conflicts() and depends_on() in such a\n # case is a PITA.\n variant('languages',\n default='c,c++,fortran',\n values=('ada', 'brig', 'c', 'c++', 'fortran',\n 'go', 'java', 'jit', 'lto', 'objc', 'obj-c++'),\n multi=True,\n description='Compilers and runtime libraries to build')\n variant('binutils',\n default=False,\n description='Build via binutils')\n variant('piclibs',\n default=False,\n description='Build PIC versions of libgfortran.a and libstdc++.a')\n variant('strip',\n default=False,\n description='Strip executables to reduce installation size')\n variant('nvptx',\n default=False,\n description='Target nvptx offloading to NVIDIA GPUs')\n variant('bootstrap',\n default=False,\n description='add --enable-bootstrap flag for stage3 build')\n\n depends_on('flex', type='build', when='@master')\n\n # https://gcc.gnu.org/install/prerequisites.html\n depends_on('[email protected]:')\n # GCC 7.3 does not compile with newer releases on some platforms, see\n # https://github.com/spack/spack/issues/6902#issuecomment-433030376\n depends_on('[email protected]:3.1.6', when='@:9.9')\n depends_on('[email protected]:', when='@10:')\n depends_on('[email protected]:', when='@4.5:')\n # Already released GCC versions do not support any newer version of ISL\n # GCC 5.4 https://github.com/spack/spack/issues/6902#issuecomment-433072097\n # GCC 7.3 https://github.com/spack/spack/issues/6902#issuecomment-433030376\n # GCC 9+ https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86724\n depends_on('[email protected]', when='@5.0:5.2')\n depends_on('[email protected]', when='@5.3:5.9')\n depends_on('[email protected]:0.18', when='@6:8.9')\n depends_on('[email protected]:0.20', when='@9:9.9')\n depends_on('[email protected]:', when='@10:')\n depends_on('zlib', when='@6:')\n depends_on('zstd', when='@10:')\n depends_on('iconv', when='platform=darwin')\n depends_on('gnat', when='languages=ada')\n depends_on('binutils~libiberty', when='+binutils', type=('build', 'link', 'run'))\n depends_on('zip', type='build', when='languages=java')\n depends_on('cuda', when='+nvptx')\n\n # The server is sometimes a bit slow to respond\n timeout = {'timeout': 60}\n\n resource(name='newlib',\n url='ftp://sourceware.org/pub/newlib/newlib-3.0.0.20180831.tar.gz',\n sha256='3ad3664f227357df15ff34e954bfd9f501009a647667cd307bf0658aefd6eb5b',\n destination='newlibsource',\n when='+nvptx',\n fetch_options=timeout)\n\n # nvptx-tools does not seem to work as a dependency,\n # but does fine when the source is inside the gcc build directory\n # nvptx-tools doesn't have any releases, so 
grabbing the last commit\n resource(name='nvptx-tools',\n git='https://github.com/MentorEmbedded/nvptx-tools',\n commit='5f6f343a302d620b0868edab376c00b15741e39e',\n when='+nvptx')\n\n # TODO: integrate these libraries.\n # depends_on('ppl')\n # depends_on('cloog')\n\n # https://gcc.gnu.org/install/test.html\n depends_on('[email protected]', type='test')\n depends_on('expect', type='test')\n depends_on('tcl', type='test')\n depends_on('[email protected]:', type='test')\n depends_on('[email protected]:', type='test')\n\n # See https://golang.org/doc/install/gccgo#Releases\n provides('golang', when='languages=go @4.6:')\n provides('golang@:1', when='languages=go @4.7.1:')\n provides('golang@:1.1', when='languages=go @4.8:')\n provides('golang@:1.1.2', when='languages=go @4.8.2:')\n provides('golang@:1.2', when='languages=go @4.9:')\n provides('golang@:1.4', when='languages=go @5:')\n provides('golang@:1.6.1', when='languages=go @6:')\n provides('golang@:1.8', when='languages=go @7:')\n\n # For a list of valid languages for a specific release,\n # run the following command in the GCC source directory:\n # $ grep ^language= gcc/*/config-lang.in\n # See https://gcc.gnu.org/install/configure.html\n\n # Support for processing BRIG 1.0 files was added in GCC 7\n # BRIG is a binary format for HSAIL:\n # (Heterogeneous System Architecture Intermediate Language).\n # See https://gcc.gnu.org/gcc-7/changes.html\n conflicts('languages=brig', when='@:6')\n\n # BRIG does not seem to be supported on macOS\n conflicts('languages=brig', when='platform=darwin')\n\n # GCC 4.8 added a 'c' language. I'm sure C was always built,\n # but this is the first version that accepts 'c' as a valid language.\n conflicts('languages=c', when='@:4.7')\n\n # GCC 4.6 added support for the Go programming language.\n # See https://gcc.gnu.org/gcc-4.6/changes.html\n conflicts('languages=go', when='@:4.5')\n\n # Go is not supported on macOS\n conflicts('languages=go', when='platform=darwin')\n\n # The GCC Java frontend and associated libjava runtime library\n # have been removed from GCC as of GCC 7.\n # See https://gcc.gnu.org/gcc-7/changes.html\n conflicts('languages=java', when='@7:')\n\n # GCC 5 added the ability to build GCC as a Just-In-Time compiler.\n # See https://gcc.gnu.org/gcc-5/changes.html\n conflicts('languages=jit', when='@:4')\n\n # NVPTX offloading supported in 7 and later by limited languages\n conflicts('+nvptx', when='@:6', msg='NVPTX only supported in gcc 7 and above')\n conflicts('languages=ada', when='+nvptx')\n conflicts('languages=brig', when='+nvptx')\n conflicts('languages=go', when='+nvptx')\n conflicts('languages=java', when='+nvptx')\n conflicts('languages=jit', when='+nvptx')\n conflicts('languages=objc', when='+nvptx')\n conflicts('languages=obj-c++', when='+nvptx')\n # NVPTX build disables bootstrap\n conflicts('+binutils', when='+nvptx')\n\n # Binutils can't build ld on macOS\n conflicts('+binutils', when='platform=darwin')\n\n # Newer binutils than RHEL's is required to run `as` on some instructions\n # generated by new GCC (see https://github.com/spack/spack/issues/12235)\n conflicts('~binutils', when='@7: os=rhel6',\n msg='New GCC cannot use system assembler on RHEL6')\n\n if sys.platform == 'darwin':\n # Fix parallel build on APFS filesystem\n # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81797\n if macos_version() >= Version('10.13'):\n patch('darwin/apfs.patch', when='@5.5.0,6.1:6.4,7.1:7.3')\n # from homebrew via macports\n # https://trac.macports.org/ticket/56502#no1\n # see also: 
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83531\n patch('darwin/headers-10.13-fix.patch', when='@5.5.0')\n if macos_version() >= Version('10.14'):\n # Fix system headers for Mojave SDK:\n # https://github.com/Homebrew/homebrew-core/pull/39041\n patch('https://raw.githubusercontent.com/Homebrew/formula-patches/master/gcc/8.3.0-xcode-bug-_Atomic-fix.patch',\n sha256='33ee92bf678586357ee8ab9d2faddf807e671ad37b97afdd102d5d153d03ca84',\n when='@6:8')\n if macos_version() >= Version('10.15'):\n # Fix system headers for Catalina SDK\n # (otherwise __OSX_AVAILABLE_STARTING ends up undefined)\n patch('https://raw.githubusercontent.com/Homebrew/formula-patches/b8b8e65e/gcc/9.2.0-catalina.patch',\n sha256='0b8d14a7f3c6a2f0d2498526e86e088926671b5da50a554ffa6b7f73ac4f132b', when='@9.2.0')\n # Use -headerpad_max_install_names in the build,\n # otherwise updated load commands won't fit in the Mach-O header.\n # This is needed because `gcc` avoids the superenv shim.\n patch('darwin/gcc-7.1.0-headerpad.patch', when='@5:')\n patch('darwin/gcc-6.1.0-jit.patch', when='@5:7')\n patch('darwin/gcc-4.9.patch1', when='@4.9.0:4.9.3')\n patch('darwin/gcc-4.9.patch2', when='@4.9.0:4.9.3')\n\n patch('piclibs.patch', when='+piclibs')\n patch('gcc-backport.patch', when='@4.7:4.9.2,5:5.3')\n\n # Backport libsanitizer patch for glibc >= 2.31 and 5.3.0 <= gcc <= 9.2.0\n # https://bugs.gentoo.org/708346\n patch('glibc-2.31-libsanitizer-1.patch', when='@7.1.0:7.5.0,8.1.0:8.3.0,9.0.0:9.2.0')\n patch('glibc-2.31-libsanitizer-1-gcc-6.patch', when='@5.3.0:5.5.0,6.1.0:6.5.0')\n patch('glibc-2.31-libsanitizer-2.patch', when='@8.1.0:8.3.0,9.0.0:9.2.0')\n patch('glibc-2.31-libsanitizer-2-gcc-6.patch', when='@5.3.0:5.5.0,6.1.0:6.5.0')\n patch('glibc-2.31-libsanitizer-2-gcc-7.patch', when='@7.1.0:7.5.0')\n # Older versions do not compile with newer versions of glibc\n # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81712\n patch('ucontext_t.patch', when='@4.9,5.1:5.4,6.1:6.4,7.1')\n patch('ucontext_t-java.patch', when='@4.9,5.1:5.4,6.1:6.4 languages=java')\n # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81066\n patch('stack_t-4.9.patch', when='@4.9')\n patch('stack_t.patch', when='@5.1:5.4,6.1:6.4,7.1')\n # https://bugs.busybox.net/show_bug.cgi?id=10061\n patch('signal.patch', when='@4.9,5.1:5.4')\n # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85835\n patch('sys_ustat.h.patch', when='@5.0:6.4,7.0:7.3,8.1')\n patch('sys_ustat-4.9.patch', when='@4.9')\n\n # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95005\n patch('zstd.patch', when='@10.0:10.2')\n\n build_directory = 'spack-build'\n\n @property\n def executables(self):\n names = [r'gcc', r'[^\\w]?g\\+\\+', r'gfortran']\n suffixes = [r'', r'-mp-\\d+\\.\\d', r'-\\d+\\.\\d', r'-\\d+', r'\\d\\d']\n return [r''.join(x) for x in itertools.product(names, suffixes)]\n\n @classmethod\n def filter_detected_exes(cls, prefix, exes_in_prefix):\n result = []\n for exe in exes_in_prefix:\n # On systems like Ubuntu we might get multiple executables\n # with the string \"gcc\" in them. 
See:\n # https://helpmanual.io/packages/apt/gcc/\n basename = os.path.basename(exe)\n substring_to_be_filtered = [\n 'c99-gcc',\n 'c89-gcc',\n '-nm',\n '-ar',\n 'ranlib',\n 'clang' # clang++ matches g++ -> clan[g++]\n ]\n if any(x in basename for x in substring_to_be_filtered):\n continue\n # Filter out links in favor of real executables on\n # all systems but Cray\n host_platform = str(spack.architecture.platform())\n if os.path.islink(exe) and host_platform != 'cray':\n continue\n\n result.append(exe)\n\n return result\n\n @classmethod\n def determine_version(cls, exe):\n try:\n output = spack.compiler.get_compiler_version_output(\n exe, '--version'\n )\n except Exception:\n output = ''\n # Apple's gcc is actually apple clang, so skip it.\n # Users can add it manually to compilers.yaml at their own risk.\n if 'Apple' in output:\n return None\n\n version_regex = re.compile(r'([\\d\\.]+)')\n for vargs in ('-dumpfullversion', '-dumpversion'):\n try:\n output = spack.compiler.get_compiler_version_output(exe, vargs)\n match = version_regex.search(output)\n if match:\n return match.group(1)\n except spack.util.executable.ProcessError:\n pass\n except Exception as e:\n tty.debug(e)\n\n return None\n\n @classmethod\n def determine_variants(cls, exes, version_str):\n languages, compilers = set(), {}\n for exe in exes:\n basename = os.path.basename(exe)\n if 'g++' in basename:\n languages.add('c++')\n compilers['cxx'] = exe\n elif 'gfortran' in basename:\n languages.add('fortran')\n compilers['fortran'] = exe\n elif 'gcc' in basename:\n languages.add('c')\n compilers['c'] = exe\n variant_str = 'languages={0}'.format(','.join(languages))\n return variant_str, {'compilers': compilers}\n\n @classmethod\n def validate_detected_spec(cls, spec, extra_attributes):\n # For GCC 'compilers' is a mandatory attribute\n msg = ('the extra attribute \"compilers\" must be set for '\n 'the detected spec \"{0}\"'.format(spec))\n assert 'compilers' in extra_attributes, msg\n\n compilers = extra_attributes['compilers']\n for constraint, key in {\n 'languages=c': 'c',\n 'languages=c++': 'cxx',\n 'languages=fortran': 'fortran'\n }.items():\n if spec.satisfies(constraint, strict=True):\n msg = '{0} not in {1}'\n assert key in compilers, msg.format(key, spec)\n\n @property\n def cc(self):\n msg = \"cannot retrieve C compiler [spec is not concrete]\"\n assert self.spec.concrete, msg\n if self.spec.external:\n return self.spec.extra_attributes['compilers'].get('c', None)\n result = None\n if 'languages=c' in self.spec:\n result = str(self.spec.prefix.bin.gcc)\n return result\n\n @property\n def cxx(self):\n msg = \"cannot retrieve C++ compiler [spec is not concrete]\"\n assert self.spec.concrete, msg\n if self.spec.external:\n return self.spec.extra_attributes['compilers'].get('cxx', None)\n result = None\n if 'languages=c++' in self.spec:\n result = os.path.join(self.spec.prefix.bin, 'g++')\n return result\n\n @property\n def fortran(self):\n msg = \"cannot retrieve Fortran compiler [spec is not concrete]\"\n assert self.spec.concrete, msg\n if self.spec.external:\n return self.spec.extra_attributes['compilers'].get('fortran', None)\n result = None\n if 'languages=fortran' in self.spec:\n result = str(self.spec.prefix.bin.gfortran)\n return result\n\n def url_for_version(self, version):\n # This function will be called when trying to fetch from url, before\n # mirrors are tried. 
It takes care of modifying the suffix of gnu\n # mirror path so that Spack will also look for the correct file in\n # the mirrors\n if (version < Version('6.4.0') and version != Version('5.5.0')) \\\n or version == Version('7.1.0'):\n self.gnu_mirror_path = self.gnu_mirror_path.replace('xz', 'bz2')\n return super(Gcc, self).url_for_version(version)\n\n def patch(self):\n spec = self.spec\n prefix = self.spec.prefix\n\n # Fix a standard header file for OS X Yosemite that\n # is GCC incompatible by replacing non-GCC compliant macros\n if 'yosemite' in spec.architecture:\n if os.path.isfile('/usr/include/dispatch/object.h'):\n new_dispatch_dir = join_path(prefix, 'include', 'dispatch')\n mkdirp(new_dispatch_dir)\n new_header = join_path(new_dispatch_dir, 'object.h')\n install('/usr/include/dispatch/object.h', new_header)\n filter_file(r'typedef void \\(\\^dispatch_block_t\\)\\(void\\)',\n 'typedef void* dispatch_block_t',\n new_header)\n\n # Use installed libz\n if self.version >= Version('6'):\n filter_file('@zlibdir@',\n '-L{0}'.format(spec['zlib'].prefix.lib),\n 'gcc/Makefile.in')\n filter_file('@zlibinc@',\n '-I{0}'.format(spec['zlib'].prefix.include),\n 'gcc/Makefile.in')\n\n # https://gcc.gnu.org/install/configure.html\n def configure_args(self):\n spec = self.spec\n\n # Generic options to compile GCC\n options = [\n # Distributor options\n '--with-pkgversion=Spack GCC',\n '--with-bugurl=https://github.com/spack/spack/issues',\n # Xcode 10 dropped 32-bit support\n '--disable-multilib',\n '--enable-languages={0}'.format(\n ','.join(spec.variants['languages'].value)),\n # Drop gettext dependency\n '--disable-nls'\n ]\n\n # Use installed libz\n if self.version >= Version('6'):\n options.append('--with-system-zlib')\n\n if 'zstd' in spec:\n options.append('--with-zstd={0}'.format(spec['zstd'].prefix))\n\n # Enabling language \"jit\" requires --enable-host-shared.\n if 'languages=jit' in spec:\n options.append('--enable-host-shared')\n\n # Binutils\n if spec.satisfies('+binutils'):\n binutils = spec['binutils'].prefix.bin\n options.extend([\n '--with-gnu-ld',\n '--with-ld=' + binutils.ld,\n '--with-gnu-as',\n '--with-as=' + binutils.join('as'),\n ])\n\n # enable_bootstrap\n if spec.satisfies('+bootstrap'):\n options.extend([\n '--enable-bootstrap',\n ])\n\n # Configure include and lib directories explicitly for these\n # dependencies since the short GCC option assumes that libraries\n # are installed in \"/lib\" which might not be true on all OS\n # (see #10842)\n #\n # More info at: https://gcc.gnu.org/install/configure.html\n for dep_str in ('mpfr', 'gmp', 'mpc', 'isl'):\n if dep_str not in spec:\n continue\n\n dep_spec = spec[dep_str]\n include_dir = dep_spec.headers.directories[0]\n lib_dir = dep_spec.libs.directories[0]\n options.extend([\n '--with-{0}-include={1}'.format(dep_str, include_dir),\n '--with-{0}-lib={1}'.format(dep_str, lib_dir)\n ])\n\n # nvptx-none offloading for host compiler\n if spec.satisfies('+nvptx'):\n options.extend(['--enable-offload-targets=nvptx-none',\n '--with-cuda-driver-include={0}'.format(\n spec['cuda'].prefix.include),\n '--with-cuda-driver-lib={0}'.format(\n spec['cuda'].libs.directories[0]),\n '--disable-bootstrap',\n '--disable-multilib'])\n\n if sys.platform == 'darwin':\n options.extend([\n '--with-native-system-header-dir=/usr/include',\n '--with-sysroot={0}'.format(macos_sdk_path()),\n '--with-libiconv-prefix={0}'.format(spec['iconv'].prefix)\n ])\n\n # enable appropriate bootstrapping flags\n stage1_ldflags = str(self.rpath_args)\n boot_ldflags = 
stage1_ldflags + ' -static-libstdc++ -static-libgcc'\n options.append('--with-stage1-ldflags=' + stage1_ldflags)\n options.append('--with-boot-ldflags=' + boot_ldflags)\n\n return options\n\n # run configure/make/make(install) for the nvptx-none target\n # before running the host compiler phases\n @run_before('configure')\n def nvptx_install(self):\n spec = self.spec\n prefix = self.prefix\n\n if not spec.satisfies('+nvptx'):\n return\n\n # config.guess returns the host triple, e.g. \"x86_64-pc-linux-gnu\"\n guess = Executable('./config.guess')\n targetguess = guess(output=str).rstrip('\\n')\n\n options = getattr(self, 'configure_flag_args', [])\n options += ['--prefix={0}'.format(prefix)]\n\n options += [\n '--with-cuda-driver-include={0}'.format(\n spec['cuda'].prefix.include),\n '--with-cuda-driver-lib={0}'.format(\n spec['cuda'].libs.directories[0]),\n ]\n\n with working_dir('nvptx-tools'):\n configure = Executable(\"./configure\")\n configure(*options)\n make()\n make('install')\n\n pattern = join_path(self.stage.source_path, 'newlibsource', '*')\n files = glob.glob(pattern)\n\n if files:\n symlink(join_path(files[0], 'newlib'), 'newlib')\n\n # self.build_directory = 'spack-build-nvptx'\n with working_dir('spack-build-nvptx', create=True):\n\n options = ['--prefix={0}'.format(prefix),\n '--enable-languages={0}'.format(\n ','.join(spec.variants['languages'].value)),\n '--with-mpfr={0}'.format(spec['mpfr'].prefix),\n '--with-gmp={0}'.format(spec['gmp'].prefix),\n '--target=nvptx-none',\n '--with-build-time-tools={0}'.format(\n join_path(prefix,\n 'nvptx-none', 'bin')),\n '--enable-as-accelerator-for={0}'.format(\n targetguess),\n '--disable-sjlj-exceptions',\n '--enable-newlib-io-long-long',\n ]\n\n configure = Executable(\"../configure\")\n configure(*options)\n make()\n make('install')\n\n @property\n def install_targets(self):\n if '+strip' in self.spec:\n return ['install-strip']\n return ['install']\n\n @property\n def spec_dir(self):\n # e.g. 
lib/gcc/x86_64-unknown-linux-gnu/4.9.2\n spec_dir = glob.glob('{0}/gcc/*/*'.format(self.prefix.lib))\n return spec_dir[0] if spec_dir else None\n\n @run_after('install')\n def write_rpath_specs(self):\n \"\"\"Generate a spec file so the linker adds a rpath to the libs\n the compiler used to build the executable.\"\"\"\n if not self.spec_dir:\n tty.warn('Could not install specs for {0}.'.format(\n self.spec.format('{name}{@version}')))\n return\n\n gcc = self.spec['gcc'].command\n lines = gcc('-dumpspecs', output=str).strip().split('\\n')\n specs_file = join_path(self.spec_dir, 'specs')\n with open(specs_file, 'w') as out:\n for line in lines:\n out.write(line + '\\n')\n if line.startswith('*link:'):\n out.write('-rpath {0}:{1} '.format(\n self.prefix.lib, self.prefix.lib64))\n set_install_permissions(specs_file)\n\n def setup_run_environment(self, env):\n # Search prefix directory for possibly modified compiler names\n from spack.compilers.gcc import Gcc as Compiler\n\n # Get the contents of the installed binary directory\n bin_path = self.spec.prefix.bin\n\n if not os.path.isdir(bin_path):\n return\n\n bin_contents = os.listdir(bin_path)\n\n # Find the first non-symlink compiler binary present for each language\n for lang in ['cc', 'cxx', 'fc', 'f77']:\n for filename, regexp in itertools.product(\n bin_contents,\n Compiler.search_regexps(lang)\n ):\n if not regexp.match(filename):\n continue\n\n abspath = os.path.join(bin_path, filename)\n if os.path.islink(abspath):\n continue\n\n # Set the proper environment variable\n env.set(lang.upper(), abspath)\n # Stop searching filename/regex combos for this language\n break\n", "path": "var/spack/repos/builtin/packages/gcc/package.py" } ]
diff --git a/var/spack/repos/builtin/packages/gcc/package.py b/var/spack/repos/builtin/packages/gcc/package.py index 991f0fadb01bc1..6677961af68a01 100644 --- a/var/spack/repos/builtin/packages/gcc/package.py +++ b/var/spack/repos/builtin/packages/gcc/package.py @@ -270,7 +270,7 @@ class Gcc(AutotoolsPackage, GNUMirrorPackage): patch('sys_ustat-4.9.patch', when='@4.9') # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95005 - patch('zstd.patch', when='@10:') + patch('zstd.patch', when='@10.0:10.2') build_directory = 'spack-build'
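The one-hunk fix above tightens a Spack version constraint: `@10:` is an open-ended range (GCC 10.0 and everything newer), while `@10.0:10.2` is bounded on both sides, so `zstd.patch` stops being applied to GCC releases past 10.2 that already carry the upstream fix for bug 95005. A minimal sketch of those range semantics follows — an illustration with a hypothetical `in_range` helper, not Spack's real `spack.version` API:

```python
# Illustration only: mimics the behaviour of Spack's '@lo:hi' version ranges
# (inclusive bounds, prefix matching on the upper bound, open-ended when a
# bound is omitted). This is NOT the real spack.version API.
def in_range(version: str, spec: str) -> bool:
    def as_tuple(v: str):
        return tuple(int(p) for p in v.split("."))

    lo, _, hi = spec.partition(":")
    # Compare only as many components as the bound itself specifies,
    # so '10.2' as an upper bound still admits 10.2.0.
    if lo and as_tuple(version)[: len(as_tuple(lo))] < as_tuple(lo):
        return False
    if hi and as_tuple(version)[: len(as_tuple(hi))] > as_tuple(hi):
        return False
    return True

for v in ("9.3.0", "10.1.0", "10.2.0", "11.1.0"):
    print(v, "@10: ->", in_range(v, "10:"),
          "| @10.0:10.2 ->", in_range(v, "10.0:10.2"))
```

Both bounds are inclusive and an omitted bound leaves that side open, which is why the original `when='@10:'` would have kept patching every future GCC major.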
psf__black-1892
s390x: test_python2/test_python2_unicode_literals can't assign to () INTERNAL ERROR During the build of 19.10b0 in Fedora, the following test failure occurs on s390x (Big Endian) architecture: ``` ====================================================================== FAIL: test_python2 (tests.test_black.BlackTestCase) ---------------------------------------------------------------------- Traceback (most recent call last): File "/builddir/build/BUILD/black-19.10b0/black.py", line 3754, in assert_equivalent src_ast = parse_ast(src) File "/builddir/build/BUILD/black-19.10b0/black.py", line 3686, in parse_ast return ast27.parse(src) File "/usr/lib64/python3.8/site-packages/typed_ast/ast27.py", line 50, in parse return _ast27.parse(source, filename, mode) File "<unknown>", line 10 SyntaxError: can't assign to () During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/usr/lib64/python3.8/unittest/mock.py", line 1342, in patched return func(*newargs, **newkeywargs) File "/builddir/build/BUILD/black-19.10b0/tests/test_black.py", line 543, in test_python2 black.assert_equivalent(source, actual) File "/builddir/build/BUILD/black-19.10b0/black.py", line 3756, in assert_equivalent raise AssertionError( AssertionError: cannot use --safe with this file; failed to parse source file. AST error message: can't assign to () (<unknown>, line 10) ====================================================================== FAIL: test_python2_unicode_literals (tests.test_black.BlackTestCase) ---------------------------------------------------------------------- Traceback (most recent call last): File "/usr/lib64/python3.8/unittest/mock.py", line 1342, in patched return func(*newargs, **newkeywargs) File "/builddir/build/BUILD/black-19.10b0/tests/test_black.py", line 560, in test_python2_unicode_literals black.assert_equivalent(source, actual) File "/builddir/build/BUILD/black-19.10b0/black.py", line 3775, in assert_equivalent raise AssertionError( AssertionError: INTERNAL ERROR: Black produced code that is not equivalent to the source. Please report a bug on https://github.com/psf/black/issues. This diff might be helpful: --- src +++ dst @@ -1,4 +1,70 @@ Module( body= + ImportFrom( + level= + 0, # int + module= + '__future__', # str + names= + alias( + asname= + '_unicode_literals', # str + name= + 'unicode_literals', # str + ) # /alias + ) # /ImportFrom + ImportFrom( + level= + 0, # int + module= + '__future__', # str + names= + alias( + asname= + None, # NoneType + name= + 'absolute_import', # str + ) # /alias + ) # /ImportFrom + ImportFrom( + level= + 0, # int + module= + '__future__', # str + names= + alias( + asname= + 'lol', # str + name= + 'print_function', # str + ) # /alias + alias( + asname= + None, # NoneType + name= + 'with_function', # str + ) # /alias + ) # /ImportFrom + Expr( + value= + Constant( + value= + 'hello', # str + ) # /Constant + ) # /Expr + Expr( + value= + Constant( + value= + 'hello', # str + ) # /Constant + ) # /Expr + Expr( + value= + Constant( + value= + 'hello', # str + ) # /Constant + ) # /Expr type_ignores= ) # /Module ---------------------------------------------------------------------- Ran 119 tests in 18.012s FAILED (failures=2) ``` **To Reproduce**, run the test suite on s390x. 
Here is the build log with all the commands: [build.log](https://github.com/psf/black/files/3782557/build.log)
Here is the root log with all the package versions: [root.log](https://github.com/psf/black/files/3782561/root.log)

**Expected behavior**
Tests succeed on all architectures.

**Environment:**
- Version: 19.10b0
- OS and Python version: Linux, Fedora 32 on s390x, Python 3.8.0

**Does this bug also happen on master?**
Yes, on 6bedb5c58a7d8c25aa9509f8217bc24e9797e90d.

**Additional context**
The problem does not happen on the same build system with armv7hl or ppc64le.
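The failure boils down to `ast27.parse` (the exact call shown in the traceback) choking on black's Python 2 test fixture when typed-ast is built for a big-endian target; typed-ast 1.4.2 later shipped the fix, which is what the patch below pins. A repro sketch under that assumption — the fixture line is a guess (`can't assign to ()` suggests an empty-tuple assignment), only the `typed_ast.ast27.parse` entry point is taken from the traceback:

```python
# Repro sketch (fixture content is assumed): on an affected big-endian
# typed-ast build (< 1.4.2), ast27.parse is reported to reject source that
# is valid Python 2 and parses fine on little-endian hosts.
from typed_ast import ast27

src = "() = []\n"  # empty-tuple assignment -- legal in Python 2, not Python 3
try:
    ast27.parse(src)
    print("parsed OK")
except SyntaxError as exc:
    print("typed-ast rejected it:", exc)  # the s390x failure mode
```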
[ { "content": "# Copyright (C) 2020 Łukasz Langa\nfrom setuptools import setup\nimport sys\nimport os\n\nassert sys.version_info >= (3, 6, 0), \"black requires Python 3.6+\"\nfrom pathlib import Path # noqa E402\n\nCURRENT_DIR = Path(__file__).parent\nsys.path.insert(0, str(CURRENT_DIR)) # for setuptools.build_meta\n\n\ndef get_long_description() -> str:\n return (\n (CURRENT_DIR / \"README.md\").read_text(encoding=\"utf8\")\n + \"\\n\\n\"\n + (CURRENT_DIR / \"CHANGES.md\").read_text(encoding=\"utf8\")\n )\n\n\nUSE_MYPYC = False\n# To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH\nif len(sys.argv) > 1 and sys.argv[1] == \"--use-mypyc\":\n sys.argv.pop(1)\n USE_MYPYC = True\nif os.getenv(\"BLACK_USE_MYPYC\", None) == \"1\":\n USE_MYPYC = True\n\nif USE_MYPYC:\n mypyc_targets = [\n \"src/black/__init__.py\",\n \"src/blib2to3/pytree.py\",\n \"src/blib2to3/pygram.py\",\n \"src/blib2to3/pgen2/parse.py\",\n \"src/blib2to3/pgen2/grammar.py\",\n \"src/blib2to3/pgen2/token.py\",\n \"src/blib2to3/pgen2/driver.py\",\n \"src/blib2to3/pgen2/pgen.py\",\n ]\n\n from mypyc.build import mypycify\n\n opt_level = os.getenv(\"MYPYC_OPT_LEVEL\", \"3\")\n ext_modules = mypycify(mypyc_targets, opt_level=opt_level)\nelse:\n ext_modules = []\n\nsetup(\n name=\"black\",\n use_scm_version={\n \"write_to\": \"src/_black_version.py\",\n \"write_to_template\": 'version = \"{version}\"\\n',\n },\n description=\"The uncompromising code formatter.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n keywords=\"automation formatter yapf autopep8 pyfmt gofmt rustfmt\",\n author=\"Łukasz Langa\",\n author_email=\"[email protected]\",\n url=\"https://github.com/psf/black\",\n project_urls={\"Changelog\": \"https://github.com/psf/black/blob/master/CHANGES.md\"},\n license=\"MIT\",\n py_modules=[\"_black_version\"],\n ext_modules=ext_modules,\n packages=[\"blackd\", \"black\", \"blib2to3\", \"blib2to3.pgen2\", \"black_primer\"],\n package_dir={\"\": \"src\"},\n package_data={\"blib2to3\": [\"*.txt\"], \"black\": [\"py.typed\"]},\n python_requires=\">=3.6\",\n zip_safe=False,\n install_requires=[\n \"click>=7.1.2\",\n \"appdirs\",\n \"toml>=0.10.1\",\n \"typed-ast>=1.4.0\",\n \"regex>=2020.1.8\",\n \"pathspec>=0.6, <1\",\n \"dataclasses>=0.6; python_version < '3.7'\",\n \"typing_extensions>=3.7.4\",\n \"mypy_extensions>=0.4.3\",\n ],\n extras_require={\n \"d\": [\"aiohttp>=3.3.2\", \"aiohttp-cors\"],\n \"colorama\": [\"colorama>=0.4.3\"],\n },\n test_suite=\"tests.test_black\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Software Development :: Quality Assurance\",\n ],\n entry_points={\n \"console_scripts\": [\n \"black=black:patched_main\",\n \"blackd=blackd:patched_main [d]\",\n \"black-primer=black_primer.cli:main\",\n ]\n },\n)\n", "path": "setup.py" } ]
[ { "content": "# Copyright (C) 2020 Łukasz Langa\nfrom setuptools import setup\nimport sys\nimport os\n\nassert sys.version_info >= (3, 6, 0), \"black requires Python 3.6+\"\nfrom pathlib import Path # noqa E402\n\nCURRENT_DIR = Path(__file__).parent\nsys.path.insert(0, str(CURRENT_DIR)) # for setuptools.build_meta\n\n\ndef get_long_description() -> str:\n return (\n (CURRENT_DIR / \"README.md\").read_text(encoding=\"utf8\")\n + \"\\n\\n\"\n + (CURRENT_DIR / \"CHANGES.md\").read_text(encoding=\"utf8\")\n )\n\n\nUSE_MYPYC = False\n# To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH\nif len(sys.argv) > 1 and sys.argv[1] == \"--use-mypyc\":\n sys.argv.pop(1)\n USE_MYPYC = True\nif os.getenv(\"BLACK_USE_MYPYC\", None) == \"1\":\n USE_MYPYC = True\n\nif USE_MYPYC:\n mypyc_targets = [\n \"src/black/__init__.py\",\n \"src/blib2to3/pytree.py\",\n \"src/blib2to3/pygram.py\",\n \"src/blib2to3/pgen2/parse.py\",\n \"src/blib2to3/pgen2/grammar.py\",\n \"src/blib2to3/pgen2/token.py\",\n \"src/blib2to3/pgen2/driver.py\",\n \"src/blib2to3/pgen2/pgen.py\",\n ]\n\n from mypyc.build import mypycify\n\n opt_level = os.getenv(\"MYPYC_OPT_LEVEL\", \"3\")\n ext_modules = mypycify(mypyc_targets, opt_level=opt_level)\nelse:\n ext_modules = []\n\nsetup(\n name=\"black\",\n use_scm_version={\n \"write_to\": \"src/_black_version.py\",\n \"write_to_template\": 'version = \"{version}\"\\n',\n },\n description=\"The uncompromising code formatter.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n keywords=\"automation formatter yapf autopep8 pyfmt gofmt rustfmt\",\n author=\"Łukasz Langa\",\n author_email=\"[email protected]\",\n url=\"https://github.com/psf/black\",\n project_urls={\"Changelog\": \"https://github.com/psf/black/blob/master/CHANGES.md\"},\n license=\"MIT\",\n py_modules=[\"_black_version\"],\n ext_modules=ext_modules,\n packages=[\"blackd\", \"black\", \"blib2to3\", \"blib2to3.pgen2\", \"black_primer\"],\n package_dir={\"\": \"src\"},\n package_data={\"blib2to3\": [\"*.txt\"], \"black\": [\"py.typed\"]},\n python_requires=\">=3.6\",\n zip_safe=False,\n install_requires=[\n \"click>=7.1.2\",\n \"appdirs\",\n \"toml>=0.10.1\",\n \"typed-ast>=1.4.2\",\n \"regex>=2020.1.8\",\n \"pathspec>=0.6, <1\",\n \"dataclasses>=0.6; python_version < '3.7'\",\n \"typing_extensions>=3.7.4\",\n \"mypy_extensions>=0.4.3\",\n ],\n extras_require={\n \"d\": [\"aiohttp>=3.3.2\", \"aiohttp-cors\"],\n \"colorama\": [\"colorama>=0.4.3\"],\n },\n test_suite=\"tests.test_black\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Software Development :: Quality Assurance\",\n ],\n entry_points={\n \"console_scripts\": [\n \"black=black:patched_main\",\n \"blackd=blackd:patched_main [d]\",\n \"black-primer=black_primer.cli:main\",\n ]\n },\n)\n", "path": "setup.py" } ]
diff --git a/Pipfile b/Pipfile index ba596b3d738..9a4d5bd7c1b 100644 --- a/Pipfile +++ b/Pipfile @@ -28,7 +28,7 @@ mypy_extensions = ">=0.4.3" pathspec = ">=0.6" regex = ">=2020.1.8" toml = ">=0.10.1" -typed-ast = "==1.4.1" +typed-ast = "==1.4.2" typing_extensions = ">=3.7.4" black = {editable = true,extras = ["d"],path = "."} dataclasses = {"python_version <" = "3.7","version >" = "0.6"} diff --git a/Pipfile.lock b/Pipfile.lock index a5c38aa0777..dd78a3c3178 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "21836c0a63b6e3e1eacd0adec7dea61d2d5989e38225edd976ff144e499f0426" + "sha256": "3c4e23d0b6e49bac5ff2347dcb07bb4dd084d39b78c93a32359842dda401e7bf" }, "pipfile-spec": 6, "requires": {}, @@ -259,39 +259,39 @@ }, "typed-ast": { "hashes": [ - "sha256:0666aa36131496aed8f7be0410ff974562ab7eeac11ef351def9ea6fa28f6355", - "sha256:0c2c07682d61a629b68433afb159376e24e5b2fd4641d35424e462169c0a7919", - "sha256:0d8110d78a5736e16e26213114a38ca35cb15b6515d535413b090bd50951556d", - "sha256:249862707802d40f7f29f6e1aad8d84b5aa9e44552d2cc17384b209f091276aa", - "sha256:24995c843eb0ad11a4527b026b4dde3da70e1f2d8806c99b7b4a7cf491612652", - "sha256:269151951236b0f9a6f04015a9004084a5ab0d5f19b57de779f908621e7d8b75", - "sha256:3742b32cf1c6ef124d57f95be609c473d7ec4c14d0090e5a5e05a15269fb4d0c", - "sha256:4083861b0aa07990b619bd7ddc365eb7fa4b817e99cf5f8d9cf21a42780f6e01", - "sha256:498b0f36cc7054c1fead3d7fc59d2150f4d5c6c56ba7fb150c013fbc683a8d2d", - "sha256:4e3e5da80ccbebfff202a67bf900d081906c358ccc3d5e3c8aea42fdfdfd51c1", - "sha256:6daac9731f172c2a22ade6ed0c00197ee7cc1221aa84cfdf9c31defeb059a907", - "sha256:715ff2f2df46121071622063fc7543d9b1fd19ebfc4f5c8895af64a77a8c852c", - "sha256:73d785a950fc82dd2a25897d525d003f6378d1cb23ab305578394694202a58c3", - "sha256:7e4c9d7658aaa1fc80018593abdf8598bf91325af6af5cce4ce7c73bc45ea53d", - "sha256:8c8aaad94455178e3187ab22c8b01a3837f8ee50e09cf31f1ba129eb293ec30b", - "sha256:8ce678dbaf790dbdb3eba24056d5364fb45944f33553dd5869b7580cdbb83614", - "sha256:92c325624e304ebf0e025d1224b77dd4e6393f18aab8d829b5b7e04afe9b7a2c", - "sha256:aaee9905aee35ba5905cfb3c62f3e83b3bec7b39413f0a7f19be4e547ea01ebb", - "sha256:b52ccf7cfe4ce2a1064b18594381bccf4179c2ecf7f513134ec2f993dd4ab395", - "sha256:bcd3b13b56ea479b3650b82cabd6b5343a625b0ced5429e4ccad28a8973f301b", - "sha256:c9e348e02e4d2b4a8b2eedb48210430658df6951fa484e59de33ff773fbd4b41", - "sha256:d205b1b46085271b4e15f670058ce182bd1199e56b317bf2ec004b6a44f911f6", - "sha256:d43943ef777f9a1c42bf4e552ba23ac77a6351de620aa9acf64ad54933ad4d34", - "sha256:d5d33e9e7af3b34a40dc05f498939f0ebf187f07c385fd58d591c533ad8562fe", - "sha256:d648b8e3bf2fe648745c8ffcee3db3ff903d0817a01a12dd6a6ea7a8f4889072", - "sha256:f208eb7aff048f6bea9586e61af041ddf7f9ade7caed625742af423f6bae3298", - "sha256:fac11badff8313e23717f3dada86a15389d0708275bddf766cca67a84ead3e91", - "sha256:fc0fea399acb12edbf8a628ba8d2312f583bdbdb3335635db062fa98cf71fca4", - "sha256:fcf135e17cc74dbfbc05894ebca928ffeb23d9790b3167a674921db19082401f", - "sha256:fe460b922ec15dd205595c9b5b99e2f056fd98ae8f9f56b888e7a17dc2b757e7" + "sha256:07d49388d5bf7e863f7fa2f124b1b1d89d8aa0e2f7812faff0a5658c01c59aa1", + "sha256:14bf1522cdee369e8f5581238edac09150c765ec1cb33615855889cf33dcb92d", + "sha256:240296b27397e4e37874abb1df2a608a92df85cf3e2a04d0d4d61055c8305ba6", + "sha256:36d829b31ab67d6fcb30e185ec996e1f72b892255a745d3a82138c97d21ed1cd", + "sha256:37f48d46d733d57cc70fd5f30572d11ab8ed92da6e6b28e024e4a3edfb456e37", + 
"sha256:4c790331247081ea7c632a76d5b2a265e6d325ecd3179d06e9cf8d46d90dd151", + "sha256:5dcfc2e264bd8a1db8b11a892bd1647154ce03eeba94b461effe68790d8b8e07", + "sha256:7147e2a76c75f0f64c4319886e7639e490fee87c9d25cb1d4faef1d8cf83a440", + "sha256:7703620125e4fb79b64aa52427ec192822e9f45d37d4b6625ab37ef403e1df70", + "sha256:8368f83e93c7156ccd40e49a783a6a6850ca25b556c0fa0240ed0f659d2fe496", + "sha256:84aa6223d71012c68d577c83f4e7db50d11d6b1399a9c779046d75e24bed74ea", + "sha256:85f95aa97a35bdb2f2f7d10ec5bbdac0aeb9dafdaf88e17492da0504de2e6400", + "sha256:8db0e856712f79c45956da0c9a40ca4246abc3485ae0d7ecc86a20f5e4c09abc", + "sha256:9044ef2df88d7f33692ae3f18d3be63dec69c4fb1b5a4a9ac950f9b4ba571606", + "sha256:963c80b583b0661918718b095e02303d8078950b26cc00b5e5ea9ababe0de1fc", + "sha256:987f15737aba2ab5f3928c617ccf1ce412e2e321c77ab16ca5a293e7bbffd581", + "sha256:9ec45db0c766f196ae629e509f059ff05fc3148f9ffd28f3cfe75d4afb485412", + "sha256:9fc0b3cb5d1720e7141d103cf4819aea239f7d136acf9ee4a69b047b7986175a", + "sha256:a2c927c49f2029291fbabd673d51a2180038f8cd5a5b2f290f78c4516be48be2", + "sha256:a38878a223bdd37c9709d07cd357bb79f4c760b29210e14ad0fb395294583787", + "sha256:b4fcdcfa302538f70929eb7b392f536a237cbe2ed9cba88e3bf5027b39f5f77f", + "sha256:c0c74e5579af4b977c8b932f40a5464764b2f86681327410aa028a22d2f54937", + "sha256:c1c876fd795b36126f773db9cbb393f19808edd2637e00fd6caba0e25f2c7b64", + "sha256:c9aadc4924d4b5799112837b226160428524a9a45f830e0d0f184b19e4090487", + "sha256:cc7b98bf58167b7f2db91a4327da24fb93368838eb84a44c472283778fc2446b", + "sha256:cf54cfa843f297991b7388c281cb3855d911137223c6b6d2dd82a47ae5125a41", + "sha256:d003156bb6a59cda9050e983441b7fa2487f7800d76bdc065566b7d728b4581a", + "sha256:d175297e9533d8d37437abc14e8a83cbc68af93cc9c1c59c2c292ec59a0697a3", + "sha256:d746a437cdbca200622385305aedd9aef68e8a645e385cc483bdc5e488f07166", + "sha256:e683e409e5c45d5c9082dc1daf13f6374300806240719f95dc783d1fc942af10" ], "index": "pypi", - "version": "==1.4.1" + "version": "==1.4.2" }, "typing-extensions": { "hashes": [ @@ -499,43 +499,58 @@ }, "coverage": { "hashes": [ - "sha256:0203acd33d2298e19b57451ebb0bed0ab0c602e5cf5a818591b4918b1f97d516", - "sha256:0f313707cdecd5cd3e217fc68c78a960b616604b559e9ea60cc16795c4304259", - "sha256:1c6703094c81fa55b816f5ae542c6ffc625fec769f22b053adb42ad712d086c9", - "sha256:1d44bb3a652fed01f1f2c10d5477956116e9b391320c94d36c6bf13b088a1097", - "sha256:280baa8ec489c4f542f8940f9c4c2181f0306a8ee1a54eceba071a449fb870a0", - "sha256:29a6272fec10623fcbe158fdf9abc7a5fa032048ac1d8631f14b50fbfc10d17f", - "sha256:2b31f46bf7b31e6aa690d4c7a3d51bb262438c6dcb0d528adde446531d0d3bb7", - "sha256:2d43af2be93ffbad25dd959899b5b809618a496926146ce98ee0b23683f8c51c", - "sha256:381ead10b9b9af5f64646cd27107fb27b614ee7040bb1226f9c07ba96625cbb5", - "sha256:47a11bdbd8ada9b7ee628596f9d97fbd3851bd9999d398e9436bd67376dbece7", - "sha256:4d6a42744139a7fa5b46a264874a781e8694bb32f1d76d8137b68138686f1729", - "sha256:50691e744714856f03a86df3e2bff847c2acede4c191f9a1da38f088df342978", - "sha256:530cc8aaf11cc2ac7430f3614b04645662ef20c348dce4167c22d99bec3480e9", - "sha256:582ddfbe712025448206a5bc45855d16c2e491c2dd102ee9a2841418ac1c629f", - "sha256:63808c30b41f3bbf65e29f7280bf793c79f54fb807057de7e5238ffc7cc4d7b9", - "sha256:71b69bd716698fa62cd97137d6f2fdf49f534decb23a2c6fc80813e8b7be6822", - "sha256:7858847f2d84bf6e64c7f66498e851c54de8ea06a6f96a32a1d192d846734418", - "sha256:78e93cc3571fd928a39c0b26767c986188a4118edc67bc0695bc7a284da22e82", - "sha256:7f43286f13d91a34fadf61ae252a51a130223c52bfefb50310d5b2deb062cf0f", - 
"sha256:86e9f8cd4b0cdd57b4ae71a9c186717daa4c5a99f3238a8723f416256e0b064d", - "sha256:8f264ba2701b8c9f815b272ad568d555ef98dfe1576802ab3149c3629a9f2221", - "sha256:9342dd70a1e151684727c9c91ea003b2fb33523bf19385d4554f7897ca0141d4", - "sha256:9361de40701666b034c59ad9e317bae95c973b9ff92513dd0eced11c6adf2e21", - "sha256:9669179786254a2e7e57f0ecf224e978471491d660aaca833f845b72a2df3709", - "sha256:aac1ba0a253e17889550ddb1b60a2063f7474155465577caa2a3b131224cfd54", - "sha256:aef72eae10b5e3116bac6957de1df4d75909fc76d1499a53fb6387434b6bcd8d", - "sha256:bd3166bb3b111e76a4f8e2980fa1addf2920a4ca9b2b8ca36a3bc3dedc618270", - "sha256:c1b78fb9700fc961f53386ad2fd86d87091e06ede5d118b8a50dea285a071c24", - "sha256:c3888a051226e676e383de03bf49eb633cd39fc829516e5334e69b8d81aae751", - "sha256:c5f17ad25d2c1286436761b462e22b5020d83316f8e8fcb5deb2b3151f8f1d3a", - "sha256:c851b35fc078389bc16b915a0a7c1d5923e12e2c5aeec58c52f4aa8085ac8237", - "sha256:cb7df71de0af56000115eafd000b867d1261f786b5eebd88a0ca6360cccfaca7", - "sha256:cedb2f9e1f990918ea061f28a0f0077a07702e3819602d3507e2ff98c8d20636", - "sha256:e8caf961e1b1a945db76f1b5fa9c91498d15f545ac0ababbe575cfab185d3bd8" + "sha256:08b3ba72bd981531fd557f67beee376d6700fba183b167857038997ba30dd297", + "sha256:2757fa64e11ec12220968f65d086b7a29b6583d16e9a544c889b22ba98555ef1", + "sha256:3102bb2c206700a7d28181dbe04d66b30780cde1d1c02c5f3c165cf3d2489497", + "sha256:3498b27d8236057def41de3585f317abae235dd3a11d33e01736ffedb2ef8606", + "sha256:378ac77af41350a8c6b8801a66021b52da8a05fd77e578b7380e876c0ce4f528", + "sha256:38f16b1317b8dd82df67ed5daa5f5e7c959e46579840d77a67a4ceb9cef0a50b", + "sha256:3911c2ef96e5ddc748a3c8b4702c61986628bb719b8378bf1e4a6184bbd48fe4", + "sha256:3a3c3f8863255f3c31db3889f8055989527173ef6192a283eb6f4db3c579d830", + "sha256:3b14b1da110ea50c8bcbadc3b82c3933974dbeea1832e814aab93ca1163cd4c1", + "sha256:535dc1e6e68fad5355f9984d5637c33badbdc987b0c0d303ee95a6c979c9516f", + "sha256:6f61319e33222591f885c598e3e24f6a4be3533c1d70c19e0dc59e83a71ce27d", + "sha256:723d22d324e7997a651478e9c5a3120a0ecbc9a7e94071f7e1954562a8806cf3", + "sha256:76b2775dda7e78680d688daabcb485dc87cf5e3184a0b3e012e1d40e38527cc8", + "sha256:782a5c7df9f91979a7a21792e09b34a658058896628217ae6362088b123c8500", + "sha256:7e4d159021c2029b958b2363abec4a11db0ce8cd43abb0d9ce44284cb97217e7", + "sha256:8dacc4073c359f40fcf73aede8428c35f84639baad7e1b46fce5ab7a8a7be4bb", + "sha256:8f33d1156241c43755137288dea619105477961cfa7e47f48dbf96bc2c30720b", + "sha256:8ffd4b204d7de77b5dd558cdff986a8274796a1e57813ed005b33fd97e29f059", + "sha256:93a280c9eb736a0dcca19296f3c30c720cb41a71b1f9e617f341f0a8e791a69b", + "sha256:9a4f66259bdd6964d8cf26142733c81fb562252db74ea367d9beb4f815478e72", + "sha256:9a9d4ff06804920388aab69c5ea8a77525cf165356db70131616acd269e19b36", + "sha256:a2070c5affdb3a5e751f24208c5c4f3d5f008fa04d28731416e023c93b275277", + "sha256:a4857f7e2bc6921dbd487c5c88b84f5633de3e7d416c4dc0bb70256775551a6c", + "sha256:a607ae05b6c96057ba86c811d9c43423f35e03874ffb03fbdcd45e0637e8b631", + "sha256:a66ca3bdf21c653e47f726ca57f46ba7fc1f260ad99ba783acc3e58e3ebdb9ff", + "sha256:ab110c48bc3d97b4d19af41865e14531f300b482da21783fdaacd159251890e8", + "sha256:b239711e774c8eb910e9b1ac719f02f5ae4bf35fa0420f438cdc3a7e4e7dd6ec", + "sha256:be0416074d7f253865bb67630cf7210cbc14eb05f4099cc0f82430135aaa7a3b", + "sha256:c46643970dff9f5c976c6512fd35768c4a3819f01f61169d8cdac3f9290903b7", + "sha256:c5ec71fd4a43b6d84ddb88c1df94572479d9a26ef3f150cef3dacefecf888105", + "sha256:c6e5174f8ca585755988bc278c8bb5d02d9dc2e971591ef4a1baabdf2d99589b", + 
"sha256:c89b558f8a9a5a6f2cfc923c304d49f0ce629c3bd85cb442ca258ec20366394c", + "sha256:cc44e3545d908ecf3e5773266c487ad1877be718d9dc65fc7eb6e7d14960985b", + "sha256:cc6f8246e74dd210d7e2b56c76ceaba1cc52b025cd75dbe96eb48791e0250e98", + "sha256:cd556c79ad665faeae28020a0ab3bda6cd47d94bec48e36970719b0b86e4dcf4", + "sha256:ce6f3a147b4b1a8b09aae48517ae91139b1b010c5f36423fa2b866a8b23df879", + "sha256:ceb499d2b3d1d7b7ba23abe8bf26df5f06ba8c71127f188333dddcf356b4b63f", + "sha256:cef06fb382557f66d81d804230c11ab292d94b840b3cb7bf4450778377b592f4", + "sha256:e448f56cfeae7b1b3b5bcd99bb377cde7c4eb1970a525c770720a352bc4c8044", + "sha256:e52d3d95df81c8f6b2a1685aabffadf2d2d9ad97203a40f8d61e51b70f191e4e", + "sha256:ee2f1d1c223c3d2c24e3afbb2dd38be3f03b1a8d6a83ee3d9eb8c36a52bee899", + "sha256:f2c6888eada180814b8583c3e793f3f343a692fc802546eed45f40a001b1169f", + "sha256:f51dbba78d68a44e99d484ca8c8f604f17e957c1ca09c3ebc2c7e3bbd9ba0448", + "sha256:f54de00baf200b4539a5a092a759f000b5f45fd226d6d25a76b0dff71177a714", + "sha256:fa10fee7e32213f5c7b0d6428ea92e3a3fdd6d725590238a3f92c0de1c78b9d2", + "sha256:fabeeb121735d47d8eab8671b6b031ce08514c86b7ad8f7d5490a7b6dcd6267d", + "sha256:fac3c432851038b3e6afe086f777732bcf7f6ebbfd90951fa04ee53db6d0bcdd", + "sha256:fda29412a66099af6d6de0baa6bd7c52674de177ec2ad2630ca264142d69c6c7", + "sha256:ff1330e8bc996570221b450e2d539134baa9465f5cb98aff0e0f73f34172e0ae" ], "index": "pypi", - "version": "==5.3" + "version": "==5.3.1" }, "distlib": { "hashes": [ @@ -610,11 +625,11 @@ }, "keyring": { "hashes": [ - "sha256:12de23258a95f3b13e5b167f7a641a878e91eab8ef16fafc077720a95e6115bb", - "sha256:207bd66f2a9881c835dad653da04e196c678bf104f8252141d2d3c4f31051579" + "sha256:1746d3ac913d449a090caf11e9e4af00e26c3f7f7e81027872192b2398b98675", + "sha256:4be9cbaaaf83e61d6399f733d113ede7d1c73bc75cb6aeb64eee0f6ac39b30ea" ], "markers": "python_version >= '3.6'", - "version": "==21.5.0" + "version": "==21.8.0" }, "markupsafe": { "hashes": [ @@ -805,10 +820,10 @@ }, "pytz": { "hashes": [ - "sha256:3e6b7dd2d1e0a59084bcee14a17af60c5c562cdc16d828e8eba2e683d3a7e268", - "sha256:5c55e189b682d420be27c6995ba6edce0c0a77dd67bfbe2ae6607134d5851ffd" + "sha256:16962c5fb8db4a8f63a26646d8886e9d769b6c511543557bc84e9569fb9a9cb4", + "sha256:180befebb1927b16f6b57101720075a984c019ac16b1b7575673bea42c6c3da5" ], - "version": "==2020.4" + "version": "==2020.5" }, "pyyaml": { "hashes": [ @@ -838,11 +853,11 @@ }, "recommonmark": { "hashes": [ - "sha256:29cd4faeb6c5268c633634f2d69aef9431e0f4d347f90659fd0aab20e541efeb", - "sha256:2ec4207a574289355d5b6ae4ae4abb29043346ca12cdd5f07d374dc5987d2852" + "sha256:1b1db69af0231efce3fa21b94ff627ea33dee7079a01dd0a7f8482c3da148b3f", + "sha256:bdb4db649f2222dcd8d2d844f0006b958d627f732415d399791ee436a3686d67" ], "index": "pypi", - "version": "==0.6.0" + "version": "==0.7.1" }, "regex": { "hashes": [ @@ -893,11 +908,11 @@ }, "requests": { "hashes": [ - "sha256:7f1a0b932f4a60a1a65caa4263921bb7d9ee911957e0ae4a23a6dd08185ad5f8", - "sha256:e786fa28d8c9154e6a4de5d46a1d921b8749f8b74e28bde23768e5e16eece998" + "sha256:27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804", + "sha256:c210084e36a42ae6b9219e00e48287def368a26d03a048ddad7bfee44f75871e" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", - "version": "==2.25.0" + "version": "==2.25.1" }, "requests-toolbelt": { "hashes": [ @@ -944,11 +959,11 @@ }, "sphinx": { "hashes": [ - "sha256:1e8d592225447104d1172be415bc2972bd1357e3e12fdc76edf2261105db4300", - 
"sha256:d4e59ad4ea55efbb3c05cde3bfc83bfc14f0c95aa95c3d75346fcce186a47960" + "sha256:aeef652b14629431c82d3fe994ce39ead65b3fe87cf41b9a3714168ff8b83376", + "sha256:e450cb205ff8924611085183bf1353da26802ae73d9251a8fcdf220a8f8712ef" ], "index": "pypi", - "version": "==3.3.1" + "version": "==3.4.1" }, "sphinxcontrib-applehelp": { "hashes": [ @@ -1008,55 +1023,55 @@ }, "tqdm": { "hashes": [ - "sha256:38b658a3e4ecf9b4f6f8ff75ca16221ae3378b2e175d846b6b33ea3a20852cf5", - "sha256:d4f413aecb61c9779888c64ddf0c62910ad56dcbe857d8922bb505d4dbff0df1" + "sha256:0cd81710de29754bf17b6fee07bdb86f956b4fa20d3078f02040f83e64309416", + "sha256:f4f80b96e2ceafea69add7bf971b8403b9cba8fb4451c1220f91c79be4ebd208" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==4.54.1" + "version": "==4.55.0" }, "twine": { "hashes": [ - "sha256:34352fd52ec3b9d29837e6072d5a2a7c6fe4290e97bba46bb8d478b5c598f7ab", - "sha256:ba9ff477b8d6de0c89dd450e70b2185da190514e91c42cc62f96850025c10472" + "sha256:2f6942ec2a17417e19d2dd372fc4faa424c87ee9ce49b4e20c427eb00a0f3f41", + "sha256:fcffa8fc37e8083a5be0728371f299598870ee1eccc94e9a25cef7b1dcfa8297" ], "index": "pypi", - "version": "==3.2.0" + "version": "==3.3.0" }, "typed-ast": { "hashes": [ - "sha256:0666aa36131496aed8f7be0410ff974562ab7eeac11ef351def9ea6fa28f6355", - "sha256:0c2c07682d61a629b68433afb159376e24e5b2fd4641d35424e462169c0a7919", - "sha256:0d8110d78a5736e16e26213114a38ca35cb15b6515d535413b090bd50951556d", - "sha256:249862707802d40f7f29f6e1aad8d84b5aa9e44552d2cc17384b209f091276aa", - "sha256:24995c843eb0ad11a4527b026b4dde3da70e1f2d8806c99b7b4a7cf491612652", - "sha256:269151951236b0f9a6f04015a9004084a5ab0d5f19b57de779f908621e7d8b75", - "sha256:3742b32cf1c6ef124d57f95be609c473d7ec4c14d0090e5a5e05a15269fb4d0c", - "sha256:4083861b0aa07990b619bd7ddc365eb7fa4b817e99cf5f8d9cf21a42780f6e01", - "sha256:498b0f36cc7054c1fead3d7fc59d2150f4d5c6c56ba7fb150c013fbc683a8d2d", - "sha256:4e3e5da80ccbebfff202a67bf900d081906c358ccc3d5e3c8aea42fdfdfd51c1", - "sha256:6daac9731f172c2a22ade6ed0c00197ee7cc1221aa84cfdf9c31defeb059a907", - "sha256:715ff2f2df46121071622063fc7543d9b1fd19ebfc4f5c8895af64a77a8c852c", - "sha256:73d785a950fc82dd2a25897d525d003f6378d1cb23ab305578394694202a58c3", - "sha256:7e4c9d7658aaa1fc80018593abdf8598bf91325af6af5cce4ce7c73bc45ea53d", - "sha256:8c8aaad94455178e3187ab22c8b01a3837f8ee50e09cf31f1ba129eb293ec30b", - "sha256:8ce678dbaf790dbdb3eba24056d5364fb45944f33553dd5869b7580cdbb83614", - "sha256:92c325624e304ebf0e025d1224b77dd4e6393f18aab8d829b5b7e04afe9b7a2c", - "sha256:aaee9905aee35ba5905cfb3c62f3e83b3bec7b39413f0a7f19be4e547ea01ebb", - "sha256:b52ccf7cfe4ce2a1064b18594381bccf4179c2ecf7f513134ec2f993dd4ab395", - "sha256:bcd3b13b56ea479b3650b82cabd6b5343a625b0ced5429e4ccad28a8973f301b", - "sha256:c9e348e02e4d2b4a8b2eedb48210430658df6951fa484e59de33ff773fbd4b41", - "sha256:d205b1b46085271b4e15f670058ce182bd1199e56b317bf2ec004b6a44f911f6", - "sha256:d43943ef777f9a1c42bf4e552ba23ac77a6351de620aa9acf64ad54933ad4d34", - "sha256:d5d33e9e7af3b34a40dc05f498939f0ebf187f07c385fd58d591c533ad8562fe", - "sha256:d648b8e3bf2fe648745c8ffcee3db3ff903d0817a01a12dd6a6ea7a8f4889072", - "sha256:f208eb7aff048f6bea9586e61af041ddf7f9ade7caed625742af423f6bae3298", - "sha256:fac11badff8313e23717f3dada86a15389d0708275bddf766cca67a84ead3e91", - "sha256:fc0fea399acb12edbf8a628ba8d2312f583bdbdb3335635db062fa98cf71fca4", - "sha256:fcf135e17cc74dbfbc05894ebca928ffeb23d9790b3167a674921db19082401f", - 
"sha256:fe460b922ec15dd205595c9b5b99e2f056fd98ae8f9f56b888e7a17dc2b757e7" + "sha256:07d49388d5bf7e863f7fa2f124b1b1d89d8aa0e2f7812faff0a5658c01c59aa1", + "sha256:14bf1522cdee369e8f5581238edac09150c765ec1cb33615855889cf33dcb92d", + "sha256:240296b27397e4e37874abb1df2a608a92df85cf3e2a04d0d4d61055c8305ba6", + "sha256:36d829b31ab67d6fcb30e185ec996e1f72b892255a745d3a82138c97d21ed1cd", + "sha256:37f48d46d733d57cc70fd5f30572d11ab8ed92da6e6b28e024e4a3edfb456e37", + "sha256:4c790331247081ea7c632a76d5b2a265e6d325ecd3179d06e9cf8d46d90dd151", + "sha256:5dcfc2e264bd8a1db8b11a892bd1647154ce03eeba94b461effe68790d8b8e07", + "sha256:7147e2a76c75f0f64c4319886e7639e490fee87c9d25cb1d4faef1d8cf83a440", + "sha256:7703620125e4fb79b64aa52427ec192822e9f45d37d4b6625ab37ef403e1df70", + "sha256:8368f83e93c7156ccd40e49a783a6a6850ca25b556c0fa0240ed0f659d2fe496", + "sha256:84aa6223d71012c68d577c83f4e7db50d11d6b1399a9c779046d75e24bed74ea", + "sha256:85f95aa97a35bdb2f2f7d10ec5bbdac0aeb9dafdaf88e17492da0504de2e6400", + "sha256:8db0e856712f79c45956da0c9a40ca4246abc3485ae0d7ecc86a20f5e4c09abc", + "sha256:9044ef2df88d7f33692ae3f18d3be63dec69c4fb1b5a4a9ac950f9b4ba571606", + "sha256:963c80b583b0661918718b095e02303d8078950b26cc00b5e5ea9ababe0de1fc", + "sha256:987f15737aba2ab5f3928c617ccf1ce412e2e321c77ab16ca5a293e7bbffd581", + "sha256:9ec45db0c766f196ae629e509f059ff05fc3148f9ffd28f3cfe75d4afb485412", + "sha256:9fc0b3cb5d1720e7141d103cf4819aea239f7d136acf9ee4a69b047b7986175a", + "sha256:a2c927c49f2029291fbabd673d51a2180038f8cd5a5b2f290f78c4516be48be2", + "sha256:a38878a223bdd37c9709d07cd357bb79f4c760b29210e14ad0fb395294583787", + "sha256:b4fcdcfa302538f70929eb7b392f536a237cbe2ed9cba88e3bf5027b39f5f77f", + "sha256:c0c74e5579af4b977c8b932f40a5464764b2f86681327410aa028a22d2f54937", + "sha256:c1c876fd795b36126f773db9cbb393f19808edd2637e00fd6caba0e25f2c7b64", + "sha256:c9aadc4924d4b5799112837b226160428524a9a45f830e0d0f184b19e4090487", + "sha256:cc7b98bf58167b7f2db91a4327da24fb93368838eb84a44c472283778fc2446b", + "sha256:cf54cfa843f297991b7388c281cb3855d911137223c6b6d2dd82a47ae5125a41", + "sha256:d003156bb6a59cda9050e983441b7fa2487f7800d76bdc065566b7d728b4581a", + "sha256:d175297e9533d8d37437abc14e8a83cbc68af93cc9c1c59c2c292ec59a0697a3", + "sha256:d746a437cdbca200622385305aedd9aef68e8a645e385cc483bdc5e488f07166", + "sha256:e683e409e5c45d5c9082dc1daf13f6374300806240719f95dc783d1fc942af10" ], "index": "pypi", - "version": "==1.4.1" + "version": "==1.4.2" }, "typing-extensions": { "hashes": [ diff --git a/setup.py b/setup.py index 14bc1ef586a..c97dd35fe28 100644 --- a/setup.py +++ b/setup.py @@ -71,7 +71,7 @@ def get_long_description() -> str: "click>=7.1.2", "appdirs", "toml>=0.10.1", - "typed-ast>=1.4.0", + "typed-ast>=1.4.2", "regex>=2020.1.8", "pathspec>=0.6, <1", "dataclasses>=0.6; python_version < '3.7'",
googleapis__google-cloud-python-5683
Release 'datastore 1.7.0'

Major changes are:

- Add support for Python 3.7.
- Drop support for Python 3.4.
- Bugfix: query offsets (#4675).
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = 'google-cloud-datastore'\ndescription = 'Google Cloud Datastore API client library'\nversion = '1.6.0'\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = 'Development Status :: 5 - Production/Stable'\ndependencies = [\n 'google-cloud-core<0.29dev,>=0.28.0',\n 'google-api-core[grpc]<2.0.0dev,>=1.0.0',\n]\nextras = {\n}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, 'README.rst')\nwith io.open(readme_filename, encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages()\n if package.startswith('google')]\n\n# Determine which namespaces are needed.\nnamespaces = ['google']\nif 'google.cloud' in packages:\n namespaces.append('google.cloud')\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author='Google LLC',\n author_email='[email protected]',\n license='Apache 2.0',\n url='https://github.com/GoogleCloudPlatform/google-cloud-python',\n classifiers=[\n release_status,\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent',\n 'Topic :: Internet',\n ],\n platforms='Posix; MacOS X; Windows',\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "datastore/setup.py" } ]
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = 'google-cloud-datastore'\ndescription = 'Google Cloud Datastore API client library'\nversion = '1.7.0'\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = 'Development Status :: 5 - Production/Stable'\ndependencies = [\n 'google-cloud-core<0.29dev,>=0.28.0',\n 'google-api-core[grpc]<2.0.0dev,>=1.0.0',\n]\nextras = {\n}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, 'README.rst')\nwith io.open(readme_filename, encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages()\n if package.startswith('google')]\n\n# Determine which namespaces are needed.\nnamespaces = ['google']\nif 'google.cloud' in packages:\n namespaces.append('google.cloud')\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author='Google LLC',\n author_email='[email protected]',\n license='Apache 2.0',\n url='https://github.com/GoogleCloudPlatform/google-cloud-python',\n classifiers=[\n release_status,\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent',\n 'Topic :: Internet',\n ],\n platforms='Posix; MacOS X; Windows',\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "datastore/setup.py" } ]
diff --git a/datastore/CHANGELOG.md b/datastore/CHANGELOG.md index 4c47905a4a53..2ad75b1ff3b5 100644 --- a/datastore/CHANGELOG.md +++ b/datastore/CHANGELOG.md @@ -4,6 +4,25 @@ [1]: https://pypi.org/project/google-cloud-datastore/#history +## 1.7.0 + +### Implementation Changes + +- Do not pass 'offset' once the query iterator has a cursor (#5503) +- Add test runs for Python 3.7 and remove run for 3.4 (#5295) + +### Documentation + +- minor fix to datastore example (#5452) +- Add example showing explicit unicode for text values in entities. (#5263) + +### Internal / Testing Changes + +- Modify system tests to use prerelease versions of grpcio (#5304) +- Avoid overwriting '__module__' of messages from shared modules. (#5364) +- Attempt again to reproduce #4264. (#5403) +- Fix bad trove classifier + ## 1.6.0 ### Implementation changes diff --git a/datastore/setup.py b/datastore/setup.py index 139aae2e6852..d53e4fbdc4ad 100644 --- a/datastore/setup.py +++ b/datastore/setup.py @@ -22,7 +22,7 @@ name = 'google-cloud-datastore' description = 'Google Cloud Datastore API client library' -version = '1.6.0' +version = '1.7.0' # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta'
dynaconf__dynaconf-809
How to use dynaconf in pytest when a custom settings object is declared
Hi. I need to override the environment for settings in conftest.py. As far as I found, I can do it by adding:
```python
import pytest
from dynaconf import settings

@pytest.fixture(scope="session", autouse=True)
def set_test_settings():
    settings.configure(FORCE_ENV_FOR_DYNACONF="testing")
```
but in my case I've got my own settings object in the `config.py` module, and it looks like this:
```python
settings = Dynaconf(
    envvar_prefix='MY_PREFIX',
    settings_files=['settings.toml', '.secrets.toml'],
    environments=True,
    env_switcher='ENVIRONMENT_NAME',
)
```
and when I try to do
```python
import pytest
from config import settings

@pytest.fixture(scope="session", autouse=True)
def set_test_settings():
    settings.configure(FORCE_ENV_FOR_DYNACONF="testing")
```
it does not work. It only overrides settings imported directly from the dynaconf module. This is a problem because in my app I'm using settings from my `config.py`, so my tests fail when I override settings from `dynaconf` and use `from dynaconf import settings` in tests, because the app uses `from config import settings`.
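A workaround exists for custom instances: `setenv` is defined on `Settings` in the `dynaconf/base.py` source below and switches the active layered environment, re-running the loaders for it. The sketch below is a minimal illustration under that assumption, not the fix this issue tracks, and it assumes the `config.py` instance shown in the report above:
```python
# conftest.py -- minimal sketch of a workaround, assuming the custom
# Dynaconf instance declared in config.py as shown in the report above.
# `setenv` is defined on Settings in dynaconf/base.py (see the source
# below); with environments=True it switches the active layered env
# and re-executes the loaders for it.
import pytest

from config import settings


@pytest.fixture(scope="session", autouse=True)
def set_test_settings():
    settings.setenv("testing")  # load values from the [testing] section
```
Note that in `LazySettings.configure` (also visible in the source below), `kwargs.update(self._kwargs)` lets the instance's original kwargs take precedence over the ones passed to `configure`, which is consistent with the reported behavior that `FORCE_ENV_FOR_DYNACONF` appears to be ignored on custom instances.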
[ { "content": "from __future__ import annotations\n\nimport copy\nimport glob\nimport importlib\nimport inspect\nimport os\nimport warnings\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nfrom contextlib import suppress\nfrom pathlib import Path\n\nfrom dynaconf import default_settings\nfrom dynaconf.loaders import default_loader\nfrom dynaconf.loaders import enable_external_loaders\nfrom dynaconf.loaders import env_loader\nfrom dynaconf.loaders import execute_hooks\nfrom dynaconf.loaders import py_loader\nfrom dynaconf.loaders import settings_loader\nfrom dynaconf.loaders import yaml_loader\nfrom dynaconf.utils import BANNER\nfrom dynaconf.utils import compat_kwargs\nfrom dynaconf.utils import ensure_a_list\nfrom dynaconf.utils import missing\nfrom dynaconf.utils import object_merge\nfrom dynaconf.utils import recursively_evaluate_lazy_format\nfrom dynaconf.utils import RENAMED_VARS\nfrom dynaconf.utils import upperfy\nfrom dynaconf.utils.boxing import DynaBox\nfrom dynaconf.utils.files import find_file\nfrom dynaconf.utils.functional import empty\nfrom dynaconf.utils.functional import LazyObject\nfrom dynaconf.utils.parse_conf import converters\nfrom dynaconf.utils.parse_conf import get_converter\nfrom dynaconf.utils.parse_conf import parse_conf_data\nfrom dynaconf.utils.parse_conf import true_values\nfrom dynaconf.validator import ValidatorList\nfrom dynaconf.vendor.box.box_list import BoxList\n\n\nclass LazySettings(LazyObject):\n \"\"\"Loads settings lazily from multiple sources::\n\n settings = Dynaconf(\n settings_files=[\"settings.toml\"], # path/glob\n environments=True, # activate layered environments\n envvar_prefix=\"MYAPP\", # `export MYAPP_FOO=bar`\n env_switcher=\"MYAPP_MODE\", # `export MYAPP_MODE=production`\n load_dotenv=True, # read a .env file\n )\n\n More options available on https://www.dynaconf.com/configuration/\n \"\"\"\n\n def __init__(self, wrapped=None, **kwargs):\n \"\"\"\n handle initialization for the customization cases\n\n :param wrapped: a deepcopy of this object will be wrapped (issue #596)\n :param kwargs: values that overrides default_settings\n \"\"\"\n\n self._warn_dynaconf_global_settings = kwargs.pop(\n \"warn_dynaconf_global_settings\", None\n ) # in 3.0.0 global settings is deprecated\n\n self.__resolve_config_aliases(kwargs)\n compat_kwargs(kwargs)\n self._kwargs = kwargs\n super().__init__()\n\n if wrapped:\n if self._django_override:\n # This fixes django issue #596\n self._wrapped = copy.deepcopy(wrapped)\n else:\n self._wrapped = wrapped\n\n def __resolve_config_aliases(self, kwargs):\n \"\"\"takes aliases for _FOR_DYNACONF configurations\n\n e.g: ROOT_PATH='/' is transformed into `ROOT_PATH_FOR_DYNACONF`\n \"\"\"\n\n mispells = {\n \"settings_files\": \"settings_file\",\n \"SETTINGS_FILES\": \"SETTINGS_FILE\",\n \"environment\": \"environments\",\n \"ENVIRONMENT\": \"ENVIRONMENTS\",\n }\n for misspell, correct in mispells.items():\n if misspell in kwargs:\n kwargs[correct] = kwargs.pop(misspell)\n\n for_dynaconf_keys = {\n key\n for key in UPPER_DEFAULT_SETTINGS\n if key.endswith(\"_FOR_DYNACONF\")\n }\n aliases = {\n key.upper()\n for key in kwargs\n if f\"{key.upper()}_FOR_DYNACONF\" in for_dynaconf_keys\n }\n for alias in aliases:\n value = kwargs.pop(alias, empty)\n if value is empty:\n value = kwargs.pop(alias.lower())\n kwargs[f\"{alias}_FOR_DYNACONF\"] = value\n\n def __getattr__(self, name):\n \"\"\"Allow getting keys from self.store using dot notation\"\"\"\n if self._wrapped is empty:\n self._setup()\n 
if name in self._wrapped._deleted: # noqa\n raise AttributeError(\n f\"Attribute {name} was deleted, \" \"or belongs to different env\"\n )\n\n if name not in RESERVED_ATTRS:\n lowercase_mode = self._kwargs.get(\n \"LOWERCASE_READ_FOR_DYNACONF\",\n default_settings.LOWERCASE_READ_FOR_DYNACONF,\n )\n if lowercase_mode is True:\n name = name.upper()\n\n if (\n name.isupper()\n and (\n self._wrapped._fresh\n or name in self._wrapped.FRESH_VARS_FOR_DYNACONF\n )\n and name not in UPPER_DEFAULT_SETTINGS\n ):\n return self._wrapped.get_fresh(name)\n value = getattr(self._wrapped, name)\n if name not in RESERVED_ATTRS:\n return recursively_evaluate_lazy_format(value, self)\n return value\n\n def __call__(self, *args, **kwargs):\n \"\"\"Allow direct call of settings('val')\n in place of settings.get('val')\n \"\"\"\n return self.get(*args, **kwargs)\n\n @property\n def _should_load_dotenv(self):\n \"\"\"Chicken and egg problem, we must manually check envvar\n before deciding if we are loading envvars :)\"\"\"\n _environ_load_dotenv = parse_conf_data(\n os.environ.get(\"LOAD_DOTENV_FOR_DYNACONF\"), tomlfy=True\n )\n return self._kwargs.get(\"load_dotenv\", _environ_load_dotenv)\n\n def _setup(self):\n \"\"\"Initial setup, run once.\"\"\"\n\n if self._warn_dynaconf_global_settings:\n warnings.warn(\n \"Usage of `from dynaconf import settings` is now \"\n \"DEPRECATED in 3.0.0+. You are encouraged to change it to \"\n \"your own instance e.g: `settings = Dynaconf(*options)`\",\n DeprecationWarning,\n )\n\n default_settings.reload(self._should_load_dotenv)\n environment_variable = self._kwargs.get(\n \"ENVVAR_FOR_DYNACONF\", default_settings.ENVVAR_FOR_DYNACONF\n )\n settings_module = os.environ.get(environment_variable)\n self._wrapped = Settings(\n settings_module=settings_module, **self._kwargs\n )\n\n def configure(self, settings_module=None, **kwargs):\n \"\"\"\n Allows user to reconfigure settings object passing a new settings\n module or separated kwargs\n\n :param settings_module: defines the settings file\n :param kwargs: override default settings\n \"\"\"\n default_settings.reload(self._should_load_dotenv)\n environment_var = self._kwargs.get(\n \"ENVVAR_FOR_DYNACONF\", default_settings.ENVVAR_FOR_DYNACONF\n )\n settings_module = settings_module or os.environ.get(environment_var)\n compat_kwargs(kwargs)\n kwargs.update(self._kwargs)\n self._wrapped = Settings(settings_module=settings_module, **kwargs)\n\n @property\n def configured(self):\n \"\"\"If wrapped is configured\"\"\"\n return self._wrapped is not empty\n\n\nclass Settings:\n \"\"\"\n Common logic for settings whether set by a module or by the user.\n \"\"\"\n\n dynaconf_banner = BANNER\n _store = DynaBox()\n\n def __init__(self, settings_module=None, **kwargs): # pragma: no cover\n \"\"\"Execute loaders and custom initialization\n\n :param settings_module: defines the settings file\n :param kwargs: override default settings\n \"\"\"\n self._fresh = False\n self._loaded_envs = []\n self._loaded_hooks = defaultdict(dict)\n self._loaded_py_modules = []\n self._loaded_files = []\n self._deleted = set()\n self._store = DynaBox(box_settings=self)\n self._env_cache = {}\n self._loaded_by_loaders = {}\n self._loaders = []\n self._defaults = DynaBox(box_settings=self)\n self.environ = os.environ\n self.SETTINGS_MODULE = None\n self.filter_strategy = kwargs.get(\"filter_strategy\", None)\n self._not_installed_warnings = []\n self._validate_only = kwargs.pop(\"validate_only\", None)\n self._validate_exclude = kwargs.pop(\"validate_exclude\", 
None)\n self._validate_only_current_env = kwargs.pop(\n \"validate_only_current_env\", False\n )\n\n self.validators = ValidatorList(\n self, validators=kwargs.pop(\"validators\", None)\n )\n\n compat_kwargs(kwargs)\n if settings_module:\n self.set(\"SETTINGS_FILE_FOR_DYNACONF\", settings_module)\n for key, value in kwargs.items():\n self.set(key, value)\n # execute loaders only after setting defaults got from kwargs\n self._defaults = kwargs\n\n # The following flags are used for when copying of settings is done\n skip_loaders = kwargs.get(\"dynaconf_skip_loaders\", False)\n skip_validators = kwargs.get(\"dynaconf_skip_validators\", False)\n\n if not skip_loaders:\n self.execute_loaders()\n\n if not skip_validators:\n self.validators.validate(\n only=self._validate_only,\n exclude=self._validate_exclude,\n only_current_env=self._validate_only_current_env,\n )\n\n def __call__(self, *args, **kwargs):\n \"\"\"Allow direct call of `settings('val')`\n in place of `settings.get('val')`\n \"\"\"\n return self.get(*args, **kwargs)\n\n def __setattr__(self, name, value):\n \"\"\"Allow `settings.FOO = 'value'` while keeping internal attrs.\"\"\"\n\n if name in RESERVED_ATTRS:\n super().__setattr__(name, value)\n else:\n self.set(name, value)\n\n def __delattr__(self, name):\n \"\"\"stores reference in `_deleted` for proper error management\"\"\"\n self._deleted.add(name)\n if hasattr(self, name):\n super().__delattr__(name)\n\n def __contains__(self, item):\n \"\"\"Respond to `item in settings`\"\"\"\n return item.upper() in self.store or item.lower() in self.store\n\n def __getattribute__(self, name):\n if name not in RESERVED_ATTRS and name not in UPPER_DEFAULT_SETTINGS:\n with suppress(KeyError):\n # self._store has Lazy values already evaluated\n if (\n name.islower()\n and self._store.get(\"LOWERCASE_READ_FOR_DYNACONF\", empty)\n is False\n ):\n # only matches exact casing, first levels always upper\n return self._store.to_dict()[name]\n # perform lookups for upper, and casefold\n return self._store[name]\n # in case of RESERVED_ATTRS or KeyError above, keep default behaviour\n return super().__getattribute__(name)\n\n def __getitem__(self, item):\n \"\"\"Allow getting variables as dict keys `settings['KEY']`\"\"\"\n value = self.get(item, default=empty)\n if value is empty:\n raise KeyError(f\"{item} does not exist\")\n return value\n\n def __setitem__(self, key, value):\n \"\"\"Allow `settings['KEY'] = 'value'`\"\"\"\n self.set(key, value)\n\n @property\n def store(self):\n \"\"\"Gets internal storage\"\"\"\n return self._store\n\n def __dir__(self):\n \"\"\"Enable auto-complete for code editors\"\"\"\n return (\n RESERVED_ATTRS\n + [k.lower() for k in self.keys()]\n + list(self.keys())\n )\n\n def __iter__(self):\n \"\"\"Redirects to store object\"\"\"\n yield from self._store\n\n def items(self):\n \"\"\"Redirects to store object\"\"\"\n return self._store.items()\n\n def keys(self):\n \"\"\"Redirects to store object\"\"\"\n return self.store.keys()\n\n def values(self):\n \"\"\"Redirects to store object\"\"\"\n return self.store.values()\n\n def setdefault(self, item, default, apply_default_on_none=False):\n \"\"\"Returns value if exists or set it as the given default\n\n apply_default_on_none: if True, default is set when value is None\n \"\"\"\n value = self.get(item, empty)\n\n # Yaml loader reads empty values as None, would we apply defaults?\n global_apply_default = (\n self.get(\"APPLY_DEFAULT_ON_NONE_FOR_DYNACONF\") is not None\n )\n apply_default = default is not empty and (\n 
value is empty\n or (\n value is None\n and (\n apply_default_on_none is True\n or global_apply_default is True\n )\n )\n )\n\n if apply_default:\n self.set(\n item,\n default,\n loader_identifier=\"setdefault\",\n tomlfy=True,\n )\n return default\n\n return value\n\n def as_dict(self, env=None, internal=False):\n \"\"\"Returns a dictionary with set key and values.\n\n :param env: Str env name, default self.current_env `DEVELOPMENT`\n :param internal: bool - should include dynaconf internal vars?\n \"\"\"\n ctx_mgr = suppress() if env is None else self.using_env(env)\n with ctx_mgr:\n data = self.store.to_dict().copy()\n # if not internal remove internal settings\n if not internal:\n for name in UPPER_DEFAULT_SETTINGS:\n data.pop(name, None)\n return data\n\n to_dict = as_dict # backwards compatibility\n\n def _dotted_get(\n self, dotted_key, default=None, parent=None, cast=None, **kwargs\n ):\n \"\"\"\n Perform dotted key lookups and keep track of where we are.\n :param key: The name of the setting value, will always be upper case\n :param default: In case of not found it will be returned\n :param parent: Is there a pre-loaded parent in a nested data?\n \"\"\"\n split_key = dotted_key.split(\".\")\n name, keys = split_key[0], split_key[1:]\n result = self.get(name, default=default, parent=parent, **kwargs)\n\n # If we've reached the end, or parent key not found, then return result\n if not keys or result == default:\n if cast and cast in converters:\n return get_converter(cast, result, box_settings=self)\n elif cast is True:\n return parse_conf_data(result, tomlfy=True, box_settings=self)\n return result\n\n # If we've still got key elements to traverse, let's do that.\n return self._dotted_get(\n \".\".join(keys), default=default, parent=result, cast=cast, **kwargs\n )\n\n def get(\n self,\n key,\n default=None,\n cast=None,\n fresh=False,\n dotted_lookup=empty,\n parent=None,\n ):\n \"\"\"\n Get a value from settings store, this is the preferred way to access::\n\n >>> from dynaconf import settings\n >>> settings.get('KEY')\n\n :param key: The name of the setting value, will always be upper case\n :param default: In case of not found it will be returned\n :param cast: Should cast in to @int, @float, @bool or @json ?\n :param fresh: Should reload from loaders store before access?\n :param dotted_lookup: Should perform dotted-path lookup?\n :param parent: Is there a pre-loaded parent in a nested data?\n :return: The value if found, default or None\n \"\"\"\n nested_sep = self._store.get(\"NESTED_SEPARATOR_FOR_DYNACONF\")\n if nested_sep and nested_sep in key:\n # turn FOO__bar__ZAZ in `FOO.bar.ZAZ`\n key = key.replace(nested_sep, \".\")\n\n if dotted_lookup is empty:\n dotted_lookup = self._store.get(\"DOTTED_LOOKUP_FOR_DYNACONF\")\n\n if \".\" in key and dotted_lookup:\n return self._dotted_get(\n dotted_key=key,\n default=default,\n cast=cast,\n fresh=fresh,\n parent=parent,\n )\n\n if default is not None:\n # default values should behave exactly Dynaconf parsed values\n if isinstance(default, list):\n default = BoxList(default)\n elif isinstance(default, dict):\n default = DynaBox(default)\n\n key = upperfy(key)\n if key in self._deleted:\n return default\n\n if (\n fresh\n or self._fresh\n or key in getattr(self, \"FRESH_VARS_FOR_DYNACONF\", ())\n ) and key not in UPPER_DEFAULT_SETTINGS:\n self.unset(key)\n self.execute_loaders(key=key)\n\n data = (parent or self.store).get(key, default)\n if cast:\n data = get_converter(cast, data, box_settings=self)\n return data\n\n def 
exists(self, key, fresh=False):\n \"\"\"Check if key exists\n\n :param key: the name of setting variable\n :param fresh: if key should be taken from source directly\n :return: Boolean\n \"\"\"\n key = upperfy(key)\n if key in self._deleted:\n return False\n return self.get(key, fresh=fresh, default=missing) is not missing\n\n def get_fresh(self, key, default=None, cast=None):\n \"\"\"This is a shortcut to `get(key, fresh=True)`. always reload from\n loaders store before getting the var.\n\n :param key: The name of the setting value, will always be upper case\n :param default: In case of not found it will be returned\n :param cast: Should cast in to @int, @float, @bool or @json ?\n :return: The value if found, default or None\n \"\"\"\n return self.get(key, default=default, cast=cast, fresh=True)\n\n def get_environ(self, key, default=None, cast=None):\n \"\"\"Get value from environment variable using os.environ.get\n\n :param key: The name of the setting value, will always be upper case\n :param default: In case of not found it will be returned\n :param cast: Should cast in to @int, @float, @bool or @json ?\n or cast must be true to use cast inference\n :return: The value if found, default or None\n \"\"\"\n key = upperfy(key)\n data = self.environ.get(key, default)\n if data:\n if cast in converters:\n data = get_converter(cast, data, box_settings=self)\n elif cast is True:\n data = parse_conf_data(data, tomlfy=True, box_settings=self)\n return data\n\n def exists_in_environ(self, key):\n \"\"\"Return True if env variable is exported\"\"\"\n return upperfy(key) in self.environ\n\n def as_bool(self, key):\n \"\"\"Partial method for get with bool cast\"\"\"\n return self.get(key, cast=\"@bool\")\n\n def as_int(self, key):\n \"\"\"Partial method for get with int cast\"\"\"\n return self.get(key, cast=\"@int\")\n\n def as_float(self, key):\n \"\"\"Partial method for get with float cast\"\"\"\n return self.get(key, cast=\"@float\")\n\n def as_json(self, key):\n \"\"\"Partial method for get with json cast\"\"\"\n return self.get(key, cast=\"@json\")\n\n @property\n def loaded_envs(self):\n \"\"\"Get or create internal loaded envs list\"\"\"\n if not self._loaded_envs:\n self._loaded_envs = []\n return self._loaded_envs\n\n @loaded_envs.setter\n def loaded_envs(self, value):\n \"\"\"Setter for env list\"\"\"\n self._loaded_envs = value\n\n # compat\n loaded_namespaces = loaded_envs\n\n @property\n def loaded_by_loaders(self):\n \"\"\"Gets the internal mapping of LOADER -> values\"\"\"\n return self._loaded_by_loaders\n\n def from_env(self, env=\"\", keep=False, **kwargs):\n \"\"\"Return a new isolated settings object pointing to specified env.\n\n Example of settings.toml::\n\n [development]\n message = 'This is in dev'\n [other]\n message = 'this is in other env'\n\n Program::\n\n >>> from dynaconf import settings\n >>> print(settings.MESSAGE)\n 'This is in dev'\n >>> print(settings.from_env('other').MESSAGE)\n 'This is in other env'\n # The existing settings object remains the same.\n >>> print(settings.MESSAGE)\n 'This is in dev'\n\n Arguments:\n env {str} -- Env to load (development, production, custom)\n\n Keyword Arguments:\n keep {bool} -- Keep pre-existing values (default: {False})\n kwargs {dict} -- Passed directly to new instance.\n \"\"\"\n cache_key = f\"{env}_{keep}_{kwargs}\"\n if cache_key in self._env_cache:\n return self._env_cache[cache_key]\n\n new_data = {\n key: self.get(key)\n for key in UPPER_DEFAULT_SETTINGS\n if key not in RENAMED_VARS\n }\n\n if self.filter_strategy:\n 
# Retain the filtering strategy when switching environments\n new_data[\"filter_strategy\"] = self.filter_strategy\n\n # This is here for backwards compatibility\n # To be removed on 4.x.x\n default_settings_paths = self.get(\"default_settings_paths\")\n if default_settings_paths: # pragma: no cover\n new_data[\"default_settings_paths\"] = default_settings_paths\n\n if keep:\n # keep existing values from current env\n new_data.update(\n {\n key: value\n for key, value in self.store.to_dict().copy().items()\n if key.isupper() and key not in RENAMED_VARS\n }\n )\n\n new_data.update(kwargs)\n new_data[\"FORCE_ENV_FOR_DYNACONF\"] = env\n new_settings = LazySettings(**new_data)\n self._env_cache[cache_key] = new_settings\n return new_settings\n\n @contextmanager\n def using_env(self, env, clean=True, silent=True, filename=None):\n \"\"\"\n This context manager allows the contextual use of a different env\n Example of settings.toml::\n\n [development]\n message = 'This is in dev'\n [other]\n message = 'this is in other env'\n\n Program::\n\n >>> from dynaconf import settings\n >>> print settings.MESSAGE\n 'This is in dev'\n >>> with settings.using_env('OTHER'):\n ... print settings.MESSAGE\n 'this is in other env'\n\n :param env: Upper case name of env without any _\n :param clean: If preloaded vars should be cleaned\n :param silent: Silence errors\n :param filename: Custom filename to load (optional)\n :return: context\n \"\"\"\n try:\n self.setenv(env, clean=clean, silent=silent, filename=filename)\n yield\n finally:\n if env.lower() != self.ENV_FOR_DYNACONF.lower():\n del self.loaded_envs[-1]\n self.setenv(self.current_env, clean=clean, filename=filename)\n\n # compat\n using_namespace = using_env\n\n @contextmanager\n def fresh(self):\n \"\"\"\n this context manager force the load of a key direct from the store::\n\n $ export DYNACONF_VALUE='Original'\n >>> from dynaconf import settings\n >>> print settings.VALUE\n 'Original'\n $ export DYNACONF_VALUE='Changed Value'\n >>> print settings.VALUE # will not be reloaded from env vars\n 'Original\n >>> with settings.fresh(): # inside this context all is reloaded\n ... 
print settings.VALUE\n 'Changed Value'\n\n an alternative is using `settings.get_fresh(key)`\n\n :return: context\n \"\"\"\n\n self._fresh = True\n yield\n self._fresh = False\n\n @property\n def current_env(self):\n \"\"\"Return the current active env\"\"\"\n\n if self.ENVIRONMENTS_FOR_DYNACONF is False:\n return self.MAIN_ENV_FOR_DYNACONF.lower()\n\n if self.FORCE_ENV_FOR_DYNACONF is not None:\n return self.FORCE_ENV_FOR_DYNACONF\n\n try:\n return self.loaded_envs[-1]\n except IndexError:\n return self.ENV_FOR_DYNACONF\n\n # compat\n current_namespace = current_env\n\n @property\n def settings_module(self):\n \"\"\"Gets SETTINGS_MODULE variable\"\"\"\n settings_module = parse_conf_data(\n os.environ.get(\n self.ENVVAR_FOR_DYNACONF, self.SETTINGS_FILE_FOR_DYNACONF\n ),\n tomlfy=True,\n box_settings=self,\n )\n if settings_module != getattr(self, \"SETTINGS_MODULE\", None):\n self.set(\"SETTINGS_MODULE\", settings_module)\n\n # This is for backewards compatibility, to be removed on 4.x.x\n if not self.SETTINGS_MODULE and self.get(\"default_settings_paths\"):\n self.SETTINGS_MODULE = self.get(\"default_settings_paths\")\n\n return self.SETTINGS_MODULE\n\n # Backwards compatibility see #169\n settings_file = settings_module\n\n def setenv(self, env=None, clean=True, silent=True, filename=None):\n \"\"\"Used to interactively change the env\n Example of settings.toml::\n\n [development]\n message = 'This is in dev'\n [other]\n message = 'this is in other env'\n\n Program::\n\n >>> from dynaconf import settings\n >>> print settings.MESSAGE\n 'This is in dev'\n >>> with settings.using_env('OTHER'):\n ... print settings.MESSAGE\n 'this is in other env'\n\n :param env: Upper case name of env without any _\n :param clean: If preloaded vars should be cleaned\n :param silent: Silence errors\n :param filename: Custom filename to load (optional)\n :return: context\n \"\"\"\n env = env or self.ENV_FOR_DYNACONF\n\n if not isinstance(env, str):\n raise AttributeError(\"env should be a string\")\n\n env = env.upper()\n\n if env != self.ENV_FOR_DYNACONF:\n self.loaded_envs.append(env)\n else:\n self.loaded_envs = []\n\n if clean:\n self.clean(env=env)\n self.execute_loaders(env=env, silent=silent, filename=filename)\n\n # compat\n namespace = setenv\n\n def clean(self, *args, **kwargs):\n \"\"\"Clean all loaded values to reload when switching envs\"\"\"\n for key in list(self.store.keys()):\n self.unset(key)\n\n def unset(self, key, force=False):\n \"\"\"Unset on all references\n\n :param key: The key to be unset\n :param force: Bypass default checks and force unset\n \"\"\"\n key = upperfy(key.strip())\n if (\n key not in UPPER_DEFAULT_SETTINGS\n and key not in self._defaults\n or force\n ):\n with suppress(KeyError, AttributeError):\n # AttributeError can happen when a LazyValue consumes\n # a previously deleted key\n delattr(self, key)\n del self.store[key]\n\n def unset_all(self, keys, force=False): # pragma: no cover\n \"\"\"Unset based on a list of keys\n\n :param keys: a list of keys\n :param force: Bypass default checks and force unset\n \"\"\"\n for key in keys:\n self.unset(key, force=force)\n\n def _dotted_set(self, dotted_key, value, tomlfy=False, **kwargs):\n \"\"\"Sets dotted keys as nested dictionaries.\n\n Dotted set will always reassign the value, to merge use `@merge` token\n\n Arguments:\n dotted_key {str} -- A traversal name e.g: foo.bar.zaz\n value {Any} -- The value to set to the nested value.\n\n Keyword Arguments:\n tomlfy {bool} -- Perform toml parsing (default: {False})\n 
\"\"\"\n\n split_keys = dotted_key.split(\".\")\n existing_data = self.get(split_keys[0], {})\n new_data = tree = DynaBox(box_settings=self)\n\n for k in split_keys[:-1]:\n tree = tree.setdefault(k, {})\n\n value = parse_conf_data(value, tomlfy=tomlfy, box_settings=self)\n tree[split_keys[-1]] = value\n\n if existing_data:\n new_data = object_merge(\n old=DynaBox({split_keys[0]: existing_data}),\n new=new_data,\n full_path=split_keys,\n )\n self.update(data=new_data, tomlfy=tomlfy, **kwargs)\n\n def set(\n self,\n key,\n value,\n loader_identifier=None,\n tomlfy=False,\n dotted_lookup=empty,\n is_secret=\"DeprecatedArgument\", # noqa\n merge=False,\n ):\n \"\"\"Set a value storing references for the loader\n\n :param key: The key to store\n :param value: The value to store\n :param loader_identifier: Optional loader name e.g: toml, yaml etc.\n :param tomlfy: Bool define if value is parsed by toml (defaults False)\n :param merge: Bool define if existing nested data will be merged.\n \"\"\"\n if dotted_lookup is empty:\n dotted_lookup = self.get(\"DOTTED_LOOKUP_FOR_DYNACONF\")\n\n nested_sep = self.get(\"NESTED_SEPARATOR_FOR_DYNACONF\")\n if nested_sep and nested_sep in key:\n # turn FOO__bar__ZAZ in `FOO.bar.ZAZ`\n key = key.replace(nested_sep, \".\")\n\n if \".\" in key and dotted_lookup is True:\n return self._dotted_set(\n key, value, loader_identifier=loader_identifier, tomlfy=tomlfy\n )\n\n value = parse_conf_data(value, tomlfy=tomlfy, box_settings=self)\n key = upperfy(key.strip())\n existing = getattr(self, key, None)\n\n if getattr(value, \"_dynaconf_del\", None):\n # just in case someone use a `@del` in a first level var.\n self.unset(key, force=True)\n return\n\n if getattr(value, \"_dynaconf_reset\", False): # pragma: no cover\n # just in case someone use a `@reset` in a first level var.\n value = value.unwrap()\n\n if getattr(value, \"_dynaconf_merge_unique\", False):\n # just in case someone use a `@merge_unique` in a first level var\n if existing:\n value = object_merge(existing, value.unwrap(), unique=True)\n else:\n value = value.unwrap()\n\n if getattr(value, \"_dynaconf_merge\", False):\n # just in case someone use a `@merge` in a first level var\n if existing:\n value = object_merge(existing, value.unwrap())\n else:\n value = value.unwrap()\n\n if existing is not None and existing != value:\n # `dynaconf_merge` used in file root `merge=True`\n if merge:\n value = object_merge(existing, value)\n else:\n # `dynaconf_merge` may be used within the key structure\n # Or merge_enabled is set to True\n value = self._merge_before_set(existing, value)\n\n if isinstance(value, dict):\n value = DynaBox(value, box_settings=self)\n\n self.store[key] = value\n self._deleted.discard(key)\n super().__setattr__(key, value)\n\n # set loader identifiers so cleaners know which keys to clean\n if loader_identifier and loader_identifier in self.loaded_by_loaders:\n self.loaded_by_loaders[loader_identifier][key] = value\n elif loader_identifier:\n self.loaded_by_loaders[loader_identifier] = {key: value}\n elif loader_identifier is None:\n # if .set is called without loader identifier it becomes\n # a default value and goes away only when explicitly unset\n self._defaults[key] = value\n\n def update(\n self,\n data=None,\n loader_identifier=None,\n tomlfy=False,\n merge=False,\n is_secret=\"DeprecatedArgument\", # noqa\n dotted_lookup=empty,\n **kwargs,\n ):\n \"\"\"\n Update values in the current settings object without saving in stores::\n\n >>> from dynaconf import settings\n >>> print 
settings.NAME\n 'Bruno'\n >>> settings.update({'NAME': 'John'}, other_value=1)\n >>> print settings.NAME\n 'John'\n >>> print settings.OTHER_VALUE\n 1\n\n :param data: Data to be updated\n :param loader_identifier: Only to be used by custom loaders\n :param tomlfy: Bool define if value is parsed by toml (defaults False)\n :param merge: Bool define if existing nested data will be merged.\n :param kwargs: extra values to update\n :return: None\n \"\"\"\n data = data or {}\n data.update(kwargs)\n for key, value in data.items():\n self.set(\n key,\n value,\n loader_identifier=loader_identifier,\n tomlfy=tomlfy,\n merge=merge,\n dotted_lookup=dotted_lookup,\n )\n\n def _merge_before_set(self, existing, value):\n \"\"\"Merge the new value being set with the existing value before set\"\"\"\n global_merge = getattr(self, \"MERGE_ENABLED_FOR_DYNACONF\", False)\n if isinstance(value, dict):\n local_merge = value.pop(\n \"dynaconf_merge\", value.pop(\"dynaconf_merge_unique\", None)\n )\n if local_merge not in (True, False, None) and not value:\n # In case `dynaconf_merge:` holds value not boolean - ref #241\n value = local_merge\n\n if global_merge or local_merge:\n value = object_merge(existing, value)\n\n if isinstance(value, (list, tuple)):\n local_merge = (\n \"dynaconf_merge\" in value or \"dynaconf_merge_unique\" in value\n )\n if global_merge or local_merge:\n value = list(value)\n unique = False\n if local_merge:\n try:\n value.remove(\"dynaconf_merge\")\n except ValueError: # EAFP\n value.remove(\"dynaconf_merge_unique\")\n unique = True\n value = object_merge(existing, value, unique=unique)\n return value\n\n @property\n def loaders(self): # pragma: no cover\n \"\"\"Return available loaders\"\"\"\n if self.LOADERS_FOR_DYNACONF in (None, 0, \"0\", \"false\", False):\n return []\n\n if not self._loaders:\n self._loaders = self.LOADERS_FOR_DYNACONF\n\n return [importlib.import_module(loader) for loader in self._loaders]\n\n def reload(self, env=None, silent=None): # pragma: no cover\n \"\"\"Clean end Execute all loaders\"\"\"\n self.clean()\n self.execute_loaders(env, silent)\n\n def execute_loaders(\n self, env=None, silent=None, key=None, filename=None, loaders=None\n ):\n \"\"\"Execute all internal and registered loaders\n\n :param env: The environment to load\n :param silent: If loading errors is silenced\n :param key: if provided load a single key\n :param filename: optional custom filename to load\n :param loaders: optional list of loader modules\n \"\"\"\n if key is None:\n default_loader(self, self._defaults)\n\n env = (env or self.current_env).upper()\n silent = silent or self.SILENT_ERRORS_FOR_DYNACONF\n\n if loaders is None:\n self.pre_load(env, silent=silent, key=key)\n settings_loader(\n self, env=env, silent=silent, key=key, filename=filename\n )\n self.load_extra_yaml(env, silent, key) # DEPRECATED\n enable_external_loaders(self)\n\n loaders = self.loaders\n\n for core_loader in loaders:\n core_loader.load(self, env, silent=silent, key=key)\n\n self.load_includes(env, silent=silent, key=key)\n execute_hooks(\"post\", self, env, silent=silent, key=key)\n\n def pre_load(self, env, silent, key):\n \"\"\"Do we have any file to pre-load before main settings file?\"\"\"\n preloads = self.get(\"PRELOAD_FOR_DYNACONF\", [])\n if preloads:\n self.load_file(path=preloads, env=env, silent=silent, key=key)\n\n def load_includes(self, env, silent, key):\n \"\"\"Do we have any nested includes we need to process?\"\"\"\n includes = self.get(\"DYNACONF_INCLUDE\", [])\n 
includes.extend(ensure_a_list(self.get(\"INCLUDES_FOR_DYNACONF\")))\n if includes:\n self.load_file(path=includes, env=env, silent=silent, key=key)\n # ensure env vars are the last thing loaded after all includes\n last_loader = self.loaders and self.loaders[-1]\n if last_loader and last_loader == env_loader:\n last_loader.load(self, env, silent, key)\n\n def load_file(self, path=None, env=None, silent=True, key=None):\n \"\"\"Programmatically load files from ``path``.\n\n :param path: A single filename or a file list\n :param env: Which env to load from file (default current_env)\n :param silent: Should raise errors?\n :param key: Load a single key?\n \"\"\"\n env = (env or self.current_env).upper()\n files = ensure_a_list(path)\n if files:\n already_loaded = set()\n for _filename in files:\n\n if py_loader.try_to_load_from_py_module_name(\n obj=self, name=_filename, silent=True\n ):\n # if it was possible to load from module name\n # continue the loop.\n continue\n\n root_dir = str(self._root_path or os.getcwd())\n\n # Issue #494\n if (\n isinstance(_filename, Path)\n and str(_filename.parent) in root_dir\n ): # pragma: no cover\n filepath = str(_filename)\n else:\n filepath = os.path.join(root_dir, str(_filename))\n\n paths = [\n p\n for p in sorted(glob.glob(filepath))\n if \".local.\" not in p\n ]\n local_paths = [\n p for p in sorted(glob.glob(filepath)) if \".local.\" in p\n ]\n\n # Handle possible *.globs sorted alphanumeric\n for path in paths + local_paths:\n if path in already_loaded: # pragma: no cover\n continue\n settings_loader(\n obj=self,\n env=env,\n silent=silent,\n key=key,\n filename=path,\n )\n already_loaded.add(path)\n\n @property\n def _root_path(self):\n \"\"\"ROOT_PATH_FOR_DYNACONF or the path of first loaded file or '.'\"\"\"\n\n if self.ROOT_PATH_FOR_DYNACONF is not None:\n return self.ROOT_PATH_FOR_DYNACONF\n\n if self._loaded_files: # called once\n root_path = os.path.dirname(self._loaded_files[0])\n self.set(\"ROOT_PATH_FOR_DYNACONF\", root_path)\n return root_path\n\n def load_extra_yaml(self, env, silent, key):\n \"\"\"This is deprecated, kept for compat\n\n .. deprecated:: 1.0.0\n Use multiple settings or INCLUDES_FOR_DYNACONF files instead.\n \"\"\"\n if self.get(\"YAML\") is not None:\n warnings.warn(\n \"The use of YAML var is deprecated, please define multiple \"\n \"filepaths instead: \"\n \"e.g: SETTINGS_FILE_FOR_DYNACONF = \"\n \"'settings.py,settings.yaml,settings.toml' or \"\n \"INCLUDES_FOR_DYNACONF=['path.toml', 'folder/*']\"\n )\n yaml_loader.load(\n self,\n env=env,\n filename=self.find_file(self.get(\"YAML\")),\n silent=silent,\n key=key,\n )\n\n def path_for(self, *args):\n \"\"\"Path containing _root_path\"\"\"\n if args and args[0].startswith(os.path.sep):\n return os.path.join(*args)\n return os.path.join(self._root_path or os.getcwd(), *args)\n\n def find_file(self, *args, **kwargs):\n kwargs.setdefault(\"project_root\", self._root_path)\n kwargs.setdefault(\n \"skip_files\", self.get(\"SKIP_FILES_FOR_DYNACONF\", [])\n )\n return find_file(*args, **kwargs)\n\n def flag(self, key, env=None):\n \"\"\"Feature flagging system\n write flags to redis\n $ dynaconf write redis -s DASHBOARD=1 -e premiumuser\n meaning: Any premium user has DASHBOARD feature enabled\n\n In your program do::\n\n # premium user has access to dashboard?\n >>> if settings.flag('dashboard', 'premiumuser'):\n ... 
activate_dashboard()\n\n The value is ensured to be loaded fresh from redis server\n\n It also works with file settings but the recommended is redis\n as the data can be loaded once it is updated.\n\n :param key: The flag name\n :param env: The env to look for\n \"\"\"\n env = env or self.ENVVAR_PREFIX_FOR_DYNACONF or \"DYNACONF\"\n with self.using_env(env):\n value = self.get_fresh(key)\n return value is True or value in true_values\n\n def populate_obj(self, obj, keys=None, ignore=None):\n \"\"\"Given the `obj` populate it using self.store items.\n\n :param obj: An object to be populated, a class instance.\n :param keys: A list of keys to be included.\n :param ignore: A list of keys to be excluded.\n \"\"\"\n keys = keys or self.keys()\n for key in keys:\n key = upperfy(key)\n if ignore and key in ignore:\n continue\n value = self.get(key, empty)\n if value is not empty:\n setattr(obj, key, value)\n\n def dynaconf_clone(self):\n \"\"\"Clone the current settings object.\"\"\"\n try:\n return copy.deepcopy(self)\n except TypeError:\n # can't deepcopy settings object because of module object\n # being set as value in the settings dict\n new_data = self.to_dict(internal=True)\n new_data[\"dynaconf_skip_loaders\"] = True\n new_data[\"dynaconf_skip_validators\"] = True\n return Settings(**new_data)\n\n @property\n def dynaconf(self):\n \"\"\"A proxy to access internal methods and attributes\n\n Starting in 3.0.0 Dynaconf now allows first level lower case\n keys that are not reserved keyword, so this is a proxy to\n internal methods and attrs.\n \"\"\"\n\n class AttrProxy:\n def __init__(self, obj):\n self.obj = obj\n\n def __getattr__(self, name):\n return getattr(self.obj, f\"dynaconf_{name}\")\n\n return AttrProxy(self)\n\n @property\n def logger(self): # pragma: no cover\n \"\"\"backwards compatibility with pre 3.0 loaders\n In dynaconf 3.0.0 logger and debug messages has been removed.\n \"\"\"\n warnings.warn(\n \"logger and DEBUG messages has been removed on dynaconf 3.0.0\"\n )\n import logging # noqa\n\n return logging.getLogger(\"dynaconf\")\n\n def is_overridden(self, setting): # noqa\n \"\"\"This is to provide Django DJDT support: issue 382\"\"\"\n return False\n\n\n\"\"\"Upper case default settings\"\"\"\nUPPER_DEFAULT_SETTINGS = [k for k in dir(default_settings) if k.isupper()]\n\n\"\"\"Attributes created on Settings before 3.0.0\"\"\"\nRESERVED_ATTRS = (\n [\n item[0]\n for item in inspect.getmembers(LazySettings)\n if not item[0].startswith(\"__\")\n ]\n + [\n item[0]\n for item in inspect.getmembers(Settings)\n if not item[0].startswith(\"__\")\n ]\n + [\n \"_defaults\",\n \"_deleted\",\n \"_env_cache\",\n \"_fresh\",\n \"_kwargs\",\n \"_loaded_by_loaders\",\n \"_loaded_envs\",\n \"_loaded_hooks\",\n \"_loaded_py_modules\",\n \"_loaded_files\",\n \"_loaders\",\n \"_not_installed_warnings\",\n \"_store\",\n \"_warn_dynaconf_global_settings\",\n \"_should_load_dotenv\",\n \"environ\",\n \"SETTINGS_MODULE\",\n \"filter_strategy\",\n \"validators\",\n \"_validate_only\",\n \"_validate_exclude\",\n \"_validate_only_current_env\",\n ]\n)\n", "path": "dynaconf/base.py" } ]
[ { "content": "from __future__ import annotations\n\nimport copy\nimport glob\nimport importlib\nimport inspect\nimport os\nimport warnings\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nfrom contextlib import suppress\nfrom pathlib import Path\n\nfrom dynaconf import default_settings\nfrom dynaconf.loaders import default_loader\nfrom dynaconf.loaders import enable_external_loaders\nfrom dynaconf.loaders import env_loader\nfrom dynaconf.loaders import execute_hooks\nfrom dynaconf.loaders import py_loader\nfrom dynaconf.loaders import settings_loader\nfrom dynaconf.loaders import yaml_loader\nfrom dynaconf.utils import BANNER\nfrom dynaconf.utils import compat_kwargs\nfrom dynaconf.utils import ensure_a_list\nfrom dynaconf.utils import missing\nfrom dynaconf.utils import object_merge\nfrom dynaconf.utils import recursively_evaluate_lazy_format\nfrom dynaconf.utils import RENAMED_VARS\nfrom dynaconf.utils import upperfy\nfrom dynaconf.utils.boxing import DynaBox\nfrom dynaconf.utils.files import find_file\nfrom dynaconf.utils.functional import empty\nfrom dynaconf.utils.functional import LazyObject\nfrom dynaconf.utils.parse_conf import converters\nfrom dynaconf.utils.parse_conf import get_converter\nfrom dynaconf.utils.parse_conf import parse_conf_data\nfrom dynaconf.utils.parse_conf import true_values\nfrom dynaconf.validator import ValidatorList\nfrom dynaconf.vendor.box.box_list import BoxList\n\n\nclass LazySettings(LazyObject):\n \"\"\"Loads settings lazily from multiple sources::\n\n settings = Dynaconf(\n settings_files=[\"settings.toml\"], # path/glob\n environments=True, # activate layered environments\n envvar_prefix=\"MYAPP\", # `export MYAPP_FOO=bar`\n env_switcher=\"MYAPP_MODE\", # `export MYAPP_MODE=production`\n load_dotenv=True, # read a .env file\n )\n\n More options available on https://www.dynaconf.com/configuration/\n \"\"\"\n\n def __init__(self, wrapped=None, **kwargs):\n \"\"\"\n handle initialization for the customization cases\n\n :param wrapped: a deepcopy of this object will be wrapped (issue #596)\n :param kwargs: values that overrides default_settings\n \"\"\"\n\n self._warn_dynaconf_global_settings = kwargs.pop(\n \"warn_dynaconf_global_settings\", None\n ) # in 3.0.0 global settings is deprecated\n\n self.__resolve_config_aliases(kwargs)\n compat_kwargs(kwargs)\n self._kwargs = kwargs\n super().__init__()\n\n if wrapped:\n if self._django_override:\n # This fixes django issue #596\n self._wrapped = copy.deepcopy(wrapped)\n else:\n self._wrapped = wrapped\n\n def __resolve_config_aliases(self, kwargs):\n \"\"\"takes aliases for _FOR_DYNACONF configurations\n\n e.g: ROOT_PATH='/' is transformed into `ROOT_PATH_FOR_DYNACONF`\n \"\"\"\n\n mispells = {\n \"settings_files\": \"settings_file\",\n \"SETTINGS_FILES\": \"SETTINGS_FILE\",\n \"environment\": \"environments\",\n \"ENVIRONMENT\": \"ENVIRONMENTS\",\n }\n for misspell, correct in mispells.items():\n if misspell in kwargs:\n kwargs[correct] = kwargs.pop(misspell)\n\n for_dynaconf_keys = {\n key\n for key in UPPER_DEFAULT_SETTINGS\n if key.endswith(\"_FOR_DYNACONF\")\n }\n aliases = {\n key.upper()\n for key in kwargs\n if f\"{key.upper()}_FOR_DYNACONF\" in for_dynaconf_keys\n }\n for alias in aliases:\n value = kwargs.pop(alias, empty)\n if value is empty:\n value = kwargs.pop(alias.lower())\n kwargs[f\"{alias}_FOR_DYNACONF\"] = value\n\n def __getattr__(self, name):\n \"\"\"Allow getting keys from self.store using dot notation\"\"\"\n if self._wrapped is empty:\n self._setup()\n 
if name in self._wrapped._deleted: # noqa\n raise AttributeError(\n f\"Attribute {name} was deleted, \" \"or belongs to different env\"\n )\n\n if name not in RESERVED_ATTRS:\n lowercase_mode = self._kwargs.get(\n \"LOWERCASE_READ_FOR_DYNACONF\",\n default_settings.LOWERCASE_READ_FOR_DYNACONF,\n )\n if lowercase_mode is True:\n name = name.upper()\n\n if (\n name.isupper()\n and (\n self._wrapped._fresh\n or name in self._wrapped.FRESH_VARS_FOR_DYNACONF\n )\n and name not in UPPER_DEFAULT_SETTINGS\n ):\n return self._wrapped.get_fresh(name)\n value = getattr(self._wrapped, name)\n if name not in RESERVED_ATTRS:\n return recursively_evaluate_lazy_format(value, self)\n return value\n\n def __call__(self, *args, **kwargs):\n \"\"\"Allow direct call of settings('val')\n in place of settings.get('val')\n \"\"\"\n return self.get(*args, **kwargs)\n\n @property\n def _should_load_dotenv(self):\n \"\"\"Chicken and egg problem, we must manually check envvar\n before deciding if we are loading envvars :)\"\"\"\n _environ_load_dotenv = parse_conf_data(\n os.environ.get(\"LOAD_DOTENV_FOR_DYNACONF\"), tomlfy=True\n )\n return self._kwargs.get(\"load_dotenv\", _environ_load_dotenv)\n\n def _setup(self):\n \"\"\"Initial setup, run once.\"\"\"\n\n if self._warn_dynaconf_global_settings:\n warnings.warn(\n \"Usage of `from dynaconf import settings` is now \"\n \"DEPRECATED in 3.0.0+. You are encouraged to change it to \"\n \"your own instance e.g: `settings = Dynaconf(*options)`\",\n DeprecationWarning,\n )\n\n default_settings.reload(self._should_load_dotenv)\n environment_variable = self._kwargs.get(\n \"ENVVAR_FOR_DYNACONF\", default_settings.ENVVAR_FOR_DYNACONF\n )\n settings_module = os.environ.get(environment_variable)\n self._wrapped = Settings(\n settings_module=settings_module, **self._kwargs\n )\n\n def configure(self, settings_module=None, **kwargs):\n \"\"\"\n Allows user to reconfigure settings object passing a new settings\n module or separated kwargs\n\n :param settings_module: defines the settings file\n :param kwargs: override default settings\n \"\"\"\n default_settings.reload(self._should_load_dotenv)\n environment_var = self._kwargs.get(\n \"ENVVAR_FOR_DYNACONF\", default_settings.ENVVAR_FOR_DYNACONF\n )\n settings_module = settings_module or os.environ.get(environment_var)\n compat_kwargs(kwargs)\n kwargs.update(self._kwargs)\n self._wrapped = Settings(settings_module=settings_module, **kwargs)\n\n @property\n def configured(self):\n \"\"\"If wrapped is configured\"\"\"\n return self._wrapped is not empty\n\n\nclass Settings:\n \"\"\"\n Common logic for settings whether set by a module or by the user.\n \"\"\"\n\n dynaconf_banner = BANNER\n _store = DynaBox()\n\n def __init__(self, settings_module=None, **kwargs): # pragma: no cover\n \"\"\"Execute loaders and custom initialization\n\n :param settings_module: defines the settings file\n :param kwargs: override default settings\n \"\"\"\n self._fresh = False\n self._loaded_envs = []\n self._loaded_hooks = defaultdict(dict)\n self._loaded_py_modules = []\n self._loaded_files = []\n self._deleted = set()\n self._store = DynaBox(box_settings=self)\n self._env_cache = {}\n self._loaded_by_loaders = {}\n self._loaders = []\n self._defaults = DynaBox(box_settings=self)\n self.environ = os.environ\n self.SETTINGS_MODULE = None\n self.filter_strategy = kwargs.get(\"filter_strategy\", None)\n self._not_installed_warnings = []\n self._validate_only = kwargs.pop(\"validate_only\", None)\n self._validate_exclude = kwargs.pop(\"validate_exclude\", 
None)\n self._validate_only_current_env = kwargs.pop(\n \"validate_only_current_env\", False\n )\n\n self.validators = ValidatorList(\n self, validators=kwargs.pop(\"validators\", None)\n )\n\n compat_kwargs(kwargs)\n if settings_module:\n self.set(\"SETTINGS_FILE_FOR_DYNACONF\", settings_module)\n for key, value in kwargs.items():\n self.set(key, value)\n # execute loaders only after setting defaults got from kwargs\n self._defaults = kwargs\n\n # The following flags are used for when copying of settings is done\n skip_loaders = kwargs.get(\"dynaconf_skip_loaders\", False)\n skip_validators = kwargs.get(\"dynaconf_skip_validators\", False)\n\n if not skip_loaders:\n self.execute_loaders()\n\n if not skip_validators:\n self.validators.validate(\n only=self._validate_only,\n exclude=self._validate_exclude,\n only_current_env=self._validate_only_current_env,\n )\n\n def __call__(self, *args, **kwargs):\n \"\"\"Allow direct call of `settings('val')`\n in place of `settings.get('val')`\n \"\"\"\n return self.get(*args, **kwargs)\n\n def __setattr__(self, name, value):\n \"\"\"Allow `settings.FOO = 'value'` while keeping internal attrs.\"\"\"\n\n if name in RESERVED_ATTRS:\n super().__setattr__(name, value)\n else:\n self.set(name, value)\n\n def __delattr__(self, name):\n \"\"\"stores reference in `_deleted` for proper error management\"\"\"\n self._deleted.add(name)\n if hasattr(self, name):\n super().__delattr__(name)\n\n def __contains__(self, item):\n \"\"\"Respond to `item in settings`\"\"\"\n return item.upper() in self.store or item.lower() in self.store\n\n def __getattribute__(self, name):\n if name not in RESERVED_ATTRS and name not in UPPER_DEFAULT_SETTINGS:\n with suppress(KeyError):\n # self._store has Lazy values already evaluated\n if (\n name.islower()\n and self._store.get(\"LOWERCASE_READ_FOR_DYNACONF\", empty)\n is False\n ):\n # only matches exact casing, first levels always upper\n return self._store.to_dict()[name]\n # perform lookups for upper, and casefold\n return self._store[name]\n # in case of RESERVED_ATTRS or KeyError above, keep default behaviour\n return super().__getattribute__(name)\n\n def __getitem__(self, item):\n \"\"\"Allow getting variables as dict keys `settings['KEY']`\"\"\"\n value = self.get(item, default=empty)\n if value is empty:\n raise KeyError(f\"{item} does not exist\")\n return value\n\n def __setitem__(self, key, value):\n \"\"\"Allow `settings['KEY'] = 'value'`\"\"\"\n self.set(key, value)\n\n @property\n def store(self):\n \"\"\"Gets internal storage\"\"\"\n return self._store\n\n def __dir__(self):\n \"\"\"Enable auto-complete for code editors\"\"\"\n return (\n RESERVED_ATTRS\n + [k.lower() for k in self.keys()]\n + list(self.keys())\n )\n\n def __iter__(self):\n \"\"\"Redirects to store object\"\"\"\n yield from self._store\n\n def items(self):\n \"\"\"Redirects to store object\"\"\"\n return self._store.items()\n\n def keys(self):\n \"\"\"Redirects to store object\"\"\"\n return self.store.keys()\n\n def values(self):\n \"\"\"Redirects to store object\"\"\"\n return self.store.values()\n\n def setdefault(self, item, default, apply_default_on_none=False):\n \"\"\"Returns value if exists or set it as the given default\n\n apply_default_on_none: if True, default is set when value is None\n \"\"\"\n value = self.get(item, empty)\n\n # Yaml loader reads empty values as None, would we apply defaults?\n global_apply_default = (\n self.get(\"APPLY_DEFAULT_ON_NONE_FOR_DYNACONF\") is not None\n )\n apply_default = default is not empty and (\n 
value is empty\n or (\n value is None\n and (\n apply_default_on_none is True\n or global_apply_default is True\n )\n )\n )\n\n if apply_default:\n self.set(\n item,\n default,\n loader_identifier=\"setdefault\",\n tomlfy=True,\n )\n return default\n\n return value\n\n def as_dict(self, env=None, internal=False):\n \"\"\"Returns a dictionary with set key and values.\n\n :param env: Str env name, default self.current_env `DEVELOPMENT`\n :param internal: bool - should include dynaconf internal vars?\n \"\"\"\n ctx_mgr = suppress() if env is None else self.using_env(env)\n with ctx_mgr:\n data = self.store.to_dict().copy()\n # if not internal remove internal settings\n if not internal:\n for name in UPPER_DEFAULT_SETTINGS:\n data.pop(name, None)\n return data\n\n to_dict = as_dict # backwards compatibility\n\n def _dotted_get(\n self, dotted_key, default=None, parent=None, cast=None, **kwargs\n ):\n \"\"\"\n Perform dotted key lookups and keep track of where we are.\n :param key: The name of the setting value, will always be upper case\n :param default: In case of not found it will be returned\n :param parent: Is there a pre-loaded parent in a nested data?\n \"\"\"\n split_key = dotted_key.split(\".\")\n name, keys = split_key[0], split_key[1:]\n result = self.get(name, default=default, parent=parent, **kwargs)\n\n # If we've reached the end, or parent key not found, then return result\n if not keys or result == default:\n if cast and cast in converters:\n return get_converter(cast, result, box_settings=self)\n elif cast is True:\n return parse_conf_data(result, tomlfy=True, box_settings=self)\n return result\n\n # If we've still got key elements to traverse, let's do that.\n return self._dotted_get(\n \".\".join(keys), default=default, parent=result, cast=cast, **kwargs\n )\n\n def get(\n self,\n key,\n default=None,\n cast=None,\n fresh=False,\n dotted_lookup=empty,\n parent=None,\n ):\n \"\"\"\n Get a value from settings store, this is the preferred way to access::\n\n >>> from dynaconf import settings\n >>> settings.get('KEY')\n\n :param key: The name of the setting value, will always be upper case\n :param default: In case of not found it will be returned\n :param cast: Should cast in to @int, @float, @bool or @json ?\n :param fresh: Should reload from loaders store before access?\n :param dotted_lookup: Should perform dotted-path lookup?\n :param parent: Is there a pre-loaded parent in a nested data?\n :return: The value if found, default or None\n \"\"\"\n nested_sep = self._store.get(\"NESTED_SEPARATOR_FOR_DYNACONF\")\n if nested_sep and nested_sep in key:\n # turn FOO__bar__ZAZ in `FOO.bar.ZAZ`\n key = key.replace(nested_sep, \".\")\n\n if dotted_lookup is empty:\n dotted_lookup = self._store.get(\"DOTTED_LOOKUP_FOR_DYNACONF\")\n\n if \".\" in key and dotted_lookup:\n return self._dotted_get(\n dotted_key=key,\n default=default,\n cast=cast,\n fresh=fresh,\n parent=parent,\n )\n\n if default is not None:\n # default values should behave exactly Dynaconf parsed values\n if isinstance(default, list):\n default = BoxList(default)\n elif isinstance(default, dict):\n default = DynaBox(default)\n\n key = upperfy(key)\n if key in self._deleted:\n return default\n\n if (\n fresh\n or self._fresh\n or key in getattr(self, \"FRESH_VARS_FOR_DYNACONF\", ())\n ) and key not in UPPER_DEFAULT_SETTINGS:\n self.unset(key)\n self.execute_loaders(key=key)\n\n data = (parent or self.store).get(key, default)\n if cast:\n data = get_converter(cast, data, box_settings=self)\n return data\n\n def 
exists(self, key, fresh=False):\n \"\"\"Check if key exists\n\n :param key: the name of setting variable\n :param fresh: if key should be taken from source directly\n :return: Boolean\n \"\"\"\n key = upperfy(key)\n if key in self._deleted:\n return False\n return self.get(key, fresh=fresh, default=missing) is not missing\n\n def get_fresh(self, key, default=None, cast=None):\n \"\"\"This is a shortcut to `get(key, fresh=True)`. always reload from\n loaders store before getting the var.\n\n :param key: The name of the setting value, will always be upper case\n :param default: In case of not found it will be returned\n :param cast: Should cast in to @int, @float, @bool or @json ?\n :return: The value if found, default or None\n \"\"\"\n return self.get(key, default=default, cast=cast, fresh=True)\n\n def get_environ(self, key, default=None, cast=None):\n \"\"\"Get value from environment variable using os.environ.get\n\n :param key: The name of the setting value, will always be upper case\n :param default: In case of not found it will be returned\n :param cast: Should cast in to @int, @float, @bool or @json ?\n or cast must be true to use cast inference\n :return: The value if found, default or None\n \"\"\"\n key = upperfy(key)\n data = self.environ.get(key, default)\n if data:\n if cast in converters:\n data = get_converter(cast, data, box_settings=self)\n elif cast is True:\n data = parse_conf_data(data, tomlfy=True, box_settings=self)\n return data\n\n def exists_in_environ(self, key):\n \"\"\"Return True if env variable is exported\"\"\"\n return upperfy(key) in self.environ\n\n def as_bool(self, key):\n \"\"\"Partial method for get with bool cast\"\"\"\n return self.get(key, cast=\"@bool\")\n\n def as_int(self, key):\n \"\"\"Partial method for get with int cast\"\"\"\n return self.get(key, cast=\"@int\")\n\n def as_float(self, key):\n \"\"\"Partial method for get with float cast\"\"\"\n return self.get(key, cast=\"@float\")\n\n def as_json(self, key):\n \"\"\"Partial method for get with json cast\"\"\"\n return self.get(key, cast=\"@json\")\n\n @property\n def loaded_envs(self):\n \"\"\"Get or create internal loaded envs list\"\"\"\n if not self._loaded_envs:\n self._loaded_envs = []\n return self._loaded_envs\n\n @loaded_envs.setter\n def loaded_envs(self, value):\n \"\"\"Setter for env list\"\"\"\n self._loaded_envs = value\n\n # compat\n loaded_namespaces = loaded_envs\n\n @property\n def loaded_by_loaders(self):\n \"\"\"Gets the internal mapping of LOADER -> values\"\"\"\n return self._loaded_by_loaders\n\n def from_env(self, env=\"\", keep=False, **kwargs):\n \"\"\"Return a new isolated settings object pointing to specified env.\n\n Example of settings.toml::\n\n [development]\n message = 'This is in dev'\n [other]\n message = 'this is in other env'\n\n Program::\n\n >>> from dynaconf import settings\n >>> print(settings.MESSAGE)\n 'This is in dev'\n >>> print(settings.from_env('other').MESSAGE)\n 'This is in other env'\n # The existing settings object remains the same.\n >>> print(settings.MESSAGE)\n 'This is in dev'\n\n Arguments:\n env {str} -- Env to load (development, production, custom)\n\n Keyword Arguments:\n keep {bool} -- Keep pre-existing values (default: {False})\n kwargs {dict} -- Passed directly to new instance.\n \"\"\"\n cache_key = f\"{env}_{keep}_{kwargs}\"\n if cache_key in self._env_cache:\n return self._env_cache[cache_key]\n\n new_data = {\n key: self.get(key)\n for key in UPPER_DEFAULT_SETTINGS\n if key not in RENAMED_VARS\n }\n\n if self.filter_strategy:\n 
# Retain the filtering strategy when switching environments\n new_data[\"filter_strategy\"] = self.filter_strategy\n\n # This is here for backwards compatibility\n # To be removed on 4.x.x\n default_settings_paths = self.get(\"default_settings_paths\")\n if default_settings_paths: # pragma: no cover\n new_data[\"default_settings_paths\"] = default_settings_paths\n\n if keep:\n # keep existing values from current env\n new_data.update(\n {\n key: value\n for key, value in self.store.to_dict().copy().items()\n if key.isupper() and key not in RENAMED_VARS\n }\n )\n\n new_data.update(kwargs)\n new_data[\"FORCE_ENV_FOR_DYNACONF\"] = env\n new_settings = LazySettings(**new_data)\n self._env_cache[cache_key] = new_settings\n return new_settings\n\n @contextmanager\n def using_env(self, env, clean=True, silent=True, filename=None):\n \"\"\"\n This context manager allows the contextual use of a different env\n Example of settings.toml::\n\n [development]\n message = 'This is in dev'\n [other]\n message = 'this is in other env'\n\n Program::\n\n >>> from dynaconf import settings\n >>> print settings.MESSAGE\n 'This is in dev'\n >>> with settings.using_env('OTHER'):\n ... print settings.MESSAGE\n 'this is in other env'\n\n :param env: Upper case name of env without any _\n :param clean: If preloaded vars should be cleaned\n :param silent: Silence errors\n :param filename: Custom filename to load (optional)\n :return: context\n \"\"\"\n try:\n self.setenv(env, clean=clean, silent=silent, filename=filename)\n yield\n finally:\n if env.lower() != self.ENV_FOR_DYNACONF.lower():\n del self.loaded_envs[-1]\n self.setenv(self.current_env, clean=clean, filename=filename)\n\n # compat\n using_namespace = using_env\n\n @contextmanager\n def fresh(self):\n \"\"\"\n this context manager force the load of a key direct from the store::\n\n $ export DYNACONF_VALUE='Original'\n >>> from dynaconf import settings\n >>> print settings.VALUE\n 'Original'\n $ export DYNACONF_VALUE='Changed Value'\n >>> print settings.VALUE # will not be reloaded from env vars\n 'Original\n >>> with settings.fresh(): # inside this context all is reloaded\n ... 
print settings.VALUE\n 'Changed Value'\n\n an alternative is using `settings.get_fresh(key)`\n\n :return: context\n \"\"\"\n\n self._fresh = True\n yield\n self._fresh = False\n\n @property\n def current_env(self):\n \"\"\"Return the current active env\"\"\"\n\n if self.ENVIRONMENTS_FOR_DYNACONF is False:\n return self.MAIN_ENV_FOR_DYNACONF.lower()\n\n if self.FORCE_ENV_FOR_DYNACONF is not None:\n self.ENV_FOR_DYNACONF = self.FORCE_ENV_FOR_DYNACONF\n return self.FORCE_ENV_FOR_DYNACONF\n\n try:\n return self.loaded_envs[-1]\n except IndexError:\n return self.ENV_FOR_DYNACONF\n\n # compat\n current_namespace = current_env\n\n @property\n def settings_module(self):\n \"\"\"Gets SETTINGS_MODULE variable\"\"\"\n settings_module = parse_conf_data(\n os.environ.get(\n self.ENVVAR_FOR_DYNACONF, self.SETTINGS_FILE_FOR_DYNACONF\n ),\n tomlfy=True,\n box_settings=self,\n )\n if settings_module != getattr(self, \"SETTINGS_MODULE\", None):\n self.set(\"SETTINGS_MODULE\", settings_module)\n\n # This is for backewards compatibility, to be removed on 4.x.x\n if not self.SETTINGS_MODULE and self.get(\"default_settings_paths\"):\n self.SETTINGS_MODULE = self.get(\"default_settings_paths\")\n\n return self.SETTINGS_MODULE\n\n # Backwards compatibility see #169\n settings_file = settings_module\n\n def setenv(self, env=None, clean=True, silent=True, filename=None):\n \"\"\"Used to interactively change the env\n Example of settings.toml::\n\n [development]\n message = 'This is in dev'\n [other]\n message = 'this is in other env'\n\n Program::\n\n >>> from dynaconf import settings\n >>> print settings.MESSAGE\n 'This is in dev'\n >>> with settings.using_env('OTHER'):\n ... print settings.MESSAGE\n 'this is in other env'\n\n :param env: Upper case name of env without any _\n :param clean: If preloaded vars should be cleaned\n :param silent: Silence errors\n :param filename: Custom filename to load (optional)\n :return: context\n \"\"\"\n env = env or self.ENV_FOR_DYNACONF\n\n if not isinstance(env, str):\n raise AttributeError(\"env should be a string\")\n\n env = env.upper()\n\n if env != self.ENV_FOR_DYNACONF:\n self.loaded_envs.append(env)\n else:\n self.loaded_envs = []\n\n if clean:\n self.clean(env=env)\n self.execute_loaders(env=env, silent=silent, filename=filename)\n\n # compat\n namespace = setenv\n\n def clean(self, *args, **kwargs):\n \"\"\"Clean all loaded values to reload when switching envs\"\"\"\n for key in list(self.store.keys()):\n self.unset(key)\n\n def unset(self, key, force=False):\n \"\"\"Unset on all references\n\n :param key: The key to be unset\n :param force: Bypass default checks and force unset\n \"\"\"\n key = upperfy(key.strip())\n if (\n key not in UPPER_DEFAULT_SETTINGS\n and key not in self._defaults\n or force\n ):\n with suppress(KeyError, AttributeError):\n # AttributeError can happen when a LazyValue consumes\n # a previously deleted key\n delattr(self, key)\n del self.store[key]\n\n def unset_all(self, keys, force=False): # pragma: no cover\n \"\"\"Unset based on a list of keys\n\n :param keys: a list of keys\n :param force: Bypass default checks and force unset\n \"\"\"\n for key in keys:\n self.unset(key, force=force)\n\n def _dotted_set(self, dotted_key, value, tomlfy=False, **kwargs):\n \"\"\"Sets dotted keys as nested dictionaries.\n\n Dotted set will always reassign the value, to merge use `@merge` token\n\n Arguments:\n dotted_key {str} -- A traversal name e.g: foo.bar.zaz\n value {Any} -- The value to set to the nested value.\n\n Keyword Arguments:\n tomlfy {bool} 
-- Perform toml parsing (default: {False})\n \"\"\"\n\n split_keys = dotted_key.split(\".\")\n existing_data = self.get(split_keys[0], {})\n new_data = tree = DynaBox(box_settings=self)\n\n for k in split_keys[:-1]:\n tree = tree.setdefault(k, {})\n\n value = parse_conf_data(value, tomlfy=tomlfy, box_settings=self)\n tree[split_keys[-1]] = value\n\n if existing_data:\n new_data = object_merge(\n old=DynaBox({split_keys[0]: existing_data}),\n new=new_data,\n full_path=split_keys,\n )\n self.update(data=new_data, tomlfy=tomlfy, **kwargs)\n\n def set(\n self,\n key,\n value,\n loader_identifier=None,\n tomlfy=False,\n dotted_lookup=empty,\n is_secret=\"DeprecatedArgument\", # noqa\n merge=False,\n ):\n \"\"\"Set a value storing references for the loader\n\n :param key: The key to store\n :param value: The value to store\n :param loader_identifier: Optional loader name e.g: toml, yaml etc.\n :param tomlfy: Bool define if value is parsed by toml (defaults False)\n :param merge: Bool define if existing nested data will be merged.\n \"\"\"\n if dotted_lookup is empty:\n dotted_lookup = self.get(\"DOTTED_LOOKUP_FOR_DYNACONF\")\n\n nested_sep = self.get(\"NESTED_SEPARATOR_FOR_DYNACONF\")\n if nested_sep and nested_sep in key:\n # turn FOO__bar__ZAZ in `FOO.bar.ZAZ`\n key = key.replace(nested_sep, \".\")\n\n if \".\" in key and dotted_lookup is True:\n return self._dotted_set(\n key, value, loader_identifier=loader_identifier, tomlfy=tomlfy\n )\n\n value = parse_conf_data(value, tomlfy=tomlfy, box_settings=self)\n key = upperfy(key.strip())\n existing = getattr(self, key, None)\n\n if getattr(value, \"_dynaconf_del\", None):\n # just in case someone use a `@del` in a first level var.\n self.unset(key, force=True)\n return\n\n if getattr(value, \"_dynaconf_reset\", False): # pragma: no cover\n # just in case someone use a `@reset` in a first level var.\n value = value.unwrap()\n\n if getattr(value, \"_dynaconf_merge_unique\", False):\n # just in case someone use a `@merge_unique` in a first level var\n if existing:\n value = object_merge(existing, value.unwrap(), unique=True)\n else:\n value = value.unwrap()\n\n if getattr(value, \"_dynaconf_merge\", False):\n # just in case someone use a `@merge` in a first level var\n if existing:\n value = object_merge(existing, value.unwrap())\n else:\n value = value.unwrap()\n\n if existing is not None and existing != value:\n # `dynaconf_merge` used in file root `merge=True`\n if merge:\n value = object_merge(existing, value)\n else:\n # `dynaconf_merge` may be used within the key structure\n # Or merge_enabled is set to True\n value = self._merge_before_set(existing, value)\n\n if isinstance(value, dict):\n value = DynaBox(value, box_settings=self)\n\n self.store[key] = value\n self._deleted.discard(key)\n super().__setattr__(key, value)\n\n # set loader identifiers so cleaners know which keys to clean\n if loader_identifier and loader_identifier in self.loaded_by_loaders:\n self.loaded_by_loaders[loader_identifier][key] = value\n elif loader_identifier:\n self.loaded_by_loaders[loader_identifier] = {key: value}\n elif loader_identifier is None:\n # if .set is called without loader identifier it becomes\n # a default value and goes away only when explicitly unset\n self._defaults[key] = value\n\n def update(\n self,\n data=None,\n loader_identifier=None,\n tomlfy=False,\n merge=False,\n is_secret=\"DeprecatedArgument\", # noqa\n dotted_lookup=empty,\n **kwargs,\n ):\n \"\"\"\n Update values in the current settings object without saving in stores::\n\n >>> from 
dynaconf import settings\n >>> print settings.NAME\n 'Bruno'\n >>> settings.update({'NAME': 'John'}, other_value=1)\n >>> print settings.NAME\n 'John'\n >>> print settings.OTHER_VALUE\n 1\n\n :param data: Data to be updated\n :param loader_identifier: Only to be used by custom loaders\n :param tomlfy: Bool define if value is parsed by toml (defaults False)\n :param merge: Bool define if existing nested data will be merged.\n :param kwargs: extra values to update\n :return: None\n \"\"\"\n data = data or {}\n data.update(kwargs)\n for key, value in data.items():\n self.set(\n key,\n value,\n loader_identifier=loader_identifier,\n tomlfy=tomlfy,\n merge=merge,\n dotted_lookup=dotted_lookup,\n )\n\n def _merge_before_set(self, existing, value):\n \"\"\"Merge the new value being set with the existing value before set\"\"\"\n global_merge = getattr(self, \"MERGE_ENABLED_FOR_DYNACONF\", False)\n if isinstance(value, dict):\n local_merge = value.pop(\n \"dynaconf_merge\", value.pop(\"dynaconf_merge_unique\", None)\n )\n if local_merge not in (True, False, None) and not value:\n # In case `dynaconf_merge:` holds value not boolean - ref #241\n value = local_merge\n\n if global_merge or local_merge:\n value = object_merge(existing, value)\n\n if isinstance(value, (list, tuple)):\n local_merge = (\n \"dynaconf_merge\" in value or \"dynaconf_merge_unique\" in value\n )\n if global_merge or local_merge:\n value = list(value)\n unique = False\n if local_merge:\n try:\n value.remove(\"dynaconf_merge\")\n except ValueError: # EAFP\n value.remove(\"dynaconf_merge_unique\")\n unique = True\n value = object_merge(existing, value, unique=unique)\n return value\n\n @property\n def loaders(self): # pragma: no cover\n \"\"\"Return available loaders\"\"\"\n if self.LOADERS_FOR_DYNACONF in (None, 0, \"0\", \"false\", False):\n return []\n\n if not self._loaders:\n self._loaders = self.LOADERS_FOR_DYNACONF\n\n return [importlib.import_module(loader) for loader in self._loaders]\n\n def reload(self, env=None, silent=None): # pragma: no cover\n \"\"\"Clean end Execute all loaders\"\"\"\n self.clean()\n self.execute_loaders(env, silent)\n\n def execute_loaders(\n self, env=None, silent=None, key=None, filename=None, loaders=None\n ):\n \"\"\"Execute all internal and registered loaders\n\n :param env: The environment to load\n :param silent: If loading errors is silenced\n :param key: if provided load a single key\n :param filename: optional custom filename to load\n :param loaders: optional list of loader modules\n \"\"\"\n if key is None:\n default_loader(self, self._defaults)\n\n env = (env or self.current_env).upper()\n silent = silent or self.SILENT_ERRORS_FOR_DYNACONF\n\n if loaders is None:\n self.pre_load(env, silent=silent, key=key)\n settings_loader(\n self, env=env, silent=silent, key=key, filename=filename\n )\n self.load_extra_yaml(env, silent, key) # DEPRECATED\n enable_external_loaders(self)\n\n loaders = self.loaders\n\n for core_loader in loaders:\n core_loader.load(self, env, silent=silent, key=key)\n\n self.load_includes(env, silent=silent, key=key)\n execute_hooks(\"post\", self, env, silent=silent, key=key)\n\n def pre_load(self, env, silent, key):\n \"\"\"Do we have any file to pre-load before main settings file?\"\"\"\n preloads = self.get(\"PRELOAD_FOR_DYNACONF\", [])\n if preloads:\n self.load_file(path=preloads, env=env, silent=silent, key=key)\n\n def load_includes(self, env, silent, key):\n \"\"\"Do we have any nested includes we need to process?\"\"\"\n includes = 
self.get(\"DYNACONF_INCLUDE\", [])\n includes.extend(ensure_a_list(self.get(\"INCLUDES_FOR_DYNACONF\")))\n if includes:\n self.load_file(path=includes, env=env, silent=silent, key=key)\n # ensure env vars are the last thing loaded after all includes\n last_loader = self.loaders and self.loaders[-1]\n if last_loader and last_loader == env_loader:\n last_loader.load(self, env, silent, key)\n\n def load_file(self, path=None, env=None, silent=True, key=None):\n \"\"\"Programmatically load files from ``path``.\n\n :param path: A single filename or a file list\n :param env: Which env to load from file (default current_env)\n :param silent: Should raise errors?\n :param key: Load a single key?\n \"\"\"\n env = (env or self.current_env).upper()\n files = ensure_a_list(path)\n if files:\n already_loaded = set()\n for _filename in files:\n\n if py_loader.try_to_load_from_py_module_name(\n obj=self, name=_filename, silent=True\n ):\n # if it was possible to load from module name\n # continue the loop.\n continue\n\n root_dir = str(self._root_path or os.getcwd())\n\n # Issue #494\n if (\n isinstance(_filename, Path)\n and str(_filename.parent) in root_dir\n ): # pragma: no cover\n filepath = str(_filename)\n else:\n filepath = os.path.join(root_dir, str(_filename))\n\n paths = [\n p\n for p in sorted(glob.glob(filepath))\n if \".local.\" not in p\n ]\n local_paths = [\n p for p in sorted(glob.glob(filepath)) if \".local.\" in p\n ]\n\n # Handle possible *.globs sorted alphanumeric\n for path in paths + local_paths:\n if path in already_loaded: # pragma: no cover\n continue\n settings_loader(\n obj=self,\n env=env,\n silent=silent,\n key=key,\n filename=path,\n )\n already_loaded.add(path)\n\n @property\n def _root_path(self):\n \"\"\"ROOT_PATH_FOR_DYNACONF or the path of first loaded file or '.'\"\"\"\n\n if self.ROOT_PATH_FOR_DYNACONF is not None:\n return self.ROOT_PATH_FOR_DYNACONF\n\n if self._loaded_files: # called once\n root_path = os.path.dirname(self._loaded_files[0])\n self.set(\"ROOT_PATH_FOR_DYNACONF\", root_path)\n return root_path\n\n def load_extra_yaml(self, env, silent, key):\n \"\"\"This is deprecated, kept for compat\n\n .. deprecated:: 1.0.0\n Use multiple settings or INCLUDES_FOR_DYNACONF files instead.\n \"\"\"\n if self.get(\"YAML\") is not None:\n warnings.warn(\n \"The use of YAML var is deprecated, please define multiple \"\n \"filepaths instead: \"\n \"e.g: SETTINGS_FILE_FOR_DYNACONF = \"\n \"'settings.py,settings.yaml,settings.toml' or \"\n \"INCLUDES_FOR_DYNACONF=['path.toml', 'folder/*']\"\n )\n yaml_loader.load(\n self,\n env=env,\n filename=self.find_file(self.get(\"YAML\")),\n silent=silent,\n key=key,\n )\n\n def path_for(self, *args):\n \"\"\"Path containing _root_path\"\"\"\n if args and args[0].startswith(os.path.sep):\n return os.path.join(*args)\n return os.path.join(self._root_path or os.getcwd(), *args)\n\n def find_file(self, *args, **kwargs):\n kwargs.setdefault(\"project_root\", self._root_path)\n kwargs.setdefault(\n \"skip_files\", self.get(\"SKIP_FILES_FOR_DYNACONF\", [])\n )\n return find_file(*args, **kwargs)\n\n def flag(self, key, env=None):\n \"\"\"Feature flagging system\n write flags to redis\n $ dynaconf write redis -s DASHBOARD=1 -e premiumuser\n meaning: Any premium user has DASHBOARD feature enabled\n\n In your program do::\n\n # premium user has access to dashboard?\n >>> if settings.flag('dashboard', 'premiumuser'):\n ... 
activate_dashboard()\n\n The value is ensured to be loaded fresh from redis server\n\n It also works with file settings but the recommended is redis\n as the data can be loaded once it is updated.\n\n :param key: The flag name\n :param env: The env to look for\n \"\"\"\n env = env or self.ENVVAR_PREFIX_FOR_DYNACONF or \"DYNACONF\"\n with self.using_env(env):\n value = self.get_fresh(key)\n return value is True or value in true_values\n\n def populate_obj(self, obj, keys=None, ignore=None):\n \"\"\"Given the `obj` populate it using self.store items.\n\n :param obj: An object to be populated, a class instance.\n :param keys: A list of keys to be included.\n :param ignore: A list of keys to be excluded.\n \"\"\"\n keys = keys or self.keys()\n for key in keys:\n key = upperfy(key)\n if ignore and key in ignore:\n continue\n value = self.get(key, empty)\n if value is not empty:\n setattr(obj, key, value)\n\n def dynaconf_clone(self):\n \"\"\"Clone the current settings object.\"\"\"\n try:\n return copy.deepcopy(self)\n except TypeError:\n # can't deepcopy settings object because of module object\n # being set as value in the settings dict\n new_data = self.to_dict(internal=True)\n new_data[\"dynaconf_skip_loaders\"] = True\n new_data[\"dynaconf_skip_validators\"] = True\n return Settings(**new_data)\n\n @property\n def dynaconf(self):\n \"\"\"A proxy to access internal methods and attributes\n\n Starting in 3.0.0 Dynaconf now allows first level lower case\n keys that are not reserved keyword, so this is a proxy to\n internal methods and attrs.\n \"\"\"\n\n class AttrProxy:\n def __init__(self, obj):\n self.obj = obj\n\n def __getattr__(self, name):\n return getattr(self.obj, f\"dynaconf_{name}\")\n\n return AttrProxy(self)\n\n @property\n def logger(self): # pragma: no cover\n \"\"\"backwards compatibility with pre 3.0 loaders\n In dynaconf 3.0.0 logger and debug messages has been removed.\n \"\"\"\n warnings.warn(\n \"logger and DEBUG messages has been removed on dynaconf 3.0.0\"\n )\n import logging # noqa\n\n return logging.getLogger(\"dynaconf\")\n\n def is_overridden(self, setting): # noqa\n \"\"\"This is to provide Django DJDT support: issue 382\"\"\"\n return False\n\n\n\"\"\"Upper case default settings\"\"\"\nUPPER_DEFAULT_SETTINGS = [k for k in dir(default_settings) if k.isupper()]\n\n\"\"\"Attributes created on Settings before 3.0.0\"\"\"\nRESERVED_ATTRS = (\n [\n item[0]\n for item in inspect.getmembers(LazySettings)\n if not item[0].startswith(\"__\")\n ]\n + [\n item[0]\n for item in inspect.getmembers(Settings)\n if not item[0].startswith(\"__\")\n ]\n + [\n \"_defaults\",\n \"_deleted\",\n \"_env_cache\",\n \"_fresh\",\n \"_kwargs\",\n \"_loaded_by_loaders\",\n \"_loaded_envs\",\n \"_loaded_hooks\",\n \"_loaded_py_modules\",\n \"_loaded_files\",\n \"_loaders\",\n \"_not_installed_warnings\",\n \"_store\",\n \"_warn_dynaconf_global_settings\",\n \"_should_load_dotenv\",\n \"environ\",\n \"SETTINGS_MODULE\",\n \"filter_strategy\",\n \"validators\",\n \"_validate_only\",\n \"_validate_exclude\",\n \"_validate_only_current_env\",\n ]\n)\n", "path": "dynaconf/base.py" } ]
diff --git a/dynaconf/base.py b/dynaconf/base.py index 51e09a404..2858b5492 100644 --- a/dynaconf/base.py +++ b/dynaconf/base.py @@ -694,6 +694,7 @@ def current_env(self): return self.MAIN_ENV_FOR_DYNACONF.lower() if self.FORCE_ENV_FOR_DYNACONF is not None: + self.ENV_FOR_DYNACONF = self.FORCE_ENV_FOR_DYNACONF return self.FORCE_ENV_FOR_DYNACONF try: diff --git a/tests_functional/issues/728_pytest/Makefile b/tests_functional/issues/728_pytest/Makefile new file mode 100644 index 000000000..ebe3e7056 --- /dev/null +++ b/tests_functional/issues/728_pytest/Makefile @@ -0,0 +1,4 @@ +.PHONY: test + +test: + pytest -sv tests.py diff --git a/tests_functional/issues/728_pytest/config.py b/tests_functional/issues/728_pytest/config.py new file mode 100644 index 000000000..14435a787 --- /dev/null +++ b/tests_functional/issues/728_pytest/config.py @@ -0,0 +1,9 @@ +from __future__ import annotations + +from dynaconf import Dynaconf + +settings = Dynaconf( + envvar_prefix="ISSUE728", + settings_files=["settings.toml", ".secrets.toml"], + environments=True, +) diff --git a/tests_functional/issues/728_pytest/settings.toml b/tests_functional/issues/728_pytest/settings.toml new file mode 100644 index 000000000..21cd18f8c --- /dev/null +++ b/tests_functional/issues/728_pytest/settings.toml @@ -0,0 +1,8 @@ +[default] +name = "default name" + +[development] +name = "development name" + +[testing] +name = "testing name" diff --git a/tests_functional/issues/728_pytest/tests.py b/tests_functional/issues/728_pytest/tests.py new file mode 100644 index 000000000..a2cad668e --- /dev/null +++ b/tests_functional/issues/728_pytest/tests.py @@ -0,0 +1,16 @@ +from __future__ import annotations + +import pytest +from config import settings + + [email protected](scope="session", autouse=True) +def set_test_settings(): + settings.configure(FORCE_ENV_FOR_DYNACONF="testing") + assert settings.current_env == "testing" + + +def test_running_on_testing_environment(): + assert settings.current_env == "testing" + assert settings.ENV_FOR_DYNACONF == "testing" + assert settings.NAME == "testing name"
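For reference, the behavior this patch establishes can also be checked outside the pytest fixture. A minimal sketch, assuming dynaconf 3.x and the settings.toml defined in the diff above (no other files or options are assumed):

from dynaconf import Dynaconf

settings = Dynaconf(
    settings_files=["settings.toml"],  # contains a [testing] section
    environments=True,
)

# Force the active environment at runtime, as the session fixture does.
settings.configure(FORCE_ENV_FOR_DYNACONF="testing")

# With the one-line fix to current_env, the forced env is written back
# into ENV_FOR_DYNACONF, so the two values stay in sync.
assert settings.current_env == "testing"
assert settings.ENV_FOR_DYNACONF == "testing"

Before the fix, the second assertion could fail, because ENV_FOR_DYNACONF still held the default environment name even while current_env reported the forced one.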
internetarchive__openlibrary-6898
Sorting buttons in Reading log don't work as intended
When you press sort by "Date Added (oldest)", the page refreshes and the sorting is applied, but the "Date Added (oldest)" button doesn't turn green and become unclickable, and the "Date Added (newest)" button doesn't become clickable.

### Evidence / Screenshot (if possible)
In this screenshot, the books are sorted oldest first:
![image](https://user-images.githubusercontent.com/58372007/185414834-58610de2-e5f4-43a3-8b91-238b5195f81d.png)

### Relevant url?
<!-- `https://openlibrary.org/...` -->

### Steps to Reproduce
1. Go to your Reading log
2. Press sort by "Date Added (oldest)"

* Actual: the list is re-sorted, but the button states don't change. "Date Added (oldest)" remains a clickable link and "Date Added (newest)" still appears active.
* Expected: "Date Added (oldest)" turns green and becomes unclickable, and "Date Added (newest)" becomes a clickable link.

### Details

- **Logged in (Y/N)?** Y
- **Browser type/version?** Firefox 103 (64-bit)
- **Operating system?** Linux Mint and Windows 8.1
- **Environment (prod/dev/local)?** prod

### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->

### Related files
<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->

### Stakeholders
<!-- @ tag stakeholders of this bug -->
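The expected toggle behavior amounts to a small piece of page state. A hypothetical sketch for illustration only (the function and labels below are not Open Library's actual template code):

def sort_button_states(current_order: str) -> dict:
    # current_order is the order the page was rendered with:
    # 'desc' corresponds to "Date Added (newest)",
    # 'asc' corresponds to "Date Added (oldest)".
    # The active option should be styled green and not be a link;
    # the other option should remain a clickable link.
    return {
        'Date Added (newest)': {'active': current_order == 'desc'},
        'Date Added (oldest)': {'active': current_order == 'asc'},
    }

# The reported bug: the page always rendered as if current_order were
# 'desc', no matter which sort link the patron clicked.
assert sort_button_states('asc')['Date Added (oldest)']['active'] is True

The files below show where that state gets lost on the server side.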
[ { "content": "import json\nimport web\n\nfrom infogami.utils import delegate\nfrom infogami.utils.view import public, safeint, render\n\nfrom openlibrary import accounts\nfrom openlibrary.utils import extract_numeric_id_from_olid\nfrom openlibrary.core.booknotes import Booknotes\nfrom openlibrary.core.bookshelves import Bookshelves\nfrom openlibrary.core.lending import add_availability\nfrom openlibrary.core.observations import Observations, convert_observation_ids\nfrom openlibrary.core.sponsorships import get_sponsored_editions\nfrom openlibrary.plugins.upstream import borrow\n\n\nRESULTS_PER_PAGE = 25\n\n\nclass my_books_redirect(delegate.page):\n path = \"/people/([^/]+)/books\"\n\n def GET(self, username):\n raise web.seeother('/people/%s/books/want-to-read' % username)\n\n\nclass my_books_view(delegate.page):\n path = r\"/people/([^/]+)/books/([a-zA-Z_-]+)\"\n\n def GET(self, username, key):\n i = web.input(page=1, sort='desc')\n return MyBooksTemplate(username, key).render(page=i.page, sort=i.sort)\n\n\nclass public_my_books_json(delegate.page):\n encoding = \"json\"\n path = \"/people/([^/]+)/books/([a-zA-Z_-]+)\"\n\n def GET(self, username, key='want-to-read'):\n i = web.input(page=1, limit=5000)\n page = safeint(i.page, 1)\n limit = safeint(i.limit, 5000)\n # check if user's reading log is public\n user = web.ctx.site.get('/people/%s' % username)\n if not user:\n return delegate.RawText(\n json.dumps({'error': 'User %s not found' % username}),\n content_type=\"application/json\",\n )\n is_public = user.preferences().get('public_readlog', 'no') == 'yes'\n logged_in_user = accounts.get_current_user()\n if (\n is_public\n or logged_in_user\n and logged_in_user.key.split('/')[-1] == username\n ):\n readlog = ReadingLog(user=user)\n books = readlog.get_works(key, page, limit)\n records_json = [\n {\n 'work': {\n 'title': w.get('title'),\n 'key': w.key,\n 'author_keys': [\n a.author.get(\"key\")\n for a in w.get('authors', [])\n if a.author\n ],\n 'author_names': [\n str(a.author.name)\n for a in w.get('authors', [])\n if type(a.author) is not str\n ],\n 'first_publish_year': w.first_publish_year or None,\n 'lending_edition_s': (\n w._solr_data\n and w._solr_data.get('lending_edition_s')\n or None\n ),\n 'edition_key': (\n w._solr_data and w._solr_data.get('edition_key') or None\n ),\n 'cover_id': (\n w._solr_data and w._solr_data.get('cover_id') or None\n ),\n 'cover_edition_key': (\n w._solr_data\n and w._solr_data.get('cover_edition_key')\n or None\n ),\n },\n 'logged_edition': w.get('logged_edition') or None,\n 'logged_date': (\n w.get('logged_date').strftime(\"%Y/%m/%d, %H:%M:%S\")\n if w.get('logged_date')\n else None\n ),\n }\n for w in books\n ]\n return delegate.RawText(\n json.dumps({'page': page, 'reading_log_entries': records_json}),\n content_type=\"application/json\",\n )\n else:\n return delegate.RawText(\n json.dumps({'error': 'Shelf %s not found or not accessible' % key}),\n content_type=\"application/json\",\n )\n\n\nclass readinglog_stats(delegate.page):\n path = \"/people/([^/]+)/books/([a-zA-Z_-]+)/stats\"\n\n def GET(self, username, key='loans'):\n user = web.ctx.site.get('/people/%s' % username)\n if not user:\n return render.notfound(\"User %s\" % username, create=False)\n\n cur_user = accounts.get_current_user()\n if not cur_user or cur_user.key.split('/')[-1] != username:\n return render.permission_denied(web.ctx.path, 'Permission Denied')\n\n readlog = ReadingLog(user=user)\n works = readlog.get_works(key, page=1, limit=2000)\n works_json = [\n {\n 'title': 
w.get('title'),\n 'key': w.key,\n 'author_keys': [a.author.key for a in w.get('authors', [])],\n 'first_publish_year': w.first_publish_year or None,\n 'subjects': w.get('subjects'),\n 'subject_people': w.get('subject_people'),\n 'subject_places': w.get('subject_places'),\n 'subject_times': w.get('subject_times'),\n }\n for w in works\n ]\n author_keys = {a for work in works_json for a in work['author_keys']}\n authors_json = [\n {\n 'key': a.key,\n 'name': a.name,\n 'birth_date': a.get('birth_date'),\n }\n for a in web.ctx.site.get_many(list(author_keys))\n ]\n return render['account/readinglog_stats'](\n json.dumps(works_json),\n json.dumps(authors_json),\n len(works_json),\n user.key,\n user.displayname,\n web.ctx.path.rsplit('/', 1)[0],\n key,\n lang=web.ctx.lang,\n )\n\n\n@public\ndef get_public_patron_account(username):\n user = web.ctx.site.get('/people/%s' % username)\n return ReadingLog(user=user)\n\n\nclass MyBooksTemplate:\n # Reading log shelves\n READING_LOG_KEYS = {\"currently-reading\", \"want-to-read\", \"already-read\"}\n\n # Keys that can be accessed when not logged in\n PUBLIC_KEYS = READING_LOG_KEYS | {\"lists\", \"list\"}\n\n # Keys that are only accessible when logged in\n # unioned with the public keys\n ALL_KEYS = PUBLIC_KEYS | {\n \"loans\",\n \"waitlist\",\n \"sponsorships\",\n \"notes\",\n \"observations\",\n \"imports\",\n }\n\n def __init__(self, username, key):\n self.username = username\n self.user = web.ctx.site.get('/people/%s' % self.username)\n self.key = key\n self.readlog = ReadingLog(user=self.user)\n self.lists = self.readlog.lists\n self.counts = self.readlog.reading_log_counts\n\n def render(self, page=1, sort='desc', list=None):\n if not self.user:\n return render.notfound(\"User %s\" % self.username, create=False)\n logged_in_user = accounts.get_current_user()\n is_logged_in_user = (\n logged_in_user and logged_in_user.key.split('/')[-1] == self.username\n )\n is_public = self.user.preferences().get('public_readlog', 'no') == 'yes'\n\n data = None\n\n if is_logged_in_user and self.key in self.ALL_KEYS:\n self.counts.update(PatronBooknotes.get_counts(self.username))\n sponsorships = get_sponsored_editions(self.user)\n self.counts['sponsorships'] = len(sponsorships)\n\n if self.key == 'sponsorships':\n data = (\n add_availability(\n web.ctx.site.get_many(\n [\n '/books/%s' % doc['openlibrary_edition']\n for doc in sponsorships\n ]\n )\n )\n if sponsorships\n else None\n )\n elif self.key in self.READING_LOG_KEYS:\n data = add_availability(\n self.readlog.get_works(\n self.key, page=page, sort='created', sort_order=sort\n ),\n mode=\"openlibrary_work\",\n )\n elif self.key == 'list':\n data = list\n\n else:\n data = self._prepare_data(logged_in_user)\n elif self.key in self.READING_LOG_KEYS and is_public:\n data = add_availability(\n self.readlog.get_works(\n self.key, page=page, sort='created', sort_order=sort\n ),\n mode=\"openlibrary_work\",\n )\n\n if data is not None:\n return render['account/books'](\n data,\n self.key,\n self.counts,\n logged_in_user=logged_in_user,\n user=self.user,\n lists=self.lists,\n public=is_public,\n owners_page=is_logged_in_user,\n )\n\n raise web.seeother(self.user.key)\n\n def _prepare_data(\n self,\n logged_in_user,\n page=1,\n username=None,\n ):\n if self.key == 'loans':\n logged_in_user.update_loan_status()\n return borrow.get_loans(logged_in_user)\n elif self.key == 'waitlist':\n return {}\n elif self.key == 'lists':\n if username:\n return web.ctx.site.get('/people/%s' % username)\n return self.user\n elif 
self.key == 'notes':\n return PatronBooknotes(self.user).get_notes(page=page)\n elif self.key == 'observations':\n return PatronBooknotes(self.user).get_observations(page=page)\n elif self.key == 'imports':\n return {}\n\n return None\n\n\n@public\ndef get_mybooks_template(username, key, list):\n return MyBooksTemplate(username, key).render(list=list)\n\n\nclass ReadingLog:\n\n \"\"\"Manages the user's account page books (reading log, waitlists, loans)\"\"\"\n\n def __init__(self, user=None):\n self.user = user or accounts.get_current_user()\n self.KEYS = {\n 'waitlists': self.get_waitlisted_editions,\n 'loans': self.get_loans,\n 'want-to-read': self.get_want_to_read,\n 'currently-reading': self.get_currently_reading,\n 'already-read': self.get_already_read,\n }\n\n @property\n def lists(self):\n return self.user.get_lists()\n\n @property\n def sponsorship_counts(self):\n return {'sponsorships': len(get_sponsored_editions(self.user))}\n\n @property\n def booknotes_counts(self):\n return PatronBooknotes.get_counts(self.user.get_username())\n\n @property\n def get_sidebar_counts(self):\n counts = self.reading_log_counts\n counts.update(self.sponsorship_counts)\n counts.update(self.booknotes_counts)\n return counts\n\n @property\n def reading_log_counts(self):\n counts = (\n Bookshelves.count_total_books_logged_by_user_per_shelf(\n self.user.get_username()\n )\n if self.user.get_username()\n else {}\n )\n return {\n 'want-to-read': counts.get(\n Bookshelves.PRESET_BOOKSHELVES['Want to Read'], 0\n ),\n 'currently-reading': counts.get(\n Bookshelves.PRESET_BOOKSHELVES['Currently Reading'], 0\n ),\n 'already-read': counts.get(\n Bookshelves.PRESET_BOOKSHELVES['Already Read'], 0\n ),\n }\n\n def get_loans(self):\n return borrow.get_loans(self.user)\n\n def get_waitlist_summary(self):\n return self.user.get_waitinglist()\n\n def get_waitlisted_editions(self):\n \"\"\"Gets a list of records corresponding to a user's waitlisted\n editions, fetches all the editions, and then inserts the data\n from each waitlist record (e.g. 
position in line) into the\n corresponding edition\n \"\"\"\n waitlists = self.user.get_waitinglist()\n keyed_waitlists = {w['identifier']: w for w in waitlists}\n ocaids = [i['identifier'] for i in waitlists]\n edition_keys = web.ctx.site.things({\"type\": \"/type/edition\", \"ocaid\": ocaids})\n editions = web.ctx.site.get_many(edition_keys)\n for i in range(len(editions)):\n # insert the waitlist_entry corresponding to this edition\n editions[i].waitlist_record = keyed_waitlists[editions[i].ocaid]\n return editions\n\n def process_logged_books(self, logged_books):\n work_ids = ['/works/OL%sW' % i['work_id'] for i in logged_books]\n works = web.ctx.site.get_many(work_ids)\n for i in range(len(works)):\n # insert the logged edition (if present) and logged date\n works[i].logged_date = logged_books[i]['created']\n works[i].logged_edition = (\n '/books/OL%sM' % logged_books[i]['edition_id']\n if logged_books[i]['edition_id']\n else ''\n )\n return works\n\n def get_want_to_read(\n self, page=1, limit=RESULTS_PER_PAGE, sort='created', sort_order='desc'\n ):\n return self.process_logged_books(\n Bookshelves.get_users_logged_books(\n self.user.get_username(),\n bookshelf_id=Bookshelves.PRESET_BOOKSHELVES['Want to Read'],\n page=page,\n limit=limit,\n sort=sort + ' ' + sort_order,\n )\n )\n\n def get_currently_reading(\n self, page=1, limit=RESULTS_PER_PAGE, sort='created', sort_order='desc'\n ):\n return self.process_logged_books(\n Bookshelves.get_users_logged_books(\n self.user.get_username(),\n bookshelf_id=Bookshelves.PRESET_BOOKSHELVES['Currently Reading'],\n page=page,\n limit=limit,\n sort=sort + ' ' + sort_order,\n )\n )\n\n def get_already_read(\n self, page=1, limit=RESULTS_PER_PAGE, sort='created', sort_order='desc'\n ):\n return self.process_logged_books(\n Bookshelves.get_users_logged_books(\n self.user.get_username(),\n bookshelf_id=Bookshelves.PRESET_BOOKSHELVES['Already Read'],\n page=page,\n limit=limit,\n sort=sort + ' ' + sort_order,\n )\n )\n\n def get_works(\n self, key, page=1, limit=RESULTS_PER_PAGE, sort='created', sort_order='desc'\n ):\n \"\"\"\n :rtype: list of openlibrary.plugins.upstream.models.Work\n \"\"\"\n key = key.lower()\n if key in self.KEYS:\n return self.KEYS[key](\n page=page, limit=limit, sort=sort, sort_order=sort_order\n )\n else: # must be a list or invalid page!\n # works = web.ctx.site.get_many([ ... 
])\n raise\n\n\n@public\ndef get_read_status(work_key, username):\n work_id = extract_numeric_id_from_olid(work_key.split('/')[-1])\n return Bookshelves.get_users_read_status_of_work(username, work_id)\n\n\n@public\ndef add_read_statuses(username, works):\n work_ids = [extract_numeric_id_from_olid(work.key.split('/')[-1]) for work in works]\n results = Bookshelves.get_users_read_status_of_works(username, work_ids)\n results_map = {}\n for result in results:\n results_map[f\"OL{result['work_id']}W\"] = result['bookshelf_id']\n for work in works:\n work_olid = work.key.split('/')[-1]\n work['readinglog'] = results_map.get(work_olid, None)\n return works\n\n\nclass PatronBooknotes:\n \"\"\"Manages the patron's book notes and observations\"\"\"\n\n def __init__(self, user):\n user = user\n self.username = user.key.split('/')[-1]\n\n def get_notes(self, limit=RESULTS_PER_PAGE, page=1):\n notes = Booknotes.get_notes_grouped_by_work(\n self.username, limit=limit, page=page\n )\n\n for entry in notes:\n entry['work_key'] = f\"/works/OL{entry['work_id']}W\"\n entry['work'] = self._get_work(entry['work_key'])\n entry['work_details'] = self._get_work_details(entry['work'])\n entry['notes'] = {i['edition_id']: i['notes'] for i in entry['notes']}\n entry['editions'] = {\n k: web.ctx.site.get(f'/books/OL{k}M')\n for k in entry['notes']\n if k != Booknotes.NULL_EDITION_VALUE\n }\n return notes\n\n def get_observations(self, limit=RESULTS_PER_PAGE, page=1):\n observations = Observations.get_observations_grouped_by_work(\n self.username, limit=limit, page=page\n )\n\n for entry in observations:\n entry['work_key'] = f\"/works/OL{entry['work_id']}W\"\n entry['work'] = self._get_work(entry['work_key'])\n entry['work_details'] = self._get_work_details(entry['work'])\n ids = {}\n for item in entry['observations']:\n ids[item['observation_type']] = item['observation_values']\n entry['observations'] = convert_observation_ids(ids)\n return observations\n\n def _get_work(self, work_key):\n return web.ctx.site.get(work_key)\n\n def _get_work_details(self, work):\n author_keys = [a.author.key for a in work.get('authors', [])]\n\n return {\n 'cover_url': (\n work.get_cover_url('S')\n or 'https://openlibrary.org/images/icons/avatar_book-sm.png'\n ),\n 'title': work.get('title'),\n 'authors': [a.name for a in web.ctx.site.get_many(author_keys)],\n 'first_publish_year': work.first_publish_year or None,\n }\n\n @classmethod\n def get_counts(cls, username):\n return {\n 'notes': Booknotes.count_works_with_notes_by_user(username),\n 'observations': Observations.count_distinct_observations(username),\n }\n", "path": "openlibrary/plugins/upstream/mybooks.py" } ]
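The functional difference between the snapshot above and the patched one below is confined to MyBooksTemplate.render(): the sort order parsed from the query string was used to fetch the works but was never forwarded to the account/books template, so the template had no way to mark the active sort button. Abridged comparison (arguments condensed onto fewer lines; everything else in the call is unchanged):

# before: the template is rendered without the current sort order
return render['account/books'](
    data, self.key, self.counts,
    logged_in_user=logged_in_user, user=self.user, lists=self.lists,
    public=is_public, owners_page=is_logged_in_user,
)

# after: the parsed order is threaded through so the template can
# style the selected option as active
return render['account/books'](
    data, self.key, self.counts,
    logged_in_user=logged_in_user, user=self.user, lists=self.lists,
    public=is_public, owners_page=is_logged_in_user,
    sort_order=sort,
)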
[ { "content": "import json\nimport web\n\nfrom infogami.utils import delegate\nfrom infogami.utils.view import public, safeint, render\n\nfrom openlibrary import accounts\nfrom openlibrary.utils import extract_numeric_id_from_olid\nfrom openlibrary.core.booknotes import Booknotes\nfrom openlibrary.core.bookshelves import Bookshelves\nfrom openlibrary.core.lending import add_availability\nfrom openlibrary.core.observations import Observations, convert_observation_ids\nfrom openlibrary.core.sponsorships import get_sponsored_editions\nfrom openlibrary.plugins.upstream import borrow\n\n\nRESULTS_PER_PAGE = 25\n\n\nclass my_books_redirect(delegate.page):\n path = \"/people/([^/]+)/books\"\n\n def GET(self, username):\n raise web.seeother('/people/%s/books/want-to-read' % username)\n\n\nclass my_books_view(delegate.page):\n path = r\"/people/([^/]+)/books/([a-zA-Z_-]+)\"\n\n def GET(self, username, key):\n i = web.input(page=1, sort='desc')\n return MyBooksTemplate(username, key).render(page=i.page, sort=i.sort)\n\n\nclass public_my_books_json(delegate.page):\n encoding = \"json\"\n path = \"/people/([^/]+)/books/([a-zA-Z_-]+)\"\n\n def GET(self, username, key='want-to-read'):\n i = web.input(page=1, limit=5000)\n page = safeint(i.page, 1)\n limit = safeint(i.limit, 5000)\n # check if user's reading log is public\n user = web.ctx.site.get('/people/%s' % username)\n if not user:\n return delegate.RawText(\n json.dumps({'error': 'User %s not found' % username}),\n content_type=\"application/json\",\n )\n is_public = user.preferences().get('public_readlog', 'no') == 'yes'\n logged_in_user = accounts.get_current_user()\n if (\n is_public\n or logged_in_user\n and logged_in_user.key.split('/')[-1] == username\n ):\n readlog = ReadingLog(user=user)\n books = readlog.get_works(key, page, limit)\n records_json = [\n {\n 'work': {\n 'title': w.get('title'),\n 'key': w.key,\n 'author_keys': [\n a.author.get(\"key\")\n for a in w.get('authors', [])\n if a.author\n ],\n 'author_names': [\n str(a.author.name)\n for a in w.get('authors', [])\n if type(a.author) is not str\n ],\n 'first_publish_year': w.first_publish_year or None,\n 'lending_edition_s': (\n w._solr_data\n and w._solr_data.get('lending_edition_s')\n or None\n ),\n 'edition_key': (\n w._solr_data and w._solr_data.get('edition_key') or None\n ),\n 'cover_id': (\n w._solr_data and w._solr_data.get('cover_id') or None\n ),\n 'cover_edition_key': (\n w._solr_data\n and w._solr_data.get('cover_edition_key')\n or None\n ),\n },\n 'logged_edition': w.get('logged_edition') or None,\n 'logged_date': (\n w.get('logged_date').strftime(\"%Y/%m/%d, %H:%M:%S\")\n if w.get('logged_date')\n else None\n ),\n }\n for w in books\n ]\n return delegate.RawText(\n json.dumps({'page': page, 'reading_log_entries': records_json}),\n content_type=\"application/json\",\n )\n else:\n return delegate.RawText(\n json.dumps({'error': 'Shelf %s not found or not accessible' % key}),\n content_type=\"application/json\",\n )\n\n\nclass readinglog_stats(delegate.page):\n path = \"/people/([^/]+)/books/([a-zA-Z_-]+)/stats\"\n\n def GET(self, username, key='loans'):\n user = web.ctx.site.get('/people/%s' % username)\n if not user:\n return render.notfound(\"User %s\" % username, create=False)\n\n cur_user = accounts.get_current_user()\n if not cur_user or cur_user.key.split('/')[-1] != username:\n return render.permission_denied(web.ctx.path, 'Permission Denied')\n\n readlog = ReadingLog(user=user)\n works = readlog.get_works(key, page=1, limit=2000)\n works_json = [\n {\n 'title': 
w.get('title'),\n 'key': w.key,\n 'author_keys': [a.author.key for a in w.get('authors', [])],\n 'first_publish_year': w.first_publish_year or None,\n 'subjects': w.get('subjects'),\n 'subject_people': w.get('subject_people'),\n 'subject_places': w.get('subject_places'),\n 'subject_times': w.get('subject_times'),\n }\n for w in works\n ]\n author_keys = {a for work in works_json for a in work['author_keys']}\n authors_json = [\n {\n 'key': a.key,\n 'name': a.name,\n 'birth_date': a.get('birth_date'),\n }\n for a in web.ctx.site.get_many(list(author_keys))\n ]\n return render['account/readinglog_stats'](\n json.dumps(works_json),\n json.dumps(authors_json),\n len(works_json),\n user.key,\n user.displayname,\n web.ctx.path.rsplit('/', 1)[0],\n key,\n lang=web.ctx.lang,\n )\n\n\n@public\ndef get_public_patron_account(username):\n user = web.ctx.site.get('/people/%s' % username)\n return ReadingLog(user=user)\n\n\nclass MyBooksTemplate:\n # Reading log shelves\n READING_LOG_KEYS = {\"currently-reading\", \"want-to-read\", \"already-read\"}\n\n # Keys that can be accessed when not logged in\n PUBLIC_KEYS = READING_LOG_KEYS | {\"lists\", \"list\"}\n\n # Keys that are only accessible when logged in\n # unioned with the public keys\n ALL_KEYS = PUBLIC_KEYS | {\n \"loans\",\n \"waitlist\",\n \"sponsorships\",\n \"notes\",\n \"observations\",\n \"imports\",\n }\n\n def __init__(self, username, key):\n self.username = username\n self.user = web.ctx.site.get('/people/%s' % self.username)\n self.key = key\n self.readlog = ReadingLog(user=self.user)\n self.lists = self.readlog.lists\n self.counts = self.readlog.reading_log_counts\n\n def render(self, page=1, sort='desc', list=None):\n if not self.user:\n return render.notfound(\"User %s\" % self.username, create=False)\n logged_in_user = accounts.get_current_user()\n is_logged_in_user = (\n logged_in_user and logged_in_user.key.split('/')[-1] == self.username\n )\n is_public = self.user.preferences().get('public_readlog', 'no') == 'yes'\n\n data = None\n\n if is_logged_in_user and self.key in self.ALL_KEYS:\n self.counts.update(PatronBooknotes.get_counts(self.username))\n sponsorships = get_sponsored_editions(self.user)\n self.counts['sponsorships'] = len(sponsorships)\n\n if self.key == 'sponsorships':\n data = (\n add_availability(\n web.ctx.site.get_many(\n [\n '/books/%s' % doc['openlibrary_edition']\n for doc in sponsorships\n ]\n )\n )\n if sponsorships\n else None\n )\n elif self.key in self.READING_LOG_KEYS:\n data = add_availability(\n self.readlog.get_works(\n self.key, page=page, sort='created', sort_order=sort\n ),\n mode=\"openlibrary_work\",\n )\n elif self.key == 'list':\n data = list\n\n else:\n data = self._prepare_data(logged_in_user)\n elif self.key in self.READING_LOG_KEYS and is_public:\n data = add_availability(\n self.readlog.get_works(\n self.key, page=page, sort='created', sort_order=sort\n ),\n mode=\"openlibrary_work\",\n )\n\n if data is not None:\n return render['account/books'](\n data,\n self.key,\n self.counts,\n logged_in_user=logged_in_user,\n user=self.user,\n lists=self.lists,\n public=is_public,\n owners_page=is_logged_in_user,\n sort_order=sort,\n )\n\n raise web.seeother(self.user.key)\n\n def _prepare_data(\n self,\n logged_in_user,\n page=1,\n username=None,\n ):\n if self.key == 'loans':\n logged_in_user.update_loan_status()\n return borrow.get_loans(logged_in_user)\n elif self.key == 'waitlist':\n return {}\n elif self.key == 'lists':\n if username:\n return web.ctx.site.get('/people/%s' % username)\n return 
self.user\n elif self.key == 'notes':\n return PatronBooknotes(self.user).get_notes(page=page)\n elif self.key == 'observations':\n return PatronBooknotes(self.user).get_observations(page=page)\n elif self.key == 'imports':\n return {}\n\n return None\n\n\n@public\ndef get_mybooks_template(username, key, list):\n return MyBooksTemplate(username, key).render(list=list)\n\n\nclass ReadingLog:\n\n \"\"\"Manages the user's account page books (reading log, waitlists, loans)\"\"\"\n\n def __init__(self, user=None):\n self.user = user or accounts.get_current_user()\n self.KEYS = {\n 'waitlists': self.get_waitlisted_editions,\n 'loans': self.get_loans,\n 'want-to-read': self.get_want_to_read,\n 'currently-reading': self.get_currently_reading,\n 'already-read': self.get_already_read,\n }\n\n @property\n def lists(self):\n return self.user.get_lists()\n\n @property\n def sponsorship_counts(self):\n return {'sponsorships': len(get_sponsored_editions(self.user))}\n\n @property\n def booknotes_counts(self):\n return PatronBooknotes.get_counts(self.user.get_username())\n\n @property\n def get_sidebar_counts(self):\n counts = self.reading_log_counts\n counts.update(self.sponsorship_counts)\n counts.update(self.booknotes_counts)\n return counts\n\n @property\n def reading_log_counts(self):\n counts = (\n Bookshelves.count_total_books_logged_by_user_per_shelf(\n self.user.get_username()\n )\n if self.user.get_username()\n else {}\n )\n return {\n 'want-to-read': counts.get(\n Bookshelves.PRESET_BOOKSHELVES['Want to Read'], 0\n ),\n 'currently-reading': counts.get(\n Bookshelves.PRESET_BOOKSHELVES['Currently Reading'], 0\n ),\n 'already-read': counts.get(\n Bookshelves.PRESET_BOOKSHELVES['Already Read'], 0\n ),\n }\n\n def get_loans(self):\n return borrow.get_loans(self.user)\n\n def get_waitlist_summary(self):\n return self.user.get_waitinglist()\n\n def get_waitlisted_editions(self):\n \"\"\"Gets a list of records corresponding to a user's waitlisted\n editions, fetches all the editions, and then inserts the data\n from each waitlist record (e.g. 
position in line) into the\n corresponding edition\n \"\"\"\n waitlists = self.user.get_waitinglist()\n keyed_waitlists = {w['identifier']: w for w in waitlists}\n ocaids = [i['identifier'] for i in waitlists]\n edition_keys = web.ctx.site.things({\"type\": \"/type/edition\", \"ocaid\": ocaids})\n editions = web.ctx.site.get_many(edition_keys)\n for i in range(len(editions)):\n # insert the waitlist_entry corresponding to this edition\n editions[i].waitlist_record = keyed_waitlists[editions[i].ocaid]\n return editions\n\n def process_logged_books(self, logged_books):\n work_ids = ['/works/OL%sW' % i['work_id'] for i in logged_books]\n works = web.ctx.site.get_many(work_ids)\n for i in range(len(works)):\n # insert the logged edition (if present) and logged date\n works[i].logged_date = logged_books[i]['created']\n works[i].logged_edition = (\n '/books/OL%sM' % logged_books[i]['edition_id']\n if logged_books[i]['edition_id']\n else ''\n )\n return works\n\n def get_want_to_read(\n self, page=1, limit=RESULTS_PER_PAGE, sort='created', sort_order='desc'\n ):\n return self.process_logged_books(\n Bookshelves.get_users_logged_books(\n self.user.get_username(),\n bookshelf_id=Bookshelves.PRESET_BOOKSHELVES['Want to Read'],\n page=page,\n limit=limit,\n sort=sort + ' ' + sort_order,\n )\n )\n\n def get_currently_reading(\n self, page=1, limit=RESULTS_PER_PAGE, sort='created', sort_order='desc'\n ):\n return self.process_logged_books(\n Bookshelves.get_users_logged_books(\n self.user.get_username(),\n bookshelf_id=Bookshelves.PRESET_BOOKSHELVES['Currently Reading'],\n page=page,\n limit=limit,\n sort=sort + ' ' + sort_order,\n )\n )\n\n def get_already_read(\n self, page=1, limit=RESULTS_PER_PAGE, sort='created', sort_order='desc'\n ):\n return self.process_logged_books(\n Bookshelves.get_users_logged_books(\n self.user.get_username(),\n bookshelf_id=Bookshelves.PRESET_BOOKSHELVES['Already Read'],\n page=page,\n limit=limit,\n sort=sort + ' ' + sort_order,\n )\n )\n\n def get_works(\n self, key, page=1, limit=RESULTS_PER_PAGE, sort='created', sort_order='desc'\n ):\n \"\"\"\n :rtype: list of openlibrary.plugins.upstream.models.Work\n \"\"\"\n key = key.lower()\n if key in self.KEYS:\n return self.KEYS[key](\n page=page, limit=limit, sort=sort, sort_order=sort_order\n )\n else: # must be a list or invalid page!\n # works = web.ctx.site.get_many([ ... 
])\n raise\n\n\n@public\ndef get_read_status(work_key, username):\n work_id = extract_numeric_id_from_olid(work_key.split('/')[-1])\n return Bookshelves.get_users_read_status_of_work(username, work_id)\n\n\n@public\ndef add_read_statuses(username, works):\n work_ids = [extract_numeric_id_from_olid(work.key.split('/')[-1]) for work in works]\n results = Bookshelves.get_users_read_status_of_works(username, work_ids)\n results_map = {}\n for result in results:\n results_map[f\"OL{result['work_id']}W\"] = result['bookshelf_id']\n for work in works:\n work_olid = work.key.split('/')[-1]\n work['readinglog'] = results_map.get(work_olid, None)\n return works\n\n\nclass PatronBooknotes:\n \"\"\"Manages the patron's book notes and observations\"\"\"\n\n def __init__(self, user):\n user = user\n self.username = user.key.split('/')[-1]\n\n def get_notes(self, limit=RESULTS_PER_PAGE, page=1):\n notes = Booknotes.get_notes_grouped_by_work(\n self.username, limit=limit, page=page\n )\n\n for entry in notes:\n entry['work_key'] = f\"/works/OL{entry['work_id']}W\"\n entry['work'] = self._get_work(entry['work_key'])\n entry['work_details'] = self._get_work_details(entry['work'])\n entry['notes'] = {i['edition_id']: i['notes'] for i in entry['notes']}\n entry['editions'] = {\n k: web.ctx.site.get(f'/books/OL{k}M')\n for k in entry['notes']\n if k != Booknotes.NULL_EDITION_VALUE\n }\n return notes\n\n def get_observations(self, limit=RESULTS_PER_PAGE, page=1):\n observations = Observations.get_observations_grouped_by_work(\n self.username, limit=limit, page=page\n )\n\n for entry in observations:\n entry['work_key'] = f\"/works/OL{entry['work_id']}W\"\n entry['work'] = self._get_work(entry['work_key'])\n entry['work_details'] = self._get_work_details(entry['work'])\n ids = {}\n for item in entry['observations']:\n ids[item['observation_type']] = item['observation_values']\n entry['observations'] = convert_observation_ids(ids)\n return observations\n\n def _get_work(self, work_key):\n return web.ctx.site.get(work_key)\n\n def _get_work_details(self, work):\n author_keys = [a.author.key for a in work.get('authors', [])]\n\n return {\n 'cover_url': (\n work.get_cover_url('S')\n or 'https://openlibrary.org/images/icons/avatar_book-sm.png'\n ),\n 'title': work.get('title'),\n 'authors': [a.name for a in web.ctx.site.get_many(author_keys)],\n 'first_publish_year': work.first_publish_year or None,\n }\n\n @classmethod\n def get_counts(cls, username):\n return {\n 'notes': Booknotes.count_works_with_notes_by_user(username),\n 'observations': Observations.count_distinct_observations(username),\n }\n", "path": "openlibrary/plugins/upstream/mybooks.py" } ]
diff --git a/openlibrary/plugins/upstream/mybooks.py b/openlibrary/plugins/upstream/mybooks.py index 23710527bd4..22772bcd013 100644 --- a/openlibrary/plugins/upstream/mybooks.py +++ b/openlibrary/plugins/upstream/mybooks.py @@ -248,6 +248,7 @@ def render(self, page=1, sort='desc', list=None): lists=self.lists, public=is_public, owners_page=is_logged_in_user, + sort_order=sort, ) raise web.seeother(self.user.key)
scikit-hep__awkward-1977
ak.from_parquet for multiple files uses wrong number of arguments to concatenate ### Version of Awkward Array 2.0.0rc6 ### Description and code to reproduce If passing a directory with more than one file to `ak.from_parquet` the following exception is raised: (the `outputs` directory in this example contains 50 files) ```python In [4]: ak.from_parquet("outputs") --------------------------------------------------------------------------- TypeError Traceback (most recent call last) Cell In [4], line 1 ----> 1 ak.from_parquet("outputs") File ~/.pyenv/versions/3.10.8/envs/dev/lib/python3.10/site-packages/awkward/operations/ak_from_parquet.py:75, in from_parquet(path, columns, row_groups, storage_options, max_gap, max_block, footer_sample_size, generate_bitmasks, highlevel, behavior) 67 import awkward._connect.pyarrow # noqa: F401 69 parquet_columns, subform, actual_paths, fs, subrg, row_counts, meta = metadata( 70 path, 71 storage_options, 72 row_groups, 73 columns, 74 ) ---> 75 return _load( 76 actual_paths, 77 parquet_columns if columns is not None else None, 78 subrg, 79 max_gap, 80 max_block, 81 footer_sample_size, 82 generate_bitmasks, 83 subform, 84 highlevel, 85 behavior, 86 fs, 87 ) File ~/.pyenv/versions/3.10.8/envs/dev/lib/python3.10/site-packages/awkward/operations/ak_from_parquet.py:246, in _load(actual_paths, parquet_columns, subrg, max_gap, max_block, footer_sample_size, generate_bitmasks, subform, highlevel, behavior, fs, metadata) 243 return ak.Array(arrays[0]) 244 else: 245 # TODO: if each array is a record? --> 246 return ak.operations.ak_concatenate._impl( 247 arrays, 0, True, True, highlevel, behavior 248 ) TypeError: _impl() takes 5 positional arguments but 6 were given ```
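The root cause is a plain signature mismatch: `_load` passed six positional arguments to `ak.operations.ak_concatenate._impl`, which (per the traceback and the fix below) accepts five. A minimal, self-contained sketch of both the failure and the repair, using a hypothetical stand-in for `_impl` with the parameter names implied by the fix:

```python
# Stand-in mirroring the five-parameter signature implied by the
# traceback and the fix (arrays, axis, mergebool, highlevel, behavior);
# the real function is awkward.operations.ak_concatenate._impl.
def _impl(arrays, axis, mergebool, highlevel, behavior):
    return arrays

arrays, highlevel, behavior = [[1, 2], [3]], True, None

try:
    # Old call site: six positional arguments against five parameters.
    _impl(arrays, 0, True, True, highlevel, behavior)
except TypeError as exc:
    print(exc)  # _impl() takes 5 positional arguments but 6 were given

# Fixed call site: keyword arguments bind unambiguously and drop the
# stray extra positional value.
_impl(arrays, axis=0, mergebool=True, highlevel=highlevel, behavior=behavior)
```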
[ { "content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport awkward as ak\n\n\ndef from_parquet(\n path,\n *,\n columns=None,\n row_groups=None,\n storage_options=None,\n max_gap=64_000,\n max_block=256_000_000,\n footer_sample_size=1_000_000,\n generate_bitmasks=False,\n highlevel=True,\n behavior=None,\n):\n \"\"\"\n Args:\n path (str): Local filename or remote URL, passed to fsspec for resolution.\n May contain glob patterns.\n columns (None, str, or list of str): Glob pattern(s) with bash-like curly\n brackets for matching column names. Nested records are separated by dots.\n If a list of patterns, the logical-or is matched. If None, all columns\n are read.\n row_groups (None or set of int): Row groups to read; must be non-negative.\n Order is ignored: the output array is presented in the order specified by\n Parquet metadata. If None, all row groups/all rows are read.\n storage_options: Passed to `fsspec.parquet.open_parquet_file`.\n max_gap (int): Passed to `fsspec.parquet.open_parquet_file`.\n max_block (int): Passed to `fsspec.parquet.open_parquet_file`.\n footer_sample_size (int): Passed to `fsspec.parquet.open_parquet_file`.\n generate_bitmasks (bool): If enabled and Arrow/Parquet does not have Awkward\n metadata, `generate_bitmasks=True` creates empty bitmasks for nullable\n types that don't have bitmasks in the Arrow/Parquet data, so that the\n Form (BitMaskedForm vs UnmaskedForm) is predictable.\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n Reads data from a local or remote Parquet file or collection of files.\n\n The data are eagerly (not lazily) read and must fit into memory. 
Use `columns`\n and/or `row_groups` to select and filter manageable subsets of the data, and\n use #ak.metadata_from_parquet to find column names and the range of row groups\n that a dataset has.\n\n See also #ak.to_parquet, #ak.metadata_from_parquet.\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.from_parquet\",\n dict(\n path=path,\n columns=columns,\n row_groups=row_groups,\n storage_options=storage_options,\n max_gap=max_gap,\n max_block=max_block,\n footer_sample_size=footer_sample_size,\n generate_bitmasks=generate_bitmasks,\n highlevel=highlevel,\n behavior=behavior,\n ),\n ):\n import awkward._connect.pyarrow # noqa: F401\n\n parquet_columns, subform, actual_paths, fs, subrg, row_counts, meta = metadata(\n path,\n storage_options,\n row_groups,\n columns,\n )\n return _load(\n actual_paths,\n parquet_columns if columns is not None else None,\n subrg,\n max_gap,\n max_block,\n footer_sample_size,\n generate_bitmasks,\n subform,\n highlevel,\n behavior,\n fs,\n )\n\n\ndef metadata(\n path,\n storage_options=None,\n row_groups=None,\n columns=None,\n ignore_metadata=False,\n scan_files=True,\n):\n import awkward._connect.pyarrow\n\n # early exit if missing deps\n pyarrow_parquet = awkward._connect.pyarrow.import_pyarrow_parquet(\"ak.from_parquet\")\n import fsspec.parquet\n\n if row_groups is not None:\n if not all(ak._util.is_integer(x) and x >= 0 for x in row_groups):\n raise ak._errors.wrap_error(\n ValueError(\"row_groups must be a set of non-negative integers\")\n )\n if len(set(row_groups)) < len(row_groups):\n raise ak._errors.wrap_error(ValueError(\"row group indices must not repeat\"))\n\n fs, _, paths = fsspec.get_fs_token_paths(\n path, mode=\"rb\", storage_options=storage_options\n )\n all_paths, path_for_schema, can_sub = _all_and_metadata_paths(\n path, fs, paths, ignore_metadata, scan_files\n )\n\n subrg = [None] * len(all_paths)\n actual_paths = all_paths\n with fs.open(\n path_for_schema,\n ) as file_for_metadata:\n parquetfile_for_metadata = pyarrow_parquet.ParquetFile(file_for_metadata)\n\n list_indicator = \"list.item\"\n for column_metadata in parquetfile_for_metadata.schema:\n if (\n column_metadata.max_repetition_level > 0\n and \".list.element.\" in column_metadata.path\n ):\n list_indicator = \"list.element\"\n break\n\n subform = ak._connect.pyarrow.form_handle_arrow(\n parquetfile_for_metadata.schema_arrow, pass_empty_field=True\n )\n if columns is not None:\n subform = subform.select_columns(columns)\n\n # Handle empty field at root\n if parquetfile_for_metadata.schema_arrow.names == [\"\"]:\n column_prefix = (\"\",)\n else:\n column_prefix = ()\n\n metadata = parquetfile_for_metadata.metadata\n if scan_files and not path_for_schema.endswith(\"/_metadata\"):\n if path_for_schema in all_paths:\n scan_paths = all_paths[1:]\n else:\n scan_paths = all_paths\n for apath in scan_paths:\n with fs.open(apath, \"rb\") as f:\n md = pyarrow_parquet.ParquetFile(f).metadata\n # TODO: not nested directory structure yet\n md.set_file_path(apath.rsplit(\"/\", 1)[-1])\n metadata.append_row_groups(md)\n if row_groups is not None:\n if any(_ >= metadata.num_row_groups for _ in row_groups):\n raise ak._errors.wrap_error(\n ValueError(\n f\"Row group selection out of bounds 0..{metadata.num_row_groups - 1}\"\n )\n )\n if not can_sub:\n raise ak._errors.wrap_error(\n TypeError(\n \"Requested selection of row-groups, but not scanning metadata\"\n )\n )\n\n path_rgs = {}\n rgs_path = {}\n subrg = []\n col_counts = []\n for i in range(metadata.num_row_groups):\n fp = 
metadata.row_group(i).column(0).file_path\n path_rgs.setdefault(fp, []).append(i)\n rgs_path[i] = fp\n\n actual_paths = []\n for select in row_groups:\n path = rgs_path[select]\n path2 = [_ for _ in all_paths if _.endswith(path)][0]\n if path2 not in actual_paths:\n actual_paths.append(path2)\n subrg.append([path_rgs[path].index(select)])\n else:\n subrg[-1].append(path_rgs[path].index(select))\n col_counts.append(metadata.row_group(select).num_rows)\n else:\n if can_sub:\n col_counts = [\n metadata.row_group(i).num_rows for i in range(metadata.num_row_groups)\n ]\n else:\n col_counts = None\n\n parquet_columns = subform.columns(\n list_indicator=list_indicator, column_prefix=column_prefix\n )\n\n return parquet_columns, subform, actual_paths, fs, subrg, col_counts, metadata\n\n\ndef _load(\n actual_paths,\n parquet_columns,\n subrg,\n max_gap,\n max_block,\n footer_sample_size,\n generate_bitmasks,\n subform,\n highlevel,\n behavior,\n fs,\n metadata=None,\n):\n arrays = []\n for i, p in enumerate(actual_paths):\n arrays.append(\n _read_parquet_file(\n p,\n fs=fs,\n parquet_columns=parquet_columns,\n row_groups=subrg[i],\n max_gap=max_gap,\n max_block=max_block,\n footer_sample_size=footer_sample_size,\n generate_bitmasks=generate_bitmasks,\n metadata=metadata,\n )\n )\n\n if len(arrays) == 0:\n return subform.length_zero_array(highlevel=highlevel, behavior=behavior)\n elif len(arrays) == 1:\n # make high-level\n if isinstance(arrays[0], ak.record.Record):\n return ak.Record(arrays[0])\n return ak.Array(arrays[0])\n else:\n # TODO: if each array is a record?\n return ak.operations.ak_concatenate._impl(\n arrays, 0, True, True, highlevel, behavior\n )\n\n\ndef _open_file(\n path, fs, columns, row_groups, max_gap, max_block, footer_sample_size, metadata\n):\n \"\"\"Picks between fsspec.parquet and normal fs.open\"\"\"\n import fsspec.parquet\n\n # condition should be if columns and ow_groups are not all the possible ones\n if (columns or row_groups) and getattr(fs, \"async_impl\", False):\n return fsspec.parquet.open_parquet_file(\n path,\n fs=fs,\n engine=\"pyarrow\",\n columns=columns,\n row_groups=row_groups,\n max_gap=max_gap,\n metadata=metadata,\n max_block=max_block,\n footer_sample_size=footer_sample_size,\n )\n else:\n return fs.open(path, \"rb\")\n\n\ndef _read_parquet_file(\n path,\n fs,\n parquet_columns,\n row_groups,\n footer_sample_size,\n max_gap,\n max_block,\n generate_bitmasks,\n metadata=None,\n):\n import pyarrow.parquet as pyarrow_parquet\n\n with _open_file(\n path,\n fs,\n parquet_columns,\n row_groups,\n max_gap,\n max_block,\n footer_sample_size,\n metadata,\n ) as file:\n parquetfile = pyarrow_parquet.ParquetFile(file)\n\n if row_groups is None:\n arrow_table = parquetfile.read(parquet_columns)\n else:\n arrow_table = parquetfile.read_row_groups(row_groups, parquet_columns)\n\n return ak.operations.ak_from_arrow._impl(\n arrow_table,\n generate_bitmasks,\n # why is high-level False here?\n False,\n None,\n )\n\n\nclass _DictOfEmptyBuffers:\n def __getitem__(self, where):\n return b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n\n\ndef _all_and_metadata_paths(path, fs, paths, ignore_metadata=False, scan_files=True):\n all_paths = []\n for x in paths:\n if fs.isfile(x):\n is_meta = x.rsplit(\"/\", 1)[-1] == \"_metadata\"\n if is_meta and ignore_metadata:\n continue\n is_comm = x.rsplit(\"/\", 1)[-1] == \"_common_metadata\"\n if is_comm and scan_files:\n continue\n all_paths.append((x, is_meta, is_comm))\n elif fs.isdir(x):\n for f, fdata in fs.find(x, 
detail=True).items():\n is_meta = f.endswith(\"_metadata\")\n if is_meta and ignore_metadata:\n continue\n is_comm = f.endswith(\"_common_metadata\")\n if is_comm and scan_files:\n continue\n if f.endswith((\".parq\", \".parquet\")) or is_meta or is_comm:\n if fdata[\"type\"] == \"file\":\n all_paths.append((f, is_meta, is_comm))\n\n path_for_metadata = [x for x, is_meta, is_comm in all_paths if is_meta]\n if len(path_for_metadata) != 0:\n path_for_metadata = path_for_metadata[0]\n can_sub = True\n else:\n path_for_metadata = [x for x, is_meta, is_comm in all_paths if is_comm]\n if len(path_for_metadata) != 0:\n path_for_metadata = path_for_metadata[0]\n else:\n if len(all_paths) != 0:\n path_for_metadata = all_paths[0][0]\n # we will still know rew-groups and counts if we scan, so can sub-select\n can_sub = scan_files or len(all_paths) == 1\n\n all_paths = [x for x, is_meta, is_comm in all_paths if not is_meta and not is_comm]\n\n if len(all_paths) == 0:\n raise ak._errors.wrap_error(\n ValueError(f\"no *.parquet or *.parq matches for path {path!r}\")\n )\n\n return all_paths, path_for_metadata, can_sub\n", "path": "src/awkward/operations/ak_from_parquet.py" } ]
[ { "content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport awkward as ak\n\n\ndef from_parquet(\n path,\n *,\n columns=None,\n row_groups=None,\n storage_options=None,\n max_gap=64_000,\n max_block=256_000_000,\n footer_sample_size=1_000_000,\n generate_bitmasks=False,\n highlevel=True,\n behavior=None,\n):\n \"\"\"\n Args:\n path (str): Local filename or remote URL, passed to fsspec for resolution.\n May contain glob patterns.\n columns (None, str, or list of str): Glob pattern(s) with bash-like curly\n brackets for matching column names. Nested records are separated by dots.\n If a list of patterns, the logical-or is matched. If None, all columns\n are read.\n row_groups (None or set of int): Row groups to read; must be non-negative.\n Order is ignored: the output array is presented in the order specified by\n Parquet metadata. If None, all row groups/all rows are read.\n storage_options: Passed to `fsspec.parquet.open_parquet_file`.\n max_gap (int): Passed to `fsspec.parquet.open_parquet_file`.\n max_block (int): Passed to `fsspec.parquet.open_parquet_file`.\n footer_sample_size (int): Passed to `fsspec.parquet.open_parquet_file`.\n generate_bitmasks (bool): If enabled and Arrow/Parquet does not have Awkward\n metadata, `generate_bitmasks=True` creates empty bitmasks for nullable\n types that don't have bitmasks in the Arrow/Parquet data, so that the\n Form (BitMaskedForm vs UnmaskedForm) is predictable.\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n Reads data from a local or remote Parquet file or collection of files.\n\n The data are eagerly (not lazily) read and must fit into memory. 
Use `columns`\n and/or `row_groups` to select and filter manageable subsets of the data, and\n use #ak.metadata_from_parquet to find column names and the range of row groups\n that a dataset has.\n\n See also #ak.to_parquet, #ak.metadata_from_parquet.\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.from_parquet\",\n dict(\n path=path,\n columns=columns,\n row_groups=row_groups,\n storage_options=storage_options,\n max_gap=max_gap,\n max_block=max_block,\n footer_sample_size=footer_sample_size,\n generate_bitmasks=generate_bitmasks,\n highlevel=highlevel,\n behavior=behavior,\n ),\n ):\n import awkward._connect.pyarrow # noqa: F401\n\n parquet_columns, subform, actual_paths, fs, subrg, row_counts, meta = metadata(\n path,\n storage_options,\n row_groups,\n columns,\n )\n return _load(\n actual_paths,\n parquet_columns if columns is not None else None,\n subrg,\n max_gap,\n max_block,\n footer_sample_size,\n generate_bitmasks,\n subform,\n highlevel,\n behavior,\n fs,\n )\n\n\ndef metadata(\n path,\n storage_options=None,\n row_groups=None,\n columns=None,\n ignore_metadata=False,\n scan_files=True,\n):\n import awkward._connect.pyarrow\n\n # early exit if missing deps\n pyarrow_parquet = awkward._connect.pyarrow.import_pyarrow_parquet(\"ak.from_parquet\")\n import fsspec.parquet\n\n if row_groups is not None:\n if not all(ak._util.is_integer(x) and x >= 0 for x in row_groups):\n raise ak._errors.wrap_error(\n ValueError(\"row_groups must be a set of non-negative integers\")\n )\n if len(set(row_groups)) < len(row_groups):\n raise ak._errors.wrap_error(ValueError(\"row group indices must not repeat\"))\n\n fs, _, paths = fsspec.get_fs_token_paths(\n path, mode=\"rb\", storage_options=storage_options\n )\n all_paths, path_for_schema, can_sub = _all_and_metadata_paths(\n path, fs, paths, ignore_metadata, scan_files\n )\n\n subrg = [None] * len(all_paths)\n actual_paths = all_paths\n with fs.open(\n path_for_schema,\n ) as file_for_metadata:\n parquetfile_for_metadata = pyarrow_parquet.ParquetFile(file_for_metadata)\n\n list_indicator = \"list.item\"\n for column_metadata in parquetfile_for_metadata.schema:\n if (\n column_metadata.max_repetition_level > 0\n and \".list.element.\" in column_metadata.path\n ):\n list_indicator = \"list.element\"\n break\n\n subform = ak._connect.pyarrow.form_handle_arrow(\n parquetfile_for_metadata.schema_arrow, pass_empty_field=True\n )\n if columns is not None:\n subform = subform.select_columns(columns)\n\n # Handle empty field at root\n if parquetfile_for_metadata.schema_arrow.names == [\"\"]:\n column_prefix = (\"\",)\n else:\n column_prefix = ()\n\n metadata = parquetfile_for_metadata.metadata\n if scan_files and not path_for_schema.endswith(\"/_metadata\"):\n if path_for_schema in all_paths:\n scan_paths = all_paths[1:]\n else:\n scan_paths = all_paths\n for apath in scan_paths:\n with fs.open(apath, \"rb\") as f:\n md = pyarrow_parquet.ParquetFile(f).metadata\n # TODO: not nested directory structure yet\n md.set_file_path(apath.rsplit(\"/\", 1)[-1])\n metadata.append_row_groups(md)\n if row_groups is not None:\n if any(_ >= metadata.num_row_groups for _ in row_groups):\n raise ak._errors.wrap_error(\n ValueError(\n f\"Row group selection out of bounds 0..{metadata.num_row_groups - 1}\"\n )\n )\n if not can_sub:\n raise ak._errors.wrap_error(\n TypeError(\n \"Requested selection of row-groups, but not scanning metadata\"\n )\n )\n\n path_rgs = {}\n rgs_path = {}\n subrg = []\n col_counts = []\n for i in range(metadata.num_row_groups):\n fp = 
metadata.row_group(i).column(0).file_path\n path_rgs.setdefault(fp, []).append(i)\n rgs_path[i] = fp\n\n actual_paths = []\n for select in row_groups:\n path = rgs_path[select]\n path2 = [_ for _ in all_paths if _.endswith(path)][0]\n if path2 not in actual_paths:\n actual_paths.append(path2)\n subrg.append([path_rgs[path].index(select)])\n else:\n subrg[-1].append(path_rgs[path].index(select))\n col_counts.append(metadata.row_group(select).num_rows)\n else:\n if can_sub:\n col_counts = [\n metadata.row_group(i).num_rows for i in range(metadata.num_row_groups)\n ]\n else:\n col_counts = None\n\n parquet_columns = subform.columns(\n list_indicator=list_indicator, column_prefix=column_prefix\n )\n\n return parquet_columns, subform, actual_paths, fs, subrg, col_counts, metadata\n\n\ndef _load(\n actual_paths,\n parquet_columns,\n subrg,\n max_gap,\n max_block,\n footer_sample_size,\n generate_bitmasks,\n subform,\n highlevel,\n behavior,\n fs,\n metadata=None,\n):\n arrays = []\n for i, p in enumerate(actual_paths):\n arrays.append(\n _read_parquet_file(\n p,\n fs=fs,\n parquet_columns=parquet_columns,\n row_groups=subrg[i],\n max_gap=max_gap,\n max_block=max_block,\n footer_sample_size=footer_sample_size,\n generate_bitmasks=generate_bitmasks,\n metadata=metadata,\n )\n )\n\n if len(arrays) == 0:\n return subform.length_zero_array(highlevel=highlevel, behavior=behavior)\n elif len(arrays) == 1:\n # make high-level\n if isinstance(arrays[0], ak.record.Record):\n return ak.Record(arrays[0])\n return ak.Array(arrays[0])\n else:\n # TODO: if each array is a record?\n return ak.operations.ak_concatenate._impl(\n arrays, axis=0, mergebool=True, highlevel=highlevel, behavior=behavior\n )\n\n\ndef _open_file(\n path, fs, columns, row_groups, max_gap, max_block, footer_sample_size, metadata\n):\n \"\"\"Picks between fsspec.parquet and normal fs.open\"\"\"\n import fsspec.parquet\n\n # condition should be if columns and ow_groups are not all the possible ones\n if (columns or row_groups) and getattr(fs, \"async_impl\", False):\n return fsspec.parquet.open_parquet_file(\n path,\n fs=fs,\n engine=\"pyarrow\",\n columns=columns,\n row_groups=row_groups,\n max_gap=max_gap,\n metadata=metadata,\n max_block=max_block,\n footer_sample_size=footer_sample_size,\n )\n else:\n return fs.open(path, \"rb\")\n\n\ndef _read_parquet_file(\n path,\n fs,\n parquet_columns,\n row_groups,\n footer_sample_size,\n max_gap,\n max_block,\n generate_bitmasks,\n metadata=None,\n):\n import pyarrow.parquet as pyarrow_parquet\n\n with _open_file(\n path,\n fs,\n parquet_columns,\n row_groups,\n max_gap,\n max_block,\n footer_sample_size,\n metadata,\n ) as file:\n parquetfile = pyarrow_parquet.ParquetFile(file)\n\n if row_groups is None:\n arrow_table = parquetfile.read(parquet_columns)\n else:\n arrow_table = parquetfile.read_row_groups(row_groups, parquet_columns)\n\n return ak.operations.ak_from_arrow._impl(\n arrow_table,\n generate_bitmasks,\n # why is high-level False here?\n False,\n None,\n )\n\n\nclass _DictOfEmptyBuffers:\n def __getitem__(self, where):\n return b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n\n\ndef _all_and_metadata_paths(path, fs, paths, ignore_metadata=False, scan_files=True):\n all_paths = []\n for x in paths:\n if fs.isfile(x):\n is_meta = x.rsplit(\"/\", 1)[-1] == \"_metadata\"\n if is_meta and ignore_metadata:\n continue\n is_comm = x.rsplit(\"/\", 1)[-1] == \"_common_metadata\"\n if is_comm and scan_files:\n continue\n all_paths.append((x, is_meta, is_comm))\n elif fs.isdir(x):\n for f, fdata 
in fs.find(x, detail=True).items():\n is_meta = f.endswith(\"_metadata\")\n if is_meta and ignore_metadata:\n continue\n is_comm = f.endswith(\"_common_metadata\")\n if is_comm and scan_files:\n continue\n if f.endswith((\".parq\", \".parquet\")) or is_meta or is_comm:\n if fdata[\"type\"] == \"file\":\n all_paths.append((f, is_meta, is_comm))\n\n path_for_metadata = [x for x, is_meta, is_comm in all_paths if is_meta]\n if len(path_for_metadata) != 0:\n path_for_metadata = path_for_metadata[0]\n can_sub = True\n else:\n path_for_metadata = [x for x, is_meta, is_comm in all_paths if is_comm]\n if len(path_for_metadata) != 0:\n path_for_metadata = path_for_metadata[0]\n else:\n if len(all_paths) != 0:\n path_for_metadata = all_paths[0][0]\n # we will still know rew-groups and counts if we scan, so can sub-select\n can_sub = scan_files or len(all_paths) == 1\n\n all_paths = [x for x, is_meta, is_comm in all_paths if not is_meta and not is_comm]\n\n if len(all_paths) == 0:\n raise ak._errors.wrap_error(\n ValueError(f\"no *.parquet or *.parq matches for path {path!r}\")\n )\n\n return all_paths, path_for_metadata, can_sub\n", "path": "src/awkward/operations/ak_from_parquet.py" } ]
diff --git a/src/awkward/operations/ak_from_parquet.py b/src/awkward/operations/ak_from_parquet.py index 1f8526beb1..490790a829 100644 --- a/src/awkward/operations/ak_from_parquet.py +++ b/src/awkward/operations/ak_from_parquet.py @@ -244,7 +244,7 @@ def _load( else: # TODO: if each array is a record? return ak.operations.ak_concatenate._impl( - arrays, 0, True, True, highlevel, behavior + arrays, axis=0, mergebool=True, highlevel=highlevel, behavior=behavior )
meltano__meltano-6078
Adjust logging level for requests/urllib With the hub changes and especially snowplow changes, when running with debug mode enabled we're littering the logs with some giant http requests (scroll the code block to get an idea of how large these look in the console when line wrapped): ``` (melty-3.8) ➜ rundev MELTANO_DISABLE_TRACKING=False MELTANO_SNOWPLOW_COLLECTOR_ENDPOINTS='["http://localhost:9090"]' meltano --log-level=debug invoke tap-gitlab 2022-06-04T23:19:04.941335Z [debug ] Creating engine <meltano.core.project.Project object at 0x109533a60>@sqlite:////Users/syn/projects/meltano-projects/rundev/.meltano/meltano.db 2022-06-04T23:19:04.996207Z [debug ] Starting new HTTP connection (1): localhost:9090 2022-06-04T23:19:05.030353Z [debug ] http://localhost:9090 "GET /i?e=ue&ue_px=eyJzY2hlbWEiOiAiaWdsdTpjb20uc25vd3Bsb3dhbmFseXRpY3Muc25vd3Bsb3cvdW5zdHJ1Y3RfZXZlbnQvanNvbnNjaGVtYS8xLTAtMCIsICJkYXRhIjogeyJzY2hlbWEiOiAiaWdsdTpjb20ubWVsdGFuby9jbGlfZXZlbnQvanNvbnNjaGVtYS8xLTAtMCIsICJkYXRhIjogeyJldmVudCI6ICJzdGFydGVkIn19fQ%3D%3D&eid=ee086532-f9be-4819-9b66-3dc04049096a&dtm=1654384744990&cx=eyJzY2hlbWEiOiAiaWdsdTpjb20uc25vd3Bsb3dhbmFseXRpY3Muc25vd3Bsb3cvY29udGV4dHMvanNvbnNjaGVtYS8xLTAtMSIsICJkYXRhIjogW3sic2NoZW1hIjogImlnbHU6Y29tLm1lbHRhbm8vZW52aXJvbm1lbnRfY29udGV4dC9qc29uc2NoZW1hLzEtMC0wIiwgImRhdGEiOiB7ImNvbnRleHRfdXVpZCI6ICIxZThmNDdmMS04NDU2LTQ0NDEtOTY5ZS0yZDkyNTk0OThiZGUiLCAibWVsdGFub192ZXJzaW9uIjogIjEuMTA1LjAiLCAiaXNfZGV2X2J1aWxkIjogdHJ1ZSwgImlzX2NpX2Vudmlyb25tZW50IjogdHJ1ZSwgInB5dGhvbl92ZXJzaW9uIjogIjMuOC4xMiIsICJzeXN0ZW1fbmFtZSI6ICJEYXJ3aW4iLCAic3lzdGVtX3JlbGVhc2UiOiAiMjEuMS4wIiwgInN5c3RlbV92ZXJzaW9uIjogIkRhcndpbiBLZXJuZWwgVmVyc2lvbiAyMS4xLjA6IFdlZCBPY3QgMTMgMTc6MzM6MDEgUERUIDIwMjE7IHJvb3Q6eG51LTgwMTkuNDEuNX4xL1JFTEVBU0VfQVJNNjRfVDYwMDAiLCAibWFjaGluZSI6ICJhcm02NCIsICJ3aW5kb3dzX2VkaXRpb24iOiBudWxsLCAiZnJlZWRlc2t0b3BfaWQiOiBudWxsLCAiZnJlZWRlc2t0b3BfaWRfbGlrZSI6IG51bGwsICJmcmVlZGVza3RvcF92ZXJzaW9uX2lkIjogbnVsbCwgIm51bV9jcHVfY29yZXMiOiAxMCwgIm51bV9jcHVfY29yZXNfYXZhaWxhYmxlIjogMTAsICJwcm9jZXNzX2hpZXJhcmNoeSI6IFt7InByb2Nlc3NfbmFtZV9oYXNoIjogIjBhZjE5NjY1ODhjZWQwNmUzMTQzYWU3MjAyNDVjOWI3YWVhYWUyMTNjNjkyMWMxMmM3NDJhMTY2Njc5Y2M1MDUiLCAicHJvY2Vzc19jcmVhdGlvbl90aW1lc3RhbXAiOiAiMjAyMi0wNi0wNFQyMzoxOTowNC4yOTMzNTFaIn0sIHsicHJvY2Vzc19uYW1lX2hhc2giOiAiYTI2ZTM3NjU0Mjg1YWY0MmM0NjlkMmI1Mjc0YmVjYjY1YjgxYjI3YTQ0NDU1Y2Y0ZDlmYWY2YzQzYTBjNDU2ZSIsICJwcm9jZXNzX2NyZWF0aW9uX3RpbWVzdGFtcCI6ICIyMDIyLTA2LTA0VDE3OjAwOjA2Ljg4ODAyNVoifSwgeyJwcm9jZXNzX25hbWVfaGFzaCI6ICI0Mjg4MjEzNTBlOTY5MTQ5MWY2MTZiNzU0Y2Q4MzE1ZmI4NmQ3OTdhYjM1ZDg0MzQ3OWU3MzJlZjkwNjY1MzI0IiwgInByb2Nlc3NfY3JlYXRpb25fdGltZXN0YW1wIjogIjIwMjItMDYtMDRUMTc6MDA6MDYuODY0MTkzWiJ9LCB7InByb2Nlc3NfbmFtZV9oYXNoIjogIjVmZDk4OWY5ZDM2YWI1MzRlNzJlMTkyOWQ2OWQyMzAzY2RmZjU2M2VjODdhMzMwOGM0NzcyNGRiOTFjNDNjODgiLCAicHJvY2Vzc19jcmVhdGlvbl90aW1lc3RhbXAiOiAiMjAyMi0wNS0yNFQxNToxMjowMS4yODI2MTdaIn0sIHsicHJvY2Vzc19uYW1lX2hhc2giOiAiNGZmMjJiZWFmNjBkMGJmZmU5ZTA1NTg4YTU0NjcyZTZlZDUyZWIwMjI2MzNkMDQ0YmZiNWUyZDFlNjUzOGM1ZCIsICJwcm9jZXNzX2NyZWF0aW9uX3RpbWVzdGFtcCI6ICIyMDIyLTA1LTI0VDE0OjQ5OjA3LjkyOTM5NVoifSwgeyJwcm9jZXNzX25hbWVfaGFzaCI6ICJjODI2ZTg0MWZiMzcwODZmYWMzMTE0ZDY2NGMwZTI3N2JjNDk4Y2YzNWI3ODRmNmExYjkzZDk5ZTZmMzU5ZWE0IiwgInByb2Nlc3NfY3JlYXRpb25fdGltZXN0YW1wIjogIjIwMjItMDUtMjRUMTQ6NDg6NDcuMDQ0MTg5WiJ9LCB7InByb2Nlc3NfbmFtZV9oYXNoIjogImIwYzIwZTdjNmU0NWQ1YTJkZmFhNGY3NzM5ZGQwZDQzYzUwOTJhZTBhODc1MTY1OGQyOGM1NzNmYThmZDk4MWIiLCAicHJvY2Vzc19jcmVhdGlvbl90aW1lc3RhbXAiOiAiMjAyMi0wNS0yNFQxNDo0ODo0Ny4wMDY0NDlaIn1dfX0sIHsic2NoZW1hIjogImlnbHU6Y29tLm1lbHRhbm8vcHJvamVjdF9jb250ZXh0L2pzb25zY2hlbWEvMS0wLTAiLCAiZGF0YSI
6IHsiY29udGV4dF91dWlkIjogIjg5Mzk1MDljLTM2ZTEtNDI3Ny1hZTNmLTExMTcwNjk2MWY2YiIsICJwcm9qZWN0X3V1aWQiOiAiMWE3ZDk5ODktZjcwNC00ODViLWJhN2EtYTc0Nzg3MDNjYTdiIiwgInByb2plY3RfdXVpZF9zb3VyY2UiOiAiZXhwbGljaXQiLCAiY2xpZW50X3V1aWQiOiAiYmMwM2RkYzgtN2MyNS00NTdjLTk1NTYtNjE1M2M4MzZmMTk1IiwgImVudmlyb25tZW50X25hbWVfaGFzaCI6IG51bGx9fSwgeyJzY2hlbWEiOiAiaWdsdTpjb20ubWVsdGFuby9jbGlfY29udGV4dC9qc29uc2NoZW1hLzEtMC0wIiwgImRhdGEiOiB7ImV2ZW50X3V1aWQiOiAiNDU0MWQ4MTAtYjUwMC00MDg2LWEwMzUtMTAwZGYwNTI4ZGMzIiwgImNvbW1hbmQiOiAiaW52b2tlIiwgInN1Yl9jb21tYW5kIjogbnVsbCwgIm9wdGlvbl9rZXlzIjogW119fV19&tv=py-0.10.0&p=pc&lang=en_US&tz=America%2FChicago&stm=1654384744000 HTTP/1.1" 200 43 2022-06-04T23:19:05.060016Z [debug ] Lockfile is feature-flagged status=False 2022-06-04T23:19:05.103509Z [debug ] Starting new HTTPS connection (1): discovery.meltano.com:443 2022-06-04T23:19:05.266283Z [debug ] https://discovery.meltano.com:443 "GET /discovery.yml?project_id=1a7d9989-f704-485b-ba7a-a7478703ca7b HTTP/1.1" 200 23607 2022-06-04T23:19:05.693139Z [debug ] Found plugin plugin=tap-gitlab source=discovery 2022-06-04T23:16:48.221553Z [debug ] http://localhost:9090 "GET /i?e=se&se_ca=meltano+invoke&se_ac=meltano+invoke+tap-gitlab+&se_la=1a7d9989-f704-485b-ba7a-a7478703ca7b&eid=ca88b1c2-551a-4334-8a25-f793f68b8a96&dtm=1654384608194&tv=py-0.10.0&p=pc&lang=en_US&tz=America%2FChicago&stm=1654384608000 HTTP/1.1" 200 43 2022-06-04T23:16:48.225794Z [debug ] Starting new HTTP connection (1): localhost:9090 2022-06-04T23:16:48.240215Z [debug ] http://localhost:9090 "GET /i?e=ue&ue_px=eyJzY2hlbWEiOiAiaWdsdTpjb20uc25vd3Bsb3dhbmFseXRpY3Muc25vd3Bsb3cvdW5zdHJ1Y3RfZXZlbnQvanNvbnNjaGVtYS8xLTAtMCIsICJkYXRhIjogeyJzY2hlbWEiOiAiaWdsdTpjb20ubWVsdGFuby9jbGlfZXZlbnQvanNvbnNjaGVtYS8xLTAtMCIsICJkYXRhIjogeyJldmVudCI6ICJjb21wbGV0ZWQifX19&eid=b42bfc91-8a1f-4dde-924f-9d7d5c634a49&dtm=1654384608222&cx=eyJzY2hlbWEiOiAiaWdsdTpjb20uc25vd3Bsb3dhbmFseXRpY3Muc25vd3Bsb3cvY29udGV4dHMvanNvbnNjaGVtYS8xLTAtMSIsICJkYXRhIjogW3sic2NoZW1hIjogImlnbHU6Y29tLm1lbHRhbm8vZW52aXJvbm1lbnRfY29udGV4dC9qc29uc2NoZW1hLzEtMC0wIiwgImRhdGEiOiB7ImNvbnRleHRfdXVpZCI6ICJlMDBjZTExYS1iYTg0LTRiZDctOWJlZC1kZWNkODBkZDFmNTMiLCAibWVsdGFub192ZXJzaW9uIjogIjEuMTA1LjAiLCAiaXNfZGV2X2J1aWxkIjogdHJ1ZSwgImlzX2NpX2Vudmlyb25tZW50IjogdHJ1ZSwgInB5dGhvbl92ZXJzaW9uIjogIjMuOC4xMiIsICJzeXN0ZW1fbmFtZSI6ICJEYXJ3aW4iLCAic3lzdGVtX3JlbGVhc2UiOiAiMjEuMS4wIiwgInN5c3RlbV92ZXJzaW9uIjogIkRhcndpbiBLZXJuZWwgVmVyc2lvbiAyMS4xLjA6IFdlZCBPY3QgMTMgMTc6MzM6MDEgUERUIDIwMjE7IHJvb3Q6eG51LTgwMTkuNDEuNX4xL1JFTEVBU0VfQVJNNjRfVDYwMDAiLCAibWFjaGluZSI6ICJhcm02NCIsICJ3aW5kb3dzX2VkaXRpb24iOiBudWxsLCAiZnJlZWRlc2t0b3BfaWQiOiBudWxsLCAiZnJlZWRlc2t0b3BfaWRfbGlrZSI6IG51bGwsICJmcmVlZGVza3RvcF92ZXJzaW9uX2lkIjogbnVsbCwgIm51bV9jcHVfY29yZXMiOiAxMCwgIm51bV9jcHVfY29yZXNfYXZhaWxhYmxlIjogMTAsICJwcm9jZXNzX2hpZXJhcmNoeSI6IFt7InByb2Nlc3NfbmFtZV9oYXNoIjogIjBhZjE5NjY1ODhjZWQwNmUzMTQzYWU3MjAyNDVjOWI3YWVhYWUyMTNjNjkyMWMxMmM3NDJhMTY2Njc5Y2M1MDUiLCAicHJvY2Vzc19jcmVhdGlvbl90aW1lc3RhbXAiOiAiMjAyMi0wNi0wNFQyMzoxNjo0NC45NjEwNjBaIn0sIHsicHJvY2Vzc19uYW1lX2hhc2giOiAiYTI2ZTM3NjU0Mjg1YWY0MmM0NjlkMmI1Mjc0YmVjYjY1YjgxYjI3YTQ0NDU1Y2Y0ZDlmYWY2YzQzYTBjNDU2ZSIsICJwcm9jZXNzX2NyZWF0aW9uX3RpbWVzdGFtcCI6ICIyMDIyLTA2LTA0VDE3OjAwOjA2Ljg4ODAyNVoifSwgeyJwcm9jZXNzX25hbWVfaGFzaCI6ICI0Mjg4MjEzNTBlOTY5MTQ5MWY2MTZiNzU0Y2Q4MzE1ZmI4NmQ3OTdhYjM1ZDg0MzQ3OWU3MzJlZjkwNjY1MzI0IiwgInByb2Nlc3NfY3JlYXRpb25fdGltZXN0YW1wIjogIjIwMjItMDYtMDRUMTc6MDA6MDYuODY0MTkzWiJ9LCB7InByb2Nlc3NfbmFtZV9oYXNoIjogIjVmZDk4OWY5ZDM2YWI1MzRlNzJlMTkyOWQ2OWQyMzAzY2RmZjU2M2VjODdhMzMwOGM0NzcyNGRiOTFjNDNjODgiLCAicHJvY2Vzc19jcmVhdGlvbl90aW1lc3RhbXAi
OiAiMjAyMi0wNS0yNFQxNToxMjowMS4yODI2MTdaIn0sIHsicHJvY2Vzc19uYW1lX2hhc2giOiAiNGZmMjJiZWFmNjBkMGJmZmU5ZTA1NTg4YTU0NjcyZTZlZDUyZWIwMjI2MzNkMDQ0YmZiNWUyZDFlNjUzOGM1ZCIsICJwcm9jZXNzX2NyZWF0aW9uX3RpbWVzdGFtcCI6ICIyMDIyLTA1LTI0VDE0OjQ5OjA3LjkyOTM5NVoifSwgeyJwcm9jZXNzX25hbWVfaGFzaCI6ICJjODI2ZTg0MWZiMzcwODZmYWMzMTE0ZDY2NGMwZTI3N2JjNDk4Y2YzNWI3ODRmNmExYjkzZDk5ZTZmMzU5ZWE0IiwgInByb2Nlc3NfY3JlYXRpb25fdGltZXN0YW1wIjogIjIwMjItMDUtMjRUMTQ6NDg6NDcuMDQ0MTg5WiJ9LCB7InByb2Nlc3NfbmFtZV9oYXNoIjogImIwYzIwZTdjNmU0NWQ1YTJkZmFhNGY3NzM5ZGQwZDQzYzUwOTJhZTBhODc1MTY1OGQyOGM1NzNmYThmZDk4MWIiLCAicHJvY2Vzc19jcmVhdGlvbl90aW1lc3RhbXAiOiAiMjAyMi0wNS0yNFQxNDo0ODo0Ny4wMDY0NDlaIn1dfX0sIHsic2NoZW1hIjogImlnbHU6Y29tLm1lbHRhbm8vcHJvamVjdF9jb250ZXh0L2pzb25zY2hlbWEvMS0wLTAiLCAiZGF0YSI6IHsiY29udGV4dF91dWlkIjogIjJhNmFjMjhmLTA5NDYtNDIyNC05ZmM4LWQ1N2U1NTFlYzU5NCIsICJwcm9qZWN0X3V1aWQiOiAiMWE3ZDk5ODktZjcwNC00ODViLWJhN2EtYTc0Nzg3MDNjYTdiIiwgInByb2plY3RfdXVpZF9zb3VyY2UiOiAiZXhwbGljaXQiLCAiY2xpZW50X3V1aWQiOiAiYmMwM2RkYzgtN2MyNS00NTdjLTk1NTYtNjE1M2M4MzZmMTk1IiwgImVudmlyb25tZW50X25hbWVfaGFzaCI6IG51bGx9fSwgeyJzY2hlbWEiOiAiaWdsdTpjb20ubWVsdGFuby9jbGlfY29udGV4dC9qc29uc2NoZW1hLzEtMC0wIiwgImRhdGEiOiB7ImV2ZW50X3V1aWQiOiAiNmRkZDRjYjktMTVlYi00NjA2LTgyYzEtMWJkMjEwZjNkNzM5IiwgImNvbW1hbmQiOiAiaW52b2tlIiwgInN1Yl9jb21tYW5kIjogbnVsbCwgIm9wdGlvbl9rZXlzIjogW119fV19&tv=py-0.10.0&p=pc&lang=en_US&tz=America%2FChicago&stm=1654384608000 HTTP/1.1" 200 43 ``` We should proactively set the logging level of urllib3 to only print info or higher, e.g: ```python logging.getLogger("urllib3").setLevel(logging.INFO) ```
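A standalone sketch of the proposed quieting: `urllib3` is the logger name that `requests` emits these connection and request lines under, but the handler setup below is illustrative only, not Meltano's actual logging configuration.

```python
import logging

logging.basicConfig(level=logging.DEBUG)

noisy = logging.getLogger("urllib3.connectionpool")
noisy.debug("Starting new HTTP connection (1): localhost:9090")  # emitted

# Raising the threshold on the parent "urllib3" logger is inherited by
# children such as urllib3.connectionpool, silencing the request dumps.
logging.getLogger("urllib3").setLevel(logging.INFO)

noisy.debug('GET /i?e=ue&ue_px=... HTTP/1.1" 200 43')  # now suppressed
noisy.warning("connection problems still surface")     # still emitted
```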
[ { "content": "\"\"\"Various utilities for configuring logging in a meltano project.\"\"\"\nimport asyncio\nimport logging\nimport os\nfrom contextlib import suppress\nfrom logging import config as logging_config\nfrom typing import Dict, Optional\n\nimport structlog\nimport yaml\n\nfrom meltano.core.logging.formatters import LEVELED_TIMESTAMPED_PRE_CHAIN, TIMESTAMPER\nfrom meltano.core.project import Project\nfrom meltano.core.project_settings_service import ProjectSettingsService\n\ntry:\n from typing import Protocol # noqa: WPS433\nexcept ImportError:\n from typing_extensions import Protocol # noqa: WPS433,WPS440\n\n\nLEVELS = { # noqa: WPS407\n \"debug\": logging.DEBUG,\n \"info\": logging.INFO,\n \"warning\": logging.WARNING,\n \"error\": logging.ERROR,\n \"critical\": logging.CRITICAL,\n}\nDEFAULT_LEVEL = \"info\"\nFORMAT = \"[%(asctime)s] [%(process)d|%(threadName)10s|%(name)s] [%(levelname)s] %(message)s\" # noqa: WPS323\n\n\ndef parse_log_level(log_level: Dict[str, int]) -> int:\n \"\"\"Parse a level descriptor into an logging level.\n\n Args:\n log_level: level descriptor.\n\n Returns:\n int: actual logging level.\n \"\"\"\n return LEVELS.get(log_level, LEVELS[DEFAULT_LEVEL])\n\n\ndef read_config(config_file: Optional[str] = None) -> dict:\n \"\"\"Read a logging config yaml from disk.\n\n Args:\n config_file: path to the config file to read.\n\n Returns:\n dict: parsed yaml config\n \"\"\"\n if config_file and os.path.exists(config_file):\n with open(config_file) as cf:\n return yaml.safe_load(cf.read())\n else:\n return None\n\n\ndef default_config(log_level: str) -> dict:\n \"\"\"Generate a default logging config.\n\n Args:\n log_level: set log levels to provided level.\n\n Returns:\n dict: logging config suitable for use with logging.config.dictConfig\n \"\"\"\n return {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"colored\": {\n \"()\": structlog.stdlib.ProcessorFormatter,\n \"processor\": structlog.dev.ConsoleRenderer(colors=True),\n \"foreign_pre_chain\": LEVELED_TIMESTAMPED_PRE_CHAIN,\n },\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": log_level.upper(),\n \"formatter\": \"colored\",\n \"stream\": \"ext://sys.stderr\",\n },\n },\n \"loggers\": {\n \"\": {\n \"handlers\": [\"console\"],\n \"level\": log_level.upper(),\n \"propagate\": True,\n },\n \"snowplow_tracker.emitters\": {\n \"handlers\": [\"console\"],\n \"level\": logging.ERROR,\n },\n },\n }\n\n\ndef setup_logging(project: Project = None, log_level: str = DEFAULT_LEVEL) -> None:\n \"\"\"Configure logging for a meltano project.\n\n Args:\n project: the meltano project\n log_level: set log levels to provided level.\n \"\"\"\n # Mimick Python 3.8's `force=True` kwarg to override any\n # existing logger handlers\n # See https://github.com/python/cpython/commit/cf67d6a934b51b1f97e72945b596477b271f70b8\n root = logging.getLogger()\n for handler in root.handlers[:]:\n root.removeHandler(handler)\n handler.close()\n\n log_level = DEFAULT_LEVEL.upper()\n log_config = None\n\n if project:\n settings_service = ProjectSettingsService(project)\n log_config = settings_service.get(\"cli.log_config\")\n log_level = settings_service.get(\"cli.log_level\")\n\n config = read_config(log_config) or default_config(log_level)\n logging_config.dictConfig(config)\n structlog.configure(\n processors=[\n structlog.stdlib.add_log_level,\n structlog.stdlib.PositionalArgumentsFormatter(),\n TIMESTAMPER,\n structlog.processors.StackInfoRenderer(),\n 
structlog.processors.format_exc_info,\n structlog.stdlib.ProcessorFormatter.wrap_for_formatter,\n ],\n logger_factory=structlog.stdlib.LoggerFactory(),\n wrapper_class=structlog.stdlib.BoundLogger,\n cache_logger_on_first_use=True,\n )\n\n\ndef change_console_log_level(log_level: int = logging.DEBUG) -> None:\n \"\"\"Change the log level for the current root logger, but only on the 'console' handler.\n\n Most useful when you want change the log level on the fly for console output, but want to respect other aspects\n of any potential logging.yaml sourced configs. Note that if a logging.yaml config without a 'console' handler\n is used, this will not override the log level.\n\n Args:\n log_level: set log levels to provided level.\n \"\"\"\n root_logger = logging.getLogger()\n root_logger.setLevel(log_level)\n for handler in root_logger.handlers:\n if handler.name == \"console\":\n handler.setLevel(log_level)\n\n\nclass SubprocessOutputWriter(Protocol):\n \"\"\"SubprocessOutputWriter is a basic interface definition suitable for use with capture_subprocess_output.\"\"\"\n\n def writelines(self, lines: str):\n \"\"\"Any type with a writelines method accepting a string could be used as an output writer.\n\n Args:\n lines: string to write\n \"\"\"\n pass\n\n\nasync def _write_line_writer(writer, line):\n # StreamWriters like a subprocess's stdin need special consideration\n if isinstance(writer, asyncio.StreamWriter):\n try: # noqa: WPS229\n writer.write(line)\n await writer.drain()\n except (BrokenPipeError, ConnectionResetError):\n with suppress(AttributeError): # `wait_closed` is Python 3.7+\n await writer.wait_closed()\n\n return False\n else:\n writer.writeline(line.decode())\n\n return True\n\n\nasync def capture_subprocess_output(\n reader: Optional[asyncio.StreamReader], *line_writers: SubprocessOutputWriter\n) -> None:\n \"\"\"Capture in real time the output stream of a suprocess that is run async.\n\n The stream has been set to asyncio.subprocess.PIPE and is provided using\n reader to this function.\n\n As new lines are captured for reader, they are written to output_stream.\n This async function should be run with await asyncio.wait() while waiting\n for the subprocess to end.\n\n Args:\n reader: asyncio.StreamReader object that is the output stream of the subprocess.\n line_writers: any object thats a StreamWriter or has a writelines method accepting a string.\n \"\"\"\n while not reader.at_eof():\n line = await reader.readline()\n if not line:\n continue\n\n for writer in line_writers:\n if not await _write_line_writer(writer, line):\n # If the destination stream is closed, we can stop capturing output.\n return\n", "path": "src/meltano/core/logging/utils.py" } ]
[ { "content": "\"\"\"Various utilities for configuring logging in a meltano project.\"\"\"\nimport asyncio\nimport logging\nimport os\nfrom contextlib import suppress\nfrom logging import config as logging_config\nfrom typing import Dict, Optional\n\nimport structlog\nimport yaml\n\nfrom meltano.core.logging.formatters import LEVELED_TIMESTAMPED_PRE_CHAIN, TIMESTAMPER\nfrom meltano.core.project import Project\nfrom meltano.core.project_settings_service import ProjectSettingsService\n\ntry:\n from typing import Protocol # noqa: WPS433\nexcept ImportError:\n from typing_extensions import Protocol # noqa: WPS433,WPS440\n\n\nLEVELS = { # noqa: WPS407\n \"debug\": logging.DEBUG,\n \"info\": logging.INFO,\n \"warning\": logging.WARNING,\n \"error\": logging.ERROR,\n \"critical\": logging.CRITICAL,\n}\nDEFAULT_LEVEL = \"info\"\nFORMAT = \"[%(asctime)s] [%(process)d|%(threadName)10s|%(name)s] [%(levelname)s] %(message)s\" # noqa: WPS323\n\n\ndef parse_log_level(log_level: Dict[str, int]) -> int:\n \"\"\"Parse a level descriptor into an logging level.\n\n Args:\n log_level: level descriptor.\n\n Returns:\n int: actual logging level.\n \"\"\"\n return LEVELS.get(log_level, LEVELS[DEFAULT_LEVEL])\n\n\ndef read_config(config_file: Optional[str] = None) -> dict:\n \"\"\"Read a logging config yaml from disk.\n\n Args:\n config_file: path to the config file to read.\n\n Returns:\n dict: parsed yaml config\n \"\"\"\n if config_file and os.path.exists(config_file):\n with open(config_file) as cf:\n return yaml.safe_load(cf.read())\n else:\n return None\n\n\ndef default_config(log_level: str) -> dict:\n \"\"\"Generate a default logging config.\n\n Args:\n log_level: set log levels to provided level.\n\n Returns:\n dict: logging config suitable for use with logging.config.dictConfig\n \"\"\"\n return {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"colored\": {\n \"()\": structlog.stdlib.ProcessorFormatter,\n \"processor\": structlog.dev.ConsoleRenderer(colors=True),\n \"foreign_pre_chain\": LEVELED_TIMESTAMPED_PRE_CHAIN,\n },\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": log_level.upper(),\n \"formatter\": \"colored\",\n \"stream\": \"ext://sys.stderr\",\n },\n },\n \"loggers\": {\n \"\": {\n \"handlers\": [\"console\"],\n \"level\": log_level.upper(),\n \"propagate\": True,\n },\n \"snowplow_tracker.emitters\": {\n \"handlers\": [\"console\"],\n \"level\": logging.ERROR,\n },\n \"urllib3\": {\n \"handlers\": [\"console\"],\n \"level\": logging.INFO,\n \"propagate\": False,\n },\n },\n }\n\n\ndef setup_logging(project: Project = None, log_level: str = DEFAULT_LEVEL) -> None:\n \"\"\"Configure logging for a meltano project.\n\n Args:\n project: the meltano project\n log_level: set log levels to provided level.\n \"\"\"\n # Mimick Python 3.8's `force=True` kwarg to override any\n # existing logger handlers\n # See https://github.com/python/cpython/commit/cf67d6a934b51b1f97e72945b596477b271f70b8\n root = logging.getLogger()\n for handler in root.handlers[:]:\n root.removeHandler(handler)\n handler.close()\n\n log_level = DEFAULT_LEVEL.upper()\n log_config = None\n\n if project:\n settings_service = ProjectSettingsService(project)\n log_config = settings_service.get(\"cli.log_config\")\n log_level = settings_service.get(\"cli.log_level\")\n\n config = read_config(log_config) or default_config(log_level)\n logging_config.dictConfig(config)\n structlog.configure(\n processors=[\n structlog.stdlib.add_log_level,\n 
structlog.stdlib.PositionalArgumentsFormatter(),\n TIMESTAMPER,\n structlog.processors.StackInfoRenderer(),\n structlog.processors.format_exc_info,\n structlog.stdlib.ProcessorFormatter.wrap_for_formatter,\n ],\n logger_factory=structlog.stdlib.LoggerFactory(),\n wrapper_class=structlog.stdlib.BoundLogger,\n cache_logger_on_first_use=True,\n )\n\n\ndef change_console_log_level(log_level: int = logging.DEBUG) -> None:\n \"\"\"Change the log level for the current root logger, but only on the 'console' handler.\n\n Most useful when you want change the log level on the fly for console output, but want to respect other aspects\n of any potential logging.yaml sourced configs. Note that if a logging.yaml config without a 'console' handler\n is used, this will not override the log level.\n\n Args:\n log_level: set log levels to provided level.\n \"\"\"\n root_logger = logging.getLogger()\n root_logger.setLevel(log_level)\n for handler in root_logger.handlers:\n if handler.name == \"console\":\n handler.setLevel(log_level)\n\n\nclass SubprocessOutputWriter(Protocol):\n \"\"\"SubprocessOutputWriter is a basic interface definition suitable for use with capture_subprocess_output.\"\"\"\n\n def writelines(self, lines: str):\n \"\"\"Any type with a writelines method accepting a string could be used as an output writer.\n\n Args:\n lines: string to write\n \"\"\"\n pass\n\n\nasync def _write_line_writer(writer, line):\n # StreamWriters like a subprocess's stdin need special consideration\n if isinstance(writer, asyncio.StreamWriter):\n try: # noqa: WPS229\n writer.write(line)\n await writer.drain()\n except (BrokenPipeError, ConnectionResetError):\n with suppress(AttributeError): # `wait_closed` is Python 3.7+\n await writer.wait_closed()\n\n return False\n else:\n writer.writeline(line.decode())\n\n return True\n\n\nasync def capture_subprocess_output(\n reader: Optional[asyncio.StreamReader], *line_writers: SubprocessOutputWriter\n) -> None:\n \"\"\"Capture in real time the output stream of a suprocess that is run async.\n\n The stream has been set to asyncio.subprocess.PIPE and is provided using\n reader to this function.\n\n As new lines are captured for reader, they are written to output_stream.\n This async function should be run with await asyncio.wait() while waiting\n for the subprocess to end.\n\n Args:\n reader: asyncio.StreamReader object that is the output stream of the subprocess.\n line_writers: any object thats a StreamWriter or has a writelines method accepting a string.\n \"\"\"\n while not reader.at_eof():\n line = await reader.readline()\n if not line:\n continue\n\n for writer in line_writers:\n if not await _write_line_writer(writer, line):\n # If the destination stream is closed, we can stop capturing output.\n return\n", "path": "src/meltano/core/logging/utils.py" } ]
diff --git a/src/meltano/core/logging/utils.py b/src/meltano/core/logging/utils.py index 823aac8f5a..f483d7e182 100644 --- a/src/meltano/core/logging/utils.py +++ b/src/meltano/core/logging/utils.py @@ -95,6 +95,11 @@ def default_config(log_level: str) -> dict: "handlers": ["console"], "level": logging.ERROR, }, + "urllib3": { + "handlers": ["console"], + "level": logging.INFO, + "propagate": False, + }, }, }
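One detail of the merged config worth calling out: the new `urllib3` entry attaches the `console` handler directly and sets `propagate: False`, so its records are filtered at `INFO` and are not also re-handled by the root logger. A tiny standalone illustration of that shape (not Meltano's full config):

```python
import logging
import logging.config

logging.config.dictConfig({
    "version": 1,
    "handlers": {"console": {"class": "logging.StreamHandler", "level": "DEBUG"}},
    "loggers": {
        "": {"handlers": ["console"], "level": "DEBUG"},
        # Own handler + propagate=False: filtered here, emitted once.
        "urllib3": {"handlers": ["console"], "level": "INFO", "propagate": False},
    },
})

logging.getLogger("urllib3").debug("dropped: below the urllib3 threshold")
logging.getLogger("urllib3").info("printed once, not duplicated via root")
```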
pallets__click-1825
resolve_path differs on Windows depending on Python version Hi. There was an issue filed under [Typer](https://github.com/tiangolo/typer/issues/244#issuecomment-792455309) that gives a full explanation, but basically the use of `os.path.realpath` in the `resolve_path` logic for click.Path differs between Python 3.7 and 3.8 on Windows. Prior to 3.8, `os.path.realpath` did not resolve symlinks on Windows. Therefore Click users on Windows using Python 3.7 or lower get the wrong results for resolve_path. More info: https://docs.python.org/3/library/os.path.html#os.path.realpath
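A self-contained illustration of the discrepancy and one portable workaround. The Windows/3.7 behavior noted in the comments comes from the report above; `pathlib.Path.resolve()`, which does follow symlinks on supported platforms, is shown as one possible way to get consistent results, not necessarily the change Click shipped.

```python
import os
import pathlib
import tempfile

# Creating symlinks may need extra privileges on Windows; on POSIX this
# runs as-is and both calls agree. The divergence is Windows-specific:
# under Python < 3.8 there, os.path.realpath() merely normalizes the
# path and leaves the symlink unresolved.
with tempfile.TemporaryDirectory() as tmp:
    target = os.path.join(tmp, "target.txt")
    open(target, "w").close()
    link = os.path.join(tmp, "link.txt")
    os.symlink(target, link)

    print(os.path.realpath(link))        # symlink not followed on Windows + 3.7
    print(pathlib.Path(link).resolve())  # follows symlinks consistently
```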
[ { "content": "import os\nimport stat\nimport typing as t\nfrom datetime import datetime\n\nfrom ._compat import _get_argv_encoding\nfrom ._compat import filename_to_ui\nfrom ._compat import get_filesystem_encoding\nfrom ._compat import get_strerror\nfrom ._compat import open_stream\nfrom .exceptions import BadParameter\nfrom .utils import LazyFile\nfrom .utils import safecall\n\n\nclass ParamType:\n \"\"\"Represents the type of a parameter. Validates and converts values\n from the command line or Python into the correct type.\n\n To implement a custom type, subclass and implement at least the\n following:\n\n - The :attr:`name` class attribute must be set.\n - Calling an instance of the type with ``None`` must return\n ``None``. This is already implemented by default.\n - :meth:`convert` must convert string values to the correct type.\n - :meth:`convert` must accept values that are already the correct\n type.\n - It must be able to convert a value if the ``ctx`` and ``param``\n arguments are ``None``. This can occur when converting prompt\n input.\n \"\"\"\n\n is_composite = False\n\n #: the descriptive name of this type\n name: t.Optional[str] = None\n\n #: if a list of this type is expected and the value is pulled from a\n #: string environment variable, this is what splits it up. `None`\n #: means any whitespace. For all parameters the general rule is that\n #: whitespace splits them up. The exception are paths and files which\n #: are split by ``os.path.pathsep`` by default (\":\" on Unix and \";\" on\n #: Windows).\n envvar_list_splitter: t.ClassVar[t.Optional[str]] = None\n\n def to_info_dict(self):\n \"\"\"Gather information that could be useful for a tool generating\n user-facing documentation.\n\n Use :meth:`click.Context.to_info_dict` to traverse the entire\n CLI structure.\n\n .. versionadded:: 8.0\n \"\"\"\n # The class name without the \"ParamType\" suffix.\n param_type = type(self).__name__.partition(\"ParamType\")[0]\n param_type = param_type.partition(\"ParameterType\")[0]\n return {\"param_type\": param_type, \"name\": self.name}\n\n def __call__(self, value, param=None, ctx=None):\n if value is not None:\n return self.convert(value, param, ctx)\n\n def get_metavar(self, param):\n \"\"\"Returns the metavar default for this param if it provides one.\"\"\"\n\n def get_missing_message(self, param):\n \"\"\"Optionally might return extra information about a missing\n parameter.\n\n .. versionadded:: 2.0\n \"\"\"\n\n def convert(self, value, param, ctx):\n \"\"\"Convert the value to the correct type. This is not called if\n the value is ``None`` (the missing value).\n\n This must accept string values from the command line, as well as\n values that are already the correct type. It may also convert\n other compatible types.\n\n The ``param`` and ``ctx`` arguments may be ``None`` in certain\n situations, such as when converting prompt input.\n\n If the value cannot be converted, call :meth:`fail` with a\n descriptive message.\n\n :param value: The value to convert.\n :param param: The parameter that is using this type to convert\n its value. May be ``None``.\n :param ctx: The current context that arrived at this value. May\n be ``None``.\n \"\"\"\n return value\n\n def split_envvar_value(self, rv):\n \"\"\"Given a value from an environment variable this splits it up\n into small chunks depending on the defined envvar list splitter.\n\n If the splitter is set to `None`, which means that whitespace splits,\n then leading and trailing whitespace is ignored. 
Otherwise, leading\n and trailing splitters usually lead to empty items being included.\n \"\"\"\n return (rv or \"\").split(self.envvar_list_splitter)\n\n def fail(self, message, param=None, ctx=None):\n \"\"\"Helper method to fail with an invalid value message.\"\"\"\n raise BadParameter(message, ctx=ctx, param=param)\n\n def shell_complete(self, ctx, param, incomplete):\n \"\"\"Return a list of\n :class:`~click.shell_completion.CompletionItem` objects for the\n incomplete value. Most types do not provide completions, but\n some do, and this allows custom types to provide custom\n completions as well.\n\n :param ctx: Invocation context for this command.\n :param param: The parameter that is requesting completion.\n :param incomplete: Value being completed. May be empty.\n\n .. versionadded:: 8.0\n \"\"\"\n return []\n\n\nclass CompositeParamType(ParamType):\n is_composite = True\n\n @property\n def arity(self):\n raise NotImplementedError()\n\n\nclass FuncParamType(ParamType):\n def __init__(self, func):\n self.name = func.__name__\n self.func = func\n\n def to_info_dict(self):\n info_dict = super().to_info_dict()\n info_dict[\"func\"] = self.func\n return info_dict\n\n def convert(self, value, param, ctx):\n try:\n return self.func(value)\n except ValueError:\n try:\n value = str(value)\n except UnicodeError:\n value = value.decode(\"utf-8\", \"replace\")\n\n self.fail(value, param, ctx)\n\n\nclass UnprocessedParamType(ParamType):\n name = \"text\"\n\n def convert(self, value, param, ctx):\n return value\n\n def __repr__(self):\n return \"UNPROCESSED\"\n\n\nclass StringParamType(ParamType):\n name = \"text\"\n\n def convert(self, value, param, ctx):\n if isinstance(value, bytes):\n enc = _get_argv_encoding()\n try:\n value = value.decode(enc)\n except UnicodeError:\n fs_enc = get_filesystem_encoding()\n if fs_enc != enc:\n try:\n value = value.decode(fs_enc)\n except UnicodeError:\n value = value.decode(\"utf-8\", \"replace\")\n else:\n value = value.decode(\"utf-8\", \"replace\")\n return value\n return str(value)\n\n def __repr__(self):\n return \"STRING\"\n\n\nclass Choice(ParamType):\n \"\"\"The choice type allows a value to be checked against a fixed set\n of supported values. All of these values have to be strings.\n\n You should only pass a list or tuple of choices. Other iterables\n (like generators) may lead to surprising results.\n\n The resulting value will always be one of the originally passed choices\n regardless of ``case_sensitive`` or any ``ctx.token_normalize_func``\n being specified.\n\n See :ref:`choice-opts` for an example.\n\n :param case_sensitive: Set to false to make choices case\n insensitive. 
Defaults to true.\n \"\"\"\n\n name = \"choice\"\n\n def __init__(self, choices, case_sensitive=True):\n self.choices = choices\n self.case_sensitive = case_sensitive\n\n def to_info_dict(self):\n info_dict = super().to_info_dict()\n info_dict[\"choices\"] = self.choices\n info_dict[\"case_sensitive\"] = self.case_sensitive\n return info_dict\n\n def get_metavar(self, param):\n choices_str = \"|\".join(self.choices)\n\n # Use curly braces to indicate a required argument.\n if param.required and param.param_type_name == \"argument\":\n return f\"{{{choices_str}}}\"\n\n # Use square braces to indicate an option or optional argument.\n return f\"[{choices_str}]\"\n\n def get_missing_message(self, param):\n choice_str = \",\\n\\t\".join(self.choices)\n return f\"Choose from:\\n\\t{choice_str}\"\n\n def convert(self, value, param, ctx):\n # Match through normalization and case sensitivity\n # first do token_normalize_func, then lowercase\n # preserve original `value` to produce an accurate message in\n # `self.fail`\n normed_value = value\n normed_choices = {choice: choice for choice in self.choices}\n\n if ctx is not None and ctx.token_normalize_func is not None:\n normed_value = ctx.token_normalize_func(value)\n normed_choices = {\n ctx.token_normalize_func(normed_choice): original\n for normed_choice, original in normed_choices.items()\n }\n\n if not self.case_sensitive:\n normed_value = normed_value.casefold()\n normed_choices = {\n normed_choice.casefold(): original\n for normed_choice, original in normed_choices.items()\n }\n\n if normed_value in normed_choices:\n return normed_choices[normed_value]\n\n one_of = \"one of \" if len(self.choices) > 1 else \"\"\n choices_str = \", \".join(repr(c) for c in self.choices)\n self.fail(f\"{value!r} is not {one_of}{choices_str}.\", param, ctx)\n\n def __repr__(self):\n return f\"Choice({list(self.choices)})\"\n\n def shell_complete(self, ctx, param, incomplete):\n \"\"\"Complete choices that start with the incomplete value.\n\n :param ctx: Invocation context for this command.\n :param param: The parameter that is requesting completion.\n :param incomplete: Value being completed. May be empty.\n\n .. versionadded:: 8.0\n \"\"\"\n from click.shell_completion import CompletionItem\n\n str_choices = map(str, self.choices)\n\n if self.case_sensitive:\n matched = (c for c in str_choices if c.startswith(incomplete))\n else:\n incomplete = incomplete.lower()\n matched = (c for c in str_choices if c.lower().startswith(incomplete))\n\n return [CompletionItem(c) for c in matched]\n\n\nclass DateTime(ParamType):\n \"\"\"The DateTime type converts date strings into `datetime` objects.\n\n The format strings which are checked are configurable, but default to some\n common (non-timezone aware) ISO 8601 formats.\n\n When specifying *DateTime* formats, you should only pass a list or a tuple.\n Other iterables, like generators, may lead to surprising results.\n\n The format strings are processed using ``datetime.strptime``, and this\n consequently defines the format strings which are allowed.\n\n Parsing is tried using each format, in order, and the first format which\n parses successfully is used.\n\n :param formats: A list or tuple of date format strings, in the order in\n which they should be tried. 
Defaults to\n ``'%Y-%m-%d'``, ``'%Y-%m-%dT%H:%M:%S'``,\n ``'%Y-%m-%d %H:%M:%S'``.\n \"\"\"\n\n name = \"datetime\"\n\n def __init__(self, formats=None):\n self.formats = formats or [\"%Y-%m-%d\", \"%Y-%m-%dT%H:%M:%S\", \"%Y-%m-%d %H:%M:%S\"]\n\n def to_info_dict(self):\n info_dict = super().to_info_dict()\n info_dict[\"formats\"] = self.formats\n return info_dict\n\n def get_metavar(self, param):\n return f\"[{'|'.join(self.formats)}]\"\n\n def _try_to_convert_date(self, value, format):\n try:\n return datetime.strptime(value, format)\n except ValueError:\n return None\n\n def convert(self, value, param, ctx):\n if isinstance(value, datetime):\n return value\n\n for format in self.formats:\n converted = self._try_to_convert_date(value, format)\n\n if converted is not None:\n return converted\n\n plural = \"s\" if len(self.formats) > 1 else \"\"\n formats_str = \", \".join(repr(f) for f in self.formats)\n self.fail(\n f\"{value!r} does not match the format{plural} {formats_str}.\", param, ctx\n )\n\n def __repr__(self):\n return \"DateTime\"\n\n\nclass _NumberParamTypeBase(ParamType):\n _number_class: t.ClassVar[t.Optional[t.Type]] = None\n\n def convert(self, value, param, ctx):\n try:\n return self._number_class(value)\n except ValueError:\n self.fail(f\"{value!r} is not a valid {self.name}.\", param, ctx)\n\n\nclass _NumberRangeBase(_NumberParamTypeBase):\n def __init__(self, min=None, max=None, min_open=False, max_open=False, clamp=False):\n self.min = min\n self.max = max\n self.min_open = min_open\n self.max_open = max_open\n self.clamp = clamp\n\n def to_info_dict(self):\n info_dict = super().to_info_dict()\n info_dict.update(\n min=self.min,\n max=self.max,\n min_open=self.min_open,\n max_open=self.max_open,\n clamp=self.clamp,\n )\n return info_dict\n\n def convert(self, value, param, ctx):\n import operator\n\n rv = super().convert(value, param, ctx)\n lt_min = self.min is not None and (\n operator.le if self.min_open else operator.lt\n )(rv, self.min)\n gt_max = self.max is not None and (\n operator.ge if self.max_open else operator.gt\n )(rv, self.max)\n\n if self.clamp:\n if lt_min:\n return self._clamp(self.min, 1, self.min_open)\n\n if gt_max:\n return self._clamp(self.max, -1, self.max_open)\n\n if lt_min or gt_max:\n self.fail(f\"{rv} is not in the range {self._describe_range()}.\", param, ctx)\n\n return rv\n\n def _clamp(self, bound, dir, open):\n \"\"\"Find the valid value to clamp to bound in the given\n direction.\n\n :param bound: The boundary value.\n :param dir: 1 or -1 indicating the direction to move.\n :param open: If true, the range does not include the bound.\n \"\"\"\n raise NotImplementedError\n\n def _describe_range(self):\n \"\"\"Describe the range for use in help text.\"\"\"\n if self.min is None:\n op = \"<\" if self.max_open else \"<=\"\n return f\"x{op}{self.max}\"\n\n if self.max is None:\n op = \">\" if self.min_open else \">=\"\n return f\"x{op}{self.min}\"\n\n lop = \"<\" if self.min_open else \"<=\"\n rop = \"<\" if self.max_open else \"<=\"\n return f\"{self.min}{lop}x{rop}{self.max}\"\n\n def __repr__(self):\n clamp = \" clamped\" if self.clamp else \"\"\n return f\"<{type(self).__name__} {self._describe_range()}{clamp}>\"\n\n\nclass IntParamType(_NumberParamTypeBase):\n name = \"integer\"\n _number_class = int\n\n def __repr__(self):\n return \"INT\"\n\n\nclass IntRange(_NumberRangeBase, IntParamType):\n \"\"\"Restrict an :data:`click.INT` value to a range of accepted\n values. 
See :ref:`ranges`.\n\n If ``min`` or ``max`` are not passed, any value is accepted in that\n direction. If ``min_open`` or ``max_open`` are enabled, the\n corresponding boundary is not included in the range.\n\n If ``clamp`` is enabled, a value outside the range is clamped to the\n boundary instead of failing.\n\n .. versionchanged:: 8.0\n Added the ``min_open`` and ``max_open`` parameters.\n \"\"\"\n\n name = \"integer range\"\n\n def _clamp(self, bound, dir, open):\n if not open:\n return bound\n\n return bound + dir\n\n\nclass FloatParamType(_NumberParamTypeBase):\n name = \"float\"\n _number_class = float\n\n def __repr__(self):\n return \"FLOAT\"\n\n\nclass FloatRange(_NumberRangeBase, FloatParamType):\n \"\"\"Restrict a :data:`click.FLOAT` value to a range of accepted\n values. See :ref:`ranges`.\n\n If ``min`` or ``max`` are not passed, any value is accepted in that\n direction. If ``min_open`` or ``max_open`` are enabled, the\n corresponding boundary is not included in the range.\n\n If ``clamp`` is enabled, a value outside the range is clamped to the\n boundary instead of failing. This is not supported if either\n boundary is marked ``open``.\n\n .. versionchanged:: 8.0\n Added the ``min_open`` and ``max_open`` parameters.\n \"\"\"\n\n name = \"float range\"\n\n def __init__(self, min=None, max=None, min_open=False, max_open=False, clamp=False):\n super().__init__(\n min=min, max=max, min_open=min_open, max_open=max_open, clamp=clamp\n )\n\n if (min_open or max_open) and clamp:\n raise TypeError(\"Clamping is not supported for open bounds.\")\n\n def _clamp(self, bound, dir, open):\n if not open:\n return bound\n\n # Could use Python 3.9's math.nextafter here, but clamping an\n # open float range doesn't seem to be particularly useful. It's\n # left up to the user to write a callback to do it if needed.\n raise RuntimeError(\"Clamping is not supported for open bounds.\")\n\n\nclass BoolParamType(ParamType):\n name = \"boolean\"\n\n def convert(self, value, param, ctx):\n if value in {False, True}:\n return bool(value)\n\n norm = value.strip().lower()\n\n if norm in {\"1\", \"true\", \"t\", \"yes\", \"y\", \"on\"}:\n return True\n\n if norm in {\"0\", \"false\", \"f\", \"no\", \"n\", \"off\"}:\n return False\n\n self.fail(f\"{value!r} is not a valid boolean.\", param, ctx)\n\n def __repr__(self):\n return \"BOOL\"\n\n\nclass UUIDParameterType(ParamType):\n name = \"uuid\"\n\n def convert(self, value, param, ctx):\n import uuid\n\n if isinstance(value, uuid.UUID):\n return value\n\n value = value.strip()\n\n try:\n return uuid.UUID(value)\n except ValueError:\n self.fail(f\"{value!r} is not a valid UUID.\", param, ctx)\n\n def __repr__(self):\n return \"UUID\"\n\n\nclass File(ParamType):\n \"\"\"Declares a parameter to be a file for reading or writing. The file\n is automatically closed once the context tears down (after the command\n finished working).\n\n Files can be opened for reading or writing. The special value ``-``\n indicates stdin or stdout depending on the mode.\n\n By default, the file is opened for reading text data, but it can also be\n opened in binary mode or for writing. The encoding parameter can be used\n to force a specific encoding.\n\n The `lazy` flag controls if the file should be opened immediately or upon\n first IO. The default is to be non-lazy for standard input and output\n streams as well as files opened for reading, `lazy` otherwise. 
When opening a\n file lazily for reading, it is still opened temporarily for validation, but\n will not be held open until first IO. lazy is mainly useful when opening\n for writing to avoid creating the file until it is needed.\n\n Starting with Click 2.0, files can also be opened atomically in which\n case all writes go into a separate file in the same folder and upon\n completion the file will be moved over to the original location. This\n is useful if a file regularly read by other users is modified.\n\n See :ref:`file-args` for more information.\n \"\"\"\n\n name = \"filename\"\n envvar_list_splitter = os.path.pathsep\n\n def __init__(\n self, mode=\"r\", encoding=None, errors=\"strict\", lazy=None, atomic=False\n ):\n self.mode = mode\n self.encoding = encoding\n self.errors = errors\n self.lazy = lazy\n self.atomic = atomic\n\n def to_info_dict(self):\n info_dict = super().to_info_dict()\n info_dict.update(mode=self.mode, encoding=self.encoding)\n return info_dict\n\n def resolve_lazy_flag(self, value):\n if self.lazy is not None:\n return self.lazy\n if value == \"-\":\n return False\n elif \"w\" in self.mode:\n return True\n return False\n\n def convert(self, value, param, ctx):\n try:\n if hasattr(value, \"read\") or hasattr(value, \"write\"):\n return value\n\n lazy = self.resolve_lazy_flag(value)\n\n if lazy:\n f = LazyFile(\n value, self.mode, self.encoding, self.errors, atomic=self.atomic\n )\n if ctx is not None:\n ctx.call_on_close(f.close_intelligently)\n return f\n\n f, should_close = open_stream(\n value, self.mode, self.encoding, self.errors, atomic=self.atomic\n )\n # If a context is provided, we automatically close the file\n # at the end of the context execution (or flush out). If a\n # context does not exist, it's the caller's responsibility to\n # properly close the file. This for instance happens when the\n # type is used with prompts.\n if ctx is not None:\n if should_close:\n ctx.call_on_close(safecall(f.close))\n else:\n ctx.call_on_close(safecall(f.flush))\n return f\n except OSError as e: # noqa: B014\n self.fail(f\"{filename_to_ui(value)!r}: {get_strerror(e)}\", param, ctx)\n\n def shell_complete(self, ctx, param, incomplete):\n \"\"\"Return a special completion marker that tells the completion\n system to use the shell to provide file path completions.\n\n :param ctx: Invocation context for this command.\n :param param: The parameter that is requesting completion.\n :param incomplete: Value being completed. May be empty.\n\n .. versionadded:: 8.0\n \"\"\"\n from click.shell_completion import CompletionItem\n\n return [CompletionItem(incomplete, type=\"file\")]\n\n\nclass Path(ParamType):\n \"\"\"The path type is similar to the :class:`File` type but it performs\n different checks. First of all, instead of returning an open file\n handle it returns just the filename. Secondly, it can perform various\n basic checks about what the file or directory should be.\n\n :param exists: if set to true, the file or directory needs to exist for\n this value to be valid. If this is not required and a\n file does indeed not exist, then all further checks are\n silently skipped.\n :param file_okay: controls if a file is a possible value.\n :param dir_okay: controls if a directory is a possible value.\n :param writable: if true, a writable check is performed.\n :param readable: if true, a readable check is performed.\n :param resolve_path: if this is true, then the path is fully resolved\n before the value is passed onwards. 
This means\n that it's absolute and symlinks are resolved. It\n will not expand a tilde-prefix, as this is\n supposed to be done by the shell only.\n :param allow_dash: If this is set to `True`, a single dash to indicate\n standard streams is permitted.\n :param path_type: Convert the incoming path value to this type. If\n ``None``, keep Python's default, which is ``str``. Useful to\n convert to :class:`pathlib.Path`.\n\n .. versionchanged:: 8.0\n Allow passing ``type=pathlib.Path``.\n\n .. versionchanged:: 6.0\n Added the ``allow_dash`` parameter.\n \"\"\"\n\n envvar_list_splitter = os.path.pathsep\n\n def __init__(\n self,\n exists=False,\n file_okay=True,\n dir_okay=True,\n writable=False,\n readable=True,\n resolve_path=False,\n allow_dash=False,\n path_type=None,\n ):\n self.exists = exists\n self.file_okay = file_okay\n self.dir_okay = dir_okay\n self.writable = writable\n self.readable = readable\n self.resolve_path = resolve_path\n self.allow_dash = allow_dash\n self.type = path_type\n\n if self.file_okay and not self.dir_okay:\n self.name = \"file\"\n elif self.dir_okay and not self.file_okay:\n self.name = \"directory\"\n else:\n self.name = \"path\"\n\n def to_info_dict(self):\n info_dict = super().to_info_dict()\n info_dict.update(\n exists=self.exists,\n file_okay=self.file_okay,\n dir_okay=self.dir_okay,\n writable=self.writable,\n readable=self.readable,\n allow_dash=self.allow_dash,\n )\n return info_dict\n\n def coerce_path_result(self, rv):\n if self.type is not None and not isinstance(rv, self.type):\n if self.type is str:\n rv = os.fsdecode(rv)\n elif self.type is bytes:\n rv = os.fsencode(rv)\n else:\n rv = self.type(rv)\n\n return rv\n\n def convert(self, value, param, ctx):\n rv = value\n\n is_dash = self.file_okay and self.allow_dash and rv in (b\"-\", \"-\")\n\n if not is_dash:\n if self.resolve_path:\n rv = os.path.realpath(rv)\n\n try:\n st = os.stat(rv)\n except OSError:\n if not self.exists:\n return self.coerce_path_result(rv)\n self.fail(\n f\"{self.name.title()} {filename_to_ui(value)!r} does not exist.\",\n param,\n ctx,\n )\n\n if not self.file_okay and stat.S_ISREG(st.st_mode):\n self.fail(\n f\"{self.name.title()} {filename_to_ui(value)!r} is a file.\",\n param,\n ctx,\n )\n if not self.dir_okay and stat.S_ISDIR(st.st_mode):\n self.fail(\n f\"{self.name.title()} {filename_to_ui(value)!r} is a directory.\",\n param,\n ctx,\n )\n if self.writable and not os.access(value, os.W_OK):\n self.fail(\n f\"{self.name.title()} {filename_to_ui(value)!r} is not writable.\",\n param,\n ctx,\n )\n if self.readable and not os.access(value, os.R_OK):\n self.fail(\n f\"{self.name.title()} {filename_to_ui(value)!r} is not readable.\",\n param,\n ctx,\n )\n\n return self.coerce_path_result(rv)\n\n def shell_complete(self, ctx, param, incomplete):\n \"\"\"Return a special completion marker that tells the completion\n system to use the shell to provide path completions for only\n directories or any paths.\n\n :param ctx: Invocation context for this command.\n :param param: The parameter that is requesting completion.\n :param incomplete: Value being completed. May be empty.\n\n .. 
versionadded:: 8.0\n \"\"\"\n from click.shell_completion import CompletionItem\n\n type = \"dir\" if self.dir_okay and not self.file_okay else \"file\"\n return [CompletionItem(incomplete, type=type)]\n\n\nclass Tuple(CompositeParamType):\n \"\"\"The default behavior of Click is to apply a type on a value directly.\n This works well in most cases, except for when `nargs` is set to a fixed\n count and different types should be used for different items. In this\n case the :class:`Tuple` type can be used. This type can only be used\n if `nargs` is set to a fixed number.\n\n For more information see :ref:`tuple-type`.\n\n This can be selected by using a Python tuple literal as a type.\n\n :param types: a list of types that should be used for the tuple items.\n \"\"\"\n\n def __init__(self, types):\n self.types = [convert_type(ty) for ty in types]\n\n def to_info_dict(self):\n info_dict = super().to_info_dict()\n info_dict[\"types\"] = [t.to_info_dict() for t in self.types]\n return info_dict\n\n @property\n def name(self):\n return f\"<{' '.join(ty.name for ty in self.types)}>\"\n\n @property\n def arity(self):\n return len(self.types)\n\n def convert(self, value, param, ctx):\n if len(value) != len(self.types):\n raise TypeError(\n \"It would appear that nargs is set to conflict with the\"\n \" composite type arity.\"\n )\n return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value))\n\n\ndef convert_type(ty, default=None):\n \"\"\"Find the most appropriate :class:`ParamType` for the given Python\n type. If the type isn't provided, it can be inferred from a default\n value.\n \"\"\"\n guessed_type = False\n\n if ty is None and default is not None:\n if isinstance(default, (tuple, list)):\n # If the default is empty, ty will remain None and will\n # return STRING.\n if default:\n item = default[0]\n\n # A tuple of tuples needs to detect the inner types.\n # Can't call convert recursively because that would\n # incorrectly unwind the tuple to a single type.\n if isinstance(item, (tuple, list)):\n ty = tuple(map(type, item))\n else:\n ty = type(item)\n else:\n ty = type(default)\n\n guessed_type = True\n\n if isinstance(ty, tuple):\n return Tuple(ty)\n\n if isinstance(ty, ParamType):\n return ty\n\n if ty is str or ty is None:\n return STRING\n\n if ty is int:\n return INT\n\n if ty is float:\n return FLOAT\n\n # Booleans are only okay if not guessed. For is_flag options with\n # flag_value, default=True indicates which flag_value is the\n # default.\n if ty is bool and not guessed_type:\n return BOOL\n\n if guessed_type:\n return STRING\n\n if __debug__:\n try:\n if issubclass(ty, ParamType):\n raise AssertionError(\n f\"Attempted to use an uninstantiated parameter type ({ty}).\"\n )\n except TypeError:\n # ty is an instance (correct), so issubclass fails.\n pass\n\n return FuncParamType(ty)\n\n\n#: A dummy parameter type that just does nothing. From a user's\n#: perspective this appears to just be the same as `STRING` but\n#: internally no string conversion takes place if the input was bytes.\n#: This is usually useful when working with file paths as they can\n#: appear in bytes and unicode.\n#:\n#: For path related uses the :class:`Path` type is a better choice but\n#: there are situations where an unprocessed type is useful which is why\n#: it is is provided.\n#:\n#: .. versionadded:: 4.0\nUNPROCESSED = UnprocessedParamType()\n\n#: A unicode string parameter type which is the implicit default. 
This\n#: can also be selected by using ``str`` as type.\nSTRING = StringParamType()\n\n#: An integer parameter. This can also be selected by using ``int`` as\n#: type.\nINT = IntParamType()\n\n#: A floating point value parameter. This can also be selected by using\n#: ``float`` as type.\nFLOAT = FloatParamType()\n\n#: A boolean parameter. This is the default for boolean flags. This can\n#: also be selected by using ``bool`` as a type.\nBOOL = BoolParamType()\n\n#: A UUID parameter.\nUUID = UUIDParameterType()\n", "path": "src/click/types.py" } ]
[ { "content": "import os\nimport stat\nimport typing as t\nfrom datetime import datetime\n\nfrom ._compat import _get_argv_encoding\nfrom ._compat import filename_to_ui\nfrom ._compat import get_filesystem_encoding\nfrom ._compat import get_strerror\nfrom ._compat import open_stream\nfrom .exceptions import BadParameter\nfrom .utils import LazyFile\nfrom .utils import safecall\n\n\nclass ParamType:\n \"\"\"Represents the type of a parameter. Validates and converts values\n from the command line or Python into the correct type.\n\n To implement a custom type, subclass and implement at least the\n following:\n\n - The :attr:`name` class attribute must be set.\n - Calling an instance of the type with ``None`` must return\n ``None``. This is already implemented by default.\n - :meth:`convert` must convert string values to the correct type.\n - :meth:`convert` must accept values that are already the correct\n type.\n - It must be able to convert a value if the ``ctx`` and ``param``\n arguments are ``None``. This can occur when converting prompt\n input.\n \"\"\"\n\n is_composite = False\n\n #: the descriptive name of this type\n name: t.Optional[str] = None\n\n #: if a list of this type is expected and the value is pulled from a\n #: string environment variable, this is what splits it up. `None`\n #: means any whitespace. For all parameters the general rule is that\n #: whitespace splits them up. The exception are paths and files which\n #: are split by ``os.path.pathsep`` by default (\":\" on Unix and \";\" on\n #: Windows).\n envvar_list_splitter: t.ClassVar[t.Optional[str]] = None\n\n def to_info_dict(self):\n \"\"\"Gather information that could be useful for a tool generating\n user-facing documentation.\n\n Use :meth:`click.Context.to_info_dict` to traverse the entire\n CLI structure.\n\n .. versionadded:: 8.0\n \"\"\"\n # The class name without the \"ParamType\" suffix.\n param_type = type(self).__name__.partition(\"ParamType\")[0]\n param_type = param_type.partition(\"ParameterType\")[0]\n return {\"param_type\": param_type, \"name\": self.name}\n\n def __call__(self, value, param=None, ctx=None):\n if value is not None:\n return self.convert(value, param, ctx)\n\n def get_metavar(self, param):\n \"\"\"Returns the metavar default for this param if it provides one.\"\"\"\n\n def get_missing_message(self, param):\n \"\"\"Optionally might return extra information about a missing\n parameter.\n\n .. versionadded:: 2.0\n \"\"\"\n\n def convert(self, value, param, ctx):\n \"\"\"Convert the value to the correct type. This is not called if\n the value is ``None`` (the missing value).\n\n This must accept string values from the command line, as well as\n values that are already the correct type. It may also convert\n other compatible types.\n\n The ``param`` and ``ctx`` arguments may be ``None`` in certain\n situations, such as when converting prompt input.\n\n If the value cannot be converted, call :meth:`fail` with a\n descriptive message.\n\n :param value: The value to convert.\n :param param: The parameter that is using this type to convert\n its value. May be ``None``.\n :param ctx: The current context that arrived at this value. May\n be ``None``.\n \"\"\"\n return value\n\n def split_envvar_value(self, rv):\n \"\"\"Given a value from an environment variable this splits it up\n into small chunks depending on the defined envvar list splitter.\n\n If the splitter is set to `None`, which means that whitespace splits,\n then leading and trailing whitespace is ignored. 
Otherwise, leading\n and trailing splitters usually lead to empty items being included.\n \"\"\"\n return (rv or \"\").split(self.envvar_list_splitter)\n\n def fail(self, message, param=None, ctx=None):\n \"\"\"Helper method to fail with an invalid value message.\"\"\"\n raise BadParameter(message, ctx=ctx, param=param)\n\n def shell_complete(self, ctx, param, incomplete):\n \"\"\"Return a list of\n :class:`~click.shell_completion.CompletionItem` objects for the\n incomplete value. Most types do not provide completions, but\n some do, and this allows custom types to provide custom\n completions as well.\n\n :param ctx: Invocation context for this command.\n :param param: The parameter that is requesting completion.\n :param incomplete: Value being completed. May be empty.\n\n .. versionadded:: 8.0\n \"\"\"\n return []\n\n\nclass CompositeParamType(ParamType):\n is_composite = True\n\n @property\n def arity(self):\n raise NotImplementedError()\n\n\nclass FuncParamType(ParamType):\n def __init__(self, func):\n self.name = func.__name__\n self.func = func\n\n def to_info_dict(self):\n info_dict = super().to_info_dict()\n info_dict[\"func\"] = self.func\n return info_dict\n\n def convert(self, value, param, ctx):\n try:\n return self.func(value)\n except ValueError:\n try:\n value = str(value)\n except UnicodeError:\n value = value.decode(\"utf-8\", \"replace\")\n\n self.fail(value, param, ctx)\n\n\nclass UnprocessedParamType(ParamType):\n name = \"text\"\n\n def convert(self, value, param, ctx):\n return value\n\n def __repr__(self):\n return \"UNPROCESSED\"\n\n\nclass StringParamType(ParamType):\n name = \"text\"\n\n def convert(self, value, param, ctx):\n if isinstance(value, bytes):\n enc = _get_argv_encoding()\n try:\n value = value.decode(enc)\n except UnicodeError:\n fs_enc = get_filesystem_encoding()\n if fs_enc != enc:\n try:\n value = value.decode(fs_enc)\n except UnicodeError:\n value = value.decode(\"utf-8\", \"replace\")\n else:\n value = value.decode(\"utf-8\", \"replace\")\n return value\n return str(value)\n\n def __repr__(self):\n return \"STRING\"\n\n\nclass Choice(ParamType):\n \"\"\"The choice type allows a value to be checked against a fixed set\n of supported values. All of these values have to be strings.\n\n You should only pass a list or tuple of choices. Other iterables\n (like generators) may lead to surprising results.\n\n The resulting value will always be one of the originally passed choices\n regardless of ``case_sensitive`` or any ``ctx.token_normalize_func``\n being specified.\n\n See :ref:`choice-opts` for an example.\n\n :param case_sensitive: Set to false to make choices case\n insensitive. 
Defaults to true.\n \"\"\"\n\n name = \"choice\"\n\n def __init__(self, choices, case_sensitive=True):\n self.choices = choices\n self.case_sensitive = case_sensitive\n\n def to_info_dict(self):\n info_dict = super().to_info_dict()\n info_dict[\"choices\"] = self.choices\n info_dict[\"case_sensitive\"] = self.case_sensitive\n return info_dict\n\n def get_metavar(self, param):\n choices_str = \"|\".join(self.choices)\n\n # Use curly braces to indicate a required argument.\n if param.required and param.param_type_name == \"argument\":\n return f\"{{{choices_str}}}\"\n\n # Use square braces to indicate an option or optional argument.\n return f\"[{choices_str}]\"\n\n def get_missing_message(self, param):\n choice_str = \",\\n\\t\".join(self.choices)\n return f\"Choose from:\\n\\t{choice_str}\"\n\n def convert(self, value, param, ctx):\n # Match through normalization and case sensitivity\n # first do token_normalize_func, then lowercase\n # preserve original `value` to produce an accurate message in\n # `self.fail`\n normed_value = value\n normed_choices = {choice: choice for choice in self.choices}\n\n if ctx is not None and ctx.token_normalize_func is not None:\n normed_value = ctx.token_normalize_func(value)\n normed_choices = {\n ctx.token_normalize_func(normed_choice): original\n for normed_choice, original in normed_choices.items()\n }\n\n if not self.case_sensitive:\n normed_value = normed_value.casefold()\n normed_choices = {\n normed_choice.casefold(): original\n for normed_choice, original in normed_choices.items()\n }\n\n if normed_value in normed_choices:\n return normed_choices[normed_value]\n\n one_of = \"one of \" if len(self.choices) > 1 else \"\"\n choices_str = \", \".join(repr(c) for c in self.choices)\n self.fail(f\"{value!r} is not {one_of}{choices_str}.\", param, ctx)\n\n def __repr__(self):\n return f\"Choice({list(self.choices)})\"\n\n def shell_complete(self, ctx, param, incomplete):\n \"\"\"Complete choices that start with the incomplete value.\n\n :param ctx: Invocation context for this command.\n :param param: The parameter that is requesting completion.\n :param incomplete: Value being completed. May be empty.\n\n .. versionadded:: 8.0\n \"\"\"\n from click.shell_completion import CompletionItem\n\n str_choices = map(str, self.choices)\n\n if self.case_sensitive:\n matched = (c for c in str_choices if c.startswith(incomplete))\n else:\n incomplete = incomplete.lower()\n matched = (c for c in str_choices if c.lower().startswith(incomplete))\n\n return [CompletionItem(c) for c in matched]\n\n\nclass DateTime(ParamType):\n \"\"\"The DateTime type converts date strings into `datetime` objects.\n\n The format strings which are checked are configurable, but default to some\n common (non-timezone aware) ISO 8601 formats.\n\n When specifying *DateTime* formats, you should only pass a list or a tuple.\n Other iterables, like generators, may lead to surprising results.\n\n The format strings are processed using ``datetime.strptime``, and this\n consequently defines the format strings which are allowed.\n\n Parsing is tried using each format, in order, and the first format which\n parses successfully is used.\n\n :param formats: A list or tuple of date format strings, in the order in\n which they should be tried. 
Defaults to\n ``'%Y-%m-%d'``, ``'%Y-%m-%dT%H:%M:%S'``,\n ``'%Y-%m-%d %H:%M:%S'``.\n \"\"\"\n\n name = \"datetime\"\n\n def __init__(self, formats=None):\n self.formats = formats or [\"%Y-%m-%d\", \"%Y-%m-%dT%H:%M:%S\", \"%Y-%m-%d %H:%M:%S\"]\n\n def to_info_dict(self):\n info_dict = super().to_info_dict()\n info_dict[\"formats\"] = self.formats\n return info_dict\n\n def get_metavar(self, param):\n return f\"[{'|'.join(self.formats)}]\"\n\n def _try_to_convert_date(self, value, format):\n try:\n return datetime.strptime(value, format)\n except ValueError:\n return None\n\n def convert(self, value, param, ctx):\n if isinstance(value, datetime):\n return value\n\n for format in self.formats:\n converted = self._try_to_convert_date(value, format)\n\n if converted is not None:\n return converted\n\n plural = \"s\" if len(self.formats) > 1 else \"\"\n formats_str = \", \".join(repr(f) for f in self.formats)\n self.fail(\n f\"{value!r} does not match the format{plural} {formats_str}.\", param, ctx\n )\n\n def __repr__(self):\n return \"DateTime\"\n\n\nclass _NumberParamTypeBase(ParamType):\n _number_class: t.ClassVar[t.Optional[t.Type]] = None\n\n def convert(self, value, param, ctx):\n try:\n return self._number_class(value)\n except ValueError:\n self.fail(f\"{value!r} is not a valid {self.name}.\", param, ctx)\n\n\nclass _NumberRangeBase(_NumberParamTypeBase):\n def __init__(self, min=None, max=None, min_open=False, max_open=False, clamp=False):\n self.min = min\n self.max = max\n self.min_open = min_open\n self.max_open = max_open\n self.clamp = clamp\n\n def to_info_dict(self):\n info_dict = super().to_info_dict()\n info_dict.update(\n min=self.min,\n max=self.max,\n min_open=self.min_open,\n max_open=self.max_open,\n clamp=self.clamp,\n )\n return info_dict\n\n def convert(self, value, param, ctx):\n import operator\n\n rv = super().convert(value, param, ctx)\n lt_min = self.min is not None and (\n operator.le if self.min_open else operator.lt\n )(rv, self.min)\n gt_max = self.max is not None and (\n operator.ge if self.max_open else operator.gt\n )(rv, self.max)\n\n if self.clamp:\n if lt_min:\n return self._clamp(self.min, 1, self.min_open)\n\n if gt_max:\n return self._clamp(self.max, -1, self.max_open)\n\n if lt_min or gt_max:\n self.fail(f\"{rv} is not in the range {self._describe_range()}.\", param, ctx)\n\n return rv\n\n def _clamp(self, bound, dir, open):\n \"\"\"Find the valid value to clamp to bound in the given\n direction.\n\n :param bound: The boundary value.\n :param dir: 1 or -1 indicating the direction to move.\n :param open: If true, the range does not include the bound.\n \"\"\"\n raise NotImplementedError\n\n def _describe_range(self):\n \"\"\"Describe the range for use in help text.\"\"\"\n if self.min is None:\n op = \"<\" if self.max_open else \"<=\"\n return f\"x{op}{self.max}\"\n\n if self.max is None:\n op = \">\" if self.min_open else \">=\"\n return f\"x{op}{self.min}\"\n\n lop = \"<\" if self.min_open else \"<=\"\n rop = \"<\" if self.max_open else \"<=\"\n return f\"{self.min}{lop}x{rop}{self.max}\"\n\n def __repr__(self):\n clamp = \" clamped\" if self.clamp else \"\"\n return f\"<{type(self).__name__} {self._describe_range()}{clamp}>\"\n\n\nclass IntParamType(_NumberParamTypeBase):\n name = \"integer\"\n _number_class = int\n\n def __repr__(self):\n return \"INT\"\n\n\nclass IntRange(_NumberRangeBase, IntParamType):\n \"\"\"Restrict an :data:`click.INT` value to a range of accepted\n values. 
See :ref:`ranges`.\n\n If ``min`` or ``max`` are not passed, any value is accepted in that\n direction. If ``min_open`` or ``max_open`` are enabled, the\n corresponding boundary is not included in the range.\n\n If ``clamp`` is enabled, a value outside the range is clamped to the\n boundary instead of failing.\n\n .. versionchanged:: 8.0\n Added the ``min_open`` and ``max_open`` parameters.\n \"\"\"\n\n name = \"integer range\"\n\n def _clamp(self, bound, dir, open):\n if not open:\n return bound\n\n return bound + dir\n\n\nclass FloatParamType(_NumberParamTypeBase):\n name = \"float\"\n _number_class = float\n\n def __repr__(self):\n return \"FLOAT\"\n\n\nclass FloatRange(_NumberRangeBase, FloatParamType):\n \"\"\"Restrict a :data:`click.FLOAT` value to a range of accepted\n values. See :ref:`ranges`.\n\n If ``min`` or ``max`` are not passed, any value is accepted in that\n direction. If ``min_open`` or ``max_open`` are enabled, the\n corresponding boundary is not included in the range.\n\n If ``clamp`` is enabled, a value outside the range is clamped to the\n boundary instead of failing. This is not supported if either\n boundary is marked ``open``.\n\n .. versionchanged:: 8.0\n Added the ``min_open`` and ``max_open`` parameters.\n \"\"\"\n\n name = \"float range\"\n\n def __init__(self, min=None, max=None, min_open=False, max_open=False, clamp=False):\n super().__init__(\n min=min, max=max, min_open=min_open, max_open=max_open, clamp=clamp\n )\n\n if (min_open or max_open) and clamp:\n raise TypeError(\"Clamping is not supported for open bounds.\")\n\n def _clamp(self, bound, dir, open):\n if not open:\n return bound\n\n # Could use Python 3.9's math.nextafter here, but clamping an\n # open float range doesn't seem to be particularly useful. It's\n # left up to the user to write a callback to do it if needed.\n raise RuntimeError(\"Clamping is not supported for open bounds.\")\n\n\nclass BoolParamType(ParamType):\n name = \"boolean\"\n\n def convert(self, value, param, ctx):\n if value in {False, True}:\n return bool(value)\n\n norm = value.strip().lower()\n\n if norm in {\"1\", \"true\", \"t\", \"yes\", \"y\", \"on\"}:\n return True\n\n if norm in {\"0\", \"false\", \"f\", \"no\", \"n\", \"off\"}:\n return False\n\n self.fail(f\"{value!r} is not a valid boolean.\", param, ctx)\n\n def __repr__(self):\n return \"BOOL\"\n\n\nclass UUIDParameterType(ParamType):\n name = \"uuid\"\n\n def convert(self, value, param, ctx):\n import uuid\n\n if isinstance(value, uuid.UUID):\n return value\n\n value = value.strip()\n\n try:\n return uuid.UUID(value)\n except ValueError:\n self.fail(f\"{value!r} is not a valid UUID.\", param, ctx)\n\n def __repr__(self):\n return \"UUID\"\n\n\nclass File(ParamType):\n \"\"\"Declares a parameter to be a file for reading or writing. The file\n is automatically closed once the context tears down (after the command\n finished working).\n\n Files can be opened for reading or writing. The special value ``-``\n indicates stdin or stdout depending on the mode.\n\n By default, the file is opened for reading text data, but it can also be\n opened in binary mode or for writing. The encoding parameter can be used\n to force a specific encoding.\n\n The `lazy` flag controls if the file should be opened immediately or upon\n first IO. The default is to be non-lazy for standard input and output\n streams as well as files opened for reading, `lazy` otherwise. 
When opening a\n file lazily for reading, it is still opened temporarily for validation, but\n will not be held open until first IO. lazy is mainly useful when opening\n for writing to avoid creating the file until it is needed.\n\n Starting with Click 2.0, files can also be opened atomically in which\n case all writes go into a separate file in the same folder and upon\n completion the file will be moved over to the original location. This\n is useful if a file regularly read by other users is modified.\n\n See :ref:`file-args` for more information.\n \"\"\"\n\n name = \"filename\"\n envvar_list_splitter = os.path.pathsep\n\n def __init__(\n self, mode=\"r\", encoding=None, errors=\"strict\", lazy=None, atomic=False\n ):\n self.mode = mode\n self.encoding = encoding\n self.errors = errors\n self.lazy = lazy\n self.atomic = atomic\n\n def to_info_dict(self):\n info_dict = super().to_info_dict()\n info_dict.update(mode=self.mode, encoding=self.encoding)\n return info_dict\n\n def resolve_lazy_flag(self, value):\n if self.lazy is not None:\n return self.lazy\n if value == \"-\":\n return False\n elif \"w\" in self.mode:\n return True\n return False\n\n def convert(self, value, param, ctx):\n try:\n if hasattr(value, \"read\") or hasattr(value, \"write\"):\n return value\n\n lazy = self.resolve_lazy_flag(value)\n\n if lazy:\n f = LazyFile(\n value, self.mode, self.encoding, self.errors, atomic=self.atomic\n )\n if ctx is not None:\n ctx.call_on_close(f.close_intelligently)\n return f\n\n f, should_close = open_stream(\n value, self.mode, self.encoding, self.errors, atomic=self.atomic\n )\n # If a context is provided, we automatically close the file\n # at the end of the context execution (or flush out). If a\n # context does not exist, it's the caller's responsibility to\n # properly close the file. This for instance happens when the\n # type is used with prompts.\n if ctx is not None:\n if should_close:\n ctx.call_on_close(safecall(f.close))\n else:\n ctx.call_on_close(safecall(f.flush))\n return f\n except OSError as e: # noqa: B014\n self.fail(f\"{filename_to_ui(value)!r}: {get_strerror(e)}\", param, ctx)\n\n def shell_complete(self, ctx, param, incomplete):\n \"\"\"Return a special completion marker that tells the completion\n system to use the shell to provide file path completions.\n\n :param ctx: Invocation context for this command.\n :param param: The parameter that is requesting completion.\n :param incomplete: Value being completed. May be empty.\n\n .. versionadded:: 8.0\n \"\"\"\n from click.shell_completion import CompletionItem\n\n return [CompletionItem(incomplete, type=\"file\")]\n\n\nclass Path(ParamType):\n \"\"\"The path type is similar to the :class:`File` type but it performs\n different checks. First of all, instead of returning an open file\n handle it returns just the filename. Secondly, it can perform various\n basic checks about what the file or directory should be.\n\n :param exists: if set to true, the file or directory needs to exist for\n this value to be valid. If this is not required and a\n file does indeed not exist, then all further checks are\n silently skipped.\n :param file_okay: controls if a file is a possible value.\n :param dir_okay: controls if a directory is a possible value.\n :param writable: if true, a writable check is performed.\n :param readable: if true, a readable check is performed.\n :param resolve_path: if this is true, then the path is fully resolved\n before the value is passed onwards. 
This means\n that it's absolute and symlinks are resolved. It\n will not expand a tilde-prefix, as this is\n supposed to be done by the shell only.\n :param allow_dash: If this is set to `True`, a single dash to indicate\n standard streams is permitted.\n :param path_type: Convert the incoming path value to this type. If\n ``None``, keep Python's default, which is ``str``. Useful to\n convert to :class:`pathlib.Path`.\n\n .. versionchanged:: 8.0\n Allow passing ``type=pathlib.Path``.\n\n .. versionchanged:: 6.0\n Added the ``allow_dash`` parameter.\n \"\"\"\n\n envvar_list_splitter = os.path.pathsep\n\n def __init__(\n self,\n exists=False,\n file_okay=True,\n dir_okay=True,\n writable=False,\n readable=True,\n resolve_path=False,\n allow_dash=False,\n path_type=None,\n ):\n self.exists = exists\n self.file_okay = file_okay\n self.dir_okay = dir_okay\n self.writable = writable\n self.readable = readable\n self.resolve_path = resolve_path\n self.allow_dash = allow_dash\n self.type = path_type\n\n if self.file_okay and not self.dir_okay:\n self.name = \"file\"\n elif self.dir_okay and not self.file_okay:\n self.name = \"directory\"\n else:\n self.name = \"path\"\n\n def to_info_dict(self):\n info_dict = super().to_info_dict()\n info_dict.update(\n exists=self.exists,\n file_okay=self.file_okay,\n dir_okay=self.dir_okay,\n writable=self.writable,\n readable=self.readable,\n allow_dash=self.allow_dash,\n )\n return info_dict\n\n def coerce_path_result(self, rv):\n if self.type is not None and not isinstance(rv, self.type):\n if self.type is str:\n rv = os.fsdecode(rv)\n elif self.type is bytes:\n rv = os.fsencode(rv)\n else:\n rv = self.type(rv)\n\n return rv\n\n def convert(self, value, param, ctx):\n rv = value\n\n is_dash = self.file_okay and self.allow_dash and rv in (b\"-\", \"-\")\n\n if not is_dash:\n if self.resolve_path:\n # realpath on Windows Python < 3.8 doesn't resolve symlinks\n if os.path.islink(rv):\n rv = os.readlink(rv)\n\n rv = os.path.realpath(rv)\n\n try:\n st = os.stat(rv)\n except OSError:\n if not self.exists:\n return self.coerce_path_result(rv)\n self.fail(\n f\"{self.name.title()} {filename_to_ui(value)!r} does not exist.\",\n param,\n ctx,\n )\n\n if not self.file_okay and stat.S_ISREG(st.st_mode):\n self.fail(\n f\"{self.name.title()} {filename_to_ui(value)!r} is a file.\",\n param,\n ctx,\n )\n if not self.dir_okay and stat.S_ISDIR(st.st_mode):\n self.fail(\n f\"{self.name.title()} {filename_to_ui(value)!r} is a directory.\",\n param,\n ctx,\n )\n if self.writable and not os.access(value, os.W_OK):\n self.fail(\n f\"{self.name.title()} {filename_to_ui(value)!r} is not writable.\",\n param,\n ctx,\n )\n if self.readable and not os.access(value, os.R_OK):\n self.fail(\n f\"{self.name.title()} {filename_to_ui(value)!r} is not readable.\",\n param,\n ctx,\n )\n\n return self.coerce_path_result(rv)\n\n def shell_complete(self, ctx, param, incomplete):\n \"\"\"Return a special completion marker that tells the completion\n system to use the shell to provide path completions for only\n directories or any paths.\n\n :param ctx: Invocation context for this command.\n :param param: The parameter that is requesting completion.\n :param incomplete: Value being completed. May be empty.\n\n .. 
versionadded:: 8.0\n \"\"\"\n from click.shell_completion import CompletionItem\n\n type = \"dir\" if self.dir_okay and not self.file_okay else \"file\"\n return [CompletionItem(incomplete, type=type)]\n\n\nclass Tuple(CompositeParamType):\n \"\"\"The default behavior of Click is to apply a type on a value directly.\n This works well in most cases, except for when `nargs` is set to a fixed\n count and different types should be used for different items. In this\n case the :class:`Tuple` type can be used. This type can only be used\n if `nargs` is set to a fixed number.\n\n For more information see :ref:`tuple-type`.\n\n This can be selected by using a Python tuple literal as a type.\n\n :param types: a list of types that should be used for the tuple items.\n \"\"\"\n\n def __init__(self, types):\n self.types = [convert_type(ty) for ty in types]\n\n def to_info_dict(self):\n info_dict = super().to_info_dict()\n info_dict[\"types\"] = [t.to_info_dict() for t in self.types]\n return info_dict\n\n @property\n def name(self):\n return f\"<{' '.join(ty.name for ty in self.types)}>\"\n\n @property\n def arity(self):\n return len(self.types)\n\n def convert(self, value, param, ctx):\n if len(value) != len(self.types):\n raise TypeError(\n \"It would appear that nargs is set to conflict with the\"\n \" composite type arity.\"\n )\n return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value))\n\n\ndef convert_type(ty, default=None):\n \"\"\"Find the most appropriate :class:`ParamType` for the given Python\n type. If the type isn't provided, it can be inferred from a default\n value.\n \"\"\"\n guessed_type = False\n\n if ty is None and default is not None:\n if isinstance(default, (tuple, list)):\n # If the default is empty, ty will remain None and will\n # return STRING.\n if default:\n item = default[0]\n\n # A tuple of tuples needs to detect the inner types.\n # Can't call convert recursively because that would\n # incorrectly unwind the tuple to a single type.\n if isinstance(item, (tuple, list)):\n ty = tuple(map(type, item))\n else:\n ty = type(item)\n else:\n ty = type(default)\n\n guessed_type = True\n\n if isinstance(ty, tuple):\n return Tuple(ty)\n\n if isinstance(ty, ParamType):\n return ty\n\n if ty is str or ty is None:\n return STRING\n\n if ty is int:\n return INT\n\n if ty is float:\n return FLOAT\n\n # Booleans are only okay if not guessed. For is_flag options with\n # flag_value, default=True indicates which flag_value is the\n # default.\n if ty is bool and not guessed_type:\n return BOOL\n\n if guessed_type:\n return STRING\n\n if __debug__:\n try:\n if issubclass(ty, ParamType):\n raise AssertionError(\n f\"Attempted to use an uninstantiated parameter type ({ty}).\"\n )\n except TypeError:\n # ty is an instance (correct), so issubclass fails.\n pass\n\n return FuncParamType(ty)\n\n\n#: A dummy parameter type that just does nothing. From a user's\n#: perspective this appears to just be the same as `STRING` but\n#: internally no string conversion takes place if the input was bytes.\n#: This is usually useful when working with file paths as they can\n#: appear in bytes and unicode.\n#:\n#: For path related uses the :class:`Path` type is a better choice but\n#: there are situations where an unprocessed type is useful which is why\n#: it is is provided.\n#:\n#: .. versionadded:: 4.0\nUNPROCESSED = UnprocessedParamType()\n\n#: A unicode string parameter type which is the implicit default. 
This\n#: can also be selected by using ``str`` as type.\nSTRING = StringParamType()\n\n#: An integer parameter. This can also be selected by using ``int`` as\n#: type.\nINT = IntParamType()\n\n#: A floating point value parameter. This can also be selected by using\n#: ``float`` as type.\nFLOAT = FloatParamType()\n\n#: A boolean parameter. This is the default for boolean flags. This can\n#: also be selected by using ``bool`` as a type.\nBOOL = BoolParamType()\n\n#: A UUID parameter.\nUUID = UUIDParameterType()\n", "path": "src/click/types.py" } ]
diff --git a/CHANGES.rst b/CHANGES.rst
index 406088e07..4f0dfb94e 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -187,6 +187,8 @@ Unreleased
 - Add a ``pass_meta_key`` decorator for passing a key from
   ``Context.meta``. This is useful for extensions using ``meta`` to
   store information. :issue:`1739`
+- ``Path`` ``resolve_path`` resolves symlinks on Windows Python < 3.8.
+  :issue:`1813`
 
 
 Version 7.1.2
diff --git a/src/click/types.py b/src/click/types.py
index 0ed73c099..6cf611fa7 100644
--- a/src/click/types.py
+++ b/src/click/types.py
@@ -734,6 +734,10 @@ def convert(self, value, param, ctx):
 
         if not is_dash:
             if self.resolve_path:
+                # realpath on Windows Python < 3.8 doesn't resolve symlinks
+                if os.path.islink(rv):
+                    rv = os.readlink(rv)
+
                 rv = os.path.realpath(rv)
 
             try:
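The four added lines in the `types.py` hunk above are the whole behavioral change. A standalone sketch of the patched resolution logic (the `_resolve` helper name is mine, not Click's):

```python
import os

def _resolve(path):
    # os.path.realpath() on Windows under Python < 3.8 does not follow
    # symlinks, so dereference one level explicitly first -- this is the
    # workaround the patch applies before normalizing.
    if os.path.islink(path):
        path = os.readlink(path)

    # realpath() still normalizes the path (and resolves symlinks on
    # platforms where it already works).
    return os.path.realpath(path)
```

One trade-off worth noting: `os.readlink` follows only a single level of indirection and may return a target relative to the link's directory, so this is a best-effort fix for the old-Python case rather than a full resolver.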
pyinstaller__pyinstaller-8555
with setuptools v70.0.0: `ModuleNotFoundError: No module named 'pkg_resources.extern'`

## Description of the issue

I have added some TODO notes below but wanted to submit this sooner than later for any other users running into this issue today to be able to find it.

This morning I noticed an error in my tests that exercise PyInstaller generated Windows .exe's in CI.

https://github.com/Chia-Network/chia-blockchain/actions/runs/9175546125/job/25229722015?pr=16898

```
Traceback (most recent call last):
  File "Lib\site-packages\PyInstaller\hooks\rthooks\pyi_rth_pkgres.py", line 158, in <module>
  File "Lib\site-packages\PyInstaller\hooks\rthooks\pyi_rth_pkgres.py", line 36, in _pyi_rthook
  File "PyInstaller\loader\pyimod02_importers.py", line 419, in exec_module
  File "pkg_resources\__init__.py", line 77, in <module>
ModuleNotFoundError: No module named 'pkg_resources.extern'
[2148] Failed to execute script 'pyi_rth_pkgres' due to unhandled exception!
```

First I correlated this with [the release of setuptools v70.0.0](https://pypi.org/project/setuptools/70.0.0/#history) a few hours earlier (and not a new PyInstaller release `:]`). After looking here and finding no issues reported, I checked over at setuptools and found https://github.com/pypa/setuptools/issues/4374. In that discussion I noted that the issue appears with https://github.com/pypa/setuptools/commit/e9995828311c5e0c843622ca2be85e7f09f1ff0d and not its parent commit. That commit does indeed change how some of the `pkg_resources.extern` imports are handled inside `pkg_resources`. Another developer provided an example, though that example has not yet resulted in recreation of the issue.

### Context information (for bug reports)

* Output of `pyinstaller --version`: not captured yet (TODO: add this to my CI run)
* Version of Python: 3.10
* Platform: Windows (GitHub Actions runner)
* How you installed Python: in-house action https://github.com/chia-network/actions/setup-python that should, in this case, pass through to upstream https://github.com/actions/setup-python
* Did you also try this on another platform? Does it work there? ~Similar Linux (Rocky and Ubuntu) builds as well as macOS (Intel and ARM) builds and tests seem to continue to work fine. I could afford to review these runs in more detail for other relevant changes, but have not quite yet.~ When forcing the new setuptools to be installed, this does happen on all of the platforms we build executables for (Rocky, Ubuntu (Intel and ARM), macOS (Intel and ARM), Windows)
* try the latest development version, using the following command: https://github.com/Chia-Network/chia-blockchain/actions/runs/9179289212/job/25241848658?pr=18051 shows the failure using `develop`, specifically 676584885f2dfa1f885ab6155a5eda9150892c03.
* follow *all* the instructions in our "If Things Go Wrong" Guide (https://github.com/pyinstaller/pyinstaller/wiki/If-Things-Go-Wrong) and

### Make sure [everything is packaged correctly](https://github.com/pyinstaller/pyinstaller/wiki/How-to-Report-Bugs#make-sure-everything-is-packaged-correctly)

* [x] start with clean installation
* [ ] use the latest development version
* [x] Run your frozen program **from a command window (shell)** — instead of double-clicking on it
* [ ] Package your program in **--onedir mode**
* [ ] Package **without UPX**, say: use the option `--noupx` or set `upx=False` in your .spec-file
* [ ] Repackage your application in **verbose/debug mode**. For this, pass the option `--debug` to `pyi-makespec` or `pyinstaller` or use `EXE(..., debug=1, ...)` in your .spec file.

### A minimal example program which shows the error

TODO: I will try to create this
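The minimal example above was left as a TODO; a plausible sketch of how small it could be (my construction, not the reporter's -- assumes setuptools 70.0.0 is pinned in the build environment before running `pyinstaller --onefile repro.py`):

```python
# repro.py
# The failure happens in PyInstaller's pyi_rth_pkgres runtime hook, which
# imports pkg_resources at startup -- with setuptools 70.0.0 bundled, the
# frozen binary dies before reaching this module's first line.
import pkg_resources  # noqa: F401

print("pkg_resources imported fine")
```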
[ { "content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2023, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nfrom PyInstaller.utils.hooks import collect_submodules, check_requirement, can_import_module\n\n# pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose\n# them as pkg_resources.extern.*\n\n# The `railroad` package is an optional requirement for `pyparsing`. `pyparsing.diagrams` depends on `railroad`, so\n# filter it out when `railroad` is not available.\nif can_import_module('railroad'):\n hiddenimports = collect_submodules('pkg_resources._vendor')\nelse:\n hiddenimports = collect_submodules(\n 'pkg_resources._vendor', filter=lambda name: 'pkg_resources._vendor.pyparsing.diagram' not in name\n )\n\n# pkg_resources v45.0 dropped support for Python 2 and added this module printing a warning. We could save some bytes if\n# we would replace this by a fake module.\nif check_requirement('setuptools >= 45.0.0, < 49.1.1'):\n hiddenimports.append('pkg_resources.py2_warn')\n\nexcludedimports = ['__main__']\n\n# Some more hidden imports. See:\n# https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/15#issuecomment-663699288 `packaging` can either be\n# its own package, or embedded in `pkg_resources._vendor.packaging`, or both.\nhiddenimports += collect_submodules('packaging')\n\n# As of v60.7, setuptools vendored jaraco and has pkg_resources use it. Currently, the pkg_resources._vendor.jaraco\n# namespace package cannot be automatically scanned due to limited support for pure namespace packages in our hook\n# utilities.\n#\n# In setuptools 60.7.0, the vendored jaraco.text package included \"Lorem Ipsum.txt\" data file, which also has to be\n# collected. However, the presence of the data file (and the resulting directory hierarchy) confuses the importer's\n# redirection logic; instead of trying to work-around that, tell user to upgrade or downgrade their setuptools.\nif check_requirement(\"setuptools == 60.7.0\"):\n raise SystemExit(\n \"ERROR: Setuptools 60.7.0 is incompatible with PyInstaller. \"\n \"Downgrade to an earlier version or upgrade to a later version.\"\n )\n# In setuptools 60.7.1, the \"Lorem Ipsum.txt\" data file was dropped from the vendored jaraco.text package, so we can\n# accommodate it with couple of hidden imports.\nelif check_requirement(\"setuptools >= 60.7.1\"):\n hiddenimports += [\n 'pkg_resources._vendor.jaraco.functools',\n 'pkg_resources._vendor.jaraco.context',\n 'pkg_resources._vendor.jaraco.text',\n ]\n", "path": "PyInstaller/hooks/hook-pkg_resources.py" } ]
[ { "content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2023, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nfrom PyInstaller.utils.hooks import collect_submodules, check_requirement, can_import_module\n\n# pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose\n# them as pkg_resources.extern.*\n\n# The `railroad` package is an optional requirement for `pyparsing`. `pyparsing.diagrams` depends on `railroad`, so\n# filter it out when `railroad` is not available.\nif can_import_module('railroad'):\n hiddenimports = collect_submodules('pkg_resources._vendor')\nelse:\n hiddenimports = collect_submodules(\n 'pkg_resources._vendor', filter=lambda name: 'pkg_resources._vendor.pyparsing.diagram' not in name\n )\n\n# pkg_resources v45.0 dropped support for Python 2 and added this module printing a warning. We could save some bytes if\n# we would replace this by a fake module.\nif check_requirement('setuptools >= 45.0.0, < 49.1.1'):\n hiddenimports.append('pkg_resources.py2_warn')\n\nexcludedimports = ['__main__']\n\n# Some more hidden imports. See:\n# https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/15#issuecomment-663699288 `packaging` can either be\n# its own package, or embedded in `pkg_resources._vendor.packaging`, or both.\nhiddenimports += collect_submodules('packaging')\n\n# As of v60.7, setuptools vendored jaraco and has pkg_resources use it. Currently, the pkg_resources._vendor.jaraco\n# namespace package cannot be automatically scanned due to limited support for pure namespace packages in our hook\n# utilities.\n#\n# In setuptools 60.7.0, the vendored jaraco.text package included \"Lorem Ipsum.txt\" data file, which also has to be\n# collected. However, the presence of the data file (and the resulting directory hierarchy) confuses the importer's\n# redirection logic; instead of trying to work-around that, tell user to upgrade or downgrade their setuptools.\nif check_requirement(\"setuptools == 60.7.0\"):\n raise SystemExit(\n \"ERROR: Setuptools 60.7.0 is incompatible with PyInstaller. \"\n \"Downgrade to an earlier version or upgrade to a later version.\"\n )\n# In setuptools 60.7.1, the \"Lorem Ipsum.txt\" data file was dropped from the vendored jaraco.text package, so we can\n# accommodate it with couple of hidden imports.\nelif check_requirement(\"setuptools >= 60.7.1\"):\n hiddenimports += [\n 'pkg_resources._vendor.jaraco.functools',\n 'pkg_resources._vendor.jaraco.context',\n 'pkg_resources._vendor.jaraco.text',\n ]\n\n# As of setuptools 70.0.0, we need pkg_resources.extern added to hidden imports.\nif check_requirement(\"setuptools >= 70.0.0\"):\n hiddenimports += [\n 'pkg_resources.extern',\n ]\n", "path": "PyInstaller/hooks/hook-pkg_resources.py" } ]
diff --git a/PyInstaller/hooks/hook-pkg_resources.py b/PyInstaller/hooks/hook-pkg_resources.py index b3817f8c65..dfcf382356 100644 --- a/PyInstaller/hooks/hook-pkg_resources.py +++ b/PyInstaller/hooks/hook-pkg_resources.py @@ -55,3 +55,9 @@ 'pkg_resources._vendor.jaraco.context', 'pkg_resources._vendor.jaraco.text', ] + +# As of setuptools 70.0.0, we need pkg_resources.extern added to hidden imports. +if check_requirement("setuptools >= 70.0.0"): + hiddenimports += [ + 'pkg_resources.extern', + ] diff --git a/news/8554.hooks.rst b/news/8554.hooks.rst new file mode 100644 index 0000000000..2720218797 --- /dev/null +++ b/news/8554.hooks.rst @@ -0,0 +1,2 @@ +Update ``pkg_resources`` hook for compatibility with ``setuptools`` v70.0.0 +and later (fix ``ModuleNotFoundError: No module named 'pkg_resources.extern'``).
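For readers skimming the hook above: the core pattern is to gate extra hidden imports on the installed setuptools version via `check_requirement`. Below is a minimal sketch of that pattern in isolation — `build_hiddenimports()` is a hypothetical wrapper added here purely for illustration; only `collect_submodules` and `check_requirement` are real PyInstaller hook helpers.

```python
# Minimal sketch of the version-gated hidden-import pattern from
# hook-pkg_resources.py. build_hiddenimports() is hypothetical; the
# two imported helpers are the real PyInstaller hook utilities.
from PyInstaller.utils.hooks import collect_submodules, check_requirement

def build_hiddenimports():
    hiddenimports = collect_submodules('pkg_resources._vendor')
    if check_requirement('setuptools >= 45.0.0, < 49.1.1'):
        hiddenimports.append('pkg_resources.py2_warn')
    if check_requirement('setuptools >= 70.0.0'):
        # As of setuptools 70.0.0, pkg_resources.extern must be listed
        # explicitly (see the diff above).
        hiddenimports.append('pkg_resources.extern')
    return hiddenimports
```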
pandas-dev__pandas-7007
Matplotlib cursor position wrong after using asfreq method to change freq of DateTimeIndex from None to something
After using the `asfreq` method to change the frequency of a time-series DataFrame from `None` to something e.g. `15Min` the cursor position in matplotlib graphs of that DataFrame is no longer correct (usually shows a datetime just after the unix epoch). The following demonstrates this (NB dt in df1 is not a constant):

```
df1 = pandas.read_csv('tseries1.csv', names=['tstamp', 'Q'], parse_dates=True, index_col='tstamp').clip_lower(0).fillna(0)
df1['T'] = pandas.read_csv('tseries2.csv', names=['tstamp', 'T'], parse_dates=True, index_col='tstamp', squeeze=True).clip_lower(0).fillna(0)
df2 = df1.asfreq(freq='15Min', method='ffill')
# NB df1.index.freq is None
# NB df2.index.freq is <15 * Minutes>
df1.plot()
df2.plot()
plt.show()
```

I find the Matplotlib cursor position to be invaluable when looking for features in very long time-series.

Versions:

- pandas master (commit ID 764b444)
- numpy 1.8
- matplotlib 1.3.0
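The root cause (visible in the diff further down) is that `tsplot` formatted the cursor's x coordinate as a Unix timestamp, while plots backed by a `PeriodIndex` hand Period ordinals to `format_coord`. A minimal sketch of the corrected formatter, assuming an axis whose `freq` attribute holds the plot frequency:

```python
# Sketch of the fixed coordinate formatter: the x value handed to
# format_coord is a Period ordinal, so rebuild the Period at the
# axis frequency instead of treating it as a Unix timestamp.
from pandas import Period

def make_format_coord(freq):
    def format_coord(t, y):
        return "t = {} y = {:8f}".format(Period(ordinal=int(t), freq=freq), y)
    return format_coord

# usage (hypothetical): ax.format_coord = make_format_coord(ax.freq)
```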
[ { "content": "\"\"\"\nPeriod formatters and locators adapted from scikits.timeseries by\nPierre GF Gerard-Marchant & Matt Knox\n\"\"\"\n\n#!!! TODO: Use the fact that axis can have units to simplify the process\nimport datetime as pydt\nfrom datetime import datetime\n\nfrom matplotlib import pylab\nimport matplotlib.units as units\n\nimport numpy as np\n\nfrom pandas import isnull\nfrom pandas.tseries.period import Period\nfrom pandas.tseries.offsets import DateOffset\nimport pandas.tseries.frequencies as frequencies\nfrom pandas.tseries.index import DatetimeIndex\nimport pandas.core.common as com\n\nfrom pandas.tseries.converter import (PeriodConverter, TimeSeries_DateLocator,\n TimeSeries_DateFormatter)\n\n#----------------------------------------------------------------------\n# Plotting functions and monkey patches\n\n\ndef tsplot(series, plotf, **kwargs):\n \"\"\"\n Plots a Series on the given Matplotlib axes or the current axes\n\n Parameters\n ----------\n axes : Axes\n series : Series\n\n Notes\n _____\n Supports same kwargs as Axes.plot\n\n \"\"\"\n # Used inferred freq is possible, need a test case for inferred\n if 'ax' in kwargs:\n ax = kwargs.pop('ax')\n else:\n import matplotlib.pyplot as plt\n ax = plt.gca()\n\n freq = _get_freq(ax, series)\n # resample against axes freq if necessary\n if freq is None: # pragma: no cover\n raise ValueError('Cannot use dynamic axis without frequency info')\n else:\n # Convert DatetimeIndex to PeriodIndex\n if isinstance(series.index, DatetimeIndex):\n series = series.to_period(freq=freq)\n freq, ax_freq, series = _maybe_resample(series, ax, freq, plotf,\n kwargs)\n\n # Set ax with freq info\n _decorate_axes(ax, freq, kwargs)\n\n # mask missing values\n args = _maybe_mask(series)\n\n # how to make sure ax.clear() flows through?\n if not hasattr(ax, '_plot_data'):\n ax._plot_data = []\n ax._plot_data.append((series, kwargs))\n\n # styles\n style = kwargs.pop('style', None)\n if style is not None:\n args.append(style)\n\n lines = plotf(ax, *args, **kwargs)\n label = kwargs.get('label', None)\n\n # set date formatter, locators and rescale limits\n format_dateaxis(ax, ax.freq)\n left, right = _get_xlim(ax.get_lines())\n ax.set_xlim(left, right)\n\n # x and y coord info\n tz = series.index.to_datetime().tz\n ax.format_coord = lambda t, y : \"t = {} y = {:8f}\".format(datetime.fromtimestamp(t, tz), y)\n\n return lines\n\n\ndef _maybe_resample(series, ax, freq, plotf, kwargs):\n ax_freq = _get_ax_freq(ax)\n if ax_freq is not None and freq != ax_freq:\n if frequencies.is_superperiod(freq, ax_freq): # upsample input\n series = series.copy()\n series.index = series.index.asfreq(ax_freq, how='s')\n freq = ax_freq\n elif _is_sup(freq, ax_freq): # one is weekly\n how = kwargs.pop('how', 'last')\n series = series.resample('D', how=how).dropna()\n series = series.resample(ax_freq, how=how).dropna()\n freq = ax_freq\n elif frequencies.is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq):\n _upsample_others(ax, freq, plotf, kwargs)\n ax_freq = freq\n else: # pragma: no cover\n raise ValueError('Incompatible frequency conversion')\n return freq, ax_freq, series\n\n\ndef _get_ax_freq(ax):\n ax_freq = getattr(ax, 'freq', None)\n if ax_freq is None:\n if hasattr(ax, 'left_ax'):\n ax_freq = getattr(ax.left_ax, 'freq', None)\n if hasattr(ax, 'right_ax'):\n ax_freq = getattr(ax.right_ax, 'freq', None)\n return ax_freq\n\n\ndef _is_sub(f1, f2):\n return ((f1.startswith('W') and frequencies.is_subperiod('D', f2)) or\n (f2.startswith('W') and 
frequencies.is_subperiod(f1, 'D')))\n\n\ndef _is_sup(f1, f2):\n return ((f1.startswith('W') and frequencies.is_superperiod('D', f2)) or\n (f2.startswith('W') and frequencies.is_superperiod(f1, 'D')))\n\n\ndef _upsample_others(ax, freq, plotf, kwargs):\n legend = ax.get_legend()\n lines, labels = _replot_ax(ax, freq, plotf, kwargs)\n\n other_ax = None\n if hasattr(ax, 'left_ax'):\n other_ax = ax.left_ax\n if hasattr(ax, 'right_ax'):\n other_ax = ax.right_ax\n\n if other_ax is not None:\n rlines, rlabels = _replot_ax(other_ax, freq, plotf, kwargs)\n lines.extend(rlines)\n labels.extend(rlabels)\n\n if (legend is not None and kwargs.get('legend', True) and\n len(lines) > 0):\n title = legend.get_title().get_text()\n if title == 'None':\n title = None\n ax.legend(lines, labels, loc='best', title=title)\n\n\ndef _replot_ax(ax, freq, plotf, kwargs):\n data = getattr(ax, '_plot_data', None)\n ax._plot_data = []\n ax.clear()\n _decorate_axes(ax, freq, kwargs)\n\n lines = []\n labels = []\n if data is not None:\n for series, kwds in data:\n series = series.copy()\n idx = series.index.asfreq(freq, how='S')\n series.index = idx\n ax._plot_data.append(series)\n args = _maybe_mask(series)\n lines.append(plotf(ax, *args, **kwds)[0])\n labels.append(com.pprint_thing(series.name))\n\n return lines, labels\n\n\ndef _decorate_axes(ax, freq, kwargs):\n ax.freq = freq\n xaxis = ax.get_xaxis()\n xaxis.freq = freq\n if not hasattr(ax, 'legendlabels'):\n ax.legendlabels = [kwargs.get('label', None)]\n else:\n ax.legendlabels.append(kwargs.get('label', None))\n ax.view_interval = None\n ax.date_axis_info = None\n\n\ndef _maybe_mask(series):\n mask = isnull(series)\n if mask.any():\n masked_array = np.ma.array(series.values)\n masked_array = np.ma.masked_where(mask, masked_array)\n args = [series.index, masked_array]\n else:\n args = [series.index, series.values]\n return args\n\n\ndef _get_freq(ax, series):\n # get frequency from data\n freq = getattr(series.index, 'freq', None)\n if freq is None:\n freq = getattr(series.index, 'inferred_freq', None)\n\n ax_freq = getattr(ax, 'freq', None)\n\n # use axes freq if no data freq\n if freq is None:\n freq = ax_freq\n\n # get the period frequency\n if isinstance(freq, DateOffset):\n freq = freq.rule_code\n else:\n freq = frequencies.get_base_alias(freq)\n\n freq = frequencies.get_period_alias(freq)\n\n return freq\n\n\ndef _get_xlim(lines):\n left, right = np.inf, -np.inf\n for l in lines:\n x = l.get_xdata()\n left = min(x[0].ordinal, left)\n right = max(x[-1].ordinal, right)\n return left, right\n\n# Patch methods for subplot. Only format_dateaxis is currently used.\n# Do we need the rest for convenience?\n\n\ndef format_dateaxis(subplot, freq):\n \"\"\"\n Pretty-formats the date axis (x-axis).\n\n Major and minor ticks are automatically set for the frequency of the\n current underlying series. 
As the dynamic mode is activated by\n default, changing the limits of the x axis will intelligently change\n the positions of the ticks.\n \"\"\"\n majlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,\n minor_locator=False,\n plot_obj=subplot)\n minlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,\n minor_locator=True,\n plot_obj=subplot)\n subplot.xaxis.set_major_locator(majlocator)\n subplot.xaxis.set_minor_locator(minlocator)\n\n majformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,\n minor_locator=False,\n plot_obj=subplot)\n minformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,\n minor_locator=True,\n plot_obj=subplot)\n subplot.xaxis.set_major_formatter(majformatter)\n subplot.xaxis.set_minor_formatter(minformatter)\n pylab.draw_if_interactive()\n", "path": "pandas/tseries/plotting.py" } ]
[ { "content": "\"\"\"\nPeriod formatters and locators adapted from scikits.timeseries by\nPierre GF Gerard-Marchant & Matt Knox\n\"\"\"\n\n#!!! TODO: Use the fact that axis can have units to simplify the process\nimport datetime as pydt\nfrom datetime import datetime\n\nfrom matplotlib import pylab\nimport matplotlib.units as units\n\nimport numpy as np\n\nfrom pandas import isnull\nfrom pandas.tseries.period import Period\nfrom pandas.tseries.offsets import DateOffset\nimport pandas.tseries.frequencies as frequencies\nfrom pandas.tseries.index import DatetimeIndex\nimport pandas.core.common as com\n\nfrom pandas.tseries.converter import (PeriodConverter, TimeSeries_DateLocator,\n TimeSeries_DateFormatter)\n\n#----------------------------------------------------------------------\n# Plotting functions and monkey patches\n\n\ndef tsplot(series, plotf, **kwargs):\n \"\"\"\n Plots a Series on the given Matplotlib axes or the current axes\n\n Parameters\n ----------\n axes : Axes\n series : Series\n\n Notes\n _____\n Supports same kwargs as Axes.plot\n\n \"\"\"\n # Used inferred freq is possible, need a test case for inferred\n if 'ax' in kwargs:\n ax = kwargs.pop('ax')\n else:\n import matplotlib.pyplot as plt\n ax = plt.gca()\n\n freq = _get_freq(ax, series)\n # resample against axes freq if necessary\n if freq is None: # pragma: no cover\n raise ValueError('Cannot use dynamic axis without frequency info')\n else:\n # Convert DatetimeIndex to PeriodIndex\n if isinstance(series.index, DatetimeIndex):\n series = series.to_period(freq=freq)\n freq, ax_freq, series = _maybe_resample(series, ax, freq, plotf,\n kwargs)\n\n # Set ax with freq info\n _decorate_axes(ax, freq, kwargs)\n\n # mask missing values\n args = _maybe_mask(series)\n\n # how to make sure ax.clear() flows through?\n if not hasattr(ax, '_plot_data'):\n ax._plot_data = []\n ax._plot_data.append((series, kwargs))\n\n # styles\n style = kwargs.pop('style', None)\n if style is not None:\n args.append(style)\n\n lines = plotf(ax, *args, **kwargs)\n label = kwargs.get('label', None)\n\n # set date formatter, locators and rescale limits\n format_dateaxis(ax, ax.freq)\n left, right = _get_xlim(ax.get_lines())\n ax.set_xlim(left, right)\n\n # x and y coord info\n ax.format_coord = lambda t, y: \"t = {} y = {:8f}\".format(Period(ordinal=int(t), freq=ax.freq), y)\n\n return lines\n\n\ndef _maybe_resample(series, ax, freq, plotf, kwargs):\n ax_freq = _get_ax_freq(ax)\n if ax_freq is not None and freq != ax_freq:\n if frequencies.is_superperiod(freq, ax_freq): # upsample input\n series = series.copy()\n series.index = series.index.asfreq(ax_freq, how='s')\n freq = ax_freq\n elif _is_sup(freq, ax_freq): # one is weekly\n how = kwargs.pop('how', 'last')\n series = series.resample('D', how=how).dropna()\n series = series.resample(ax_freq, how=how).dropna()\n freq = ax_freq\n elif frequencies.is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq):\n _upsample_others(ax, freq, plotf, kwargs)\n ax_freq = freq\n else: # pragma: no cover\n raise ValueError('Incompatible frequency conversion')\n return freq, ax_freq, series\n\n\ndef _get_ax_freq(ax):\n ax_freq = getattr(ax, 'freq', None)\n if ax_freq is None:\n if hasattr(ax, 'left_ax'):\n ax_freq = getattr(ax.left_ax, 'freq', None)\n if hasattr(ax, 'right_ax'):\n ax_freq = getattr(ax.right_ax, 'freq', None)\n return ax_freq\n\n\ndef _is_sub(f1, f2):\n return ((f1.startswith('W') and frequencies.is_subperiod('D', f2)) or\n (f2.startswith('W') and frequencies.is_subperiod(f1, 'D')))\n\n\ndef 
_is_sup(f1, f2):\n return ((f1.startswith('W') and frequencies.is_superperiod('D', f2)) or\n (f2.startswith('W') and frequencies.is_superperiod(f1, 'D')))\n\n\ndef _upsample_others(ax, freq, plotf, kwargs):\n legend = ax.get_legend()\n lines, labels = _replot_ax(ax, freq, plotf, kwargs)\n\n other_ax = None\n if hasattr(ax, 'left_ax'):\n other_ax = ax.left_ax\n if hasattr(ax, 'right_ax'):\n other_ax = ax.right_ax\n\n if other_ax is not None:\n rlines, rlabels = _replot_ax(other_ax, freq, plotf, kwargs)\n lines.extend(rlines)\n labels.extend(rlabels)\n\n if (legend is not None and kwargs.get('legend', True) and\n len(lines) > 0):\n title = legend.get_title().get_text()\n if title == 'None':\n title = None\n ax.legend(lines, labels, loc='best', title=title)\n\n\ndef _replot_ax(ax, freq, plotf, kwargs):\n data = getattr(ax, '_plot_data', None)\n ax._plot_data = []\n ax.clear()\n _decorate_axes(ax, freq, kwargs)\n\n lines = []\n labels = []\n if data is not None:\n for series, kwds in data:\n series = series.copy()\n idx = series.index.asfreq(freq, how='S')\n series.index = idx\n ax._plot_data.append(series)\n args = _maybe_mask(series)\n lines.append(plotf(ax, *args, **kwds)[0])\n labels.append(com.pprint_thing(series.name))\n\n return lines, labels\n\n\ndef _decorate_axes(ax, freq, kwargs):\n ax.freq = freq\n xaxis = ax.get_xaxis()\n xaxis.freq = freq\n if not hasattr(ax, 'legendlabels'):\n ax.legendlabels = [kwargs.get('label', None)]\n else:\n ax.legendlabels.append(kwargs.get('label', None))\n ax.view_interval = None\n ax.date_axis_info = None\n\n\ndef _maybe_mask(series):\n mask = isnull(series)\n if mask.any():\n masked_array = np.ma.array(series.values)\n masked_array = np.ma.masked_where(mask, masked_array)\n args = [series.index, masked_array]\n else:\n args = [series.index, series.values]\n return args\n\n\ndef _get_freq(ax, series):\n # get frequency from data\n freq = getattr(series.index, 'freq', None)\n if freq is None:\n freq = getattr(series.index, 'inferred_freq', None)\n\n ax_freq = getattr(ax, 'freq', None)\n\n # use axes freq if no data freq\n if freq is None:\n freq = ax_freq\n\n # get the period frequency\n if isinstance(freq, DateOffset):\n freq = freq.rule_code\n else:\n freq = frequencies.get_base_alias(freq)\n\n freq = frequencies.get_period_alias(freq)\n\n return freq\n\n\ndef _get_xlim(lines):\n left, right = np.inf, -np.inf\n for l in lines:\n x = l.get_xdata()\n left = min(x[0].ordinal, left)\n right = max(x[-1].ordinal, right)\n return left, right\n\n# Patch methods for subplot. Only format_dateaxis is currently used.\n# Do we need the rest for convenience?\n\n\ndef format_dateaxis(subplot, freq):\n \"\"\"\n Pretty-formats the date axis (x-axis).\n\n Major and minor ticks are automatically set for the frequency of the\n current underlying series. 
As the dynamic mode is activated by\n default, changing the limits of the x axis will intelligently change\n the positions of the ticks.\n \"\"\"\n majlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,\n minor_locator=False,\n plot_obj=subplot)\n minlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,\n minor_locator=True,\n plot_obj=subplot)\n subplot.xaxis.set_major_locator(majlocator)\n subplot.xaxis.set_minor_locator(minlocator)\n\n majformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,\n minor_locator=False,\n plot_obj=subplot)\n minformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,\n minor_locator=True,\n plot_obj=subplot)\n subplot.xaxis.set_major_formatter(majformatter)\n subplot.xaxis.set_minor_formatter(minformatter)\n pylab.draw_if_interactive()\n", "path": "pandas/tseries/plotting.py" } ]
diff --git a/doc/source/release.rst b/doc/source/release.rst index 91bf6084e0faa..975d92cc215c4 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -450,8 +450,9 @@ Bug Fixes - Bug in enabling ``subplots=True`` in ``DataFrame.plot`` only has single column raises ``TypeError``, and ``Series.plot`` raises ``AttributeError`` (:issue:`6951`) - Bug in ``DataFrame.plot`` draws unnecessary axes when enabling ``subplots`` and ``kind=scatter`` (:issue:`6951`) - Bug in ``read_csv`` from a filesystem with non-utf-8 encoding (:issue:`6807`) -- Bug in ``iloc`` when setting / aligning (:issue:``6766`) +- Bug in ``iloc`` when setting / aligning (:issue:`6766`) - Bug causing UnicodeEncodeError when get_dummies called with unicode values and a prefix (:issue:`6885`) +- Bug in timeseries-with-frequency plot cursor display (:issue:`5453`) pandas 0.13.1 ------------- diff --git a/pandas/tseries/plotting.py b/pandas/tseries/plotting.py index ae32367a57cd3..abec1d469114f 100644 --- a/pandas/tseries/plotting.py +++ b/pandas/tseries/plotting.py @@ -83,8 +83,7 @@ def tsplot(series, plotf, **kwargs): ax.set_xlim(left, right) # x and y coord info - tz = series.index.to_datetime().tz - ax.format_coord = lambda t, y : "t = {} y = {:8f}".format(datetime.fromtimestamp(t, tz), y) + ax.format_coord = lambda t, y: "t = {} y = {:8f}".format(Period(ordinal=int(t), freq=ax.freq), y) return lines diff --git a/pandas/tseries/tests/test_plotting.py b/pandas/tseries/tests/test_plotting.py index 118c09ddf826f..5d1e4b67041f7 100644 --- a/pandas/tseries/tests/test_plotting.py +++ b/pandas/tseries/tests/test_plotting.py @@ -131,6 +131,21 @@ def test_get_datevalue(self): self.assertEqual(get_datevalue('1/1/1987', 'D'), Period('1987-1-1', 'D').ordinal) + @slow + def test_ts_plot_format_coord(self): + def check_format_of_first_point(ax, expected_string): + first_line = ax.get_lines()[0] + first_x = first_line.get_xdata()[0].ordinal + first_y = first_line.get_ydata()[0] + self.assertEqual(expected_string, ax.format_coord(first_x, first_y)) + + annual = Series(1, index=date_range('2014-01-01', periods=3, freq='A-DEC')) + check_format_of_first_point(annual.plot(), 't = 2014 y = 1.000000') + + # note this is added to the annual plot already in existence, and changes its freq field + daily = Series(1, index=date_range('2014-01-01', periods=3, freq='D')) + check_format_of_first_point(daily.plot(), 't = 2014-01-01 y = 1.000000') + @slow def test_line_plot_period_series(self): for s in self.period_ser:
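The new test asserts on strings like `'t = 2014-01-01 y = 1.000000'`; the key property it relies on is that a `Period` survives the ordinal round-trip. A quick standalone check, assuming a pandas version where `Period` accepts the `ordinal` keyword (as used in the patch above):

```python
# Round-trip check: a Period rebuilt from its ordinal at the same
# frequency compares equal to the original.
from pandas import Period

p = Period('2014-01-01', freq='D')
assert Period(ordinal=p.ordinal, freq='D') == p
print(p.ordinal)  # 16071 (days since the 1970-01-01 epoch)
```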
ipython__ipython-9109
Cannot inspect callable that raises in `__bool__`
The following should not raise, but it does:

``` python
class NoBoolCall:
    def __call__(self):
        pass

    def __bool__(self):
        raise NotImplementedError('Must be implemented')


b = NoBoolCall()
b?
```

Likely in `oinspect.py`:

```
835: if callable_obj:
```

Should be `if callable_obj is not None`, though I'm not sure.
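The distinction matters because `if callable_obj:` triggers `__bool__`, whereas an identity test against `None` does not. A minimal sketch of the failure mode, using the class from the report:

```python
# The truthiness test calls __bool__ (which this object makes raise);
# the identity test against None never invokes it.
class NoBoolCall:
    def __call__(self):
        pass

    def __bool__(self):
        raise NotImplementedError('Must be implemented')


callable_obj = NoBoolCall()

try:
    if callable_obj:              # invokes NoBoolCall.__bool__ -> raises
        pass
except NotImplementedError as exc:
    print('truthiness check raised:', exc)

if callable_obj is not None:      # safe: __bool__ is never called
    print('identity check passed')
```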
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"Tools for inspecting Python objects.\n\nUses syntax highlighting for presenting the various information elements.\n\nSimilar in spirit to the inspect module, but all calls take a name argument to\nreference the name under which an object is being read.\n\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom __future__ import print_function\n\n__all__ = ['Inspector','InspectColors']\n\n# stdlib modules\nimport inspect\nimport linecache\nimport os\nfrom textwrap import dedent\nimport types\nimport io as stdlib_io\n\ntry:\n from itertools import izip_longest\nexcept ImportError:\n from itertools import zip_longest as izip_longest\n\n# IPython's own\nfrom IPython.core import page\nfrom IPython.lib.pretty import pretty\nfrom IPython.testing.skipdoctest import skip_doctest_py3\nfrom IPython.utils import PyColorize\nfrom IPython.utils import io\nfrom IPython.utils import openpy\nfrom IPython.utils import py3compat\nfrom IPython.utils.dir2 import safe_hasattr\nfrom IPython.utils.path import compress_user\nfrom IPython.utils.text import indent\nfrom IPython.utils.wildcard import list_namespace\nfrom IPython.utils.coloransi import TermColors, ColorScheme, ColorSchemeTable\nfrom IPython.utils.py3compat import cast_unicode, string_types, PY3\nfrom IPython.utils.signatures import signature\n\n# builtin docstrings to ignore\n_func_call_docstring = types.FunctionType.__call__.__doc__\n_object_init_docstring = object.__init__.__doc__\n_builtin_type_docstrings = {\n inspect.getdoc(t) for t in (types.ModuleType, types.MethodType,\n types.FunctionType, property)\n}\n\n_builtin_func_type = type(all)\n_builtin_meth_type = type(str.upper) # Bound methods have the same type as builtin functions\n#****************************************************************************\n# Builtin color schemes\n\nColors = TermColors # just a shorthand\n\nInspectColors = PyColorize.ANSICodeColors\n\n#****************************************************************************\n# Auxiliary functions and objects\n\n# See the messaging spec for the definition of all these fields. This list\n# effectively defines the order of display\ninfo_fields = ['type_name', 'base_class', 'string_form', 'namespace',\n 'length', 'file', 'definition', 'docstring', 'source',\n 'init_definition', 'class_docstring', 'init_docstring',\n 'call_def', 'call_docstring',\n # These won't be printed but will be used to determine how to\n # format the object\n 'ismagic', 'isalias', 'isclass', 'argspec', 'found', 'name'\n ]\n\n\ndef object_info(**kw):\n \"\"\"Make an object info dict with all fields present.\"\"\"\n infodict = dict(izip_longest(info_fields, [None]))\n infodict.update(kw)\n return infodict\n\n\ndef get_encoding(obj):\n \"\"\"Get encoding for python source file defining obj\n\n Returns None if obj is not defined in a sourcefile.\n \"\"\"\n ofile = find_file(obj)\n # run contents of file through pager starting at line where the object\n # is defined, as long as the file isn't binary and is actually on the\n # filesystem.\n if ofile is None:\n return None\n elif ofile.endswith(('.so', '.dll', '.pyd')):\n return None\n elif not os.path.isfile(ofile):\n return None\n else:\n # Print only text files, not extension binaries. 
Note that\n # getsourcelines returns lineno with 1-offset and page() uses\n # 0-offset, so we must adjust.\n with stdlib_io.open(ofile, 'rb') as buffer: # Tweaked to use io.open for Python 2\n encoding, lines = openpy.detect_encoding(buffer.readline)\n return encoding\n\ndef getdoc(obj):\n \"\"\"Stable wrapper around inspect.getdoc.\n\n This can't crash because of attribute problems.\n\n It also attempts to call a getdoc() method on the given object. This\n allows objects which provide their docstrings via non-standard mechanisms\n (like Pyro proxies) to still be inspected by ipython's ? system.\"\"\"\n # Allow objects to offer customized documentation via a getdoc method:\n try:\n ds = obj.getdoc()\n except Exception:\n pass\n else:\n # if we get extra info, we add it to the normal docstring.\n if isinstance(ds, string_types):\n return inspect.cleandoc(ds)\n \n try:\n docstr = inspect.getdoc(obj)\n encoding = get_encoding(obj)\n return py3compat.cast_unicode(docstr, encoding=encoding)\n except Exception:\n # Harden against an inspect failure, which can occur with\n # SWIG-wrapped extensions.\n raise\n return None\n\n\ndef getsource(obj, oname=''):\n \"\"\"Wrapper around inspect.getsource.\n\n This can be modified by other projects to provide customized source\n extraction.\n\n Parameters\n ----------\n obj : object\n an object whose source code we will attempt to extract\n oname : str\n (optional) a name under which the object is known\n\n Returns\n -------\n src : unicode or None\n\n \"\"\"\n\n if isinstance(obj, property):\n sources = []\n for attrname in ['fget', 'fset', 'fdel']:\n fn = getattr(obj, attrname)\n if fn is not None:\n encoding = get_encoding(fn)\n oname_prefix = ('%s.' % oname) if oname else ''\n sources.append(cast_unicode(\n ''.join(('# ', oname_prefix, attrname)),\n encoding=encoding))\n if inspect.isfunction(fn):\n sources.append(dedent(getsource(fn)))\n else:\n # Default str/repr only prints function name,\n # pretty.pretty prints module name too.\n sources.append(cast_unicode(\n '%s%s = %s\\n' % (\n oname_prefix, attrname, pretty(fn)),\n encoding=encoding))\n if sources:\n return '\\n'.join(sources)\n else:\n return None\n\n else:\n # Get source for non-property objects.\n\n obj = _get_wrapped(obj)\n\n try:\n src = inspect.getsource(obj)\n except TypeError:\n # The object itself provided no meaningful source, try looking for\n # its class definition instead.\n if hasattr(obj, '__class__'):\n try:\n src = inspect.getsource(obj.__class__)\n except TypeError:\n return None\n\n encoding = get_encoding(obj)\n return cast_unicode(src, encoding=encoding)\n\n\ndef is_simple_callable(obj):\n \"\"\"True if obj is a function ()\"\"\"\n return (inspect.isfunction(obj) or inspect.ismethod(obj) or \\\n isinstance(obj, _builtin_func_type) or isinstance(obj, _builtin_meth_type))\n\n\ndef getargspec(obj):\n \"\"\"Wrapper around :func:`inspect.getfullargspec` on Python 3, and\n :func:inspect.getargspec` on Python 2.\n \n In addition to functions and methods, this can also handle objects with a\n ``__call__`` attribute.\n \"\"\"\n if safe_hasattr(obj, '__call__') and not is_simple_callable(obj):\n obj = obj.__call__\n\n return inspect.getfullargspec(obj) if PY3 else inspect.getargspec(obj)\n\n\ndef format_argspec(argspec):\n \"\"\"Format argspect, convenience wrapper around inspect's.\n\n This takes a dict instead of ordered arguments and calls\n inspect.format_argspec with the arguments in the necessary order.\n \"\"\"\n return inspect.formatargspec(argspec['args'], 
argspec['varargs'],\n argspec['varkw'], argspec['defaults'])\n\n\ndef call_tip(oinfo, format_call=True):\n \"\"\"Extract call tip data from an oinfo dict.\n\n Parameters\n ----------\n oinfo : dict\n\n format_call : bool, optional\n If True, the call line is formatted and returned as a string. If not, a\n tuple of (name, argspec) is returned.\n\n Returns\n -------\n call_info : None, str or (str, dict) tuple.\n When format_call is True, the whole call information is formattted as a\n single string. Otherwise, the object's name and its argspec dict are\n returned. If no call information is available, None is returned.\n\n docstring : str or None\n The most relevant docstring for calling purposes is returned, if\n available. The priority is: call docstring for callable instances, then\n constructor docstring for classes, then main object's docstring otherwise\n (regular functions).\n \"\"\"\n # Get call definition\n argspec = oinfo.get('argspec')\n if argspec is None:\n call_line = None\n else:\n # Callable objects will have 'self' as their first argument, prune\n # it out if it's there for clarity (since users do *not* pass an\n # extra first argument explicitly).\n try:\n has_self = argspec['args'][0] == 'self'\n except (KeyError, IndexError):\n pass\n else:\n if has_self:\n argspec['args'] = argspec['args'][1:]\n\n call_line = oinfo['name']+format_argspec(argspec)\n\n # Now get docstring.\n # The priority is: call docstring, constructor docstring, main one.\n doc = oinfo.get('call_docstring')\n if doc is None:\n doc = oinfo.get('init_docstring')\n if doc is None:\n doc = oinfo.get('docstring','')\n\n return call_line, doc\n\n\ndef _get_wrapped(obj):\n \"\"\"Get the original object if wrapped in one or more @decorators\"\"\"\n while safe_hasattr(obj, '__wrapped__'):\n obj = obj.__wrapped__\n return obj\n\ndef find_file(obj):\n \"\"\"Find the absolute path to the file where an object was defined.\n\n This is essentially a robust wrapper around `inspect.getabsfile`.\n\n Returns None if no file can be found.\n\n Parameters\n ----------\n obj : any Python object\n\n Returns\n -------\n fname : str\n The absolute path to the file where the object was defined.\n \"\"\"\n obj = _get_wrapped(obj)\n\n fname = None\n try:\n fname = inspect.getabsfile(obj)\n except TypeError:\n # For an instance, the file that matters is where its class was\n # declared.\n if hasattr(obj, '__class__'):\n try:\n fname = inspect.getabsfile(obj.__class__)\n except TypeError:\n # Can happen for builtins\n pass\n except:\n pass\n return cast_unicode(fname)\n\n\ndef find_source_lines(obj):\n \"\"\"Find the line number in a file where an object was defined.\n\n This is essentially a robust wrapper around `inspect.getsourcelines`.\n\n Returns None if no file can be found.\n\n Parameters\n ----------\n obj : any Python object\n\n Returns\n -------\n lineno : int\n The line number where the object definition starts.\n \"\"\"\n obj = _get_wrapped(obj)\n \n try:\n try:\n lineno = inspect.getsourcelines(obj)[1]\n except TypeError:\n # For instances, try the class object like getsource() does\n if hasattr(obj, '__class__'):\n lineno = inspect.getsourcelines(obj.__class__)[1]\n else:\n lineno = None\n except:\n return None\n\n return lineno\n\n\nclass Inspector:\n def __init__(self, color_table=InspectColors,\n code_color_table=PyColorize.ANSICodeColors,\n scheme='NoColor',\n str_detail_level=0):\n self.color_table = color_table\n self.parser = PyColorize.Parser(code_color_table,out='str')\n self.format = self.parser.format\n 
self.str_detail_level = str_detail_level\n self.set_active_scheme(scheme)\n\n def _getdef(self,obj,oname=''):\n \"\"\"Return the call signature for any callable object.\n\n If any exception is generated, None is returned instead and the\n exception is suppressed.\"\"\"\n try:\n hdef = oname + str(signature(obj))\n return cast_unicode(hdef)\n except:\n return None\n\n def __head(self,h):\n \"\"\"Return a header string with proper colors.\"\"\"\n return '%s%s%s' % (self.color_table.active_colors.header,h,\n self.color_table.active_colors.normal)\n\n def set_active_scheme(self, scheme):\n self.color_table.set_active_scheme(scheme)\n self.parser.color_table.set_active_scheme(scheme)\n\n def noinfo(self, msg, oname):\n \"\"\"Generic message when no information is found.\"\"\"\n print('No %s found' % msg, end=' ')\n if oname:\n print('for %s' % oname)\n else:\n print()\n\n def pdef(self, obj, oname=''):\n \"\"\"Print the call signature for any callable object.\n\n If the object is a class, print the constructor information.\"\"\"\n\n if not callable(obj):\n print('Object is not callable.')\n return\n\n header = ''\n\n if inspect.isclass(obj):\n header = self.__head('Class constructor information:\\n')\n obj = obj.__init__\n elif (not py3compat.PY3) and type(obj) is types.InstanceType:\n obj = obj.__call__\n\n output = self._getdef(obj,oname)\n if output is None:\n self.noinfo('definition header',oname)\n else:\n print(header,self.format(output), end=' ', file=io.stdout)\n\n # In Python 3, all classes are new-style, so they all have __init__.\n @skip_doctest_py3\n def pdoc(self,obj,oname='',formatter = None):\n \"\"\"Print the docstring for any object.\n\n Optional:\n -formatter: a function to run the docstring through for specially\n formatted docstrings.\n\n Examples\n --------\n\n In [1]: class NoInit:\n ...: pass\n\n In [2]: class NoDoc:\n ...: def __init__(self):\n ...: pass\n\n In [3]: %pdoc NoDoc\n No documentation found for NoDoc\n\n In [4]: %pdoc NoInit\n No documentation found for NoInit\n\n In [5]: obj = NoInit()\n\n In [6]: %pdoc obj\n No documentation found for obj\n\n In [5]: obj2 = NoDoc()\n\n In [6]: %pdoc obj2\n No documentation found for obj2\n \"\"\"\n\n head = self.__head # For convenience\n lines = []\n ds = getdoc(obj)\n if formatter:\n ds = formatter(ds)\n if ds:\n lines.append(head(\"Class docstring:\"))\n lines.append(indent(ds))\n if inspect.isclass(obj) and hasattr(obj, '__init__'):\n init_ds = getdoc(obj.__init__)\n if init_ds is not None:\n lines.append(head(\"Init docstring:\"))\n lines.append(indent(init_ds))\n elif hasattr(obj,'__call__'):\n call_ds = getdoc(obj.__call__)\n if call_ds:\n lines.append(head(\"Call docstring:\"))\n lines.append(indent(call_ds))\n\n if not lines:\n self.noinfo('documentation',oname)\n else:\n page.page('\\n'.join(lines))\n\n def psource(self, obj, oname=''):\n \"\"\"Print the source code for an object.\"\"\"\n\n # Flush the source cache because inspect can return out-of-date source\n linecache.checkcache()\n try:\n src = getsource(obj, oname=oname)\n except Exception:\n src = None\n\n if src is None:\n self.noinfo('source', oname)\n else:\n page.page(self.format(src))\n\n def pfile(self, obj, oname=''):\n \"\"\"Show the whole file where an object was defined.\"\"\"\n \n lineno = find_source_lines(obj)\n if lineno is None:\n self.noinfo('file', oname)\n return\n\n ofile = find_file(obj)\n # run contents of file through pager starting at line where the object\n # is defined, as long as the file isn't binary and is actually on the\n # 
filesystem.\n if ofile.endswith(('.so', '.dll', '.pyd')):\n print('File %r is binary, not printing.' % ofile)\n elif not os.path.isfile(ofile):\n print('File %r does not exist, not printing.' % ofile)\n else:\n # Print only text files, not extension binaries. Note that\n # getsourcelines returns lineno with 1-offset and page() uses\n # 0-offset, so we must adjust.\n page.page(self.format(openpy.read_py_file(ofile, skip_encoding_cookie=False)), lineno - 1)\n\n def _format_fields(self, fields, title_width=0):\n \"\"\"Formats a list of fields for display.\n\n Parameters\n ----------\n fields : list\n A list of 2-tuples: (field_title, field_content)\n title_width : int\n How many characters to pad titles to. Default to longest title.\n \"\"\"\n out = []\n header = self.__head\n if title_width == 0:\n title_width = max(len(title) + 2 for title, _ in fields)\n for title, content in fields:\n if len(content.splitlines()) > 1:\n title = header(title + \":\") + \"\\n\"\n else:\n title = header((title+\":\").ljust(title_width))\n out.append(cast_unicode(title) + cast_unicode(content))\n return \"\\n\".join(out)\n \n def _format_info(self, obj, oname='', formatter=None, info=None, detail_level=0):\n \"\"\"Format an info dict as text\"\"\"\n info = self.info(obj, oname=oname, formatter=formatter,\n info=info, detail_level=detail_level)\n displayfields = []\n def add_fields(fields):\n for title, key in fields:\n field = info[key]\n if field is not None:\n if key == \"source\":\n displayfields.append((title, self.format(cast_unicode(field.rstrip()))))\n else:\n displayfields.append((title, field.rstrip()))\n\n if info['isalias']:\n add_fields([('Repr', \"string_form\")])\n\n elif info['ismagic']:\n if detail_level > 0 and info['source'] is not None:\n add_fields([(\"Source\", \"source\")])\n else:\n add_fields([(\"Docstring\", \"docstring\")])\n\n add_fields([(\"File\", \"file\"),\n ])\n\n elif info['isclass'] or is_simple_callable(obj):\n # Functions, methods, classes\n add_fields([(\"Signature\", \"definition\"),\n (\"Init signature\", \"init_definition\"),\n ])\n if detail_level > 0 and info['source'] is not None:\n add_fields([(\"Source\", \"source\")])\n else:\n add_fields([(\"Docstring\", \"docstring\"),\n (\"Init docstring\", \"init_docstring\"),\n ])\n\n add_fields([('File', 'file'),\n ('Type', 'type_name'),\n ])\n\n else:\n # General Python objects\n add_fields([(\"Type\", \"type_name\")])\n\n # Base class for old-style instances\n if (not py3compat.PY3) and isinstance(obj, types.InstanceType) and info['base_class']:\n displayfields.append((\"Base Class\", info['base_class'].rstrip()))\n\n add_fields([(\"String form\", \"string_form\")])\n\n # Namespace\n if info['namespace'] != 'Interactive':\n displayfields.append((\"Namespace\", info['namespace'].rstrip()))\n\n add_fields([(\"Length\", \"length\"),\n (\"File\", \"file\"),\n (\"Signature\", \"definition\"),\n ])\n\n # Source or docstring, depending on detail level and whether\n # source found.\n if detail_level > 0 and info['source'] is not None:\n displayfields.append((\"Source\",\n self.format(cast_unicode(info['source']))))\n elif info['docstring'] is not None:\n displayfields.append((\"Docstring\", info[\"docstring\"]))\n\n add_fields([(\"Class docstring\", \"class_docstring\"),\n (\"Init docstring\", \"init_docstring\"),\n (\"Call signature\", \"call_def\"),\n (\"Call docstring\", \"call_docstring\")])\n \n if displayfields:\n return self._format_fields(displayfields)\n else:\n return u''\n \n def pinfo(self, obj, oname='', 
formatter=None, info=None, detail_level=0):\n \"\"\"Show detailed information about an object.\n\n Optional arguments:\n\n - oname: name of the variable pointing to the object.\n\n - formatter: special formatter for docstrings (see pdoc)\n\n - info: a structure with some information fields which may have been\n precomputed already.\n\n - detail_level: if set to 1, more information is given.\n \"\"\"\n text = self._format_info(obj, oname, formatter, info, detail_level)\n if text:\n page.page(text)\n \n def info(self, obj, oname='', formatter=None, info=None, detail_level=0):\n \"\"\"Compute a dict with detailed information about an object.\n\n Optional arguments:\n\n - oname: name of the variable pointing to the object.\n\n - formatter: special formatter for docstrings (see pdoc)\n\n - info: a structure with some information fields which may have been\n precomputed already.\n\n - detail_level: if set to 1, more information is given.\n \"\"\"\n\n obj_type = type(obj)\n\n if info is None:\n ismagic = 0\n isalias = 0\n ospace = ''\n else:\n ismagic = info.ismagic\n isalias = info.isalias\n ospace = info.namespace\n\n # Get docstring, special-casing aliases:\n if isalias:\n if not callable(obj):\n try:\n ds = \"Alias to the system command:\\n %s\" % obj[1]\n except:\n ds = \"Alias: \" + str(obj)\n else:\n ds = \"Alias to \" + str(obj)\n if obj.__doc__:\n ds += \"\\nDocstring:\\n\" + obj.__doc__\n else:\n ds = getdoc(obj)\n if ds is None:\n ds = '<no docstring>'\n if formatter is not None:\n ds = formatter(ds)\n\n # store output in a dict, we initialize it here and fill it as we go\n out = dict(name=oname, found=True, isalias=isalias, ismagic=ismagic)\n\n string_max = 200 # max size of strings to show (snipped if longer)\n shalf = int((string_max -5)/2)\n\n if ismagic:\n obj_type_name = 'Magic function'\n elif isalias:\n obj_type_name = 'System alias'\n else:\n obj_type_name = obj_type.__name__\n out['type_name'] = obj_type_name\n\n try:\n bclass = obj.__class__\n out['base_class'] = str(bclass)\n except: pass\n\n # String form, but snip if too long in ? form (full in ??)\n if detail_level >= self.str_detail_level:\n try:\n ostr = str(obj)\n str_head = 'string_form'\n if not detail_level and len(ostr)>string_max:\n ostr = ostr[:shalf] + ' <...> ' + ostr[-shalf:]\n ostr = (\"\\n\" + \" \" * len(str_head.expandtabs())).\\\n join(q.strip() for q in ostr.split(\"\\n\"))\n out[str_head] = ostr\n except:\n pass\n\n if ospace:\n out['namespace'] = ospace\n\n # Length (for strings and lists)\n try:\n out['length'] = str(len(obj))\n except: pass\n\n # Filename where object was defined\n binary_file = False\n fname = find_file(obj)\n if fname is None:\n # if anything goes wrong, we don't want to show source, so it's as\n # if the file was binary\n binary_file = True\n else:\n if fname.endswith(('.so', '.dll', '.pyd')):\n binary_file = True\n elif fname.endswith('<string>'):\n fname = 'Dynamically generated function. 
No source code available.'\n out['file'] = compress_user(fname)\n\n # Original source code for a callable, class or property.\n if detail_level:\n # Flush the source cache because inspect can return out-of-date\n # source\n linecache.checkcache()\n try:\n if isinstance(obj, property) or not binary_file:\n src = getsource(obj, oname)\n if src is not None:\n src = src.rstrip()\n out['source'] = src\n\n except Exception:\n pass\n\n # Add docstring only if no source is to be shown (avoid repetitions).\n if ds and out.get('source', None) is None:\n out['docstring'] = ds\n\n # Constructor docstring for classes\n if inspect.isclass(obj):\n out['isclass'] = True\n # reconstruct the function definition and print it:\n try:\n obj_init = obj.__init__\n except AttributeError:\n init_def = init_ds = None\n else:\n init_def = self._getdef(obj_init,oname)\n init_ds = getdoc(obj_init)\n # Skip Python's auto-generated docstrings\n if init_ds == _object_init_docstring:\n init_ds = None\n\n if init_def or init_ds:\n if init_def:\n out['init_definition'] = self.format(init_def)\n if init_ds:\n out['init_docstring'] = init_ds\n\n # and class docstring for instances:\n else:\n # reconstruct the function definition and print it:\n defln = self._getdef(obj, oname)\n if defln:\n out['definition'] = self.format(defln)\n\n # First, check whether the instance docstring is identical to the\n # class one, and print it separately if they don't coincide. In\n # most cases they will, but it's nice to print all the info for\n # objects which use instance-customized docstrings.\n if ds:\n try:\n cls = getattr(obj,'__class__')\n except:\n class_ds = None\n else:\n class_ds = getdoc(cls)\n # Skip Python's auto-generated docstrings\n if class_ds in _builtin_type_docstrings:\n class_ds = None\n if class_ds and ds != class_ds:\n out['class_docstring'] = class_ds\n\n # Next, try to show constructor docstrings\n try:\n init_ds = getdoc(obj.__init__)\n # Skip Python's auto-generated docstrings\n if init_ds == _object_init_docstring:\n init_ds = None\n except AttributeError:\n init_ds = None\n if init_ds:\n out['init_docstring'] = init_ds\n\n # Call form docstring for callable instances\n if safe_hasattr(obj, '__call__') and not is_simple_callable(obj):\n call_def = self._getdef(obj.__call__, oname)\n if call_def:\n call_def = self.format(call_def)\n # it may never be the case that call def and definition differ,\n # but don't include the same signature twice\n if call_def != out.get('definition'):\n out['call_def'] = call_def\n call_ds = getdoc(obj.__call__)\n # Skip Python's auto-generated docstrings\n if call_ds == _func_call_docstring:\n call_ds = None\n if call_ds:\n out['call_docstring'] = call_ds\n\n # Compute the object's argspec as a callable. 
The key is to decide\n # whether to pull it from the object itself, from its __init__ or\n # from its __call__ method.\n\n if inspect.isclass(obj):\n # Old-style classes need not have an __init__\n callable_obj = getattr(obj, \"__init__\", None)\n elif callable(obj):\n callable_obj = obj\n else:\n callable_obj = None\n\n if callable_obj:\n try:\n argspec = getargspec(callable_obj)\n except (TypeError, AttributeError):\n # For extensions/builtins we can't retrieve the argspec\n pass\n else:\n # named tuples' _asdict() method returns an OrderedDict, but we\n # we want a normal\n out['argspec'] = argspec_dict = dict(argspec._asdict())\n # We called this varkw before argspec became a named tuple.\n # With getfullargspec it's also called varkw.\n if 'varkw' not in argspec_dict:\n argspec_dict['varkw'] = argspec_dict.pop('keywords')\n\n return object_info(**out)\n\n def psearch(self,pattern,ns_table,ns_search=[],\n ignore_case=False,show_all=False):\n \"\"\"Search namespaces with wildcards for objects.\n\n Arguments:\n\n - pattern: string containing shell-like wildcards to use in namespace\n searches and optionally a type specification to narrow the search to\n objects of that type.\n\n - ns_table: dict of name->namespaces for search.\n\n Optional arguments:\n\n - ns_search: list of namespace names to include in search.\n\n - ignore_case(False): make the search case-insensitive.\n\n - show_all(False): show all names, including those starting with\n underscores.\n \"\"\"\n #print 'ps pattern:<%r>' % pattern # dbg\n\n # defaults\n type_pattern = 'all'\n filter = ''\n\n cmds = pattern.split()\n len_cmds = len(cmds)\n if len_cmds == 1:\n # Only filter pattern given\n filter = cmds[0]\n elif len_cmds == 2:\n # Both filter and type specified\n filter,type_pattern = cmds\n else:\n raise ValueError('invalid argument string for psearch: <%s>' %\n pattern)\n\n # filter search namespaces\n for name in ns_search:\n if name not in ns_table:\n raise ValueError('invalid namespace <%s>. Valid names: %s' %\n (name,ns_table.keys()))\n\n #print 'type_pattern:',type_pattern # dbg\n search_result, namespaces_seen = set(), set()\n for ns_name in ns_search:\n ns = ns_table[ns_name]\n # Normally, locals and globals are the same, so we just check one.\n if id(ns) in namespaces_seen:\n continue\n namespaces_seen.add(id(ns))\n tmp_res = list_namespace(ns, type_pattern, filter,\n ignore_case=ignore_case, show_all=show_all)\n search_result.update(tmp_res)\n\n page.page('\\n'.join(sorted(search_result)))\n", "path": "IPython/core/oinspect.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"Tools for inspecting Python objects.\n\nUses syntax highlighting for presenting the various information elements.\n\nSimilar in spirit to the inspect module, but all calls take a name argument to\nreference the name under which an object is being read.\n\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom __future__ import print_function\n\n__all__ = ['Inspector','InspectColors']\n\n# stdlib modules\nimport inspect\nimport linecache\nimport os\nfrom textwrap import dedent\nimport types\nimport io as stdlib_io\n\ntry:\n from itertools import izip_longest\nexcept ImportError:\n from itertools import zip_longest as izip_longest\n\n# IPython's own\nfrom IPython.core import page\nfrom IPython.lib.pretty import pretty\nfrom IPython.testing.skipdoctest import skip_doctest_py3\nfrom IPython.utils import PyColorize\nfrom IPython.utils import io\nfrom IPython.utils import openpy\nfrom IPython.utils import py3compat\nfrom IPython.utils.dir2 import safe_hasattr\nfrom IPython.utils.path import compress_user\nfrom IPython.utils.text import indent\nfrom IPython.utils.wildcard import list_namespace\nfrom IPython.utils.coloransi import TermColors, ColorScheme, ColorSchemeTable\nfrom IPython.utils.py3compat import cast_unicode, string_types, PY3\nfrom IPython.utils.signatures import signature\n\n# builtin docstrings to ignore\n_func_call_docstring = types.FunctionType.__call__.__doc__\n_object_init_docstring = object.__init__.__doc__\n_builtin_type_docstrings = {\n inspect.getdoc(t) for t in (types.ModuleType, types.MethodType,\n types.FunctionType, property)\n}\n\n_builtin_func_type = type(all)\n_builtin_meth_type = type(str.upper) # Bound methods have the same type as builtin functions\n#****************************************************************************\n# Builtin color schemes\n\nColors = TermColors # just a shorthand\n\nInspectColors = PyColorize.ANSICodeColors\n\n#****************************************************************************\n# Auxiliary functions and objects\n\n# See the messaging spec for the definition of all these fields. This list\n# effectively defines the order of display\ninfo_fields = ['type_name', 'base_class', 'string_form', 'namespace',\n 'length', 'file', 'definition', 'docstring', 'source',\n 'init_definition', 'class_docstring', 'init_docstring',\n 'call_def', 'call_docstring',\n # These won't be printed but will be used to determine how to\n # format the object\n 'ismagic', 'isalias', 'isclass', 'argspec', 'found', 'name'\n ]\n\n\ndef object_info(**kw):\n \"\"\"Make an object info dict with all fields present.\"\"\"\n infodict = dict(izip_longest(info_fields, [None]))\n infodict.update(kw)\n return infodict\n\n\ndef get_encoding(obj):\n \"\"\"Get encoding for python source file defining obj\n\n Returns None if obj is not defined in a sourcefile.\n \"\"\"\n ofile = find_file(obj)\n # run contents of file through pager starting at line where the object\n # is defined, as long as the file isn't binary and is actually on the\n # filesystem.\n if ofile is None:\n return None\n elif ofile.endswith(('.so', '.dll', '.pyd')):\n return None\n elif not os.path.isfile(ofile):\n return None\n else:\n # Print only text files, not extension binaries. 
Note that\n # getsourcelines returns lineno with 1-offset and page() uses\n # 0-offset, so we must adjust.\n with stdlib_io.open(ofile, 'rb') as buffer: # Tweaked to use io.open for Python 2\n encoding, lines = openpy.detect_encoding(buffer.readline)\n return encoding\n\ndef getdoc(obj):\n \"\"\"Stable wrapper around inspect.getdoc.\n\n This can't crash because of attribute problems.\n\n It also attempts to call a getdoc() method on the given object. This\n allows objects which provide their docstrings via non-standard mechanisms\n (like Pyro proxies) to still be inspected by ipython's ? system.\"\"\"\n # Allow objects to offer customized documentation via a getdoc method:\n try:\n ds = obj.getdoc()\n except Exception:\n pass\n else:\n # if we get extra info, we add it to the normal docstring.\n if isinstance(ds, string_types):\n return inspect.cleandoc(ds)\n \n try:\n docstr = inspect.getdoc(obj)\n encoding = get_encoding(obj)\n return py3compat.cast_unicode(docstr, encoding=encoding)\n except Exception:\n # Harden against an inspect failure, which can occur with\n # SWIG-wrapped extensions.\n raise\n return None\n\n\ndef getsource(obj, oname=''):\n \"\"\"Wrapper around inspect.getsource.\n\n This can be modified by other projects to provide customized source\n extraction.\n\n Parameters\n ----------\n obj : object\n an object whose source code we will attempt to extract\n oname : str\n (optional) a name under which the object is known\n\n Returns\n -------\n src : unicode or None\n\n \"\"\"\n\n if isinstance(obj, property):\n sources = []\n for attrname in ['fget', 'fset', 'fdel']:\n fn = getattr(obj, attrname)\n if fn is not None:\n encoding = get_encoding(fn)\n oname_prefix = ('%s.' % oname) if oname else ''\n sources.append(cast_unicode(\n ''.join(('# ', oname_prefix, attrname)),\n encoding=encoding))\n if inspect.isfunction(fn):\n sources.append(dedent(getsource(fn)))\n else:\n # Default str/repr only prints function name,\n # pretty.pretty prints module name too.\n sources.append(cast_unicode(\n '%s%s = %s\\n' % (\n oname_prefix, attrname, pretty(fn)),\n encoding=encoding))\n if sources:\n return '\\n'.join(sources)\n else:\n return None\n\n else:\n # Get source for non-property objects.\n\n obj = _get_wrapped(obj)\n\n try:\n src = inspect.getsource(obj)\n except TypeError:\n # The object itself provided no meaningful source, try looking for\n # its class definition instead.\n if hasattr(obj, '__class__'):\n try:\n src = inspect.getsource(obj.__class__)\n except TypeError:\n return None\n\n encoding = get_encoding(obj)\n return cast_unicode(src, encoding=encoding)\n\n\ndef is_simple_callable(obj):\n \"\"\"True if obj is a function ()\"\"\"\n return (inspect.isfunction(obj) or inspect.ismethod(obj) or \\\n isinstance(obj, _builtin_func_type) or isinstance(obj, _builtin_meth_type))\n\n\ndef getargspec(obj):\n \"\"\"Wrapper around :func:`inspect.getfullargspec` on Python 3, and\n :func:inspect.getargspec` on Python 2.\n \n In addition to functions and methods, this can also handle objects with a\n ``__call__`` attribute.\n \"\"\"\n if safe_hasattr(obj, '__call__') and not is_simple_callable(obj):\n obj = obj.__call__\n\n return inspect.getfullargspec(obj) if PY3 else inspect.getargspec(obj)\n\n\ndef format_argspec(argspec):\n \"\"\"Format argspect, convenience wrapper around inspect's.\n\n This takes a dict instead of ordered arguments and calls\n inspect.format_argspec with the arguments in the necessary order.\n \"\"\"\n return inspect.formatargspec(argspec['args'], 
argspec['varargs'],\n argspec['varkw'], argspec['defaults'])\n\n\ndef call_tip(oinfo, format_call=True):\n \"\"\"Extract call tip data from an oinfo dict.\n\n Parameters\n ----------\n oinfo : dict\n\n format_call : bool, optional\n If True, the call line is formatted and returned as a string. If not, a\n tuple of (name, argspec) is returned.\n\n Returns\n -------\n call_info : None, str or (str, dict) tuple.\n When format_call is True, the whole call information is formattted as a\n single string. Otherwise, the object's name and its argspec dict are\n returned. If no call information is available, None is returned.\n\n docstring : str or None\n The most relevant docstring for calling purposes is returned, if\n available. The priority is: call docstring for callable instances, then\n constructor docstring for classes, then main object's docstring otherwise\n (regular functions).\n \"\"\"\n # Get call definition\n argspec = oinfo.get('argspec')\n if argspec is None:\n call_line = None\n else:\n # Callable objects will have 'self' as their first argument, prune\n # it out if it's there for clarity (since users do *not* pass an\n # extra first argument explicitly).\n try:\n has_self = argspec['args'][0] == 'self'\n except (KeyError, IndexError):\n pass\n else:\n if has_self:\n argspec['args'] = argspec['args'][1:]\n\n call_line = oinfo['name']+format_argspec(argspec)\n\n # Now get docstring.\n # The priority is: call docstring, constructor docstring, main one.\n doc = oinfo.get('call_docstring')\n if doc is None:\n doc = oinfo.get('init_docstring')\n if doc is None:\n doc = oinfo.get('docstring','')\n\n return call_line, doc\n\n\ndef _get_wrapped(obj):\n \"\"\"Get the original object if wrapped in one or more @decorators\"\"\"\n while safe_hasattr(obj, '__wrapped__'):\n obj = obj.__wrapped__\n return obj\n\ndef find_file(obj):\n \"\"\"Find the absolute path to the file where an object was defined.\n\n This is essentially a robust wrapper around `inspect.getabsfile`.\n\n Returns None if no file can be found.\n\n Parameters\n ----------\n obj : any Python object\n\n Returns\n -------\n fname : str\n The absolute path to the file where the object was defined.\n \"\"\"\n obj = _get_wrapped(obj)\n\n fname = None\n try:\n fname = inspect.getabsfile(obj)\n except TypeError:\n # For an instance, the file that matters is where its class was\n # declared.\n if hasattr(obj, '__class__'):\n try:\n fname = inspect.getabsfile(obj.__class__)\n except TypeError:\n # Can happen for builtins\n pass\n except:\n pass\n return cast_unicode(fname)\n\n\ndef find_source_lines(obj):\n \"\"\"Find the line number in a file where an object was defined.\n\n This is essentially a robust wrapper around `inspect.getsourcelines`.\n\n Returns None if no file can be found.\n\n Parameters\n ----------\n obj : any Python object\n\n Returns\n -------\n lineno : int\n The line number where the object definition starts.\n \"\"\"\n obj = _get_wrapped(obj)\n \n try:\n try:\n lineno = inspect.getsourcelines(obj)[1]\n except TypeError:\n # For instances, try the class object like getsource() does\n if hasattr(obj, '__class__'):\n lineno = inspect.getsourcelines(obj.__class__)[1]\n else:\n lineno = None\n except:\n return None\n\n return lineno\n\n\nclass Inspector:\n def __init__(self, color_table=InspectColors,\n code_color_table=PyColorize.ANSICodeColors,\n scheme='NoColor',\n str_detail_level=0):\n self.color_table = color_table\n self.parser = PyColorize.Parser(code_color_table,out='str')\n self.format = self.parser.format\n 
self.str_detail_level = str_detail_level\n self.set_active_scheme(scheme)\n\n def _getdef(self,obj,oname=''):\n \"\"\"Return the call signature for any callable object.\n\n If any exception is generated, None is returned instead and the\n exception is suppressed.\"\"\"\n try:\n hdef = oname + str(signature(obj))\n return cast_unicode(hdef)\n except:\n return None\n\n def __head(self,h):\n \"\"\"Return a header string with proper colors.\"\"\"\n return '%s%s%s' % (self.color_table.active_colors.header,h,\n self.color_table.active_colors.normal)\n\n def set_active_scheme(self, scheme):\n self.color_table.set_active_scheme(scheme)\n self.parser.color_table.set_active_scheme(scheme)\n\n def noinfo(self, msg, oname):\n \"\"\"Generic message when no information is found.\"\"\"\n print('No %s found' % msg, end=' ')\n if oname:\n print('for %s' % oname)\n else:\n print()\n\n def pdef(self, obj, oname=''):\n \"\"\"Print the call signature for any callable object.\n\n If the object is a class, print the constructor information.\"\"\"\n\n if not callable(obj):\n print('Object is not callable.')\n return\n\n header = ''\n\n if inspect.isclass(obj):\n header = self.__head('Class constructor information:\\n')\n obj = obj.__init__\n elif (not py3compat.PY3) and type(obj) is types.InstanceType:\n obj = obj.__call__\n\n output = self._getdef(obj,oname)\n if output is None:\n self.noinfo('definition header',oname)\n else:\n print(header,self.format(output), end=' ', file=io.stdout)\n\n # In Python 3, all classes are new-style, so they all have __init__.\n @skip_doctest_py3\n def pdoc(self,obj,oname='',formatter = None):\n \"\"\"Print the docstring for any object.\n\n Optional:\n -formatter: a function to run the docstring through for specially\n formatted docstrings.\n\n Examples\n --------\n\n In [1]: class NoInit:\n ...: pass\n\n In [2]: class NoDoc:\n ...: def __init__(self):\n ...: pass\n\n In [3]: %pdoc NoDoc\n No documentation found for NoDoc\n\n In [4]: %pdoc NoInit\n No documentation found for NoInit\n\n In [5]: obj = NoInit()\n\n In [6]: %pdoc obj\n No documentation found for obj\n\n In [5]: obj2 = NoDoc()\n\n In [6]: %pdoc obj2\n No documentation found for obj2\n \"\"\"\n\n head = self.__head # For convenience\n lines = []\n ds = getdoc(obj)\n if formatter:\n ds = formatter(ds)\n if ds:\n lines.append(head(\"Class docstring:\"))\n lines.append(indent(ds))\n if inspect.isclass(obj) and hasattr(obj, '__init__'):\n init_ds = getdoc(obj.__init__)\n if init_ds is not None:\n lines.append(head(\"Init docstring:\"))\n lines.append(indent(init_ds))\n elif hasattr(obj,'__call__'):\n call_ds = getdoc(obj.__call__)\n if call_ds:\n lines.append(head(\"Call docstring:\"))\n lines.append(indent(call_ds))\n\n if not lines:\n self.noinfo('documentation',oname)\n else:\n page.page('\\n'.join(lines))\n\n def psource(self, obj, oname=''):\n \"\"\"Print the source code for an object.\"\"\"\n\n # Flush the source cache because inspect can return out-of-date source\n linecache.checkcache()\n try:\n src = getsource(obj, oname=oname)\n except Exception:\n src = None\n\n if src is None:\n self.noinfo('source', oname)\n else:\n page.page(self.format(src))\n\n def pfile(self, obj, oname=''):\n \"\"\"Show the whole file where an object was defined.\"\"\"\n \n lineno = find_source_lines(obj)\n if lineno is None:\n self.noinfo('file', oname)\n return\n\n ofile = find_file(obj)\n # run contents of file through pager starting at line where the object\n # is defined, as long as the file isn't binary and is actually on the\n # 
filesystem.\n if ofile.endswith(('.so', '.dll', '.pyd')):\n print('File %r is binary, not printing.' % ofile)\n elif not os.path.isfile(ofile):\n print('File %r does not exist, not printing.' % ofile)\n else:\n # Print only text files, not extension binaries. Note that\n # getsourcelines returns lineno with 1-offset and page() uses\n # 0-offset, so we must adjust.\n page.page(self.format(openpy.read_py_file(ofile, skip_encoding_cookie=False)), lineno - 1)\n\n def _format_fields(self, fields, title_width=0):\n \"\"\"Formats a list of fields for display.\n\n Parameters\n ----------\n fields : list\n A list of 2-tuples: (field_title, field_content)\n title_width : int\n How many characters to pad titles to. Default to longest title.\n \"\"\"\n out = []\n header = self.__head\n if title_width == 0:\n title_width = max(len(title) + 2 for title, _ in fields)\n for title, content in fields:\n if len(content.splitlines()) > 1:\n title = header(title + \":\") + \"\\n\"\n else:\n title = header((title+\":\").ljust(title_width))\n out.append(cast_unicode(title) + cast_unicode(content))\n return \"\\n\".join(out)\n \n def _format_info(self, obj, oname='', formatter=None, info=None, detail_level=0):\n \"\"\"Format an info dict as text\"\"\"\n info = self.info(obj, oname=oname, formatter=formatter,\n info=info, detail_level=detail_level)\n displayfields = []\n def add_fields(fields):\n for title, key in fields:\n field = info[key]\n if field is not None:\n if key == \"source\":\n displayfields.append((title, self.format(cast_unicode(field.rstrip()))))\n else:\n displayfields.append((title, field.rstrip()))\n\n if info['isalias']:\n add_fields([('Repr', \"string_form\")])\n\n elif info['ismagic']:\n if detail_level > 0 and info['source'] is not None:\n add_fields([(\"Source\", \"source\")])\n else:\n add_fields([(\"Docstring\", \"docstring\")])\n\n add_fields([(\"File\", \"file\"),\n ])\n\n elif info['isclass'] or is_simple_callable(obj):\n # Functions, methods, classes\n add_fields([(\"Signature\", \"definition\"),\n (\"Init signature\", \"init_definition\"),\n ])\n if detail_level > 0 and info['source'] is not None:\n add_fields([(\"Source\", \"source\")])\n else:\n add_fields([(\"Docstring\", \"docstring\"),\n (\"Init docstring\", \"init_docstring\"),\n ])\n\n add_fields([('File', 'file'),\n ('Type', 'type_name'),\n ])\n\n else:\n # General Python objects\n add_fields([(\"Type\", \"type_name\")])\n\n # Base class for old-style instances\n if (not py3compat.PY3) and isinstance(obj, types.InstanceType) and info['base_class']:\n displayfields.append((\"Base Class\", info['base_class'].rstrip()))\n\n add_fields([(\"String form\", \"string_form\")])\n\n # Namespace\n if info['namespace'] != 'Interactive':\n displayfields.append((\"Namespace\", info['namespace'].rstrip()))\n\n add_fields([(\"Length\", \"length\"),\n (\"File\", \"file\"),\n (\"Signature\", \"definition\"),\n ])\n\n # Source or docstring, depending on detail level and whether\n # source found.\n if detail_level > 0 and info['source'] is not None:\n displayfields.append((\"Source\",\n self.format(cast_unicode(info['source']))))\n elif info['docstring'] is not None:\n displayfields.append((\"Docstring\", info[\"docstring\"]))\n\n add_fields([(\"Class docstring\", \"class_docstring\"),\n (\"Init docstring\", \"init_docstring\"),\n (\"Call signature\", \"call_def\"),\n (\"Call docstring\", \"call_docstring\")])\n \n if displayfields:\n return self._format_fields(displayfields)\n else:\n return u''\n \n def pinfo(self, obj, oname='', 
formatter=None, info=None, detail_level=0):\n \"\"\"Show detailed information about an object.\n\n Optional arguments:\n\n - oname: name of the variable pointing to the object.\n\n - formatter: special formatter for docstrings (see pdoc)\n\n - info: a structure with some information fields which may have been\n precomputed already.\n\n - detail_level: if set to 1, more information is given.\n \"\"\"\n text = self._format_info(obj, oname, formatter, info, detail_level)\n if text:\n page.page(text)\n \n def info(self, obj, oname='', formatter=None, info=None, detail_level=0):\n \"\"\"Compute a dict with detailed information about an object.\n\n Optional arguments:\n\n - oname: name of the variable pointing to the object.\n\n - formatter: special formatter for docstrings (see pdoc)\n\n - info: a structure with some information fields which may have been\n precomputed already.\n\n - detail_level: if set to 1, more information is given.\n \"\"\"\n\n obj_type = type(obj)\n\n if info is None:\n ismagic = 0\n isalias = 0\n ospace = ''\n else:\n ismagic = info.ismagic\n isalias = info.isalias\n ospace = info.namespace\n\n # Get docstring, special-casing aliases:\n if isalias:\n if not callable(obj):\n try:\n ds = \"Alias to the system command:\\n %s\" % obj[1]\n except:\n ds = \"Alias: \" + str(obj)\n else:\n ds = \"Alias to \" + str(obj)\n if obj.__doc__:\n ds += \"\\nDocstring:\\n\" + obj.__doc__\n else:\n ds = getdoc(obj)\n if ds is None:\n ds = '<no docstring>'\n if formatter is not None:\n ds = formatter(ds)\n\n # store output in a dict, we initialize it here and fill it as we go\n out = dict(name=oname, found=True, isalias=isalias, ismagic=ismagic)\n\n string_max = 200 # max size of strings to show (snipped if longer)\n shalf = int((string_max -5)/2)\n\n if ismagic:\n obj_type_name = 'Magic function'\n elif isalias:\n obj_type_name = 'System alias'\n else:\n obj_type_name = obj_type.__name__\n out['type_name'] = obj_type_name\n\n try:\n bclass = obj.__class__\n out['base_class'] = str(bclass)\n except: pass\n\n # String form, but snip if too long in ? form (full in ??)\n if detail_level >= self.str_detail_level:\n try:\n ostr = str(obj)\n str_head = 'string_form'\n if not detail_level and len(ostr)>string_max:\n ostr = ostr[:shalf] + ' <...> ' + ostr[-shalf:]\n ostr = (\"\\n\" + \" \" * len(str_head.expandtabs())).\\\n join(q.strip() for q in ostr.split(\"\\n\"))\n out[str_head] = ostr\n except:\n pass\n\n if ospace:\n out['namespace'] = ospace\n\n # Length (for strings and lists)\n try:\n out['length'] = str(len(obj))\n except: pass\n\n # Filename where object was defined\n binary_file = False\n fname = find_file(obj)\n if fname is None:\n # if anything goes wrong, we don't want to show source, so it's as\n # if the file was binary\n binary_file = True\n else:\n if fname.endswith(('.so', '.dll', '.pyd')):\n binary_file = True\n elif fname.endswith('<string>'):\n fname = 'Dynamically generated function. 
No source code available.'\n out['file'] = compress_user(fname)\n\n # Original source code for a callable, class or property.\n if detail_level:\n # Flush the source cache because inspect can return out-of-date\n # source\n linecache.checkcache()\n try:\n if isinstance(obj, property) or not binary_file:\n src = getsource(obj, oname)\n if src is not None:\n src = src.rstrip()\n out['source'] = src\n\n except Exception:\n pass\n\n # Add docstring only if no source is to be shown (avoid repetitions).\n if ds and out.get('source', None) is None:\n out['docstring'] = ds\n\n # Constructor docstring for classes\n if inspect.isclass(obj):\n out['isclass'] = True\n # reconstruct the function definition and print it:\n try:\n obj_init = obj.__init__\n except AttributeError:\n init_def = init_ds = None\n else:\n init_def = self._getdef(obj_init,oname)\n init_ds = getdoc(obj_init)\n # Skip Python's auto-generated docstrings\n if init_ds == _object_init_docstring:\n init_ds = None\n\n if init_def or init_ds:\n if init_def:\n out['init_definition'] = self.format(init_def)\n if init_ds:\n out['init_docstring'] = init_ds\n\n # and class docstring for instances:\n else:\n # reconstruct the function definition and print it:\n defln = self._getdef(obj, oname)\n if defln:\n out['definition'] = self.format(defln)\n\n # First, check whether the instance docstring is identical to the\n # class one, and print it separately if they don't coincide. In\n # most cases they will, but it's nice to print all the info for\n # objects which use instance-customized docstrings.\n if ds:\n try:\n cls = getattr(obj,'__class__')\n except:\n class_ds = None\n else:\n class_ds = getdoc(cls)\n # Skip Python's auto-generated docstrings\n if class_ds in _builtin_type_docstrings:\n class_ds = None\n if class_ds and ds != class_ds:\n out['class_docstring'] = class_ds\n\n # Next, try to show constructor docstrings\n try:\n init_ds = getdoc(obj.__init__)\n # Skip Python's auto-generated docstrings\n if init_ds == _object_init_docstring:\n init_ds = None\n except AttributeError:\n init_ds = None\n if init_ds:\n out['init_docstring'] = init_ds\n\n # Call form docstring for callable instances\n if safe_hasattr(obj, '__call__') and not is_simple_callable(obj):\n call_def = self._getdef(obj.__call__, oname)\n if call_def:\n call_def = self.format(call_def)\n # it may never be the case that call def and definition differ,\n # but don't include the same signature twice\n if call_def != out.get('definition'):\n out['call_def'] = call_def\n call_ds = getdoc(obj.__call__)\n # Skip Python's auto-generated docstrings\n if call_ds == _func_call_docstring:\n call_ds = None\n if call_ds:\n out['call_docstring'] = call_ds\n\n # Compute the object's argspec as a callable. 
The key is to decide\n # whether to pull it from the object itself, from its __init__ or\n # from its __call__ method.\n\n if inspect.isclass(obj):\n # Old-style classes need not have an __init__\n callable_obj = getattr(obj, \"__init__\", None)\n elif callable(obj):\n callable_obj = obj\n else:\n callable_obj = None\n\n if callable_obj is not None:\n try:\n argspec = getargspec(callable_obj)\n except (TypeError, AttributeError):\n # For extensions/builtins we can't retrieve the argspec\n pass\n else:\n # named tuples' _asdict() method returns an OrderedDict, but we\n # we want a normal\n out['argspec'] = argspec_dict = dict(argspec._asdict())\n # We called this varkw before argspec became a named tuple.\n # With getfullargspec it's also called varkw.\n if 'varkw' not in argspec_dict:\n argspec_dict['varkw'] = argspec_dict.pop('keywords')\n\n return object_info(**out)\n\n def psearch(self,pattern,ns_table,ns_search=[],\n ignore_case=False,show_all=False):\n \"\"\"Search namespaces with wildcards for objects.\n\n Arguments:\n\n - pattern: string containing shell-like wildcards to use in namespace\n searches and optionally a type specification to narrow the search to\n objects of that type.\n\n - ns_table: dict of name->namespaces for search.\n\n Optional arguments:\n\n - ns_search: list of namespace names to include in search.\n\n - ignore_case(False): make the search case-insensitive.\n\n - show_all(False): show all names, including those starting with\n underscores.\n \"\"\"\n #print 'ps pattern:<%r>' % pattern # dbg\n\n # defaults\n type_pattern = 'all'\n filter = ''\n\n cmds = pattern.split()\n len_cmds = len(cmds)\n if len_cmds == 1:\n # Only filter pattern given\n filter = cmds[0]\n elif len_cmds == 2:\n # Both filter and type specified\n filter,type_pattern = cmds\n else:\n raise ValueError('invalid argument string for psearch: <%s>' %\n pattern)\n\n # filter search namespaces\n for name in ns_search:\n if name not in ns_table:\n raise ValueError('invalid namespace <%s>. Valid names: %s' %\n (name,ns_table.keys()))\n\n #print 'type_pattern:',type_pattern # dbg\n search_result, namespaces_seen = set(), set()\n for ns_name in ns_search:\n ns = ns_table[ns_name]\n # Normally, locals and globals are the same, so we just check one.\n if id(ns) in namespaces_seen:\n continue\n namespaces_seen.add(id(ns))\n tmp_res = list_namespace(ns, type_pattern, filter,\n ignore_case=ignore_case, show_all=show_all)\n search_result.update(tmp_res)\n\n page.page('\\n'.join(sorted(search_result)))\n", "path": "IPython/core/oinspect.py" } ]
diff --git a/IPython/core/oinspect.py b/IPython/core/oinspect.py index e7be78b6541..f819990fc26 100644 --- a/IPython/core/oinspect.py +++ b/IPython/core/oinspect.py @@ -832,7 +832,7 @@ def info(self, obj, oname='', formatter=None, info=None, detail_level=0): else: callable_obj = None - if callable_obj: + if callable_obj is not None: try: argspec = getargspec(callable_obj) except (TypeError, AttributeError): diff --git a/IPython/core/tests/test_oinspect.py b/IPython/core/tests/test_oinspect.py index 0c71f0091d9..e868730b23f 100644 --- a/IPython/core/tests/test_oinspect.py +++ b/IPython/core/tests/test_oinspect.py @@ -172,6 +172,19 @@ class Awkward(object): def __getattr__(self, name): raise Exception(name) +class NoBoolCall: + """ + callable with `__bool__` raising should still be inspect-able. + """ + + def __call__(self): + """does nothing""" + pass + + def __bool__(self): + """just raise NotImplemented""" + raise NotImplementedError('Must be implemented') + def check_calltip(obj, name, call, docstring): """Generic check pattern all calltip tests will use""" @@ -281,6 +294,9 @@ def test_info_awkward(): # Just test that this doesn't throw an error. i = inspector.info(Awkward()) +def test_bool_raise(): + inspector.info(NoBoolCall()) + def test_calldef_none(): # We should ignore __call__ for all of these. for obj in [f, SimpleClass().method, any, str.upper]:
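For context on the one-line fix above: a bare truthiness test (`if callable_obj:`) invokes the object's `__bool__`, which the new `NoBoolCall` test fixture deliberately makes raise, while an identity test (`is not None`) never consults `__bool__`. A minimal, self-contained sketch of the difference (plain Python, not IPython code; the class simply mirrors the test fixture):

```python
class NoBoolCall:
    """Callable whose truthiness deliberately raises, like the test fixture above."""

    def __call__(self):
        pass

    def __bool__(self):
        raise NotImplementedError('Must be implemented')


obj = NoBoolCall()

try:
    if obj:  # a truthiness test calls obj.__bool__(), which raises here
        pass
except NotImplementedError as exc:
    print('bool(obj) raised:', exc)

if obj is not None:  # an identity test never calls __bool__
    print('identity check succeeds, so info() can proceed')
```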
mitmproxy__mitmproxy-3070
[mitmweb] asyncio has no attribute call_soon

##### Steps to reproduce the problem:

1. Launch mitmweb
2. Save some flows
3. Load those flows

![asyncio_attribute_error](https://user-images.githubusercontent.com/15815157/38856635-48c7130c-4227-11e8-9019-0caf8f22038c.JPG)

##### Any other comments? What have you tried so far?

https://github.com/mitmproxy/mitmproxy/blob/0fa1280daa94729defa8411d86266bd2b52ad0b6/mitmproxy/tools/web/app.py#L238-L239

I replaced this line with `asyncio.ensure_future(self.master.load_flows(i))` to make the loading work.

##### System information

![mitmdump_version](https://user-images.githubusercontent.com/15815157/38857072-93f56a8a-4228-11e8-8fbb-fed4e11c2745.JPG)
[ { "content": "import hashlib\nimport json\nimport logging\nimport os.path\nimport re\nfrom io import BytesIO\nimport asyncio\n\nimport mitmproxy.flow\nimport tornado.escape\nimport tornado.web\nimport tornado.websocket\nfrom mitmproxy import contentviews\nfrom mitmproxy import exceptions\nfrom mitmproxy import flowfilter\nfrom mitmproxy import http\nfrom mitmproxy import io\nfrom mitmproxy import log\nfrom mitmproxy import version\nfrom mitmproxy import optmanager\nimport mitmproxy.tools.web.master # noqa\n\n\ndef flow_to_json(flow: mitmproxy.flow.Flow) -> dict:\n \"\"\"\n Remove flow message content and cert to save transmission space.\n\n Args:\n flow: The original flow.\n \"\"\"\n f = {\n \"id\": flow.id,\n \"intercepted\": flow.intercepted,\n \"client_conn\": flow.client_conn.get_state(),\n \"server_conn\": flow.server_conn.get_state(),\n \"type\": flow.type,\n \"modified\": flow.modified(),\n \"marked\": flow.marked,\n }\n # .alpn_proto_negotiated is bytes, we need to decode that.\n for conn in \"client_conn\", \"server_conn\":\n if f[conn][\"alpn_proto_negotiated\"] is None:\n continue\n f[conn][\"alpn_proto_negotiated\"] = \\\n f[conn][\"alpn_proto_negotiated\"].decode(errors=\"backslashreplace\")\n # There are some bytes in here as well, let's skip it until we have them in the UI.\n f[\"client_conn\"].pop(\"tls_extensions\", None)\n if flow.error:\n f[\"error\"] = flow.error.get_state()\n\n if isinstance(flow, http.HTTPFlow):\n if flow.request:\n if flow.request.raw_content:\n content_length = len(flow.request.raw_content)\n content_hash = hashlib.sha256(flow.request.raw_content).hexdigest()\n else:\n content_length = None\n content_hash = None\n f[\"request\"] = {\n \"method\": flow.request.method,\n \"scheme\": flow.request.scheme,\n \"host\": flow.request.host,\n \"port\": flow.request.port,\n \"path\": flow.request.path,\n \"http_version\": flow.request.http_version,\n \"headers\": tuple(flow.request.headers.items(True)),\n \"contentLength\": content_length,\n \"contentHash\": content_hash,\n \"timestamp_start\": flow.request.timestamp_start,\n \"timestamp_end\": flow.request.timestamp_end,\n \"is_replay\": flow.request.is_replay,\n \"pretty_host\": flow.request.pretty_host,\n }\n if flow.response:\n if flow.response.raw_content:\n content_length = len(flow.response.raw_content)\n content_hash = hashlib.sha256(flow.response.raw_content).hexdigest()\n else:\n content_length = None\n content_hash = None\n f[\"response\"] = {\n \"http_version\": flow.response.http_version,\n \"status_code\": flow.response.status_code,\n \"reason\": flow.response.reason,\n \"headers\": tuple(flow.response.headers.items(True)),\n \"contentLength\": content_length,\n \"contentHash\": content_hash,\n \"timestamp_start\": flow.response.timestamp_start,\n \"timestamp_end\": flow.response.timestamp_end,\n \"is_replay\": flow.response.is_replay,\n }\n f.get(\"server_conn\", {}).pop(\"cert\", None)\n f.get(\"client_conn\", {}).pop(\"mitmcert\", None)\n\n return f\n\n\ndef logentry_to_json(e: log.LogEntry) -> dict:\n return {\n \"id\": id(e), # we just need some kind of id.\n \"message\": e.msg,\n \"level\": e.level\n }\n\n\nclass APIError(tornado.web.HTTPError):\n pass\n\n\nclass RequestHandler(tornado.web.RequestHandler):\n def write(self, chunk):\n # Writing arrays on the top level is ok nowadays.\n # http://flask.pocoo.org/docs/0.11/security/#json-security\n if isinstance(chunk, list):\n chunk = tornado.escape.json_encode(chunk)\n self.set_header(\"Content-Type\", \"application/json; 
charset=UTF-8\")\n super(RequestHandler, self).write(chunk)\n\n def set_default_headers(self):\n super().set_default_headers()\n self.set_header(\"Server\", version.MITMPROXY)\n self.set_header(\"X-Frame-Options\", \"DENY\")\n self.add_header(\"X-XSS-Protection\", \"1; mode=block\")\n self.add_header(\"X-Content-Type-Options\", \"nosniff\")\n self.add_header(\n \"Content-Security-Policy\",\n \"default-src 'self'; \"\n \"connect-src 'self' ws:; \"\n \"style-src 'self' 'unsafe-inline'\"\n )\n\n @property\n def json(self):\n if not self.request.headers.get(\"Content-Type\", \"\").startswith(\"application/json\"):\n raise APIError(400, \"Invalid Content-Type, expected application/json.\")\n try:\n return json.loads(self.request.body.decode())\n except Exception as e:\n raise APIError(400, \"Malformed JSON: {}\".format(str(e)))\n\n @property\n def filecontents(self):\n \"\"\"\n Accept either a multipart/form file upload or just take the plain request body.\n\n \"\"\"\n if self.request.files:\n return next(iter(self.request.files.values()))[0].body\n else:\n return self.request.body\n\n @property\n def view(self) -> \"mitmproxy.addons.view.View\":\n return self.application.master.view\n\n @property\n def master(self) -> \"mitmproxy.tools.web.master.WebMaster\":\n return self.application.master\n\n @property\n def flow(self) -> mitmproxy.flow.Flow:\n flow_id = str(self.path_kwargs[\"flow_id\"])\n # FIXME: Add a facility to addon.view to safely access the store\n flow = self.view.get_by_id(flow_id)\n if flow:\n return flow\n else:\n raise APIError(404, \"Flow not found.\")\n\n def write_error(self, status_code: int, **kwargs):\n if \"exc_info\" in kwargs and isinstance(kwargs[\"exc_info\"][1], APIError):\n self.finish(kwargs[\"exc_info\"][1].log_message)\n else:\n super().write_error(status_code, **kwargs)\n\n\nclass IndexHandler(RequestHandler):\n def get(self):\n token = self.xsrf_token # https://github.com/tornadoweb/tornado/issues/645\n assert token\n self.render(\"index.html\")\n\n\nclass FilterHelp(RequestHandler):\n def get(self):\n self.write(dict(\n commands=flowfilter.help\n ))\n\n\nclass WebSocketEventBroadcaster(tornado.websocket.WebSocketHandler):\n # raise an error if inherited class doesn't specify its own instance.\n connections: set = None\n\n def open(self):\n self.connections.add(self)\n\n def on_close(self):\n self.connections.remove(self)\n\n @classmethod\n def broadcast(cls, **kwargs):\n message = json.dumps(kwargs, ensure_ascii=False).encode(\"utf8\", \"surrogateescape\")\n\n for conn in cls.connections:\n try:\n conn.write_message(message)\n except Exception: # pragma: no cover\n logging.error(\"Error sending message\", exc_info=True)\n\n\nclass ClientConnection(WebSocketEventBroadcaster):\n connections: set = set()\n\n\nclass Flows(RequestHandler):\n def get(self):\n self.write([flow_to_json(f) for f in self.view])\n\n\nclass DumpFlows(RequestHandler):\n def get(self):\n self.set_header(\"Content-Disposition\", \"attachment; filename=flows\")\n self.set_header(\"Content-Type\", \"application/octet-stream\")\n\n bio = BytesIO()\n fw = io.FlowWriter(bio)\n for f in self.view:\n fw.add(f)\n\n self.write(bio.getvalue())\n bio.close()\n\n def post(self):\n self.view.clear()\n bio = BytesIO(self.filecontents)\n for i in io.FlowReader(bio).stream():\n asyncio.call_soon(self.master.load_flow, i)\n bio.close()\n\n\nclass ClearAll(RequestHandler):\n def post(self):\n self.view.clear()\n self.master.events.clear()\n\n\nclass ResumeFlows(RequestHandler):\n def post(self):\n for f in 
self.view:\n f.resume()\n self.view.update([f])\n\n\nclass KillFlows(RequestHandler):\n def post(self):\n for f in self.view:\n if f.killable:\n f.kill()\n self.view.update([f])\n\n\nclass ResumeFlow(RequestHandler):\n def post(self, flow_id):\n self.flow.resume()\n self.view.update([self.flow])\n\n\nclass KillFlow(RequestHandler):\n def post(self, flow_id):\n if self.flow.killable:\n self.flow.kill()\n self.view.update([self.flow])\n\n\nclass FlowHandler(RequestHandler):\n def delete(self, flow_id):\n if self.flow.killable:\n self.flow.kill()\n self.view.remove([self.flow])\n\n def put(self, flow_id):\n flow = self.flow\n flow.backup()\n try:\n for a, b in self.json.items():\n if a == \"request\" and hasattr(flow, \"request\"):\n request = flow.request\n for k, v in b.items():\n if k in [\"method\", \"scheme\", \"host\", \"path\", \"http_version\"]:\n setattr(request, k, str(v))\n elif k == \"port\":\n request.port = int(v)\n elif k == \"headers\":\n request.headers.clear()\n for header in v:\n request.headers.add(*header)\n elif k == \"content\":\n request.text = v\n else:\n raise APIError(400, \"Unknown update request.{}: {}\".format(k, v))\n\n elif a == \"response\" and hasattr(flow, \"response\"):\n response = flow.response\n for k, v in b.items():\n if k in [\"msg\", \"http_version\"]:\n setattr(response, k, str(v))\n elif k == \"code\":\n response.status_code = int(v)\n elif k == \"headers\":\n response.headers.clear()\n for header in v:\n response.headers.add(*header)\n elif k == \"content\":\n response.text = v\n else:\n raise APIError(400, \"Unknown update response.{}: {}\".format(k, v))\n else:\n raise APIError(400, \"Unknown update {}: {}\".format(a, b))\n except APIError:\n flow.revert()\n raise\n self.view.update([flow])\n\n\nclass DuplicateFlow(RequestHandler):\n def post(self, flow_id):\n f = self.flow.copy()\n self.view.add([f])\n self.write(f.id)\n\n\nclass RevertFlow(RequestHandler):\n def post(self, flow_id):\n if self.flow.modified():\n self.flow.revert()\n self.view.update([self.flow])\n\n\nclass ReplayFlow(RequestHandler):\n def post(self, flow_id):\n self.flow.backup()\n self.flow.response = None\n self.view.update([self.flow])\n\n try:\n self.master.replay_request(self.flow)\n except exceptions.ReplayException as e:\n raise APIError(400, str(e))\n\n\nclass FlowContent(RequestHandler):\n def post(self, flow_id, message):\n self.flow.backup()\n message = getattr(self.flow, message)\n message.content = self.filecontents\n self.view.update([self.flow])\n\n def get(self, flow_id, message):\n message = getattr(self.flow, message)\n\n if not message.raw_content:\n raise APIError(400, \"No content.\")\n\n content_encoding = message.headers.get(\"Content-Encoding\", None)\n if content_encoding:\n content_encoding = re.sub(r\"[^\\w]\", \"\", content_encoding)\n self.set_header(\"Content-Encoding\", content_encoding)\n\n original_cd = message.headers.get(\"Content-Disposition\", None)\n filename = None\n if original_cd:\n filename = re.search('filename=([-\\w\" .()]+)', original_cd)\n if filename:\n filename = filename.group(1)\n if not filename:\n filename = self.flow.request.path.split(\"?\")[0].split(\"/\")[-1]\n\n filename = re.sub(r'[^-\\w\" .()]', \"\", filename)\n cd = \"attachment; filename={}\".format(filename)\n self.set_header(\"Content-Disposition\", cd)\n self.set_header(\"Content-Type\", \"application/text\")\n self.set_header(\"X-Content-Type-Options\", \"nosniff\")\n self.set_header(\"X-Frame-Options\", \"DENY\")\n self.write(message.raw_content)\n\n\nclass 
FlowContentView(RequestHandler):\n def get(self, flow_id, message, content_view):\n message = getattr(self.flow, message)\n\n description, lines, error = contentviews.get_message_content_view(\n content_view.replace('_', ' '), message\n )\n # if error:\n # add event log\n\n self.write(dict(\n lines=list(lines),\n description=description\n ))\n\n\nclass Events(RequestHandler):\n def get(self):\n self.write([logentry_to_json(e) for e in self.master.events.data])\n\n\nclass Settings(RequestHandler):\n def get(self):\n self.write(dict(\n version=version.VERSION,\n mode=str(self.master.options.mode),\n intercept_active=self.master.options.intercept_active,\n intercept=self.master.options.intercept,\n showhost=self.master.options.showhost,\n upstream_cert=self.master.options.upstream_cert,\n rawtcp=self.master.options.rawtcp,\n http2=self.master.options.http2,\n websocket=self.master.options.websocket,\n anticache=self.master.options.anticache,\n anticomp=self.master.options.anticomp,\n stickyauth=self.master.options.stickyauth,\n stickycookie=self.master.options.stickycookie,\n stream=self.master.options.stream_large_bodies,\n contentViews=[v.name.replace(' ', '_') for v in contentviews.views],\n listen_host=self.master.options.listen_host,\n listen_port=self.master.options.listen_port,\n server=self.master.options.server,\n ))\n\n def put(self):\n update = self.json\n option_whitelist = {\n \"intercept\", \"showhost\", \"upstream_cert\",\n \"rawtcp\", \"http2\", \"websocket\", \"anticache\", \"anticomp\",\n \"stickycookie\", \"stickyauth\", \"stream_large_bodies\"\n }\n for k in update:\n if k not in option_whitelist:\n raise APIError(400, \"Unknown setting {}\".format(k))\n self.master.options.update(**update)\n\n\nclass Options(RequestHandler):\n def get(self):\n self.write(optmanager.dump_dicts(self.master.options))\n\n def put(self):\n update = self.json\n try:\n self.master.options.update(**update)\n except Exception as err:\n raise APIError(400, \"{}\".format(err))\n\n\nclass SaveOptions(RequestHandler):\n def post(self):\n # try:\n # optmanager.save(self.master.options, CONFIG_PATH, True)\n # except Exception as err:\n # raise APIError(400, \"{}\".format(err))\n pass\n\n\nclass Application(tornado.web.Application):\n def __init__(self, master, debug):\n self.master = master\n handlers = [\n (r\"/\", IndexHandler),\n (r\"/filter-help(?:\\.json)?\", FilterHelp),\n (r\"/updates\", ClientConnection),\n (r\"/events(?:\\.json)?\", Events),\n (r\"/flows(?:\\.json)?\", Flows),\n (r\"/flows/dump\", DumpFlows),\n (r\"/flows/resume\", ResumeFlows),\n (r\"/flows/kill\", KillFlows),\n (r\"/flows/(?P<flow_id>[0-9a-f\\-]+)\", FlowHandler),\n (r\"/flows/(?P<flow_id>[0-9a-f\\-]+)/resume\", ResumeFlow),\n (r\"/flows/(?P<flow_id>[0-9a-f\\-]+)/kill\", KillFlow),\n (r\"/flows/(?P<flow_id>[0-9a-f\\-]+)/duplicate\", DuplicateFlow),\n (r\"/flows/(?P<flow_id>[0-9a-f\\-]+)/replay\", ReplayFlow),\n (r\"/flows/(?P<flow_id>[0-9a-f\\-]+)/revert\", RevertFlow),\n (r\"/flows/(?P<flow_id>[0-9a-f\\-]+)/(?P<message>request|response)/content.data\", FlowContent),\n (\n r\"/flows/(?P<flow_id>[0-9a-f\\-]+)/(?P<message>request|response)/content/(?P<content_view>[0-9a-zA-Z\\-\\_]+)(?:\\.json)?\",\n FlowContentView),\n (r\"/settings(?:\\.json)?\", Settings),\n (r\"/clear\", ClearAll),\n (r\"/options(?:\\.json)?\", Options),\n (r\"/options/save\", SaveOptions)\n ]\n settings = dict(\n template_path=os.path.join(os.path.dirname(__file__), \"templates\"),\n static_path=os.path.join(os.path.dirname(__file__), \"static\"),\n 
xsrf_cookies=True,\n cookie_secret=os.urandom(256),\n debug=debug,\n autoreload=False,\n )\n super().__init__(handlers, **settings)\n", "path": "mitmproxy/tools/web/app.py" } ]
[ { "content": "import hashlib\nimport json\nimport logging\nimport os.path\nimport re\nfrom io import BytesIO\nimport asyncio\n\nimport mitmproxy.flow\nimport tornado.escape\nimport tornado.web\nimport tornado.websocket\nfrom mitmproxy import contentviews\nfrom mitmproxy import exceptions\nfrom mitmproxy import flowfilter\nfrom mitmproxy import http\nfrom mitmproxy import io\nfrom mitmproxy import log\nfrom mitmproxy import version\nfrom mitmproxy import optmanager\nimport mitmproxy.tools.web.master # noqa\n\n\ndef flow_to_json(flow: mitmproxy.flow.Flow) -> dict:\n \"\"\"\n Remove flow message content and cert to save transmission space.\n\n Args:\n flow: The original flow.\n \"\"\"\n f = {\n \"id\": flow.id,\n \"intercepted\": flow.intercepted,\n \"client_conn\": flow.client_conn.get_state(),\n \"server_conn\": flow.server_conn.get_state(),\n \"type\": flow.type,\n \"modified\": flow.modified(),\n \"marked\": flow.marked,\n }\n # .alpn_proto_negotiated is bytes, we need to decode that.\n for conn in \"client_conn\", \"server_conn\":\n if f[conn][\"alpn_proto_negotiated\"] is None:\n continue\n f[conn][\"alpn_proto_negotiated\"] = \\\n f[conn][\"alpn_proto_negotiated\"].decode(errors=\"backslashreplace\")\n # There are some bytes in here as well, let's skip it until we have them in the UI.\n f[\"client_conn\"].pop(\"tls_extensions\", None)\n if flow.error:\n f[\"error\"] = flow.error.get_state()\n\n if isinstance(flow, http.HTTPFlow):\n if flow.request:\n if flow.request.raw_content:\n content_length = len(flow.request.raw_content)\n content_hash = hashlib.sha256(flow.request.raw_content).hexdigest()\n else:\n content_length = None\n content_hash = None\n f[\"request\"] = {\n \"method\": flow.request.method,\n \"scheme\": flow.request.scheme,\n \"host\": flow.request.host,\n \"port\": flow.request.port,\n \"path\": flow.request.path,\n \"http_version\": flow.request.http_version,\n \"headers\": tuple(flow.request.headers.items(True)),\n \"contentLength\": content_length,\n \"contentHash\": content_hash,\n \"timestamp_start\": flow.request.timestamp_start,\n \"timestamp_end\": flow.request.timestamp_end,\n \"is_replay\": flow.request.is_replay,\n \"pretty_host\": flow.request.pretty_host,\n }\n if flow.response:\n if flow.response.raw_content:\n content_length = len(flow.response.raw_content)\n content_hash = hashlib.sha256(flow.response.raw_content).hexdigest()\n else:\n content_length = None\n content_hash = None\n f[\"response\"] = {\n \"http_version\": flow.response.http_version,\n \"status_code\": flow.response.status_code,\n \"reason\": flow.response.reason,\n \"headers\": tuple(flow.response.headers.items(True)),\n \"contentLength\": content_length,\n \"contentHash\": content_hash,\n \"timestamp_start\": flow.response.timestamp_start,\n \"timestamp_end\": flow.response.timestamp_end,\n \"is_replay\": flow.response.is_replay,\n }\n f.get(\"server_conn\", {}).pop(\"cert\", None)\n f.get(\"client_conn\", {}).pop(\"mitmcert\", None)\n\n return f\n\n\ndef logentry_to_json(e: log.LogEntry) -> dict:\n return {\n \"id\": id(e), # we just need some kind of id.\n \"message\": e.msg,\n \"level\": e.level\n }\n\n\nclass APIError(tornado.web.HTTPError):\n pass\n\n\nclass RequestHandler(tornado.web.RequestHandler):\n def write(self, chunk):\n # Writing arrays on the top level is ok nowadays.\n # http://flask.pocoo.org/docs/0.11/security/#json-security\n if isinstance(chunk, list):\n chunk = tornado.escape.json_encode(chunk)\n self.set_header(\"Content-Type\", \"application/json; 
charset=UTF-8\")\n super(RequestHandler, self).write(chunk)\n\n def set_default_headers(self):\n super().set_default_headers()\n self.set_header(\"Server\", version.MITMPROXY)\n self.set_header(\"X-Frame-Options\", \"DENY\")\n self.add_header(\"X-XSS-Protection\", \"1; mode=block\")\n self.add_header(\"X-Content-Type-Options\", \"nosniff\")\n self.add_header(\n \"Content-Security-Policy\",\n \"default-src 'self'; \"\n \"connect-src 'self' ws:; \"\n \"style-src 'self' 'unsafe-inline'\"\n )\n\n @property\n def json(self):\n if not self.request.headers.get(\"Content-Type\", \"\").startswith(\"application/json\"):\n raise APIError(400, \"Invalid Content-Type, expected application/json.\")\n try:\n return json.loads(self.request.body.decode())\n except Exception as e:\n raise APIError(400, \"Malformed JSON: {}\".format(str(e)))\n\n @property\n def filecontents(self):\n \"\"\"\n Accept either a multipart/form file upload or just take the plain request body.\n\n \"\"\"\n if self.request.files:\n return next(iter(self.request.files.values()))[0].body\n else:\n return self.request.body\n\n @property\n def view(self) -> \"mitmproxy.addons.view.View\":\n return self.application.master.view\n\n @property\n def master(self) -> \"mitmproxy.tools.web.master.WebMaster\":\n return self.application.master\n\n @property\n def flow(self) -> mitmproxy.flow.Flow:\n flow_id = str(self.path_kwargs[\"flow_id\"])\n # FIXME: Add a facility to addon.view to safely access the store\n flow = self.view.get_by_id(flow_id)\n if flow:\n return flow\n else:\n raise APIError(404, \"Flow not found.\")\n\n def write_error(self, status_code: int, **kwargs):\n if \"exc_info\" in kwargs and isinstance(kwargs[\"exc_info\"][1], APIError):\n self.finish(kwargs[\"exc_info\"][1].log_message)\n else:\n super().write_error(status_code, **kwargs)\n\n\nclass IndexHandler(RequestHandler):\n def get(self):\n token = self.xsrf_token # https://github.com/tornadoweb/tornado/issues/645\n assert token\n self.render(\"index.html\")\n\n\nclass FilterHelp(RequestHandler):\n def get(self):\n self.write(dict(\n commands=flowfilter.help\n ))\n\n\nclass WebSocketEventBroadcaster(tornado.websocket.WebSocketHandler):\n # raise an error if inherited class doesn't specify its own instance.\n connections: set = None\n\n def open(self):\n self.connections.add(self)\n\n def on_close(self):\n self.connections.remove(self)\n\n @classmethod\n def broadcast(cls, **kwargs):\n message = json.dumps(kwargs, ensure_ascii=False).encode(\"utf8\", \"surrogateescape\")\n\n for conn in cls.connections:\n try:\n conn.write_message(message)\n except Exception: # pragma: no cover\n logging.error(\"Error sending message\", exc_info=True)\n\n\nclass ClientConnection(WebSocketEventBroadcaster):\n connections: set = set()\n\n\nclass Flows(RequestHandler):\n def get(self):\n self.write([flow_to_json(f) for f in self.view])\n\n\nclass DumpFlows(RequestHandler):\n def get(self):\n self.set_header(\"Content-Disposition\", \"attachment; filename=flows\")\n self.set_header(\"Content-Type\", \"application/octet-stream\")\n\n bio = BytesIO()\n fw = io.FlowWriter(bio)\n for f in self.view:\n fw.add(f)\n\n self.write(bio.getvalue())\n bio.close()\n\n def post(self):\n self.view.clear()\n bio = BytesIO(self.filecontents)\n for i in io.FlowReader(bio).stream():\n asyncio.ensure_future(self.master.load_flow(i))\n bio.close()\n\n\nclass ClearAll(RequestHandler):\n def post(self):\n self.view.clear()\n self.master.events.clear()\n\n\nclass ResumeFlows(RequestHandler):\n def post(self):\n for f 
in self.view:\n f.resume()\n self.view.update([f])\n\n\nclass KillFlows(RequestHandler):\n def post(self):\n for f in self.view:\n if f.killable:\n f.kill()\n self.view.update([f])\n\n\nclass ResumeFlow(RequestHandler):\n def post(self, flow_id):\n self.flow.resume()\n self.view.update([self.flow])\n\n\nclass KillFlow(RequestHandler):\n def post(self, flow_id):\n if self.flow.killable:\n self.flow.kill()\n self.view.update([self.flow])\n\n\nclass FlowHandler(RequestHandler):\n def delete(self, flow_id):\n if self.flow.killable:\n self.flow.kill()\n self.view.remove([self.flow])\n\n def put(self, flow_id):\n flow = self.flow\n flow.backup()\n try:\n for a, b in self.json.items():\n if a == \"request\" and hasattr(flow, \"request\"):\n request = flow.request\n for k, v in b.items():\n if k in [\"method\", \"scheme\", \"host\", \"path\", \"http_version\"]:\n setattr(request, k, str(v))\n elif k == \"port\":\n request.port = int(v)\n elif k == \"headers\":\n request.headers.clear()\n for header in v:\n request.headers.add(*header)\n elif k == \"content\":\n request.text = v\n else:\n raise APIError(400, \"Unknown update request.{}: {}\".format(k, v))\n\n elif a == \"response\" and hasattr(flow, \"response\"):\n response = flow.response\n for k, v in b.items():\n if k in [\"msg\", \"http_version\"]:\n setattr(response, k, str(v))\n elif k == \"code\":\n response.status_code = int(v)\n elif k == \"headers\":\n response.headers.clear()\n for header in v:\n response.headers.add(*header)\n elif k == \"content\":\n response.text = v\n else:\n raise APIError(400, \"Unknown update response.{}: {}\".format(k, v))\n else:\n raise APIError(400, \"Unknown update {}: {}\".format(a, b))\n except APIError:\n flow.revert()\n raise\n self.view.update([flow])\n\n\nclass DuplicateFlow(RequestHandler):\n def post(self, flow_id):\n f = self.flow.copy()\n self.view.add([f])\n self.write(f.id)\n\n\nclass RevertFlow(RequestHandler):\n def post(self, flow_id):\n if self.flow.modified():\n self.flow.revert()\n self.view.update([self.flow])\n\n\nclass ReplayFlow(RequestHandler):\n def post(self, flow_id):\n self.flow.backup()\n self.flow.response = None\n self.view.update([self.flow])\n\n try:\n self.master.replay_request(self.flow)\n except exceptions.ReplayException as e:\n raise APIError(400, str(e))\n\n\nclass FlowContent(RequestHandler):\n def post(self, flow_id, message):\n self.flow.backup()\n message = getattr(self.flow, message)\n message.content = self.filecontents\n self.view.update([self.flow])\n\n def get(self, flow_id, message):\n message = getattr(self.flow, message)\n\n if not message.raw_content:\n raise APIError(400, \"No content.\")\n\n content_encoding = message.headers.get(\"Content-Encoding\", None)\n if content_encoding:\n content_encoding = re.sub(r\"[^\\w]\", \"\", content_encoding)\n self.set_header(\"Content-Encoding\", content_encoding)\n\n original_cd = message.headers.get(\"Content-Disposition\", None)\n filename = None\n if original_cd:\n filename = re.search('filename=([-\\w\" .()]+)', original_cd)\n if filename:\n filename = filename.group(1)\n if not filename:\n filename = self.flow.request.path.split(\"?\")[0].split(\"/\")[-1]\n\n filename = re.sub(r'[^-\\w\" .()]', \"\", filename)\n cd = \"attachment; filename={}\".format(filename)\n self.set_header(\"Content-Disposition\", cd)\n self.set_header(\"Content-Type\", \"application/text\")\n self.set_header(\"X-Content-Type-Options\", \"nosniff\")\n self.set_header(\"X-Frame-Options\", \"DENY\")\n self.write(message.raw_content)\n\n\nclass 
FlowContentView(RequestHandler):\n def get(self, flow_id, message, content_view):\n message = getattr(self.flow, message)\n\n description, lines, error = contentviews.get_message_content_view(\n content_view.replace('_', ' '), message\n )\n # if error:\n # add event log\n\n self.write(dict(\n lines=list(lines),\n description=description\n ))\n\n\nclass Events(RequestHandler):\n def get(self):\n self.write([logentry_to_json(e) for e in self.master.events.data])\n\n\nclass Settings(RequestHandler):\n def get(self):\n self.write(dict(\n version=version.VERSION,\n mode=str(self.master.options.mode),\n intercept_active=self.master.options.intercept_active,\n intercept=self.master.options.intercept,\n showhost=self.master.options.showhost,\n upstream_cert=self.master.options.upstream_cert,\n rawtcp=self.master.options.rawtcp,\n http2=self.master.options.http2,\n websocket=self.master.options.websocket,\n anticache=self.master.options.anticache,\n anticomp=self.master.options.anticomp,\n stickyauth=self.master.options.stickyauth,\n stickycookie=self.master.options.stickycookie,\n stream=self.master.options.stream_large_bodies,\n contentViews=[v.name.replace(' ', '_') for v in contentviews.views],\n listen_host=self.master.options.listen_host,\n listen_port=self.master.options.listen_port,\n server=self.master.options.server,\n ))\n\n def put(self):\n update = self.json\n option_whitelist = {\n \"intercept\", \"showhost\", \"upstream_cert\",\n \"rawtcp\", \"http2\", \"websocket\", \"anticache\", \"anticomp\",\n \"stickycookie\", \"stickyauth\", \"stream_large_bodies\"\n }\n for k in update:\n if k not in option_whitelist:\n raise APIError(400, \"Unknown setting {}\".format(k))\n self.master.options.update(**update)\n\n\nclass Options(RequestHandler):\n def get(self):\n self.write(optmanager.dump_dicts(self.master.options))\n\n def put(self):\n update = self.json\n try:\n self.master.options.update(**update)\n except Exception as err:\n raise APIError(400, \"{}\".format(err))\n\n\nclass SaveOptions(RequestHandler):\n def post(self):\n # try:\n # optmanager.save(self.master.options, CONFIG_PATH, True)\n # except Exception as err:\n # raise APIError(400, \"{}\".format(err))\n pass\n\n\nclass Application(tornado.web.Application):\n def __init__(self, master, debug):\n self.master = master\n handlers = [\n (r\"/\", IndexHandler),\n (r\"/filter-help(?:\\.json)?\", FilterHelp),\n (r\"/updates\", ClientConnection),\n (r\"/events(?:\\.json)?\", Events),\n (r\"/flows(?:\\.json)?\", Flows),\n (r\"/flows/dump\", DumpFlows),\n (r\"/flows/resume\", ResumeFlows),\n (r\"/flows/kill\", KillFlows),\n (r\"/flows/(?P<flow_id>[0-9a-f\\-]+)\", FlowHandler),\n (r\"/flows/(?P<flow_id>[0-9a-f\\-]+)/resume\", ResumeFlow),\n (r\"/flows/(?P<flow_id>[0-9a-f\\-]+)/kill\", KillFlow),\n (r\"/flows/(?P<flow_id>[0-9a-f\\-]+)/duplicate\", DuplicateFlow),\n (r\"/flows/(?P<flow_id>[0-9a-f\\-]+)/replay\", ReplayFlow),\n (r\"/flows/(?P<flow_id>[0-9a-f\\-]+)/revert\", RevertFlow),\n (r\"/flows/(?P<flow_id>[0-9a-f\\-]+)/(?P<message>request|response)/content.data\", FlowContent),\n (\n r\"/flows/(?P<flow_id>[0-9a-f\\-]+)/(?P<message>request|response)/content/(?P<content_view>[0-9a-zA-Z\\-\\_]+)(?:\\.json)?\",\n FlowContentView),\n (r\"/settings(?:\\.json)?\", Settings),\n (r\"/clear\", ClearAll),\n (r\"/options(?:\\.json)?\", Options),\n (r\"/options/save\", SaveOptions)\n ]\n settings = dict(\n template_path=os.path.join(os.path.dirname(__file__), \"templates\"),\n static_path=os.path.join(os.path.dirname(__file__), \"static\"),\n 
xsrf_cookies=True,\n cookie_secret=os.urandom(256),\n debug=debug,\n autoreload=False,\n )\n super().__init__(handlers, **settings)\n", "path": "mitmproxy/tools/web/app.py" } ]
diff --git a/mitmproxy/tools/web/app.py b/mitmproxy/tools/web/app.py index 61e30a2138..184778b084 100644 --- a/mitmproxy/tools/web/app.py +++ b/mitmproxy/tools/web/app.py @@ -235,7 +235,7 @@ def post(self): self.view.clear() bio = BytesIO(self.filecontents) for i in io.FlowReader(bio).stream(): - asyncio.call_soon(self.master.load_flow, i) + asyncio.ensure_future(self.master.load_flow(i)) bio.close()
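For context on this fix: `asyncio` has no module-level `call_soon`; that method exists only on the event loop (`loop.call_soon`) and takes a plain callback rather than a coroutine, whereas `asyncio.ensure_future(...)` schedules the coroutine returned by `self.master.load_flow(i)` as a task on the running loop. A minimal sketch of the same pattern, using a hypothetical `load_flow` coroutine as a stand-in:

```python
import asyncio

async def load_flow(flow):
    # stand-in for self.master.load_flow, which the fix treats as a coroutine
    print('loaded', flow)

async def handle_post():
    # asyncio.call_soon(load_flow, 'f1')  -> AttributeError: there is no
    # module-level call_soon; loop.call_soon exists but expects a plain
    # callback. Wrapping the coroutine in a task schedules it correctly:
    await asyncio.ensure_future(load_flow('f1'))

asyncio.run(handle_post())  # requires Python 3.7+
```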
ansible__molecule-1615
CMD in pre-built docker image replaced with `while true; do sleep 10000; done`

# Issue Type

- Bug report

Was kind of reported in #1441

# Molecule and Ansible details

```
ansible 2.7.2
  config file = /etc/ansible/ansible.cfg
  configured module search path = [u'/home/fabian/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
  ansible python module location = /home/fabian/.local/lib/python2.7/site-packages/ansible
  executable location = /home/fabian/.local/bin/ansible
  python version = 2.7.15 (default, May 15 2018, 15:37:31) [GCC 7.3.1 20180303 (Red Hat 7.3.1-5)]
molecule, version 2.19.1.dev54
```

Molecule installation method (one of):

- source

Ansible installation method (one of):

- pip

# Desired Behavior

If I don't specify a `command` or specify `null` as the value for `command`, the existing `CMD` on the image should be used. See the validation sketch after this section.

# Actual Behaviour

When consuming an image with the docker driver, the existing `CMD` on the image is overwritten, even if `command` is unspecified or null. In order to get the behavior I desire from molecule, I currently need to copy-paste the content of the `CMD` directive into the `command` section of the `molecule.yml`, but this is inherently fragile, as every time the container used changes its `CMD` I would need to reflect that in every related molecule specification.
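The schema file that follows is where the docker driver declares `command`. As a rough illustration of what the requested behavior needs at the validation layer (a sketch only, not molecule's actual change, which is not fully shown in this record), cerberus rejects `command: null` unless the rule is marked `nullable`:

```python
import cerberus

strict = cerberus.Validator({'command': {'type': 'string'}})
relaxed = cerberus.Validator({'command': {'type': 'string', 'nullable': True}})

doc = {'command': None}  # what `command: null` in molecule.yml parses to
print(strict.validate(doc))   # False: null is rejected, CMD must be spelled out
print(relaxed.validate(doc))  # True: null passes, the image's CMD can survive
```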
[ { "content": "# Copyright (c) 2015-2018 Cisco Systems, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport copy\nimport functools\nimport re\n\nimport cerberus\nimport cerberus.errors\n\nfrom molecule import interpolation\nfrom molecule import util\n\n\ndef coerce_env(env, keep_string, v):\n i = interpolation.Interpolator(interpolation.TemplateWithDefaults, env)\n\n return i.interpolate(v, keep_string)\n\n\ndef pre_validate_base_schema(env, keep_string):\n return {\n 'dependency': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'molecule_env_var': True,\n 'allowed': [\n 'galaxy',\n 'gilt',\n 'shell',\n ],\n },\n }\n },\n 'driver': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type':\n 'string',\n 'molecule_env_var':\n True,\n 'allowed': [\n 'azure',\n 'delegated',\n 'docker',\n 'ec2',\n 'gce',\n 'lxc',\n 'lxd',\n 'openstack',\n 'vagrant',\n ],\n # NOTE(retr0h): Some users use an environment variable to\n # change the driver name. 
May add this coercion to rest of\n # config using allowed validation.\n 'coerce': (str,\n functools.partial(coerce_env, env, keep_string))\n },\n }\n },\n 'lint': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'molecule_env_var': True,\n 'allowed': [\n 'yamllint',\n ],\n },\n }\n },\n 'platforms': {\n 'type': 'list',\n 'schema': {\n 'type': 'dict',\n 'schema': {\n 'registry': {\n 'type': 'dict',\n 'schema': {\n 'credentials': {\n 'type': 'dict',\n 'schema': {\n 'password': {\n 'type': 'string',\n 'regex': '^[{$]+[a-z0-9A-Z]+[}]*$',\n },\n }\n },\n }\n },\n }\n }\n },\n 'provisioner': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'molecule_env_var': True,\n 'allowed': [\n 'ansible',\n ],\n },\n 'lint': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'molecule_env_var': True,\n 'allowed': [\n 'ansible-lint',\n ],\n },\n }\n },\n }\n },\n 'scenario': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'molecule_env_var': True,\n },\n }\n },\n 'verifier': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'molecule_env_var': True,\n 'allowed': [\n 'testinfra',\n 'inspec',\n 'goss',\n ],\n },\n 'lint': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'molecule_env_var': True,\n 'allowed': [\n 'flake8',\n 'rubocop',\n 'yamllint',\n ],\n },\n }\n },\n }\n },\n }\n\n\nbase_schema = {\n 'dependency': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n 'enabled': {\n 'type': 'boolean',\n },\n 'options': {\n 'type': 'dict',\n },\n 'env': {\n 'type': 'dict',\n 'keyschema': {\n 'type': 'string',\n 'regex': '^[A-Z0-9_-]+$',\n },\n },\n 'command': {\n 'type': 'string',\n 'nullable': True,\n },\n }\n },\n 'driver': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n 'provider': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'nullable': True,\n },\n }\n },\n 'options': {\n 'type': 'dict',\n 'schema': {\n 'managed': {\n 'type': 'boolean',\n },\n }\n },\n 'ssh_connection_options': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'safe_files': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n }\n },\n 'lint': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n 'enabled': {\n 'type': 'boolean',\n },\n 'options': {\n 'type': 'dict',\n },\n 'env': {\n 'type': 'dict',\n 'keyschema': {\n 'type': 'string',\n 'regex': '^[A-Z0-9_-]+$',\n },\n },\n }\n },\n 'platforms': {},\n 'provisioner': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n 'log': {\n 'type': 'boolean',\n },\n 'config_options': {\n 'type': 'dict',\n 'schema': {\n 'defaults': {\n 'type': 'dict',\n 'schema': {\n 'roles_path': {\n 'type': 'string',\n 'disallowed': True,\n },\n 'library': {\n 'type': 'string',\n 'disallowed': True,\n },\n 'filter_plugins': {\n 'type': 'string',\n 'disallowed': True,\n },\n }\n },\n 'privilege_escalation': {\n 'type': 'dict',\n 'disallowed': True,\n },\n }\n },\n 'connection_options': {\n 'type': 'dict',\n },\n 'options': {\n 'type': 'dict',\n },\n 'env': {\n 'type': 'dict',\n 'keyschema': {\n 'type': 'string',\n 'regex': '^[A-Z0-9_-]+$',\n },\n 'valueschema': {\n 'nullable': False,\n },\n 'schema': {\n 'ANSIBLE_BECOME': {\n 'type': 'string',\n 'disallowed': True,\n },\n 'ANSIBLE_BECOME_METHOD': {\n 'type': 'string',\n 'disallowed': True,\n },\n 'ANSIBLE_BECOME_USER': {\n 'type': 'string',\n 'disallowed': True,\n },\n }\n },\n 'inventory': {\n 'type': 'dict',\n 'schema': {\n 
'host_vars': {\n 'type': 'dict',\n },\n 'group_vars': {\n 'type': 'dict',\n },\n 'links': {\n 'type': 'dict',\n },\n }\n },\n 'children': {\n 'type': 'dict',\n },\n 'playbooks': {\n 'type': 'dict',\n 'schema': {\n 'create': {\n 'type': 'string',\n },\n 'converge': {\n 'type': 'string',\n },\n 'destroy': {\n 'type': 'string',\n },\n 'prepare': {\n 'type': 'string',\n },\n 'side_effect': {\n 'type': 'string',\n },\n 'verify': {\n 'type': 'string',\n },\n }\n },\n 'lint': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n 'enabled': {\n 'type': 'boolean',\n },\n 'options': {\n 'type': 'dict',\n },\n 'env': {\n 'type': 'dict',\n 'keyschema': {\n 'type': 'string',\n 'regex': '^[A-Z0-9_-]+$',\n },\n },\n }\n },\n }\n },\n 'scenario': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n 'check_sequence': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'converge_sequence': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'create_sequence': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'destroy_sequence': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'test_sequence': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n }\n },\n 'verifier': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n 'enabled': {\n 'type': 'boolean',\n },\n 'options': {\n 'type': 'dict',\n },\n 'env': {\n 'type': 'dict',\n 'keyschema': {\n 'type': 'string',\n 'regex': '^[A-Z0-9_-]+$',\n },\n },\n 'directory': {\n 'type': 'string',\n },\n 'additional_files_or_dirs': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'lint': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n 'enabled': {\n 'type': 'boolean',\n },\n 'options': {\n 'type': 'dict',\n },\n 'env': {\n 'type': 'dict',\n 'keyschema': {\n 'type': 'string',\n 'regex': '^[A-Z0-9_-]+$',\n },\n },\n }\n },\n }\n },\n}\n\ndriver_vagrant_provider_section_schema = {\n 'driver': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n 'provider': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type':\n 'string',\n 'nullable':\n False,\n 'allowed': [\n 'virtualbox',\n 'vmware_fusion',\n 'vmware_workstation',\n 'vmware_desktop',\n 'parallels',\n 'libvirt',\n ],\n },\n }\n },\n }\n },\n}\n\nplatforms_base_schema = {\n 'platforms': {\n 'type': 'list',\n 'schema': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'required': True,\n },\n 'groups': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'children': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n }\n }\n },\n}\n\nplatforms_vagrant_schema = {\n 'platforms': {\n 'type': 'list',\n 'schema': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n 'interfaces': {\n 'type': 'list',\n 'schema': {\n 'type': 'dict',\n }\n },\n 'instance_raw_config_args': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'config_options': {\n 'type': 'dict',\n },\n 'box': {\n 'type': 'string',\n },\n 'box_version': {\n 'type': 'string',\n },\n 'box_url': {\n 'type': 'string',\n },\n 'memory': {\n 'type': 'integer',\n },\n 'cpus': {\n 'type': 'integer',\n },\n 'provider_options': {\n 'type': 'dict',\n },\n 'provider_raw_config_args': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'provision': {\n 'type': 'boolean',\n },\n }\n }\n },\n}\n\nplatforms_docker_schema = {\n 'platforms': {\n 'type': 'list',\n 'schema': {\n 'type': 'dict',\n 'schema': {\n 'name': 
{\n 'type': 'string',\n },\n 'hostname': {\n 'type': 'string',\n },\n 'image': {\n 'type': 'string',\n },\n 'pull': {\n 'type': 'boolean',\n },\n 'pre_build_image': {\n 'type': 'boolean',\n },\n 'registry': {\n 'type': 'dict',\n 'schema': {\n 'url': {\n 'type': 'string',\n },\n 'credentials': {\n 'type': 'dict',\n 'schema': {\n 'username': {\n 'type': 'string',\n },\n 'password': {\n 'type': 'string',\n },\n 'email': {\n 'type': 'string',\n },\n }\n },\n }\n },\n 'command': {\n 'type': 'string',\n },\n 'privileged': {\n 'type': 'boolean',\n },\n 'security_opts': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'volumes': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'tmpfs': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'capabilities': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'exposed_ports': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'published_ports': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'ulimits': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'dns_servers': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'env': {\n 'type': 'dict',\n 'keyschema': {\n 'type': 'string',\n 'regex': '^[a-zA-Z0-9_-]+$',\n }\n },\n 'restart_policy': {\n 'type': 'string',\n },\n 'restart_retries': {\n 'type': 'integer',\n },\n 'networks': {\n 'type': 'list',\n 'schema': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n }\n }\n },\n 'network_mode': {\n 'type': 'string',\n },\n }\n }\n },\n}\n\nplatforms_lxd_schema = {\n 'platforms': {\n 'type': 'list',\n 'schema': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n 'url': {\n 'type': 'string',\n },\n 'cert_file': {\n 'type': 'string',\n },\n 'key_file': {\n 'type': 'string',\n },\n 'trust_password': {\n 'type': 'string',\n },\n 'source': {\n 'type': 'dict',\n 'schema': {\n 'type': {\n 'type': 'string',\n },\n 'mode': {\n 'type': 'string',\n 'allowed': [\n 'pull',\n 'local',\n ],\n },\n 'server': {\n 'type': 'string',\n },\n 'protocol': {\n 'type': 'string',\n 'allowed': [\n 'lxd',\n 'simplestreams',\n ],\n },\n 'alias': {\n 'type': 'string',\n },\n },\n },\n 'architecture': {\n 'type': 'string',\n 'allowed': [\n 'x86_64',\n 'i686',\n ],\n },\n 'config': {\n 'type': 'dict',\n 'allow_unknown': True,\n },\n 'devices': {\n 'type': 'dict',\n 'allow_unknown': True,\n },\n 'profiles': {\n 'type': 'list',\n 'schema': {\n 'type': 'string'\n }\n },\n 'force_stop': {\n 'type': 'boolean',\n },\n }\n }\n },\n}\n\ndependency_command_nullable_schema = {\n 'dependency': {\n 'type': 'dict',\n 'schema': {\n 'command': {\n 'type': 'string',\n 'nullable': False,\n },\n }\n },\n}\n\nverifier_options_readonly_schema = {\n 'verifier': {\n 'type': 'dict',\n 'schema': {\n 'options': {\n 'keyschema': {\n 'readonly': True,\n },\n },\n }\n },\n}\n\nverifier_goss_mutually_exclusive_schema = {\n 'verifier': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'allowed': [\n 'goss',\n ],\n },\n 'lint': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'allowed': [\n 'yamllint',\n ],\n },\n }\n },\n }\n },\n}\n\nverifier_inspec_mutually_exclusive_schema = {\n 'verifier': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'allowed': [\n 'inspec',\n ],\n },\n 'lint': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'allowed': [\n 'rubocop',\n ],\n },\n }\n },\n }\n },\n}\nverifier_testinfra_mutually_exclusive_schema = {\n 
'verifier': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'allowed': [\n 'testinfra',\n ],\n },\n 'lint': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'allowed': [\n 'flake8',\n ],\n },\n }\n },\n }\n },\n}\n\n\nclass Validator(cerberus.Validator):\n def __init__(self, *args, **kwargs):\n super(Validator, self).__init__(*args, **kwargs)\n\n def _validate_disallowed(self, disallowed, field, value):\n \"\"\" Readonly but with a custom error.\n\n The rule's arguments are validated against this schema:\n {'type': 'boolean'}\n \"\"\"\n if disallowed:\n msg = 'disallowed user provided config option'\n self._error(field, msg)\n\n def _validate_molecule_env_var(self, molecule_env_var, field, value):\n \"\"\" Readonly but with a custom error.\n\n The rule's arguments are validated against this schema:\n {'type': 'boolean'}\n \"\"\"\n # TODO(retr0h): This needs to be better handled.\n pattern = r'^[{$]+MOLECULE[_a-z0-9A-Z]+[}]*$'\n\n if molecule_env_var:\n if re.match(pattern, value):\n msg = ('cannot reference $MOLECULE special variables '\n 'in this section')\n self._error(field, msg)\n\n\ndef pre_validate(stream, env, keep_string):\n data = util.safe_load(stream)\n\n v = Validator(allow_unknown=True)\n v.validate(data, pre_validate_base_schema(env, keep_string))\n\n return v.errors\n\n\ndef validate(c):\n schema = copy.deepcopy(base_schema)\n\n # Dependency\n if c['dependency']['name'] == 'shell':\n util.merge_dicts(schema, dependency_command_nullable_schema)\n\n # Driver\n util.merge_dicts(schema, platforms_base_schema)\n if c['driver']['name'] == 'docker':\n util.merge_dicts(schema, platforms_docker_schema)\n elif c['driver']['name'] == 'vagrant':\n util.merge_dicts(schema, driver_vagrant_provider_section_schema)\n util.merge_dicts(schema, platforms_vagrant_schema)\n elif c['driver']['name'] == 'lxd':\n util.merge_dicts(schema, platforms_lxd_schema)\n else:\n util.merge_dicts(schema, platforms_base_schema)\n\n # Verifier\n if c['verifier']['name'] == 'goss':\n util.merge_dicts(schema, verifier_options_readonly_schema)\n util.merge_dicts(schema, verifier_goss_mutually_exclusive_schema)\n elif c['verifier']['name'] == 'inspec':\n util.merge_dicts(schema, verifier_options_readonly_schema)\n util.merge_dicts(schema, verifier_inspec_mutually_exclusive_schema)\n elif c['verifier']['name'] == 'testinfra':\n util.merge_dicts(schema, verifier_testinfra_mutually_exclusive_schema)\n\n v = Validator(allow_unknown=True)\n v.validate(c, schema)\n\n return v.errors\n", "path": "molecule/model/schema_v2.py" } ]
[ { "content": "# Copyright (c) 2015-2018 Cisco Systems, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport copy\nimport functools\nimport re\n\nimport cerberus\nimport cerberus.errors\n\nfrom molecule import interpolation\nfrom molecule import util\n\n\ndef coerce_env(env, keep_string, v):\n i = interpolation.Interpolator(interpolation.TemplateWithDefaults, env)\n\n return i.interpolate(v, keep_string)\n\n\ndef pre_validate_base_schema(env, keep_string):\n return {\n 'dependency': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'molecule_env_var': True,\n 'allowed': [\n 'galaxy',\n 'gilt',\n 'shell',\n ],\n },\n }\n },\n 'driver': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type':\n 'string',\n 'molecule_env_var':\n True,\n 'allowed': [\n 'azure',\n 'delegated',\n 'docker',\n 'ec2',\n 'gce',\n 'lxc',\n 'lxd',\n 'openstack',\n 'vagrant',\n ],\n # NOTE(retr0h): Some users use an environment variable to\n # change the driver name. 
May add this coercion to rest of\n # config using allowed validation.\n 'coerce': (str,\n functools.partial(coerce_env, env, keep_string))\n },\n }\n },\n 'lint': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'molecule_env_var': True,\n 'allowed': [\n 'yamllint',\n ],\n },\n }\n },\n 'platforms': {\n 'type': 'list',\n 'schema': {\n 'type': 'dict',\n 'schema': {\n 'registry': {\n 'type': 'dict',\n 'schema': {\n 'credentials': {\n 'type': 'dict',\n 'schema': {\n 'password': {\n 'type': 'string',\n 'regex': '^[{$]+[a-z0-9A-Z]+[}]*$',\n },\n }\n },\n }\n },\n }\n }\n },\n 'provisioner': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'molecule_env_var': True,\n 'allowed': [\n 'ansible',\n ],\n },\n 'lint': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'molecule_env_var': True,\n 'allowed': [\n 'ansible-lint',\n ],\n },\n }\n },\n }\n },\n 'scenario': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'molecule_env_var': True,\n },\n }\n },\n 'verifier': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'molecule_env_var': True,\n 'allowed': [\n 'testinfra',\n 'inspec',\n 'goss',\n ],\n },\n 'lint': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'molecule_env_var': True,\n 'allowed': [\n 'flake8',\n 'rubocop',\n 'yamllint',\n ],\n },\n }\n },\n }\n },\n }\n\n\nbase_schema = {\n 'dependency': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n 'enabled': {\n 'type': 'boolean',\n },\n 'options': {\n 'type': 'dict',\n },\n 'env': {\n 'type': 'dict',\n 'keyschema': {\n 'type': 'string',\n 'regex': '^[A-Z0-9_-]+$',\n },\n },\n 'command': {\n 'type': 'string',\n 'nullable': True,\n },\n }\n },\n 'driver': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n 'provider': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'nullable': True,\n },\n }\n },\n 'options': {\n 'type': 'dict',\n 'schema': {\n 'managed': {\n 'type': 'boolean',\n },\n }\n },\n 'ssh_connection_options': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'safe_files': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n }\n },\n 'lint': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n 'enabled': {\n 'type': 'boolean',\n },\n 'options': {\n 'type': 'dict',\n },\n 'env': {\n 'type': 'dict',\n 'keyschema': {\n 'type': 'string',\n 'regex': '^[A-Z0-9_-]+$',\n },\n },\n }\n },\n 'platforms': {},\n 'provisioner': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n 'log': {\n 'type': 'boolean',\n },\n 'config_options': {\n 'type': 'dict',\n 'schema': {\n 'defaults': {\n 'type': 'dict',\n 'schema': {\n 'roles_path': {\n 'type': 'string',\n 'disallowed': True,\n },\n 'library': {\n 'type': 'string',\n 'disallowed': True,\n },\n 'filter_plugins': {\n 'type': 'string',\n 'disallowed': True,\n },\n }\n },\n 'privilege_escalation': {\n 'type': 'dict',\n 'disallowed': True,\n },\n }\n },\n 'connection_options': {\n 'type': 'dict',\n },\n 'options': {\n 'type': 'dict',\n },\n 'env': {\n 'type': 'dict',\n 'keyschema': {\n 'type': 'string',\n 'regex': '^[A-Z0-9_-]+$',\n },\n 'valueschema': {\n 'nullable': False,\n },\n 'schema': {\n 'ANSIBLE_BECOME': {\n 'type': 'string',\n 'disallowed': True,\n },\n 'ANSIBLE_BECOME_METHOD': {\n 'type': 'string',\n 'disallowed': True,\n },\n 'ANSIBLE_BECOME_USER': {\n 'type': 'string',\n 'disallowed': True,\n },\n }\n },\n 'inventory': {\n 'type': 'dict',\n 'schema': {\n 
'host_vars': {\n 'type': 'dict',\n },\n 'group_vars': {\n 'type': 'dict',\n },\n 'links': {\n 'type': 'dict',\n },\n }\n },\n 'children': {\n 'type': 'dict',\n },\n 'playbooks': {\n 'type': 'dict',\n 'schema': {\n 'create': {\n 'type': 'string',\n },\n 'converge': {\n 'type': 'string',\n },\n 'destroy': {\n 'type': 'string',\n },\n 'prepare': {\n 'type': 'string',\n },\n 'side_effect': {\n 'type': 'string',\n },\n 'verify': {\n 'type': 'string',\n },\n }\n },\n 'lint': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n 'enabled': {\n 'type': 'boolean',\n },\n 'options': {\n 'type': 'dict',\n },\n 'env': {\n 'type': 'dict',\n 'keyschema': {\n 'type': 'string',\n 'regex': '^[A-Z0-9_-]+$',\n },\n },\n }\n },\n }\n },\n 'scenario': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n 'check_sequence': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'converge_sequence': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'create_sequence': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'destroy_sequence': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'test_sequence': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n }\n },\n 'verifier': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n 'enabled': {\n 'type': 'boolean',\n },\n 'options': {\n 'type': 'dict',\n },\n 'env': {\n 'type': 'dict',\n 'keyschema': {\n 'type': 'string',\n 'regex': '^[A-Z0-9_-]+$',\n },\n },\n 'directory': {\n 'type': 'string',\n },\n 'additional_files_or_dirs': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'lint': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n 'enabled': {\n 'type': 'boolean',\n },\n 'options': {\n 'type': 'dict',\n },\n 'env': {\n 'type': 'dict',\n 'keyschema': {\n 'type': 'string',\n 'regex': '^[A-Z0-9_-]+$',\n },\n },\n }\n },\n }\n },\n}\n\ndriver_vagrant_provider_section_schema = {\n 'driver': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n 'provider': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type':\n 'string',\n 'nullable':\n False,\n 'allowed': [\n 'virtualbox',\n 'vmware_fusion',\n 'vmware_workstation',\n 'vmware_desktop',\n 'parallels',\n 'libvirt',\n ],\n },\n }\n },\n }\n },\n}\n\nplatforms_base_schema = {\n 'platforms': {\n 'type': 'list',\n 'schema': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'required': True,\n },\n 'groups': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'children': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n }\n }\n },\n}\n\nplatforms_vagrant_schema = {\n 'platforms': {\n 'type': 'list',\n 'schema': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n 'interfaces': {\n 'type': 'list',\n 'schema': {\n 'type': 'dict',\n }\n },\n 'instance_raw_config_args': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'config_options': {\n 'type': 'dict',\n },\n 'box': {\n 'type': 'string',\n },\n 'box_version': {\n 'type': 'string',\n },\n 'box_url': {\n 'type': 'string',\n },\n 'memory': {\n 'type': 'integer',\n },\n 'cpus': {\n 'type': 'integer',\n },\n 'provider_options': {\n 'type': 'dict',\n },\n 'provider_raw_config_args': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'provision': {\n 'type': 'boolean',\n },\n }\n }\n },\n}\n\nplatforms_docker_schema = {\n 'platforms': {\n 'type': 'list',\n 'schema': {\n 'type': 'dict',\n 'schema': {\n 'name': 
{\n 'type': 'string',\n },\n 'hostname': {\n 'type': 'string',\n },\n 'image': {\n 'type': 'string',\n },\n 'pull': {\n 'type': 'boolean',\n },\n 'pre_build_image': {\n 'type': 'boolean',\n },\n 'registry': {\n 'type': 'dict',\n 'schema': {\n 'url': {\n 'type': 'string',\n },\n 'credentials': {\n 'type': 'dict',\n 'schema': {\n 'username': {\n 'type': 'string',\n },\n 'password': {\n 'type': 'string',\n },\n 'email': {\n 'type': 'string',\n },\n }\n },\n }\n },\n 'command': {\n 'type': 'string',\n 'nullable': True,\n },\n 'privileged': {\n 'type': 'boolean',\n },\n 'security_opts': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'volumes': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'tmpfs': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'capabilities': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'exposed_ports': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'published_ports': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'ulimits': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'dns_servers': {\n 'type': 'list',\n 'schema': {\n 'type': 'string',\n }\n },\n 'env': {\n 'type': 'dict',\n 'keyschema': {\n 'type': 'string',\n 'regex': '^[a-zA-Z0-9_-]+$',\n }\n },\n 'restart_policy': {\n 'type': 'string',\n },\n 'restart_retries': {\n 'type': 'integer',\n },\n 'networks': {\n 'type': 'list',\n 'schema': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n }\n }\n },\n 'network_mode': {\n 'type': 'string',\n },\n }\n }\n },\n}\n\nplatforms_lxd_schema = {\n 'platforms': {\n 'type': 'list',\n 'schema': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n },\n 'url': {\n 'type': 'string',\n },\n 'cert_file': {\n 'type': 'string',\n },\n 'key_file': {\n 'type': 'string',\n },\n 'trust_password': {\n 'type': 'string',\n },\n 'source': {\n 'type': 'dict',\n 'schema': {\n 'type': {\n 'type': 'string',\n },\n 'mode': {\n 'type': 'string',\n 'allowed': [\n 'pull',\n 'local',\n ],\n },\n 'server': {\n 'type': 'string',\n },\n 'protocol': {\n 'type': 'string',\n 'allowed': [\n 'lxd',\n 'simplestreams',\n ],\n },\n 'alias': {\n 'type': 'string',\n },\n },\n },\n 'architecture': {\n 'type': 'string',\n 'allowed': [\n 'x86_64',\n 'i686',\n ],\n },\n 'config': {\n 'type': 'dict',\n 'allow_unknown': True,\n },\n 'devices': {\n 'type': 'dict',\n 'allow_unknown': True,\n },\n 'profiles': {\n 'type': 'list',\n 'schema': {\n 'type': 'string'\n }\n },\n 'force_stop': {\n 'type': 'boolean',\n },\n }\n }\n },\n}\n\ndependency_command_nullable_schema = {\n 'dependency': {\n 'type': 'dict',\n 'schema': {\n 'command': {\n 'type': 'string',\n 'nullable': False,\n },\n }\n },\n}\n\nverifier_options_readonly_schema = {\n 'verifier': {\n 'type': 'dict',\n 'schema': {\n 'options': {\n 'keyschema': {\n 'readonly': True,\n },\n },\n }\n },\n}\n\nverifier_goss_mutually_exclusive_schema = {\n 'verifier': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'allowed': [\n 'goss',\n ],\n },\n 'lint': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'allowed': [\n 'yamllint',\n ],\n },\n }\n },\n }\n },\n}\n\nverifier_inspec_mutually_exclusive_schema = {\n 'verifier': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'allowed': [\n 'inspec',\n ],\n },\n 'lint': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'allowed': [\n 'rubocop',\n ],\n },\n }\n },\n }\n 
},\n}\nverifier_testinfra_mutually_exclusive_schema = {\n 'verifier': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'allowed': [\n 'testinfra',\n ],\n },\n 'lint': {\n 'type': 'dict',\n 'schema': {\n 'name': {\n 'type': 'string',\n 'allowed': [\n 'flake8',\n ],\n },\n }\n },\n }\n },\n}\n\n\nclass Validator(cerberus.Validator):\n def __init__(self, *args, **kwargs):\n super(Validator, self).__init__(*args, **kwargs)\n\n def _validate_disallowed(self, disallowed, field, value):\n \"\"\" Readonly but with a custom error.\n\n The rule's arguments are validated against this schema:\n {'type': 'boolean'}\n \"\"\"\n if disallowed:\n msg = 'disallowed user provided config option'\n self._error(field, msg)\n\n def _validate_molecule_env_var(self, molecule_env_var, field, value):\n \"\"\" Readonly but with a custom error.\n\n The rule's arguments are validated against this schema:\n {'type': 'boolean'}\n \"\"\"\n # TODO(retr0h): This needs to be better handled.\n pattern = r'^[{$]+MOLECULE[_a-z0-9A-Z]+[}]*$'\n\n if molecule_env_var:\n if re.match(pattern, value):\n msg = ('cannot reference $MOLECULE special variables '\n 'in this section')\n self._error(field, msg)\n\n\ndef pre_validate(stream, env, keep_string):\n data = util.safe_load(stream)\n\n v = Validator(allow_unknown=True)\n v.validate(data, pre_validate_base_schema(env, keep_string))\n\n return v.errors\n\n\ndef validate(c):\n schema = copy.deepcopy(base_schema)\n\n # Dependency\n if c['dependency']['name'] == 'shell':\n util.merge_dicts(schema, dependency_command_nullable_schema)\n\n # Driver\n util.merge_dicts(schema, platforms_base_schema)\n if c['driver']['name'] == 'docker':\n util.merge_dicts(schema, platforms_docker_schema)\n elif c['driver']['name'] == 'vagrant':\n util.merge_dicts(schema, driver_vagrant_provider_section_schema)\n util.merge_dicts(schema, platforms_vagrant_schema)\n elif c['driver']['name'] == 'lxd':\n util.merge_dicts(schema, platforms_lxd_schema)\n else:\n util.merge_dicts(schema, platforms_base_schema)\n\n # Verifier\n if c['verifier']['name'] == 'goss':\n util.merge_dicts(schema, verifier_options_readonly_schema)\n util.merge_dicts(schema, verifier_goss_mutually_exclusive_schema)\n elif c['verifier']['name'] == 'inspec':\n util.merge_dicts(schema, verifier_options_readonly_schema)\n util.merge_dicts(schema, verifier_inspec_mutually_exclusive_schema)\n elif c['verifier']['name'] == 'testinfra':\n util.merge_dicts(schema, verifier_testinfra_mutually_exclusive_schema)\n\n v = Validator(allow_unknown=True)\n v.validate(c, schema)\n\n return v.errors\n", "path": "molecule/model/schema_v2.py" } ]
diff --git a/molecule/cookiecutter/scenario/driver/docker/{{cookiecutter.molecule_directory}}/{{cookiecutter.scenario_name}}/Dockerfile.j2 b/molecule/cookiecutter/scenario/driver/docker/{{cookiecutter.molecule_directory}}/{{cookiecutter.scenario_name}}/Dockerfile.j2 index c727a0564b..88feb252c3 100644 --- a/molecule/cookiecutter/scenario/driver/docker/{{cookiecutter.molecule_directory}}/{{cookiecutter.scenario_name}}/Dockerfile.j2 +++ b/molecule/cookiecutter/scenario/driver/docker/{{cookiecutter.molecule_directory}}/{{cookiecutter.scenario_name}}/Dockerfile.j2 @@ -14,3 +14,5 @@ RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y pyth elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates; \ elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python sudo bash ca-certificates && xbps-remove -O; fi {%- endraw %} + +CMD ["sh", "-c", "while true; do sleep 10000; done"] diff --git a/molecule/model/schema_v2.py b/molecule/model/schema_v2.py index db7498e6aa..bc66682286 100644 --- a/molecule/model/schema_v2.py +++ b/molecule/model/schema_v2.py @@ -638,6 +638,7 @@ def pre_validate_base_schema(env, keep_string): }, 'command': { 'type': 'string', + 'nullable': True, }, 'privileged': { 'type': 'boolean', diff --git a/molecule/provisioner/ansible/playbooks/docker/create.yml b/molecule/provisioner/ansible/playbooks/docker/create.yml index fa88eca5dd..343c22c40c 100644 --- a/molecule/provisioner/ansible/playbooks/docker/create.yml +++ b/molecule/provisioner/ansible/playbooks/docker/create.yml @@ -63,7 +63,7 @@ state: started recreate: false log_driver: json-file - command: "{{ item.command | default('bash -c \"while true; do sleep 10000; done\"') }}" + command: "{{ item.command | default(omit) }}" privileged: "{{ item.privileged | default(omit) }}" security_opts: "{{ item.security_opts | default(omit) }}" volumes: "{{ item.volumes | default(omit) }}" diff --git a/test/resources/playbooks/docker/Dockerfile.j2 b/test/resources/playbooks/docker/Dockerfile.j2 index 0a605536a2..7605e1bcb3 100644 --- a/test/resources/playbooks/docker/Dockerfile.j2 +++ b/test/resources/playbooks/docker/Dockerfile.j2 @@ -12,3 +12,5 @@ RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y pyth elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python sudo bash python-xml && zypper clean -a; \ elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates; \ elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python sudo bash ca-certificates && xbps-remove -O; fi + +CMD ["sh", "-c", "while true; do sleep 10000; done"]
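For the schema half of this patch, the behavioral change rides entirely on cerberus's `nullable` rule: with `'nullable': True`, a platform may set `command: null`, the create playbook falls back to `omit`, and the container's own `CMD` (now baked into the Dockerfile) keeps it alive. A minimal sketch of the rule — my own illustration with assumed minimal schemas, not code from the molecule repository:

```python
# Sketch of the cerberus behavior the patch relies on (assumed minimal
# schemas, not the full molecule ones).
import cerberus

old_schema = {"command": {"type": "string"}}                    # before the patch
new_schema = {"command": {"type": "string", "nullable": True}}  # after the patch

doc = {"command": None}  # platform explicitly nulls out the command

v = cerberus.Validator()
print(v.validate(doc, old_schema), v.errors)  # False, a "null value not allowed"-style error
print(v.validate(doc, new_schema), v.errors)  # True, {}
```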
pyca__cryptography-1237
0.5 fails to compile on OS X 10.8

Full traceback: http://pastebin.com/raw.php?i=M9N6Fgzi

@reaperhulk has diagnosed the problem, but this will require a 0.5.2 release to fix it for this supported platform.
[ { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nINCLUDES = \"\"\"\n#include <Security/SecItem.h>\n\"\"\"\n\nTYPES = \"\"\"\nconst CFTypeRef kSecAttrKeyType;\nconst CFTypeRef kSecAttrKeySizeInBits;\nconst CFTypeRef kSecAttrIsPermanent;\nconst CFTypeRef kSecAttrKeyTypeRSA;\nconst CFTypeRef kSecAttrKeyTypeDSA;\nconst CFTypeRef kSecAttrKeyTypeEC;\nconst CFTypeRef kSecAttrKeyTypeEC;\nconst CFTypeRef kSecUseKeychain;\n\"\"\"\n\nFUNCTIONS = \"\"\"\n\"\"\"\n\nMACROS = \"\"\"\n\"\"\"\n\nCUSTOMIZATIONS = \"\"\"\n\"\"\"\n\nCONDITIONAL_NAMES = {}\n", "path": "cryptography/hazmat/bindings/commoncrypto/secitem.py" } ]
[ { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nINCLUDES = \"\"\"\n#include <Security/SecItem.h>\n\"\"\"\n\nTYPES = \"\"\"\nconst CFTypeRef kSecAttrKeyType;\nconst CFTypeRef kSecAttrKeySizeInBits;\nconst CFTypeRef kSecAttrIsPermanent;\nconst CFTypeRef kSecAttrKeyTypeRSA;\nconst CFTypeRef kSecAttrKeyTypeDSA;\nconst CFTypeRef kSecUseKeychain;\n\"\"\"\n\nFUNCTIONS = \"\"\"\n\"\"\"\n\nMACROS = \"\"\"\n\"\"\"\n\nCUSTOMIZATIONS = \"\"\"\n\"\"\"\n\nCONDITIONAL_NAMES = {}\n", "path": "cryptography/hazmat/bindings/commoncrypto/secitem.py" } ]
diff --git a/cryptography/hazmat/bindings/commoncrypto/secitem.py b/cryptography/hazmat/bindings/commoncrypto/secitem.py index 4d7710bdf893..ac3dad3ffadb 100644 --- a/cryptography/hazmat/bindings/commoncrypto/secitem.py +++ b/cryptography/hazmat/bindings/commoncrypto/secitem.py @@ -23,8 +23,6 @@ const CFTypeRef kSecAttrIsPermanent; const CFTypeRef kSecAttrKeyTypeRSA; const CFTypeRef kSecAttrKeyTypeDSA; -const CFTypeRef kSecAttrKeyTypeEC; -const CFTypeRef kSecAttrKeyTypeEC; const CFTypeRef kSecUseKeychain; """
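The fix is purely subtractive: both copies of the (accidentally duplicated) `const CFTypeRef kSecAttrKeyTypeEC;` declaration are dropped from the cffi `TYPES` string, presumably because the Security framework on OS X 10.8 does not export that constant, so binding it fails at build time. A hypothetical way to probe for the symbol on a macOS host — my own sketch, not part of the patch:

```python
# Hypothetical probe (macOS only): does the Security framework export the
# constant the patch removes?  The symbol name mirrors the cdef above.
import ctypes
import ctypes.util

sec = ctypes.CDLL(ctypes.util.find_library("Security"))
try:
    ctypes.c_void_p.in_dll(sec, "kSecAttrKeyTypeEC")
    print("kSecAttrKeyTypeEC is exported on this host")
except ValueError:
    print("kSecAttrKeyTypeEC is missing (the OS X 10.8 situation)")
```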
litestar-org__litestar-1005
Bug: OpenAPI render for multiple tags isn't consistent
**Describe the bug**
When the OpenAPI schema renders tags from both a controller and a route, the result is not deterministic. This may not be a bug, but it surprised me, so I thought I'd raise it.

I'm unsure if I'm doing something crazy, but for a project we check in the generated JSON OpenAPI schema so we can browse the API live in GitLab. I've recently added a tag to both a controller and a route in it. Because the order of the tags isn't consistent, they are going to keep flip-flopping, as we have a pre-commit hook that regenerates the JSON to make sure it's up to date.

I hope that ramble makes sense...

**To Reproduce**
```python
from typing import Dict

from starlite import Starlite, Controller, get


class TestController(Controller):
    tags = ["a"]

    @get("/", tags=["b"])
    def hello_world(self) -> Dict[str, str]:
        """Handler function that returns a greeting dictionary."""
        return {"hello": "world"}


app = Starlite(route_handlers=[TestController])
print(app.openapi_schema.paths["/"].get.tags)
```
If you run that multiple times, you will see that you get either:
```python
['a', 'b']
```
or
```python
['b', 'a']
```

**Additional context**
I believe the problem is [here](https://github.com/starlite-api/starlite/blob/835749112e8364c1516f45973c924774aca22ca9/starlite/openapi/path_item.py#L59), as it forces construction of a new set. Sorting the tags before returning would be viable, as there shouldn't be _too many_ of them and it's a one-time thing, I believe.

But as I said, it may not be a problem you care about, as I could be doing something silly.
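The non-determinism the reporter describes comes straight from CPython: `str` hashing is randomized per process (governed by `PYTHONHASHSEED`), so the iteration order of a set of tag strings differs between interpreter runs, and `list(set(tags))` exposes that. A standalone demonstration, independent of starlite:

```python
# Each fresh interpreter gets a fresh hash seed (unless PYTHONHASHSEED is
# pinned in the environment), so set order of strings varies across runs.
import subprocess
import sys

snippet = "print(list({'a', 'b', 'controller', 'router'}))"
for _ in range(3):
    out = subprocess.run([sys.executable, "-c", snippet],
                         capture_output=True, text=True).stdout.strip()
    print(out)  # ordering may differ from line to line

print(sorted({"a", "b", "controller", "router"}))  # deterministic: ['a', 'b', 'controller', 'router']
```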
[ { "content": "from inspect import cleandoc\nfrom typing import TYPE_CHECKING, Dict, List, Optional, Tuple, cast\n\nfrom pydantic_openapi_schema.v3_1_0.operation import Operation\nfrom pydantic_openapi_schema.v3_1_0.path_item import PathItem\n\nfrom starlite.openapi.parameters import create_parameter_for_handler\nfrom starlite.openapi.request_body import create_request_body\nfrom starlite.openapi.responses import create_responses\nfrom starlite.utils.helpers import unwrap_partial\n\nif TYPE_CHECKING:\n from pydantic import BaseModel\n from pydantic_openapi_schema.v3_1_0 import SecurityRequirement\n\n from starlite.handlers import HTTPRouteHandler\n from starlite.plugins.base import PluginProtocol\n from starlite.routes import HTTPRoute\n\n\ndef get_description_for_handler(route_handler: \"HTTPRouteHandler\", use_handler_docstrings: bool) -> Optional[str]:\n \"\"\"Produce the operation description for a route handler, either by using the description value if provided,\n\n or the docstring - if config is enabled.\n\n Args:\n route_handler: A route handler instance.\n use_handler_docstrings: If `True` and `route_handler.description` is `None` returns docstring of wrapped\n handler function.\n\n Returns:\n An optional description string\n \"\"\"\n handler_description = route_handler.description\n if handler_description is None and use_handler_docstrings:\n fn = unwrap_partial(route_handler.fn.value)\n return cleandoc(fn.__doc__) if fn.__doc__ else None\n return handler_description\n\n\ndef extract_layered_values(\n route_handler: \"HTTPRouteHandler\",\n) -> Tuple[Optional[List[str]], Optional[List[Dict[str, List[str]]]]]:\n \"\"\"Extract the tags and security values from the route handler layers.\n\n Args:\n route_handler: A Route Handler instance.\n\n Returns:\n A tuple of optional lists.\n \"\"\"\n tags: List[str] = []\n security: List[\"SecurityRequirement\"] = []\n for layer in route_handler.ownership_layers:\n if layer.tags:\n tags.extend(layer.tags)\n if layer.security:\n security.extend(layer.security)\n return list(set(tags)) if tags else None, security or None\n\n\ndef create_path_item(\n route: \"HTTPRoute\", create_examples: bool, plugins: List[\"PluginProtocol\"], use_handler_docstrings: bool\n) -> PathItem:\n \"\"\"Create a PathItem model for the given route parsing all http_methods into Operation Models.\"\"\"\n path_item = PathItem()\n for http_method, handler_tuple in route.route_handler_map.items():\n route_handler, _ = handler_tuple\n if route_handler.include_in_schema:\n handler_fields = cast(\"BaseModel\", route_handler.signature_model).__fields__\n parameters = (\n create_parameter_for_handler(\n route_handler=route_handler,\n handler_fields=handler_fields,\n path_parameters=route.path_parameters,\n generate_examples=create_examples,\n )\n or None\n )\n raises_validation_error = bool(\"data\" in handler_fields or path_item.parameters or parameters)\n handler_name = unwrap_partial(route_handler.handler_name).replace(\"_\", \" \").title()\n request_body = None\n if \"data\" in handler_fields:\n request_body = create_request_body(\n field=handler_fields[\"data\"], generate_examples=create_examples, plugins=plugins\n )\n\n tags, security = extract_layered_values(route_handler)\n operation = Operation(\n operationId=route_handler.operation_id or handler_name,\n tags=tags,\n summary=route_handler.summary,\n description=get_description_for_handler(route_handler, use_handler_docstrings),\n deprecated=route_handler.deprecated,\n responses=create_responses(\n 
route_handler=route_handler,\n raises_validation_error=raises_validation_error,\n generate_examples=create_examples,\n plugins=plugins,\n ),\n requestBody=request_body,\n parameters=parameters, # type: ignore[arg-type]\n security=security,\n )\n setattr(path_item, http_method.lower(), operation)\n return path_item\n", "path": "starlite/openapi/path_item.py" } ]
[ { "content": "from inspect import cleandoc\nfrom typing import TYPE_CHECKING, Dict, List, Optional, Tuple, cast\n\nfrom pydantic_openapi_schema.v3_1_0.operation import Operation\nfrom pydantic_openapi_schema.v3_1_0.path_item import PathItem\n\nfrom starlite.openapi.parameters import create_parameter_for_handler\nfrom starlite.openapi.request_body import create_request_body\nfrom starlite.openapi.responses import create_responses\nfrom starlite.utils.helpers import unwrap_partial\n\nif TYPE_CHECKING:\n from pydantic import BaseModel\n from pydantic_openapi_schema.v3_1_0 import SecurityRequirement\n\n from starlite.handlers import HTTPRouteHandler\n from starlite.plugins.base import PluginProtocol\n from starlite.routes import HTTPRoute\n\n\ndef get_description_for_handler(route_handler: \"HTTPRouteHandler\", use_handler_docstrings: bool) -> Optional[str]:\n \"\"\"Produce the operation description for a route handler, either by using the description value if provided,\n\n or the docstring - if config is enabled.\n\n Args:\n route_handler: A route handler instance.\n use_handler_docstrings: If `True` and `route_handler.description` is `None` returns docstring of wrapped\n handler function.\n\n Returns:\n An optional description string\n \"\"\"\n handler_description = route_handler.description\n if handler_description is None and use_handler_docstrings:\n fn = unwrap_partial(route_handler.fn.value)\n return cleandoc(fn.__doc__) if fn.__doc__ else None\n return handler_description\n\n\ndef extract_layered_values(\n route_handler: \"HTTPRouteHandler\",\n) -> Tuple[Optional[List[str]], Optional[List[Dict[str, List[str]]]]]:\n \"\"\"Extract the tags and security values from the route handler layers.\n\n Args:\n route_handler: A Route Handler instance.\n\n Returns:\n A tuple of optional lists.\n \"\"\"\n tags: List[str] = []\n security: List[\"SecurityRequirement\"] = []\n for layer in route_handler.ownership_layers:\n if layer.tags:\n tags.extend(layer.tags)\n if layer.security:\n security.extend(layer.security)\n return sorted(set(tags)) if tags else None, security or None\n\n\ndef create_path_item(\n route: \"HTTPRoute\", create_examples: bool, plugins: List[\"PluginProtocol\"], use_handler_docstrings: bool\n) -> PathItem:\n \"\"\"Create a PathItem model for the given route parsing all http_methods into Operation Models.\"\"\"\n path_item = PathItem()\n for http_method, handler_tuple in route.route_handler_map.items():\n route_handler, _ = handler_tuple\n if route_handler.include_in_schema:\n handler_fields = cast(\"BaseModel\", route_handler.signature_model).__fields__\n parameters = (\n create_parameter_for_handler(\n route_handler=route_handler,\n handler_fields=handler_fields,\n path_parameters=route.path_parameters,\n generate_examples=create_examples,\n )\n or None\n )\n raises_validation_error = bool(\"data\" in handler_fields or path_item.parameters or parameters)\n handler_name = unwrap_partial(route_handler.handler_name).replace(\"_\", \" \").title()\n request_body = None\n if \"data\" in handler_fields:\n request_body = create_request_body(\n field=handler_fields[\"data\"], generate_examples=create_examples, plugins=plugins\n )\n\n tags, security = extract_layered_values(route_handler)\n operation = Operation(\n operationId=route_handler.operation_id or handler_name,\n tags=tags,\n summary=route_handler.summary,\n description=get_description_for_handler(route_handler, use_handler_docstrings),\n deprecated=route_handler.deprecated,\n responses=create_responses(\n 
route_handler=route_handler,\n raises_validation_error=raises_validation_error,\n generate_examples=create_examples,\n plugins=plugins,\n ),\n requestBody=request_body,\n parameters=parameters, # type: ignore[arg-type]\n security=security,\n )\n setattr(path_item, http_method.lower(), operation)\n return path_item\n", "path": "starlite/openapi/path_item.py" } ]
diff --git a/starlite/openapi/path_item.py b/starlite/openapi/path_item.py index 7c1ca54aba..af24207f26 100644 --- a/starlite/openapi/path_item.py +++ b/starlite/openapi/path_item.py @@ -56,7 +56,7 @@ def extract_layered_values( tags.extend(layer.tags) if layer.security: security.extend(layer.security) - return list(set(tags)) if tags else None, security or None + return sorted(set(tags)) if tags else None, security or None def create_path_item( diff --git a/tests/openapi/test_tags.py b/tests/openapi/test_tags.py index 542c8136cc..344e2c13af 100644 --- a/tests/openapi/test_tags.py +++ b/tests/openapi/test_tags.py @@ -23,7 +23,7 @@ class _Controller(Controller): path = "/controller" tags = ["controller"] - @get(tags=["handler"]) + @get(tags=["handler", "a"]) def _handler(self) -> Any: ... @@ -50,8 +50,8 @@ def test_openapi_schema_handler_tags(openapi_schema: "OpenAPI") -> None: def test_openapi_schema_controller_tags(openapi_schema: "OpenAPI") -> None: - assert set(openapi_schema.paths["/controller"].get.tags) == {"handler", "controller"} # type: ignore + assert openapi_schema.paths["/controller"].get.tags == ["a", "controller", "handler"] # type: ignore def test_openapi_schema_router_tags(openapi_schema: "OpenAPI") -> None: - assert set(openapi_schema.paths["/router/controller"].get.tags) == {"handler", "controller", "router"} # type: ignore + assert openapi_schema.paths["/router/controller"].get.tags == ["a", "controller", "handler", "router"] # type: ignore
pwndbg__pwndbg-1627
Heap heuristic will fail on big-endian architectures
This can be reproduced by:
```console
$ cat test.c
#include <stdlib.h>
int main(){
  free(malloc(0x20));
  return 0;
}
$ mips-linux-gnu-gcc test.c -ggdb
$ qemu-mips -g 1234 -L /usr/mips-linux-gnu ./a.out
```
The libc I used is from: http://kr.archive.ubuntu.com/ubuntu/pool/universe/c/cross-toolchain-base-mipsen/libc6-mips-cross_2.30-0ubuntu2cross2_all.deb
```console
$ file /usr/mips-linux-gnu/lib/libc-2.30.so
/usr/mips-linux-gnu/lib/libc-2.30.so: ELF 32-bit MSB shared object, MIPS, MIPS32 rel2 version 1 (SYSV), dynamically linked, interpreter /lib/ld.so.1, BuildID[sha1]=fff2eaa960489be0b3b109d4e52230c8fe34b2a1, for GNU/Linux 3.2.0, stripped
```
Then:
```console
$ gdb -q ./a.out -ex 'set exception-verbose on' -ex 'set solib-absolute-prefix /usr/mips-linux-gnu/lib' -ex 'set solib-search-path /usr/mips-linux-gnu/lib' -ex 'target remote :1234' -ex 'break main' -ex 'continue' -ex 'next' -ex 'heap'
```
We will get: `KeyError: <class 'pwndbg.heap.structs.c_pvoid'>`. That's because the keys in the dict are little-endian, but we try to access the dict with a big-endian type. We should use the correct endianness for the keys in the dict.

----

Btw, I'm not sure if we really need to support the heap heuristic for this architecture in the future, but the heuristic also seems to work for single-threaded programs on this architecture once we resolve this issue, which is good news :)
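The KeyError mechanism is reproducible with plain ctypes: the swapped-byte-order counterpart of a ctypes type is a distinct class, so it misses in a dict keyed on the native class. A standalone sketch (run on a typical little-endian host); the patched file later in this record rebuilds `C2GDB_MAPPING` with exactly such endian-specific keys:

```python
# Plain-ctypes illustration of the failure mode (little-endian host assumed).
import ctypes

mapping = {ctypes.c_uint32: "uint32"}

be_uint32 = ctypes.c_uint32.__ctype_be__   # big-endian counterpart class
assert be_uint32 is not ctypes.c_uint32    # a different class on this host

try:
    mapping[be_uint32]
except KeyError:
    print("KeyError, just like the `heap` command above")

# Re-keying on the endian-specific classes (what the patch does) fixes it:
mapping = {k.__ctype_be__: v for k, v in mapping.items()}
print(mapping[be_uint32])  # uint32
```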
[ { "content": "import ctypes\n\nimport gdb\n\nimport pwndbg.gdblib.arch\nimport pwndbg.gdblib.memory\nimport pwndbg.gdblib.typeinfo\nimport pwndbg.glibc\nfrom pwndbg.gdblib.ctypes import Structure\n\n\ndef request2size(req):\n if req + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE:\n return MINSIZE\n return (req + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK\n\n\ndef fastbin_index(size: int) -> int:\n if pwndbg.gdblib.arch.ptrsize == 8:\n return (size >> 4) - 2\n else:\n return (size >> 3) - 2\n\n\nSIZE_SZ = pwndbg.gdblib.arch.ptrsize\nMINSIZE = pwndbg.gdblib.arch.ptrsize * 4\n# i386 will override it to 16.\n# See https://elixir.bootlin.com/glibc/glibc-2.26/source/sysdeps/i386/malloc-alignment.h#L22\nMALLOC_ALIGN = (\n 16\n if pwndbg.gdblib.arch.current == \"i386\" and pwndbg.glibc.get_version() >= (2, 26)\n else pwndbg.gdblib.arch.ptrsize * 2\n)\nMALLOC_ALIGN_MASK = MALLOC_ALIGN - 1\nMAX_FAST_SIZE = 80 * SIZE_SZ // 4\nNBINS = 128\nBINMAPSIZE = 4\nTCACHE_MAX_BINS = 64\nNFASTBINS = fastbin_index(request2size(MAX_FAST_SIZE)) + 1\n\nif pwndbg.gdblib.arch.ptrsize == 4:\n PTR = ctypes.c_uint32\n SIZE_T = ctypes.c_uint32\nelse:\n PTR = ctypes.c_uint64 # type: ignore[misc]\n SIZE_T = ctypes.c_uint64 # type: ignore[misc]\n\nDEFAULT_TOP_PAD = 131072\nDEFAULT_MMAP_MAX = 65536\nDEFAULT_MMAP_THRESHOLD = 128 * 1024\nDEFAULT_TRIM_THRESHOLD = 128 * 1024\nTCACHE_FILL_COUNT = 7\n\n\nclass c_pvoid(PTR):\n \"\"\"\n Represents a pointer.\n \"\"\"\n\n\nclass c_size_t(SIZE_T):\n \"\"\"\n Represents a size_t.\n \"\"\"\n\n\nC2GDB_MAPPING = {\n ctypes.c_char: pwndbg.gdblib.typeinfo.char,\n ctypes.c_int8: pwndbg.gdblib.typeinfo.int8,\n ctypes.c_int16: pwndbg.gdblib.typeinfo.int16,\n ctypes.c_int32: pwndbg.gdblib.typeinfo.int32,\n ctypes.c_int64: pwndbg.gdblib.typeinfo.int64,\n ctypes.c_uint8: pwndbg.gdblib.typeinfo.uint8,\n ctypes.c_uint16: pwndbg.gdblib.typeinfo.uint16,\n ctypes.c_uint32: pwndbg.gdblib.typeinfo.uint32,\n ctypes.c_uint64: pwndbg.gdblib.typeinfo.uint64,\n c_pvoid: pwndbg.gdblib.typeinfo.pvoid,\n c_size_t: pwndbg.gdblib.typeinfo.size_t,\n}\n\n\nclass FakeGDBField:\n \"\"\"\n Fake gdb.Field for compatibility\n \"\"\"\n\n def __init__(\n self,\n bitpos,\n name,\n type,\n parent_type,\n enumval=None,\n artificial=False,\n is_base_class=False,\n bitsize=0,\n ) -> None:\n # Note: pwndbg only uses `name` currently\n self.bitpos = bitpos\n self.name = name\n self.type = type\n self.parent_type = parent_type\n if enumval:\n self.enumval = enumval\n self.artificial = artificial\n self.is_base_class = is_base_class\n self.bitsize = bitsize\n\n\nclass CStruct2GDB:\n code = gdb.TYPE_CODE_STRUCT\n _c_struct = None\n\n def __init__(self, address: int) -> None:\n self.address = address\n\n def __int__(self) -> int:\n \"\"\"\n Returns the address of the C struct.\n \"\"\"\n return self.address\n\n def __getitem__(self, key: str) -> gdb.Value:\n \"\"\"\n Returns the value of the specified field as a `gdb.Value`.\n \"\"\"\n return self.read_field(key)\n\n def __getattr__(self, key: str) -> gdb.Value:\n \"\"\"\n Returns the value of the specified field as a `gdb.Value`.\n \"\"\"\n return self.read_field(key)\n\n def __eq__(self, other) -> bool:\n return self.address == int(other)\n\n def __str__(self) -> str:\n \"\"\"\n Returns a string representation of the C struct like `gdb.Value` does.\n \"\"\"\n output = \"{\\n\"\n for f in self._c_struct._fields_:\n output += \" %s = %s,\\n\" % (f[0], self.read_field(f[0]))\n output += \"}\"\n return output\n\n def read_field(self, field: str) -> gdb.Value:\n \"\"\"\n Returns the 
value of the specified field as a `gdb.Value`.\n \"\"\"\n field_address = self.get_field_address(field)\n field_type = next((f for f in self._c_struct._fields_ if f[0] == field))[1]\n if hasattr(field_type, \"_length_\"): # f is a ctypes Array\n t = C2GDB_MAPPING[field_type._type_]\n return pwndbg.gdblib.memory.poi(t.array(field_type._length_ - 1), field_address)\n return pwndbg.gdblib.memory.poi(C2GDB_MAPPING[field_type], field_address)\n\n @property\n def type(self):\n \"\"\"\n Returns type(self) to make it compatible with the `gdb.Value` interface.\n \"\"\"\n return type(self)\n\n @classmethod\n def unqualified(cls):\n \"\"\"\n Returns cls to make it compatible with the `gdb.types.has_field()` interface.\n \"\"\"\n return cls\n\n @classmethod\n def fields(cls):\n \"\"\"\n Return fields of the struct to make it compatible with the `gdb.Type` interface.\n \"\"\"\n fake_gdb_fields = []\n for f in cls._c_struct._fields_:\n field_name = f[0]\n field_type = f[1]\n bitpos = getattr(cls._c_struct, field_name).offset * 8\n if hasattr(field_type, \"_length_\"): # f is a ctypes Array\n t = C2GDB_MAPPING[field_type._type_]\n _type = t.array(field_type._length_ - 1)\n else:\n _type = C2GDB_MAPPING[field_type]\n fake_gdb_fields.append(FakeGDBField(bitpos, field_name, _type, cls))\n return fake_gdb_fields\n\n @classmethod\n def keys(cls) -> list:\n \"\"\"\n Return a list of the names of the fields in the struct to make it compatible with the `gdb.Type` interface.\n \"\"\"\n return [f[0] for f in cls._c_struct._fields_]\n\n def get_field_address(self, field: str) -> int:\n \"\"\"\n Returns the address of the specified field.\n \"\"\"\n return self.address + getattr(self._c_struct, field).offset\n\n def items(self) -> tuple:\n \"\"\"\n Returns a tuple of (field name, field value) pairs.\n \"\"\"\n return tuple((field[0], getattr(self, field[0])) for field in self._c_struct._fields_)\n\n\nclass c_malloc_state_2_26(Structure):\n \"\"\"\n This class represents malloc_state struct for GLIBC < 2.27 as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/1c9a5c270d8b66f30dcfaf1cb2d6cf39d3e18369/malloc/malloc.c#L1678-L1716\n\n struct malloc_state\n {\n /* Serialize access. */\n __libc_lock_define (, mutex);\n\n /* Flags (formerly in max_fast). */\n int flags;\n\n /* Fastbins */\n mfastbinptr fastbinsY[NFASTBINS];\n\n /* Base of the topmost chunk -- not otherwise kept in a bin */\n mchunkptr top;\n\n /* The remainder from the most recent split of a small request */\n mchunkptr last_remainder;\n\n /* Normal bins packed as described above */\n mchunkptr bins[NBINS * 2 - 2];\n\n /* Bitmap of bins */\n unsigned int binmap[BINMAPSIZE];\n\n /* Linked list */\n struct malloc_state *next;\n\n /* Linked list for free arenas. Access to this field is serialized\n by free_list_lock in arena.c. */\n struct malloc_state *next_free;\n\n /* Number of threads attached to this arena. 0 if the arena is on\n the free list. Access to this field is serialized by\n free_list_lock in arena.c. */\n INTERNAL_SIZE_T attached_threads;\n\n /* Memory allocated from the system in this arena. 
*/\n INTERNAL_SIZE_T system_mem;\n INTERNAL_SIZE_T max_system_mem;\n };\n \"\"\"\n\n _fields_ = [\n (\"mutex\", ctypes.c_int32),\n (\"flags\", ctypes.c_int32),\n (\"fastbinsY\", c_pvoid * NFASTBINS),\n (\"top\", c_pvoid),\n (\"last_remainder\", c_pvoid),\n (\"bins\", c_pvoid * (NBINS * 2 - 2)),\n (\"binmap\", ctypes.c_int32 * BINMAPSIZE),\n (\"next\", c_pvoid),\n (\"next_free\", c_pvoid),\n (\"attached_threads\", c_size_t),\n (\"system_mem\", c_size_t),\n (\"max_system_mem\", c_size_t),\n ]\n\n\nclass c_malloc_state_2_27(Structure):\n \"\"\"\n This class represents malloc_state struct for GLIBC >= 2.27 as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/glibc-2.34/malloc/malloc.c#L1831\n\n\n struct malloc_state\n {\n /* Serialize access. */\n __libc_lock_define (, mutex);\n\n /* Flags (formerly in max_fast). */\n int flags;\n\n /* Set if the fastbin chunks contain recently inserted free blocks. */\n /* Note this is a bool but not all targets support atomics on booleans. */\n int have_fastchunks;\n\n /* Fastbins */\n mfastbinptr fastbinsY[NFASTBINS];\n\n /* Base of the topmost chunk -- not otherwise kept in a bin */\n mchunkptr top;\n\n /* The remainder from the most recent split of a small request */\n mchunkptr last_remainder;\n\n /* Normal bins packed as described above */\n mchunkptr bins[NBINS * 2 - 2];\n\n /* Bitmap of bins */\n unsigned int binmap[BINMAPSIZE];\n\n /* Linked list */\n struct malloc_state *next;\n\n /* Linked list for free arenas. Access to this field is serialized\n by free_list_lock in arena.c. */\n struct malloc_state *next_free;\n\n /* Number of threads attached to this arena. 0 if the arena is on\n the free list. Access to this field is serialized by\n free_list_lock in arena.c. */\n INTERNAL_SIZE_T attached_threads;\n\n /* Memory allocated from the system in this arena. */\n INTERNAL_SIZE_T system_mem;\n INTERNAL_SIZE_T max_system_mem;\n };\n \"\"\"\n\n _fields_ = [\n (\"mutex\", ctypes.c_int32),\n (\"flags\", ctypes.c_int32),\n (\"have_fastchunks\", ctypes.c_int32),\n (\"fastbinsY\", c_pvoid * NFASTBINS),\n (\"top\", c_pvoid),\n (\"last_remainder\", c_pvoid),\n (\"bins\", c_pvoid * (NBINS * 2 - 2)),\n (\"binmap\", ctypes.c_int32 * BINMAPSIZE),\n (\"next\", c_pvoid),\n (\"next_free\", c_pvoid),\n (\"attached_threads\", c_size_t),\n (\"system_mem\", c_size_t),\n (\"max_system_mem\", c_size_t),\n ]\n\n\nclass MallocState(CStruct2GDB):\n \"\"\"\n This class represents malloc_state struct with interface compatible with `gdb.Value`.\n \"\"\"\n\n if pwndbg.glibc.get_version() >= (2, 27):\n _c_struct = c_malloc_state_2_27\n else:\n _c_struct = c_malloc_state_2_26\n sizeof = ctypes.sizeof(_c_struct)\n\n\nclass c_heap_info(Structure):\n \"\"\"\n This class represents heap_info struct as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/glibc-2.34/malloc/arena.c#L53\n\n typedef struct _heap_info\n {\n mstate ar_ptr; /* Arena for this heap. */\n struct _heap_info *prev; /* Previous heap. */\n size_t size; /* Current size in bytes. */\n size_t mprotect_size; /* Size in bytes that has been mprotected\n PROT_READ|PROT_WRITE. */\n /* Make sure the following data is properly aligned, particularly\n that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of\n MALLOC_ALIGNMENT. 
*/\n char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];\n } heap_info;\n \"\"\"\n\n _fields_ = [\n (\"ar_ptr\", c_pvoid),\n (\"prev\", c_pvoid),\n (\"size\", c_size_t),\n (\"mprotect_size\", c_size_t),\n (\"pad\", ctypes.c_uint8 * (-6 * SIZE_SZ & MALLOC_ALIGN_MASK)),\n ]\n\n\nclass HeapInfo(CStruct2GDB):\n \"\"\"\n This class represents heap_info struct with interface compatible with `gdb.Value`.\n \"\"\"\n\n _c_struct = c_heap_info\n sizeof = ctypes.sizeof(_c_struct)\n\n\nclass c_malloc_chunk(Structure):\n \"\"\"\n This class represents malloc_chunk struct as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/glibc-2.34/malloc/malloc.c#L1154\n\n struct malloc_chunk {\n\n INTERNAL_SIZE_T mchunk_prev_size; /* Size of previous chunk (if free). */\n INTERNAL_SIZE_T mchunk_size; /* Size in bytes, including overhead. */\n\n struct malloc_chunk* fd; /* double links -- used only if free. */\n struct malloc_chunk* bk;\n\n /* Only used for large blocks: pointer to next larger size. */\n struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */\n struct malloc_chunk* bk_nextsize;\n };\n \"\"\"\n\n _fields_ = [\n (\"prev_size\", c_size_t),\n (\"size\", c_size_t),\n (\"fd\", c_pvoid),\n (\"bk\", c_pvoid),\n (\"fd_nextsize\", c_pvoid),\n (\"bk_nextsize\", c_pvoid),\n ]\n\n\nclass MallocChunk(CStruct2GDB):\n \"\"\"\n This class represents malloc_chunk struct with interface compatible with `gdb.Value`.\n \"\"\"\n\n _c_struct = c_malloc_chunk\n sizeof = ctypes.sizeof(_c_struct)\n\n\nclass c_tcache_perthread_struct_2_29(Structure):\n \"\"\"\n This class represents tcache_perthread_struct for GLIBC < 2.30 as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/glibc-2.29/malloc/malloc.c#L2916\n\n typedef struct tcache_perthread_struct\n {\n char counts[TCACHE_MAX_BINS];\n tcache_entry *entries[TCACHE_MAX_BINS];\n } tcache_perthread_struct;\n \"\"\"\n\n _fields_ = [\n (\"counts\", ctypes.c_char * TCACHE_MAX_BINS),\n (\"entries\", c_pvoid * TCACHE_MAX_BINS),\n ]\n\n\nclass c_tcache_perthread_struct_2_30(Structure):\n \"\"\"\n This class represents the tcache_perthread_struct for GLIBC >= 2.30 as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/glibc-2.34/malloc/malloc.c#L3025\n\n typedef struct tcache_perthread_struct\n {\n uint16_t counts[TCACHE_MAX_BINS];\n tcache_entry *entries[TCACHE_MAX_BINS];\n } tcache_perthread_struct;\n \"\"\"\n\n _fields_ = [\n (\"counts\", ctypes.c_uint16 * TCACHE_MAX_BINS),\n (\"entries\", c_pvoid * TCACHE_MAX_BINS),\n ]\n\n\nclass TcachePerthreadStruct(CStruct2GDB):\n \"\"\"\n This class represents tcache_perthread_struct with interface compatible with `gdb.Value`.\n \"\"\"\n\n if pwndbg.glibc.get_version() >= (2, 30):\n _c_struct = c_tcache_perthread_struct_2_30\n else:\n _c_struct = c_tcache_perthread_struct_2_29\n sizeof = ctypes.sizeof(_c_struct)\n\n\nclass c_tcache_entry_2_28(Structure):\n \"\"\"\n This class represents the tcache_entry struct for GLIBC < 2.29 as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/glibc-2.28/malloc/malloc.c#L2888\n\n typedef struct tcache_entry\n {\n struct tcache_entry *next;\n } tcache_entry;\n \"\"\"\n\n _fields_ = [(\"next\", c_pvoid)]\n\n\nclass c_tcache_entry_2_29(Structure):\n \"\"\"\n This class represents the tcache_entry struct for GLIBC >= 2.29 as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/glibc-2.34/malloc/malloc.c#L3013\n\n typedef struct tcache_entry\n {\n struct tcache_entry *next;\n /* This field exists to detect double frees. 
*/\n uintptr_t key;\n } tcache_entry;\n \"\"\"\n\n _fields_ = [(\"next\", c_pvoid), (\"key\", c_pvoid)]\n\n\nclass TcacheEntry(CStruct2GDB):\n \"\"\"\n This class represents the tcache_entry struct with interface compatible with `gdb.Value`.\n \"\"\"\n\n if pwndbg.glibc.get_version() >= (2, 29):\n _c_struct = c_tcache_entry_2_29\n else:\n _c_struct = c_tcache_entry_2_28\n sizeof = ctypes.sizeof(_c_struct)\n\n\nclass c_malloc_par_2_23(Structure):\n \"\"\"\n This class represents the malloc_par struct for GLIBC < 2.24 as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/glibc-2.23/malloc/malloc.c#L1726\n\n struct malloc_par\n {\n /* Tunable parameters */\n unsigned long trim_threshold;\n INTERNAL_SIZE_T top_pad;\n INTERNAL_SIZE_T mmap_threshold;\n INTERNAL_SIZE_T arena_test;\n INTERNAL_SIZE_T arena_max;\n\n /* Memory map support */\n int n_mmaps;\n int n_mmaps_max;\n int max_n_mmaps;\n /* the mmap_threshold is dynamic, until the user sets\n it manually, at which point we need to disable any\n dynamic behavior. */\n int no_dyn_threshold;\n\n /* Statistics */\n INTERNAL_SIZE_T mmapped_mem;\n /*INTERNAL_SIZE_T sbrked_mem;*/\n /*INTERNAL_SIZE_T max_sbrked_mem;*/\n INTERNAL_SIZE_T max_mmapped_mem;\n INTERNAL_SIZE_T max_total_mem; /* only kept for NO_THREADS */\n\n /* First address handed out by MORECORE/sbrk. */\n char *sbrk_base;\n };\n \"\"\"\n\n _fields_ = [\n (\"trim_threshold\", c_size_t),\n (\"top_pad\", c_size_t),\n (\"mmap_threshold\", c_size_t),\n (\"arena_test\", c_size_t),\n (\"arena_max\", c_size_t),\n (\"n_mmaps\", ctypes.c_int32),\n (\"n_mmaps_max\", ctypes.c_int32),\n (\"max_n_mmaps\", ctypes.c_int32),\n (\"no_dyn_threshold\", ctypes.c_int32),\n (\"mmapped_mem\", c_size_t),\n (\"max_mmapped_mem\", c_size_t),\n (\"max_total_mem\", c_size_t),\n (\"sbrk_base\", c_pvoid),\n ]\n\n\nclass c_malloc_par_2_24(Structure):\n \"\"\"\n This class represents the malloc_par struct for GLIBC >= 2.24 as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/glibc-2.25/malloc/malloc.c#L1690\n https://github.com/bminor/glibc/blob/glibc-2.24/malloc/malloc.c#L1719\n\n struct malloc_par\n {\n /* Tunable parameters */\n unsigned long trim_threshold;\n INTERNAL_SIZE_T top_pad;\n INTERNAL_SIZE_T mmap_threshold;\n INTERNAL_SIZE_T arena_test;\n INTERNAL_SIZE_T arena_max;\n\n /* Memory map support */\n int n_mmaps;\n int n_mmaps_max;\n int max_n_mmaps;\n /* the mmap_threshold is dynamic, until the user sets\n it manually, at which point we need to disable any\n dynamic behavior. */\n int no_dyn_threshold;\n\n /* Statistics */\n INTERNAL_SIZE_T mmapped_mem;\n INTERNAL_SIZE_T max_mmapped_mem;\n\n /* First address handed out by MORECORE/sbrk. 
*/\n char *sbrk_base;\n };\n \"\"\"\n\n _fields_ = [\n (\"trim_threshold\", c_size_t),\n (\"top_pad\", c_size_t),\n (\"mmap_threshold\", c_size_t),\n (\"arena_test\", c_size_t),\n (\"arena_max\", c_size_t),\n (\"n_mmaps\", ctypes.c_int32),\n (\"n_mmaps_max\", ctypes.c_int32),\n (\"max_n_mmaps\", ctypes.c_int32),\n (\"no_dyn_threshold\", ctypes.c_int32),\n (\"mmapped_mem\", c_size_t),\n (\"max_mmapped_mem\", c_size_t),\n (\"sbrk_base\", c_pvoid),\n ]\n\n\nclass c_malloc_par_2_26(Structure):\n \"\"\"\n This class represents the malloc_par struct for GLIBC >= 2.26 as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/glibc-2.34/malloc/malloc.c#L1875\n\n struct malloc_par\n {\n /* Tunable parameters */\n unsigned long trim_threshold;\n INTERNAL_SIZE_T top_pad;\n INTERNAL_SIZE_T mmap_threshold;\n INTERNAL_SIZE_T arena_test;\n INTERNAL_SIZE_T arena_max;\n\n /* Memory map support */\n int n_mmaps;\n int n_mmaps_max;\n int max_n_mmaps;\n /* the mmap_threshold is dynamic, until the user sets\n it manually, at which point we need to disable any\n dynamic behavior. */\n int no_dyn_threshold;\n\n /* Statistics */\n INTERNAL_SIZE_T mmapped_mem;\n INTERNAL_SIZE_T max_mmapped_mem;\n\n /* First address handed out by MORECORE/sbrk. */\n char *sbrk_base;\n\n #if USE_TCACHE\n /* Maximum number of buckets to use. */\n size_t tcache_bins;\n size_t tcache_max_bytes;\n /* Maximum number of chunks in each bucket. */\n size_t tcache_count;\n /* Maximum number of chunks to remove from the unsorted list, which\n aren't used to prefill the cache. */\n size_t tcache_unsorted_limit;\n #endif\n };\n \"\"\"\n\n _fields_ = [\n (\"trim_threshold\", c_size_t),\n (\"top_pad\", c_size_t),\n (\"mmap_threshold\", c_size_t),\n (\"arena_test\", c_size_t),\n (\"arena_max\", c_size_t),\n (\"n_mmaps\", ctypes.c_int32),\n (\"n_mmaps_max\", ctypes.c_int32),\n (\"max_n_mmaps\", ctypes.c_int32),\n (\"no_dyn_threshold\", ctypes.c_int32),\n (\"mmapped_mem\", c_size_t),\n (\"max_mmapped_mem\", c_size_t),\n (\"sbrk_base\", c_pvoid),\n (\"tcache_bins\", c_size_t),\n (\"tcache_max_bytes\", c_size_t),\n (\"tcache_count\", ctypes.c_int32),\n (\"tcache_unsorted_limit\", c_size_t),\n ]\n\n\nclass c_malloc_par_2_35(Structure):\n \"\"\"\n This class represents the malloc_par struct for GLIBC >= 2.35 as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/glibc-2.35/malloc/malloc.c#L1874\n\n struct malloc_par\n {\n /* Tunable parameters */\n unsigned long trim_threshold;\n INTERNAL_SIZE_T top_pad;\n INTERNAL_SIZE_T mmap_threshold;\n INTERNAL_SIZE_T arena_test;\n INTERNAL_SIZE_T arena_max;\n\n #if HAVE_TUNABLES\n /* Transparent Large Page support. */\n INTERNAL_SIZE_T thp_pagesize;\n /* A value different than 0 means to align mmap allocation to hp_pagesize\n add hp_flags on flags. */\n INTERNAL_SIZE_T hp_pagesize;\n int hp_flags;\n #endif\n\n /* Memory map support */\n int n_mmaps;\n int n_mmaps_max;\n int max_n_mmaps;\n /* the mmap_threshold is dynamic, until the user sets\n it manually, at which point we need to disable any\n dynamic behavior. */\n int no_dyn_threshold;\n\n /* Statistics */\n INTERNAL_SIZE_T mmapped_mem;\n INTERNAL_SIZE_T max_mmapped_mem;\n\n /* First address handed out by MORECORE/sbrk. */\n char *sbrk_base;\n\n #if USE_TCACHE\n /* Maximum number of buckets to use. */\n size_t tcache_bins;\n size_t tcache_max_bytes;\n /* Maximum number of chunks in each bucket. */\n size_t tcache_count;\n /* Maximum number of chunks to remove from the unsorted list, which\n aren't used to prefill the cache. 
*/\n size_t tcache_unsorted_limit;\n #endif\n };\n \"\"\"\n\n _fields_ = [\n (\"trim_threshold\", c_size_t),\n (\"top_pad\", c_size_t),\n (\"mmap_threshold\", c_size_t),\n (\"arena_test\", c_size_t),\n (\"arena_max\", c_size_t),\n (\"thp_pagesize\", c_size_t),\n (\"hp_pagesize\", c_size_t),\n (\"hp_flags\", ctypes.c_int32),\n (\"n_mmaps\", ctypes.c_int32),\n (\"n_mmaps_max\", ctypes.c_int32),\n (\"max_n_mmaps\", ctypes.c_int32),\n (\"no_dyn_threshold\", ctypes.c_int32),\n (\"mmapped_mem\", c_size_t),\n (\"max_mmapped_mem\", c_size_t),\n (\"sbrk_base\", c_pvoid),\n (\"tcache_bins\", c_size_t),\n (\"tcache_max_bytes\", c_size_t),\n (\"tcache_count\", ctypes.c_int32),\n (\"tcache_unsorted_limit\", c_size_t),\n ]\n\n\nclass MallocPar(CStruct2GDB):\n \"\"\"\n This class represents the malloc_par struct with interface compatible with `gdb.Value`.\n \"\"\"\n\n if pwndbg.glibc.get_version() >= (2, 35):\n _c_struct = c_malloc_par_2_35\n elif pwndbg.glibc.get_version() >= (2, 26):\n _c_struct = c_malloc_par_2_26\n elif pwndbg.glibc.get_version() >= (2, 24):\n _c_struct = c_malloc_par_2_24\n else:\n _c_struct = c_malloc_par_2_23\n sizeof = ctypes.sizeof(_c_struct)\n\n\n# https://github.com/bminor/glibc/blob/glibc-2.37/malloc/malloc.c#L1911-L1926\n# static struct malloc_par mp_ =\n# {\n# .top_pad = DEFAULT_TOP_PAD,\n# .n_mmaps_max = DEFAULT_MMAP_MAX,\n# .mmap_threshold = DEFAULT_MMAP_THRESHOLD,\n# .trim_threshold = DEFAULT_TRIM_THRESHOLD,\n# #define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))\n# .arena_test = NARENAS_FROM_NCORES (1)\n# #if USE_TCACHE\n# ,\n# .tcache_count = TCACHE_FILL_COUNT,\n# .tcache_bins = TCACHE_MAX_BINS,\n# .tcache_max_bytes = tidx2usize (TCACHE_MAX_BINS-1),\n# .tcache_unsorted_limit = 0 /* No limit. */\n# #endif\n# };\nDEFAULT_MP_ = MallocPar._c_struct()\nDEFAULT_MP_.top_pad = DEFAULT_TOP_PAD\nDEFAULT_MP_.n_mmaps_max = DEFAULT_MMAP_MAX\nDEFAULT_MP_.mmap_threshold = DEFAULT_MMAP_THRESHOLD\nDEFAULT_MP_.trim_threshold = DEFAULT_TRIM_THRESHOLD\nDEFAULT_MP_.arena_test = 2 if pwndbg.gdblib.arch.ptrsize == 4 else 8\nif MallocPar._c_struct != c_malloc_par_2_23:\n # the only difference between 2.23 and the rest is the lack of tcache\n DEFAULT_MP_.tcache_count = TCACHE_FILL_COUNT\n DEFAULT_MP_.tcache_bins = TCACHE_MAX_BINS\n DEFAULT_MP_.tcache_max_bytes = (TCACHE_MAX_BINS - 1) * MALLOC_ALIGN + MINSIZE - SIZE_SZ\n", "path": "pwndbg/heap/structs.py" } ]
[ { "content": "import ctypes\n\nimport gdb\n\nimport pwndbg.gdblib.arch\nimport pwndbg.gdblib.memory\nimport pwndbg.gdblib.typeinfo\nimport pwndbg.glibc\nfrom pwndbg.gdblib.ctypes import Structure\n\n\ndef request2size(req):\n if req + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE:\n return MINSIZE\n return (req + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK\n\n\ndef fastbin_index(size: int) -> int:\n if pwndbg.gdblib.arch.ptrsize == 8:\n return (size >> 4) - 2\n else:\n return (size >> 3) - 2\n\n\nSIZE_SZ = pwndbg.gdblib.arch.ptrsize\nMINSIZE = pwndbg.gdblib.arch.ptrsize * 4\n# i386 will override it to 16.\n# See https://elixir.bootlin.com/glibc/glibc-2.26/source/sysdeps/i386/malloc-alignment.h#L22\nMALLOC_ALIGN = (\n 16\n if pwndbg.gdblib.arch.current == \"i386\" and pwndbg.glibc.get_version() >= (2, 26)\n else pwndbg.gdblib.arch.ptrsize * 2\n)\nMALLOC_ALIGN_MASK = MALLOC_ALIGN - 1\nMAX_FAST_SIZE = 80 * SIZE_SZ // 4\nNBINS = 128\nBINMAPSIZE = 4\nTCACHE_MAX_BINS = 64\nNFASTBINS = fastbin_index(request2size(MAX_FAST_SIZE)) + 1\n\nif pwndbg.gdblib.arch.ptrsize == 4:\n PTR = ctypes.c_uint32\n SIZE_T = ctypes.c_uint32\nelse:\n PTR = ctypes.c_uint64 # type: ignore[misc]\n SIZE_T = ctypes.c_uint64 # type: ignore[misc]\n\nDEFAULT_TOP_PAD = 131072\nDEFAULT_MMAP_MAX = 65536\nDEFAULT_MMAP_THRESHOLD = 128 * 1024\nDEFAULT_TRIM_THRESHOLD = 128 * 1024\nTCACHE_FILL_COUNT = 7\n\n\nclass c_pvoid(PTR):\n \"\"\"\n Represents a pointer.\n \"\"\"\n\n\nclass c_size_t(SIZE_T):\n \"\"\"\n Represents a size_t.\n \"\"\"\n\n\nC2GDB_MAPPING = {\n ctypes.c_char: pwndbg.gdblib.typeinfo.char,\n ctypes.c_int8: pwndbg.gdblib.typeinfo.int8,\n ctypes.c_int16: pwndbg.gdblib.typeinfo.int16,\n ctypes.c_int32: pwndbg.gdblib.typeinfo.int32,\n ctypes.c_int64: pwndbg.gdblib.typeinfo.int64,\n ctypes.c_uint8: pwndbg.gdblib.typeinfo.uint8,\n ctypes.c_uint16: pwndbg.gdblib.typeinfo.uint16,\n ctypes.c_uint32: pwndbg.gdblib.typeinfo.uint32,\n ctypes.c_uint64: pwndbg.gdblib.typeinfo.uint64,\n c_pvoid: pwndbg.gdblib.typeinfo.pvoid,\n c_size_t: pwndbg.gdblib.typeinfo.size_t,\n}\n\n# Use correct endian for the dictionary keys\nif pwndbg.gdblib.arch.endian == \"little\":\n C2GDB_MAPPING = {k.__ctype_le__: v for k, v in C2GDB_MAPPING.items()}\nelse:\n C2GDB_MAPPING = {k.__ctype_be__: v for k, v in C2GDB_MAPPING.items()}\n\n\nclass FakeGDBField:\n \"\"\"\n Fake gdb.Field for compatibility\n \"\"\"\n\n def __init__(\n self,\n bitpos,\n name,\n type,\n parent_type,\n enumval=None,\n artificial=False,\n is_base_class=False,\n bitsize=0,\n ) -> None:\n # Note: pwndbg only uses `name` currently\n self.bitpos = bitpos\n self.name = name\n self.type = type\n self.parent_type = parent_type\n if enumval:\n self.enumval = enumval\n self.artificial = artificial\n self.is_base_class = is_base_class\n self.bitsize = bitsize\n\n\nclass CStruct2GDB:\n code = gdb.TYPE_CODE_STRUCT\n _c_struct = None\n\n def __init__(self, address: int) -> None:\n self.address = address\n\n def __int__(self) -> int:\n \"\"\"\n Returns the address of the C struct.\n \"\"\"\n return self.address\n\n def __getitem__(self, key: str) -> gdb.Value:\n \"\"\"\n Returns the value of the specified field as a `gdb.Value`.\n \"\"\"\n return self.read_field(key)\n\n def __getattr__(self, key: str) -> gdb.Value:\n \"\"\"\n Returns the value of the specified field as a `gdb.Value`.\n \"\"\"\n return self.read_field(key)\n\n def __eq__(self, other) -> bool:\n return self.address == int(other)\n\n def __str__(self) -> str:\n \"\"\"\n Returns a string representation of the C struct like 
`gdb.Value` does.\n \"\"\"\n output = \"{\\n\"\n for f in self._c_struct._fields_:\n output += \" %s = %s,\\n\" % (f[0], self.read_field(f[0]))\n output += \"}\"\n return output\n\n def read_field(self, field: str) -> gdb.Value:\n \"\"\"\n Returns the value of the specified field as a `gdb.Value`.\n \"\"\"\n field_address = self.get_field_address(field)\n field_type = next((f for f in self._c_struct._fields_ if f[0] == field))[1]\n if hasattr(field_type, \"_length_\"): # f is a ctypes Array\n t = C2GDB_MAPPING[field_type._type_]\n return pwndbg.gdblib.memory.poi(t.array(field_type._length_ - 1), field_address)\n return pwndbg.gdblib.memory.poi(C2GDB_MAPPING[field_type], field_address)\n\n @property\n def type(self):\n \"\"\"\n Returns type(self) to make it compatible with the `gdb.Value` interface.\n \"\"\"\n return type(self)\n\n @classmethod\n def unqualified(cls):\n \"\"\"\n Returns cls to make it compatible with the `gdb.types.has_field()` interface.\n \"\"\"\n return cls\n\n @classmethod\n def fields(cls):\n \"\"\"\n Return fields of the struct to make it compatible with the `gdb.Type` interface.\n \"\"\"\n fake_gdb_fields = []\n for f in cls._c_struct._fields_:\n field_name = f[0]\n field_type = f[1]\n bitpos = getattr(cls._c_struct, field_name).offset * 8\n if hasattr(field_type, \"_length_\"): # f is a ctypes Array\n t = C2GDB_MAPPING[field_type._type_]\n _type = t.array(field_type._length_ - 1)\n else:\n _type = C2GDB_MAPPING[field_type]\n fake_gdb_fields.append(FakeGDBField(bitpos, field_name, _type, cls))\n return fake_gdb_fields\n\n @classmethod\n def keys(cls) -> list:\n \"\"\"\n Return a list of the names of the fields in the struct to make it compatible with the `gdb.Type` interface.\n \"\"\"\n return [f[0] for f in cls._c_struct._fields_]\n\n def get_field_address(self, field: str) -> int:\n \"\"\"\n Returns the address of the specified field.\n \"\"\"\n return self.address + getattr(self._c_struct, field).offset\n\n def items(self) -> tuple:\n \"\"\"\n Returns a tuple of (field name, field value) pairs.\n \"\"\"\n return tuple((field[0], getattr(self, field[0])) for field in self._c_struct._fields_)\n\n\nclass c_malloc_state_2_26(Structure):\n \"\"\"\n This class represents malloc_state struct for GLIBC < 2.27 as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/1c9a5c270d8b66f30dcfaf1cb2d6cf39d3e18369/malloc/malloc.c#L1678-L1716\n\n struct malloc_state\n {\n /* Serialize access. */\n __libc_lock_define (, mutex);\n\n /* Flags (formerly in max_fast). */\n int flags;\n\n /* Fastbins */\n mfastbinptr fastbinsY[NFASTBINS];\n\n /* Base of the topmost chunk -- not otherwise kept in a bin */\n mchunkptr top;\n\n /* The remainder from the most recent split of a small request */\n mchunkptr last_remainder;\n\n /* Normal bins packed as described above */\n mchunkptr bins[NBINS * 2 - 2];\n\n /* Bitmap of bins */\n unsigned int binmap[BINMAPSIZE];\n\n /* Linked list */\n struct malloc_state *next;\n\n /* Linked list for free arenas. Access to this field is serialized\n by free_list_lock in arena.c. */\n struct malloc_state *next_free;\n\n /* Number of threads attached to this arena. 0 if the arena is on\n the free list. Access to this field is serialized by\n free_list_lock in arena.c. */\n INTERNAL_SIZE_T attached_threads;\n\n /* Memory allocated from the system in this arena. 
*/\n INTERNAL_SIZE_T system_mem;\n INTERNAL_SIZE_T max_system_mem;\n };\n \"\"\"\n\n _fields_ = [\n (\"mutex\", ctypes.c_int32),\n (\"flags\", ctypes.c_int32),\n (\"fastbinsY\", c_pvoid * NFASTBINS),\n (\"top\", c_pvoid),\n (\"last_remainder\", c_pvoid),\n (\"bins\", c_pvoid * (NBINS * 2 - 2)),\n (\"binmap\", ctypes.c_int32 * BINMAPSIZE),\n (\"next\", c_pvoid),\n (\"next_free\", c_pvoid),\n (\"attached_threads\", c_size_t),\n (\"system_mem\", c_size_t),\n (\"max_system_mem\", c_size_t),\n ]\n\n\nclass c_malloc_state_2_27(Structure):\n \"\"\"\n This class represents malloc_state struct for GLIBC >= 2.27 as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/glibc-2.34/malloc/malloc.c#L1831\n\n\n struct malloc_state\n {\n /* Serialize access. */\n __libc_lock_define (, mutex);\n\n /* Flags (formerly in max_fast). */\n int flags;\n\n /* Set if the fastbin chunks contain recently inserted free blocks. */\n /* Note this is a bool but not all targets support atomics on booleans. */\n int have_fastchunks;\n\n /* Fastbins */\n mfastbinptr fastbinsY[NFASTBINS];\n\n /* Base of the topmost chunk -- not otherwise kept in a bin */\n mchunkptr top;\n\n /* The remainder from the most recent split of a small request */\n mchunkptr last_remainder;\n\n /* Normal bins packed as described above */\n mchunkptr bins[NBINS * 2 - 2];\n\n /* Bitmap of bins */\n unsigned int binmap[BINMAPSIZE];\n\n /* Linked list */\n struct malloc_state *next;\n\n /* Linked list for free arenas. Access to this field is serialized\n by free_list_lock in arena.c. */\n struct malloc_state *next_free;\n\n /* Number of threads attached to this arena. 0 if the arena is on\n the free list. Access to this field is serialized by\n free_list_lock in arena.c. */\n INTERNAL_SIZE_T attached_threads;\n\n /* Memory allocated from the system in this arena. */\n INTERNAL_SIZE_T system_mem;\n INTERNAL_SIZE_T max_system_mem;\n };\n \"\"\"\n\n _fields_ = [\n (\"mutex\", ctypes.c_int32),\n (\"flags\", ctypes.c_int32),\n (\"have_fastchunks\", ctypes.c_int32),\n (\"fastbinsY\", c_pvoid * NFASTBINS),\n (\"top\", c_pvoid),\n (\"last_remainder\", c_pvoid),\n (\"bins\", c_pvoid * (NBINS * 2 - 2)),\n (\"binmap\", ctypes.c_int32 * BINMAPSIZE),\n (\"next\", c_pvoid),\n (\"next_free\", c_pvoid),\n (\"attached_threads\", c_size_t),\n (\"system_mem\", c_size_t),\n (\"max_system_mem\", c_size_t),\n ]\n\n\nclass MallocState(CStruct2GDB):\n \"\"\"\n This class represents malloc_state struct with interface compatible with `gdb.Value`.\n \"\"\"\n\n if pwndbg.glibc.get_version() >= (2, 27):\n _c_struct = c_malloc_state_2_27\n else:\n _c_struct = c_malloc_state_2_26\n sizeof = ctypes.sizeof(_c_struct)\n\n\nclass c_heap_info(Structure):\n \"\"\"\n This class represents heap_info struct as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/glibc-2.34/malloc/arena.c#L53\n\n typedef struct _heap_info\n {\n mstate ar_ptr; /* Arena for this heap. */\n struct _heap_info *prev; /* Previous heap. */\n size_t size; /* Current size in bytes. */\n size_t mprotect_size; /* Size in bytes that has been mprotected\n PROT_READ|PROT_WRITE. */\n /* Make sure the following data is properly aligned, particularly\n that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of\n MALLOC_ALIGNMENT. 
*/\n char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];\n } heap_info;\n \"\"\"\n\n _fields_ = [\n (\"ar_ptr\", c_pvoid),\n (\"prev\", c_pvoid),\n (\"size\", c_size_t),\n (\"mprotect_size\", c_size_t),\n (\"pad\", ctypes.c_uint8 * (-6 * SIZE_SZ & MALLOC_ALIGN_MASK)),\n ]\n\n\nclass HeapInfo(CStruct2GDB):\n \"\"\"\n This class represents heap_info struct with interface compatible with `gdb.Value`.\n \"\"\"\n\n _c_struct = c_heap_info\n sizeof = ctypes.sizeof(_c_struct)\n\n\nclass c_malloc_chunk(Structure):\n \"\"\"\n This class represents malloc_chunk struct as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/glibc-2.34/malloc/malloc.c#L1154\n\n struct malloc_chunk {\n\n INTERNAL_SIZE_T mchunk_prev_size; /* Size of previous chunk (if free). */\n INTERNAL_SIZE_T mchunk_size; /* Size in bytes, including overhead. */\n\n struct malloc_chunk* fd; /* double links -- used only if free. */\n struct malloc_chunk* bk;\n\n /* Only used for large blocks: pointer to next larger size. */\n struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */\n struct malloc_chunk* bk_nextsize;\n };\n \"\"\"\n\n _fields_ = [\n (\"prev_size\", c_size_t),\n (\"size\", c_size_t),\n (\"fd\", c_pvoid),\n (\"bk\", c_pvoid),\n (\"fd_nextsize\", c_pvoid),\n (\"bk_nextsize\", c_pvoid),\n ]\n\n\nclass MallocChunk(CStruct2GDB):\n \"\"\"\n This class represents malloc_chunk struct with interface compatible with `gdb.Value`.\n \"\"\"\n\n _c_struct = c_malloc_chunk\n sizeof = ctypes.sizeof(_c_struct)\n\n\nclass c_tcache_perthread_struct_2_29(Structure):\n \"\"\"\n This class represents tcache_perthread_struct for GLIBC < 2.30 as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/glibc-2.29/malloc/malloc.c#L2916\n\n typedef struct tcache_perthread_struct\n {\n char counts[TCACHE_MAX_BINS];\n tcache_entry *entries[TCACHE_MAX_BINS];\n } tcache_perthread_struct;\n \"\"\"\n\n _fields_ = [\n (\"counts\", ctypes.c_char * TCACHE_MAX_BINS),\n (\"entries\", c_pvoid * TCACHE_MAX_BINS),\n ]\n\n\nclass c_tcache_perthread_struct_2_30(Structure):\n \"\"\"\n This class represents the tcache_perthread_struct for GLIBC >= 2.30 as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/glibc-2.34/malloc/malloc.c#L3025\n\n typedef struct tcache_perthread_struct\n {\n uint16_t counts[TCACHE_MAX_BINS];\n tcache_entry *entries[TCACHE_MAX_BINS];\n } tcache_perthread_struct;\n \"\"\"\n\n _fields_ = [\n (\"counts\", ctypes.c_uint16 * TCACHE_MAX_BINS),\n (\"entries\", c_pvoid * TCACHE_MAX_BINS),\n ]\n\n\nclass TcachePerthreadStruct(CStruct2GDB):\n \"\"\"\n This class represents tcache_perthread_struct with interface compatible with `gdb.Value`.\n \"\"\"\n\n if pwndbg.glibc.get_version() >= (2, 30):\n _c_struct = c_tcache_perthread_struct_2_30\n else:\n _c_struct = c_tcache_perthread_struct_2_29\n sizeof = ctypes.sizeof(_c_struct)\n\n\nclass c_tcache_entry_2_28(Structure):\n \"\"\"\n This class represents the tcache_entry struct for GLIBC < 2.29 as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/glibc-2.28/malloc/malloc.c#L2888\n\n typedef struct tcache_entry\n {\n struct tcache_entry *next;\n } tcache_entry;\n \"\"\"\n\n _fields_ = [(\"next\", c_pvoid)]\n\n\nclass c_tcache_entry_2_29(Structure):\n \"\"\"\n This class represents the tcache_entry struct for GLIBC >= 2.29 as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/glibc-2.34/malloc/malloc.c#L3013\n\n typedef struct tcache_entry\n {\n struct tcache_entry *next;\n /* This field exists to detect double frees. 
*/\n uintptr_t key;\n } tcache_entry;\n \"\"\"\n\n _fields_ = [(\"next\", c_pvoid), (\"key\", c_pvoid)]\n\n\nclass TcacheEntry(CStruct2GDB):\n \"\"\"\n This class represents the tcache_entry struct with interface compatible with `gdb.Value`.\n \"\"\"\n\n if pwndbg.glibc.get_version() >= (2, 29):\n _c_struct = c_tcache_entry_2_29\n else:\n _c_struct = c_tcache_entry_2_28\n sizeof = ctypes.sizeof(_c_struct)\n\n\nclass c_malloc_par_2_23(Structure):\n \"\"\"\n This class represents the malloc_par struct for GLIBC < 2.24 as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/glibc-2.23/malloc/malloc.c#L1726\n\n struct malloc_par\n {\n /* Tunable parameters */\n unsigned long trim_threshold;\n INTERNAL_SIZE_T top_pad;\n INTERNAL_SIZE_T mmap_threshold;\n INTERNAL_SIZE_T arena_test;\n INTERNAL_SIZE_T arena_max;\n\n /* Memory map support */\n int n_mmaps;\n int n_mmaps_max;\n int max_n_mmaps;\n /* the mmap_threshold is dynamic, until the user sets\n it manually, at which point we need to disable any\n dynamic behavior. */\n int no_dyn_threshold;\n\n /* Statistics */\n INTERNAL_SIZE_T mmapped_mem;\n /*INTERNAL_SIZE_T sbrked_mem;*/\n /*INTERNAL_SIZE_T max_sbrked_mem;*/\n INTERNAL_SIZE_T max_mmapped_mem;\n INTERNAL_SIZE_T max_total_mem; /* only kept for NO_THREADS */\n\n /* First address handed out by MORECORE/sbrk. */\n char *sbrk_base;\n };\n \"\"\"\n\n _fields_ = [\n (\"trim_threshold\", c_size_t),\n (\"top_pad\", c_size_t),\n (\"mmap_threshold\", c_size_t),\n (\"arena_test\", c_size_t),\n (\"arena_max\", c_size_t),\n (\"n_mmaps\", ctypes.c_int32),\n (\"n_mmaps_max\", ctypes.c_int32),\n (\"max_n_mmaps\", ctypes.c_int32),\n (\"no_dyn_threshold\", ctypes.c_int32),\n (\"mmapped_mem\", c_size_t),\n (\"max_mmapped_mem\", c_size_t),\n (\"max_total_mem\", c_size_t),\n (\"sbrk_base\", c_pvoid),\n ]\n\n\nclass c_malloc_par_2_24(Structure):\n \"\"\"\n This class represents the malloc_par struct for GLIBC >= 2.24 as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/glibc-2.25/malloc/malloc.c#L1690\n https://github.com/bminor/glibc/blob/glibc-2.24/malloc/malloc.c#L1719\n\n struct malloc_par\n {\n /* Tunable parameters */\n unsigned long trim_threshold;\n INTERNAL_SIZE_T top_pad;\n INTERNAL_SIZE_T mmap_threshold;\n INTERNAL_SIZE_T arena_test;\n INTERNAL_SIZE_T arena_max;\n\n /* Memory map support */\n int n_mmaps;\n int n_mmaps_max;\n int max_n_mmaps;\n /* the mmap_threshold is dynamic, until the user sets\n it manually, at which point we need to disable any\n dynamic behavior. */\n int no_dyn_threshold;\n\n /* Statistics */\n INTERNAL_SIZE_T mmapped_mem;\n INTERNAL_SIZE_T max_mmapped_mem;\n\n /* First address handed out by MORECORE/sbrk. 
*/\n char *sbrk_base;\n };\n \"\"\"\n\n _fields_ = [\n (\"trim_threshold\", c_size_t),\n (\"top_pad\", c_size_t),\n (\"mmap_threshold\", c_size_t),\n (\"arena_test\", c_size_t),\n (\"arena_max\", c_size_t),\n (\"n_mmaps\", ctypes.c_int32),\n (\"n_mmaps_max\", ctypes.c_int32),\n (\"max_n_mmaps\", ctypes.c_int32),\n (\"no_dyn_threshold\", ctypes.c_int32),\n (\"mmapped_mem\", c_size_t),\n (\"max_mmapped_mem\", c_size_t),\n (\"sbrk_base\", c_pvoid),\n ]\n\n\nclass c_malloc_par_2_26(Structure):\n \"\"\"\n This class represents the malloc_par struct for GLIBC >= 2.26 as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/glibc-2.34/malloc/malloc.c#L1875\n\n struct malloc_par\n {\n /* Tunable parameters */\n unsigned long trim_threshold;\n INTERNAL_SIZE_T top_pad;\n INTERNAL_SIZE_T mmap_threshold;\n INTERNAL_SIZE_T arena_test;\n INTERNAL_SIZE_T arena_max;\n\n /* Memory map support */\n int n_mmaps;\n int n_mmaps_max;\n int max_n_mmaps;\n /* the mmap_threshold is dynamic, until the user sets\n it manually, at which point we need to disable any\n dynamic behavior. */\n int no_dyn_threshold;\n\n /* Statistics */\n INTERNAL_SIZE_T mmapped_mem;\n INTERNAL_SIZE_T max_mmapped_mem;\n\n /* First address handed out by MORECORE/sbrk. */\n char *sbrk_base;\n\n #if USE_TCACHE\n /* Maximum number of buckets to use. */\n size_t tcache_bins;\n size_t tcache_max_bytes;\n /* Maximum number of chunks in each bucket. */\n size_t tcache_count;\n /* Maximum number of chunks to remove from the unsorted list, which\n aren't used to prefill the cache. */\n size_t tcache_unsorted_limit;\n #endif\n };\n \"\"\"\n\n _fields_ = [\n (\"trim_threshold\", c_size_t),\n (\"top_pad\", c_size_t),\n (\"mmap_threshold\", c_size_t),\n (\"arena_test\", c_size_t),\n (\"arena_max\", c_size_t),\n (\"n_mmaps\", ctypes.c_int32),\n (\"n_mmaps_max\", ctypes.c_int32),\n (\"max_n_mmaps\", ctypes.c_int32),\n (\"no_dyn_threshold\", ctypes.c_int32),\n (\"mmapped_mem\", c_size_t),\n (\"max_mmapped_mem\", c_size_t),\n (\"sbrk_base\", c_pvoid),\n (\"tcache_bins\", c_size_t),\n (\"tcache_max_bytes\", c_size_t),\n (\"tcache_count\", ctypes.c_int32),\n (\"tcache_unsorted_limit\", c_size_t),\n ]\n\n\nclass c_malloc_par_2_35(Structure):\n \"\"\"\n This class represents the malloc_par struct for GLIBC >= 2.35 as a ctypes struct.\n\n https://github.com/bminor/glibc/blob/glibc-2.35/malloc/malloc.c#L1874\n\n struct malloc_par\n {\n /* Tunable parameters */\n unsigned long trim_threshold;\n INTERNAL_SIZE_T top_pad;\n INTERNAL_SIZE_T mmap_threshold;\n INTERNAL_SIZE_T arena_test;\n INTERNAL_SIZE_T arena_max;\n\n #if HAVE_TUNABLES\n /* Transparent Large Page support. */\n INTERNAL_SIZE_T thp_pagesize;\n /* A value different than 0 means to align mmap allocation to hp_pagesize\n add hp_flags on flags. */\n INTERNAL_SIZE_T hp_pagesize;\n int hp_flags;\n #endif\n\n /* Memory map support */\n int n_mmaps;\n int n_mmaps_max;\n int max_n_mmaps;\n /* the mmap_threshold is dynamic, until the user sets\n it manually, at which point we need to disable any\n dynamic behavior. */\n int no_dyn_threshold;\n\n /* Statistics */\n INTERNAL_SIZE_T mmapped_mem;\n INTERNAL_SIZE_T max_mmapped_mem;\n\n /* First address handed out by MORECORE/sbrk. */\n char *sbrk_base;\n\n #if USE_TCACHE\n /* Maximum number of buckets to use. */\n size_t tcache_bins;\n size_t tcache_max_bytes;\n /* Maximum number of chunks in each bucket. */\n size_t tcache_count;\n /* Maximum number of chunks to remove from the unsorted list, which\n aren't used to prefill the cache. 
*/\n size_t tcache_unsorted_limit;\n #endif\n };\n \"\"\"\n\n _fields_ = [\n (\"trim_threshold\", c_size_t),\n (\"top_pad\", c_size_t),\n (\"mmap_threshold\", c_size_t),\n (\"arena_test\", c_size_t),\n (\"arena_max\", c_size_t),\n (\"thp_pagesize\", c_size_t),\n (\"hp_pagesize\", c_size_t),\n (\"hp_flags\", ctypes.c_int32),\n (\"n_mmaps\", ctypes.c_int32),\n (\"n_mmaps_max\", ctypes.c_int32),\n (\"max_n_mmaps\", ctypes.c_int32),\n (\"no_dyn_threshold\", ctypes.c_int32),\n (\"mmapped_mem\", c_size_t),\n (\"max_mmapped_mem\", c_size_t),\n (\"sbrk_base\", c_pvoid),\n (\"tcache_bins\", c_size_t),\n (\"tcache_max_bytes\", c_size_t),\n (\"tcache_count\", ctypes.c_int32),\n (\"tcache_unsorted_limit\", c_size_t),\n ]\n\n\nclass MallocPar(CStruct2GDB):\n \"\"\"\n This class represents the malloc_par struct with interface compatible with `gdb.Value`.\n \"\"\"\n\n if pwndbg.glibc.get_version() >= (2, 35):\n _c_struct = c_malloc_par_2_35\n elif pwndbg.glibc.get_version() >= (2, 26):\n _c_struct = c_malloc_par_2_26\n elif pwndbg.glibc.get_version() >= (2, 24):\n _c_struct = c_malloc_par_2_24\n else:\n _c_struct = c_malloc_par_2_23\n sizeof = ctypes.sizeof(_c_struct)\n\n\n# https://github.com/bminor/glibc/blob/glibc-2.37/malloc/malloc.c#L1911-L1926\n# static struct malloc_par mp_ =\n# {\n# .top_pad = DEFAULT_TOP_PAD,\n# .n_mmaps_max = DEFAULT_MMAP_MAX,\n# .mmap_threshold = DEFAULT_MMAP_THRESHOLD,\n# .trim_threshold = DEFAULT_TRIM_THRESHOLD,\n# #define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))\n# .arena_test = NARENAS_FROM_NCORES (1)\n# #if USE_TCACHE\n# ,\n# .tcache_count = TCACHE_FILL_COUNT,\n# .tcache_bins = TCACHE_MAX_BINS,\n# .tcache_max_bytes = tidx2usize (TCACHE_MAX_BINS-1),\n# .tcache_unsorted_limit = 0 /* No limit. */\n# #endif\n# };\nDEFAULT_MP_ = MallocPar._c_struct()\nDEFAULT_MP_.top_pad = DEFAULT_TOP_PAD\nDEFAULT_MP_.n_mmaps_max = DEFAULT_MMAP_MAX\nDEFAULT_MP_.mmap_threshold = DEFAULT_MMAP_THRESHOLD\nDEFAULT_MP_.trim_threshold = DEFAULT_TRIM_THRESHOLD\nDEFAULT_MP_.arena_test = 2 if pwndbg.gdblib.arch.ptrsize == 4 else 8\nif MallocPar._c_struct != c_malloc_par_2_23:\n # the only difference between 2.23 and the rest is the lack of tcache\n DEFAULT_MP_.tcache_count = TCACHE_FILL_COUNT\n DEFAULT_MP_.tcache_bins = TCACHE_MAX_BINS\n DEFAULT_MP_.tcache_max_bytes = (TCACHE_MAX_BINS - 1) * MALLOC_ALIGN + MINSIZE - SIZE_SZ\n", "path": "pwndbg/heap/structs.py" } ]
diff --git a/pwndbg/heap/structs.py b/pwndbg/heap/structs.py index 0e7913f48e4..72d40f5b40d 100644 --- a/pwndbg/heap/structs.py +++ b/pwndbg/heap/structs.py @@ -78,6 +78,12 @@ class c_size_t(SIZE_T): c_size_t: pwndbg.gdblib.typeinfo.size_t, } +# Use correct endian for the dictionary keys +if pwndbg.gdblib.arch.endian == "little": + C2GDB_MAPPING = {k.__ctype_le__: v for k, v in C2GDB_MAPPING.items()} +else: + C2GDB_MAPPING = {k.__ctype_be__: v for k, v in C2GDB_MAPPING.items()} + class FakeGDBField: """
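The endianness fix in the diff above re-keys `C2GDB_MAPPING` by the endian-specific ctypes class variants, since an endian-aware `Structure` stores those variants as its field types and a mapping keyed by the native classes would miss them. A minimal standalone sketch of the same pattern, with placeholder mapping values and `sys.byteorder` standing in for the target endianness that pwndbg reads from `pwndbg.gdblib.arch.endian`:

```python
import ctypes
import sys

# Hypothetical mapping from ctypes classes to target-side type descriptions.
TYPE_MAPPING = {
    ctypes.c_uint32: "uint32",
    ctypes.c_uint64: "uint64",
}

# Re-key by the endian-specific variants, mirroring the diff above.  On a
# little-endian host, c_uint32.__ctype_le__ is c_uint32 itself, so native
# lookups keep working; for a big-endian target the keys become the
# __ctype_be__ classes that an endian-aware Structure stores for its fields.
if sys.byteorder == "little":
    TYPE_MAPPING = {k.__ctype_le__: v for k, v in TYPE_MAPPING.items()}
else:
    TYPE_MAPPING = {k.__ctype_be__: v for k, v in TYPE_MAPPING.items()}

variant = (
    ctypes.c_uint32.__ctype_le__
    if sys.byteorder == "little"
    else ctypes.c_uint32.__ctype_be__
)
assert TYPE_MAPPING[variant] == "uint32"
```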
zigpy__zha-device-handlers-112
Ikea group support: the bind method doesn't return its status as expected
https://github.com/dmulcahey/zha-device-handlers/blob/b5b383939944ff541ee38a94c7f4d6cf3edc611f/zhaquirks/ikea/__init__.py#L25
https://github.com/home-assistant/home-assistant/blob/a30c37017b7782473294d7999e85d7a369a0539a/homeassistant/components/zha/core/helpers.py#L56
Reported by @Adminiuga: we should return the status wrapped in a list (`[ ]`) so the bind helper in HA is happy.
[ { "content": "\"\"\"Ikea module.\"\"\"\nimport logging\nfrom zigpy.zcl.clusters.lightlink import LightLink\nfrom zigpy.quirks import CustomCluster\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass LightLinkCluster(CustomCluster, LightLink):\n \"\"\"Ikea LightLink cluster.\"\"\"\n\n async def bind(self):\n \"\"\"Bind LightLink cluster to coordinator.\"\"\"\n application = self._endpoint.device.application\n try:\n coordinator = application.get_device(application.ieee)\n except KeyError:\n _LOGGER.warning(\n \"Aborting - unable to locate required coordinator device.\"\n )\n return\n group_list = await self.get_group_identifiers(0)\n group_record = group_list[2]\n group_id = group_record[0].group_id\n await coordinator.add_to_group(group_id)\n", "path": "zhaquirks/ikea/__init__.py" } ]
[ { "content": "\"\"\"Ikea module.\"\"\"\nimport logging\nfrom zigpy.zcl.clusters.lightlink import LightLink\nfrom zigpy.quirks import CustomCluster\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass LightLinkCluster(CustomCluster, LightLink):\n \"\"\"Ikea LightLink cluster.\"\"\"\n\n async def bind(self):\n \"\"\"Bind LightLink cluster to coordinator.\"\"\"\n application = self._endpoint.device.application\n try:\n coordinator = application.get_device(application.ieee)\n except KeyError:\n _LOGGER.warning(\n \"Aborting - unable to locate required coordinator device.\"\n )\n return\n group_list = await self.get_group_identifiers(0)\n group_record = group_list[2]\n group_id = group_record[0].group_id\n status = await coordinator.add_to_group(group_id)\n return [status]\n", "path": "zhaquirks/ikea/__init__.py" } ]
diff --git a/zhaquirks/ikea/__init__.py b/zhaquirks/ikea/__init__.py index 8742d38808..d1f643fbfb 100644 --- a/zhaquirks/ikea/__init__.py +++ b/zhaquirks/ikea/__init__.py @@ -22,4 +22,5 @@ async def bind(self): group_list = await self.get_group_identifiers(0) group_record = group_list[2] group_id = group_record[0].group_id - await coordinator.add_to_group(group_id) + status = await coordinator.add_to_group(group_id) + return [status]
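To make the contract concrete, here is a minimal, self-contained sketch of why the patched `bind` wraps its status in a list — `bind_helper` and `Status` are hypothetical stand-ins, not the actual HA helper: the caller indexes the returned sequence, so a bare status or an implicit `None` breaks it.

```python
import asyncio
import enum


class Status(enum.IntEnum):
    SUCCESS = 0x00
    FAILURE = 0x01


async def bind():
    # Mirrors the patched LightLinkCluster.bind(): wrap the single status
    # in a list instead of letting the coroutine return None.
    status = Status.SUCCESS  # stand-in for `await coordinator.add_to_group(group_id)`
    return [status]


async def bind_helper():
    res = await bind()
    # The shape the caller expects: res[0] carries the first bind status.
    return "bound" if res[0] == Status.SUCCESS else "failed"


print(asyncio.run(bind_helper()))  # -> bound
```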
scikit-hep__awkward-2266
`NotImplementedError` from `TypeTracerArray.nan_to_num` ### Description of new feature I'm trying to use `dask-awkward` and `vector` to do vector addition. It works fine for Cartesian coordinates but not spherical: ```python >>> import awkward as ak, dask_awkward as dak, vector >>> vector.register_awkward() >>> a = ak.Array([1.0]) >>> da = dak.from_awkward(a, 1) >>> dv1 = dak.with_name(dak.zip({'x': da, 'y': da, 'z': da}), 'Vector3D') >>> (dv1 + dv1).compute() <VectorArray3D [{x: 2, y: 2, z: 2}] type='1 * Vector3D[x: float64, y: float...'> >>> dv2 = dak.with_name(dak.zip({'rho': da, 'phi': da, 'theta': da}), 'Vector3D') >>> (dv2 + dv2).compute() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/user/miniconda3/envs/func_adl_uproot_rc/lib/python3.10/site-packages/numpy/lib/mixins.py", line 21, in func return ufunc(self, other) File "/home/user/iris-hep/src/dask-awkward/src/dask_awkward/lib/core.py", line 1027, in __array_ufunc__ new_meta = ufunc(*inputs_meta) File "/home/user/iris-hep/src/awkward-1.0/src/awkward/highlevel.py", line 1373, in __array_ufunc__ return ak._connect.numpy.array_ufunc(ufunc, method, inputs, kwargs) File "/home/user/iris-hep/src/awkward-1.0/src/awkward/_connect/numpy.py", line 291, in array_ufunc out = ak._broadcasting.broadcast_and_apply( File "/home/user/iris-hep/src/awkward-1.0/src/awkward/_broadcasting.py", line 1063, in broadcast_and_apply out = apply_step( File "/home/user/iris-hep/src/awkward-1.0/src/awkward/_broadcasting.py", line 1042, in apply_step return continuation() File "/home/user/iris-hep/src/awkward-1.0/src/awkward/_broadcasting.py", line 759, in continuation outcontent = apply_step( File "/home/user/iris-hep/src/awkward-1.0/src/awkward/_broadcasting.py", line 1028, in apply_step result = action( File "/home/user/iris-hep/src/awkward-1.0/src/awkward/_connect/numpy.py", line 193, in action return _array_ufunc_adjust(custom, inputs, kwargs, behavior) File "/home/user/iris-hep/src/awkward-1.0/src/awkward/_connect/numpy.py", line 130, in _array_ufunc_adjust out = custom(*args, **kwargs) File "/home/user/miniconda3/envs/func_adl_uproot_rc/lib/python3.10/site-packages/vector/backends/awkward.py", line 1514, in <lambda> behavior[numpy.add, left, right] = lambda v1, v2: v1.add(v2) File "/home/user/miniconda3/envs/func_adl_uproot_rc/lib/python3.10/site-packages/vector/_methods.py", line 2068, in add return module.add.dispatch(self, other) File "/home/user/miniconda3/envs/func_adl_uproot_rc/lib/python3.10/site-packages/vector/_compute/spatial/add.py", line 593, in dispatch function( File "/home/user/miniconda3/envs/func_adl_uproot_rc/lib/python3.10/site-packages/vector/_compute/spatial/add.py", line 310, in rhophi_theta_rhophi_theta z1 = z.rhophi_theta(lib, rho1, phi1, theta1) File "/home/user/miniconda3/envs/func_adl_uproot_rc/lib/python3.10/site-packages/vector/_compute/spatial/z.py", line 52, in rhophi_theta return lib.nan_to_num(rho / lib.tan(theta), nan=0.0, posinf=inf, neginf=-inf) File "<__array_function__ internals>", line 200, in nan_to_num File "/home/user/iris-hep/src/awkward-1.0/src/awkward/highlevel.py", line 1392, in __array_function__ return ak._connect.numpy.array_function( File "/home/user/iris-hep/src/awkward-1.0/src/awkward/_connect/numpy.py", line 75, in array_function return function(*args, **kwargs) File "/home/user/iris-hep/src/awkward-1.0/src/awkward/_connect/numpy.py", line 96, in ensure_valid_args return function(*args, **kwargs) File 
"/home/user/iris-hep/src/awkward-1.0/src/awkward/operations/ak_nan_to_num.py", line 136, in _nep_18_impl return nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf) File "/home/user/iris-hep/src/awkward-1.0/src/awkward/operations/ak_nan_to_num.py", line 51, in nan_to_num return _impl(array, copy, nan, posinf, neginf, highlevel, behavior) File "/home/user/iris-hep/src/awkward-1.0/src/awkward/operations/ak_nan_to_num.py", line 95, in _impl out = ak._do.recursively_apply(layout, action, behavior) File "/home/user/iris-hep/src/awkward-1.0/src/awkward/_do.py", line 34, in recursively_apply return layout._recursively_apply( File "/home/user/iris-hep/src/awkward-1.0/src/awkward/contents/numpyarray.py", line 1301, in _recursively_apply result = action( File "/home/user/iris-hep/src/awkward-1.0/src/awkward/operations/ak_nan_to_num.py", line 85, in action nplike.nan_to_num( File "/home/user/iris-hep/src/awkward-1.0/src/awkward/_nplikes/typetracer.py", line 1190, in nan_to_num raise ak._errors.wrap_error(NotImplementedError) NotImplementedError: See if this has been reported at https://github.com/scikit-hep/awkward-1.0/issues ```
[ { "content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\nfrom __future__ import annotations\n\nfrom numbers import Number\n\nimport numpy\n\nimport awkward as ak\nfrom awkward._errors import wrap_error\nfrom awkward._nplikes.numpylike import ArrayLike, IndexType, NumpyLike, NumpyMetadata\nfrom awkward._nplikes.shape import ShapeItem, unknown_length\nfrom awkward._util import NDArrayOperatorsMixin, is_non_string_like_sequence\nfrom awkward.typing import (\n Any,\n Final,\n Literal,\n Self,\n SupportsIndex,\n TypeVar,\n)\n\nnp = NumpyMetadata.instance()\n\n\ndef is_unknown_length(array: Any) -> bool:\n return array is unknown_length\n\n\ndef is_unknown_scalar(array: Any) -> bool:\n return isinstance(array, TypeTracerArray) and array.ndim == 0\n\n\ndef is_unknown_integer(array: Any) -> bool:\n return is_unknown_scalar(array) and np.issubdtype(array.dtype, np.integer)\n\n\ndef is_unknown_array(array: Any) -> bool:\n return isinstance(array, TypeTracerArray) and array.ndim > 0\n\n\nT = TypeVar(\"T\")\nS = TypeVar(\"S\")\n\n\ndef ensure_known_scalar(value: T, default: S) -> T | S:\n assert not is_unknown_scalar(default)\n return default if is_unknown_scalar(value) else value\n\n\ndef _emptyarray(x):\n if is_unknown_scalar(x):\n return numpy.empty(0, x._dtype)\n elif hasattr(x, \"dtype\"):\n return numpy.empty(0, x.dtype)\n else:\n return numpy.empty(0, numpy.array(x).dtype)\n\n\nclass MaybeNone:\n def __init__(self, content):\n self._content = content\n\n @property\n def content(self):\n return self._content\n\n def __eq__(self, other):\n if isinstance(other, MaybeNone):\n return self._content == other._content\n else:\n return False\n\n def __repr__(self):\n return f\"MaybeNone({self._content!r})\"\n\n def __str__(self):\n return f\"?{self._content}\"\n\n\nclass OneOf:\n def __init__(self, contents):\n self._contents = contents\n\n @property\n def contents(self):\n return self._contents\n\n def __eq__(self, other):\n if isinstance(other, OneOf):\n return set(self._contents) == set(other._contents)\n else:\n return False\n\n def __repr__(self):\n return f\"OneOf({self._contents!r})\"\n\n def __str__(self):\n return (\n f\"oneof-{'-'.join(str(x).replace('unknown-', '') for x in self._contents)}\"\n )\n\n\nclass TypeTracerReport:\n def __init__(self):\n # maybe the order will be useful information\n self._shape_touched_set = set()\n self._shape_touched = []\n self._data_touched_set = set()\n self._data_touched = []\n\n def __repr__(self):\n return f\"<TypeTracerReport with {len(self._shape_touched)} shape_touched, {len(self._data_touched)} data_touched>\"\n\n @property\n def shape_touched(self):\n return self._shape_touched\n\n @property\n def data_touched(self):\n return self._data_touched\n\n def touch_shape(self, label):\n if label not in self._shape_touched_set:\n self._shape_touched_set.add(label)\n self._shape_touched.append(label)\n\n def touch_data(self, label):\n if label not in self._data_touched_set:\n # touching data implies that the shape will be touched as well\n # implemented here so that the codebase doesn't need to be filled\n # with calls to both methods everywhere\n self._shape_touched_set.add(label)\n self._shape_touched.append(label)\n self._data_touched_set.add(label)\n self._data_touched.append(label)\n\n\ndef _attach_report(layout, form, report: TypeTracerReport):\n if isinstance(layout, (ak.contents.BitMaskedArray, ak.contents.ByteMaskedArray)):\n assert isinstance(form, (ak.forms.BitMaskedForm, ak.forms.ByteMaskedForm))\n 
layout.mask.data.form_key = form.form_key\n layout.mask.data.report = report\n _attach_report(layout.content, form.content, report)\n\n elif isinstance(layout, ak.contents.EmptyArray):\n assert isinstance(form, ak.forms.EmptyForm)\n\n elif isinstance(layout, (ak.contents.IndexedArray, ak.contents.IndexedOptionArray)):\n assert isinstance(form, (ak.forms.IndexedForm, ak.forms.IndexedOptionForm))\n layout.index.data.form_key = form.form_key\n layout.index.data.report = report\n _attach_report(layout.content, form.content, report)\n\n elif isinstance(layout, ak.contents.ListArray):\n assert isinstance(form, ak.forms.ListForm)\n layout.starts.data.form_key = form.form_key\n layout.starts.data.report = report\n layout.stops.data.form_key = form.form_key\n layout.stops.data.report = report\n _attach_report(layout.content, form.content, report)\n\n elif isinstance(layout, ak.contents.ListOffsetArray):\n assert isinstance(form, ak.forms.ListOffsetForm)\n layout.offsets.data.form_key = form.form_key\n layout.offsets.data.report = report\n _attach_report(layout.content, form.content, report)\n\n elif isinstance(layout, ak.contents.NumpyArray):\n assert isinstance(form, ak.forms.NumpyForm)\n layout.data.form_key = form.form_key\n layout.data.report = report\n\n elif isinstance(layout, ak.contents.RecordArray):\n assert isinstance(form, ak.forms.RecordForm)\n for x, y in zip(layout.contents, form.contents):\n _attach_report(x, y, report)\n\n elif isinstance(layout, (ak.contents.RegularArray, ak.contents.UnmaskedArray)):\n assert isinstance(form, (ak.forms.RegularForm, ak.forms.UnmaskedForm))\n _attach_report(layout.content, form.content, report)\n\n elif isinstance(layout, ak.contents.UnionArray):\n assert isinstance(form, ak.forms.UnionForm)\n layout.tags.data.form_key = form.form_key\n layout.tags.data.report = report\n layout.index.data.form_key = form.form_key\n layout.index.data.report = report\n for x, y in zip(layout.contents, form.contents):\n _attach_report(x, y, report)\n\n else:\n raise ak._errors.wrap_error(\n AssertionError(f\"unrecognized layout type {type(layout)}\")\n )\n\n\ndef typetracer_with_report(form, forget_length=True):\n layout = form.length_zero_array(highlevel=False).to_typetracer(\n forget_length=forget_length\n )\n report = TypeTracerReport()\n _attach_report(layout, form, report)\n return layout, report\n\n\nclass TypeTracerArray(NDArrayOperatorsMixin, ArrayLike):\n _dtype: numpy.dtype\n _shape: tuple[ShapeItem, ...]\n\n def __new__(cls, *args, **kwargs):\n raise wrap_error(\n TypeError(\n \"internal_error: the `TypeTracer` nplike's `TypeTracerArray` object should never be directly instantiated\"\n )\n )\n\n def __reduce__(self):\n # Fix pickling, as we ban `__new__`\n return object.__new__, (type(self),), vars(self)\n\n @classmethod\n def _new(\n cls,\n dtype: np.dtype,\n shape: tuple[ShapeItem, ...],\n form_key: str | None = None,\n report: TypeTracerReport | None = None,\n ):\n self = super().__new__(cls)\n self.form_key = form_key\n self.report = report\n\n if not isinstance(shape, tuple):\n raise wrap_error(TypeError(\"typetracer shape must be a tuple\"))\n self._shape = shape\n self._dtype = np.dtype(dtype)\n\n return self\n\n def __repr__(self):\n dtype = repr(self._dtype)\n if self.shape is None:\n shape = \"\"\n else:\n shape = \", shape=\" + repr(self._shape)\n return f\"TypeTracerArray({dtype}{shape})\"\n\n def __str__(self):\n if self.ndim == 0:\n return \"##\"\n\n else:\n return repr(self)\n\n @property\n def T(self) -> Self:\n return TypeTracerArray._new(\n 
self.dtype, self._shape[::-1], self.form_key, self.report\n )\n\n @property\n def dtype(self):\n return self._dtype\n\n @property\n def size(self) -> ShapeItem:\n size = 1\n for item in self._shape:\n if ak._util.is_integer(item):\n size *= item\n else:\n return unknown_length\n return size\n\n @property\n def shape(self) -> tuple[ShapeItem, ...]:\n self.touch_shape()\n return self._shape\n\n @property\n def form_key(self):\n return self._form_key\n\n @form_key.setter\n def form_key(self, value):\n if value is not None and not isinstance(value, str):\n raise ak._errors.wrap_error(TypeError(\"form_key must be None or a string\"))\n self._form_key = value\n\n @property\n def report(self):\n return self._report\n\n @report.setter\n def report(self, value):\n if value is not None and not isinstance(value, TypeTracerReport):\n raise ak._errors.wrap_error(\n TypeError(\"report must be None or a TypeTracerReport\")\n )\n self._report = value\n\n def touch_shape(self):\n if self._report is not None:\n self._report.touch_shape(self._form_key)\n\n def touch_data(self):\n if self._report is not None:\n self._report.touch_data(self._form_key)\n\n @property\n def strides(self):\n self.touch_shape()\n out = (self._dtype.itemsize,)\n for x in self._shape[:0:-1]:\n out = (x * out[0], *out)\n return out\n\n @property\n def nplike(self) -> TypeTracer:\n return TypeTracer.instance()\n\n @property\n def ndim(self) -> int:\n self.touch_shape()\n return len(self._shape)\n\n def view(self, dtype: np.dtype) -> Self:\n if self.itemsize != np.dtype(dtype).itemsize and self._shape[-1] is not None:\n last = int(\n round(self._shape[-1] * self.itemsize / np.dtype(dtype).itemsize)\n )\n shape = self._shape[:-1] + (last,)\n else:\n shape = self._shape\n dtype = np.dtype(dtype)\n return self._new(\n dtype, shape=shape, form_key=self._form_key, report=self._report\n )\n\n def forget_length(self) -> Self:\n return self._new(\n self._dtype,\n (unknown_length,) + self._shape[1:],\n self._form_key,\n self._report,\n )\n\n def __iter__(self):\n raise ak._errors.wrap_error(\n AssertionError(\n \"bug in Awkward Array: attempt to convert TypeTracerArray into a concrete array\"\n )\n )\n\n def __array__(self, dtype=None):\n raise ak._errors.wrap_error(\n AssertionError(\n \"bug in Awkward Array: attempt to convert TypeTracerArray into a concrete array\"\n )\n )\n\n @property\n def itemsize(self):\n return self._dtype.itemsize\n\n class _CTypes:\n data = 0\n\n @property\n def ctypes(self):\n return self._CTypes\n\n def __len__(self):\n raise ak._errors.wrap_error(\n AssertionError(\n \"bug in Awkward Array: attempt to get length of a TypeTracerArray\"\n )\n )\n\n def __getitem__(\n self,\n key: SupportsIndex\n | slice\n | Ellipsis\n | tuple[SupportsIndex | slice | Ellipsis | ArrayLike, ...]\n | ArrayLike,\n ) -> Self:\n if not isinstance(key, tuple):\n key = (key,)\n\n # 1. 
Validate slice items\n has_seen_ellipsis = 0\n n_basic_non_ellipsis = 0\n n_advanced = 0\n for item in key:\n # Basic indexing\n if isinstance(item, (slice, int)) or is_unknown_integer(item):\n n_basic_non_ellipsis += 1\n # Advanced indexing\n elif isinstance(item, TypeTracerArray) and (\n np.issubdtype(item.dtype, np.integer)\n or np.issubdtype(item.dtype, np.bool_)\n ):\n n_advanced += 1\n # Basic ellipsis\n elif item is Ellipsis:\n if not has_seen_ellipsis:\n has_seen_ellipsis = True\n else:\n raise wrap_error(\n NotImplementedError(\n \"only one ellipsis value permitted for advanced index\"\n )\n )\n # Basic newaxis\n elif item is np.newaxis:\n pass\n else:\n raise wrap_error(\n NotImplementedError(\n \"only integer, unknown scalar, slice, ellipsis, or array indices are permitted\"\n )\n )\n\n # 2. Normalise Ellipsis and boolean arrays\n key_parts = []\n for item in key:\n if item is Ellipsis:\n n_missing_dims = self.ndim - n_advanced - n_basic_non_ellipsis\n key_parts.extend((slice(None),) * n_missing_dims)\n elif is_unknown_array(item) and np.issubdtype(item, np.bool_):\n key_parts.append(self.nplike.nonzero(item)[0])\n else:\n key_parts.append(item)\n key = tuple(key_parts)\n\n # 3. Apply Indexing\n advanced_is_at_front = False\n previous_item_is_basic = True\n advanced_shapes = []\n adjacent_advanced_shape = []\n result_shape_parts = []\n iter_shape = iter(self.shape)\n for item in key:\n # New axes don't reference existing dimensions\n if item is np.newaxis:\n result_shape_parts.append((1,))\n previous_item_is_basic = True\n # Otherwise, consume the dimension\n else:\n dimension_length = next(iter_shape)\n # Advanced index\n if n_advanced and (\n isinstance(item, int)\n or is_unknown_integer(item)\n or is_unknown_array(item)\n ):\n if is_unknown_scalar(item):\n item = self.nplike.promote_scalar(item)\n\n # If this is the first advanced index, insert the location\n if not advanced_shapes:\n result_shape_parts.append(adjacent_advanced_shape)\n # If a previous item was basic and we have an advanced shape\n # we have a split index\n elif previous_item_is_basic:\n advanced_is_at_front = True\n\n advanced_shapes.append(item.shape)\n previous_item_is_basic = False\n # Slice\n elif isinstance(item, slice):\n (\n start,\n stop,\n step,\n slice_length,\n ) = self.nplike.derive_slice_for_length(item, dimension_length)\n result_shape_parts.append((slice_length,))\n previous_item_is_basic = True\n # Integer\n elif isinstance(item, int) or is_unknown_integer(item):\n item = self.nplike.promote_scalar(item)\n\n if is_unknown_length(dimension_length) or is_unknown_integer(item):\n continue\n\n if not 0 <= item < dimension_length:\n raise wrap_error(\n NotImplementedError(\"integer index out of bounds\")\n )\n\n advanced_shape = self.nplike.broadcast_shapes(*advanced_shapes)\n if advanced_is_at_front:\n result_shape_parts.insert(0, advanced_shape)\n else:\n adjacent_advanced_shape[:] = advanced_shape\n\n broadcast_shape = tuple(i for p in result_shape_parts for i in p)\n result_shape = broadcast_shape + tuple(iter_shape)\n\n return self._new(\n self._dtype,\n result_shape,\n self._form_key,\n self._report,\n )\n\n def __setitem__(\n self,\n key: SupportsIndex\n | slice\n | Ellipsis\n | tuple[SupportsIndex | slice | Ellipsis | ArrayLike, ...]\n | ArrayLike,\n value: int | float | bool | complex | ArrayLike,\n ):\n existing_value = self.__getitem__(key)\n if isinstance(value, TypeTracerArray) and value.ndim > existing_value.ndim:\n raise wrap_error(ValueError(\"cannot assign shape larger than 
destination\"))\n\n def copy(self):\n self.touch_data()\n return self\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n # raise ak._errors.wrap_error(\n # RuntimeError(\n # \"TypeTracerArray objects should not be used directly with ufuncs\"\n # )\n # )\n kwargs.pop(\"out\", None)\n\n if method != \"__call__\" or len(inputs) == 0:\n raise ak._errors.wrap_error(NotImplementedError)\n\n if len(kwargs) > 0:\n raise ak._errors.wrap_error(\n ValueError(\"TypeTracerArray does not support kwargs for ufuncs\")\n )\n return self.nplike._apply_ufunc(ufunc, *inputs)\n\n def __bool__(self) -> bool:\n raise ak._errors.wrap_error(RuntimeError(\"cannot realise an unknown value\"))\n\n def __int__(self) -> int:\n raise ak._errors.wrap_error(RuntimeError(\"cannot realise an unknown value\"))\n\n def __index__(self) -> int:\n raise ak._errors.wrap_error(RuntimeError(\"cannot realise an unknown value\"))\n\n\ndef _scalar_type_of(obj) -> numpy.dtype:\n if is_unknown_scalar(obj):\n return obj.dtype\n else:\n return numpy.obj2sctype(obj)\n\n\ndef try_touch_data(array):\n if isinstance(array, TypeTracerArray):\n array.touch_data()\n\n\ndef try_touch_shape(array):\n if isinstance(array, TypeTracerArray):\n array.touch_shape()\n\n\nclass TypeTracer(NumpyLike):\n known_data: Final = False\n is_eager: Final = True\n\n def _apply_ufunc(self, ufunc, *inputs):\n for x in inputs:\n try_touch_data(x)\n\n broadcasted = self.broadcast_arrays(*inputs)\n placeholders = [numpy.empty(0, x.dtype) for x in broadcasted]\n\n result = ufunc(*placeholders)\n if isinstance(result, numpy.ndarray):\n return TypeTracerArray._new(result.dtype, shape=broadcasted[0].shape)\n elif isinstance(result, tuple):\n return (\n TypeTracerArray._new(x.dtype, shape=b.shape)\n for x, b in zip(result, broadcasted)\n )\n else:\n raise wrap_error(TypeError)\n\n def to_rectilinear(self, array, *args, **kwargs):\n try_touch_shape(array)\n raise ak._errors.wrap_error(NotImplementedError)\n\n @property\n def ma(self):\n raise ak._errors.wrap_error(NotImplementedError)\n\n @property\n def char(self):\n raise ak._errors.wrap_error(NotImplementedError)\n\n @property\n def ndarray(self):\n return TypeTracerArray\n\n ############################ array creation\n\n def asarray(\n self,\n obj,\n *,\n dtype: numpy.dtype | None = None,\n copy: bool | None = None,\n ) -> TypeTracerArray:\n try_touch_data(obj)\n\n if isinstance(obj, ak.index.Index):\n obj = obj.data\n\n if isinstance(obj, TypeTracerArray):\n form_key = obj._form_key\n report = obj._report\n\n if dtype is None:\n return obj\n elif copy is False and dtype != obj.dtype:\n raise ak._errors.wrap_error(\n ValueError(\n \"asarray was called with copy=False for an array of a different dtype\"\n )\n )\n else:\n return TypeTracerArray._new(\n dtype, obj.shape, form_key=form_key, report=report\n )\n else:\n # Convert NumPy generics to scalars\n if isinstance(obj, np.generic):\n obj = numpy.asarray(obj)\n\n # Support array-like objects\n if hasattr(obj, \"shape\") and hasattr(obj, \"dtype\"):\n if obj.dtype.kind == \"S\":\n raise ak._errors.wrap_error(\n TypeError(\"TypeTracerArray cannot be created from strings\")\n )\n elif copy is False and dtype != obj.dtype:\n raise ak._errors.wrap_error(\n ValueError(\n \"asarray was called with copy=False for an array of a different dtype\"\n )\n )\n else:\n return TypeTracerArray._new(obj.dtype, obj.shape)\n # Python objects\n elif isinstance(obj, (Number, bool)):\n as_array = numpy.asarray(obj)\n return TypeTracerArray._new(as_array.dtype, ())\n\n elif 
is_non_string_like_sequence(obj):\n assert not any(is_non_string_like_sequence(x) for x in obj)\n shape = (len(obj),)\n result_type = numpy.result_type(*obj) # TODO: result_type\n return TypeTracerArray._new(result_type, shape)\n else:\n raise wrap_error(TypeError)\n\n def ascontiguousarray(\n self, x: ArrayLike, *, dtype: numpy.dtype | None = None\n ) -> TypeTracerArray:\n try_touch_data(x)\n return TypeTracerArray._new(dtype or x.dtype, shape=x.shape)\n\n def frombuffer(\n self, buffer, *, dtype: np.dtype | None = None, count: int = -1\n ) -> TypeTracerArray:\n for x in (buffer, count):\n try_touch_data(x)\n raise ak._errors.wrap_error(NotImplementedError)\n\n def zeros(\n self, shape: ShapeItem | tuple[ShapeItem, ...], *, dtype: np.dtype | None = None\n ) -> TypeTracerArray:\n if not isinstance(shape, tuple):\n shape = (shape,)\n return TypeTracerArray._new(dtype, shape)\n\n def ones(\n self, shape: ShapeItem | tuple[ShapeItem, ...], *, dtype: np.dtype | None = None\n ) -> TypeTracerArray:\n if not isinstance(shape, tuple):\n shape = (shape,)\n return TypeTracerArray._new(dtype, shape)\n\n def empty(\n self, shape: ShapeItem | tuple[ShapeItem, ...], *, dtype: np.dtype | None = None\n ) -> TypeTracerArray:\n if not isinstance(shape, tuple):\n shape = (shape,)\n return TypeTracerArray._new(dtype, shape)\n\n def full(\n self,\n shape: ShapeItem | tuple[ShapeItem, ...],\n fill_value,\n *,\n dtype: np.dtype | None = None,\n ) -> TypeTracerArray:\n if not isinstance(shape, tuple):\n shape = (shape,)\n dtype = _scalar_type_of(fill_value) if dtype is None else dtype\n return TypeTracerArray._new(dtype, shape)\n\n def zeros_like(\n self, x: ArrayLike, *, dtype: np.dtype | None = None\n ) -> TypeTracerArray:\n try_touch_shape(x)\n if is_unknown_scalar(x):\n return TypeTracerArray._new(dtype or x.dtype, shape=())\n else:\n return TypeTracerArray._new(dtype or x.dtype, shape=x.shape)\n\n def ones_like(\n self, x: ArrayLike, *, dtype: np.dtype | None = None\n ) -> TypeTracerArray:\n try_touch_shape(x)\n return self.zeros_like(x, dtype=dtype)\n\n def full_like(\n self, x: ArrayLike, fill_value, *, dtype: np.dtype | None = None\n ) -> TypeTracerArray:\n try_touch_shape(x)\n return self.zeros_like(x, dtype=dtype)\n\n def arange(\n self,\n start: float | int,\n stop: float | int | None = None,\n step: float | int = 1,\n *,\n dtype: np.dtype | None = None,\n ) -> TypeTracerArray:\n try_touch_data(start)\n try_touch_data(stop)\n try_touch_data(step)\n if stop is None:\n start, stop = 0, start\n\n if (\n ak._util.is_integer(start)\n and ak._util.is_integer(stop)\n and ak._util.is_integer(step)\n ):\n length = max(0, (stop - start + (step - (1 if step > 0 else -1))) // step)\n else:\n length = unknown_length\n\n default_int_type = np.int64 if (ak._util.win or ak._util.bits32) else np.int32\n return TypeTracerArray._new(dtype or default_int_type, (length,))\n\n def meshgrid(\n self, *arrays: ArrayLike, indexing: Literal[\"xy\", \"ij\"] = \"xy\"\n ) -> list[TypeTracerArray]:\n for x in arrays:\n try_touch_data(x)\n raise ak._errors.wrap_error(NotImplementedError)\n\n ############################ testing\n\n def array_equal(\n self, x1: ArrayLike, x2: ArrayLike, *, equal_nan: bool = False\n ) -> TypeTracerArray:\n try_touch_data(x1)\n try_touch_data(x2)\n return TypeTracerArray._new(np.bool_, shape=())\n\n def searchsorted(\n self,\n x: ArrayLike,\n values: ArrayLike,\n *,\n side: Literal[\"left\", \"right\"] = \"left\",\n sorter: ArrayLike | None = None,\n ) -> TypeTracerArray:\n try_touch_data(x)\n 
try_touch_data(values)\n try_touch_data(sorter)\n if (\n not (\n is_unknown_length(x.size)\n or sorter is None\n or is_unknown_length(sorter.size)\n )\n and x.size != sorter.size\n ):\n raise wrap_error(ValueError(\"x.size should equal sorter.size\"))\n\n return TypeTracerArray._new(x.dtype, (values.size,))\n\n ############################ manipulation\n\n def promote_scalar(self, obj) -> TypeTracerArray:\n if is_unknown_scalar(obj):\n return obj\n elif isinstance(obj, (Number, bool)):\n # TODO: statically define these types for all nplikes\n as_array = numpy.asarray(obj)\n return TypeTracerArray._new(as_array.dtype, ())\n else:\n raise wrap_error(TypeError(f\"expected scalar type, received {obj}\"))\n\n def shape_item_as_index(self, x1: ShapeItem) -> IndexType:\n if x1 is unknown_length:\n return TypeTracerArray._new(np.int64, shape=())\n elif isinstance(x1, int):\n return x1\n else:\n raise wrap_error(TypeError(f\"expected None or int type, received {x1}\"))\n\n def index_as_shape_item(self, x1: IndexType) -> ShapeItem:\n if is_unknown_scalar(x1) and np.issubdtype(x1.dtype, np.integer):\n return unknown_length\n else:\n return int(x1)\n\n def regularize_index_for_length(\n self, index: IndexType, length: ShapeItem\n ) -> IndexType:\n \"\"\"\n Args:\n index: index value\n length: length of array\n\n Returns regularized index that is guaranteed to be in-bounds.\n \"\"\"\n # Unknown indices are already regularized\n if is_unknown_scalar(index):\n return index\n\n # Without a known length the result must be unknown, as we cannot regularize the index\n length_scalar = self.shape_item_as_index(length)\n if length is unknown_length:\n return length_scalar\n\n # We have known length and index\n if index < 0:\n index = index + length\n\n if 0 <= index < length:\n return index\n else:\n raise wrap_error(\n IndexError(f\"index value out of bounds (0, {length}): {index}\")\n )\n\n def derive_slice_for_length(\n self, slice_: slice, length: ShapeItem\n ) -> tuple[IndexType, IndexType, IndexType, ShapeItem]:\n \"\"\"\n Args:\n slice_: normalized slice object\n length: length of layout\n\n Return a tuple of (start, stop, step, length) indices into a layout, suitable for\n `_getitem_range` (if step == 1). 
Normalize lengths to fit length of array,\n and for arrays with unknown lengths, these offsets become none.\n \"\"\"\n start = slice_.start\n stop = slice_.stop\n step = slice_.step\n\n # Unknown lengths mean that the slice index is unknown\n length_scalar = self.shape_item_as_index(length)\n if length is unknown_length:\n return length_scalar, length_scalar, step, length\n else:\n # Normalise `None` values\n if step is None:\n step = 1\n\n if start is None:\n # `step` is unknown → `start` is unknown\n if is_unknown_scalar(step):\n start = step\n elif step < 0:\n start = length_scalar - 1\n else:\n start = 0\n # Normalise negative integers\n elif not is_unknown_scalar(start):\n if start < 0:\n start = start + length_scalar\n # Clamp values into length bounds\n if is_unknown_scalar(length_scalar):\n start = length_scalar\n else:\n start = min(max(start, 0), length_scalar)\n\n if stop is None:\n # `step` is unknown → `stop` is unknown\n if is_unknown_scalar(step):\n stop = step\n elif step < 0:\n stop = -1\n else:\n stop = length_scalar\n # Normalise negative integers\n elif not is_unknown_scalar(stop):\n if stop < 0:\n stop = stop + length_scalar\n # Clamp values into length bounds\n if is_unknown_scalar(length_scalar):\n stop = length_scalar\n else:\n stop = min(max(stop, 0), length_scalar)\n\n # Compute the length of the slice for downstream use\n slice_length, remainder = divmod((stop - start), step)\n if not is_unknown_scalar(slice_length):\n # Take ceiling of division\n if remainder != 0:\n slice_length += 1\n\n slice_length = max(0, slice_length)\n\n return start, stop, step, self.index_as_shape_item(slice_length)\n\n def broadcast_shapes(self, *shapes: tuple[ShapeItem, ...]) -> tuple[ShapeItem, ...]:\n ndim = max([len(s) for s in shapes], default=0)\n result: list[ShapeItem] = [1] * ndim\n\n for shape in shapes:\n # Right broadcasting\n missing_dim = ndim - len(shape)\n if missing_dim > 0:\n head: tuple[int, ...] 
= (1,) * missing_dim\n shape = head + shape\n\n # Fail if we absolutely know the shapes aren't compatible\n for i, item in enumerate(shape):\n # Item is unknown, take it\n if is_unknown_length(item):\n result[i] = item\n # Existing item is unknown, keep it\n elif is_unknown_length(result[i]):\n continue\n # Items match, continue\n elif result[i] == item:\n continue\n # Item is broadcastable, take existing\n elif item == 1:\n continue\n # Existing is broadcastable, take it\n elif result[i] == 1:\n result[i] = item\n else:\n raise wrap_error(\n ValueError(\n \"known component of shape does not match broadcast result\"\n )\n )\n return tuple(result)\n\n def broadcast_arrays(self, *arrays: ArrayLike) -> list[TypeTracerArray]:\n for x in arrays:\n try_touch_data(x)\n\n if len(arrays) == 0:\n return []\n\n all_arrays = []\n for x in arrays:\n if not hasattr(x, \"shape\"):\n x = self.promote_scalar(x)\n all_arrays.append(x)\n\n shapes = [x.shape for x in all_arrays]\n shape = self.broadcast_shapes(*shapes)\n\n return [TypeTracerArray._new(x.dtype, shape=shape) for x in all_arrays]\n\n def broadcast_to(\n self, x: ArrayLike, shape: tuple[ShapeItem, ...]\n ) -> TypeTracerArray:\n raise ak._errors.wrap_error(NotImplementedError)\n\n def reshape(\n self, x: ArrayLike, shape: tuple[ShapeItem, ...], *, copy: bool | None = None\n ) -> TypeTracerArray:\n x.touch_shape()\n\n size = x.size\n\n # Validate new shape to ensure that it only contains at-most one placeholder\n n_placeholders = 0\n new_size = 1\n for item in shape:\n if item is unknown_length:\n # Size is no longer defined\n new_size = unknown_length\n elif not ak._util.is_integer(item):\n raise wrap_error(\n ValueError(\n \"shape must be comprised of positive integers, -1 (for placeholders), or unknown lengths\"\n )\n )\n elif item == -1:\n if n_placeholders == 1:\n raise wrap_error(\n ValueError(\"only one placeholder dimension permitted per shape\")\n )\n n_placeholders += 1\n elif item == 0:\n raise wrap_error(ValueError(\"shape items cannot be zero\"))\n else:\n new_size *= item\n\n # Populate placeholders\n new_shape = [*shape]\n for i, item in enumerate(shape):\n if item == -1:\n new_shape[i] = size // new_size\n break\n\n return TypeTracerArray._new(x.dtype, tuple(new_shape), x.form_key, x.report)\n\n def cumsum(\n self,\n x: ArrayLike,\n *,\n axis: int | None = None,\n maybe_out: ArrayLike | None = None,\n ) -> TypeTracerArray:\n try_touch_data(x)\n raise ak._errors.wrap_error(NotImplementedError)\n\n def nonzero(self, x: ArrayLike) -> tuple[TypeTracerArray, ...]:\n # array\n try_touch_data(x)\n return (TypeTracerArray._new(np.int64, (unknown_length,)),) * len(x.shape)\n\n def where(\n self, condition: ArrayLike, x1: ArrayLike, x2: ArrayLike\n ) -> TypeTracerArray:\n condition, x1, x2 = self.broadcast_arrays(condition, x1, x2)\n result_dtype = numpy.result_type(x1, x2)\n return TypeTracerArray._new(result_dtype, shape=condition.shape)\n\n def unique_values(self, x: ArrayLike) -> TypeTracerArray:\n try_touch_data(x)\n return TypeTracerArray._new(x.dtype, shape=(unknown_length,))\n\n def concat(self, arrays, *, axis: int | None = 0) -> TypeTracerArray:\n if axis is None:\n assert all(x.ndim == 1 for x in arrays)\n elif axis != 0:\n raise ak._errors.wrap_error(NotImplementedError(\"concat with axis != 0\"))\n for x in arrays:\n try_touch_data(x)\n\n inner_shape = None\n emptyarrays = []\n for x in arrays:\n if inner_shape is None:\n inner_shape = x.shape[1:]\n elif inner_shape != x.shape[1:]:\n raise ak._errors.wrap_error(\n ValueError(\n 
\"inner dimensions don't match in concatenate: {} vs {}\".format(\n inner_shape, x.shape[1:]\n )\n )\n )\n emptyarrays.append(_emptyarray(x))\n\n if inner_shape is None:\n raise ak._errors.wrap_error(\n ValueError(\"need at least one array to concatenate\")\n )\n\n return TypeTracerArray._new(\n numpy.concatenate(emptyarrays).dtype, (unknown_length, *inner_shape)\n )\n\n def repeat(\n self,\n x: ArrayLike,\n repeats: ArrayLike | int,\n *,\n axis: int | None = None,\n ) -> TypeTracerArray:\n try_touch_data(x)\n try_touch_data(repeats)\n raise ak._errors.wrap_error(NotImplementedError)\n\n def tile(self, x: ArrayLike, reps: int) -> TypeTracerArray:\n try_touch_data(x)\n raise ak._errors.wrap_error(NotImplementedError)\n\n def stack(\n self,\n arrays: list[ArrayLike] | tuple[ArrayLike, ...],\n *,\n axis: int = 0,\n ) -> TypeTracerArray:\n for x in arrays:\n try_touch_data(x)\n raise ak._errors.wrap_error(NotImplementedError)\n\n def packbits(\n self,\n x: ArrayLike,\n *,\n axis: int | None = None,\n bitorder: Literal[\"big\", \"little\"] = \"big\",\n ) -> TypeTracerArray:\n try_touch_data(x)\n raise ak._errors.wrap_error(NotImplementedError)\n\n def unpackbits(\n self,\n x: ArrayLike,\n *,\n axis: int | None = None,\n count: int | None = None,\n bitorder: Literal[\"big\", \"little\"] = \"big\",\n ) -> TypeTracerArray:\n try_touch_data(x)\n raise ak._errors.wrap_error(NotImplementedError)\n\n ############################ ufuncs\n\n def add(\n self,\n x1: ArrayLike,\n x2: ArrayLike,\n maybe_out: ArrayLike | None = None,\n ) -> TypeTracerArray:\n return self._apply_ufunc(numpy.add, x1, x2)\n\n def logical_and(\n self,\n x1: ArrayLike,\n x2: ArrayLike,\n maybe_out: ArrayLike | None = None,\n ) -> TypeTracerArray:\n return self._apply_ufunc(numpy.logical_and, x1, x2)\n\n def logical_or(\n self,\n x1: ArrayLike,\n x2: ArrayLike,\n maybe_out: ArrayLike | None = None,\n ) -> TypeTracerArray:\n return self._apply_ufunc(numpy.logical_or, x1, x2)\n\n def logical_not(\n self, x: ArrayLike, maybe_out: ArrayLike | None = None\n ) -> TypeTracerArray:\n return self._apply_ufunc(numpy.logical_not, x)\n\n def sqrt(self, x: ArrayLike, maybe_out: ArrayLike | None = None) -> TypeTracerArray:\n return self._apply_ufunc(numpy.sqrt, x)\n\n def exp(self, x: ArrayLike, maybe_out: ArrayLike | None = None) -> TypeTracerArray:\n return self._apply_ufunc(numpy.exp, x)\n\n def divide(\n self,\n x1: ArrayLike,\n x2: ArrayLike,\n maybe_out: ArrayLike | None = None,\n ) -> TypeTracerArray:\n return self._apply_ufunc(numpy.divide, x1, x2)\n\n ############################ almost-ufuncs\n\n def nan_to_num(\n self,\n x: ArrayLike,\n *,\n copy: bool = True,\n nan: int | float | None = 0.0,\n posinf: int | float | None = None,\n neginf: int | float | None = None,\n ) -> TypeTracerArray:\n try_touch_data(x)\n raise ak._errors.wrap_error(NotImplementedError)\n\n def isclose(\n self,\n x1: ArrayLike,\n x2: ArrayLike,\n *,\n rtol: float = 1e-5,\n atol: float = 1e-8,\n equal_nan: bool = False,\n ) -> TypeTracerArray:\n try_touch_data(x1)\n try_touch_data(x2)\n out, _ = self.broadcast_arrays(x1, x2)\n return TypeTracerArray._new(np.bool_, shape=out.shape)\n\n def isnan(self, x: ArrayLike) -> TypeTracerArray:\n try_touch_data(x)\n return TypeTracerArray._new(np.bool_, shape=x.shape)\n\n ############################ reducers\n\n def all(\n self,\n x: ArrayLike,\n *,\n axis: int | tuple[int, ...] 
| None = None,\n keepdims: bool = False,\n maybe_out: ArrayLike | None = None,\n ) -> TypeTracerArray:\n try_touch_data(x)\n if axis is None:\n return TypeTracerArray._new(np.bool_, shape=())\n else:\n raise ak._errors.wrap_error(NotImplementedError)\n\n def any(\n self,\n x: ArrayLike,\n *,\n axis: int | tuple[int, ...] | None = None,\n keepdims: bool = False,\n maybe_out: ArrayLike | None = None,\n ) -> TypeTracerArray:\n try_touch_data(x)\n if axis is None:\n return TypeTracerArray._new(np.bool_, shape=())\n else:\n raise ak._errors.wrap_error(NotImplementedError)\n\n def count_nonzero(\n self, x: ArrayLike, *, axis: int | None = None, keepdims: bool = False\n ) -> TypeTracerArray:\n try_touch_data(x)\n raise ak._errors.wrap_error(NotImplementedError)\n\n def min(\n self,\n x: ArrayLike,\n *,\n axis: int | tuple[int, ...] | None = None,\n keepdims: bool = False,\n maybe_out: ArrayLike | None = None,\n ) -> TypeTracerArray:\n try_touch_data(x)\n raise ak._errors.wrap_error(NotImplementedError)\n\n def max(\n self,\n x: ArrayLike,\n *,\n axis: int | tuple[int, ...] | None = None,\n keepdims: bool = False,\n maybe_out: ArrayLike | None = None,\n ) -> TypeTracerArray:\n try_touch_data(x)\n raise ak._errors.wrap_error(NotImplementedError)\n\n def array_str(\n self,\n x: ArrayLike,\n *,\n max_line_width: int | None = None,\n precision: int | None = None,\n suppress_small: bool | None = None,\n ):\n try_touch_data(x)\n return \"[## ... ##]\"\n\n def astype(\n self, x: ArrayLike, dtype: numpy.dtype, *, copy: bool | None = True\n ) -> TypeTracerArray:\n x.touch_data()\n return TypeTracerArray._new(np.dtype(dtype), x.shape)\n\n def can_cast(self, from_: np.dtype | ArrayLike, to: np.dtype | ArrayLike) -> bool:\n return numpy.can_cast(from_, to, casting=\"same_kind\")\n\n @classmethod\n def is_own_array(cls, obj) -> bool:\n return isinstance(obj, TypeTracerArray)\n\n def is_c_contiguous(self, x: ArrayLike) -> bool:\n return True\n", "path": "src/awkward/_nplikes/typetracer.py" } ]
[ { "content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\nfrom __future__ import annotations\n\nfrom numbers import Number\n\nimport numpy\n\nimport awkward as ak\nfrom awkward._errors import wrap_error\nfrom awkward._nplikes.numpylike import ArrayLike, IndexType, NumpyLike, NumpyMetadata\nfrom awkward._nplikes.shape import ShapeItem, unknown_length\nfrom awkward._util import NDArrayOperatorsMixin, is_non_string_like_sequence\nfrom awkward.typing import (\n Any,\n Final,\n Literal,\n Self,\n SupportsIndex,\n TypeVar,\n)\n\nnp = NumpyMetadata.instance()\n\n\ndef is_unknown_length(array: Any) -> bool:\n return array is unknown_length\n\n\ndef is_unknown_scalar(array: Any) -> bool:\n return isinstance(array, TypeTracerArray) and array.ndim == 0\n\n\ndef is_unknown_integer(array: Any) -> bool:\n return is_unknown_scalar(array) and np.issubdtype(array.dtype, np.integer)\n\n\ndef is_unknown_array(array: Any) -> bool:\n return isinstance(array, TypeTracerArray) and array.ndim > 0\n\n\nT = TypeVar(\"T\")\nS = TypeVar(\"S\")\n\n\ndef ensure_known_scalar(value: T, default: S) -> T | S:\n assert not is_unknown_scalar(default)\n return default if is_unknown_scalar(value) else value\n\n\ndef _emptyarray(x):\n if is_unknown_scalar(x):\n return numpy.empty(0, x._dtype)\n elif hasattr(x, \"dtype\"):\n return numpy.empty(0, x.dtype)\n else:\n return numpy.empty(0, numpy.array(x).dtype)\n\n\nclass MaybeNone:\n def __init__(self, content):\n self._content = content\n\n @property\n def content(self):\n return self._content\n\n def __eq__(self, other):\n if isinstance(other, MaybeNone):\n return self._content == other._content\n else:\n return False\n\n def __repr__(self):\n return f\"MaybeNone({self._content!r})\"\n\n def __str__(self):\n return f\"?{self._content}\"\n\n\nclass OneOf:\n def __init__(self, contents):\n self._contents = contents\n\n @property\n def contents(self):\n return self._contents\n\n def __eq__(self, other):\n if isinstance(other, OneOf):\n return set(self._contents) == set(other._contents)\n else:\n return False\n\n def __repr__(self):\n return f\"OneOf({self._contents!r})\"\n\n def __str__(self):\n return (\n f\"oneof-{'-'.join(str(x).replace('unknown-', '') for x in self._contents)}\"\n )\n\n\nclass TypeTracerReport:\n def __init__(self):\n # maybe the order will be useful information\n self._shape_touched_set = set()\n self._shape_touched = []\n self._data_touched_set = set()\n self._data_touched = []\n\n def __repr__(self):\n return f\"<TypeTracerReport with {len(self._shape_touched)} shape_touched, {len(self._data_touched)} data_touched>\"\n\n @property\n def shape_touched(self):\n return self._shape_touched\n\n @property\n def data_touched(self):\n return self._data_touched\n\n def touch_shape(self, label):\n if label not in self._shape_touched_set:\n self._shape_touched_set.add(label)\n self._shape_touched.append(label)\n\n def touch_data(self, label):\n if label not in self._data_touched_set:\n # touching data implies that the shape will be touched as well\n # implemented here so that the codebase doesn't need to be filled\n # with calls to both methods everywhere\n self._shape_touched_set.add(label)\n self._shape_touched.append(label)\n self._data_touched_set.add(label)\n self._data_touched.append(label)\n\n\ndef _attach_report(layout, form, report: TypeTracerReport):\n if isinstance(layout, (ak.contents.BitMaskedArray, ak.contents.ByteMaskedArray)):\n assert isinstance(form, (ak.forms.BitMaskedForm, ak.forms.ByteMaskedForm))\n 
layout.mask.data.form_key = form.form_key\n layout.mask.data.report = report\n _attach_report(layout.content, form.content, report)\n\n elif isinstance(layout, ak.contents.EmptyArray):\n assert isinstance(form, ak.forms.EmptyForm)\n\n elif isinstance(layout, (ak.contents.IndexedArray, ak.contents.IndexedOptionArray)):\n assert isinstance(form, (ak.forms.IndexedForm, ak.forms.IndexedOptionForm))\n layout.index.data.form_key = form.form_key\n layout.index.data.report = report\n _attach_report(layout.content, form.content, report)\n\n elif isinstance(layout, ak.contents.ListArray):\n assert isinstance(form, ak.forms.ListForm)\n layout.starts.data.form_key = form.form_key\n layout.starts.data.report = report\n layout.stops.data.form_key = form.form_key\n layout.stops.data.report = report\n _attach_report(layout.content, form.content, report)\n\n elif isinstance(layout, ak.contents.ListOffsetArray):\n assert isinstance(form, ak.forms.ListOffsetForm)\n layout.offsets.data.form_key = form.form_key\n layout.offsets.data.report = report\n _attach_report(layout.content, form.content, report)\n\n elif isinstance(layout, ak.contents.NumpyArray):\n assert isinstance(form, ak.forms.NumpyForm)\n layout.data.form_key = form.form_key\n layout.data.report = report\n\n elif isinstance(layout, ak.contents.RecordArray):\n assert isinstance(form, ak.forms.RecordForm)\n for x, y in zip(layout.contents, form.contents):\n _attach_report(x, y, report)\n\n elif isinstance(layout, (ak.contents.RegularArray, ak.contents.UnmaskedArray)):\n assert isinstance(form, (ak.forms.RegularForm, ak.forms.UnmaskedForm))\n _attach_report(layout.content, form.content, report)\n\n elif isinstance(layout, ak.contents.UnionArray):\n assert isinstance(form, ak.forms.UnionForm)\n layout.tags.data.form_key = form.form_key\n layout.tags.data.report = report\n layout.index.data.form_key = form.form_key\n layout.index.data.report = report\n for x, y in zip(layout.contents, form.contents):\n _attach_report(x, y, report)\n\n else:\n raise ak._errors.wrap_error(\n AssertionError(f\"unrecognized layout type {type(layout)}\")\n )\n\n\ndef typetracer_with_report(form, forget_length=True):\n layout = form.length_zero_array(highlevel=False).to_typetracer(\n forget_length=forget_length\n )\n report = TypeTracerReport()\n _attach_report(layout, form, report)\n return layout, report\n\n\nclass TypeTracerArray(NDArrayOperatorsMixin, ArrayLike):\n _dtype: numpy.dtype\n _shape: tuple[ShapeItem, ...]\n\n def __new__(cls, *args, **kwargs):\n raise wrap_error(\n TypeError(\n \"internal_error: the `TypeTracer` nplike's `TypeTracerArray` object should never be directly instantiated\"\n )\n )\n\n def __reduce__(self):\n # Fix pickling, as we ban `__new__`\n return object.__new__, (type(self),), vars(self)\n\n @classmethod\n def _new(\n cls,\n dtype: np.dtype,\n shape: tuple[ShapeItem, ...],\n form_key: str | None = None,\n report: TypeTracerReport | None = None,\n ):\n self = super().__new__(cls)\n self.form_key = form_key\n self.report = report\n\n if not isinstance(shape, tuple):\n raise wrap_error(TypeError(\"typetracer shape must be a tuple\"))\n self._shape = shape\n self._dtype = np.dtype(dtype)\n\n return self\n\n def __repr__(self):\n dtype = repr(self._dtype)\n if self.shape is None:\n shape = \"\"\n else:\n shape = \", shape=\" + repr(self._shape)\n return f\"TypeTracerArray({dtype}{shape})\"\n\n def __str__(self):\n if self.ndim == 0:\n return \"##\"\n\n else:\n return repr(self)\n\n @property\n def T(self) -> Self:\n return TypeTracerArray._new(\n 
self.dtype, self._shape[::-1], self.form_key, self.report\n )\n\n @property\n def dtype(self):\n return self._dtype\n\n @property\n def size(self) -> ShapeItem:\n size = 1\n for item in self._shape:\n if ak._util.is_integer(item):\n size *= item\n else:\n return unknown_length\n return size\n\n @property\n def shape(self) -> tuple[ShapeItem, ...]:\n self.touch_shape()\n return self._shape\n\n @property\n def form_key(self):\n return self._form_key\n\n @form_key.setter\n def form_key(self, value):\n if value is not None and not isinstance(value, str):\n raise ak._errors.wrap_error(TypeError(\"form_key must be None or a string\"))\n self._form_key = value\n\n @property\n def report(self):\n return self._report\n\n @report.setter\n def report(self, value):\n if value is not None and not isinstance(value, TypeTracerReport):\n raise ak._errors.wrap_error(\n TypeError(\"report must be None or a TypeTracerReport\")\n )\n self._report = value\n\n def touch_shape(self):\n if self._report is not None:\n self._report.touch_shape(self._form_key)\n\n def touch_data(self):\n if self._report is not None:\n self._report.touch_data(self._form_key)\n\n @property\n def strides(self):\n self.touch_shape()\n out = (self._dtype.itemsize,)\n for x in self._shape[:0:-1]:\n out = (x * out[0], *out)\n return out\n\n @property\n def nplike(self) -> TypeTracer:\n return TypeTracer.instance()\n\n @property\n def ndim(self) -> int:\n self.touch_shape()\n return len(self._shape)\n\n def view(self, dtype: np.dtype) -> Self:\n if self.itemsize != np.dtype(dtype).itemsize and self._shape[-1] is not None:\n last = int(\n round(self._shape[-1] * self.itemsize / np.dtype(dtype).itemsize)\n )\n shape = self._shape[:-1] + (last,)\n else:\n shape = self._shape\n dtype = np.dtype(dtype)\n return self._new(\n dtype, shape=shape, form_key=self._form_key, report=self._report\n )\n\n def forget_length(self) -> Self:\n return self._new(\n self._dtype,\n (unknown_length,) + self._shape[1:],\n self._form_key,\n self._report,\n )\n\n def __iter__(self):\n raise ak._errors.wrap_error(\n AssertionError(\n \"bug in Awkward Array: attempt to convert TypeTracerArray into a concrete array\"\n )\n )\n\n def __array__(self, dtype=None):\n raise ak._errors.wrap_error(\n AssertionError(\n \"bug in Awkward Array: attempt to convert TypeTracerArray into a concrete array\"\n )\n )\n\n @property\n def itemsize(self):\n return self._dtype.itemsize\n\n class _CTypes:\n data = 0\n\n @property\n def ctypes(self):\n return self._CTypes\n\n def __len__(self):\n raise ak._errors.wrap_error(\n AssertionError(\n \"bug in Awkward Array: attempt to get length of a TypeTracerArray\"\n )\n )\n\n def __getitem__(\n self,\n key: SupportsIndex\n | slice\n | Ellipsis\n | tuple[SupportsIndex | slice | Ellipsis | ArrayLike, ...]\n | ArrayLike,\n ) -> Self:\n if not isinstance(key, tuple):\n key = (key,)\n\n # 1. 
Validate slice items\n has_seen_ellipsis = 0\n n_basic_non_ellipsis = 0\n n_advanced = 0\n for item in key:\n # Basic indexing\n if isinstance(item, (slice, int)) or is_unknown_integer(item):\n n_basic_non_ellipsis += 1\n # Advanced indexing\n elif isinstance(item, TypeTracerArray) and (\n np.issubdtype(item.dtype, np.integer)\n or np.issubdtype(item.dtype, np.bool_)\n ):\n n_advanced += 1\n # Basic ellipsis\n elif item is Ellipsis:\n if not has_seen_ellipsis:\n has_seen_ellipsis = True\n else:\n raise wrap_error(\n NotImplementedError(\n \"only one ellipsis value permitted for advanced index\"\n )\n )\n # Basic newaxis\n elif item is np.newaxis:\n pass\n else:\n raise wrap_error(\n NotImplementedError(\n \"only integer, unknown scalar, slice, ellipsis, or array indices are permitted\"\n )\n )\n\n # 2. Normalise Ellipsis and boolean arrays\n key_parts = []\n for item in key:\n if item is Ellipsis:\n n_missing_dims = self.ndim - n_advanced - n_basic_non_ellipsis\n key_parts.extend((slice(None),) * n_missing_dims)\n elif is_unknown_array(item) and np.issubdtype(item, np.bool_):\n key_parts.append(self.nplike.nonzero(item)[0])\n else:\n key_parts.append(item)\n key = tuple(key_parts)\n\n # 3. Apply Indexing\n advanced_is_at_front = False\n previous_item_is_basic = True\n advanced_shapes = []\n adjacent_advanced_shape = []\n result_shape_parts = []\n iter_shape = iter(self.shape)\n for item in key:\n # New axes don't reference existing dimensions\n if item is np.newaxis:\n result_shape_parts.append((1,))\n previous_item_is_basic = True\n # Otherwise, consume the dimension\n else:\n dimension_length = next(iter_shape)\n # Advanced index\n if n_advanced and (\n isinstance(item, int)\n or is_unknown_integer(item)\n or is_unknown_array(item)\n ):\n if is_unknown_scalar(item):\n item = self.nplike.promote_scalar(item)\n\n # If this is the first advanced index, insert the location\n if not advanced_shapes:\n result_shape_parts.append(adjacent_advanced_shape)\n # If a previous item was basic and we have an advanced shape\n # we have a split index\n elif previous_item_is_basic:\n advanced_is_at_front = True\n\n advanced_shapes.append(item.shape)\n previous_item_is_basic = False\n # Slice\n elif isinstance(item, slice):\n (\n start,\n stop,\n step,\n slice_length,\n ) = self.nplike.derive_slice_for_length(item, dimension_length)\n result_shape_parts.append((slice_length,))\n previous_item_is_basic = True\n # Integer\n elif isinstance(item, int) or is_unknown_integer(item):\n item = self.nplike.promote_scalar(item)\n\n if is_unknown_length(dimension_length) or is_unknown_integer(item):\n continue\n\n if not 0 <= item < dimension_length:\n raise wrap_error(\n NotImplementedError(\"integer index out of bounds\")\n )\n\n advanced_shape = self.nplike.broadcast_shapes(*advanced_shapes)\n if advanced_is_at_front:\n result_shape_parts.insert(0, advanced_shape)\n else:\n adjacent_advanced_shape[:] = advanced_shape\n\n broadcast_shape = tuple(i for p in result_shape_parts for i in p)\n result_shape = broadcast_shape + tuple(iter_shape)\n\n return self._new(\n self._dtype,\n result_shape,\n self._form_key,\n self._report,\n )\n\n def __setitem__(\n self,\n key: SupportsIndex\n | slice\n | Ellipsis\n | tuple[SupportsIndex | slice | Ellipsis | ArrayLike, ...]\n | ArrayLike,\n value: int | float | bool | complex | ArrayLike,\n ):\n existing_value = self.__getitem__(key)\n if isinstance(value, TypeTracerArray) and value.ndim > existing_value.ndim:\n raise wrap_error(ValueError(\"cannot assign shape larger than 
destination\"))\n\n def copy(self):\n self.touch_data()\n return self\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n # raise ak._errors.wrap_error(\n # RuntimeError(\n # \"TypeTracerArray objects should not be used directly with ufuncs\"\n # )\n # )\n kwargs.pop(\"out\", None)\n\n if method != \"__call__\" or len(inputs) == 0:\n raise ak._errors.wrap_error(NotImplementedError)\n\n if len(kwargs) > 0:\n raise ak._errors.wrap_error(\n ValueError(\"TypeTracerArray does not support kwargs for ufuncs\")\n )\n return self.nplike._apply_ufunc(ufunc, *inputs)\n\n def __bool__(self) -> bool:\n raise ak._errors.wrap_error(RuntimeError(\"cannot realise an unknown value\"))\n\n def __int__(self) -> int:\n raise ak._errors.wrap_error(RuntimeError(\"cannot realise an unknown value\"))\n\n def __index__(self) -> int:\n raise ak._errors.wrap_error(RuntimeError(\"cannot realise an unknown value\"))\n\n\ndef _scalar_type_of(obj) -> numpy.dtype:\n if is_unknown_scalar(obj):\n return obj.dtype\n else:\n return numpy.obj2sctype(obj)\n\n\ndef try_touch_data(array):\n if isinstance(array, TypeTracerArray):\n array.touch_data()\n\n\ndef try_touch_shape(array):\n if isinstance(array, TypeTracerArray):\n array.touch_shape()\n\n\nclass TypeTracer(NumpyLike):\n known_data: Final = False\n is_eager: Final = True\n\n def _apply_ufunc(self, ufunc, *inputs):\n for x in inputs:\n try_touch_data(x)\n\n broadcasted = self.broadcast_arrays(*inputs)\n placeholders = [numpy.empty(0, x.dtype) for x in broadcasted]\n\n result = ufunc(*placeholders)\n if isinstance(result, numpy.ndarray):\n return TypeTracerArray._new(result.dtype, shape=broadcasted[0].shape)\n elif isinstance(result, tuple):\n return (\n TypeTracerArray._new(x.dtype, shape=b.shape)\n for x, b in zip(result, broadcasted)\n )\n else:\n raise wrap_error(TypeError)\n\n def to_rectilinear(self, array, *args, **kwargs):\n try_touch_shape(array)\n raise ak._errors.wrap_error(NotImplementedError)\n\n @property\n def ma(self):\n raise ak._errors.wrap_error(NotImplementedError)\n\n @property\n def char(self):\n raise ak._errors.wrap_error(NotImplementedError)\n\n @property\n def ndarray(self):\n return TypeTracerArray\n\n ############################ array creation\n\n def asarray(\n self,\n obj,\n *,\n dtype: numpy.dtype | None = None,\n copy: bool | None = None,\n ) -> TypeTracerArray:\n try_touch_data(obj)\n\n if isinstance(obj, ak.index.Index):\n obj = obj.data\n\n if isinstance(obj, TypeTracerArray):\n form_key = obj._form_key\n report = obj._report\n\n if dtype is None:\n return obj\n elif copy is False and dtype != obj.dtype:\n raise ak._errors.wrap_error(\n ValueError(\n \"asarray was called with copy=False for an array of a different dtype\"\n )\n )\n else:\n return TypeTracerArray._new(\n dtype, obj.shape, form_key=form_key, report=report\n )\n else:\n # Convert NumPy generics to scalars\n if isinstance(obj, np.generic):\n obj = numpy.asarray(obj)\n\n # Support array-like objects\n if hasattr(obj, \"shape\") and hasattr(obj, \"dtype\"):\n if obj.dtype.kind == \"S\":\n raise ak._errors.wrap_error(\n TypeError(\"TypeTracerArray cannot be created from strings\")\n )\n elif copy is False and dtype != obj.dtype:\n raise ak._errors.wrap_error(\n ValueError(\n \"asarray was called with copy=False for an array of a different dtype\"\n )\n )\n else:\n return TypeTracerArray._new(obj.dtype, obj.shape)\n # Python objects\n elif isinstance(obj, (Number, bool)):\n as_array = numpy.asarray(obj)\n return TypeTracerArray._new(as_array.dtype, ())\n\n elif 
is_non_string_like_sequence(obj):\n assert not any(is_non_string_like_sequence(x) for x in obj)\n shape = (len(obj),)\n result_type = numpy.result_type(*obj) # TODO: result_type\n return TypeTracerArray._new(result_type, shape)\n else:\n raise wrap_error(TypeError)\n\n def ascontiguousarray(\n self, x: ArrayLike, *, dtype: numpy.dtype | None = None\n ) -> TypeTracerArray:\n try_touch_data(x)\n return TypeTracerArray._new(dtype or x.dtype, shape=x.shape)\n\n def frombuffer(\n self, buffer, *, dtype: np.dtype | None = None, count: int = -1\n ) -> TypeTracerArray:\n for x in (buffer, count):\n try_touch_data(x)\n raise ak._errors.wrap_error(NotImplementedError)\n\n def zeros(\n self, shape: ShapeItem | tuple[ShapeItem, ...], *, dtype: np.dtype | None = None\n ) -> TypeTracerArray:\n if not isinstance(shape, tuple):\n shape = (shape,)\n return TypeTracerArray._new(dtype, shape)\n\n def ones(\n self, shape: ShapeItem | tuple[ShapeItem, ...], *, dtype: np.dtype | None = None\n ) -> TypeTracerArray:\n if not isinstance(shape, tuple):\n shape = (shape,)\n return TypeTracerArray._new(dtype, shape)\n\n def empty(\n self, shape: ShapeItem | tuple[ShapeItem, ...], *, dtype: np.dtype | None = None\n ) -> TypeTracerArray:\n if not isinstance(shape, tuple):\n shape = (shape,)\n return TypeTracerArray._new(dtype, shape)\n\n def full(\n self,\n shape: ShapeItem | tuple[ShapeItem, ...],\n fill_value,\n *,\n dtype: np.dtype | None = None,\n ) -> TypeTracerArray:\n if not isinstance(shape, tuple):\n shape = (shape,)\n dtype = _scalar_type_of(fill_value) if dtype is None else dtype\n return TypeTracerArray._new(dtype, shape)\n\n def zeros_like(\n self, x: ArrayLike, *, dtype: np.dtype | None = None\n ) -> TypeTracerArray:\n try_touch_shape(x)\n if is_unknown_scalar(x):\n return TypeTracerArray._new(dtype or x.dtype, shape=())\n else:\n return TypeTracerArray._new(dtype or x.dtype, shape=x.shape)\n\n def ones_like(\n self, x: ArrayLike, *, dtype: np.dtype | None = None\n ) -> TypeTracerArray:\n try_touch_shape(x)\n return self.zeros_like(x, dtype=dtype)\n\n def full_like(\n self, x: ArrayLike, fill_value, *, dtype: np.dtype | None = None\n ) -> TypeTracerArray:\n try_touch_shape(x)\n return self.zeros_like(x, dtype=dtype)\n\n def arange(\n self,\n start: float | int,\n stop: float | int | None = None,\n step: float | int = 1,\n *,\n dtype: np.dtype | None = None,\n ) -> TypeTracerArray:\n try_touch_data(start)\n try_touch_data(stop)\n try_touch_data(step)\n if stop is None:\n start, stop = 0, start\n\n if (\n ak._util.is_integer(start)\n and ak._util.is_integer(stop)\n and ak._util.is_integer(step)\n ):\n length = max(0, (stop - start + (step - (1 if step > 0 else -1))) // step)\n else:\n length = unknown_length\n\n default_int_type = np.int64 if (ak._util.win or ak._util.bits32) else np.int32\n return TypeTracerArray._new(dtype or default_int_type, (length,))\n\n def meshgrid(\n self, *arrays: ArrayLike, indexing: Literal[\"xy\", \"ij\"] = \"xy\"\n ) -> list[TypeTracerArray]:\n for x in arrays:\n try_touch_data(x)\n raise ak._errors.wrap_error(NotImplementedError)\n\n ############################ testing\n\n def array_equal(\n self, x1: ArrayLike, x2: ArrayLike, *, equal_nan: bool = False\n ) -> TypeTracerArray:\n try_touch_data(x1)\n try_touch_data(x2)\n return TypeTracerArray._new(np.bool_, shape=())\n\n def searchsorted(\n self,\n x: ArrayLike,\n values: ArrayLike,\n *,\n side: Literal[\"left\", \"right\"] = \"left\",\n sorter: ArrayLike | None = None,\n ) -> TypeTracerArray:\n try_touch_data(x)\n 
try_touch_data(values)\n try_touch_data(sorter)\n if (\n not (\n is_unknown_length(x.size)\n or sorter is None\n or is_unknown_length(sorter.size)\n )\n and x.size != sorter.size\n ):\n raise wrap_error(ValueError(\"x.size should equal sorter.size\"))\n\n return TypeTracerArray._new(x.dtype, (values.size,))\n\n ############################ manipulation\n\n def promote_scalar(self, obj) -> TypeTracerArray:\n if is_unknown_scalar(obj):\n return obj\n elif isinstance(obj, (Number, bool)):\n # TODO: statically define these types for all nplikes\n as_array = numpy.asarray(obj)\n return TypeTracerArray._new(as_array.dtype, ())\n else:\n raise wrap_error(TypeError(f\"expected scalar type, received {obj}\"))\n\n def shape_item_as_index(self, x1: ShapeItem) -> IndexType:\n if x1 is unknown_length:\n return TypeTracerArray._new(np.int64, shape=())\n elif isinstance(x1, int):\n return x1\n else:\n raise wrap_error(TypeError(f\"expected None or int type, received {x1}\"))\n\n def index_as_shape_item(self, x1: IndexType) -> ShapeItem:\n if is_unknown_scalar(x1) and np.issubdtype(x1.dtype, np.integer):\n return unknown_length\n else:\n return int(x1)\n\n def regularize_index_for_length(\n self, index: IndexType, length: ShapeItem\n ) -> IndexType:\n \"\"\"\n Args:\n index: index value\n length: length of array\n\n Returns regularized index that is guaranteed to be in-bounds.\n \"\"\"\n # Unknown indices are already regularized\n if is_unknown_scalar(index):\n return index\n\n # Without a known length the result must be unknown, as we cannot regularize the index\n length_scalar = self.shape_item_as_index(length)\n if length is unknown_length:\n return length_scalar\n\n # We have known length and index\n if index < 0:\n index = index + length\n\n if 0 <= index < length:\n return index\n else:\n raise wrap_error(\n IndexError(f\"index value out of bounds (0, {length}): {index}\")\n )\n\n def derive_slice_for_length(\n self, slice_: slice, length: ShapeItem\n ) -> tuple[IndexType, IndexType, IndexType, ShapeItem]:\n \"\"\"\n Args:\n slice_: normalized slice object\n length: length of layout\n\n Return a tuple of (start, stop, step, length) indices into a layout, suitable for\n `_getitem_range` (if step == 1). 
Normalize lengths to fit length of array,\n and for arrays with unknown lengths, these offsets become none.\n \"\"\"\n start = slice_.start\n stop = slice_.stop\n step = slice_.step\n\n # Unknown lengths mean that the slice index is unknown\n length_scalar = self.shape_item_as_index(length)\n if length is unknown_length:\n return length_scalar, length_scalar, step, length\n else:\n # Normalise `None` values\n if step is None:\n step = 1\n\n if start is None:\n # `step` is unknown → `start` is unknown\n if is_unknown_scalar(step):\n start = step\n elif step < 0:\n start = length_scalar - 1\n else:\n start = 0\n # Normalise negative integers\n elif not is_unknown_scalar(start):\n if start < 0:\n start = start + length_scalar\n # Clamp values into length bounds\n if is_unknown_scalar(length_scalar):\n start = length_scalar\n else:\n start = min(max(start, 0), length_scalar)\n\n if stop is None:\n # `step` is unknown → `stop` is unknown\n if is_unknown_scalar(step):\n stop = step\n elif step < 0:\n stop = -1\n else:\n stop = length_scalar\n # Normalise negative integers\n elif not is_unknown_scalar(stop):\n if stop < 0:\n stop = stop + length_scalar\n # Clamp values into length bounds\n if is_unknown_scalar(length_scalar):\n stop = length_scalar\n else:\n stop = min(max(stop, 0), length_scalar)\n\n # Compute the length of the slice for downstream use\n slice_length, remainder = divmod((stop - start), step)\n if not is_unknown_scalar(slice_length):\n # Take ceiling of division\n if remainder != 0:\n slice_length += 1\n\n slice_length = max(0, slice_length)\n\n return start, stop, step, self.index_as_shape_item(slice_length)\n\n def broadcast_shapes(self, *shapes: tuple[ShapeItem, ...]) -> tuple[ShapeItem, ...]:\n ndim = max([len(s) for s in shapes], default=0)\n result: list[ShapeItem] = [1] * ndim\n\n for shape in shapes:\n # Right broadcasting\n missing_dim = ndim - len(shape)\n if missing_dim > 0:\n head: tuple[int, ...] 
= (1,) * missing_dim\n shape = head + shape\n\n # Fail if we absolutely know the shapes aren't compatible\n for i, item in enumerate(shape):\n # Item is unknown, take it\n if is_unknown_length(item):\n result[i] = item\n # Existing item is unknown, keep it\n elif is_unknown_length(result[i]):\n continue\n # Items match, continue\n elif result[i] == item:\n continue\n # Item is broadcastable, take existing\n elif item == 1:\n continue\n # Existing is broadcastable, take it\n elif result[i] == 1:\n result[i] = item\n else:\n raise wrap_error(\n ValueError(\n \"known component of shape does not match broadcast result\"\n )\n )\n return tuple(result)\n\n def broadcast_arrays(self, *arrays: ArrayLike) -> list[TypeTracerArray]:\n for x in arrays:\n try_touch_data(x)\n\n if len(arrays) == 0:\n return []\n\n all_arrays = []\n for x in arrays:\n if not hasattr(x, \"shape\"):\n x = self.promote_scalar(x)\n all_arrays.append(x)\n\n shapes = [x.shape for x in all_arrays]\n shape = self.broadcast_shapes(*shapes)\n\n return [TypeTracerArray._new(x.dtype, shape=shape) for x in all_arrays]\n\n def broadcast_to(\n self, x: ArrayLike, shape: tuple[ShapeItem, ...]\n ) -> TypeTracerArray:\n raise ak._errors.wrap_error(NotImplementedError)\n\n def reshape(\n self, x: ArrayLike, shape: tuple[ShapeItem, ...], *, copy: bool | None = None\n ) -> TypeTracerArray:\n x.touch_shape()\n\n size = x.size\n\n # Validate new shape to ensure that it only contains at-most one placeholder\n n_placeholders = 0\n new_size = 1\n for item in shape:\n if item is unknown_length:\n # Size is no longer defined\n new_size = unknown_length\n elif not ak._util.is_integer(item):\n raise wrap_error(\n ValueError(\n \"shape must be comprised of positive integers, -1 (for placeholders), or unknown lengths\"\n )\n )\n elif item == -1:\n if n_placeholders == 1:\n raise wrap_error(\n ValueError(\"only one placeholder dimension permitted per shape\")\n )\n n_placeholders += 1\n elif item == 0:\n raise wrap_error(ValueError(\"shape items cannot be zero\"))\n else:\n new_size *= item\n\n # Populate placeholders\n new_shape = [*shape]\n for i, item in enumerate(shape):\n if item == -1:\n new_shape[i] = size // new_size\n break\n\n return TypeTracerArray._new(x.dtype, tuple(new_shape), x.form_key, x.report)\n\n def cumsum(\n self,\n x: ArrayLike,\n *,\n axis: int | None = None,\n maybe_out: ArrayLike | None = None,\n ) -> TypeTracerArray:\n try_touch_data(x)\n raise ak._errors.wrap_error(NotImplementedError)\n\n def nonzero(self, x: ArrayLike) -> tuple[TypeTracerArray, ...]:\n # array\n try_touch_data(x)\n return (TypeTracerArray._new(np.int64, (unknown_length,)),) * len(x.shape)\n\n def where(\n self, condition: ArrayLike, x1: ArrayLike, x2: ArrayLike\n ) -> TypeTracerArray:\n condition, x1, x2 = self.broadcast_arrays(condition, x1, x2)\n result_dtype = numpy.result_type(x1, x2)\n return TypeTracerArray._new(result_dtype, shape=condition.shape)\n\n def unique_values(self, x: ArrayLike) -> TypeTracerArray:\n try_touch_data(x)\n return TypeTracerArray._new(x.dtype, shape=(unknown_length,))\n\n def concat(self, arrays, *, axis: int | None = 0) -> TypeTracerArray:\n if axis is None:\n assert all(x.ndim == 1 for x in arrays)\n elif axis != 0:\n raise ak._errors.wrap_error(NotImplementedError(\"concat with axis != 0\"))\n for x in arrays:\n try_touch_data(x)\n\n inner_shape = None\n emptyarrays = []\n for x in arrays:\n if inner_shape is None:\n inner_shape = x.shape[1:]\n elif inner_shape != x.shape[1:]:\n raise ak._errors.wrap_error(\n ValueError(\n 
\"inner dimensions don't match in concatenate: {} vs {}\".format(\n inner_shape, x.shape[1:]\n )\n )\n )\n emptyarrays.append(_emptyarray(x))\n\n if inner_shape is None:\n raise ak._errors.wrap_error(\n ValueError(\"need at least one array to concatenate\")\n )\n\n return TypeTracerArray._new(\n numpy.concatenate(emptyarrays).dtype, (unknown_length, *inner_shape)\n )\n\n def repeat(\n self,\n x: ArrayLike,\n repeats: ArrayLike | int,\n *,\n axis: int | None = None,\n ) -> TypeTracerArray:\n try_touch_data(x)\n try_touch_data(repeats)\n raise ak._errors.wrap_error(NotImplementedError)\n\n def tile(self, x: ArrayLike, reps: int) -> TypeTracerArray:\n try_touch_data(x)\n raise ak._errors.wrap_error(NotImplementedError)\n\n def stack(\n self,\n arrays: list[ArrayLike] | tuple[ArrayLike, ...],\n *,\n axis: int = 0,\n ) -> TypeTracerArray:\n for x in arrays:\n try_touch_data(x)\n raise ak._errors.wrap_error(NotImplementedError)\n\n def packbits(\n self,\n x: ArrayLike,\n *,\n axis: int | None = None,\n bitorder: Literal[\"big\", \"little\"] = \"big\",\n ) -> TypeTracerArray:\n try_touch_data(x)\n raise ak._errors.wrap_error(NotImplementedError)\n\n def unpackbits(\n self,\n x: ArrayLike,\n *,\n axis: int | None = None,\n count: int | None = None,\n bitorder: Literal[\"big\", \"little\"] = \"big\",\n ) -> TypeTracerArray:\n try_touch_data(x)\n raise ak._errors.wrap_error(NotImplementedError)\n\n ############################ ufuncs\n\n def add(\n self,\n x1: ArrayLike,\n x2: ArrayLike,\n maybe_out: ArrayLike | None = None,\n ) -> TypeTracerArray:\n return self._apply_ufunc(numpy.add, x1, x2)\n\n def logical_and(\n self,\n x1: ArrayLike,\n x2: ArrayLike,\n maybe_out: ArrayLike | None = None,\n ) -> TypeTracerArray:\n return self._apply_ufunc(numpy.logical_and, x1, x2)\n\n def logical_or(\n self,\n x1: ArrayLike,\n x2: ArrayLike,\n maybe_out: ArrayLike | None = None,\n ) -> TypeTracerArray:\n return self._apply_ufunc(numpy.logical_or, x1, x2)\n\n def logical_not(\n self, x: ArrayLike, maybe_out: ArrayLike | None = None\n ) -> TypeTracerArray:\n return self._apply_ufunc(numpy.logical_not, x)\n\n def sqrt(self, x: ArrayLike, maybe_out: ArrayLike | None = None) -> TypeTracerArray:\n return self._apply_ufunc(numpy.sqrt, x)\n\n def exp(self, x: ArrayLike, maybe_out: ArrayLike | None = None) -> TypeTracerArray:\n return self._apply_ufunc(numpy.exp, x)\n\n def divide(\n self,\n x1: ArrayLike,\n x2: ArrayLike,\n maybe_out: ArrayLike | None = None,\n ) -> TypeTracerArray:\n return self._apply_ufunc(numpy.divide, x1, x2)\n\n ############################ almost-ufuncs\n\n def nan_to_num(\n self,\n x: ArrayLike,\n *,\n copy: bool = True,\n nan: int | float | None = 0.0,\n posinf: int | float | None = None,\n neginf: int | float | None = None,\n ) -> TypeTracerArray:\n try_touch_data(x)\n return TypeTracerArray._new(x.dtype, shape=x.shape)\n\n def isclose(\n self,\n x1: ArrayLike,\n x2: ArrayLike,\n *,\n rtol: float = 1e-5,\n atol: float = 1e-8,\n equal_nan: bool = False,\n ) -> TypeTracerArray:\n try_touch_data(x1)\n try_touch_data(x2)\n out, _ = self.broadcast_arrays(x1, x2)\n return TypeTracerArray._new(np.bool_, shape=out.shape)\n\n def isnan(self, x: ArrayLike) -> TypeTracerArray:\n try_touch_data(x)\n return TypeTracerArray._new(np.bool_, shape=x.shape)\n\n ############################ reducers\n\n def all(\n self,\n x: ArrayLike,\n *,\n axis: int | tuple[int, ...] 
| None = None,\n keepdims: bool = False,\n maybe_out: ArrayLike | None = None,\n ) -> TypeTracerArray:\n try_touch_data(x)\n if axis is None:\n return TypeTracerArray._new(np.bool_, shape=())\n else:\n raise ak._errors.wrap_error(NotImplementedError)\n\n def any(\n self,\n x: ArrayLike,\n *,\n axis: int | tuple[int, ...] | None = None,\n keepdims: bool = False,\n maybe_out: ArrayLike | None = None,\n ) -> TypeTracerArray:\n try_touch_data(x)\n if axis is None:\n return TypeTracerArray._new(np.bool_, shape=())\n else:\n raise ak._errors.wrap_error(NotImplementedError)\n\n def count_nonzero(\n self, x: ArrayLike, *, axis: int | None = None, keepdims: bool = False\n ) -> TypeTracerArray:\n try_touch_data(x)\n raise ak._errors.wrap_error(NotImplementedError)\n\n def min(\n self,\n x: ArrayLike,\n *,\n axis: int | tuple[int, ...] | None = None,\n keepdims: bool = False,\n maybe_out: ArrayLike | None = None,\n ) -> TypeTracerArray:\n try_touch_data(x)\n raise ak._errors.wrap_error(NotImplementedError)\n\n def max(\n self,\n x: ArrayLike,\n *,\n axis: int | tuple[int, ...] | None = None,\n keepdims: bool = False,\n maybe_out: ArrayLike | None = None,\n ) -> TypeTracerArray:\n try_touch_data(x)\n raise ak._errors.wrap_error(NotImplementedError)\n\n def array_str(\n self,\n x: ArrayLike,\n *,\n max_line_width: int | None = None,\n precision: int | None = None,\n suppress_small: bool | None = None,\n ):\n try_touch_data(x)\n return \"[## ... ##]\"\n\n def astype(\n self, x: ArrayLike, dtype: numpy.dtype, *, copy: bool | None = True\n ) -> TypeTracerArray:\n x.touch_data()\n return TypeTracerArray._new(np.dtype(dtype), x.shape)\n\n def can_cast(self, from_: np.dtype | ArrayLike, to: np.dtype | ArrayLike) -> bool:\n return numpy.can_cast(from_, to, casting=\"same_kind\")\n\n @classmethod\n def is_own_array(cls, obj) -> bool:\n return isinstance(obj, TypeTracerArray)\n\n def is_c_contiguous(self, x: ArrayLike) -> bool:\n return True\n", "path": "src/awkward/_nplikes/typetracer.py" } ]
diff --git a/src/awkward/_nplikes/typetracer.py b/src/awkward/_nplikes/typetracer.py index 62f77e3dbf..b00ed99a28 100644 --- a/src/awkward/_nplikes/typetracer.py +++ b/src/awkward/_nplikes/typetracer.py @@ -1187,7 +1187,7 @@ def nan_to_num( neginf: int | float | None = None, ) -> TypeTracerArray: try_touch_data(x) - raise ak._errors.wrap_error(NotImplementedError) + return TypeTracerArray._new(x.dtype, shape=x.shape) def isclose( self, diff --git a/tests/test_2266_fix_nan_to_num.py b/tests/test_2266_fix_nan_to_num.py new file mode 100644 index 0000000000..7f91497aa5 --- /dev/null +++ b/tests/test_2266_fix_nan_to_num.py @@ -0,0 +1,26 @@ +# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE + +import pytest + +import awkward as ak + +dak = pytest.importorskip("dask_awkward") +vector = pytest.importorskip("vector") +vector.register_awkward() + + +def test(): + a = ak.Array([1.0]) + da = dak.from_awkward(a, 1) + dv1 = dak.with_name(dak.zip({"x": da, "y": da, "z": da}), "Vector3D") + + result1 = (dv1 + dv1).compute() + assert result1.tolist() == [{"x": 2, "y": 2, "z": 2}] + assert str(result1.type).startswith("1 * Vector3D[") + assert type(result1).__name__ == "VectorArray3D" + + dv2 = dak.with_name(dak.zip({"rho": da, "phi": da, "theta": da}), "Vector3D") + result2 = (dv2 + dv2).compute() + assert result2.tolist() == [{"rho": 2, "phi": 1, "theta": 1}] + assert str(result2.type).startswith("1 * Vector3D[") + assert type(result2).__name__ == "VectorArray3D"
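The patch above replaces the `NotImplementedError` in the typetracer's `nan_to_num` with a metadata-preserving result: since `nan_to_num` never changes an array's dtype or shape, the tracer can describe its output without touching any data. A minimal standalone sketch of that idea (the `FakeTracerArray` and `traced_nan_to_num` names are hypothetical stand-ins, not awkward's actual classes):

```python
import numpy as np

class FakeTracerArray:
    """Stand-in for TypeTracerArray: carries metadata only, no data buffer."""
    def __init__(self, dtype, shape):
        self.dtype = np.dtype(dtype)
        self.shape = shape  # an entry of None models an unknown length

def traced_nan_to_num(x):
    # Mirrors the patched method: same dtype, same shape, no data access.
    return FakeTracerArray(x.dtype, x.shape)

x = FakeTracerArray(np.float64, (None, 3))
y = traced_nan_to_num(x)
assert y.dtype == np.float64 and y.shape == (None, 3)
```

The new test (`tests/test_2266_fix_nan_to_num.py`) exercises this through dask-awkward's metadata-only pass over `vector` records, presumably because those operations reach `nan_to_num` during tracing, where they previously aborted with `NotImplementedError`.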
Gallopsled__pwntools-470
History search crashes if item isn't found and enter is pressed
The history search in term.readline crashes when an item is searched for but not found, and enter is then pressed.

```
Traceback (most recent call last):
  File "./t.py", line 58, in <module>
    code.interact(local=locals())
  File "/usr/lib/python2.7/code.py", line 306, in interact
    console.interact(banner)
  File "/usr/lib/python2.7/code.py", line 234, in interact
    line = self.raw_input(prompt)
  File "./t.py", line 48, in _raw_input_wrapper
    ret = raw_input(prompt)
  File "./pwntools/pwnlib/term/readline.py", line 426, in raw_input
    return readline(None, prompt, float)
  File "./pwntools/pwnlib/term/readline.py", line 374, in readline
    keymap.handle_input()
  File "./pwntools/pwnlib/term/keymap.py", line 19, in handle_input
    self.send(key.get())
  File "./pwntools/pwnlib/term/keymap.py", line 47, in send
    cb(self.trace)
  File "./pwntools/pwnlib/term/readline.py", line 228, in submit
    commit_search()
  File "./pwntools/pwnlib/term/readline.py", line 140, in commit_search
    set_buffer(history[search_results[search_idx][0]], u'')
IndexError: list index out of range
```

It appears to be related to search_results being empty while search_idx is still set to 0.
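The crash is easy to reproduce in isolation. A standalone sketch (plain variables mimicking the module globals, not the real readline state machine): after a search that matches nothing, `update_search_results` leaves `search_results` empty but `search_idx` at `0`, and `commit_search`'s unguarded check then indexes into the empty list:

```python
history = ['ls -la', 'pwd']   # hypothetical history entries
search_results = []           # nothing matched the query
search_idx = 0                # left at 0 by update_search_results()

try:
    if search_idx is not None:                        # the unguarded check
        print(history[search_results[search_idx][0]])
except IndexError as e:
    print('reproduced:', e)   # -> reproduced: list index out of range
```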
[ { "content": "from . import term, text\nfrom . import keymap as km\nfrom . import keyconsts as kc\ncursor = text.reverse\n\nbuffer_left, buffer_right = u'', u''\nsaved_buffer = None\nhistory = []\nhistory_idx = None\nprompt_handle = None\nbuffer_handle = None\nsuggest_handle = None\nsearch_idx = None\nsearch_results = []\nstartup_hook = None\nshutdown_hook = None\n\ndelims = ' /;:.\\\\'\n\nshow_completion = True\nshow_suggestions = False\n\ncomplete_hook = None\nsuggest_hook = None\n\ntabs = 0\n\ndef set_completer(completer):\n global complete_hook, suggest_hook\n if completer is None:\n complete_hook = None\n suggest_hook = None\n else:\n complete_hook = completer.complete\n suggest_hook = completer.suggest\n\ndef fmt_suggestions(suggestions):\n if suggestions:\n s = ''\n l = max(map(len, suggestions))\n columns = term.width // (l + 1)\n column_width = term.width // columns\n fmt = '%%-%ds' % column_width\n for j in range(0, len(suggestions), columns):\n for k in range(columns):\n l = j + k\n if l < len(suggestions):\n s += fmt % suggestions[l]\n s += '\\n'\n else:\n s = '\\n'\n return s\n\ndef auto_complete(*_):\n global show_suggestions, tabs\n if search_idx is not None:\n commit_search()\n tabs = 0\n elif tabs == 1:\n if complete_hook:\n ret = complete_hook(buffer_left, buffer_right)\n if ret:\n tabs = 0\n insert_text(ret)\n else:\n show_suggestions = not show_suggestions\n redisplay()\n\ndef handle_keypress(trace):\n global tabs\n k = trace[-1]\n if k == '<tab>':\n tabs += 1\n else:\n tabs = 0\n\ndef clear():\n global buffer_left, buffer_right, history_idx, search_idx\n buffer_left, buffer_right = u'', u''\n history_idx = None\n search_idx = None\n redisplay()\n\ndef redisplay():\n global suggest_handle\n if buffer_handle:\n if show_suggestions and suggest_hook:\n suggestions = suggest_hook(buffer_left, buffer_right)\n if suggest_handle is None:\n h = prompt_handle or buffer_handle\n suggest_handle = term.output(before = h)\n s = fmt_suggestions(suggestions)\n suggest_handle.update(s)\n elif suggest_handle:\n suggest_handle.update('')\n if search_idx is None:\n s = None\n if buffer_right:\n s = buffer_left + cursor(buffer_right[0]) + buffer_right[1:]\n elif show_completion and complete_hook:\n ret = complete_hook(buffer_left, buffer_right)\n if ret:\n s = buffer_left + \\\n text.underline(cursor(ret[0])) + \\\n text.underline(ret[1:])\n s = s or buffer_left + cursor(' ')\n buffer_handle.update(s)\n else:\n if search_results != []:\n idx, i, j = search_results[search_idx]\n buf = history[idx]\n a, b, c = buf[:i], buf[i:j], buf[j:]\n s = a + text.bold_green(b) + c\n else:\n s = text.white_on_red(buffer_left)\n buffer_handle.update('(search) ' + s)\n\ndef self_insert(trace):\n if len(trace) != 1:\n return\n k = trace[0]\n if k.type == kc.TYPE_UNICODE and k.mods == kc.MOD_NONE:\n insert_text(k.code)\n\ndef set_buffer(left, right):\n global buffer_left, buffer_right\n buffer_left = unicode(left)\n buffer_right = unicode(right)\n redisplay()\n\ndef cancel_search(*_):\n global search_idx\n if search_idx is not None:\n search_idx = None\n redisplay()\n\ndef commit_search():\n global search_idx\n if search_idx is not None:\n set_buffer(history[search_results[search_idx][0]], u'')\n search_idx = None\n redisplay()\n\ndef update_search_results():\n global search_results, search_idx, show_suggestions\n if search_idx is None:\n return\n show_suggestions = False\n if search_results:\n hidx = search_results[search_idx][0]\n else:\n hidx = None\n search_results = []\n search_idx = 0\n if not 
buffer_left:\n return\n for idx, h in enumerate(history):\n for i in range(0, len(h) - len(buffer_left) + 1):\n if h[i:i + len(buffer_left)] == buffer_left:\n if hidx is not None and idx == hidx:\n search_idx = len(search_results)\n search_results.append((idx, i, i + len(buffer_left)))\n break\n\ndef search_history(*_):\n global buffer_left, buffer_right, history_idx, search_idx\n if search_idx is None:\n buffer_left, buffer_right = buffer_left + buffer_right, u''\n history_idx = None\n search_idx = 0\n update_search_results()\n elif search_results:\n search_idx = (search_idx + 1) % len(search_results)\n redisplay()\n\ndef history_prev(*_):\n global history_idx, saved_buffer\n if history == []:\n return\n cancel_search()\n if history_idx is None:\n saved_buffer = (buffer_left, buffer_right)\n history_idx = -1\n if history_idx < len(history) - 1:\n history_idx += 1\n set_buffer(history[history_idx], u'')\n\ndef history_next(*_):\n global history_idx, saved_buffer\n if history_idx is None:\n return\n cancel_search()\n if history_idx == 0:\n set_buffer(*saved_buffer)\n history_idx = None\n saved_buffer = None\n else:\n history_idx -= 1\n set_buffer(history[history_idx], u'')\n\ndef backward_char(*_):\n global buffer_left, buffer_right\n commit_search()\n if buffer_left:\n buffer_right = buffer_left[-1] + buffer_right\n buffer_left = buffer_left[:-1]\n redisplay()\n\ndef forward_char(*_):\n global buffer_left, buffer_right\n commit_search()\n if buffer_right:\n buffer_left += buffer_right[0]\n buffer_right = buffer_right[1:]\n redisplay()\n\ndef insert_text(s):\n global history_idx, saved_buffer, buffer_left\n if history_idx is not None:\n history_idx = None\n saved_buffer = None\n buffer_left += s\n update_search_results()\n redisplay()\n\ndef submit(*_):\n if search_idx is not None:\n commit_search()\n else:\n keymap.stop()\n\ndef control_c(*_):\n global history_idx, saved_buffer\n if search_idx is not None:\n cancel_search()\n elif history_idx is not None:\n set_buffer(*saved_buffer)\n history_idx = None\n saved_buffer = None\n elif buffer_left or buffer_right:\n clear()\n else:\n raise KeyboardInterrupt\n\ndef control_d(*_):\n if buffer_left or buffer_right:\n return\n global eof\n eof = True\n keymap.stop()\n\ndef kill_to_end(*_):\n global buffer_right\n commit_search()\n buffer_right = []\n redisplay()\n\ndef delete_char_forward(*_):\n global buffer_right\n commit_search()\n if buffer_right:\n buffer_right = buffer_right[1:]\n redisplay()\n\ndef delete_char_backward(*_):\n global buffer_left\n if buffer_left:\n buffer_left = buffer_left[:-1]\n update_search_results()\n redisplay()\n\ndef kill_word_backward(*_):\n global buffer_left\n commit_search()\n flag = False\n while buffer_left:\n c = buffer_left[-1]\n if c[0] in delims:\n if flag:\n break\n else:\n flag = True\n buffer_left = buffer_left[:-1]\n redisplay()\n\ndef backward_word(*_):\n global buffer_left, buffer_right\n commit_search()\n flag = False\n while buffer_left:\n c = buffer_left[-1]\n if c[0] in delims:\n if flag:\n break\n else:\n flag = True\n buffer_right = buffer_left[-1] + buffer_right\n buffer_left = buffer_left[:-1]\n redisplay()\n\ndef forward_word(*_):\n global buffer_left, buffer_right\n commit_search()\n flag = False\n while buffer_right:\n c = buffer_right[0]\n if c[0] in delims:\n if flag:\n break\n else:\n flag = True\n buffer_left += buffer_right[0]\n buffer_right = buffer_right[1:]\n redisplay()\n\ndef go_beginning(*_):\n commit_search()\n set_buffer(u'', buffer_left + buffer_right)\n\ndef go_end(*_):\n 
commit_search()\n set_buffer(buffer_left + buffer_right, u'')\n\nkeymap = km.Keymap({\n '<nomatch>' : self_insert,\n '<up>' : history_prev,\n '<down>' : history_next,\n '<left>' : backward_char,\n '<right>' : forward_char,\n '<del>' : delete_char_backward,\n '<delete>' : delete_char_forward,\n '<enter>' : submit,\n 'C-j' : submit,\n 'C-<left>' : backward_word,\n 'C-<right>' : forward_word,\n 'M-<left>' : backward_word,\n 'M-<right>' : forward_word,\n 'C-c' : control_c,\n 'C-d' : control_d,\n 'C-k' : kill_to_end,\n 'C-w' : kill_word_backward,\n '<backspace>' : kill_word_backward,\n 'M-<del>' : kill_word_backward,\n 'C-r' : search_history,\n '<escape>' : cancel_search,\n 'C-a' : go_beginning,\n 'C-e' : go_end,\n '<tab>' : auto_complete,\n '<any>' : handle_keypress,\n })\n\ndef readline(_size = None, prompt = '', float = True, priority = 10):\n # The argument _size is unused, but is there for compatibility\n # with the existing readline\n\n global buffer_handle, prompt_handle, suggest_handle, eof, \\\n show_suggestions\n\n show_suggestions = False\n eof = False\n if prompt:\n prompt_handle = term.output(prompt, float = float, priority = priority)\n else:\n prompt_handle = None\n buffer_handle = term.output(float = float, priority = priority)\n suggest_handle = None\n clear()\n if startup_hook:\n startup_hook()\n try:\n while True:\n try:\n try:\n keymap.handle_input()\n except EOFError:\n if len(buffer_left + buffer_right) == 0:\n return ''\n if eof:\n return ''\n else:\n buffer = (buffer_left + buffer_right).encode('utf-8')\n if buffer:\n history.insert(0, buffer)\n return buffer + '\\n'\n except KeyboardInterrupt:\n control_c()\n finally:\n line = buffer_left + buffer_right + '\\n'\n buffer_handle.update(line)\n buffer_handle.freeze()\n buffer_handle = None\n if prompt_handle:\n prompt_handle.freeze()\n prompt_handle = None\n if suggest_handle:\n suggest_handle.freeze()\n suggest_handle = None\n if shutdown_hook:\n shutdown_hook()\n\ndef init():\n # defer imports until initialization\n import sys, __builtin__\n from ..util import safeeval\n\n class Wrapper:\n def __init__(self, fd):\n self._fd = fd\n def readline(self, size = None):\n return readline(size)\n def __getattr__(self, k):\n return self._fd.__getattribute__(k)\n sys.stdin = Wrapper(sys.stdin)\n\n def raw_input(prompt = '', float = True):\n \"\"\"raw_input(prompt = '', float = True)\n\n Replacement for the built-in `raw_input` using ``pwnlib``s readline\n implementation.\n\n Arguments:\n prompt(str): The prompt to show to the user.\n float(bool): If set to `True`, prompt and input will float to the\n bottom of the screen when `term.term_mode` is enabled.\n \"\"\"\n return readline(None, prompt, float)\n __builtin__.raw_input = raw_input\n\n def input(prompt = '', float = True):\n \"\"\"input(prompt = '', float = True)\n\n Replacement for the built-in `input` using ``pwnlib``s readline\n implementation, and `pwnlib.util.safeeval.expr` instead of `eval` (!).\n\n Arguments:\n prompt(str): The prompt to show to the user.\n float(bool): If set to `True`, prompt and input will float to the\n bottom of the screen when `term.term_mode` is enabled.\n \"\"\"\n return safeeval.const(readline(None, prompt, float))\n __builtin__.input = input\n", "path": "pwnlib/term/readline.py" } ]
[ { "content": "from . import term, text\nfrom . import keymap as km\nfrom . import keyconsts as kc\ncursor = text.reverse\n\nbuffer_left, buffer_right = u'', u''\nsaved_buffer = None\nhistory = []\nhistory_idx = None\nprompt_handle = None\nbuffer_handle = None\nsuggest_handle = None\nsearch_idx = None\nsearch_results = []\nstartup_hook = None\nshutdown_hook = None\n\ndelims = ' /;:.\\\\'\n\nshow_completion = True\nshow_suggestions = False\n\ncomplete_hook = None\nsuggest_hook = None\n\ntabs = 0\n\ndef set_completer(completer):\n global complete_hook, suggest_hook\n if completer is None:\n complete_hook = None\n suggest_hook = None\n else:\n complete_hook = completer.complete\n suggest_hook = completer.suggest\n\ndef fmt_suggestions(suggestions):\n if suggestions:\n s = ''\n l = max(map(len, suggestions))\n columns = term.width // (l + 1)\n column_width = term.width // columns\n fmt = '%%-%ds' % column_width\n for j in range(0, len(suggestions), columns):\n for k in range(columns):\n l = j + k\n if l < len(suggestions):\n s += fmt % suggestions[l]\n s += '\\n'\n else:\n s = '\\n'\n return s\n\ndef auto_complete(*_):\n global show_suggestions, tabs\n if search_idx is not None:\n commit_search()\n tabs = 0\n elif tabs == 1:\n if complete_hook:\n ret = complete_hook(buffer_left, buffer_right)\n if ret:\n tabs = 0\n insert_text(ret)\n else:\n show_suggestions = not show_suggestions\n redisplay()\n\ndef handle_keypress(trace):\n global tabs\n k = trace[-1]\n if k == '<tab>':\n tabs += 1\n else:\n tabs = 0\n\ndef clear():\n global buffer_left, buffer_right, history_idx, search_idx\n buffer_left, buffer_right = u'', u''\n history_idx = None\n search_idx = None\n redisplay()\n\ndef redisplay():\n global suggest_handle\n if buffer_handle:\n if show_suggestions and suggest_hook:\n suggestions = suggest_hook(buffer_left, buffer_right)\n if suggest_handle is None:\n h = prompt_handle or buffer_handle\n suggest_handle = term.output(before = h)\n s = fmt_suggestions(suggestions)\n suggest_handle.update(s)\n elif suggest_handle:\n suggest_handle.update('')\n if search_idx is None:\n s = None\n if buffer_right:\n s = buffer_left + cursor(buffer_right[0]) + buffer_right[1:]\n elif show_completion and complete_hook:\n ret = complete_hook(buffer_left, buffer_right)\n if ret:\n s = buffer_left + \\\n text.underline(cursor(ret[0])) + \\\n text.underline(ret[1:])\n s = s or buffer_left + cursor(' ')\n buffer_handle.update(s)\n else:\n if search_results != []:\n idx, i, j = search_results[search_idx]\n buf = history[idx]\n a, b, c = buf[:i], buf[i:j], buf[j:]\n s = a + text.bold_green(b) + c\n else:\n s = text.white_on_red(buffer_left)\n buffer_handle.update('(search) ' + s)\n\ndef self_insert(trace):\n if len(trace) != 1:\n return\n k = trace[0]\n if k.type == kc.TYPE_UNICODE and k.mods == kc.MOD_NONE:\n insert_text(k.code)\n\ndef set_buffer(left, right):\n global buffer_left, buffer_right\n buffer_left = unicode(left)\n buffer_right = unicode(right)\n redisplay()\n\ndef cancel_search(*_):\n global search_idx\n if search_idx is not None:\n search_idx = None\n redisplay()\n\ndef commit_search():\n global search_idx\n if search_idx is not None and search_results:\n set_buffer(history[search_results[search_idx][0]], u'')\n search_idx = None\n redisplay()\n\ndef update_search_results():\n global search_results, search_idx, show_suggestions\n if search_idx is None:\n return\n show_suggestions = False\n if search_results:\n hidx = search_results[search_idx][0]\n else:\n hidx = None\n search_results = []\n search_idx 
= 0\n if not buffer_left:\n return\n for idx, h in enumerate(history):\n for i in range(0, len(h) - len(buffer_left) + 1):\n if h[i:i + len(buffer_left)] == buffer_left:\n if hidx is not None and idx == hidx:\n search_idx = len(search_results)\n search_results.append((idx, i, i + len(buffer_left)))\n break\n\ndef search_history(*_):\n global buffer_left, buffer_right, history_idx, search_idx\n if search_idx is None:\n buffer_left, buffer_right = buffer_left + buffer_right, u''\n history_idx = None\n search_idx = 0\n update_search_results()\n elif search_results:\n search_idx = (search_idx + 1) % len(search_results)\n redisplay()\n\ndef history_prev(*_):\n global history_idx, saved_buffer\n if history == []:\n return\n cancel_search()\n if history_idx is None:\n saved_buffer = (buffer_left, buffer_right)\n history_idx = -1\n if history_idx < len(history) - 1:\n history_idx += 1\n set_buffer(history[history_idx], u'')\n\ndef history_next(*_):\n global history_idx, saved_buffer\n if history_idx is None:\n return\n cancel_search()\n if history_idx == 0:\n set_buffer(*saved_buffer)\n history_idx = None\n saved_buffer = None\n else:\n history_idx -= 1\n set_buffer(history[history_idx], u'')\n\ndef backward_char(*_):\n global buffer_left, buffer_right\n commit_search()\n if buffer_left:\n buffer_right = buffer_left[-1] + buffer_right\n buffer_left = buffer_left[:-1]\n redisplay()\n\ndef forward_char(*_):\n global buffer_left, buffer_right\n commit_search()\n if buffer_right:\n buffer_left += buffer_right[0]\n buffer_right = buffer_right[1:]\n redisplay()\n\ndef insert_text(s):\n global history_idx, saved_buffer, buffer_left\n if history_idx is not None:\n history_idx = None\n saved_buffer = None\n buffer_left += s\n update_search_results()\n redisplay()\n\ndef submit(*_):\n if search_idx is not None:\n commit_search()\n else:\n keymap.stop()\n\ndef control_c(*_):\n global history_idx, saved_buffer\n if search_idx is not None:\n cancel_search()\n elif history_idx is not None:\n set_buffer(*saved_buffer)\n history_idx = None\n saved_buffer = None\n elif buffer_left or buffer_right:\n clear()\n else:\n raise KeyboardInterrupt\n\ndef control_d(*_):\n if buffer_left or buffer_right:\n return\n global eof\n eof = True\n keymap.stop()\n\ndef kill_to_end(*_):\n global buffer_right\n commit_search()\n buffer_right = []\n redisplay()\n\ndef delete_char_forward(*_):\n global buffer_right\n commit_search()\n if buffer_right:\n buffer_right = buffer_right[1:]\n redisplay()\n\ndef delete_char_backward(*_):\n global buffer_left\n if buffer_left:\n buffer_left = buffer_left[:-1]\n update_search_results()\n redisplay()\n\ndef kill_word_backward(*_):\n global buffer_left\n commit_search()\n flag = False\n while buffer_left:\n c = buffer_left[-1]\n if c[0] in delims:\n if flag:\n break\n else:\n flag = True\n buffer_left = buffer_left[:-1]\n redisplay()\n\ndef backward_word(*_):\n global buffer_left, buffer_right\n commit_search()\n flag = False\n while buffer_left:\n c = buffer_left[-1]\n if c[0] in delims:\n if flag:\n break\n else:\n flag = True\n buffer_right = buffer_left[-1] + buffer_right\n buffer_left = buffer_left[:-1]\n redisplay()\n\ndef forward_word(*_):\n global buffer_left, buffer_right\n commit_search()\n flag = False\n while buffer_right:\n c = buffer_right[0]\n if c[0] in delims:\n if flag:\n break\n else:\n flag = True\n buffer_left += buffer_right[0]\n buffer_right = buffer_right[1:]\n redisplay()\n\ndef go_beginning(*_):\n commit_search()\n set_buffer(u'', buffer_left + buffer_right)\n\ndef 
go_end(*_):\n commit_search()\n set_buffer(buffer_left + buffer_right, u'')\n\nkeymap = km.Keymap({\n '<nomatch>' : self_insert,\n '<up>' : history_prev,\n '<down>' : history_next,\n '<left>' : backward_char,\n '<right>' : forward_char,\n '<del>' : delete_char_backward,\n '<delete>' : delete_char_forward,\n '<enter>' : submit,\n 'C-j' : submit,\n 'C-<left>' : backward_word,\n 'C-<right>' : forward_word,\n 'M-<left>' : backward_word,\n 'M-<right>' : forward_word,\n 'C-c' : control_c,\n 'C-d' : control_d,\n 'C-k' : kill_to_end,\n 'C-w' : kill_word_backward,\n '<backspace>' : kill_word_backward,\n 'M-<del>' : kill_word_backward,\n 'C-r' : search_history,\n '<escape>' : cancel_search,\n 'C-a' : go_beginning,\n 'C-e' : go_end,\n '<tab>' : auto_complete,\n '<any>' : handle_keypress,\n })\n\ndef readline(_size = None, prompt = '', float = True, priority = 10):\n # The argument _size is unused, but is there for compatibility\n # with the existing readline\n\n global buffer_handle, prompt_handle, suggest_handle, eof, \\\n show_suggestions\n\n show_suggestions = False\n eof = False\n if prompt:\n prompt_handle = term.output(prompt, float = float, priority = priority)\n else:\n prompt_handle = None\n buffer_handle = term.output(float = float, priority = priority)\n suggest_handle = None\n clear()\n if startup_hook:\n startup_hook()\n try:\n while True:\n try:\n try:\n keymap.handle_input()\n except EOFError:\n if len(buffer_left + buffer_right) == 0:\n return ''\n if eof:\n return ''\n else:\n buffer = (buffer_left + buffer_right).encode('utf-8')\n if buffer:\n history.insert(0, buffer)\n return buffer + '\\n'\n except KeyboardInterrupt:\n control_c()\n finally:\n line = buffer_left + buffer_right + '\\n'\n buffer_handle.update(line)\n buffer_handle.freeze()\n buffer_handle = None\n if prompt_handle:\n prompt_handle.freeze()\n prompt_handle = None\n if suggest_handle:\n suggest_handle.freeze()\n suggest_handle = None\n if shutdown_hook:\n shutdown_hook()\n\ndef init():\n # defer imports until initialization\n import sys, __builtin__\n from ..util import safeeval\n\n class Wrapper:\n def __init__(self, fd):\n self._fd = fd\n def readline(self, size = None):\n return readline(size)\n def __getattr__(self, k):\n return self._fd.__getattribute__(k)\n sys.stdin = Wrapper(sys.stdin)\n\n def raw_input(prompt = '', float = True):\n \"\"\"raw_input(prompt = '', float = True)\n\n Replacement for the built-in `raw_input` using ``pwnlib``s readline\n implementation.\n\n Arguments:\n prompt(str): The prompt to show to the user.\n float(bool): If set to `True`, prompt and input will float to the\n bottom of the screen when `term.term_mode` is enabled.\n \"\"\"\n return readline(None, prompt, float)\n __builtin__.raw_input = raw_input\n\n def input(prompt = '', float = True):\n \"\"\"input(prompt = '', float = True)\n\n Replacement for the built-in `input` using ``pwnlib``s readline\n implementation, and `pwnlib.util.safeeval.expr` instead of `eval` (!).\n\n Arguments:\n prompt(str): The prompt to show to the user.\n float(bool): If set to `True`, prompt and input will float to the\n bottom of the screen when `term.term_mode` is enabled.\n \"\"\"\n return safeeval.const(readline(None, prompt, float))\n __builtin__.input = input\n", "path": "pwnlib/term/readline.py" } ]
diff --git a/pwnlib/term/readline.py b/pwnlib/term/readline.py index 8e435d2bd..5ebb26bb0 100644 --- a/pwnlib/term/readline.py +++ b/pwnlib/term/readline.py @@ -136,7 +136,7 @@ def cancel_search(*_): def commit_search(): global search_idx - if search_idx is not None: + if search_idx is not None and search_results: set_buffer(history[search_results[search_idx][0]], u'') search_idx = None redisplay()
facebookresearch__fairseq-214
Size Mismatch in AdaptiveSoftmax when targets are not specified Following up on #212, I'm updating `sequence_generator.py` to generate text from a pre-trained language model (initially trained with adaptive softmax). When computing log probabilities with the targets set to `None`, I receive a size-mismatch exception at the line below, possibly because the dictionary size is smaller than the adaptive softmax cutoff: https://github.com/pytorch/fairseq/blob/388c520be21752cacb9fe3b1712038f32e0e9a5f/fairseq/modules/adaptive_softmax.py#L126 I imagine this could be solved by some sort of truncation of the output of `tail[i].input`
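For reference, a minimal standalone sketch of the failure mode, using plain `torch` tensors with made-up sizes rather than the fairseq module itself: when the vocabulary is smaller than the last cutoff, the slice of `log_probs` is silently clamped to the vocabulary boundary while the tail cluster still emits `cutoff[i + 1] - cutoff[i]` scores, so the in-place `copy_` fails.

```
>>> import torch  # hypothetical repro; sizes chosen for illustration only
>>> vocab_size = 120
>>> cutoff = [20, 60, 200]            # cutoff[-1] > vocab_size: the bad case
>>> log_probs = torch.zeros(4, vocab_size)
>>> start, end = cutoff[1], cutoff[2]
>>> tail_out = torch.randn(4, end - start)   # 140 scores from the last tail
>>> # log_probs[:, 60:200] is clamped to 60 columns (60:120), so copy_ fails:
>>> log_probs[:, start:end].copy_(tail_out)
Traceback (most recent call last):
...
RuntimeError: ...
```

As the accompanying diff shows, the change that landed rejects this configuration up front with an assertion (`vocab_size == cutoff[-1]`) rather than truncating the tail output.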
[ { "content": "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\n\nimport torch.nn.functional as F\nfrom torch import nn\n\n\nclass AdaptiveSoftmax(nn.Module):\n \"\"\"\n This is an implementation of the efficient softmax approximation for\n graphical processing units (GPU), described in the paper \"Efficient softmax\n approximation for GPUs\" (http://arxiv.org/abs/1609.04309).\n \"\"\"\n\n def __init__(self, vocab_size, input_dim, cutoff, dropout):\n super().__init__()\n\n if vocab_size > cutoff[-1]:\n cutoff = cutoff + [vocab_size]\n\n output_dim = cutoff[0] + len(cutoff) - 1\n\n self.vocab_size = vocab_size\n self.cutoff = cutoff\n self.dropout = dropout\n\n self.lsm = nn.LogSoftmax(dim=1)\n self.head = nn.Linear(input_dim, output_dim, bias=False)\n self.tail = nn.ModuleList()\n\n for i in range(len(cutoff) - 1):\n self.tail.append(\n nn.Sequential(\n nn.Linear(input_dim, input_dim // 4 ** i, bias=False),\n nn.Dropout(dropout),\n nn.Linear(input_dim // 4 ** i, cutoff[i + 1] - cutoff[i], bias=False)\n )\n )\n\n def init_weights(m):\n if hasattr(m, 'weight'):\n nn.init.xavier_uniform_(m.weight)\n\n self.apply(init_weights)\n\n def adapt_target(self, target):\n \"\"\"\n In order to be efficient, the AdaptiveSoftMax does not compute the\n scores for all the word of the vocabulary for all the examples. It is\n thus necessary to call the method adapt_target of the AdaptiveSoftMax\n layer inside each forward pass.\n \"\"\"\n\n target = target.view(-1)\n new_target = [target.clone()]\n target_idxs = []\n\n for i in range(len(self.cutoff) - 1):\n mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1]))\n new_target[0][mask] = self.cutoff[0] + i - 1\n\n if mask.any():\n target_idxs.append(mask.nonzero().squeeze(1))\n new_target.append(target[mask].add(-self.cutoff[i]))\n else:\n target_idxs.append(None)\n new_target.append(None)\n\n return new_target, target_idxs\n\n def forward(self, input, target):\n \"\"\"\n Args:\n input: (b x t x d)\n target: (b x t)\n Returns:\n 2 lists: output for each cutoff section and new targets by cut off\n \"\"\"\n\n input = input.contiguous().view(-1, input.size(-1))\n input = F.dropout(input, p=self.dropout, training=self.training)\n\n new_target, target_idxs = self.adapt_target(target)\n output = [self.head(input)]\n\n for i in range(len(target_idxs)):\n if target_idxs[i] is not None:\n output.append(self.tail[i](input.index_select(0, target_idxs[i])))\n else:\n output.append(None)\n\n return output, new_target\n\n def get_log_prob(self, input, target):\n \"\"\"\n Computes the log probabilities for all the words of the vocabulary,\n given a 2D tensor of hidden vectors.\n \"\"\"\n\n bsz, length, dim = input.size()\n input = input.contiguous().view(-1, dim)\n\n if target is not None:\n _, target_idxs = self.adapt_target(target)\n else:\n target_idxs = None\n\n head_y = self.head(input)\n log_probs = head_y.new_zeros(input.size(0), self.vocab_size)\n\n head_sz = self.cutoff[0] + len(self.tail)\n log_probs[:, :head_sz] = self.lsm(head_y)\n tail_priors = log_probs[:, self.cutoff[0] - 1: head_sz - 1].clone()\n\n for i in range(len(self.tail)):\n start = self.cutoff[i]\n end = self.cutoff[i + 1]\n\n if target_idxs is None:\n tail_out = log_probs[:, start:end]\n tail_out.copy_(self.tail[i](input))\n log_probs[:, 
start:end] = self.lsm(tail_out).add_(tail_priors[:, i, None])\n elif target_idxs[i] is not None:\n idxs = target_idxs[i]\n tail_out = log_probs[idxs, start:end]\n tail_out.copy_(self.tail[i](input[idxs]))\n log_probs[idxs, start:end] = self.lsm(tail_out).add_(tail_priors[idxs, i, None])\n\n log_probs = log_probs.view(bsz, length, -1)\n return log_probs\n", "path": "fairseq/modules/adaptive_softmax.py" } ]
[ { "content": "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\n\nimport torch.nn.functional as F\nfrom torch import nn\n\n\nclass AdaptiveSoftmax(nn.Module):\n \"\"\"\n This is an implementation of the efficient softmax approximation for\n graphical processing units (GPU), described in the paper \"Efficient softmax\n approximation for GPUs\" (http://arxiv.org/abs/1609.04309).\n \"\"\"\n\n def __init__(self, vocab_size, input_dim, cutoff, dropout):\n super().__init__()\n\n if vocab_size > cutoff[-1]:\n cutoff = cutoff + [vocab_size]\n else:\n assert vocab_size == cutoff[\n -1], 'cannot specify cutoff smaller than vocab size'\n\n output_dim = cutoff[0] + len(cutoff) - 1\n\n self.vocab_size = vocab_size\n self.cutoff = cutoff\n self.dropout = dropout\n\n self.lsm = nn.LogSoftmax(dim=1)\n self.head = nn.Linear(input_dim, output_dim, bias=False)\n self.tail = nn.ModuleList()\n\n for i in range(len(cutoff) - 1):\n self.tail.append(\n nn.Sequential(\n nn.Linear(input_dim, input_dim // 4 ** i, bias=False),\n nn.Dropout(dropout),\n nn.Linear(input_dim // 4 ** i, cutoff[i + 1] - cutoff[i], bias=False)\n )\n )\n\n def init_weights(m):\n if hasattr(m, 'weight'):\n nn.init.xavier_uniform_(m.weight)\n\n self.apply(init_weights)\n\n def adapt_target(self, target):\n \"\"\"\n In order to be efficient, the AdaptiveSoftMax does not compute the\n scores for all the word of the vocabulary for all the examples. It is\n thus necessary to call the method adapt_target of the AdaptiveSoftMax\n layer inside each forward pass.\n \"\"\"\n\n target = target.view(-1)\n new_target = [target.clone()]\n target_idxs = []\n\n for i in range(len(self.cutoff) - 1):\n mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1]))\n new_target[0][mask] = self.cutoff[0] + i - 1\n\n if mask.any():\n target_idxs.append(mask.nonzero().squeeze(1))\n new_target.append(target[mask].add(-self.cutoff[i]))\n else:\n target_idxs.append(None)\n new_target.append(None)\n\n return new_target, target_idxs\n\n def forward(self, input, target):\n \"\"\"\n Args:\n input: (b x t x d)\n target: (b x t)\n Returns:\n 2 lists: output for each cutoff section and new targets by cut off\n \"\"\"\n\n input = input.contiguous().view(-1, input.size(-1))\n input = F.dropout(input, p=self.dropout, training=self.training)\n\n new_target, target_idxs = self.adapt_target(target)\n output = [self.head(input)]\n\n for i in range(len(target_idxs)):\n if target_idxs[i] is not None:\n output.append(self.tail[i](input.index_select(0, target_idxs[i])))\n else:\n output.append(None)\n\n return output, new_target\n\n def get_log_prob(self, input, target):\n \"\"\"\n Computes the log probabilities for all the words of the vocabulary,\n given a 2D tensor of hidden vectors.\n \"\"\"\n\n bsz, length, dim = input.size()\n input = input.contiguous().view(-1, dim)\n\n if target is not None:\n _, target_idxs = self.adapt_target(target)\n else:\n target_idxs = None\n\n head_y = self.head(input)\n log_probs = head_y.new_zeros(input.size(0), self.vocab_size)\n\n head_sz = self.cutoff[0] + len(self.tail)\n log_probs[:, :head_sz] = self.lsm(head_y)\n tail_priors = log_probs[:, self.cutoff[0] - 1: head_sz - 1].clone()\n\n for i in range(len(self.tail)):\n start = self.cutoff[i]\n end = self.cutoff[i + 1]\n\n if target_idxs is None:\n 
tail_out = log_probs[:, start:end]\n tail_out.copy_(self.tail[i](input))\n log_probs[:, start:end] = self.lsm(tail_out).add_(tail_priors[:, i, None])\n elif target_idxs[i] is not None:\n idxs = target_idxs[i]\n tail_out = log_probs[idxs, start:end]\n tail_out.copy_(self.tail[i](input[idxs]))\n log_probs[idxs, start:end] = self.lsm(tail_out).add_(tail_priors[idxs, i, None])\n\n log_probs = log_probs.view(bsz, length, -1)\n return log_probs\n", "path": "fairseq/modules/adaptive_softmax.py" } ]
diff --git a/fairseq/modules/adaptive_softmax.py b/fairseq/modules/adaptive_softmax.py index 307861b6c8..aeceb486df 100644 --- a/fairseq/modules/adaptive_softmax.py +++ b/fairseq/modules/adaptive_softmax.py @@ -22,6 +22,9 @@ def __init__(self, vocab_size, input_dim, cutoff, dropout): if vocab_size > cutoff[-1]: cutoff = cutoff + [vocab_size] + else: + assert vocab_size == cutoff[ + -1], 'cannot specify cutoff smaller than vocab size' output_dim = cutoff[0] + len(cutoff) - 1
pre-commit__pre-commit-2836
Alternative to stashing files for testing Are there any plans to implement alternatives to stashing the worktree? Ideally this would be hookable/scriptable, like some 'prepare-worktree' and 'restore-worktree' options which default to the current stash behavior but can also yield a new directory where the tests are run. The rationale here is that my editor reverts files changed on disk, and I'd like to add notes to source files while the commit is in progress. In my own pre-commit hooks I use something like: git archive "$(git write-tree)" --prefix="$test_dir/" | tar xf - to create a pristine source tree (actually, I also prime it via `cp -rl` with build artifacts from the previous build to speed up incremental builds). 'git-worktree' and other tools could be used as well. Eventually I'd like to run some (more expensive) pre-commit checks in the background while the commit message is being typed; the commit-msg hook would then wait for the background results and abort the commit there. This should reduce turnaround times significantly.
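A minimal sketch of what the proposed 'prepare-worktree' step could look like as a Python helper around the same git commands; `prepare_worktree` is an invented name for illustration, and the git/tar invocations mirror the shell one-liner above:

```
import subprocess
import tempfile

def prepare_worktree():
    """Extract the staged tree into a throwaway directory and return its path."""
    test_dir = tempfile.mkdtemp(prefix='pre-commit-tree-')
    # Snapshot the index as a tree object, then unpack it into the scratch
    # directory -- the real worktree (and the editor's buffers) stay untouched.
    tree = subprocess.check_output(['git', 'write-tree'], text=True).strip()
    archive = subprocess.run(['git', 'archive', tree],
                             check=True, stdout=subprocess.PIPE)
    subprocess.run(['tar', 'xf', '-', '-C', test_dir],
                   input=archive.stdout, check=True)
    return test_dir
```

The whole archive is buffered in memory here; for large repositories the two processes could be piped together directly instead.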
[ { "content": "from __future__ import annotations\n\nimport contextlib\nimport os\nfrom typing import Generator\nfrom typing import Sequence\n\nfrom pre_commit import lang_base\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import Var\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import cmd_output_b\n\nBUILD_DIR = '.build'\nBUILD_CONFIG = 'release'\n\nENVIRONMENT_DIR = 'swift_env'\nget_default_version = lang_base.basic_get_default_version\nhealth_check = lang_base.basic_health_check\nrun_hook = lang_base.basic_run_hook\n\n\ndef get_env_patch(venv: str) -> PatchesT: # pragma: win32 no cover\n bin_path = os.path.join(venv, BUILD_DIR, BUILD_CONFIG)\n return (('PATH', (bin_path, os.pathsep, Var('PATH'))),)\n\n\[email protected] # pragma: win32 no cover\ndef in_env(prefix: Prefix, version: str) -> Generator[None, None, None]:\n envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version)\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None: # pragma: win32 no cover\n lang_base.assert_version_default('swift', version)\n lang_base.assert_no_additional_deps('swift', additional_dependencies)\n envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version)\n\n # Build the swift package\n os.mkdir(envdir)\n cmd_output_b(\n 'swift', 'build',\n '-C', prefix.prefix_dir,\n '-c', BUILD_CONFIG,\n '--build-path', os.path.join(envdir, BUILD_DIR),\n )\n", "path": "pre_commit/languages/swift.py" } ]
[ { "content": "from __future__ import annotations\n\nimport contextlib\nimport os\nfrom typing import Generator\nfrom typing import Sequence\n\nfrom pre_commit import lang_base\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import Var\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import cmd_output_b\n\nBUILD_DIR = '.build'\nBUILD_CONFIG = 'release'\n\nENVIRONMENT_DIR = 'swift_env'\nget_default_version = lang_base.basic_get_default_version\nhealth_check = lang_base.basic_health_check\nrun_hook = lang_base.basic_run_hook\n\n\ndef get_env_patch(venv: str) -> PatchesT: # pragma: win32 no cover\n bin_path = os.path.join(venv, BUILD_DIR, BUILD_CONFIG)\n return (('PATH', (bin_path, os.pathsep, Var('PATH'))),)\n\n\[email protected] # pragma: win32 no cover\ndef in_env(prefix: Prefix, version: str) -> Generator[None, None, None]:\n envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version)\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None: # pragma: win32 no cover\n lang_base.assert_version_default('swift', version)\n lang_base.assert_no_additional_deps('swift', additional_dependencies)\n envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version)\n\n # Build the swift package\n os.mkdir(envdir)\n cmd_output_b(\n 'swift', 'build',\n '--package-path', prefix.prefix_dir,\n '-c', BUILD_CONFIG,\n '--build-path', os.path.join(envdir, BUILD_DIR),\n )\n", "path": "pre_commit/languages/swift.py" } ]
diff --git a/pre_commit/languages/swift.py b/pre_commit/languages/swift.py index 8250ab703..f16bb0451 100644 --- a/pre_commit/languages/swift.py +++ b/pre_commit/languages/swift.py @@ -44,7 +44,7 @@ def install_environment( os.mkdir(envdir) cmd_output_b( 'swift', 'build', - '-C', prefix.prefix_dir, + '--package-path', prefix.prefix_dir, '-c', BUILD_CONFIG, '--build-path', os.path.join(envdir, BUILD_DIR), )
biopython__biopython-2059
Duplicate files in Tests/Motif and Tests/motifs The files in Tests/motifs seem to be a copy of those in Tests/Motif plus some additional ones. Has the Motif directory been deprecated since Bio.Motif was renamed to Bio.motifs?
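A quick way to verify the duplication claim, assuming the snippet is run from the repository root (stdlib only; comparison is stat-based by default):

```
import filecmp

cmp = filecmp.dircmp('Tests/Motif', 'Tests/motifs')
print('identical files:     ', len(cmp.same_files))
print('only in Tests/Motif: ', cmp.left_only)
print('only in Tests/motifs:', cmp.right_only)
print('content differs:     ', cmp.diff_files)
```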
[ { "content": "# Copyright 2003-2009 by Bartek Wilczynski. All rights reserved.\n# Copyright 2012-2013 by Michiel JL de Hoon. All rights reserved.\n# This code is part of the Biopython distribution and governed by its\n# license. Please see the LICENSE file that should have been included\n# as part of this package.\n\"\"\"Tools for sequence motif analysis.\n\nBio.motifs contains the core Motif class containing various I/O methods\nas well as methods for motif comparisons and motif searching in sequences.\nIt also includes functionality for parsing output from the AlignACE, MEME,\nand MAST programs, as well as files in the TRANSFAC format.\n\nBio.motifs is replacing the older and now obsolete Bio.Motif module.\n\"\"\"\n\nfrom __future__ import print_function\n\nfrom Bio._py3k import range\n\n\ndef create(instances, alphabet=None):\n \"\"\"Create a Motif object.\"\"\"\n instances = Instances(instances, alphabet)\n return Motif(instances=instances, alphabet=alphabet)\n\n\ndef parse(handle, format, strict=True):\n \"\"\"Parse an output file from a motif finding program.\n\n Currently supported formats (case is ignored):\n - AlignAce: AlignAce output file format\n - MEME: MEME output file motif\n - MINIMAL: MINIMAL MEME output file motif\n - MAST: MAST output file motif\n - TRANSFAC: TRANSFAC database file format\n - pfm: JASPAR-style position-frequency matrix\n - jaspar: JASPAR-style multiple PFM format\n - sites: JASPAR-style sites file\n\n As files in the pfm and sites formats contain only a single motif,\n it is easier to use Bio.motifs.read() instead of Bio.motifs.parse()\n for those.\n\n For example:\n\n >>> from Bio import motifs\n >>> with open(\"Motif/alignace.out\") as handle:\n ... for m in motifs.parse(handle, \"AlignAce\"):\n ... print(m.consensus)\n ...\n TCTACGATTGAG\n CTGCAGCTAGCTACGAGTGAG\n GTGCTCTAAGCATAGTAGGCG\n GCCACTAGCAGAGCAGGGGGC\n CGACTCAGAGGTT\n CCACGCTAAGAGAGGTGCCGGAG\n GCGCGTCGCTGAGCA\n GTCCATCGCAAAGCGTGGGGC\n GGGATCAGAGGGCCG\n TGGAGGCGGGG\n GACCAGAGCTTCGCATGGGGG\n GGCGTGCGTG\n GCTGGTTGCTGTTCATTAGG\n GCCGGCGGCAGCTAAAAGGG\n GAGGCCGGGGAT\n CGACTCGTGCTTAGAAGG\n\n If strict is True (default), the parser will raise a ValueError if the\n file contents does not strictly comply with the specified file format.\n \"\"\"\n format = format.lower()\n if format == \"alignace\":\n from Bio.motifs import alignace\n record = alignace.read(handle)\n return record\n elif format == \"meme\":\n from Bio.motifs import meme\n record = meme.read(handle)\n return record\n elif format == \"minimal\":\n from Bio.motifs import minimal\n record = minimal.read(handle)\n return record\n elif format == \"mast\":\n from Bio.motifs import mast\n record = mast.read(handle)\n return record\n elif format == \"transfac\":\n from Bio.motifs import transfac\n record = transfac.read(handle, strict)\n return record\n elif format in ('pfm', 'sites', 'jaspar'):\n from Bio.motifs import jaspar\n record = jaspar.read(handle, format)\n return record\n else:\n raise ValueError(\"Unknown format %s\" % format)\n\n\ndef read(handle, format, strict=True):\n \"\"\"Read a motif from a handle using the specified file-format.\n\n This supports the same formats as Bio.motifs.parse(), but\n only for files containing exactly one motif. For example,\n reading a JASPAR-style pfm file:\n\n >>> from Bio import motifs\n >>> with open(\"motifs/SRF.pfm\") as handle:\n ... 
m = motifs.read(handle, \"pfm\")\n >>> m.consensus\n Seq('GCCCATATATGG', IUPACUnambiguousDNA())\n\n Or a single-motif MEME file,\n\n >>> from Bio import motifs\n >>> with open(\"motifs/meme.out\") as handle:\n ... m = motifs.read(handle, \"meme\")\n >>> m.consensus\n Seq('CTCAATCGTA', IUPACUnambiguousDNA())\n\n If the handle contains no records, or more than one record,\n an exception is raised:\n\n >>> from Bio import motifs\n >>> with open(\"motifs/alignace.out\") as handle:\n ... motif = motifs.read(handle, \"AlignAce\")\n Traceback (most recent call last):\n ...\n ValueError: More than one motif found in handle\n\n If however you want the first motif from a file containing\n multiple motifs this function would raise an exception (as\n shown in the example above). Instead use:\n\n >>> from Bio import motifs\n >>> with open(\"motifs/alignace.out\") as handle:\n ... record = motifs.parse(handle, \"alignace\")\n >>> motif = record[0]\n >>> motif.consensus\n Seq('TCTACGATTGAG', IUPACUnambiguousDNA())\n\n Use the Bio.motifs.parse(handle, format) function if you want\n to read multiple records from the handle.\n\n If strict is True (default), the parser will raise a ValueError if the\n file contents does not strictly comply with the specified file format.\n \"\"\"\n format = format.lower()\n motifs = parse(handle, format, strict)\n if len(motifs) == 0:\n raise ValueError(\"No motifs found in handle\")\n if len(motifs) > 1:\n raise ValueError(\"More than one motif found in handle\")\n motif = motifs[0]\n return motif\n\n\nclass Instances(list):\n \"\"\"Class containing a list of sequences that made the motifs.\"\"\"\n\n def __init__(self, instances=None, alphabet=None):\n \"\"\"Initialize the class.\"\"\"\n from Bio.Alphabet import IUPAC\n from Bio.Seq import Seq\n if instances is None:\n instances = []\n self.length = None\n for instance in instances:\n if self.length is None:\n self.length = len(instance)\n elif self.length != len(instance):\n message = \"All instances should have the same length (%d found, %d expected)\" % (len(instance), self.length)\n raise ValueError(message)\n try:\n a = instance.alphabet\n except AttributeError:\n # The instance is a plain string\n continue\n if alphabet is None:\n alphabet = a\n elif alphabet != a:\n raise ValueError(\"Alphabets are inconsistent\")\n if alphabet is None or alphabet.letters is None:\n # If we didn't get a meaningful alphabet from the instances,\n # assume it is DNA.\n alphabet = IUPAC.unambiguous_dna\n for instance in instances:\n if not isinstance(instance, Seq):\n sequence = str(instance)\n instance = Seq(sequence, alphabet=alphabet)\n self.append(instance)\n self.alphabet = alphabet\n\n def __str__(self):\n \"\"\"Return a string containing the sequences of the motif.\"\"\"\n text = \"\"\n for instance in self:\n text += str(instance) + \"\\n\"\n return text\n\n def count(self):\n \"\"\"Count nucleotides in a position.\"\"\"\n counts = {}\n for letter in self.alphabet.letters:\n counts[letter] = [0] * self.length\n for instance in self:\n for position, letter in enumerate(instance):\n counts[letter][position] += 1\n return counts\n\n def search(self, sequence):\n \"\"\"Find positions of motifs in a given sequence.\n\n This is a generator function, returning found positions of motif\n instances in a given sequence.\n \"\"\"\n for pos in range(0, len(sequence) - self.length + 1):\n for instance in self:\n if str(instance) == str(sequence[pos:pos + self.length]):\n yield (pos, instance)\n break # no other instance will fit (we don't 
want to return multiple hits)\n\n def reverse_complement(self):\n \"\"\"Compute reverse complement of sequences.\"\"\"\n instances = Instances(alphabet=self.alphabet)\n instances.length = self.length\n for instance in self:\n instance = instance.reverse_complement()\n instances.append(instance)\n return instances\n\n\nclass Motif(object):\n \"\"\"A class representing sequence motifs.\"\"\"\n\n def __init__(self, alphabet=None, instances=None, counts=None):\n \"\"\"Initialize the class.\"\"\"\n from . import matrix\n from Bio.Alphabet import IUPAC\n self.name = \"\"\n if counts is not None and instances is not None:\n raise Exception(ValueError,\n \"Specify either instances or counts, \"\n \"don't specify both\")\n elif counts is not None:\n if alphabet is None:\n alphabet = IUPAC.unambiguous_dna\n self.instances = None\n self.counts = matrix.FrequencyPositionMatrix(alphabet, counts)\n self.length = self.counts.length\n elif instances is not None:\n self.instances = instances\n alphabet = self.instances.alphabet\n counts = self.instances.count()\n self.counts = matrix.FrequencyPositionMatrix(alphabet, counts)\n self.length = self.counts.length\n else:\n self.counts = None\n self.instances = None\n self.length = None\n if alphabet is None:\n alphabet = IUPAC.unambiguous_dna\n self.alphabet = alphabet\n self.pseudocounts = None\n self.background = None\n self.mask = None\n\n def __get_mask(self):\n return self.__mask\n\n def __set_mask(self, mask):\n if self.length is None:\n self.__mask = ()\n elif mask is None:\n self.__mask = (1,) * self.length\n elif len(mask) != self.length:\n raise ValueError(\"The length (%d) of the mask is inconsistent with the length (%d) of the motif\", (len(mask), self.length))\n elif isinstance(mask, str):\n self.__mask = []\n for char in mask:\n if char == \"*\":\n self.__mask.append(1)\n elif char == \" \":\n self.__mask.append(0)\n else:\n raise ValueError(\"Mask should contain only '*' or ' ' and not a '%s'\" % char)\n self.__mask = tuple(self.__mask)\n else:\n self.__mask = tuple(int(bool(c)) for c in mask)\n\n mask = property(__get_mask, __set_mask)\n del __get_mask\n del __set_mask\n\n def __get_pseudocounts(self):\n return self._pseudocounts\n\n def __set_pseudocounts(self, value):\n self._pseudocounts = {}\n if isinstance(value, dict):\n self._pseudocounts = dict((letter, value[letter]) for letter in self.alphabet.letters)\n else:\n if value is None:\n value = 0.0\n self._pseudocounts = dict.fromkeys(self.alphabet.letters, value)\n\n pseudocounts = property(__get_pseudocounts, __set_pseudocounts)\n del __get_pseudocounts\n del __set_pseudocounts\n\n def __get_background(self):\n return self._background\n\n def __set_background(self, value):\n if isinstance(value, dict):\n self._background = dict((letter, value[letter]) for letter in self.alphabet.letters)\n elif value is None:\n self._background = dict.fromkeys(self.alphabet.letters, 1.0)\n else:\n if sorted(self.alphabet.letters) != [\"A\", \"C\", \"G\", \"T\"]:\n # TODO - Should this be a ValueError?\n raise Exception(\"Setting the background to a single value only \"\n \"works for DNA motifs (in which case the value \"\n \"is interpreted as the GC content\")\n self._background['A'] = (1.0 - value) / 2.0\n self._background['C'] = value / 2.0\n self._background['G'] = value / 2.0\n self._background['T'] = (1.0 - value) / 2.0\n total = sum(self._background.values())\n for letter in self.alphabet.letters:\n self._background[letter] /= total\n\n background = property(__get_background, __set_background)\n 
del __get_background\n del __set_background\n\n @property\n def pwm(self):\n \"\"\"Compute position weight matrices.\"\"\"\n return self.counts.normalize(self._pseudocounts)\n\n @property\n def pssm(self):\n \"\"\"Compute position specific scoring matrices.\"\"\"\n return self.pwm.log_odds(self._background)\n\n def __str__(self, masked=False):\n \"\"\"Return string representation of a motif.\"\"\"\n text = \"\"\n if self.instances is not None:\n text += str(self.instances)\n\n if masked:\n for i in range(self.length):\n if self.__mask[i]:\n text += \"*\"\n else:\n text += \" \"\n text += \"\\n\"\n return text\n\n def __len__(self):\n \"\"\"Return the length of a motif.\n\n Please use this method (i.e. invoke len(m)) instead of referring to m.length directly.\n \"\"\"\n if self.length is None:\n return 0\n else:\n return self.length\n\n def reverse_complement(self):\n \"\"\"Return the reverse complement of the motif as a new motif.\"\"\"\n alphabet = self.alphabet\n if self.instances is not None:\n instances = self.instances.reverse_complement()\n res = Motif(instances=instances, alphabet=alphabet)\n else: # has counts\n res = Motif(alphabet)\n res.counts = {}\n res.counts[\"A\"] = self.counts[\"T\"][::-1]\n res.counts[\"T\"] = self.counts[\"A\"][::-1]\n res.counts[\"G\"] = self.counts[\"C\"][::-1]\n res.counts[\"C\"] = self.counts[\"G\"][::-1]\n res.length = self.length\n res.__mask = self.__mask[::-1]\n return res\n\n @property\n def consensus(self):\n \"\"\"Return the consensus sequence.\"\"\"\n return self.counts.consensus\n\n @property\n def anticonsensus(self):\n \"\"\"Return the least probable pattern to be generated from this motif.\"\"\"\n return self.counts.anticonsensus\n\n @property\n def degenerate_consensus(self):\n \"\"\"Generate degenerate consesnsus sequence.\n\n Following the rules adapted from\n D. R. Cavener: \"Comparison of the consensus sequence flanking\n translational start sites in Drosophila and vertebrates.\"\n Nucleic Acids Research 15(4): 1353-1361. 
(1987).\n\n The same rules are used by TRANSFAC.\n \"\"\"\n return self.counts.degenerate_consensus\n\n def weblogo(self, fname, format=\"PNG\", version=\"2.8.2\", **kwds):\n \"\"\"Download and save a weblogo using the Berkeley weblogo service.\n\n Requires an internet connection.\n\n The parameters from ``**kwds`` are passed directly to the weblogo server.\n\n Currently, this method uses WebLogo version 3.3.\n These are the arguments and their default values passed to\n WebLogo 3.3; see their website at http://weblogo.threeplusone.com\n for more information::\n\n 'stack_width' : 'medium',\n 'stacks_per_line' : '40',\n 'alphabet' : 'alphabet_dna',\n 'ignore_lower_case' : True,\n 'unit_name' : \"bits\",\n 'first_index' : '1',\n 'logo_start' : '1',\n 'logo_end': str(self.length),\n 'composition' : \"comp_auto\",\n 'percentCG' : '',\n 'scale_width' : True,\n 'show_errorbars' : True,\n 'logo_title' : '',\n 'logo_label' : '',\n 'show_xaxis': True,\n 'xaxis_label': '',\n 'show_yaxis': True,\n 'yaxis_label': '',\n 'yaxis_scale': 'auto',\n 'yaxis_tic_interval' : '1.0',\n 'show_ends' : True,\n 'show_fineprint' : True,\n 'color_scheme': 'color_auto',\n 'symbols0': '',\n 'symbols1': '',\n 'symbols2': '',\n 'symbols3': '',\n 'symbols4': '',\n 'color0': '',\n 'color1': '',\n 'color2': '',\n 'color3': '',\n 'color4': '',\n\n \"\"\"\n from Bio._py3k import urlopen, urlencode, Request\n from Bio import Alphabet\n\n if isinstance(self.alphabet, Alphabet.ProteinAlphabet):\n alpha = \"alphabet_protein\"\n elif isinstance(self.alphabet, Alphabet.RNAAlphabet):\n alpha = \"alphabet_rna\"\n elif isinstance(self.alphabet, Alphabet.DNAAlphabet):\n alpha = \"alphabet_dna\"\n else:\n alpha = \"auto\"\n\n frequencies = self.format('transfac')\n url = 'http://weblogo.threeplusone.com/create.cgi'\n values = {'sequences': frequencies,\n 'format': format.lower(),\n 'stack_width': 'medium',\n 'stacks_per_line': '40',\n 'alphabet': alpha,\n 'ignore_lower_case': True,\n 'unit_name': \"bits\",\n 'first_index': '1',\n 'logo_start': '1',\n 'logo_end': str(self.length),\n 'composition': \"comp_auto\",\n 'percentCG': '',\n 'scale_width': True,\n 'show_errorbars': True,\n 'logo_title': '',\n 'logo_label': '',\n 'show_xaxis': True,\n 'xaxis_label': '',\n 'show_yaxis': True,\n 'yaxis_label': '',\n 'yaxis_scale': 'auto',\n 'yaxis_tic_interval': '1.0',\n 'show_ends': True,\n 'show_fineprint': True,\n 'color_scheme': 'color_auto',\n 'symbols0': '',\n 'symbols1': '',\n 'symbols2': '',\n 'symbols3': '',\n 'symbols4': '',\n 'color0': '',\n 'color1': '',\n 'color2': '',\n 'color3': '',\n 'color4': '',\n }\n\n values.update(\n dict((k, \"\" if v is False else str(v)) for k, v in kwds.items()))\n data = urlencode(values).encode(\"utf-8\")\n req = Request(url, data)\n response = urlopen(req)\n with open(fname, \"wb\") as f:\n im = response.read()\n f.write(im)\n\n def format(self, format):\n \"\"\"Return a string representation of the Motif in the given format.\n\n Currently supported fromats:\n - pfm : JASPAR single Position Frequency Matrix\n - jaspar : JASPAR multiple Position Frequency Matrix\n - transfac : TRANSFAC like files\n\n \"\"\"\n if format in ('pfm', 'jaspar'):\n from Bio.motifs import jaspar\n motifs = [self]\n return jaspar.write(motifs, format)\n elif format == \"transfac\":\n from Bio.motifs import transfac\n motifs = [self]\n return transfac.write(motifs)\n else:\n raise ValueError(\"Unknown format type %s\" % format)\n\n\ndef write(motifs, format):\n \"\"\"Return a string representation of motifs in the given format.\n\n 
Currently supported formats (case is ignored):\n - pfm : JASPAR simple single Position Frequency Matrix\n - jaspar : JASPAR multiple PFM format\n - transfac : TRANSFAC like files\n\n \"\"\"\n format = format.lower()\n if format in (\"pfm\", \"jaspar\"):\n from Bio.motifs import jaspar\n return jaspar.write(motifs, format)\n elif format == \"transfac\":\n from Bio.motifs import transfac\n return transfac.write(motifs)\n else:\n raise ValueError(\"Unknown format type %s\" % format)\n\n\nif __name__ == \"__main__\":\n from Bio._utils import run_doctest\n run_doctest(verbose=0)\n", "path": "Bio/motifs/__init__.py" } ]
[ { "content": "# Copyright 2003-2009 by Bartek Wilczynski. All rights reserved.\n# Copyright 2012-2013 by Michiel JL de Hoon. All rights reserved.\n# This code is part of the Biopython distribution and governed by its\n# license. Please see the LICENSE file that should have been included\n# as part of this package.\n\"\"\"Tools for sequence motif analysis.\n\nBio.motifs contains the core Motif class containing various I/O methods\nas well as methods for motif comparisons and motif searching in sequences.\nIt also includes functionality for parsing output from the AlignACE, MEME,\nand MAST programs, as well as files in the TRANSFAC format.\n\nBio.motifs is replacing the older and now obsolete Bio.Motif module.\n\"\"\"\n\nfrom __future__ import print_function\n\nfrom Bio._py3k import range\n\n\ndef create(instances, alphabet=None):\n \"\"\"Create a Motif object.\"\"\"\n instances = Instances(instances, alphabet)\n return Motif(instances=instances, alphabet=alphabet)\n\n\ndef parse(handle, format, strict=True):\n \"\"\"Parse an output file from a motif finding program.\n\n Currently supported formats (case is ignored):\n - AlignAce: AlignAce output file format\n - MEME: MEME output file motif\n - MINIMAL: MINIMAL MEME output file motif\n - MAST: MAST output file motif\n - TRANSFAC: TRANSFAC database file format\n - pfm: JASPAR-style position-frequency matrix\n - jaspar: JASPAR-style multiple PFM format\n - sites: JASPAR-style sites file\n\n As files in the pfm and sites formats contain only a single motif,\n it is easier to use Bio.motifs.read() instead of Bio.motifs.parse()\n for those.\n\n For example:\n\n >>> from Bio import motifs\n >>> with open(\"motifs/alignace.out\") as handle:\n ... for m in motifs.parse(handle, \"AlignAce\"):\n ... print(m.consensus)\n ...\n TCTACGATTGAG\n CTGCAGCTAGCTACGAGTGAG\n GTGCTCTAAGCATAGTAGGCG\n GCCACTAGCAGAGCAGGGGGC\n CGACTCAGAGGTT\n CCACGCTAAGAGAGGTGCCGGAG\n GCGCGTCGCTGAGCA\n GTCCATCGCAAAGCGTGGGGC\n GGGATCAGAGGGCCG\n TGGAGGCGGGG\n GACCAGAGCTTCGCATGGGGG\n GGCGTGCGTG\n GCTGGTTGCTGTTCATTAGG\n GCCGGCGGCAGCTAAAAGGG\n GAGGCCGGGGAT\n CGACTCGTGCTTAGAAGG\n\n If strict is True (default), the parser will raise a ValueError if the\n file contents does not strictly comply with the specified file format.\n \"\"\"\n format = format.lower()\n if format == \"alignace\":\n from Bio.motifs import alignace\n record = alignace.read(handle)\n return record\n elif format == \"meme\":\n from Bio.motifs import meme\n record = meme.read(handle)\n return record\n elif format == \"minimal\":\n from Bio.motifs import minimal\n record = minimal.read(handle)\n return record\n elif format == \"mast\":\n from Bio.motifs import mast\n record = mast.read(handle)\n return record\n elif format == \"transfac\":\n from Bio.motifs import transfac\n record = transfac.read(handle, strict)\n return record\n elif format in ('pfm', 'sites', 'jaspar'):\n from Bio.motifs import jaspar\n record = jaspar.read(handle, format)\n return record\n else:\n raise ValueError(\"Unknown format %s\" % format)\n\n\ndef read(handle, format, strict=True):\n \"\"\"Read a motif from a handle using the specified file-format.\n\n This supports the same formats as Bio.motifs.parse(), but\n only for files containing exactly one motif. For example,\n reading a JASPAR-style pfm file:\n\n >>> from Bio import motifs\n >>> with open(\"motifs/SRF.pfm\") as handle:\n ... 
m = motifs.read(handle, \"pfm\")\n >>> m.consensus\n Seq('GCCCATATATGG', IUPACUnambiguousDNA())\n\n Or a single-motif MEME file,\n\n >>> from Bio import motifs\n >>> with open(\"motifs/meme.out\") as handle:\n ... m = motifs.read(handle, \"meme\")\n >>> m.consensus\n Seq('CTCAATCGTA', IUPACUnambiguousDNA())\n\n If the handle contains no records, or more than one record,\n an exception is raised:\n\n >>> from Bio import motifs\n >>> with open(\"motifs/alignace.out\") as handle:\n ... motif = motifs.read(handle, \"AlignAce\")\n Traceback (most recent call last):\n ...\n ValueError: More than one motif found in handle\n\n If however you want the first motif from a file containing\n multiple motifs this function would raise an exception (as\n shown in the example above). Instead use:\n\n >>> from Bio import motifs\n >>> with open(\"motifs/alignace.out\") as handle:\n ... record = motifs.parse(handle, \"alignace\")\n >>> motif = record[0]\n >>> motif.consensus\n Seq('TCTACGATTGAG', IUPACUnambiguousDNA())\n\n Use the Bio.motifs.parse(handle, format) function if you want\n to read multiple records from the handle.\n\n If strict is True (default), the parser will raise a ValueError if the\n file contents does not strictly comply with the specified file format.\n \"\"\"\n format = format.lower()\n motifs = parse(handle, format, strict)\n if len(motifs) == 0:\n raise ValueError(\"No motifs found in handle\")\n if len(motifs) > 1:\n raise ValueError(\"More than one motif found in handle\")\n motif = motifs[0]\n return motif\n\n\nclass Instances(list):\n \"\"\"Class containing a list of sequences that made the motifs.\"\"\"\n\n def __init__(self, instances=None, alphabet=None):\n \"\"\"Initialize the class.\"\"\"\n from Bio.Alphabet import IUPAC\n from Bio.Seq import Seq\n if instances is None:\n instances = []\n self.length = None\n for instance in instances:\n if self.length is None:\n self.length = len(instance)\n elif self.length != len(instance):\n message = \"All instances should have the same length (%d found, %d expected)\" % (len(instance), self.length)\n raise ValueError(message)\n try:\n a = instance.alphabet\n except AttributeError:\n # The instance is a plain string\n continue\n if alphabet is None:\n alphabet = a\n elif alphabet != a:\n raise ValueError(\"Alphabets are inconsistent\")\n if alphabet is None or alphabet.letters is None:\n # If we didn't get a meaningful alphabet from the instances,\n # assume it is DNA.\n alphabet = IUPAC.unambiguous_dna\n for instance in instances:\n if not isinstance(instance, Seq):\n sequence = str(instance)\n instance = Seq(sequence, alphabet=alphabet)\n self.append(instance)\n self.alphabet = alphabet\n\n def __str__(self):\n \"\"\"Return a string containing the sequences of the motif.\"\"\"\n text = \"\"\n for instance in self:\n text += str(instance) + \"\\n\"\n return text\n\n def count(self):\n \"\"\"Count nucleotides in a position.\"\"\"\n counts = {}\n for letter in self.alphabet.letters:\n counts[letter] = [0] * self.length\n for instance in self:\n for position, letter in enumerate(instance):\n counts[letter][position] += 1\n return counts\n\n def search(self, sequence):\n \"\"\"Find positions of motifs in a given sequence.\n\n This is a generator function, returning found positions of motif\n instances in a given sequence.\n \"\"\"\n for pos in range(0, len(sequence) - self.length + 1):\n for instance in self:\n if str(instance) == str(sequence[pos:pos + self.length]):\n yield (pos, instance)\n break # no other instance will fit (we don't 
want to return multiple hits)\n\n def reverse_complement(self):\n \"\"\"Compute reverse complement of sequences.\"\"\"\n instances = Instances(alphabet=self.alphabet)\n instances.length = self.length\n for instance in self:\n instance = instance.reverse_complement()\n instances.append(instance)\n return instances\n\n\nclass Motif(object):\n \"\"\"A class representing sequence motifs.\"\"\"\n\n def __init__(self, alphabet=None, instances=None, counts=None):\n \"\"\"Initialize the class.\"\"\"\n from . import matrix\n from Bio.Alphabet import IUPAC\n self.name = \"\"\n if counts is not None and instances is not None:\n raise Exception(ValueError,\n \"Specify either instances or counts, \"\n \"don't specify both\")\n elif counts is not None:\n if alphabet is None:\n alphabet = IUPAC.unambiguous_dna\n self.instances = None\n self.counts = matrix.FrequencyPositionMatrix(alphabet, counts)\n self.length = self.counts.length\n elif instances is not None:\n self.instances = instances\n alphabet = self.instances.alphabet\n counts = self.instances.count()\n self.counts = matrix.FrequencyPositionMatrix(alphabet, counts)\n self.length = self.counts.length\n else:\n self.counts = None\n self.instances = None\n self.length = None\n if alphabet is None:\n alphabet = IUPAC.unambiguous_dna\n self.alphabet = alphabet\n self.pseudocounts = None\n self.background = None\n self.mask = None\n\n def __get_mask(self):\n return self.__mask\n\n def __set_mask(self, mask):\n if self.length is None:\n self.__mask = ()\n elif mask is None:\n self.__mask = (1,) * self.length\n elif len(mask) != self.length:\n raise ValueError(\"The length (%d) of the mask is inconsistent with the length (%d) of the motif\", (len(mask), self.length))\n elif isinstance(mask, str):\n self.__mask = []\n for char in mask:\n if char == \"*\":\n self.__mask.append(1)\n elif char == \" \":\n self.__mask.append(0)\n else:\n raise ValueError(\"Mask should contain only '*' or ' ' and not a '%s'\" % char)\n self.__mask = tuple(self.__mask)\n else:\n self.__mask = tuple(int(bool(c)) for c in mask)\n\n mask = property(__get_mask, __set_mask)\n del __get_mask\n del __set_mask\n\n def __get_pseudocounts(self):\n return self._pseudocounts\n\n def __set_pseudocounts(self, value):\n self._pseudocounts = {}\n if isinstance(value, dict):\n self._pseudocounts = dict((letter, value[letter]) for letter in self.alphabet.letters)\n else:\n if value is None:\n value = 0.0\n self._pseudocounts = dict.fromkeys(self.alphabet.letters, value)\n\n pseudocounts = property(__get_pseudocounts, __set_pseudocounts)\n del __get_pseudocounts\n del __set_pseudocounts\n\n def __get_background(self):\n return self._background\n\n def __set_background(self, value):\n if isinstance(value, dict):\n self._background = dict((letter, value[letter]) for letter in self.alphabet.letters)\n elif value is None:\n self._background = dict.fromkeys(self.alphabet.letters, 1.0)\n else:\n if sorted(self.alphabet.letters) != [\"A\", \"C\", \"G\", \"T\"]:\n # TODO - Should this be a ValueError?\n raise Exception(\"Setting the background to a single value only \"\n \"works for DNA motifs (in which case the value \"\n \"is interpreted as the GC content\")\n self._background['A'] = (1.0 - value) / 2.0\n self._background['C'] = value / 2.0\n self._background['G'] = value / 2.0\n self._background['T'] = (1.0 - value) / 2.0\n total = sum(self._background.values())\n for letter in self.alphabet.letters:\n self._background[letter] /= total\n\n background = property(__get_background, __set_background)\n 
del __get_background\n del __set_background\n\n @property\n def pwm(self):\n \"\"\"Compute position weight matrices.\"\"\"\n return self.counts.normalize(self._pseudocounts)\n\n @property\n def pssm(self):\n \"\"\"Compute position specific scoring matrices.\"\"\"\n return self.pwm.log_odds(self._background)\n\n def __str__(self, masked=False):\n \"\"\"Return string representation of a motif.\"\"\"\n text = \"\"\n if self.instances is not None:\n text += str(self.instances)\n\n if masked:\n for i in range(self.length):\n if self.__mask[i]:\n text += \"*\"\n else:\n text += \" \"\n text += \"\\n\"\n return text\n\n def __len__(self):\n \"\"\"Return the length of a motif.\n\n Please use this method (i.e. invoke len(m)) instead of referring to m.length directly.\n \"\"\"\n if self.length is None:\n return 0\n else:\n return self.length\n\n def reverse_complement(self):\n \"\"\"Return the reverse complement of the motif as a new motif.\"\"\"\n alphabet = self.alphabet\n if self.instances is not None:\n instances = self.instances.reverse_complement()\n res = Motif(instances=instances, alphabet=alphabet)\n else: # has counts\n res = Motif(alphabet)\n res.counts = {}\n res.counts[\"A\"] = self.counts[\"T\"][::-1]\n res.counts[\"T\"] = self.counts[\"A\"][::-1]\n res.counts[\"G\"] = self.counts[\"C\"][::-1]\n res.counts[\"C\"] = self.counts[\"G\"][::-1]\n res.length = self.length\n res.__mask = self.__mask[::-1]\n return res\n\n @property\n def consensus(self):\n \"\"\"Return the consensus sequence.\"\"\"\n return self.counts.consensus\n\n @property\n def anticonsensus(self):\n \"\"\"Return the least probable pattern to be generated from this motif.\"\"\"\n return self.counts.anticonsensus\n\n @property\n def degenerate_consensus(self):\n \"\"\"Generate degenerate consesnsus sequence.\n\n Following the rules adapted from\n D. R. Cavener: \"Comparison of the consensus sequence flanking\n translational start sites in Drosophila and vertebrates.\"\n Nucleic Acids Research 15(4): 1353-1361. 
(1987).\n\n The same rules are used by TRANSFAC.\n \"\"\"\n return self.counts.degenerate_consensus\n\n def weblogo(self, fname, format=\"PNG\", version=\"2.8.2\", **kwds):\n \"\"\"Download and save a weblogo using the Berkeley weblogo service.\n\n Requires an internet connection.\n\n The parameters from ``**kwds`` are passed directly to the weblogo server.\n\n Currently, this method uses WebLogo version 3.3.\n These are the arguments and their default values passed to\n WebLogo 3.3; see their website at http://weblogo.threeplusone.com\n for more information::\n\n 'stack_width' : 'medium',\n 'stacks_per_line' : '40',\n 'alphabet' : 'alphabet_dna',\n 'ignore_lower_case' : True,\n 'unit_name' : \"bits\",\n 'first_index' : '1',\n 'logo_start' : '1',\n 'logo_end': str(self.length),\n 'composition' : \"comp_auto\",\n 'percentCG' : '',\n 'scale_width' : True,\n 'show_errorbars' : True,\n 'logo_title' : '',\n 'logo_label' : '',\n 'show_xaxis': True,\n 'xaxis_label': '',\n 'show_yaxis': True,\n 'yaxis_label': '',\n 'yaxis_scale': 'auto',\n 'yaxis_tic_interval' : '1.0',\n 'show_ends' : True,\n 'show_fineprint' : True,\n 'color_scheme': 'color_auto',\n 'symbols0': '',\n 'symbols1': '',\n 'symbols2': '',\n 'symbols3': '',\n 'symbols4': '',\n 'color0': '',\n 'color1': '',\n 'color2': '',\n 'color3': '',\n 'color4': '',\n\n \"\"\"\n from Bio._py3k import urlopen, urlencode, Request\n from Bio import Alphabet\n\n if isinstance(self.alphabet, Alphabet.ProteinAlphabet):\n alpha = \"alphabet_protein\"\n elif isinstance(self.alphabet, Alphabet.RNAAlphabet):\n alpha = \"alphabet_rna\"\n elif isinstance(self.alphabet, Alphabet.DNAAlphabet):\n alpha = \"alphabet_dna\"\n else:\n alpha = \"auto\"\n\n frequencies = self.format('transfac')\n url = 'http://weblogo.threeplusone.com/create.cgi'\n values = {'sequences': frequencies,\n 'format': format.lower(),\n 'stack_width': 'medium',\n 'stacks_per_line': '40',\n 'alphabet': alpha,\n 'ignore_lower_case': True,\n 'unit_name': \"bits\",\n 'first_index': '1',\n 'logo_start': '1',\n 'logo_end': str(self.length),\n 'composition': \"comp_auto\",\n 'percentCG': '',\n 'scale_width': True,\n 'show_errorbars': True,\n 'logo_title': '',\n 'logo_label': '',\n 'show_xaxis': True,\n 'xaxis_label': '',\n 'show_yaxis': True,\n 'yaxis_label': '',\n 'yaxis_scale': 'auto',\n 'yaxis_tic_interval': '1.0',\n 'show_ends': True,\n 'show_fineprint': True,\n 'color_scheme': 'color_auto',\n 'symbols0': '',\n 'symbols1': '',\n 'symbols2': '',\n 'symbols3': '',\n 'symbols4': '',\n 'color0': '',\n 'color1': '',\n 'color2': '',\n 'color3': '',\n 'color4': '',\n }\n\n values.update(\n dict((k, \"\" if v is False else str(v)) for k, v in kwds.items()))\n data = urlencode(values).encode(\"utf-8\")\n req = Request(url, data)\n response = urlopen(req)\n with open(fname, \"wb\") as f:\n im = response.read()\n f.write(im)\n\n def format(self, format):\n \"\"\"Return a string representation of the Motif in the given format.\n\n Currently supported fromats:\n - pfm : JASPAR single Position Frequency Matrix\n - jaspar : JASPAR multiple Position Frequency Matrix\n - transfac : TRANSFAC like files\n\n \"\"\"\n if format in ('pfm', 'jaspar'):\n from Bio.motifs import jaspar\n motifs = [self]\n return jaspar.write(motifs, format)\n elif format == \"transfac\":\n from Bio.motifs import transfac\n motifs = [self]\n return transfac.write(motifs)\n else:\n raise ValueError(\"Unknown format type %s\" % format)\n\n\ndef write(motifs, format):\n \"\"\"Return a string representation of motifs in the given format.\n\n 
Currently supported formats (case is ignored):\n - pfm : JASPAR simple single Position Frequency Matrix\n - jaspar : JASPAR multiple PFM format\n - transfac : TRANSFAC like files\n\n \"\"\"\n format = format.lower()\n if format in (\"pfm\", \"jaspar\"):\n from Bio.motifs import jaspar\n return jaspar.write(motifs, format)\n elif format == \"transfac\":\n from Bio.motifs import transfac\n return transfac.write(motifs)\n else:\n raise ValueError(\"Unknown format type %s\" % format)\n\n\nif __name__ == \"__main__\":\n from Bio._utils import run_doctest\n run_doctest(verbose=0)\n", "path": "Bio/motifs/__init__.py" } ]
diff --git a/Bio/motifs/__init__.py b/Bio/motifs/__init__.py index 982279765df..682cff9e25c 100644 --- a/Bio/motifs/__init__.py +++ b/Bio/motifs/__init__.py @@ -44,7 +44,7 @@ def parse(handle, format, strict=True): For example: >>> from Bio import motifs - >>> with open("Motif/alignace.out") as handle: + >>> with open("motifs/alignace.out") as handle: ... for m in motifs.parse(handle, "AlignAce"): ... print(m.consensus) ... diff --git a/Doc/Tutorial/chapter_motifs.tex b/Doc/Tutorial/chapter_motifs.tex index 70d9e84b1dc..dfaadc0e1de 100644 --- a/Doc/Tutorial/chapter_motifs.tex +++ b/Doc/Tutorial/chapter_motifs.tex @@ -25,7 +25,7 @@ \section{Motif objects} Since we are interested in motif analysis, we need to take a look at \verb|Motif| objects in the first place. For that we need to import the Bio.motifs library: -%doctest ../Tests/Motif +%doctest ../Tests/motifs \begin{verbatim} >>> from Bio import motifs \end{verbatim} @@ -1452,7 +1452,7 @@ \subsection{MEME} \verb|meme.out|. You can retrieve the motifs reported by MEME by running the following piece of code: -%doctest ../Tests/Motif +%doctest ../Tests/motifs \begin{verbatim} >>> from Bio import motifs >>> with open("meme.out") as handle: diff --git a/Tests/Motif/Arnt.sites b/Tests/Motif/Arnt.sites deleted file mode 100644 index c460a8efde0..00000000000 --- a/Tests/Motif/Arnt.sites +++ /dev/null @@ -1,40 +0,0 @@ ->MA0004 ARNT 1 -CACGTGatgtcctc ->MA0004 ARNT 2 -CACGTGggaggtac ->MA0004 ARNT 3 -CACGTGccgcgcgc ->MA0004 ARNT 4 -CACGTGaagttgtc ->MA0004 ARNT 5 -taaatgcCACGTG ->MA0004 ARNT 6 -aggtataCACGTG ->MA0004 ARNT 7 -agtCACGTGttcc ->MA0004 ARNT 8 -gggatCACGTGgt ->MA0004 ARNT 9 -gggtCACGTGttc ->MA0004 ARNT 10 -catgtCACGTGcc ->MA0004 ARNT 11 -agttcgCACGTGc ->MA0004 ARNT 12 -taagCACGTGgtc ->MA0004 ARNT 13 -tgaatacCACGTG ->MA0004 ARNT 14 -tgaCACGTGtccg ->MA0004 ARNT 15 -attgtgCACGTGg ->MA0004 ARNT 16 -AACGTGacttcgtacc ->MA0004 ARNT 17 -AACGTGcgtgatgtcc ->MA0004 ARNT 18 -AACGTGacagccctcc ->MA0004 ARNT 19 -AACGTGcacatcgtcc ->MA0004 ARNT 20 -aggaatCGCGTGc diff --git a/Tests/Motif/REB1.pfm b/Tests/Motif/REB1.pfm deleted file mode 100644 index 8a6b4923967..00000000000 --- a/Tests/Motif/REB1.pfm +++ /dev/null @@ -1,5 +0,0 @@ -30 0 0 100 0 0 0 0 15 -10 0 0 0 100 100 100 0 15 -50 0 0 0 0 0 0 60 55 -10 100 100 0 0 0 0 40 15 - diff --git a/Tests/Motif/SRF.pfm b/Tests/Motif/SRF.pfm deleted file mode 100644 index 09134483a47..00000000000 --- a/Tests/Motif/SRF.pfm +++ /dev/null @@ -1,4 +0,0 @@ - 2 9 0 1 32 3 46 1 43 15 2 2 - 1 33 45 45 1 1 0 0 0 1 0 1 -39 2 1 0 0 0 0 0 0 0 44 43 - 4 2 0 0 13 42 0 45 3 30 0 0 diff --git a/Tests/Motif/alignace.out b/Tests/Motif/alignace.out deleted file mode 100644 index a577bc480ba..00000000000 --- a/Tests/Motif/alignace.out +++ /dev/null @@ -1,354 +0,0 @@ -AlignACE 4.0 05/13/04 -./AlignACE -i test.fa -Parameter values: - expect = 10 - gcback = 0.38 - minpass = 200 - seed = 1227623309 - numcols = 10 - undersample = 1 - oversample = 1 - -Input sequences: -#0 SEQ1; M: CTCAATCGTAGA at 52 -#1 SEQ2; M: CTCAATCGTAGA at 172 -#2 SEQ3; M: CTCAATCGTAGA at 112 -#3 SEQ4; M: CTCAATCGTAGA at 173 -#4 SEQ5; M: CTCAATCGTAGA at 185 -#5 SEQ6; M: CTCAATCGTAGA at 105 -#6 SEQ7; M: CTCAATCGTAGA at 177 -#7 SEQ8; M: CTCAATCGTAGA at 172 -#8 SEQ9; M: CTCAATCGTAGA at 93 -#9 SEQ10; M: CTCAATCGTAGA at 3 - -Motif 1 -TCTACGATTGAG 0 51 0 -TCTACGATTGAG 1 171 0 -TCTACGATTGAG 2 111 0 -TCTACGATTGAG 3 172 0 -TCTACGATTGAG 4 184 0 -TCTACGATTGAG 5 104 0 -TCTACGATTGAG 6 176 0 -TCTACGATTGAG 7 171 0 -TCTACGATTGAG 8 92 0 -TCAAAGATAGAG 8 155 1 -TCTACGATTGAG 9 2 0 -** 
***** *** -MAP Score: 57.9079 - -Motif 2 -GCGAAGGAAGCAGCGCGTGTG 0 7 1 -GGCACCGCCTCTACGATTGAG 0 51 0 -CAGAGCTTAGCATTGAACGCG 0 93 0 -CTAATGAAAGCAATGAGAGTG 0 154 1 -CTTGTGCCCTCTAAGCGTCCG 1 73 1 -GAGCACGACGCTTTGTACCTG 1 153 0 -CGGCACTTAGCAGCGTATCGT 2 36 0 -CTGGTTTCATCTACGATTGAG 2 111 0 -GGGCCAATAGCGGCGCCGGAG 2 133 0 -GTGGAGTTATCTTAGTGCGCG 2 158 0 -GAGAGGTTATCTACGATTGAG 3 172 0 -CTGCTCCCCGCATACAGCGCG 4 62 0 -CAGAACCGAGGTCCGGTACGG 4 157 1 -GTGCCCCAAGCTTACCCAGGG 5 40 1 -CGCCTCTGATCTACGATTGAG 5 104 0 -GTGCTCATAGGGACGTCGCGG 6 2 1 -CTGCCCCCCGCATAGTAGGGG 6 45 1 -GTAAAGAAATCGATGTGCCAG 6 72 0 -CACCTGCAATTGCTGGCAGCG 6 128 0 -GGCGGGCCATCCCTGTATGAA 8 65 0 -CTCCAGGTCGCATGGAGAGAG 9 89 1 -CCTCGGATCGCTTGGGAAGAG 9 134 0 -* ** * *** * * * -MAP Score: 19.6235 - -Motif 3 -GTGCGCGAAGGAAGCAGCGCG 0 3 1 -CAGAGCTTAGCATTGAACGCG 0 93 0 -GTGCCCGATGACCACCCGTCG 0 117 0 -GCCCTCTAAGCGTCCGCGGAT 1 78 1 -GAGCACGACGCTTTGTACCTG 1 153 0 -CGGCACTTAGCAGCGTATCGT 2 36 0 -GGGCCAATAGCGGCGCCGGAG 2 133 0 -GCGCACTAAGATAACTCCACG 2 159 1 -CGGCCCGTTGTCCAGCAGACG 3 2 0 -CTGCTCCCCGCATACAGCGCG 4 62 0 -GTGCCCCAAGCTTACCCAGGG 5 40 1 -GTGCTCATAGGGACGTCGCGG 6 2 1 -CTGCCCCCCGCATAGTAGGGG 6 45 1 -CGCCGCCATGCGACGCAGAGG 8 39 0 -AACCTCTAAGCATACTCTACG 9 8 0 -GACCTGGAGGCTTAGACTTGG 9 77 0 -GCGCTCTTCCCAAGCGATCCG 9 131 1 -GGGCCGTCAGCTCTCAAGTCT 9 153 1 -* ** * ** * * ** -MAP Score: 19.1804 - -Motif 4 -GCCCCAAGCTTACCCAGGGAC 5 42 1 -GCCGTCTGCTGGACAACGGGC 3 0 1 -GCCGACGGGTGGTCATCGGGC 0 115 1 -GCCAATAGCGGCGCCGGAGTC 2 131 0 -GCCCCCCGCATAGTAGGGGGA 6 47 1 -GCCCGTACCGGACCTCGGTTC 4 159 0 -GCCTCATGTACCGGAAGGGAC 3 24 1 -GACACGCGCCTGGGAGGGTTC 2 11 1 -GCCTTTGGCCTTGGATGAGAA 7 76 0 -GGCCCTCGGATCGCTTGGGAA 9 137 0 -GCATGTTGGGAATCCGCGGAC 1 89 0 -GACACGCGCTGTATGCGGGGA 4 58 1 -GCCAGGTACAAAGCGTCGTGC 1 151 1 -GCGATCAGCTTGTGGGCGTGC 4 82 1 -GACAAATCGGATACTGGGGCA 3 75 0 -GCACTTAGCAGCGTATCGTTA 2 34 0 -*** ** * *** * -MAP Score: 18.0097 - -Motif 5 -CGGCACAGAGCTT 0 106 0 -ATCCGCGGACGCT 1 86 0 -CGCCTGGGAGGGT 2 17 1 -CGGAAGGGACGTT 3 35 1 -ACACACAGACGGT 3 122 0 -TGCCAGAGAGGTT 3 185 0 -AGACTGAGACGTT 4 114 1 -AATCGTAGAGGAT 4 187 1 -CGTCTCGTAGGGT 5 61 0 -CGTCGCGGAGGAT 6 15 1 -CTTCTTAGACGCT 6 119 1 -CGACGCAGAGGAT 8 37 0 -ATGCTTAGAGGTT 9 16 1 -AGACTTGGGCGAT 9 72 0 -CGACCTGGAGGCT 9 86 0 -** * ****** * -MAP Score: 16.8287 - -Motif 6 -GTGCGCGAAGGAAGCAGCGCGTG 0 3 1 -TTGAGCCGAGTAAAGGGCTGGTG 0 33 0 -CAATGCTAAGCTCTGTGCCGACG 0 99 1 -CAACTCTCTATGTAGTGCCCGAG 1 28 0 -CGACGCTTTGTACCTGGCTTGCG 1 146 0 -CGAGTCAATGACACGCGCCTGGG 2 2 1 -CGATACGCTGCTAAGTGCCGTCC 2 37 1 -CCGGGCCAATAGCGGCGCCGGAG 2 133 0 -CCACGCTTCGACACGTGGTATAG 2 175 1 -CCGAGCCTCATGTACCGGAAGGG 3 20 1 -CTGCTCCCCGCATACAGCGCGTG 4 60 0 -CCGAGGTCCGGTACGGGCAAGCC 4 162 1 -GTGCTCATAGGGACGTCGCGGAG 6 2 1 -CCCTACTATGCGGGGGGCAGGTC 6 42 0 -GCCAGCAATTGCAGGTGGTCGTG 6 132 1 -CTCTGCGTCGCATGGCGGCGTGG 8 40 1 -GGAGGCTTAGACTTGGGCGATAC 9 70 0 -GCATGGAGAGAGATCCGGAGGAG 9 98 1 -* * ** * * ** * * -MAP Score: 15.0441 - -Motif 7 -GCGCGTGTGTGTAAC 0 19 1 -GCACAGAGCTTAGCA 0 102 0 -GGTGGTCATCGGGCA 0 122 1 -GCGCGTGTCATTGAC 2 5 0 -GGACGGCACTTAGCA 2 45 0 -GCGCGTCCCGGGCCA 2 148 0 -GCTCGGCCCGTTGTC 3 11 0 -GCGCGTGTCCTTTAA 4 52 0 -GCTGATCGCTGCTCC 4 76 0 -GCCCGTACCGGACCT 4 165 0 -GGACGTCGCGGAGGA 6 12 1 -GCGGGGGGCAGGTCA 6 41 0 -GGACGTACTGGCACA 6 65 1 -GCAGGTGGTCGTGCA 6 142 1 -GCGCATACCTTAACA 7 21 0 -GCACGGGACTTCAAC 7 38 0 -GCACGTAGCTGGTAA 7 116 0 -GCTCGTCTATGGTCA 8 139 0 -GCGCATGCTGGATCC 9 120 0 -GGCCGTCAGCTCTCA 9 154 1 -** **** * * ** -MAP Score: 13.3145 - -Motif 8 -GAACCGAGGTCCGGTACGGGC 4 159 1 -GCCCCCCGCATAGTAGGGGGA 6 47 1 -GTCCCTGGGTAAGCTTGGGGC 5 42 0 -ACTCCACGCTTCGACACGTGG 2 172 1 
-ATCCTCTGCGTCGCATGGCGG 8 37 1 -GTTCAATGCTAAGCTCTGTGC 0 96 1 -GCTCATAGGGACGTCGCGGAG 6 4 1 -GTCCCGGGCCAATAGCGGCGC 2 138 0 -GCACTTAGCAGCGTATCGTTA 2 34 0 -GGCCCTCGGATCGCTTGGGAA 9 137 0 -CTGCTGGACAACGGGCCGAGC 3 5 1 -GGGCACTACATAGAGAGTTGC 1 31 1 -AGCCTCCAGGTCGCATGGAGA 9 86 1 -AATCGTAGATCAGAGGCGAGA 5 107 1 -GAACTCCACTAAGACTTGAGA 9 164 0 -GAGCAGCGATCAGCTTGTGGG 4 77 1 -GCCAGGTACAAAGCGTCGTGC 1 151 1 -AGTCAATGACACGCGCCTGGG 2 4 1 -GGTCATGGAATCTTATGTAGC 4 5 1 -GTAGATAACAGAGGTCGGGGG 1 178 1 -* * ** ** ** ** -MAP Score: 11.6098 - -Motif 9 -CCGAGTAAAGGGCTG 0 36 0 -GTGGTCATCGGGCAC 0 123 1 -GATAACAGAGGTCGG 1 181 1 -CGGCGCCGGAGTCTG 2 129 0 -GCGCGTCCCGGGCCA 2 148 0 -CTGGACAACGGGCCG 3 8 1 -CGGATACTGGGGCAG 3 74 0 -GGGAGCAGCGATCAG 4 75 1 -CAGAACCGAGGTCCG 4 157 1 -GGGTCCCTGGGTAAG 5 50 0 -GTGCTCATAGGGACG 6 2 1 -GAGATCCGGAGGAGG 9 107 1 -GCGATCCGAGGGCCG 9 144 1 -GAGTTCACATGGCTG 9 179 1 -* * ** ***** * -MAP Score: 11.2943 - -Motif 10 -TAGAGGCGGTG 0 59 1 -GCTAAGCTCTG 0 103 1 -TGGAAGCAGTG 1 121 1 -GCGAGGCTGTG 1 138 0 -ACGACGCTTTG 1 159 0 -GGGACGCGCAC 2 154 1 -TCGAAGCGTGG 2 175 0 -TGTATGCGGGG 4 67 1 -GGTAAGCTTGG 5 45 0 -TGTACGCTGGG 5 148 1 -ACTATGCGGGG 6 50 0 -GGTATGCGCTG 7 27 1 -GGTACCCGGAG 7 157 1 -GCGACGCAGAG 8 40 0 -TGGCGGCGTGG 8 52 1 -TCTAGGCGGGC 8 79 0 -AGTATGCTTAG 9 13 1 -TGGAGGCTTAG 9 83 0 -**** ****** -MAP Score: 9.7924 - -Motif 11 -GCACAGAGCTTAGCATTGAAC 0 96 0 -GTCCGCGGATTCCCAACATGC 1 89 1 -ATACACAGCCTCGCAAGCCAG 1 135 1 -GGCCCGGGACGCGCACTAAGA 2 149 1 -GCCCGTTGTCCAGCAGACGGC 3 0 0 -GAGCAGCGATCAGCTTGTGGG 4 77 1 -GAACCGAGGTCCGGTACGGGC 4 159 1 -GTCCCTGGGTAAGCTTGGGGC 5 42 0 -GACCTGCCCCCCGCATAGTAG 6 42 1 -AACCAGCGCATACCTTAACAG 7 20 0 -ATCCTCTGCGTCGCATGGCGG 8 37 1 -GACCATAGACGAGCATCAAAG 8 140 1 -GGCCCTCGGATCGCTTGGGAA 9 137 0 -* ** * **** ** -MAP Score: 9.01393 - -Motif 12 -GCCGTCCGTC 2 53 1 -GGCGTGCGCG 0 0 1 -GGCGCGTGTC 2 11 0 -AGCGCGTGTG 0 18 1 -GCGGTGCGTG 8 108 0 -AGCGCGTGTC 4 58 0 -AGCGTCCGCG 1 86 1 -ACCGTCTGTG 3 122 1 -GCCATGCGAC 8 46 0 -ACCACCCGTC 0 118 0 -GGCGCCGGAG 2 133 0 -ACCACGTGTC 2 184 0 -GGCTTGCGAG 1 144 0 -GCGATCCGAG 9 144 1 -AGTGCGCGTC 2 156 0 -AGTGCCCGAG 1 28 0 -********** -MAP Score: 7.51121 - -Motif 13 -GCCGACGGGTGGTCATCGGG 0 115 1 -GCACGACGCTTTGTACCTGG 1 152 0 -CCTGGGAGGGTTCAATAACG 2 19 1 -GCGCGTCCCGGGCCAATAGC 2 143 0 -GCCGTCTGCTGGACAACGGG 3 0 1 -GTCCCTTCCGGTACATGAGG 3 25 0 -GCTGCTCCCCGCATACAGCG 4 64 0 -GCCCCAAGCTTACCCAGGGA 5 42 1 -ACCGGCTGACGCTAATACGG 5 84 0 -GCGGGGGGCAGGTCATTACA 6 36 0 -GCTGGCAGCGTCTAAGAAGG 6 118 0 -GCAGGTGGTCGTGCAATACG 6 142 1 -GCTGGTTGAAGTCCCGTGCG 7 34 1 -GCACGTAGCTGGTAAATAGG 7 111 0 -GCGGCGTGGATTTCATACAG 8 54 1 -CCTGGAGGCTTAGACTTGGG 9 76 0 -** ** ** * * ** -MAP Score: 5.63667 - -Motif 14 -GCCGACGGGTGGTCATCGGG 0 115 1 -ATCCGCGGACGCTTAGAGGG 1 79 0 -ACGCTTTGTACCTGGCTTGC 1 147 0 -ACGGACGGCACTTAGCAGCG 2 42 0 -GCCGTCTGCTGGACAACGGG 3 0 1 -ACACACAGACGGTTGAAAGG 3 115 0 -GCCGATAGTGCTTAAGTTCG 3 147 1 -CTTGCCCGTACCGGACCTCG 4 163 0 -ACCGGCTGACGCTAATACGG 5 84 0 -GCCCCCCGCATAGTAGGGGG 6 47 1 -GCTGGCAGCGTCTAAGAAGG 6 118 0 -GCAGGTGGTCGTGCAATACG 6 142 1 -ACGCACGGGACTTCAACCAG 7 35 0 -GCACGTAGCTGGTAAATAGG 7 111 0 -ATCCTCTGCGTCGCATGGCG 8 37 1 -** * * * * * * ** -MAP Score: 3.89842 - -Motif 15 -GAGGCTGTGTAT 1 135 0 -GAGGTCGGGGGT 1 188 1 -GACGGACGGCAC 2 51 0 -TTGGCCCGGGAC 2 147 1 -GAGGCTCGGCCC 3 17 0 -CACGCGCTGTAT 4 60 1 -TAGGCCAGGTAT 4 127 0 -GAGGTCCGGTAC 4 164 1 -TACGCTGGGGAT 5 150 1 -GTCGCGGAGGAT 6 16 1 -TACGCACGGGAC 7 44 0 -TACTCCGGGTAC 7 158 0 -GACGCAGAGGAT 8 37 0 -TAGGCGGGCCAT 8 76 0 -***** *** ** -MAP Score: 3.33444 - -Motif 16 -CGGCTCAATCGTAGAGGC 0 48 1 -CGACGGGTGGTCATCGGG 0 117 1 
-CGCTTAGAGGGCACAAGC 1 72 0 -TGACACGCGCCTGGGAGG 2 10 1 -CGATACGCTGCTAAGTGC 2 37 1 -CGTCCCGGGCCAATAGCG 2 142 0 -CCACGCTTCGACACGTGG 2 175 1 -CGTCTGCTGGACAACGGG 3 2 1 -ACACAGACGGTTGAAAGG 3 115 0 -TGCTCCCCGCATACAGCG 4 64 0 -TGAGGCTTGCCCGTACCG 4 170 0 -TGCCCCAAGCTTACCCAG 5 41 1 -CGGCTGACGCTAATACGG 5 84 0 -CGCGACGTCCCTATGAGC 6 4 0 -TGCCCCCCGCATAGTAGG 6 46 1 -CGTTGCCTTCTTAGACGC 6 113 1 -TGACTCAATCGTAGACCC 6 173 1 -AGTCCCGTGCGTATGTGG 7 43 1 -AGGCTCGCACGTAGCTGG 7 119 0 -CCACGCCGCCATGCGACG 8 45 0 -AGCCTCCAGGTCGCATGG 9 86 1 -** * * ** ** ** -MAP Score: 1.0395 - diff --git a/Tests/Motif/mast.dna.oops.txt b/Tests/Motif/mast.dna.oops.txt deleted file mode 100644 index 250c40f026e..00000000000 --- a/Tests/Motif/mast.dna.oops.txt +++ /dev/null @@ -1,301 +0,0 @@ -******************************************************************************** -MAST - Motif Alignment and Search Tool -******************************************************************************** - MAST version 3.0 (Release date: 2004/08/18 09:07:01) - - For further information on how to interpret these results or to get - a copy of the MAST software please access http://meme.sdsc.edu. -******************************************************************************** - - -******************************************************************************** -REFERENCE -******************************************************************************** - If you use this program in your research, please cite: - - Timothy L. Bailey and Michael Gribskov, - "Combining evidence using p-values: application to sequence homology - searches", Bioinformatics, 14(48-54), 1998. -******************************************************************************** - - -******************************************************************************** -DATABASE AND MOTIFS -******************************************************************************** - DATABASE INO_up800.s (nucleotide) - Last updated on Mon Aug 16 21:19:59 2004 - Database contains 7 sequences, 5600 residues - - Scores for positive and reverse complement strands are combined. - - MOTIFS meme.INO_up800.oops.txt (nucleotide) - MOTIF WIDTH BEST POSSIBLE MATCH - ----- ----- ------------------- - 1 12 TTCACATGCCGC - 2 10 TCTGGCACAG - - PAIRWISE MOTIF CORRELATIONS: - MOTIF 1 - ----- ----- - 2 0.32 - No overly similar pairs (correlation > 0.60) found. - - Random model letter frequencies (from non-redundant database): - A 0.281 C 0.222 G 0.229 T 0.267 -******************************************************************************** - - -******************************************************************************** -SECTION I: HIGH-SCORING SEQUENCES -******************************************************************************** - - Each of the following 7 sequences has E-value less than 10. - - The E-value of a sequence is the expected number of sequences - in a random database of the same size that would match the motifs as - well as the sequence does and is equal to the combined p-value of the - sequence times the number of sequences in the database. - - The combined p-value of a sequence measures the strength of the - match of the sequence to all the motifs and is calculated by - o finding the score of the single best match of each motif - to the sequence (best matches may overlap), - o calculating the sequence p-value of each score, - o forming the product of the p-values, - o taking the p-value of the product. 
- - The sequence p-value of a score is defined as the - probability of a random sequence of the same length containing - some match with as good or better a score. - - The score for the match of a position in a sequence to a motif - is computed by by summing the appropriate entry from each column of - the position-dependent scoring matrix that represents the motif. - - Sequences shorter than one or more of the motifs are skipped. - - The table is sorted by increasing E-value. -******************************************************************************** - -SEQUENCE NAME DESCRIPTION E-VALUE LENGTH -------------- ----------- -------- ------ -ACC1 sequence of the region up... 6.1e-05 800 -CHO1 sequence of the region up... 0.00016 800 -INO1 sequence of the region up... 0.00019 800 -FAS1 sequence of the region up... 0.00022 800 -OPI3 sequence of the region up... 0.00092 800 -CHO2 sequence of the region up... 0.0029 800 -FAS2 sequence of the region up... 0.0093 800 - -******************************************************************************** - - - -******************************************************************************** -SECTION II: MOTIF DIAGRAMS -******************************************************************************** - - The ordering and spacing of all non-overlapping motif occurrences - are shown for each high-scoring sequence listed in Section I. - - A motif occurrence is defined as a position in the sequence whose - match to the motif has POSITION p-value less than 0.0001. - - The POSITION p-value of a match is the probability of - a single random subsequence of the length of the motif - scoring at least as well as the observed match. - - For each sequence, all motif occurrences are shown unless there - are overlaps. In that case, a motif occurrence is shown only if its - p-value is less than the product of the p-values of the other - (lower-numbered) motif occurrences that it overlaps. - - The table also shows the E-value of each sequence. - - Spacers and motif occurences are indicated by - o -d- `d' residues separate the end of the preceding motif - occurrence and the start of the following motif occurrence - o [sn] occurrence of motif `n' with p-value less than 0.0001. - A minus sign indicates that the occurrence is on the - reverse complement strand. -******************************************************************************** - -SEQUENCE NAME E-VALUE MOTIF DIAGRAM -------------- -------- ------------- -ACC1 6.1e-05 82_[+1]_137_[+2]_559 -CHO1 0.00016 152_[+2]_396_[-2]_42_[+1]_17_ - [+1]_149 -INO1 0.00019 282_[-2]_327_[-1]_55_[+1]_102 -FAS1 0.00022 43_[+2]_41_[+1]_694 -OPI3 0.00092 185_[-2]_144_[+1]_449 -CHO2 0.0029 353_[+1]_47_[-2]_378 -FAS2 0.0093 184_[-2]_372_[+1]_222 - -******************************************************************************** - - - -******************************************************************************** -SECTION III: ANNOTATED SEQUENCES -******************************************************************************** - - The positions and p-values of the non-overlapping motif occurrences - are shown above the actual sequence for each of the high-scoring - sequences from Section I. - - A motif occurrence is defined as a position in the sequence whose - match to the motif has POSITION p-value less than 0.0001 as - defined in Section II. - - For each sequence, the first line specifies the name of the sequence. - - The second (and possibly more) lines give a description of the - sequence. 
- - Following the description line(s) is a line giving the length, - combined p-value, and E-value of the sequence as defined in Section I. - - The next line reproduces the motif diagram from Section II. - - The entire sequence is printed on the following lines. - - Motif occurrences are indicated directly above their positions in the - sequence on lines showing - o the motif number of the occurrence (a minus sign indicates that - the occurrence is on the reverse complement strand), - o the position p-value of the occurrence, - o the best possible match to the motif (or its reverse complement), and - o columns whose match to the motif has a positive score (indicated - by a plus sign). -******************************************************************************** - - -ACC1 - sequence of the region upstream from YNR016C - LENGTH = 800 COMBINED P-VALUE = 8.78e-06 E-VALUE = 6.1e-05 - DIAGRAM: 82_[+1]_137_[+2]_559 - - - [+1] - 3.1e-07 - TTCACATGCCGC - ++++++++++ + -76 TAAAATCTTCACATGGCCCGGCCGCGCGCGCGTTGTGCCAACAAGTCGCAGTCGAAATTCAACCGCTCATTGCCA - - [+2] - 7.4e-07 - TCTGGCACAG - ++++++++++ -226 TCGTATTCTGGCACAGTATAGCCTAGCACAATCACTGTCACAATTGTTATCGGTTCTACAATTGTTCTGCTCTCT - - -CHO1 - sequence of the region upstream from YER026C - LENGTH = 800 COMBINED P-VALUE = 2.30e-05 E-VALUE = 0.00016 - DIAGRAM: 152_[+2]_396_[-2]_42_[+1]_17_[+1]_149 - - - [+2] - 3.9e-05 - TCTGGCACAG - ++++++ + -151 CGTCTGGCGCCCTTCCCATTCCGAACCATGTTATATTGAACCATCTGGCGACAAGCAGTATTAAGCATAATACAT - - [-2] - 7.4e-07 - CTGTGCCAGA - ++++++++++ -526 CAATCCCCACTCCTTCTCAATGTGTGCAGACTTCTGTGCCAGACACTGAATATATATCAGTAATTGGTCAAAATC - - [+1] [+1] - 8.7e-07 2.2e-05 - TTCACATGCCGC TTCACATGCCGC - ++++++ +++ + +++++++++ + -601 ACTTTGAACGTTCACACGGCACCCTCACGCCTTTGAGCTTTCACATGGACCCATCTAAAGATGAAGATCCGTATT - - -INO1 - sequence of the region upstream from YJL153C - LENGTH = 800 COMBINED P-VALUE = 2.71e-05 E-VALUE = 0.00019 - DIAGRAM: 282_[-2]_327_[-1]_55_[+1]_102 - - - [-2] - 1.8e-05 - CTGTGCCAGA - +++ +++ ++ -226 ACGTTGTATATGAAACGAGTAGTGAACGTTCGTACGATCTTTCACGCAGACATGCGACTGCGCCCGCCGTAGACC - - [-1] - 4.2e-08 - GCGGCATGTGAA - ++++++++++++ -601 TGCGCTTCGGCGGCTAAATGCGGCATGTGAAAAGTATTGTCTATTTTATCTTCATCCTTCTTTCCCAGAATATTG - - [+1] - 1.3e-05 - TTCACATGCCGC - +++++++++ ++ -676 AACTTATTTAATTCACATGGAGCAGAGAAAGCGCACCTCTGCGTTGGCGGCAATGTTAATTTGAGACGTATATAA - - -FAS1 - sequence of the region upstream from YKL182W - LENGTH = 800 COMBINED P-VALUE = 3.19e-05 E-VALUE = 0.00022 - DIAGRAM: 43_[+2]_41_[+1]_694 - - [+2] - 2.2e-05 - TCTGGCACAG - ++ +++++ + -1 CCGGGTTATAGCAGCGTCTGCTCCGCATCACGATACACGAGGTGCAGGCACGGTTCACTACTCCCCTGGCCTCCA - - [+1] - 4.2e-08 - TTCACATGCCGC - ++++++++++++ -76 ACAAACGACGGCCAAAAACTTCACATGCCGCCCAGCCAAGCATAATTACGCAACAGCGATCTTTCCGTCGCACAA - - -OPI3 - sequence of the region upstream from YJR073C - LENGTH = 800 COMBINED P-VALUE = 1.32e-04 E-VALUE = 0.00092 - DIAGRAM: 185_[-2]_144_[+1]_449 - - - [-2] - 7.4e-07 - CTGTGCCAGA - ++++++++++ -151 GTTAATCTGATCAACGCTACGCCGATGACAACGGTCTGTGCCAGATCTGGTTTTCCCCACTTATTTGCTACTTCC - - [+1] - 5.8e-06 - TTCACATGCCGC - ++++ ++ ++ + -301 AACTCCGTCAGGTCTTCCACGTGGAACTGCCAAGCCTCCTTCAGATCGCTCTTGTCGACCGTCTCCAAGAGATCC - - -CHO2 - sequence of the region upstream from YGR157W - LENGTH = 800 COMBINED P-VALUE = 4.18e-04 E-VALUE = 0.0029 - DIAGRAM: 353_[+1]_47_[-2]_378 - - - [+1] - 5.2e-07 - TTCACATGCCGC - +++ ++++++++ -301 ATATATATTTTTGCCTTGGTTTAAATTGGTCAAGACAGTCAATTGCCACACTTTTCTCATGCCGCATTCATTATT - - [-2] - 2.9e-05 - CTGTGCCAGA - + +++++++ -376 CGCGAAGTTTTCCACACAAAACTGTGAAAATGAACGGCGATGCCAGAAACGGCAAAACCTCAAATGTTAGATAAC - - -FAS2 - sequence 
of the region upstream from YPL231W - LENGTH = 800 COMBINED P-VALUE = 1.33e-03 E-VALUE = 0.0093 - DIAGRAM: 184_[-2]_372_[+1]_222 - - - [-2] - 2.9e-05 - CTGTGCCAGA - ++++++++ -151 AACAGGGTGTCGGTCATACCGATAAAGCCGTCAAGAGTGCCAGAAAAGCAAGAAAGAACAAGATTAGATGTTGGT - - [+1] - 1.9e-06 - TTCACATGCCGC - +++++++++ + -526 GCTTAGCAAAATCCAACCATTTTTTTTTTATCTCCCGCGTTTTCACATGCTACCTCATTCGCCTCGTAACGTTAC - -******************************************************************************** - - -CPU: pmgm2 -Time 0.030000 secs. - -mast meme.INO_up800.oops.txt -text -stdout diff --git a/Tests/Motif/mast.protein.oops.txt b/Tests/Motif/mast.protein.oops.txt deleted file mode 100644 index 3fefee4db25..00000000000 --- a/Tests/Motif/mast.protein.oops.txt +++ /dev/null @@ -1,885 +0,0 @@ -******************************************************************************** -MAST - Motif Alignment and Search Tool -******************************************************************************** - MAST version 3.0 (Release date: 2004/08/18 09:07:01) - - For further information on how to interpret these results or to get - a copy of the MAST software please access http://meme.sdsc.edu. -******************************************************************************** - - -******************************************************************************** -REFERENCE -******************************************************************************** - If you use this program in your research, please cite: - - Timothy L. Bailey and Michael Gribskov, - "Combining evidence using p-values: application to sequence homology - searches", Bioinformatics, 14(48-54), 1998. -******************************************************************************** - - -******************************************************************************** -DATABASE AND MOTIFS -******************************************************************************** - DATABASE adh.s (peptide) - Last updated on Mon Aug 16 21:19:59 2004 - Database contains 33 sequences, 9996 residues - - MOTIFS meme.adh.oops.txt (peptide) - MOTIF WIDTH BEST POSSIBLE MATCH - ----- ----- ------------------- - 1 29 YSASKFAVRMLTRSMAHEYAPHGIRVNCI - 2 29 KVVLITGCSSGIGKATAKHLHKEGAKVVL - - PAIRWISE MOTIF CORRELATIONS: - MOTIF 1 - ----- ----- - 2 0.30 - No overly similar pairs (correlation > 0.60) found. - - Random model letter frequencies (from non-redundant database): - A 0.073 C 0.018 D 0.052 E 0.062 F 0.040 G 0.069 H 0.022 I 0.056 K 0.058 - L 0.092 M 0.023 N 0.046 P 0.051 Q 0.041 R 0.052 S 0.074 T 0.059 V 0.064 - W 0.013 Y 0.033 -******************************************************************************** - - -******************************************************************************** -SECTION I: HIGH-SCORING SEQUENCES -******************************************************************************** - - Each of the following 33 sequences has E-value less than 10. - - The E-value of a sequence is the expected number of sequences - in a random database of the same size that would match the motifs as - well as the sequence does and is equal to the combined p-value of the - sequence times the number of sequences in the database. - - The combined p-value of a sequence measures the strength of the - match of the sequence to all the motifs and is calculated by - o finding the score of the single best match of each motif - to the sequence (best matches may overlap), - o calculating the sequence p-value of each score, - o forming the product of the p-values, - o taking the p-value of the product. 
- - The sequence p-value of a score is defined as the - probability of a random sequence of the same length containing - some match with as good or better a score. - - The score for the match of a position in a sequence to a motif - is computed by by summing the appropriate entry from each column of - the position-dependent scoring matrix that represents the motif. - - Sequences shorter than one or more of the motifs are skipped. - - The table is sorted by increasing E-value. -******************************************************************************** - -SEQUENCE NAME DESCRIPTION E-VALUE LENGTH -------------- ----------- -------- ------ -BUDC_KLETE ACETOIN(DIACETYL) REDUCTA... 2.6e-33 241 -YRTP_BACSU HYPOTHETICAL 25.3 KD PROT... 4.3e-33 238 -AP27_MOUSE ADIPOCYTE P27 PROTEIN (AP... 4.5e-33 244 -HDE_CANTR HYDRATASE-DEHYDROGENASE-E... 1.6e-32 906 -HDHA_ECOLI 7-ALPHA-HYDROXYSTEROID DE... 4.9e-31 255 -DHII_HUMAN CORTICOSTEROID 11-BETA-DE... 8.2e-31 292 -FIXR_BRAJA FIXR PROTEIN 2.6e-30 278 -DHGB_BACME GLUCOSE 1-DEHYDROGENASE B... 3.2e-30 262 -NODG_RHIME NODULATION PROTEIN G (HOS... 3.4e-29 245 -RIDH_KLEAE RIBITOL 2-DEHYDROGENASE (... 6.2e-29 249 -YINL_LISMO HYPOTHETICAL 26.8 KD PROT... 6.6e-29 248 -DHMA_FLAS1 N-ACYLMANNOSAMINE 1-DEHYD... 1.2e-28 270 -HMTR_LEIMA no comment 5.1e-28 287 -2BHD_STREX 20-BETA-HYDROXYSTEROID DE... 5.9e-28 255 -ENTA_ECOLI 2,3-DIHYDRO-2,3-DIHYDROXY... 4.8e-27 248 -DHB2_HUMAN no comment 1.7e-26 387 -BDH_HUMAN D-BETA-HYDROXYBUTYRATE DE... 2.8e-26 343 -BA72_EUBSP 7-ALPHA-HYDROXYSTEROID DE... 4.2e-26 249 -FVT1_HUMAN no comment 8.9e-26 332 -GUTD_ECOLI SORBITOL-6-PHOSPHATE 2-DE... 5.1e-25 259 -DHB3_HUMAN no comment 8.3e-25 310 -3BHD_COMTE 3-BETA-HYDROXYSTEROID DEH... 1.4e-24 253 -LIGD_PSEPA C ALPHA-DEHYDROGENASE (EC... 8.1e-24 305 -DHES_HUMAN ESTRADIOL 17 BETA-DEHYDRO... 2.4e-22 327 -RFBB_NEIGO no comment 3.2e-19 346 -BPHB_PSEPS BIPHENYL-CIS-DIOL DEHYDRO... 1e-18 275 -YURA_MYXXA no comment 1.9e-18 258 -PCR_PEA no comment 7.2e-18 399 -DHCA_HUMAN no comment 1.1e-17 276 -ADH_DROME ALCOHOL DEHYDROGENASE (EC... 2.7e-14 255 -MAS1_AGRRA no comment 3e-14 476 -FABI_ECOLI no comment 9.7e-14 262 -CSGA_MYXXA no comment 2.5e-12 166 - -******************************************************************************** - - - -******************************************************************************** -SECTION II: MOTIF DIAGRAMS -******************************************************************************** - - The ordering and spacing of all non-overlapping motif occurrences - are shown for each high-scoring sequence listed in Section I. - - A motif occurrence is defined as a position in the sequence whose - match to the motif has POSITION p-value less than 0.0001. - - The POSITION p-value of a match is the probability of - a single random subsequence of the length of the motif - scoring at least as well as the observed match. - - For each sequence, all motif occurrences are shown unless there - are overlaps. In that case, a motif occurrence is shown only if its - p-value is less than the product of the p-values of the other - (lower-numbered) motif occurrences that it overlaps. - - The table also shows the E-value of each sequence. - - Spacers and motif occurences are indicated by - o -d- `d' residues separate the end of the preceding motif - occurrence and the start of the following motif occurrence - o [n] occurrence of motif `n' with p-value less than 0.0001. 
-******************************************************************************** - -SEQUENCE NAME E-VALUE MOTIF DIAGRAM -------------- -------- ------------- -BUDC_KLETE 2.6e-33 2_[2]_120_[1]_61 -YRTP_BACSU 4.3e-33 6_[2]_119_[1]_55 -AP27_MOUSE 4.5e-33 7_[2]_112_[1]_67 -HDE_CANTR 1.6e-32 8_[2]_125_[1]_131_[2]_115_[1]_411 -HDHA_ECOLI 4.9e-31 11_[2]_74_[1]_15_[1]_68 -DHII_HUMAN 8.2e-31 34_[2]_119_[1]_81 -FIXR_BRAJA 2.6e-30 36_[2]_123_[1]_61 -DHGB_BACME 3.2e-30 7_[2]_123_[1]_74 -NODG_RHIME 3.4e-29 6_[2]_116_[1]_65 -RIDH_KLEAE 6.2e-29 14_[2]_116_[1]_61 -YINL_LISMO 6.6e-29 5_[2]_75_[2]_15_[1]_66 -DHMA_FLAS1 1.2e-28 14_[2]_121_[1]_77 -HMTR_LEIMA 5.1e-28 6_[2]_157_[1]_66 -2BHD_STREX 5.9e-28 6_[2]_116_[1]_75 -ENTA_ECOLI 4.8e-27 5_[2]_109_[1]_76 -DHB2_HUMAN 1.7e-26 82_[2]_120_[1]_127 -BDH_HUMAN 2.8e-26 55_[2]_123_[1]_107 -BA72_EUBSP 4.2e-26 6_[2]_121_[1]_64 -FVT1_HUMAN 8.9e-26 32_[2]_124_[1]_118 -GUTD_ECOLI 5.1e-25 2_[2]_122_[1]_77 -DHB3_HUMAN 8.3e-25 48_[2]_120_[1]_84 -3BHD_COMTE 1.4e-24 6_[2]_115_[1]_74 -LIGD_PSEPA 8.1e-24 6_[2]_121_[1]_120 -DHES_HUMAN 2.4e-22 2_[2]_50_[2]_44_[1]_144 -RFBB_NEIGO 3.2e-19 6_[2]_129_[1]_153 -BPHB_PSEPS 1e-18 5_[2]_118_[1]_94 -YURA_MYXXA 1.9e-18 65_[2]_22_[2]_14_[1]_70 -PCR_PEA 7.2e-18 25_[1]_32_[2]_284 -DHCA_HUMAN 1.1e-17 4_[2]_159_[1]_55 -ADH_DROME 2.7e-14 6_[2]_116_[1]_75 -MAS1_AGRRA 3e-14 245_[2]_74_[1]_14_[1]_56 -FABI_ECOLI 9.7e-14 6_[2]_123_[1]_75 -CSGA_MYXXA 2.5e-12 51_[2]_7_[1]_50 - -******************************************************************************** - - - -******************************************************************************** -SECTION III: ANNOTATED SEQUENCES -******************************************************************************** - - The positions and p-values of the non-overlapping motif occurrences - are shown above the actual sequence for each of the high-scoring - sequences from Section I. - - A motif occurrence is defined as a position in the sequence whose - match to the motif has POSITION p-value less than 0.0001 as - defined in Section II. - - For each sequence, the first line specifies the name of the sequence. - - The second (and possibly more) lines give a description of the - sequence. - - Following the description line(s) is a line giving the length, - combined p-value, and E-value of the sequence as defined in Section I. - - The next line reproduces the motif diagram from Section II. - - The entire sequence is printed on the following lines. - - Motif occurrences are indicated directly above their positions in the - sequence on lines showing - o the motif number of the occurrence, - o the position p-value of the occurrence, - o the best possible match to the motif, and - o columns whose match to the motif has a positive score (indicated - by a plus sign). 
-******************************************************************************** - - -BUDC_KLETE - ACETOIN(DIACETYL) REDUCTASE (EC 1.1.1.5) (ACETOIN DEHYDROGENASE) - LENGTH = 241 COMBINED P-VALUE = 7.82e-35 E-VALUE = 2.6e-33 - DIAGRAM: 2_[2]_120_[1]_61 - - [2] - 7.9e-21 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - +++++++++++++++++++++++++ + + -1 MQKVALVTGAGQGIGKAIALRLVKDGFAVAIADYNDATATAVAAEINQAGGRAVAIKVDVSRRDQVFAAVEQARK - - [1] - 2.6e-21 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - +++++++++++++++++++++ ++++++ -151 VYSSSKFAVRGLTQTAARDLAPLGITVNGFCPGIVKTPMWAEIDRQCRKRRANRWATARLNLPNASPLAACRSLK - - -YRTP_BACSU - HYPOTHETICAL 25.3 KD PROTEIN IN RTP 5'REGION (ORF238) - LENGTH = 238 COMBINED P-VALUE = 1.31e-34 E-VALUE = 4.3e-33 - DIAGRAM: 6_[2]_119_[1]_55 - - [2] - 2.8e-19 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - ++++++++++++++++++++++++ ++ + -1 MQSLQHKTALITGGGRGIGRATALALAKEGVNIGLIGRTSANVEKVAEEVKALGVKAAFAAADVKDADQVNQAVA - - [1] - 1.3e-22 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - ++++++++ ++++++++++++++++++++ -151 VTSAYSASKFAVLGLTESLMQEVRKHNIRVSALTPSTVASDMSIELNLTDGNPEKVMQPEDLAEYMVAQLKLDPR - - -AP27_MOUSE - ADIPOCYTE P27 PROTEIN (AP27) - LENGTH = 244 COMBINED P-VALUE = 1.37e-34 E-VALUE = 4.5e-33 - DIAGRAM: 7_[2]_112_[1]_67 - - [2] - 7.4e-20 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - +++++++++++++ ++++++++++++++ -1 MKLNFSGLRALVTGAGKGIGRDTVKALHASGAKVVAVTRTNSDLVSLAKECPGIEPVCVDLGDWDATEKALGGIG - - [1 - 4. - YS - ++ -76 PVDLLVNNAALVIMQPFLEVTKEAFDRSFSVNLRSVFQVSQMVARDMINRGVPGSIVNVSSMVAHVTFPNLITYS - - ] - 8e-22 - ASKFAVRMLTRSMAHEYAPHGIRVNCI - ++++++++++++++++++++ ++++++ -151 STKGAMTMLTKAMAMELGPHKIRVNSVNPTVVLTDMGKKVSADPEFARKLKERHPLRKFAEVEDVVNSILFLLSD - - -HDE_CANTR - HYDRATASE-DEHYDROGENASE-EPIMERASE (HDE) - LENGTH = 906 COMBINED P-VALUE = 4.94e-34 E-VALUE = 1.6e-32 - DIAGRAM: 8_[2]_125_[1]_131_[2]_115_[1]_411 - - [2] - 2.5e-19 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - ++++++++++++++ + +++++ ++++++ -1 MSPVDFKDKVVIITGAGGGLGKYYSLEFAKLGAKVVVNDLGGALNGQGGNSKAADVVVDEIVKNGGVAVADYNNV - - [1] - 2.8e-13 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - + +++ ++ ++ ++++++ ++++++ +++ -151 PAGLYGNFGQANYASAKSALLGFAETLAKEGAKYNIKANAIAPLARSRMTESILPPPMLEKLGPEKVAPLVLYLS - - [2] - 1.5e-24 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - +++++++++ +++++++++++++++++++ -301 TNEARKLPANDASGAPTVSLKDKVVLITGAGAGLGKEYAKWFAKYGAKVVVNDFKDATKTVDEIKAAGGEAWPDQ - - [1] - 5.2e-18 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - ++++++++ +++++++ + ++ +++++++ -451 NITSTSGIYGNFGQANYSSSKAGILGLSKTMAIEGAKNNIKVNIVAPHAETAMTLTIFREQDKNLYHADQVAPLL - - -HDHA_ECOLI - 7-ALPHA-HYDROXYSTEROID DEHYDROGENASE (EC 1.1.1.159) (HSDH) - LENGTH = 255 COMBINED P-VALUE = 1.49e-32 E-VALUE = 4.9e-31 - DIAGRAM: 11_[2]_74_[1]_15_[1]_68 - - [2] - 4.3e-21 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - +++++++++ +++++++++++++++++++ -1 MFNSDNLRLDGKCAIITGAGAGIGKEIAITFATAGASVVVSDINADAANHVVDEIQQLGGQAFACRCDITSEQEL - - [1] - 2.9e-05 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - + ++ ++++ + ++++ + + -76 SALADFAISKLGKVDILVNNAGGGGPKPFDMPMADFRRAYELNVFSFFHLSQLVAPEMEKNGGGVILTITSMAAE - - [1] - 8.6e-19 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - + +++++ +++++ +++++++++++++++ -151 NKNINMTSYASSKAAASHLVRNMAFDLGEKNIRVNGIAPGAILTDALKSVITPEIEQKMLQHTPIRRLGQPQDIA - - -DHII_HUMAN - CORTICOSTEROID 11-BETA-DEHYDROGENASE (EC 1.1.1.146) (11-DH) (11-BETA- - HYDROXYSTEROID DEHYDROGENASE) (11-BETA-HSD) - LENGTH = 292 COMBINED P-VALUE = 2.49e-32 E-VALUE = 8.2e-31 - DIAGRAM: 34_[2]_119_[1]_81 - - [2] - 3.9e-24 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - +++++++++++++++++++++++++++++ -1 MAFMKKYLLPILGLFMAYYYYSANEEFRPEMLQGKKVIVTGASKGIGREMAYHLAKMGAHVVVTARSKETLQKVV - - [1] - 1.2e-15 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - +++++++++++ ++ +++++ +++++++ -151 
TVAALPMLKQSNGSIVVVSSLAGKVAYPMVAAYSASKFALDGFFSSIRKEYSVSRVNVSITLCVLGLIDTETAMK - - -FIXR_BRAJA - FIXR PROTEIN - LENGTH = 278 COMBINED P-VALUE = 7.83e-32 E-VALUE = 2.6e-30 - DIAGRAM: 36_[2]_123_[1]_61 - - [2] - 3.2e-18 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - ++++ +++++++++++++ + ++++++ -1 MGLDLPNDNLIRGPLPEAHLDRLVDAVNARVDRGEPKVMLLTGASRGIGHATAKLFSEAGWRIISCARQPFDGER - - [1] - 5.1e-21 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - + +++++ ++++++++++++++++++++ -151 APILLAQGLFDELRAASGSIVNVTSIAGSRVHPFAGSAYATSKAALASLTRELAHDYAPHGIRVNAIAPGEIRTD - - -DHGB_BACME - GLUCOSE 1-DEHYDROGENASE B (EC 1.1.1.47) - LENGTH = 262 COMBINED P-VALUE = 9.77e-32 E-VALUE = 3.2e-30 - DIAGRAM: 7_[2]_123_[1]_74 - - [2] - 4.4e-19 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - +++++++ + +++++++++++++ +++++ -1 MYKDLEGKVVVITGSSTGLGKSMAIRFATEKAKVVVNYRSKEDEANSVLEEEIKKVGGEAIAVKGDVTVESDVIN - - [1] - 5.3e-20 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - + +++++++ +++++++++++++++++++ -151 KIPWPLFVHYAASKGGMKLMTETLALEYAPKGIRVNNIGPGAINTPINAEKFADPEQRADVESMIPMGYIGEPEE - - -NODG_RHIME - NODULATION PROTEIN G (HOST-SPECIFICITY OF NODULATION PROTEIN C) - LENGTH = 245 COMBINED P-VALUE = 1.03e-30 E-VALUE = 3.4e-29 - DIAGRAM: 6_[2]_116_[1]_65 - - [2] - 4.0e-16 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - +++++++++ ++ ++++ ++++++ + + -1 MFELTGRKALVTGASGAIGGAIARVLHAQGAIVGLHGTQIEKLETLATELGDRVKLFPANLANRDEVKALGQRAE - - [1] - 7.3e-22 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - +++++++++++++++++++++ +++++++ -151 NYCASKAGMIGFSKSLAQEIATRNITVNCVAPGFIESAMTDKLNHKQKEKIMVAIPIHRMGTGTEVASAVAYLAS - - -RIDH_KLEAE - RIBITOL 2-DEHYDROGENASE (EC 1.1.1.56) (RDH) - LENGTH = 249 COMBINED P-VALUE = 1.88e-30 E-VALUE = 6.2e-29 - DIAGRAM: 14_[2]_116_[1]_61 - - [2] - 7.0e-21 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - +++ +++++++++++++++++ +++++++ -1 MKHSVSSMNTSLSGKVAAITGAASGIGLECARTLLGAGAKVVLIDREGEKLNKLVAELGENAFALQVDLMQADQV - - [1] - 7.4e-17 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - ++++++++++++++ +++++++++++ ++ -151 VVPVIWEPVYTASKFAVQAFVHTTRRQVAQYGVRVGAVLPGPVVTALLDDWPKAKMDEALANGSLMQPIEVAESV - - -YINL_LISMO - HYPOTHETICAL 26.8 KD PROTEIN IN INLA 5'REGION (ORFA) - LENGTH = 248 COMBINED P-VALUE = 1.99e-30 E-VALUE = 6.6e-29 - DIAGRAM: 5_[2]_75_[2]_15_[1]_66 - - [2] - 2.9e-23 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - ++++++++++++++++++ +++++++ ++ -1 MTIKNKVIIITGASSGIGKATALLLAEKGAKLVLAARRVEKLEKIVQIIKANSGEAIFAKTDVTKREDNKKLVEL - - [2] - 3.8e-05 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - + + + ++ + +++ ++ + -76 AIERYGKVDAIFLNAGIMPNSPLSALKEDEWEQMIDINIKGVLNGIAAVLPSFIAQKSGHIIATSSVAGLKAYPG - - [1] - 1.9e-14 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - +++++++++ +++ ++++ ++++ ++++ -151 GAVYGATKWAVRDLMEVLRMESAQEGTNIRTATIYPAAINTELLETITDKETEQGMTSLYKQYGITPDRIASIVA - - -DHMA_FLAS1 - N-ACYLMANNOSAMINE 1-DEHYDROGENASE (EC 1.1.1.233) (NAM-DH) - LENGTH = 270 COMBINED P-VALUE = 3.65e-30 E-VALUE = 1.2e-28 - DIAGRAM: 14_[2]_121_[1]_77 - - [2] - 2.0e-19 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - + +++++++++++++++++ +++++++++ -1 TTAGVSRRPGRLAGKAAIVTGAAGGIGRATVEAYLREGASVVAMDLAPRLAATRYEEPGAIPIACDLADRAAIDA - - [1] - 4.2e-18 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - + ++++++ +++++++++++++++ ++++ -151 GSVNSFMAEPEAAAYVAAKGGVAMLTRAMAVDLARHGILVNMIAPGPVDVTGNNTGYSEPRLAEQVLDEVALGRP - - -HMTR_LEIMA - no comment - LENGTH = 287 COMBINED P-VALUE = 1.55e-29 E-VALUE = 5.1e-28 - DIAGRAM: 6_[2]_157_[1]_66 - - [2] - 1.6e-17 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - ++++++++++ +++++++ ++++++ +++ -1 MTAPTVPVALVTGAAKRLGRSIAEGLHAEGYAVCLHYHRSAAEANALSATLNARRPNSAITVQADLSNVATAPVS - - [1] - 2.1e-19 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - +++++++++++++++++++++ +++++++ -151 PYFLIKAFAHRSRHPSQASRTNYSIINMVDAMTNQPLLGYTIYTMAKGALEGLTRSAALELAPLQIRVNGVGPGL - - -2BHD_STREX - 
20-BETA-HYDROXYSTEROID DEHYDROGENASE (EC 1.1.1.53) - LENGTH = 255 COMBINED P-VALUE = 1.78e-29 E-VALUE = 5.9e-28 - DIAGRAM: 6_[2]_116_[1]_75 - - [2] - 7.1e-18 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - +++++++++++++ + +++ +++++++++ -1 MNDLSGKTVIITGGARGLGAEAARQAVAAGARVVLADVLDEEGAATARELGDAARYQHLDVTIEEDWQRVVAYAR - - [1] - 6.8e-19 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - +++++++++++++ +++++++ +++++++ -151 SYGASKWGVRGLSKLAAVELGTDRIRVNSVHPGMTYTPMTAETGIRQGEGNYPNTPMGRVGNEPGEIAGAVVKLL - - -ENTA_ECOLI - 2,3-DIHYDRO-2,3-DIHYDROXYBENZOATE DEHYDROGENASE (EC 1.3.1.28) - LENGTH = 248 COMBINED P-VALUE = 1.45e-28 E-VALUE = 4.8e-27 - DIAGRAM: 5_[2]_109_[1]_76 - - [2] - 3.7e-20 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - +++++++++++++++++++++++++++ -1 MDFSGKNVWVTGAGKGIGYATALAFVEAGAKVTGFDQAFTQEQYPFATEVMDVADAAQVAQVCQRLLAETERLDA - - [1] - 1.2e-15 - YSASKFA - +++++++ -76 LVNAAGILRMGATDQLSKEDWQQTFAVNVGGAFNLFQQTMNQFRRQRGGAIVTVASDAAHTPRIGMSAYGASKAA - - - - VRMLTRSMAHEYAPHGIRVNCI - ++++ + ++++ ++++++++ -151 LKSLALSVGLELAGSGVRCNVVSPGSTDTDMQRTLWVSDDAEEQRIRGFGEQFKLGIPLGKIARPQEIANTILFL - - -DHB2_HUMAN - no comment - LENGTH = 387 COMBINED P-VALUE = 5.05e-28 E-VALUE = 1.7e-26 - DIAGRAM: 82_[2]_120_[1]_127 - - - [2] - 3.4e-17 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - + ++++++++++++++++++ + ++ +++ -76 ELLPVDQKAVLVTGGDCGLGHALCKYLDELGFTVFAGVLNENGPGAEELRRTCSPRLSVLQMDITKPVQIKDAYS - - [1] - 1.7e-18 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - +++++++++++++ ++++++++++++ ++ -226 MERLASYGSSKAAVTMFSSVMRLELSKWGIKVASIQPGGFLTNIAGTSDKWEKLEKDILDHLPAEVQEDYGQDYI - - -BDH_HUMAN - D-BETA-HYDROXYBUTYRATE DEHYDROGENASE PRECURSOR (EC 1.1.1.30) (BDH) - (3-HYDROXYBUTYRATE DEHYDROGENASE) (FRAGMENT) - LENGTH = 343 COMBINED P-VALUE = 8.57e-28 E-VALUE = 2.8e-26 - DIAGRAM: 55_[2]_123_[1]_107 - - [2] - 2.3e-18 - KVVLITGCSSGIGKATAKHL - + +++++++++ ++++++++ -1 GLRPPPPGRFSRLPGKTLSACDRENGARRPLLLGSTSFIPIGRRTYASAAEPVGSKAVLVTGCDSGFGFSLAKHL - - - - HKEGAKVVL - +++++ +++ -76 HSKGFLVFAGCLMKDKGHDGVKELDSLNSDRLRTVQLNVFRSEEVEKVVGDCPFEPEGPEKGMWGLVNNAGISTF - - [1] - 5.5e-17 - YSASKFAVRMLTRSMAHE - ++ +++++++++++++++ -151 GEVEFTSLETYKQVAEVNLWGTVRMTKSFLPLIRRAKGRVVNISSMLGRMANPARSPYCITKFGVEAFSDCLRYE - - - - YAPHGIRVNCI - +++ +++++++ -226 MYPLGVKVSVVEPGNFIAATSLYNPESIQAIAKKMWEELPEVVRKDYGKKYFDEKIAKMETYCSSGSTDTSPVID - - -BA72_EUBSP - 7-ALPHA-HYDROXYSTEROID DEHYDROGENASE (EC 1.1.1.159) (BILE ACID - 7-DEHYDROXYLASE) (BILE ACID-INDUCIBLE PROTEIN) - LENGTH = 249 COMBINED P-VALUE = 1.28e-27 E-VALUE = 4.2e-26 - DIAGRAM: 6_[2]_121_[1]_64 - - [2] - 2.6e-18 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - ++++++++ ++++++ ++ ++ +++++ + -1 MNLVQDKVTIITGGTRGIGFAAAKIFIDNGAKVSIFGETQEEVDTALAQLKELYPEEEVLGFAPDLTSRDAVMAA - - [1] - 1.5e-16 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - + +++++++++++ + +++ +++++++++ -151 SLSGVGYPASKASVIGLTHGLGREIIRKNIRVVGVAPGVVNTDMTNGNPPEIMEGYLKALPMKRMLEPEEIANVY - - -FVT1_HUMAN - no comment - LENGTH = 332 COMBINED P-VALUE = 2.70e-27 E-VALUE = 8.9e-26 - DIAGRAM: 32_[2]_124_[1]_118 - - [2] - 2.3e-17 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - ++++++++++++++++++++++++ + + -1 MLLLAAAFLVAFVLLLYMVSPLISPKPLALPGAHVVVTGGSSGIGKCIAIECYKQGAFITLVARNEDKLLQAKKE - - [1] - 1.9e-17 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - +++++++++++ ++++++++++++++++ -151 YPSRAVITTMKERRVGRIVFVSSQAGQLGLFGFTAYSASKFAIRGLAEALQMEVKPYNVYITVAYPPDTDTPGFA - - -GUTD_ECOLI - SORBITOL-6-PHOSPHATE 2-DEHYDROGENASE (EC 1.1.1.140) (GLUCITOL-6- PHOSPHATE - DEHYDROGENASE) (KETOSEPHOSPHATE REDUCTASE) - LENGTH = 259 COMBINED P-VALUE = 1.54e-26 E-VALUE = 5.1e-25 - DIAGRAM: 2_[2]_122_[1]_77 - - [2] - 1.5e-14 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - +++++ ++++ ++ +++ ++++++++ + -1 
MNQVAVVIGGGQTLGAFLCHGLAAEGYRVAVVDIQSDKAANVAQEINAEYGESMAYGFGADATSEQSCLALSRGV - - [1] - 3.0e-19 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - +++++++ +++++++++++++++++++++ -151 NSGYSAAKFGGVGLTQSLALDLAEYGITVHSLMLGNLLKSPMFQSLLPQYATKLGIKPDQVEQYYIDKVPLKRGC - - -DHB3_HUMAN - no comment - LENGTH = 310 COMBINED P-VALUE = 2.53e-26 E-VALUE = 8.3e-25 - DIAGRAM: 48_[2]_120_[1]_84 - - [2] - 7.4e-19 - KVVLITGCSSGIGKATAKHLHKEGAKV - +++++++++ ++++++ ++++++ ++ -1 MGDVLEQFFILTGLLVCLACLAKCVRFSRCVLLNYYKVLPKSFLRSMGQWAVITGAGDGIGKAYSFELAKRGLNV - - - - VL - ++ -76 VLISRTLEKLEAIATEIERTTGRSVKIIQADFTKDDIYEHIKEKLAGLEIGILVNNVGMLPNLLPSHFLNAPDEI - - [1] - 6.7e-15 - YSASKFAVRMLTRSMAHEYAPHGIRVNC - ++++++++++++++++ +++ + + +++ -151 QSLIHCNITSVVKMTQLILKHMESRQKGLILNISSGIALFPWPLYSMYSASKAFVCAFSKALQEEYKAKEVIIQV - - - - I - + -226 LTPYAVSTAMTKYLNTNVITKTADEFVKESLNYVTIGGETCGCLAHEILAGFLSLIPAWAFYSGAFQRLLLTHYV - - -3BHD_COMTE - 3-BETA-HYDROXYSTEROID DEHYDROGENASE (EC 1.1.1.51) - LENGTH = 253 COMBINED P-VALUE = 4.25e-26 E-VALUE = 1.4e-24 - DIAGRAM: 6_[2]_115_[1]_74 - - [2] - 2.6e-18 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - +++++++++++ ++++++ ++ +++++ -1 TNRLQGKVALVTGGASGVGLEVVKLLLGEGAKVAFSDINEAAGQQLAAELGERSMFVRHDVSSEADWTLVMAAVQ - - [1] - 5.1e-15 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - +++++++++++++++++ +++++ +++ -151 YSASKAAVSALTRAAALSCRKQGYAIRVNSIHPDGIYTPMMQASLPKGVSKEMVLHDPKLNRAGRAYMPERIAQL - - -LIGD_PSEPA - C ALPHA-DEHYDROGENASE (EC -.-.-.-) - LENGTH = 305 COMBINED P-VALUE = 2.45e-25 E-VALUE = 8.1e-24 - DIAGRAM: 6_[2]_121_[1]_120 - - [2] - 6.5e-17 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - +++ +++++++ ++ ++ + ++++++++ -1 MKDFQDQVAFITGGASGAGFGQAKVFGQAGAKIVVADVRAEAVEKAVAELEGLGITAHGIVLDIMDREAYARAAD - - [1] - 7.9e-16 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - +++++++ +++++ +++ ++++++ ++++ -151 SALAGPYSAAKAASINLMEGYRQGLEKYGIGVSVCTPANIKSNIAEASRLRPAKYGTSGYVENEESIASLHSIHQ - - -DHES_HUMAN - ESTRADIOL 17 BETA-DEHYDROGENASE (EC 1.1.1.62) (20 ALPHA-HYDROXYSTEROID - DEHYDROGENASE) (E2DH) (17-BETA-HSD) (PLACENTAL 17-BETA-HYDROXYSTEROID - DEHYDROGENASE) - LENGTH = 327 COMBINED P-VALUE = 7.31e-24 E-VALUE = 2.4e-22 - DIAGRAM: 2_[2]_50_[2]_44_[1]_144 - - [2] - 1.4e-14 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - +++++++++++++++++ +++++ ++ + -1 ARTVVLITGCSSGIGLHLAVRLASDPSQSFKVYATLRDLKTQGRLWEAARALACPPGSLETLQLDVRDSKSVAAA - - [2] - 9.3e-05 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - ++ + ++ ++++ ++ + + + + -76 RERVTEGRVDVLVCNAGLGLLGPLEALGEDAVASVLDVNVVGTVRMLQAFLPDMKRRGSGRVLVTGSVGGLMGLP - - [1] - 1.0e-16 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - +++++++++++++++++ + + +++ + + -151 FNDVYCASKFALEGLCESLAVLLLPFGVHLSLIECGPVHTAFMEKVLGSPEEVLDRTDIHTFHRFYQYLAHSKQV - - -RFBB_NEIGO - no comment - LENGTH = 346 COMBINED P-VALUE = 9.68e-21 E-VALUE = 3.2e-19 - DIAGRAM: 6_[2]_129_[1]_153 - - [2] - 1.8e-13 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - ++++++++++ ++ +++++ +++ ++ -1 MQTEGKKNILVTGGAGFIGSAVVRHIIQNTRDSVVNLDKLTYAGNLESLTDIADNPRYAFEQVDICDRAELDRVF - - [1] - 1.0e-14 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - +++++++ +++++++++ ++ ++ ++ -151 DLFTETTPYAPSSPYSASKAAADHLVRAWQRTYRLPSIVSNCSNNYGPRQFPEKLIPLMILNALSGKPLPVYGDG - - -BPHB_PSEPS - BIPHENYL-CIS-DIOL DEHYDROGENASE (EC 1.3.1.-) - LENGTH = 275 COMBINED P-VALUE = 3.02e-20 E-VALUE = 1e-18 - DIAGRAM: 5_[2]_118_[1]_94 - - [2] - 8.6e-15 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - +++++++++++++++ +++++ ++ -1 MKLKGEAVLITGGASGLGRALVDRFVAEAKVAVLDKSAERLAELETDLGDNVLGIVGDVRSLEDQKQAASRCVAR - - [1] - 1.2e-12 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - ++++++++++++++++++++++ + + -151 PLYTAAKQAIVGLVRELAFELAPYVRVNGVGPGGMNSDMRGPSSLGMGSKAISTVPLADMLKSVLPIGRMPEVEE - - -YURA_MYXXA - no comment - LENGTH = 258 COMBINED P-VALUE = 5.64e-20 E-VALUE = 1.9e-18 
- DIAGRAM: 65_[2]_22_[2]_14_[1]_70 - - [2] - 5.6e-05 - KVVLITGCSS - + ++ ++ -1 RQHTGGLHGGDELPDGVGDGCLQRPGTRAGAVARQAGVRVFAAGRRLPQLQAADEAPGGRRHRGARGVDVTKADA - - [2] - 5.7e-08 - GIGKATAKHLHKEGAKVVL KVVLITGCSSGIGKATAKHLHKEGAKVVL - + + +++ + + ++ + +++++ + + ++ ++ + + -76 TLERIRALDAEAGGLDLVVANAGVGGTTNAKRLPWERVRGIIDTNVTGAAATLSAVLPQMVERKRGHLVGVSSLA - - [1] - 3.8e-19 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - +++++++++ ++++++++++ ++++++++ -151 GFRGLPATRYSASKAFLSTFMESLRVDLRGTGVRVTCIYPGFVKSELTATNNFPMPFLMETHDAVELMGKGIVRG - - -PCR_PEA - no comment - LENGTH = 399 COMBINED P-VALUE = 2.17e-19 E-VALUE = 7.2e-18 - DIAGRAM: 25_[1]_32_[2]_284 - - [1] - 1.7e-08 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - ++ ++++++++++ + + +++ + -1 MALQTASMLPASFSIPKEGKIGASLKDSTLFGVSSLSDSLKGDFTSSALRCKELRQKVGAVRAETAAPATPAVNK - - [2] - 1.9e-18 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - +++++++++++++++++++++++ + ++ -76 SSSEGKKTLRKGNVVITGASSGLGLATAKALAESGKWHVIMACRDYLKAARAAKSAGLAKENYTIMHLDLASLDS - - -DHCA_HUMAN - no comment - LENGTH = 276 COMBINED P-VALUE = 3.43e-19 E-VALUE = 1.1e-17 - DIAGRAM: 4_[2]_159_[1]_55 - - [2] - 4.0e-16 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - ++++++++++++++++++++++ + ++ -1 SSGIHVALVTGGNKGIGLAIVRDLCRLFSGDVVLTARDVTRGQAAVQQLQAEGLSPRFHQLDIDDLQSIRALRDF - - [1] - 2.9e-10 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - ++ ++ +++ +++ +++ +++++ +++ -151 PELQQKFRSETITEEELVGLMNKFVEDTKKGVHQKEGWPSSAYGVTKIGVTVLSRIHARKLSEQRKGDKILLNAC - - -ADH_DROME - ALCOHOL DEHYDROGENASE (EC 1.1.1.1) - LENGTH = 255 COMBINED P-VALUE = 8.17e-16 E-VALUE = 2.7e-14 - DIAGRAM: 6_[2]_116_[1]_75 - - [2] - 1.1e-10 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - ++++ + +++++ + ++++++ + + -1 SFTLTNKNVIFVAGLGGIGLDTSKELLKRDLKNLVILDRIENPAAIAELKAINPKVTVTFYPYDVTVPIAETTKL - - [1] - 3.6e-12 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - ++ ++++++++++++++ ++++ +++ -151 VYSGTKAAVVNFTSSLAKLAPITGVTAYTVNPGITRTTLVHKFNSWLDVEPQVAEKLLAHPTQPSLACAENFVKA - - -MAS1_AGRRA - no comment - LENGTH = 476 COMBINED P-VALUE = 9.22e-16 E-VALUE = 3e-14 - DIAGRAM: 245_[2]_74_[1]_14_[1]_56 - - - [2] - 2.9e-15 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - +++++ + +++ ++++++++++++++ + -226 GRVLHFRRGFSHWTVEIHQSPVILVSGSNRGVGKAIAEDLIAHGYRLSLGARKVKDLEVAFGPQDEWLHYARFDA - - [1] - 4.0e-08 - YSASKFAVRMLTRSMAHEYAPHGIRVN - + + + +++ ++ + +++ +++ -301 EDHGTMAAWVTAAVEKFGRIDGLVNNAGYGEPVNLDKHVDYQRFHLQWYINCVAPLRMTELCLPHLYETGSGRIV - - [1] - 8.7e-05 - CI YSASKFAVRMLTRSMAHEYAPHGIRVNCI - ++ + +++ ++ +++++ ++ + + -376 NINSMSGQRVLNPLVGYNMTKHALGGLTKTTQHVGWDRRCAAIDICLGFVATDMSAWTDLIASKDMIQPEDIAKL - - -FABI_ECOLI - no comment - LENGTH = 262 COMBINED P-VALUE = 2.94e-15 E-VALUE = 9.7e-14 - DIAGRAM: 6_[2]_123_[1]_75 - - [2] - 4.5e-10 - KVVLITGCSSGIGKATAKHLHKEGAKVVL - +++++++ ++ + ++ ++ +++ + -1 MGFLSGKRILVTGVASKLSIAYGIAQAMHREGAELAFTYQNDKLKGRVEEFAAQLGSDIVLQCDVAEDASIDTMF - - [1] - 3.1e-12 - YSASKFAVRMLTRSMAHEYAPHGIRVNCI - ++ +++++++ +++++ +++++++++++ -151 RAIPNYNVMGLAKASLEANVRYMANAMGPEGVRVNAISAGPIRTLAASGIKDFRKMLAHCEAVTPIRRTVTIEDV - - -CSGA_MYXXA - no comment - LENGTH = 166 COMBINED P-VALUE = 7.50e-14 E-VALUE = 2.5e-12 - DIAGRAM: 51_[2]_7_[1]_50 - - [2] - 9.0e-08 - KVVLITGCSSGIGKATAKHLHKEG - + ++ + ++ + +++ +++ -1 MRAFATNVCTGPVDVLINNAGVSGLWCALGDVDYADMARTFTINALGPLRVTSAMLPGLRQGALRRVAHVTSRMG - - [1] - 1.3e-12 - AKVVL YSASKFAVRMLTRSMAHEYAPHGIRVNCI - + + ++++++ + ++++ ++++++ + + + -76 SLAANTDGGAYAYRMSKAALNMAVRSMSTDLRPEGFVTVLLHPGWVQTDMGGPDATLPAPDSVRGMLRVIDGLNP - -******************************************************************************** - - -CPU: pmgm2 -Time 0.250000 secs. 
- -mast meme.adh.oops.txt -text -stdout diff --git a/Tests/Motif/mast.protein.tcm.txt b/Tests/Motif/mast.protein.tcm.txt deleted file mode 100644 index 15340c5c837..00000000000 --- a/Tests/Motif/mast.protein.tcm.txt +++ /dev/null @@ -1,332 +0,0 @@ -******************************************************************************** -MAST - Motif Alignment and Search Tool -******************************************************************************** - MAST version 3.0 (Release date: 2004/08/18 09:07:01) - - For further information on how to interpret these results or to get - a copy of the MAST software please access http://meme.sdsc.edu. -******************************************************************************** - - -******************************************************************************** -REFERENCE -******************************************************************************** - If you use this program in your research, please cite: - - Timothy L. Bailey and Michael Gribskov, - "Combining evidence using p-values: application to sequence homology - searches", Bioinformatics, 14(48-54), 1998. -******************************************************************************** - - -******************************************************************************** -DATABASE AND MOTIFS -******************************************************************************** - DATABASE farntrans5.s (peptide) - Last updated on Mon Aug 16 21:19:59 2004 - Database contains 5 sequences, 1900 residues - - MOTIFS meme.farntrans5.tcm.txt (peptide) - MOTIF WIDTH BEST POSSIBLE MATCH - ----- ----- ------------------- - 1 30 GGFQGRPNKEVHTCYTYWALAALAILNKLH - 2 14 INKEKLIQWIKSCQ - - PAIRWISE MOTIF CORRELATIONS: - MOTIF 1 - ----- ----- - 2 0.22 - No overly similar pairs (correlation > 0.60) found. - - Random model letter frequencies (from non-redundant database): - A 0.073 C 0.018 D 0.052 E 0.062 F 0.040 G 0.069 H 0.022 I 0.056 K 0.058 - L 0.092 M 0.023 N 0.046 P 0.051 Q 0.041 R 0.052 S 0.074 T 0.059 V 0.064 - W 0.013 Y 0.033 -******************************************************************************** - - -******************************************************************************** -SECTION I: HIGH-SCORING SEQUENCES -******************************************************************************** - - Each of the following 5 sequences has E-value less than 10. - - The E-value of a sequence is the expected number of sequences - in a random database of the same size that would match the motifs as - well as the sequence does and is equal to the combined p-value of the - sequence times the number of sequences in the database. - - The combined p-value of a sequence measures the strength of the - match of the sequence to all the motifs and is calculated by - o finding the score of the single best match of each motif - to the sequence (best matches may overlap), - o calculating the sequence p-value of each score, - o forming the product of the p-values, - o taking the p-value of the product. - - The sequence p-value of a score is defined as the - probability of a random sequence of the same length containing - some match with as good or better a score. - - The score for the match of a position in a sequence to a motif - is computed by by summing the appropriate entry from each column of - the position-dependent scoring matrix that represents the motif. - - Sequences shorter than one or more of the motifs are skipped. - - The table is sorted by increasing E-value. 
-******************************************************************************** - -SEQUENCE NAME DESCRIPTION E-VALUE LENGTH -------------- ----------- -------- ------ -BET2_YEAST YPT1/SEC4 PROTEINS GERANY... 2.9e-27 325 -RATRABGERB Rat rab geranylgeranyl tr... 1.4e-25 331 -CAL1_YEAST RAS PROTEINS GERANYLGERAN... 9.7e-22 376 -PFTB_RAT PROTEIN FARNESYLTRANSFERA... 7.6e-21 437 -RAM1_YEAST PROTEIN FARNESYLTRANSFERA... 6.2e-20 431 - -******************************************************************************** - - - -******************************************************************************** -SECTION II: MOTIF DIAGRAMS -******************************************************************************** - - The ordering and spacing of all non-overlapping motif occurrences - are shown for each high-scoring sequence listed in Section I. - - A motif occurrence is defined as a position in the sequence whose - match to the motif has POSITION p-value less than 0.0001. - - The POSITION p-value of a match is the probability of - a single random subsequence of the length of the motif - scoring at least as well as the observed match. - - For each sequence, all motif occurrences are shown unless there - are overlaps. In that case, a motif occurrence is shown only if its - p-value is less than the product of the p-values of the other - (lower-numbered) motif occurrences that it overlaps. - - The table also shows the E-value of each sequence. - - Spacers and motif occurences are indicated by - o -d- `d' residues separate the end of the preceding motif - occurrence and the start of the following motif occurrence - o [n] occurrence of motif `n' with p-value less than 0.0001. -******************************************************************************** - -SEQUENCE NAME E-VALUE MOTIF DIAGRAM -------------- -------- ------------- -BET2_YEAST 2.9e-27 6_[2]_3_[1]_1_[2]_4_[1]_4_[2]_ - 3_[1]_1_[2]_3_[1]_21_[1]_1_[2]_ - 4_[1]_24 -RATRABGERB 1.4e-25 65_[2]_3_[1]_1_[2]_3_[1]_1_[2]_ - 3_[1]_18_[1]_1_[2]_4_[1]_26 -CAL1_YEAST 9.7e-22 125_[2]_50_[2]_1_[1]_4_[2]_22_ - [1]_22_[1]_5_[2]_1 -PFTB_RAT 7.6e-21 120_[2]_3_[1]_4_[2]_3_[1]_1_[2]_ - 3_[1]_1_[2]_4_[1]_14_[2]_4_[1]_60 -RAM1_YEAST 6.2e-20 144_[1]_5_[2]_4_[1]_1_[2]_4_[1]_ - 1_[2]_4_[1]_4_[2]_5_[1]_35_[2]_4 - -******************************************************************************** - - - -******************************************************************************** -SECTION III: ANNOTATED SEQUENCES -******************************************************************************** - - The positions and p-values of the non-overlapping motif occurrences - are shown above the actual sequence for each of the high-scoring - sequences from Section I. - - A motif occurrence is defined as a position in the sequence whose - match to the motif has POSITION p-value less than 0.0001 as - defined in Section II. - - For each sequence, the first line specifies the name of the sequence. - - The second (and possibly more) lines give a description of the - sequence. - - Following the description line(s) is a line giving the length, - combined p-value, and E-value of the sequence as defined in Section I. - - The next line reproduces the motif diagram from Section II. - - The entire sequence is printed on the following lines. 
- - Motif occurrences are indicated directly above their positions in the - sequence on lines showing - o the motif number of the occurrence, - o the position p-value of the occurrence, - o the best possible match to the motif, and - o columns whose match to the motif has a positive score (indicated - by a plus sign). -******************************************************************************** - - -BET2_YEAST - YPT1/SEC4 PROTEINS GERANYLGERANYLTRANSFERASE BETA SUBUNIT (EC 2. - LENGTH = 325 COMBINED P-VALUE = 5.77e-28 E-VALUE = 2.9e-27 - DIAGRAM: 6_[2]_3_[1]_1_[2]_4_[1]_4_[2]_3_[1]_1_[2]_3_[1]_21_[1]_1_[2]_4_[1]_24 - - [2] [1] [2] [1] - 5.2e-05 2.7e-10 6.6e-10 5.9 - INKEKLIQWIKSCQ GGFQGRPNKEVHTCYTYWALAALAILNKLH INKEKLIQWIKSCQ GGF - +++ +++++++ ++ + +++ +++ +++++ +++++++ + ++++++++++++++ + + -1 MSGSLTLLKEKHIRYIESLDTNKHNFEYWLTEHLRLNGIYWGLTALCVLDSPETFVKEEVISFVLSCWDDKYGAF - - [2] [1] - e-14 4.8e-07 2.3e-17 - QGRPNKEVHTCYTYWALAALAILNKLH INKEKLIQWIKSCQ GGFQGRPNKEVHTCYTYWALAALAILN - + +++++++ + ++++++ +++++ +++ +++++++ ++ + ++++ +++++++ +++++++++++ -76 APFPRHDAHLLTTLSAVQILATYDALDVLGKDRKVRLISFIRGNQLEDGSFQGDRFGEVDTRFVYTALSALSILG - - [2] [1] [1] - 5.1e-07 1.4e-18 4.6 - KLH INKEKLIQWIKSCQ GGFQGRPNKEVHTCYTYWALAALAILNKLH GGF - +++ ++++ ++++++++ ++++ ++++++++++++++++++++++++ +++ -151 ELTSEVVDPAVDFVLKCYNFDGGFGLCPNAESHAAQAFTCLGALAIANKLDMLSDDQLEEIGWWLCERQLPEGGL - - [2] [1] - e-22 2.0e-13 3.8e-17 - QGRPNKEVHTCYTYWALAALAILNKLH INKEKLIQWIKSCQ GGFQGRPNKEVHTCYTYWALAALAILNKL - ++++ ++++++++++++++++++++++ ++ +++++++++++ ++++++++++++++++ ++++++++++ + -226 NGRPSKLPDVCYSWWVLSSLAIIGRLDWINYEKLTEFILKCQDEKKGGISDRPENEVDVFHTVFGVAGLSLMGYD - - - - H - + -301 NLVPIDPIYCMPKSVTSKFKKYPYK - - -RATRABGERB - Rat rab geranylgeranyl transferase beta-subunit - LENGTH = 331 COMBINED P-VALUE = 2.83e-26 E-VALUE = 1.4e-25 - DIAGRAM: 65_[2]_3_[1]_1_[2]_3_[1]_1_[2]_3_[1]_18_[1]_1_[2]_4_[1]_26 - - [2] - 1.0e-11 - INKEKLIQWI - +++++++ ++ -1 MGTQQKDVTIKSDAPDTLLLEKHADYIASYGSKKDDYEYCMSEYLRMSGVYWGLTVMDLMGQLHRMNKEEILVFI - - [1] [2] [1] - 1.6e-14 1.4e-09 5.4e-19 - KSCQ GGFQGRPNKEVHTCYTYWALAALAILNKLH INKEKLIQWIKSCQ GGFQGRPNKEVHTCYTYWAL - ++++ ++ + +++++++ ++ ++++++++++++ +++++++ ++++++ + ++++++++++++++++++ -76 KSCQHECGGVSASIGHDPHLLYTLSAVQILTLYDSIHVINVDKVVAYVQSLQKEDGSFAGDIWGEIDTRFSFCAV - - [2] [1] - 3.8e-12 4.8e-19 - AALAILNKLH INKEKLIQWIKSCQ GGFQGRPNKEVHTCYTYWALAALAILNKLH - ++++++++++ ++++++++++++++ ++++++++ ++++++++++++ ++++++++ -151 ATLALLGKLDAINVEKAIEFVLSCMNFDGGFGCRPGSESHAGQIYCCTGFLAITSQLHQVNSDLLGWWLCERQLP - - [1] [2] [1] - 3.9e-21 1.2e-12 9.6e-18 - GGFQGRPNKEVHTCYTYWALAALAILNKLH INKEKLIQWIKSCQ GGFQGRPNKEVHTCYTYWALAALAI - +++++++++++++++++++++++ ++++++ ++++++++++++++ ++++++++++++ +++ ++++++++ -226 SGGLNGRPEKLPDVCYSWWVLASLKIIGRLHWIDREKLRSFILACQDEETGGFADRPGDMVDPFHTLFGIAGLSL - - - - LNKLH - +++++ -301 LGEEQIKPVSPVFCMPEEVLQRVNVQPELVS - - -CAL1_YEAST - RAS PROTEINS GERANYLGERANYLTRANSFERASE (EC 2.5.1.-) (PROTEIN GER - LENGTH = 376 COMBINED P-VALUE = 1.94e-22 E-VALUE = 9.7e-22 - DIAGRAM: 125_[2]_50_[2]_1_[1]_4_[2]_22_[1]_22_[1]_5_[2]_1 - - - [2] - 1.8e-08 - INKEKLIQWIKSCQ - +++++++++++++ -76 LDDTENTVISGFVGSLVMNIPHATTINLPNTLFALLSMIMLRDYEYFETILDKRSLARFVSKCQRPDRGSFVSCL - - [2] [1] - 4.8e-10 8.7e-14 - INKEKLIQWIKSCQ GGFQGRPNKEVHTCYTYWALA - +++++++ ++++++ + + + +++++ +++ ++++ -151 DYKTNCGSSVDSDDLRFCYIAVAILYICGCRSKEDFDEYIDTEKLLGYIMSQQCYNGAFGAHNEPHSGYTSCALS - - [2] [1] - 5.9e-08 5.9e-20 - ALAILNKLH INKEKLIQWIKSCQ GGFQGRPNKEVHTCYTYWALAALAIL - +++++++++ ++++++++++++ +++++++++ +++++++++++++ ++ -226 
TLALLSSLEKLSDKFKEDTITWLLHRQVSSHGCMKFESELNASYDQSDDGGFQGRENKFADTCYAFWCLNSLHLL - - [1] [2] - 4.0e-13 2.1e-07 - NKLH GGFQGRPNKEVHTCYTYWALAALAILNKLH INKEKLIQWIKSCQ - ++++ ++++ + ++++++++++ + +++++++ + ++ +++++++++ -301 TKDWKMLCQTELVTNYLLDRTQKTLTGGFSKNDEEDADLYHSCLGSAALALIEGKFNGELCIPQEIFNDFSKRCC - - -PFTB_RAT - PROTEIN FARNESYLTRANSFERASE BETA SUBUNIT (EC 2.5.1.-) (CAAX FARNES - LENGTH = 437 COMBINED P-VALUE = 1.53e-21 E-VALUE = 7.6e-21 - DIAGRAM: 120_[2]_3_[1]_4_[2]_3_[1]_1_[2]_3_[1]_1_[2]_4_[1]_14_[2]_4_[1]_60 - - - [2] [1] - 1.3e-07 2.8e-19 - INKEKLIQWIKSCQ GGFQGRPNKEVHT - ++ ++++++++ ++ +++++++++ +++ -76 EKHFHYLKRGLRQLTDAYECLDASRPWLCYWILHSLELLDEPIPQIVATDVCQFLELCQSPDGGFGGGPGQYPHL - - [2] [1] [2] - 2.3e-09 2.1e-14 1.8e-0 - CYTYWALAALAILNKLH INKEKLIQWIKSCQ GGFQGRPNKEVHTCYTYWALAALAILNKLH INKEKL - + +++++++++++++++ ++++++++++ +++ + + ++ +++++++ +++++++++++++++ + +++ -151 APTYAAVNALCIIGTEEAYNVINREKLLQYLYSLKQPDGSFLMHVGGEVDVRSAYCAASVASLTNIITPDLFEGT - - [1] [2] [1] - 8 7.4e-20 1.8e-08 2.2e-16 - IQWIKSCQ GGFQGRPNKEVHTCYTYWALAALAILNKLH INKEKLIQWIKSCQ GGFQGRPNKEVHTCY - ++++ +++ +++++ +++++++++++++++++ ++++++ + +++++++++++ ++++++ ++++++++ -226 AEWIARCQNWEGGIGGVPGMEAHGGYTFCGLAALVILKKERSLNLKSLLQWVTSRQMRFEGGFQGRCNKLVDGCY - - [2] [1] - 5.0e-08 3.1e-15 - TYWALAALAILNKLH INKEKLIQWIKSCQ GGFQGRPNKEVHTCYTYWALAALAILNK - ++++++ + ++++ ++++++++++++++ +++ +++++ +++++++++++++++++ -301 SFWQAGLLPLLHRALHAQGDPALSMSHWMFHQQALQEYILMCCQCPAGGLLDKPGKSRDFYHTCYCLSGLSIAQH - - - - LH - + -376 FGSGAMLHDVVMGVPENVLQPTHPVYNIGPDKVIQATTHFLQKPVPGFEECEDAVTSDPATD - - -RAM1_YEAST - PROTEIN FARNESYLTRANSFERASE BETA SUBUNIT (EC 2.5.1.-) (CAAX FARN - LENGTH = 431 COMBINED P-VALUE = 1.24e-20 E-VALUE = 6.2e-20 - DIAGRAM: 144_[1]_5_[2]_4_[1]_1_[2]_4_[1]_1_[2]_4_[1]_4_[2]_5_[1]_35_[2]_4 - - - [1] - 8.8e-1 - GGFQGR - + ++++ -76 PALTKEFHKMYLDVAFEISLPPQMTALDASQPWMLYWIANSLKVMDRDWLSDDTKRKIVVKLFTISPSGGPFGGG - - [2] [1] - 7 6.4e-07 1.0e-13 - PNKEVHTCYTYWALAALAILNKLH INKEKLIQWIKSCQ GGFQGRPNKEVHTCYTYWALAALAILNK - ++++++++ ++++++++++ ++++ ++++++++++ +++ + ++ ++++++++ +++++++++++++ -151 PGQLSHLASTYAAINALSLCDNIDGCWDRIDRKGIYQWLISLKEPNGGFKTCLEVGEVDTRGIYCALSIATLLNI - - [2] [1] [2] [1] - 2.5e-08 3.1e-17 4.7e-11 2.4e- - LH INKEKLIQWIKSCQ GGFQGRPNKEVHTCYTYWALAALAILNKLH INKEKLIQWIKSCQ GGFQG - ++ + ++++++++++++ + ++ +++++++++++++++++++++++ ++++++++++++++ ++ + -226 LTEELTEGVLNYLKNCQNYEGGFGSCPHVDEAHGGYTFCATASLAILRSMDQINVEKLLEWSSARQLQEERGFCG - - [2] [1] - 16 4.9e-09 2.7e-13 - RPNKEVHTCYTYWALAALAILNKLH INKEKLIQWIKSCQ GGFQGRPNKEVHTCYTYWALAALAILN - + ++++++++++++ +++++++++ +++++++++++ ++ +++++++++++++++ +++ ++++++ -301 RSNKLVDGCYSFWVGGSAAILEAFGYGQCFNKHALRDYILYCCQEKEQPGLRDKPGAHSDFYHTNYCLLGLAVAE - - [2] - 9.8e-05 - KLH INKEKLIQWIKSCQ - + +++++++ + +++ -376 SSYSCTPNDSPHNIKCTPDRLIGSSKLTDVNPVYGLPIENVRKIIHYFKSNLSSPS - -******************************************************************************** - - -CPU: pmgm2 -Time 0.130000 secs. - -mast meme.farntrans5.tcm.txt -text -stdout diff --git a/Tests/Motif/meme.dna.oops.txt b/Tests/Motif/meme.dna.oops.txt deleted file mode 100644 index d2f53feedad..00000000000 --- a/Tests/Motif/meme.dna.oops.txt +++ /dev/null @@ -1,324 +0,0 @@ -******************************************************************************** -MEME - Motif discovery tool -******************************************************************************** -MEME version 3.0 (Release date: 2004/08/18 09:07:01) - -For further information on how to interpret these results or to get -a copy of the MEME software please access http://meme.sdsc.edu. 
- -This file may be used as input to the MAST algorithm for searching -sequence databases for matches to groups of motifs. MAST is available -for interactive use and downloading at http://meme.sdsc.edu. -******************************************************************************** - - -******************************************************************************** -REFERENCE -******************************************************************************** -If you use this program in your research, please cite: - -Timothy L. Bailey and Charles Elkan, -"Fitting a mixture model by expectation maximization to discover -motifs in biopolymers", Proceedings of the Second International -Conference on Intelligent Systems for Molecular Biology, pp. 28-36, -AAAI Press, Menlo Park, California, 1994. -******************************************************************************** - - -******************************************************************************** -TRAINING SET -******************************************************************************** -DATAFILE= INO_up800.s -ALPHABET= ACGT -Sequence name Weight Length Sequence name Weight Length -------------- ------ ------ ------------- ------ ------ -CHO1 1.0000 800 CHO2 1.0000 800 -FAS1 1.0000 800 FAS2 1.0000 800 -ACC1 1.0000 800 INO1 1.0000 800 -OPI3 1.0000 800 -******************************************************************************** - -******************************************************************************** -COMMAND LINE SUMMARY -******************************************************************************** -This information can also be useful in the event you wish to report a -problem with the MEME software. - -command: meme -mod oops -dna -revcomp -nmotifs 2 -bfile yeast.nc.6.freq INO_up800.s - -model: mod= oops nmotifs= 2 evt= inf -object function= E-value of product of p-values -width: minw= 8 maxw= 50 minic= 0.00 -width: wg= 11 ws= 1 endgaps= yes -nsites: minsites= 7 maxsites= 7 wnsites= 0.8 -theta: prob= 1 spmap= uni spfuzz= 0.5 -em: prior= dirichlet b= 0.01 maxiter= 50 - distance= 1e-05 -data: n= 5600 N= 7 -strands: + - -sample: seed= 0 seqfrac= 1 -Letter frequencies in dataset: -A 0.304 C 0.196 G 0.196 T 0.304 -Background letter frequencies (from yeast.nc.6.freq): -A 0.324 C 0.176 G 0.176 T 0.324 -******************************************************************************** - - -******************************************************************************** -MOTIF 1 width = 12 sites = 7 llr = 95 E-value = 2.0e-001 -******************************************************************************** --------------------------------------------------------------------------------- - Motif 1 Description --------------------------------------------------------------------------------- -Simplified A :::9:a::::3: -pos.-specific C ::a:9:11691a -probability G ::::1::94:4: -matrix T aa:1::9::11: - - bits 2.5 * * - 2.3 * * - 2.0 * * * * - 1.8 * * * * * -Information 1.5 *** ** *** * -content 1.3 *** ****** * -(19.5 bits) 1.0 ********** * - 0.8 ********** * - 0.5 ********** * - 0.3 ************ - 0.0 ------------ - -Multilevel TTCACATGCCGC -consensus G A -sequence - --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 1 sites sorted by position p-value --------------------------------------------------------------------------------- -Sequence name Strand Start P-value Site -------------- ------ ----- 
--------- ------------ -INO1 - 620 1.85e-08 GACAATACTT TTCACATGCCGC ATTTAGCCGC -FAS1 + 95 1.85e-08 GGCCAAAAAC TTCACATGCCGC CCAGCCAAGC -ACC1 + 83 1.52e-07 CGTTAAAATC TTCACATGGCCC GGCCGCGCGC -CHO2 + 354 2.52e-07 TGCCACACTT TTCTCATGCCGC ATTCATTATT -CHO1 + 611 4.23e-07 ACTTTGAACG TTCACACGGCAC CCTCACGCCT -FAS2 + 567 9.43e-07 CTCCCGCGTT TTCACATGCTAC CTCATTCGCC -OPI3 + 340 3.32e-06 CCAAGCCTCC TTCAGATCGCTC TTGTCGACCG --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 1 block diagrams --------------------------------------------------------------------------------- -SEQUENCE NAME POSITION P-VALUE MOTIF DIAGRAM -------------- ---------------- ------------- -INO1 1.8e-08 619_[-1]_169 -FAS1 1.8e-08 94_[+1]_694 -ACC1 1.5e-07 82_[+1]_706 -CHO2 2.5e-07 353_[+1]_435 -CHO1 4.2e-07 610_[+1]_178 -FAS2 9.4e-07 566_[+1]_222 -OPI3 3.3e-06 339_[+1]_449 --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 1 in BLOCKS format --------------------------------------------------------------------------------- -BL MOTIF 1 width=12 seqs=7 -INO1 ( 620) TTCACATGCCGC 1 -FAS1 ( 95) TTCACATGCCGC 1 -ACC1 ( 83) TTCACATGGCCC 1 -CHO2 ( 354) TTCTCATGCCGC 1 -CHO1 ( 611) TTCACACGGCAC 1 -FAS2 ( 567) TTCACATGCTAC 1 -OPI3 ( 340) TTCAGATCGCTC 1 -// - --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 1 position-specific scoring matrix --------------------------------------------------------------------------------- -log-odds matrix: alength= 4 w= 12 n= 5523 bayes= 9.62205 E= 2.0e-001 - -945 -945 -945 162 - -945 -945 -945 162 - -945 251 -945 -945 - 140 -945 -945 -118 - -945 229 -30 -945 - 162 -945 -945 -945 - -945 -30 -945 140 - -945 -30 229 -945 - -945 170 129 -945 - -945 229 -945 -118 - -18 -30 129 -118 - -945 251 -945 -945 --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 1 position-specific probability matrix --------------------------------------------------------------------------------- -letter-probability matrix: alength= 4 w= 12 nsites= 7 E= 2.0e-001 - 0.000000 0.000000 0.000000 1.000000 - 0.000000 0.000000 0.000000 1.000000 - 0.000000 1.000000 0.000000 0.000000 - 0.857143 0.000000 0.000000 0.142857 - 0.000000 0.857143 0.142857 0.000000 - 1.000000 0.000000 0.000000 0.000000 - 0.000000 0.142857 0.000000 0.857143 - 0.000000 0.142857 0.857143 0.000000 - 0.000000 0.571429 0.428571 0.000000 - 0.000000 0.857143 0.000000 0.142857 - 0.285714 0.142857 0.428571 0.142857 - 0.000000 1.000000 0.000000 0.000000 --------------------------------------------------------------------------------- - - - - - -Time 20.91 secs. 
- -******************************************************************************** - - -******************************************************************************** -MOTIF 2 width = 10 sites = 7 llr = 81 E-value = 1.1e+002 -******************************************************************************** --------------------------------------------------------------------------------- - Motif 2 Description --------------------------------------------------------------------------------- -Simplified A ::1:::9:6: -pos.-specific C :a:::a:911 -probability G 3:1aa:1:19 -matrix T 7:7::::11: - - bits 2.5 * *** - 2.3 * *** - 2.0 * *** * - 1.8 * *** * * -Information 1.5 * *** * * -content 1.3 * ***** * -(16.7 bits) 1.0 ** ***** * - 0.8 ** ***** * - 0.5 ******** * - 0.3 ********** - 0.0 ---------- - -Multilevel TCTGGCACAG -consensus G -sequence - --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 2 sites sorted by position p-value --------------------------------------------------------------------------------- -Sequence name Strand Start P-value Site -------------- ------ ----- --------- ---------- -OPI3 - 186 3.24e-07 GAAAACCAGA TCTGGCACAG ACCGTTGTCA -ACC1 + 232 3.24e-07 CCAGTCGTAT TCTGGCACAG TATAGCCTAG -CHO1 - 559 3.24e-07 ATATTCAGTG TCTGGCACAG AAGTCTGCAC -INO1 - 283 5.29e-06 ACGGTCTACG GCGGGCGCAG TCGCATGTCT -FAS1 + 44 6.25e-06 TACACGAGGT GCAGGCACGG TTCACTACTC -FAS2 - 185 8.48e-06 TTCTTGCTTT TCTGGCACTC TTGACGGCTT -CHO2 - 413 8.48e-06 TTTTGCCGTT TCTGGCATCG CCGTTCATTT --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 2 block diagrams --------------------------------------------------------------------------------- -SEQUENCE NAME POSITION P-VALUE MOTIF DIAGRAM -------------- ---------------- ------------- -OPI3 3.2e-07 185_[-2]_605 -ACC1 3.2e-07 231_[+2]_559 -CHO1 3.2e-07 558_[-2]_232 -INO1 5.3e-06 282_[-2]_508 -FAS1 6.3e-06 43_[+2]_747 -FAS2 8.5e-06 184_[-2]_606 -CHO2 8.5e-06 412_[-2]_378 --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 2 in BLOCKS format --------------------------------------------------------------------------------- -BL MOTIF 2 width=10 seqs=7 -OPI3 ( 186) TCTGGCACAG 1 -ACC1 ( 232) TCTGGCACAG 1 -CHO1 ( 559) TCTGGCACAG 1 -INO1 ( 283) GCGGGCGCAG 1 -FAS1 ( 44) GCAGGCACGG 1 -FAS2 ( 185) TCTGGCACTC 1 -CHO2 ( 413) TCTGGCATCG 1 -// - --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 2 position-specific scoring matrix --------------------------------------------------------------------------------- -log-odds matrix: alength= 4 w= 10 n= 5537 bayes= 9.62571 E= 1.1e+002 - -945 -945 70 114 - -945 251 -945 -945 - -118 -945 -30 114 - -945 -945 251 -945 - -945 -945 251 -945 - -945 251 -945 -945 - 140 -945 -30 -945 - -945 229 -945 -118 - 82 -30 -30 -118 - -945 -30 229 -945 --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 2 position-specific probability matrix --------------------------------------------------------------------------------- -letter-probability matrix: alength= 4 w= 
10 nsites= 7 E= 1.1e+002 - 0.000000 0.000000 0.285714 0.714286 - 0.000000 1.000000 0.000000 0.000000 - 0.142857 0.000000 0.142857 0.714286 - 0.000000 0.000000 1.000000 0.000000 - 0.000000 0.000000 1.000000 0.000000 - 0.000000 1.000000 0.000000 0.000000 - 0.857143 0.000000 0.142857 0.000000 - 0.000000 0.857143 0.000000 0.142857 - 0.571429 0.142857 0.142857 0.142857 - 0.000000 0.142857 0.857143 0.000000 --------------------------------------------------------------------------------- - - - - - -Time 41.19 secs. - -******************************************************************************** - - -******************************************************************************** -SUMMARY OF MOTIFS -******************************************************************************** - --------------------------------------------------------------------------------- - Combined block diagrams: non-overlapping sites with p-value < 0.0001 --------------------------------------------------------------------------------- -SEQUENCE NAME COMBINED P-VALUE MOTIF DIAGRAM -------------- ---------------- ------------- -CHO1 5.44e-06 152_[+2(1.10e-05)]_396_[-2(3.24e-07)]_42_[+1(4.23e-07)]_17_[+1(1.23e-05)]_149 -CHO2 6.96e-05 353_[+1(2.52e-07)]_47_[-2(8.48e-06)]_378 -FAS1 4.61e-06 43_[+2(6.25e-06)]_41_[+1(1.85e-08)]_694 -FAS2 2.34e-04 184_[-2(8.48e-06)]_372_[+1(9.43e-07)]_222 -ACC1 2.09e-06 82_[+1(1.52e-07)]_137_[+2(3.24e-07)]_559 -INO1 3.95e-06 282_[-2(5.29e-06)]_327_[-1(1.85e-08)]_55_[+1(7.55e-06)]_102 -OPI3 3.70e-05 185_[-2(3.24e-07)]_144_[+1(3.32e-06)]_449 --------------------------------------------------------------------------------- - -******************************************************************************** - - -******************************************************************************** -Stopped because nmotifs = 2 reached. -******************************************************************************** - -CPU: pmgm2 - -******************************************************************************** diff --git a/Tests/Motif/meme.out b/Tests/Motif/meme.out deleted file mode 100644 index 7c9ec1cf5aa..00000000000 --- a/Tests/Motif/meme.out +++ /dev/null @@ -1,225 +0,0 @@ -******************************************************************************** -MEME - Motif discovery tool -******************************************************************************** -MEME version 3.5.7 (Release date: 2007-12-17 16:56:19 -0800 (Mon, 17 Dec 2007)) - -For further information on how to interpret these results or to get -a copy of the MEME software please access http://meme.nbcr.net. - -This file may be used as input to the MAST algorithm for searching -sequence databases for matches to groups of motifs. MAST is available -for interactive use and downloading at http://meme.nbcr.net. -******************************************************************************** - - -******************************************************************************** -REFERENCE -******************************************************************************** -If you use this program in your research, please cite: - -Timothy L. Bailey and Charles Elkan, -"Fitting a mixture model by expectation maximization to discover -motifs in biopolymers", Proceedings of the Second International -Conference on Intelligent Systems for Molecular Biology, pp. 28-36, -AAAI Press, Menlo Park, California, 1994. 
-******************************************************************************** - - -******************************************************************************** -TRAINING SET -******************************************************************************** -DATAFILE= test.fa -ALPHABET= ACGT -Sequence name Weight Length Sequence name Weight Length -------------- ------ ------ ------------- ------ ------ -SEQ1; 1.0000 200 SEQ2; 1.0000 200 -SEQ3; 1.0000 200 SEQ4; 1.0000 200 -SEQ5; 1.0000 200 SEQ6; 1.0000 200 -SEQ7; 1.0000 200 SEQ8; 1.0000 200 -SEQ9; 1.0000 200 SEQ10; 1.0000 200 -******************************************************************************** - -******************************************************************************** -COMMAND LINE SUMMARY -******************************************************************************** -This information can also be useful in the event you wish to report a -problem with the MEME software. - -command: meme test.fa -dna -w 10 -dir /home/bartek/MetaMotif/meme - -model: mod= zoops nmotifs= 1 evt= inf -object function= E-value of product of p-values -width: minw= 10 maxw= 10 minic= 0.00 -width: wg= 11 ws= 1 endgaps= yes -nsites: minsites= 2 maxsites= 10 wnsites= 0.8 -theta: prob= 1 spmap= uni spfuzz= 0.5 -em: prior= dirichlet b= 0.01 maxiter= 50 - distance= 1e-05 -data: n= 2000 N= 10 -strands: + -sample: seed= 0 seqfrac= 1 -Letter frequencies in dataset: -A 0.255 C 0.235 G 0.261 T 0.249 -Background letter frequencies (from dataset with add-one prior applied): -A 0.255 C 0.236 G 0.260 T 0.249 -******************************************************************************** - - -******************************************************************************** -MOTIF 1 width = 10 sites = 10 llr = 140 E-value = 1.1e-022 -******************************************************************************** --------------------------------------------------------------------------------- - Motif 1 Description --------------------------------------------------------------------------------- -Simplified A :::aa::::a -pos.-specific C a:a:::a::: -probability G :::::::a:: -matrix T :a:::a::a: - - bits 2.1 *** ** * - 1.9 ********** - 1.7 ********** - 1.5 ********** -Information 1.3 ********** -content 1.0 ********** -(20.1 bits) 0.8 ********** - 0.6 ********** - 0.4 ********** - 0.2 ********** - 0.0 ---------- - -Multilevel CTCAATCGTA -consensus -sequence - --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 1 sites sorted by position p-value --------------------------------------------------------------------------------- -Sequence name Start P-value Site -------------- ----- --------- ---------- -SEQ10; 3 8.71e-07 TT CTCAATCGTA GAGTATGCTT -SEQ9; 93 8.71e-07 CGCCTAGAAA CTCAATCGTA GAGTATCACG -SEQ8; 172 8.71e-07 CCCGGAGTAT CTCAATCGTA GATGAATACC -SEQ7; 177 8.71e-07 AAGTCTTTGA CTCAATCGTA GACCCAACAC -SEQ6; 105 8.71e-07 GTCAGCCGGT CTCAATCGTA GATCAGAGGC -SEQ5; 185 8.71e-07 ACGGGCAAGC CTCAATCGTA GAGGAT -SEQ4; 173 8.71e-07 GTTCGAGAGC CTCAATCGTA GATAACCTCT -SEQ3; 112 8.71e-07 GTTATATTGG CTCAATCGTA GATGAAACCA -SEQ2; 172 8.71e-07 AAGCGTCGTG CTCAATCGTA GATAACAGAG -SEQ1; 52 8.71e-07 CTTTACTCGG CTCAATCGTA GAGGCGGTGC --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 1 block diagrams 
--------------------------------------------------------------------------------- -SEQUENCE NAME POSITION P-VALUE MOTIF DIAGRAM -------------- ---------------- ------------- -SEQ10; 8.7e-07 2_[1]_188 -SEQ9; 8.7e-07 92_[1]_98 -SEQ8; 8.7e-07 171_[1]_19 -SEQ7; 8.7e-07 176_[1]_14 -SEQ6; 8.7e-07 104_[1]_86 -SEQ5; 8.7e-07 184_[1]_6 -SEQ4; 8.7e-07 172_[1]_18 -SEQ3; 8.7e-07 111_[1]_79 -SEQ2; 8.7e-07 171_[1]_19 -SEQ1; 8.7e-07 51_[1]_139 --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 1 in BLOCKS format --------------------------------------------------------------------------------- -BL MOTIF 1 width=10 seqs=10 -SEQ10; ( 3) CTCAATCGTA 1 -SEQ9; ( 93) CTCAATCGTA 1 -SEQ8; ( 172) CTCAATCGTA 1 -SEQ7; ( 177) CTCAATCGTA 1 -SEQ6; ( 105) CTCAATCGTA 1 -SEQ5; ( 185) CTCAATCGTA 1 -SEQ4; ( 173) CTCAATCGTA 1 -SEQ3; ( 112) CTCAATCGTA 1 -SEQ2; ( 172) CTCAATCGTA 1 -SEQ1; ( 52) CTCAATCGTA 1 -// - --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 1 position-specific scoring matrix --------------------------------------------------------------------------------- -log-odds matrix: alength= 4 w= 10 n= 1910 bayes= 8.51691 E= 1.1e-022 - -997 208 -997 -997 - -997 -997 -997 200 - -997 208 -997 -997 - 197 -997 -997 -997 - 197 -997 -997 -997 - -997 -997 -997 200 - -997 208 -997 -997 - -997 -997 194 -997 - -997 -997 -997 200 - 197 -997 -997 -997 --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 1 position-specific probability matrix --------------------------------------------------------------------------------- -letter-probability matrix: alength= 4 w= 10 nsites= 10 E= 1.1e-022 - 0.000000 1.000000 0.000000 0.000000 - 0.000000 0.000000 0.000000 1.000000 - 0.000000 1.000000 0.000000 0.000000 - 1.000000 0.000000 0.000000 0.000000 - 1.000000 0.000000 0.000000 0.000000 - 0.000000 0.000000 0.000000 1.000000 - 0.000000 1.000000 0.000000 0.000000 - 0.000000 0.000000 1.000000 0.000000 - 0.000000 0.000000 0.000000 1.000000 - 1.000000 0.000000 0.000000 0.000000 --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 1 regular expression --------------------------------------------------------------------------------- -CTCAATCGTA --------------------------------------------------------------------------------- - - - - -Time 0.26 secs. 
- -******************************************************************************** - - -******************************************************************************** -SUMMARY OF MOTIFS -******************************************************************************** - --------------------------------------------------------------------------------- - Combined block diagrams: non-overlapping sites with p-value < 0.0001 --------------------------------------------------------------------------------- -SEQUENCE NAME COMBINED P-VALUE MOTIF DIAGRAM -------------- ---------------- ------------- -SEQ1; 1.66e-04 51_[1(8.71e-07)]_139 -SEQ2; 1.66e-04 171_[1(8.71e-07)]_19 -SEQ3; 1.66e-04 111_[1(8.71e-07)]_79 -SEQ4; 1.66e-04 172_[1(8.71e-07)]_18 -SEQ5; 1.66e-04 184_[1(8.71e-07)]_6 -SEQ6; 1.66e-04 104_[1(8.71e-07)]_86 -SEQ7; 1.66e-04 176_[1(8.71e-07)]_14 -SEQ8; 1.66e-04 171_[1(8.71e-07)]_19 -SEQ9; 1.66e-04 92_[1(8.71e-07)]_98 -SEQ10; 1.66e-04 2_[1(8.71e-07)]_188 --------------------------------------------------------------------------------- - -******************************************************************************** - - -******************************************************************************** -Stopped because nmotifs = 1 reached. -******************************************************************************** - -CPU: pc-arendt9 - -******************************************************************************** diff --git a/Tests/Motif/meme.protein.oops.txt b/Tests/Motif/meme.protein.oops.txt deleted file mode 100644 index 1d72fe2e16e..00000000000 --- a/Tests/Motif/meme.protein.oops.txt +++ /dev/null @@ -1,630 +0,0 @@ -******************************************************************************** -MEME - Motif discovery tool -******************************************************************************** -MEME version 3.0 (Release date: 2004/08/18 09:07:01) - -For further information on how to interpret these results or to get -a copy of the MEME software please access http://meme.sdsc.edu. - -This file may be used as input to the MAST algorithm for searching -sequence databases for matches to groups of motifs. MAST is available -for interactive use and downloading at http://meme.sdsc.edu. -******************************************************************************** - - -******************************************************************************** -REFERENCE -******************************************************************************** -If you use this program in your research, please cite: - -Timothy L. Bailey and Charles Elkan, -"Fitting a mixture model by expectation maximization to discover -motifs in biopolymers", Proceedings of the Second International -Conference on Intelligent Systems for Molecular Biology, pp. 28-36, -AAAI Press, Menlo Park, California, 1994. 
-******************************************************************************** - - -******************************************************************************** -TRAINING SET -******************************************************************************** -DATAFILE= adh.s -ALPHABET= ACDEFGHIKLMNPQRSTVWY -Sequence name Weight Length Sequence name Weight Length -------------- ------ ------ ------------- ------ ------ -2BHD_STREX 1.0000 255 3BHD_COMTE 1.0000 253 -ADH_DROME 1.0000 255 AP27_MOUSE 1.0000 244 -BA72_EUBSP 1.0000 249 BDH_HUMAN 1.0000 343 -BPHB_PSEPS 1.0000 275 BUDC_KLETE 1.0000 241 -DHES_HUMAN 1.0000 327 DHGB_BACME 1.0000 262 -DHII_HUMAN 1.0000 292 DHMA_FLAS1 1.0000 270 -ENTA_ECOLI 1.0000 248 FIXR_BRAJA 1.0000 278 -GUTD_ECOLI 1.0000 259 HDE_CANTR 1.0000 906 -HDHA_ECOLI 1.0000 255 LIGD_PSEPA 1.0000 305 -NODG_RHIME 1.0000 245 RIDH_KLEAE 1.0000 249 -YINL_LISMO 1.0000 248 YRTP_BACSU 1.0000 238 -CSGA_MYXXA 1.0000 166 DHB2_HUMAN 1.0000 387 -DHB3_HUMAN 1.0000 310 DHCA_HUMAN 1.0000 276 -FABI_ECOLI 1.0000 262 FVT1_HUMAN 1.0000 332 -HMTR_LEIMA 1.0000 287 MAS1_AGRRA 1.0000 476 -PCR_PEA 1.0000 399 RFBB_NEIGO 1.0000 346 -YURA_MYXXA 1.0000 258 -******************************************************************************** - -******************************************************************************** -COMMAND LINE SUMMARY -******************************************************************************** -This information can also be useful in the event you wish to report a -problem with the MEME software. - -command: meme adh.s -mod oops -protein -nmotifs 2 - -model: mod= oops nmotifs= 2 evt= inf -object function= E-value of product of p-values -width: minw= 8 maxw= 50 minic= 0.00 -width: wg= 11 ws= 1 endgaps= yes -nsites: minsites= 33 maxsites= 33 wnsites= 0.8 -theta: prob= 1 spmap= pam spfuzz= 120 -em: prior= dmix b= 0 maxiter= 50 - distance= 1e-05 -data: n= 9996 N= 33 - -sample: seed= 0 seqfrac= 1 -Dirichlet mixture priors file: prior30.plib -Letter frequencies in dataset: -A 0.111 C 0.012 D 0.050 E 0.055 F 0.036 G 0.090 H 0.018 I 0.057 K 0.052 -L 0.092 M 0.027 N 0.041 P 0.041 Q 0.029 R 0.049 S 0.064 T 0.057 V 0.083 -W 0.010 Y 0.027 -Background letter frequencies (from dataset with add-one prior applied): -A 0.111 C 0.012 D 0.050 E 0.055 F 0.036 G 0.090 H 0.018 I 0.057 K 0.052 -L 0.092 M 0.027 N 0.041 P 0.041 Q 0.029 R 0.049 S 0.064 T 0.057 V 0.083 -W 0.010 Y 0.027 -******************************************************************************** - - -******************************************************************************** -MOTIF 1 width = 29 sites = 33 llr = 1118 E-value = 3.6e-165 -******************************************************************************** --------------------------------------------------------------------------------- - Motif 1 Description --------------------------------------------------------------------------------- -Simplified A :162:56112:1:215:::4:::::::11 -pos.-specific C :1:::::::::::1:::::::::::1:1: -probability D ::::::::1:::1::::2::::::::::: -matrix E ::::::::1:::21:::5:111::::::: - F :::::31:::2:::::1:::::::::::: - G :2:::13::4:::1:1:::11:5:1:11: - H :::::::::1::1:::1::::1::::::: - I :::::::11:::::::::1::::5:2:13 - K ::::9:::1:::2:::1::121::1:1:: - L :::::::31:6:1:5:215::1:::::12 - M ::1::::1:2:1::2:1:1:::::::::: - N :::::::::1::::::::::::2:1:41: - P ::::::::::::::::::::3:::::::: - Q ::::::::::::1::11:::11::::::: - R ::::::::2:::3::22::21:1:4:1:: - S :426::1:11:213:::::1:11:::21: - T :1:2::::1::3:1::::::11::1:11: - V 
:::::::41::2:1::1:1::::316123 - W :::::1::::::::::::::::::::::: - Y 9:::::::::::::::::2::2::::::: - - bits 6.7 - 6.0 - 5.4 - 4.7 * -Information 4.0 * * -content 3.4 * * -(48.9 bits) 2.7 * ** * - 2.0 * **** * * * * * - 1.3 ******** ******* ** * ***** * - 0.7 ***************************** - 0.0 ----------------------------- - -Multilevel YSASKAAVxGLTRSLAxELAPxGIRVNVV -consensus FGL FSE MR D V I -sequence - - --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 1 sites sorted by position p-value --------------------------------------------------------------------------------- -Sequence name Start P-value Site -------------- ----- --------- ----------------------------- -YRTP_BACSU 155 1.64e-22 GQRGAAVTSA YSASKFAVLGLTESLMQEVRKHNIRVSAL TPSTVASDMS -AP27_MOUSE 149 6.32e-22 AHVTFPNLIT YSSTKGAMTMLTKAMAMELGPHKIRVNSV NPTVVLTDMG -NODG_RHIME 152 1.13e-21 GAIGNPGQTN YCASKAGMIGFSKSLAQEIATRNITVNCV APGFIESAMT -BUDC_KLETE 152 4.04e-21 GHVGNPELAV YSSSKFAVRGLTQTAARDLAPLGITVNGF CPGIVKTPMW -FIXR_BRAJA 189 6.12e-21 SRVHPFAGSA YATSKAALASLTRELAHDYAPHGIRVNAI APGEIRTDML -DHGB_BACME 160 7.52e-20 WKIPWPLFVH YAASKGGMKLMTETLALEYAPKGIRVNNI GPGAINTPIN -HMTR_LEIMA 193 3.35e-19 TNQPLLGYTI YTMAKGALEGLTRSAALELAPLQIRVNGV GPGLSVLVDD -YURA_MYXXA 160 4.82e-19 AGFRGLPATR YSASKAFLSTFMESLRVDLRGTGVRVTCI YPGFVKSELT -GUTD_ECOLI 154 4.82e-19 GKVGSKHNSG YSAAKFGGVGLTQSLALDLAEYGITVHSL MLGNLLKSPM -2BHD_STREX 152 1.11e-18 GLMGLALTSS YGASKWGVRGLSKLAAVELGTDRIRVNSV HPGMTYTPMT -HDHA_ECOLI 159 1.25e-18 AENKNINMTS YASSKAAASHLVRNMAFDLGEKNIRVNGI APGAILTDAL -DHB2_HUMAN 232 2.23e-18 GGAPMERLAS YGSSKAAVTMFSSVMRLELSKWGIKVASI QPGGFLTNIA -DHMA_FLAS1 165 5.53e-18 SFMAEPEAAA YVAAKGGVAMLTRAMAVDLARHGILVNMI APGPVDVTGN -HDE_CANTR 467 9.65e-18 GIYGNFGQAN YSSSKAGILGLSKTMAIEGAKNNIKVNIV APHAETAMTL -FVT1_HUMAN 186 2.86e-17 GQLGLFGFTA YSASKFAIRGLAEALQMEVKPYNVYITVA YPPDTDTPGF -BDH_HUMAN 208 8.20e-17 GRMANPARSP YCITKFGVEAFSDCLRYEMYPLGVKVSVV EPGNFIAATS -RIDH_KLEAE 160 9.09e-17 GVVPVIWEPV YTASKFAVQAFVHTTRRQVAQYGVRVGAV LPGPVVTALL -DHES_HUMAN 155 1.37e-16 GLMGLPFNDV YCASKFALEGLCESLAVLLLPFGVHLSLI ECGPVHTAFM -BA72_EUBSP 157 2.52e-16 GIFGSLSGVG YPASKASVIGLTHGLGREIIRKNIRVVGV APGVVNTDMT -LIGD_PSEPA 157 1.21e-15 GFMGSALAGP YSAAKAASINLMEGYRQGLEKYGIGVSVC TPANIKSNIA -DHII_HUMAN 183 1.61e-15 GKVAYPMVAA YSASKFALDGFFSSIRKEYSVSRVNVSIT LCVLGLIDTE -ENTA_ECOLI 144 1.77e-15 AHTPRIGMSA YGASKAALKSLALSVGLELAGSGVRCNVV SPGSTDTDMQ -3BHD_COMTE 151 7.81e-15 SWLPIEQYAG YSASKAAVSALTRAAALSCRKQGYAIRVN SIHPDGIYTP -DHB3_HUMAN 198 8.55e-15 ALFPWPLYSM YSASKAFVCAFSKALQEEYKAKEVIIQVL TPYAVSTAMT -RFBB_NEIGO 165 1.47e-14 ETTPYAPSSP YSASKAAADHLVRAWQRTYRLPSIVSNCS NNYGPRQFPE -YINL_LISMO 154 3.24e-14 GLKAYPGGAV YGATKWAVRDLMEVLRMESAQEGTNIRTA TIYPAAINTE -BPHB_PSEPS 153 1.80e-12 GFYPNGGGPL YTAAKQAIVGLVRELAFELAPYVRVNGVG PGGMNSDMRG -CSGA_MYXXA 88 2.10e-12 AANTDGGAYA YRMSKAALNMAVRSMSTDLRPEGFVTVLL HPGWVQTDMG -FABI_ECOLI 159 4.15e-12 AERAIPNYNV MGLAKASLEANVRYMANAMGPEGVRVNAI SAGPIRTLAA -ADH_DROME 152 5.20e-12 GFNAIYQVPV YSGTKAAVVNFTSSLAKLAPITGVTAYTV NPGITRTTLV -DHCA_HUMAN 193 4.80e-10 HQKEGWPSSA YGVTKIGVTVLSRIHARKLSEQRKGDKIL LNACCPGWVR -PCR_PEA 26 2.77e-08 PKEGKIGASL KDSTLFGVSSLSDSLKGDFTSSALRCKEL RQKVGAVRAE -MAS1_AGRRA 349 5.72e-08 VDYQRFHLQW YINCVAPLRMTELCLPHLYETGSGRIVNI NSMSGQRVLN --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 1 block diagrams 
--------------------------------------------------------------------------------- -SEQUENCE NAME POSITION P-VALUE MOTIF DIAGRAM -------------- ---------------- ------------- -YRTP_BACSU 1.6e-22 154_[1]_55 -AP27_MOUSE 6.3e-22 148_[1]_67 -NODG_RHIME 1.1e-21 151_[1]_65 -BUDC_KLETE 4e-21 151_[1]_61 -FIXR_BRAJA 6.1e-21 188_[1]_61 -DHGB_BACME 7.5e-20 159_[1]_74 -HMTR_LEIMA 3.4e-19 192_[1]_66 -YURA_MYXXA 4.8e-19 159_[1]_70 -GUTD_ECOLI 4.8e-19 153_[1]_77 -2BHD_STREX 1.1e-18 151_[1]_75 -HDHA_ECOLI 1.2e-18 158_[1]_68 -DHB2_HUMAN 2.2e-18 231_[1]_127 -DHMA_FLAS1 5.5e-18 164_[1]_77 -HDE_CANTR 9.7e-18 466_[1]_411 -FVT1_HUMAN 2.9e-17 185_[1]_118 -BDH_HUMAN 8.2e-17 207_[1]_107 -RIDH_KLEAE 9.1e-17 159_[1]_61 -DHES_HUMAN 1.4e-16 154_[1]_144 -BA72_EUBSP 2.5e-16 156_[1]_64 -LIGD_PSEPA 1.2e-15 156_[1]_120 -DHII_HUMAN 1.6e-15 182_[1]_81 -ENTA_ECOLI 1.8e-15 143_[1]_76 -3BHD_COMTE 7.8e-15 150_[1]_74 -DHB3_HUMAN 8.6e-15 197_[1]_84 -RFBB_NEIGO 1.5e-14 164_[1]_153 -YINL_LISMO 3.2e-14 153_[1]_66 -BPHB_PSEPS 1.8e-12 152_[1]_94 -CSGA_MYXXA 2.1e-12 87_[1]_50 -FABI_ECOLI 4.2e-12 158_[1]_75 -ADH_DROME 5.2e-12 151_[1]_75 -DHCA_HUMAN 4.8e-10 192_[1]_55 -PCR_PEA 2.8e-08 25_[1]_345 -MAS1_AGRRA 5.7e-08 348_[1]_99 --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 1 in BLOCKS format --------------------------------------------------------------------------------- -BL MOTIF 1 width=29 seqs=33 -YRTP_BACSU ( 155) YSASKFAVLGLTESLMQEVRKHNIRVSAL 1 -AP27_MOUSE ( 149) YSSTKGAMTMLTKAMAMELGPHKIRVNSV 1 -NODG_RHIME ( 152) YCASKAGMIGFSKSLAQEIATRNITVNCV 1 -BUDC_KLETE ( 152) YSSSKFAVRGLTQTAARDLAPLGITVNGF 1 -FIXR_BRAJA ( 189) YATSKAALASLTRELAHDYAPHGIRVNAI 1 -DHGB_BACME ( 160) YAASKGGMKLMTETLALEYAPKGIRVNNI 1 -HMTR_LEIMA ( 193) YTMAKGALEGLTRSAALELAPLQIRVNGV 1 -YURA_MYXXA ( 160) YSASKAFLSTFMESLRVDLRGTGVRVTCI 1 -GUTD_ECOLI ( 154) YSAAKFGGVGLTQSLALDLAEYGITVHSL 1 -2BHD_STREX ( 152) YGASKWGVRGLSKLAAVELGTDRIRVNSV 1 -HDHA_ECOLI ( 159) YASSKAAASHLVRNMAFDLGEKNIRVNGI 1 -DHB2_HUMAN ( 232) YGSSKAAVTMFSSVMRLELSKWGIKVASI 1 -DHMA_FLAS1 ( 165) YVAAKGGVAMLTRAMAVDLARHGILVNMI 1 -HDE_CANTR ( 467) YSSSKAGILGLSKTMAIEGAKNNIKVNIV 1 -FVT1_HUMAN ( 186) YSASKFAIRGLAEALQMEVKPYNVYITVA 1 -BDH_HUMAN ( 208) YCITKFGVEAFSDCLRYEMYPLGVKVSVV 1 -RIDH_KLEAE ( 160) YTASKFAVQAFVHTTRRQVAQYGVRVGAV 1 -DHES_HUMAN ( 155) YCASKFALEGLCESLAVLLLPFGVHLSLI 1 -BA72_EUBSP ( 157) YPASKASVIGLTHGLGREIIRKNIRVVGV 1 -LIGD_PSEPA ( 157) YSAAKAASINLMEGYRQGLEKYGIGVSVC 1 -DHII_HUMAN ( 183) YSASKFALDGFFSSIRKEYSVSRVNVSIT 1 -ENTA_ECOLI ( 144) YGASKAALKSLALSVGLELAGSGVRCNVV 1 -3BHD_COMTE ( 151) YSASKAAVSALTRAAALSCRKQGYAIRVN 1 -DHB3_HUMAN ( 198) YSASKAFVCAFSKALQEEYKAKEVIIQVL 1 -RFBB_NEIGO ( 165) YSASKAAADHLVRAWQRTYRLPSIVSNCS 1 -YINL_LISMO ( 154) YGATKWAVRDLMEVLRMESAQEGTNIRTA 1 -BPHB_PSEPS ( 153) YTAAKQAIVGLVRELAFELAPYVRVNGVG 1 -CSGA_MYXXA ( 88) YRMSKAALNMAVRSMSTDLRPEGFVTVLL 1 -FABI_ECOLI ( 159) MGLAKASLEANVRYMANAMGPEGVRVNAI 1 -ADH_DROME ( 152) YSGTKAAVVNFTSSLAKLAPITGVTAYTV 1 -DHCA_HUMAN ( 193) YGVTKIGVTVLSRIHARKLSEQRKGDKIL 1 -PCR_PEA ( 26) KDSTLFGVSSLSDSLKGDFTSSALRCKEL 1 -MAS1_AGRRA ( 349) YINCVAPLRMTELCLPHLYETGSGRIVNI 1 -// - --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 1 position-specific scoring matrix --------------------------------------------------------------------------------- -log-odds matrix: alength= 20 w= 29 n= 9072 bayes= 8.09755 E= 3.6e-165 - -716 -497 
-698 -691 -172 -730 -304 -562 -80 -507 12 -550 -661 -513 -563 -605 -625 -609 -217 508 - -51 240 -90 -249 -334 53 -232 -106 -219 -337 -278 -209 -72 -192 -79 276 81 -153 -332 -306 - 257 -64 -390 -343 -320 -160 -326 -144 -343 -193 26 -120 -436 -313 -349 86 -117 -123 -309 -358 - 57 116 -511 -541 -504 -420 -453 -532 -481 -549 -470 -353 -434 -438 -470 325 161 -493 -485 -492 - -433 -317 -482 -429 -524 -520 -342 -409 413 -326 -416 -349 -435 -343 -57 -433 -392 -321 -375 -453 - 205 -259 -559 -505 282 34 -356 -70 -475 -333 -299 -445 -475 -1 -468 -326 -334 -312 255 -226 - 235 -273 -630 -622 68 154 -557 -603 -636 -616 -544 -517 -44 -518 -595 5 -371 -482 -571 -615 - -80 -195 -498 -421 -234 -161 -307 78 -389 146 165 -378 -410 -325 -384 -96 -250 223 -275 -265 - -75 107 27 110 -355 -358 -150 45 46 -69 -279 -14 -296 44 145 85 61 -5 -331 -279 - 33 -283 -54 -168 -321 176 159 -291 -150 -137 223 56 -315 -120 -208 49 -61 -125 -319 -277 - -215 -277 -532 -447 217 -558 -360 -167 -423 288 23 -95 -427 -325 -396 -413 -122 -250 -311 -327 - -89 117 -466 -101 -26 -476 -318 -198 -376 -255 156 -325 -415 -315 -380 173 259 99 -304 -300 - -295 -344 26 181 -386 -383 160 -385 151 -69 -304 -196 -322 112 242 49 -236 -378 -354 -305 - 56 211 -348 6 -258 -72 -244 -52 -256 -112 -197 -40 -364 -217 -294 224 101 -30 -286 10 - -3 -202 -498 -419 -224 -471 54 -37 -388 234 272 -382 -407 -317 -379 -325 -74 -85 143 8 - 204 -306 -264 -189 -396 -69 -189 -390 -14 -378 3 -217 -51 156 193 -72 -251 -369 -361 -322 - -277 -280 -236 -39 57 -152 158 -80 39 79 153 -21 -313 156 145 -204 -60 39 -315 8 - -164 -464 216 305 -496 -158 -268 -472 -47 -16 -396 -277 -379 31 -306 -90 -81 -452 -469 -411 - -157 123 -496 -418 -13 -163 -303 31 -385 220 113 -376 -406 -318 -379 -98 -251 25 -270 253 - 149 -311 -209 33 -354 22 -151 -97 45 -149 -278 -172 -46 -92 145 50 -59 -348 -330 5 - -142 -318 -208 76 -356 -68 -151 -97 147 -150 -280 -172 276 112 34 -61 60 -140 -332 -280 - -268 -317 -47 77 -38 -150 250 -359 121 -18 -279 -15 -46 112 -37 50 13 -352 139 222 - -156 -338 -229 -48 -391 239 -183 -396 -30 -386 -318 190 -329 27 74 -6 -247 -151 -356 -313 - -372 -253 -410 -399 -123 -293 -366 345 -197 -52 -102 -354 -429 -337 -203 -342 -171 118 -313 -110 - -144 -290 -233 -162 -327 -68 73 -84 85 -139 -256 56 -312 -113 285 -203 97 1 -321 8 - -146 94 -193 -329 -285 -443 -265 73 -337 -177 -228 -168 -320 -310 -312 -198 -113 308 -344 -389 - -142 -319 -207 -138 -356 -68 75 -360 46 -347 -280 302 -296 44 34 113 13 -5 -332 6 - 6 273 -480 -92 -230 26 -297 77 -371 -32 33 44 -403 -310 -372 80 13 130 -270 -259 - -80 123 -491 -416 -13 -162 -302 217 -383 93 -169 -51 -405 -320 -379 -96 -66 178 -270 -260 --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 1 position-specific probability matrix --------------------------------------------------------------------------------- -letter-probability matrix: alength= 20 w= 29 nsites= 33 E= 3.6e-165 - 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.030303 0.000000 0.030303 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.939394 - 0.090909 0.090909 0.030303 0.000000 0.000000 0.181818 0.000000 0.030303 0.000000 0.000000 0.000000 0.000000 0.030303 0.000000 0.030303 0.393939 0.090909 0.030303 0.000000 0.000000 - 0.575758 0.000000 0.000000 0.000000 0.000000 0.030303 0.000000 0.030303 0.000000 0.030303 0.060606 0.030303 0.000000 0.000000 0.000000 0.181818 0.030303 0.030303 0.000000 0.000000 - 
0.181818 0.030303 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.606061 0.181818 0.000000 0.000000 0.000000 - 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.939394 0.030303 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.030303 0.000000 0.000000 - 0.484848 0.000000 0.000000 0.000000 0.272727 0.121212 0.000000 0.030303 0.000000 0.000000 0.000000 0.000000 0.000000 0.030303 0.000000 0.000000 0.000000 0.000000 0.060606 0.000000 - 0.575758 0.000000 0.000000 0.000000 0.060606 0.272727 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.030303 0.000000 0.000000 0.060606 0.000000 0.000000 0.000000 0.000000 - 0.060606 0.000000 0.000000 0.000000 0.000000 0.030303 0.000000 0.090909 0.000000 0.272727 0.090909 0.000000 0.000000 0.000000 0.000000 0.030303 0.000000 0.424242 0.000000 0.000000 - 0.060606 0.030303 0.060606 0.121212 0.000000 0.000000 0.000000 0.090909 0.060606 0.060606 0.000000 0.030303 0.000000 0.030303 0.151515 0.121212 0.090909 0.090909 0.000000 0.000000 - 0.151515 0.000000 0.030303 0.000000 0.000000 0.363636 0.060606 0.000000 0.000000 0.030303 0.151515 0.060606 0.000000 0.000000 0.000000 0.090909 0.030303 0.030303 0.000000 0.000000 - 0.030303 0.000000 0.000000 0.000000 0.242424 0.000000 0.000000 0.000000 0.000000 0.636364 0.030303 0.030303 0.000000 0.000000 0.000000 0.000000 0.030303 0.000000 0.000000 0.000000 - 0.060606 0.030303 0.000000 0.030303 0.030303 0.000000 0.000000 0.000000 0.000000 0.000000 0.090909 0.000000 0.000000 0.000000 0.000000 0.242424 0.333333 0.181818 0.000000 0.000000 - 0.000000 0.000000 0.060606 0.212121 0.000000 0.000000 0.060606 0.000000 0.151515 0.060606 0.000000 0.000000 0.000000 0.060606 0.303030 0.090909 0.000000 0.000000 0.000000 0.000000 - 0.181818 0.060606 0.000000 0.060606 0.000000 0.060606 0.000000 0.030303 0.000000 0.030303 0.000000 0.030303 0.000000 0.000000 0.000000 0.333333 0.121212 0.060606 0.000000 0.030303 - 0.121212 0.000000 0.000000 0.000000 0.000000 0.000000 0.030303 0.030303 0.000000 0.484848 0.212121 0.000000 0.000000 0.000000 0.000000 0.000000 0.030303 0.030303 0.030303 0.030303 - 0.515152 0.000000 0.000000 0.000000 0.000000 0.060606 0.000000 0.000000 0.030303 0.000000 0.030303 0.000000 0.030303 0.090909 0.212121 0.030303 0.000000 0.000000 0.000000 0.000000 - 0.000000 0.000000 0.000000 0.030303 0.060606 0.030303 0.060606 0.030303 0.060606 0.181818 0.090909 0.030303 0.000000 0.090909 0.151515 0.000000 0.030303 0.121212 0.000000 0.030303 - 0.030303 0.000000 0.242424 0.484848 0.000000 0.030303 0.000000 0.000000 0.030303 0.090909 0.000000 0.000000 0.000000 0.030303 0.000000 0.030303 0.030303 0.000000 0.000000 0.000000 - 0.030303 0.030303 0.000000 0.000000 0.030303 0.030303 0.000000 0.060606 0.000000 0.454545 0.060606 0.000000 0.000000 0.000000 0.000000 0.030303 0.000000 0.090909 0.000000 0.181818 - 0.363636 0.000000 0.000000 0.060606 0.000000 0.121212 0.000000 0.030303 0.060606 0.030303 0.000000 0.000000 0.030303 0.000000 0.151515 0.090909 0.030303 0.000000 0.000000 0.030303 - 0.030303 0.000000 0.000000 0.090909 0.000000 0.060606 0.000000 0.030303 0.151515 0.030303 0.000000 0.000000 0.333333 0.060606 0.060606 0.030303 0.090909 0.030303 0.000000 0.000000 - 0.000000 0.000000 0.030303 0.090909 0.030303 0.030303 0.121212 0.000000 0.121212 0.090909 0.000000 0.030303 0.030303 0.060606 0.030303 0.090909 0.060606 0.000000 0.030303 0.151515 - 0.030303 0.000000 0.000000 0.030303 0.000000 0.515152 0.000000 0.000000 0.030303 0.000000 
0.000000 0.181818 0.000000 0.030303 0.090909 0.060606 0.000000 0.030303 0.000000 0.000000 - 0.000000 0.000000 0.000000 0.000000 0.030303 0.030303 0.000000 0.484848 0.030303 0.030303 0.000000 0.000000 0.000000 0.000000 0.030303 0.000000 0.030303 0.303030 0.000000 0.030303 - 0.030303 0.000000 0.000000 0.000000 0.000000 0.060606 0.030303 0.030303 0.090909 0.030303 0.000000 0.060606 0.000000 0.000000 0.424242 0.000000 0.121212 0.090909 0.000000 0.030303 - 0.030303 0.060606 0.030303 0.000000 0.000000 0.000000 0.000000 0.151515 0.000000 0.030303 0.000000 0.030303 0.000000 0.000000 0.000000 0.030303 0.030303 0.606061 0.000000 0.000000 - 0.030303 0.000000 0.000000 0.000000 0.000000 0.060606 0.030303 0.000000 0.060606 0.000000 0.000000 0.393939 0.000000 0.030303 0.060606 0.151515 0.060606 0.090909 0.000000 0.030303 - 0.121212 0.090909 0.000000 0.030303 0.000000 0.121212 0.000000 0.090909 0.000000 0.060606 0.030303 0.060606 0.000000 0.000000 0.000000 0.121212 0.060606 0.212121 0.000000 0.000000 - 0.060606 0.030303 0.000000 0.000000 0.030303 0.030303 0.000000 0.272727 0.000000 0.181818 0.000000 0.030303 0.000000 0.000000 0.000000 0.030303 0.030303 0.303030 0.000000 0.000000 --------------------------------------------------------------------------------- - - - - - -Time 36.66 secs. - -******************************************************************************** - - -******************************************************************************** -MOTIF 2 width = 29 sites = 33 llr = 1106 E-value = 2.3e-159 -******************************************************************************** --------------------------------------------------------------------------------- - Motif 2 Description --------------------------------------------------------------------------------- -Simplified A :14::::531:1:1516:2:322:51111 -pos.-specific C :::::::1::::::::1:::::::::::: -probability D ::::::::1:::::1:::1:::1:::::: -matrix E ::::::::::::::2::11::22:::::: - F :::::::::::::1:::::2::::1::1: - G ::::::a4218:9:::::1::1:8:1:11 - H :::::::::::::1:::11:1:1::1::: - I ::124::::::5:::2:1::1:::::1:1 - K 61:::::::2:::2:::3:::21:13::: - L :::51::::::3:2:2:1151:1:111:3 - M :::::::::::::::1::::::1:::::: - N :1::::::1:::::::::::::1::1::: - P 1:::::::::::::::::::::::::::: - Q 1::::::::1:::::::::::11:::::: - R :1:::::::2:::2:::21::111:1::: - S :::::::123::1:1:1:::111:1111: - T 111::9::::1::::2::1::1:::::1: - V :4425::::::1:::12:1:1:::::553 - W ::::::::::::::::::::::::::::: - Y :::::::::::::::1::::::::1:::: - - bits 6.7 - 6.0 - 5.4 - 4.7 -Information 4.0 -content 3.4 ** -(48.4 bits) 2.7 *** * * - 2.0 * ****** *** * * * - 1.3 ****************** * * ** *** - 0.7 ***************************** - 0.0 ----------------------------- - -Multilevel KVALVTGAASGIGKATAKxLAAEGAKVVL -consensus VII GG L R I F K V -sequence S - - --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 2 sites sorted by position p-value --------------------------------------------------------------------------------- -Sequence name Start P-value Site -------------- ----- --------- ----------------------------- -HDE_CANTR 323 2.44e-23 SGAPTVSLKD KVVLITGAGAGLGKEYAKWFAKYGAKVVV NDFKDATKTV -DHII_HUMAN 35 5.50e-23 EEFRPEMLQG KKVIVTGASKGIGREMAYHLAKMGAHVVV TARSKETLQK -YINL_LISMO 6 5.38e-22 MTIKN KVIIITGASSGIGKATALLLAEKGAKLVL AARRVEKLEK -HDHA_ECOLI 12 5.65e-20 FNSDNLRLDG KCAIITGAGAGIGKEIAITFATAGASVVV SDINADAANH -RIDH_KLEAE 15 1.17e-19 VSSMNTSLSG 
KVAAITGAASGIGLECARTLLGAGAKVVL IDREGEKLNK -BUDC_KLETE 3 1.17e-19 MQ KVALVTGAGQGIGKAIALRLVKDGFAVAI ADYNDATATA -ENTA_ECOLI 6 4.74e-19 MDFSG KNVWVTGAGKGIGYATALAFVEAGAKVTG FDQAFTQEQY -AP27_MOUSE 8 9.31e-19 MKLNFSG LRALVTGAGKGIGRDTVKALHASGAKVVA VTRTNSDLVS -DHMA_FLAS1 15 2.50e-18 VSRRPGRLAG KAAIVTGAAGGIGRATVEAYLREGASVVA MDLAPRLAAT -YRTP_BACSU 7 3.45e-18 MQSLQH KTALITGGGRGIGRATALALAKEGVNIGL IGRTSANVEK -DHGB_BACME 8 5.86e-18 MYKDLEG KVVVITGSSTGLGKSMAIRFATEKAKVVV NYRSKEDEAN -DHB3_HUMAN 49 9.86e-18 LPKSFLRSMG QWAVITGAGDGIGKAYSFELAKRGLNVVL ISRTLEKLEA -PCR_PEA 87 2.47e-17 SSEGKKTLRK GNVVITGASSGLGLATAKALAESGKWHVI MACRDYLKAA -BDH_HUMAN 56 3.01e-17 YASAAEPVGS KAVLVTGCDSGFGFSLAKHLHSKGFLVFA GCLMKDKGHD -BA72_EUBSP 7 3.33e-17 MNLVQD KVTIITGGTRGIGFAAAKIFIDNGAKVSI FGETQEEVDT -FIXR_BRAJA 37 4.06e-17 VNARVDRGEP KVMLLTGASRGIGHATAKLFSEAGWRIIS CARQPFDGER -3BHD_COMTE 7 4.06e-17 TNRLQG KVALVTGGASGVGLEVVKLLLGEGAKVAF SDINEAAGQQ -2BHD_STREX 7 8.05e-17 MNDLSG KTVIITGGARGLGAEAARQAVAAGARVVL ADVLDEEGAA -HMTR_LEIMA 7 1.90e-16 MTAPTV PVALVTGAAKRLGRSIAEGLHAEGYAVCL HYHRSAAEAN -FVT1_HUMAN 33 2.77e-16 ISPKPLALPG AHVVVTGGSSGIGKCIAIECYKQGAFITL VARNEDKLLQ -DHB2_HUMAN 83 3.65e-16 SGQELLPVDQ KAVLVTGGDCGLGHALCKYLDELGFTVFA GVLNENGPGA -LIGD_PSEPA 7 8.31e-16 MKDFQD QVAFITGGASGAGFGQAKVFGQAGAKIVV ADVRAEAVEK -NODG_RHIME 7 4.05e-15 MFELTG RKALVTGASGAIGGAIARVLHAQGAIVGL HGTQIEKLET -DHCA_HUMAN 5 5.24e-15 SSGI HVALVTGGNKGIGLAIVRDLCRLFSGDVV LTARDVTRGQ -MAS1_AGRRA 246 3.00e-14 SHWTVEIHQS PVILVSGSNRGVGKAIAEDLIAHGYRLSL GARKVKDLEV -BPHB_PSEPS 6 8.47e-14 MKLKG EAVLITGGASGLGRALVDRFVAEAKVAVL DKSAERLAEL -GUTD_ECOLI 3 1.46e-13 MN QVAVVIGGGQTLGAFLCHGLAAEGYRVAV VDIQSDKAAN -DHES_HUMAN 3 1.46e-13 AR TVVLITGCSSGIGLHLAVRLASDPSQSFK VYATLRDLKT -RFBB_NEIGO 7 1.59e-12 MQTEGK KNILVTGGAGFIGSAVVRHIIQNTRDSVV NLDKLTYAGN -ADH_DROME 7 6.97e-10 SFTLTN KNVIFVAGLGGIGLDTSKELLKRDLKNLV ILDRIENPAA -FABI_ECOLI 7 3.15e-09 MGFLSG KRILVTGVASKLSIAYGIAQAMHREGAEL AFTYQNDKLK -YURA_MYXXA 117 2.77e-07 RLPWERVRGI IDTNVTGAAATLSAVLPQMVERKRGHLVG VSSLAGFRGL -CSGA_MYXXA 52 4.24e-07 TINALGPLRV TSAMLPGLRQGALRRVAHVTSRMGSLAAN TDGGAYAYRM --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 2 block diagrams --------------------------------------------------------------------------------- -SEQUENCE NAME POSITION P-VALUE MOTIF DIAGRAM -------------- ---------------- ------------- -HDE_CANTR 2.4e-23 322_[2]_555 -DHII_HUMAN 5.5e-23 34_[2]_229 -YINL_LISMO 5.4e-22 5_[2]_214 -HDHA_ECOLI 5.7e-20 11_[2]_215 -RIDH_KLEAE 1.2e-19 14_[2]_206 -BUDC_KLETE 1.2e-19 2_[2]_210 -ENTA_ECOLI 4.7e-19 5_[2]_214 -AP27_MOUSE 9.3e-19 7_[2]_208 -DHMA_FLAS1 2.5e-18 14_[2]_227 -YRTP_BACSU 3.4e-18 6_[2]_203 -DHGB_BACME 5.9e-18 7_[2]_226 -DHB3_HUMAN 9.9e-18 48_[2]_233 -PCR_PEA 2.5e-17 86_[2]_284 -BDH_HUMAN 3e-17 55_[2]_259 -BA72_EUBSP 3.3e-17 6_[2]_214 -FIXR_BRAJA 4.1e-17 36_[2]_213 -3BHD_COMTE 4.1e-17 6_[2]_218 -2BHD_STREX 8e-17 6_[2]_220 -HMTR_LEIMA 1.9e-16 6_[2]_252 -FVT1_HUMAN 2.8e-16 32_[2]_271 -DHB2_HUMAN 3.7e-16 82_[2]_276 -LIGD_PSEPA 8.3e-16 6_[2]_270 -NODG_RHIME 4.1e-15 6_[2]_210 -DHCA_HUMAN 5.2e-15 4_[2]_243 -MAS1_AGRRA 3e-14 245_[2]_202 -BPHB_PSEPS 8.5e-14 5_[2]_241 -GUTD_ECOLI 1.5e-13 2_[2]_228 -DHES_HUMAN 1.5e-13 2_[2]_296 -RFBB_NEIGO 1.6e-12 6_[2]_311 -ADH_DROME 7e-10 6_[2]_220 -FABI_ECOLI 3.1e-09 6_[2]_227 -YURA_MYXXA 2.8e-07 116_[2]_113 -CSGA_MYXXA 4.2e-07 51_[2]_86 --------------------------------------------------------------------------------- - 
--------------------------------------------------------------------------------- - Motif 2 in BLOCKS format --------------------------------------------------------------------------------- -BL MOTIF 2 width=29 seqs=33 -HDE_CANTR ( 323) KVVLITGAGAGLGKEYAKWFAKYGAKVVV 1 -DHII_HUMAN ( 35) KKVIVTGASKGIGREMAYHLAKMGAHVVV 1 -YINL_LISMO ( 6) KVIIITGASSGIGKATALLLAEKGAKLVL 1 -HDHA_ECOLI ( 12) KCAIITGAGAGIGKEIAITFATAGASVVV 1 -RIDH_KLEAE ( 15) KVAAITGAASGIGLECARTLLGAGAKVVL 1 -BUDC_KLETE ( 3) KVALVTGAGQGIGKAIALRLVKDGFAVAI 1 -ENTA_ECOLI ( 6) KNVWVTGAGKGIGYATALAFVEAGAKVTG 1 -AP27_MOUSE ( 8) LRALVTGAGKGIGRDTVKALHASGAKVVA 1 -DHMA_FLAS1 ( 15) KAAIVTGAAGGIGRATVEAYLREGASVVA 1 -YRTP_BACSU ( 7) KTALITGGGRGIGRATALALAKEGVNIGL 1 -DHGB_BACME ( 8) KVVVITGSSTGLGKSMAIRFATEKAKVVV 1 -DHB3_HUMAN ( 49) QWAVITGAGDGIGKAYSFELAKRGLNVVL 1 -PCR_PEA ( 87) GNVVITGASSGLGLATAKALAESGKWHVI 1 -BDH_HUMAN ( 56) KAVLVTGCDSGFGFSLAKHLHSKGFLVFA 1 -BA72_EUBSP ( 7) KVTIITGGTRGIGFAAAKIFIDNGAKVSI 1 -FIXR_BRAJA ( 37) KVMLLTGASRGIGHATAKLFSEAGWRIIS 1 -3BHD_COMTE ( 7) KVALVTGGASGVGLEVVKLLLGEGAKVAF 1 -2BHD_STREX ( 7) KTVIITGGARGLGAEAARQAVAAGARVVL 1 -HMTR_LEIMA ( 7) PVALVTGAAKRLGRSIAEGLHAEGYAVCL 1 -FVT1_HUMAN ( 33) AHVVVTGGSSGIGKCIAIECYKQGAFITL 1 -DHB2_HUMAN ( 83) KAVLVTGGDCGLGHALCKYLDELGFTVFA 1 -LIGD_PSEPA ( 7) QVAFITGGASGAGFGQAKVFGQAGAKIVV 1 -NODG_RHIME ( 7) RKALVTGASGAIGGAIARVLHAQGAIVGL 1 -DHCA_HUMAN ( 5) HVALVTGGNKGIGLAIVRDLCRLFSGDVV 1 -MAS1_AGRRA ( 246) PVILVSGSNRGVGKAIAEDLIAHGYRLSL 1 -BPHB_PSEPS ( 6) EAVLITGGASGLGRALVDRFVAEAKVAVL 1 -GUTD_ECOLI ( 3) QVAVVIGGGQTLGAFLCHGLAAEGYRVAV 1 -DHES_HUMAN ( 3) TVVLITGCSSGIGLHLAVRLASDPSQSFK 1 -RFBB_NEIGO ( 7) KNILVTGGAGFIGSAVVRHIIQNTRDSVV 1 -ADH_DROME ( 7) KNVIFVAGLGGIGLDTSKELLKRDLKNLV 1 -FABI_ECOLI ( 7) KRILVTGVASKLSIAYGIAQAMHREGAEL 1 -YURA_MYXXA ( 117) IDTNVTGAAATLSAVLPQMVERKRGHLVG 1 -CSGA_MYXXA ( 52) TSAMLPGLRQGALRRVAHVTSRMGSLAAN 1 -// - --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 2 position-specific scoring matrix --------------------------------------------------------------------------------- -log-odds matrix: alength= 20 w= 29 n= 9072 bayes= 8.09755 E= 2.3e-159 - -143 -322 -212 -29 -360 -150 75 -97 323 -150 -283 -174 41 159 -35 -190 13 -356 -334 -283 - 0 103 -55 -150 -343 -365 68 -277 37 -328 -270 137 -300 -104 27 -68 8 216 134 -284 - 184 -328 -685 -628 -415 -668 -559 121 -607 -334 17 -587 -610 -559 -621 -535 7 212 -519 -487 - -171 -214 -506 -425 -20 -485 -317 170 -395 240 30 -63 -412 -320 -383 -339 -272 73 137 -278 - -548 -378 -767 -727 -25 -789 -710 277 -719 -45 -344 -695 -699 -669 -749 -678 -480 260 -602 -574 - -390 -251 -433 -462 -434 -534 -365 -109 -376 -462 -320 -258 -80 -318 -390 -26 386 -153 -406 -454 - -198 -345 -337 -398 -504 336 -361 -523 -381 -566 -470 -293 -442 -394 -385 -308 -423 -506 -397 -449 - 205 225 -616 -594 -556 193 -536 -552 -603 -163 -504 -510 -489 -501 -571 4 -361 -139 -540 -583 - 131 -370 35 -219 -442 128 -213 -456 -212 -158 -382 70 -370 -181 -55 181 -71 -445 -421 -359 - -30 108 -48 -141 -360 23 -153 -364 148 -351 -284 -173 -299 159 146 204 -60 -357 -335 -283 - -204 -337 -316 -378 -112 323 -341 -505 -141 -549 -452 -272 -427 -375 -136 -296 -74 -492 -378 -430 - -95 -361 -677 -632 -23 -695 -554 323 -604 166 -255 -593 -615 -536 -614 -563 -440 -15 -461 -462 - -326 -337 -324 -385 -492 331 -348 -511 -368 -235 -458 -280 -431 -381 -372 -66 -413 -495 -385 -437 - -30 -269 -258 -187 114 -153 158 -74 206 82 -239 -214 -326 -135 191 -71 -224 -281 -312 9 - 237 89 
-43 92 -86 -158 5 -315 -262 -325 -263 -289 -394 -232 -114 27 -229 -127 -314 -338 - -79 126 -496 -418 -229 -460 -302 183 -384 95 114 -373 -406 -2 -380 -314 194 28 -269 160 - 260 176 -422 -376 -354 -156 -358 -340 -378 -355 -293 -375 -107 -342 -381 -6 -258 55 -342 -392 - -270 -314 -48 76 -39 -361 159 86 236 21 -276 -175 -299 43 146 -189 -211 -138 -330 6 - 56 -301 25 75 -338 -68 212 -90 -130 -15 8 -180 -302 41 116 -193 13 -2 141 7 - -177 105 -501 -421 238 -487 -316 -44 -391 257 -134 -393 -406 -19 -377 -340 -87 -96 -279 -4 - 151 122 -87 -90 -241 -159 248 70 -331 38 -181 -340 -395 -280 -347 -10 -242 54 -276 11 - 75 -319 -47 137 -356 -68 -150 -361 189 -347 5 -170 -296 112 117 5 13 -353 -332 -279 - 55 -318 27 179 -355 -357 159 -360 88 -69 94 59 -295 112 34 6 -209 -352 -331 6 - -204 -337 -127 -377 -112 318 -341 -505 -141 -549 -452 -272 -122 -374 -52 -296 -161 -492 -378 -430 - 225 -109 -325 -105 68 -158 -257 -288 -22 -98 -250 -283 -385 -221 -102 31 -229 -121 98 106 - -75 -318 -47 -137 -40 -68 159 -97 236 -69 -279 59 -295 44 117 6 -59 -140 140 -279 - -91 -125 -192 -330 -284 -443 -59 58 -338 -101 -228 -167 -320 -310 -312 -132 -175 304 -343 -387 - -59 39 -374 -175 10 -177 -269 -2 -342 -164 -220 -355 -327 -311 -318 -112 -56 295 -333 -366 - 6 -192 -493 -415 -11 -69 -301 78 -81 174 -170 -49 -406 -318 -378 -94 -246 162 -269 -259 --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 2 position-specific probability matrix --------------------------------------------------------------------------------- -letter-probability matrix: alength= 20 w= 29 nsites= 33 E= 2.3e-159 - 0.030303 0.000000 0.000000 0.030303 0.000000 0.030303 0.030303 0.030303 0.575758 0.030303 0.000000 0.000000 0.060606 0.090909 0.030303 0.000000 0.060606 0.000000 0.000000 0.000000 - 0.121212 0.030303 0.030303 0.000000 0.000000 0.000000 0.030303 0.000000 0.060606 0.000000 0.000000 0.121212 0.000000 0.000000 0.060606 0.030303 0.060606 0.424242 0.030303 0.000000 - 0.424242 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.121212 0.000000 0.000000 0.030303 0.000000 0.000000 0.000000 0.000000 0.000000 0.060606 0.363636 0.000000 0.000000 - 0.030303 0.000000 0.000000 0.000000 0.030303 0.000000 0.000000 0.212121 0.000000 0.484848 0.030303 0.030303 0.000000 0.000000 0.000000 0.000000 0.000000 0.151515 0.030303 0.000000 - 0.000000 0.000000 0.000000 0.000000 0.030303 0.000000 0.000000 0.393939 0.000000 0.060606 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.515152 0.000000 0.000000 - 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.030303 0.000000 0.000000 0.000000 0.000000 0.030303 0.000000 0.000000 0.030303 0.878788 0.030303 0.000000 0.000000 - 0.030303 0.000000 0.000000 0.000000 0.000000 0.969697 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 - 0.454545 0.060606 0.000000 0.000000 0.000000 0.363636 0.000000 0.000000 0.000000 0.030303 0.000000 0.000000 0.000000 0.000000 0.000000 0.060606 0.000000 0.030303 0.000000 0.000000 - 0.303030 0.000000 0.060606 0.000000 0.000000 0.242424 0.000000 0.000000 0.000000 0.030303 0.000000 0.060606 0.000000 0.000000 0.030303 0.242424 0.030303 0.000000 0.000000 0.000000 - 0.090909 0.030303 0.030303 0.000000 0.000000 0.121212 0.000000 0.000000 0.151515 0.000000 0.000000 0.000000 0.000000 0.090909 0.151515 0.303030 0.030303 0.000000 0.000000 0.000000 - 0.030303 0.000000 0.000000 
0.000000 0.030303 0.818182 0.000000 0.000000 0.030303 0.000000 0.000000 0.000000 0.000000 0.000000 0.030303 0.000000 0.060606 0.000000 0.000000 0.000000 - 0.060606 0.000000 0.000000 0.000000 0.030303 0.000000 0.000000 0.545455 0.000000 0.303030 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.060606 0.000000 0.000000 - 0.000000 0.000000 0.000000 0.000000 0.000000 0.909091 0.000000 0.000000 0.000000 0.030303 0.000000 0.000000 0.000000 0.000000 0.000000 0.060606 0.000000 0.000000 0.000000 0.000000 - 0.090909 0.000000 0.000000 0.000000 0.090909 0.030303 0.060606 0.030303 0.242424 0.181818 0.000000 0.000000 0.000000 0.000000 0.212121 0.030303 0.000000 0.000000 0.000000 0.030303 - 0.484848 0.030303 0.060606 0.181818 0.030303 0.030303 0.030303 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.030303 0.090909 0.000000 0.030303 0.000000 0.000000 - 0.060606 0.030303 0.000000 0.000000 0.000000 0.000000 0.000000 0.212121 0.000000 0.181818 0.060606 0.000000 0.000000 0.030303 0.000000 0.000000 0.242424 0.090909 0.000000 0.090909 - 0.636364 0.060606 0.000000 0.000000 0.000000 0.030303 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.030303 0.000000 0.000000 0.060606 0.000000 0.181818 0.000000 0.000000 - 0.000000 0.000000 0.030303 0.090909 0.030303 0.000000 0.060606 0.121212 0.303030 0.121212 0.000000 0.000000 0.000000 0.030303 0.151515 0.000000 0.000000 0.030303 0.000000 0.030303 - 0.181818 0.000000 0.060606 0.090909 0.000000 0.060606 0.090909 0.030303 0.000000 0.090909 0.030303 0.000000 0.000000 0.030303 0.121212 0.000000 0.060606 0.090909 0.030303 0.030303 - 0.030303 0.030303 0.000000 0.000000 0.242424 0.000000 0.000000 0.030303 0.000000 0.545455 0.000000 0.000000 0.000000 0.030303 0.000000 0.000000 0.030303 0.030303 0.000000 0.030303 - 0.333333 0.030303 0.030303 0.030303 0.000000 0.030303 0.121212 0.090909 0.000000 0.121212 0.000000 0.000000 0.000000 0.000000 0.000000 0.060606 0.000000 0.121212 0.000000 0.030303 - 0.212121 0.000000 0.030303 0.151515 0.000000 0.060606 0.000000 0.000000 0.212121 0.000000 0.030303 0.000000 0.000000 0.060606 0.121212 0.060606 0.060606 0.000000 0.000000 0.000000 - 0.181818 0.000000 0.060606 0.212121 0.000000 0.000000 0.060606 0.000000 0.090909 0.060606 0.060606 0.060606 0.000000 0.060606 0.060606 0.060606 0.000000 0.000000 0.000000 0.030303 - 0.030303 0.000000 0.030303 0.000000 0.030303 0.757576 0.000000 0.000000 0.030303 0.000000 0.000000 0.000000 0.030303 0.000000 0.060606 0.000000 0.030303 0.000000 0.000000 0.000000 - 0.454545 0.000000 0.000000 0.030303 0.090909 0.030303 0.000000 0.000000 0.060606 0.060606 0.000000 0.000000 0.000000 0.000000 0.030303 0.090909 0.000000 0.030303 0.030303 0.090909 - 0.060606 0.000000 0.030303 0.000000 0.030303 0.060606 0.060606 0.030303 0.303030 0.060606 0.000000 0.060606 0.000000 0.030303 0.121212 0.060606 0.030303 0.030303 0.030303 0.000000 - 0.090909 0.000000 0.030303 0.000000 0.000000 0.000000 0.030303 0.121212 0.000000 0.090909 0.000000 0.030303 0.000000 0.000000 0.000000 0.060606 0.000000 0.545455 0.000000 0.000000 - 0.121212 0.030303 0.000000 0.030303 0.090909 0.060606 0.000000 0.030303 0.000000 0.030303 0.000000 0.000000 0.000000 0.000000 0.000000 0.060606 0.060606 0.484848 0.000000 0.000000 - 0.121212 0.000000 0.000000 0.000000 0.030303 0.060606 0.000000 0.090909 0.030303 0.333333 0.000000 0.030303 0.000000 0.000000 0.000000 0.030303 0.000000 0.272727 0.000000 0.000000 --------------------------------------------------------------------------------- - - - - - -Time 67.32 secs. 
- -******************************************************************************** - - -******************************************************************************** -SUMMARY OF MOTIFS -******************************************************************************** - --------------------------------------------------------------------------------- - Combined block diagrams: non-overlapping sites with p-value < 0.0001 --------------------------------------------------------------------------------- -SEQUENCE NAME COMBINED P-VALUE MOTIF DIAGRAM -------------- ---------------- ------------- -2BHD_STREX 3.15e-28 6_[2(8.05e-17)]_116_[1(1.11e-18)]_75 -3BHD_COMTE 9.69e-25 6_[2(4.06e-17)]_115_[1(7.81e-15)]_74 -ADH_DROME 6.95e-15 6_[2(6.97e-10)]_116_[1(5.20e-12)]_75 -AP27_MOUSE 2.21e-33 7_[2(9.31e-19)]_112_[1(6.32e-22)]_67 -BA72_EUBSP 2.62e-26 6_[2(3.33e-17)]_121_[1(2.52e-16)]_64 -BDH_HUMAN 1.58e-26 55_[2(3.01e-17)]_123_[1(8.20e-17)]_107 -BPHB_PSEPS 4.38e-19 5_[2(8.47e-14)]_118_[1(1.80e-12)]_94 -BUDC_KLETE 1.73e-33 2_[2(1.17e-19)]_120_[1(4.04e-21)]_61 -DHES_HUMAN 9.98e-23 2_[2(1.46e-13)]_123_[1(1.37e-16)]_144 -DHGB_BACME 1.78e-30 7_[2(5.86e-18)]_123_[1(7.52e-20)]_74 -DHII_HUMAN 4.64e-31 34_[2(5.50e-23)]_119_[1(1.61e-15)]_81 -DHMA_FLAS1 5.69e-29 14_[2(2.50e-18)]_121_[1(5.53e-18)]_77 -ENTA_ECOLI 2.69e-27 5_[2(4.74e-19)]_109_[1(1.77e-15)]_76 -FIXR_BRAJA 1.15e-30 36_[2(4.06e-17)]_123_[1(6.12e-21)]_61 -GUTD_ECOLI 2.33e-25 2_[2(1.46e-13)]_122_[1(4.82e-19)]_77 -HDE_CANTR 1.43e-32 8_[2(3.45e-18)]_125_[1(4.01e-13)]_131_[2(2.44e-23)]_115_[1(9.65e-18)]_411 -HDHA_ECOLI 2.75e-31 11_[2(5.65e-20)]_74_[1(4.42e-05)]_15_[1(1.25e-18)]_68 -LIGD_PSEPA 4.54e-24 6_[2(8.31e-16)]_121_[1(1.21e-15)]_120 -NODG_RHIME 1.55e-29 6_[2(4.05e-15)]_116_[1(1.13e-21)]_65 -RIDH_KLEAE 3.67e-29 14_[2(1.17e-19)]_116_[1(9.09e-17)]_61 -YINL_LISMO 5.92e-29 5_[2(5.38e-22)]_119_[1(3.24e-14)]_66 -YRTP_BACSU 2.01e-33 6_[2(3.45e-18)]_119_[1(1.64e-22)]_55 -CSGA_MYXXA 5.53e-13 51_[2(4.24e-07)]_7_[1(2.10e-12)]_50 -DHB2_HUMAN 6.87e-27 82_[2(3.65e-16)]_120_[1(2.23e-18)]_127 -DHB3_HUMAN 4.11e-25 48_[2(9.86e-18)]_120_[1(8.55e-15)]_84 -DHCA_HUMAN 6.86e-18 4_[2(5.24e-15)]_159_[1(4.80e-10)]_55 -FABI_ECOLI 2.57e-14 6_[2(3.15e-09)]_123_[1(4.15e-12)]_75 -FVT1_HUMAN 4.64e-26 32_[2(2.77e-16)]_124_[1(2.86e-17)]_118 -HMTR_LEIMA 2.93e-28 6_[2(1.90e-16)]_157_[1(3.35e-19)]_66 -MAS1_AGRRA 1.26e-14 245_[2(3.00e-14)]_74_[1(5.72e-08)]_99 -PCR_PEA 4.22e-18 25_[1(2.77e-08)]_32_[2(2.47e-17)]_284 -RFBB_NEIGO 1.14e-19 6_[2(1.59e-12)]_129_[1(1.47e-14)]_153 -YURA_MYXXA 3.34e-19 116_[2(2.77e-07)]_14_[1(4.82e-19)]_70 --------------------------------------------------------------------------------- - -******************************************************************************** - - -******************************************************************************** -Stopped because nmotifs = 2 reached. 
-******************************************************************************** - -CPU: pmgm2 - -******************************************************************************** diff --git a/Tests/Motif/meme.protein.tcm.txt b/Tests/Motif/meme.protein.tcm.txt deleted file mode 100644 index 0356bc1bc03..00000000000 --- a/Tests/Motif/meme.protein.tcm.txt +++ /dev/null @@ -1,466 +0,0 @@ -******************************************************************************** -MEME - Motif discovery tool -******************************************************************************** -MEME version 3.0 (Release date: 2004/08/18 09:07:01) - -For further information on how to interpret these results or to get -a copy of the MEME software please access http://meme.sdsc.edu. - -This file may be used as input to the MAST algorithm for searching -sequence databases for matches to groups of motifs. MAST is available -for interactive use and downloading at http://meme.sdsc.edu. -******************************************************************************** - - -******************************************************************************** -REFERENCE -******************************************************************************** -If you use this program in your research, please cite: - -Timothy L. Bailey and Charles Elkan, -"Fitting a mixture model by expectation maximization to discover -motifs in biopolymers", Proceedings of the Second International -Conference on Intelligent Systems for Molecular Biology, pp. 28-36, -AAAI Press, Menlo Park, California, 1994. -******************************************************************************** - - -******************************************************************************** -TRAINING SET -******************************************************************************** -DATAFILE= farntrans5.s -ALPHABET= ACDEFGHIKLMNPQRSTVWY -Sequence name Weight Length Sequence name Weight Length -------------- ------ ------ ------------- ------ ------ -RAM1_YEAST 1.0000 431 PFTB_RAT 1.0000 437 -BET2_YEAST 1.0000 325 RATRABGERB 1.0000 331 -CAL1_YEAST 1.0000 376 -******************************************************************************** - -******************************************************************************** -COMMAND LINE SUMMARY -******************************************************************************** -This information can also be useful in the event you wish to report a -problem with the MEME software. 
- -command: meme farntrans5.s -mod tcm -protein -nmotifs 2 - -model: mod= tcm nmotifs= 2 evt= inf -object function= E-value of product of p-values -width: minw= 8 maxw= 50 minic= 0.00 -width: wg= 11 ws= 1 endgaps= yes -nsites: minsites= 2 maxsites= 25 wnsites= 0.8 -theta: prob= 1 spmap= pam spfuzz= 120 -em: prior= megap b= 9500 maxiter= 50 - distance= 1e-05 -data: n= 1900 N= 5 - -sample: seed= 0 seqfrac= 1 -Dirichlet mixture priors file: prior30.plib -Letter frequencies in dataset: -A 0.061 C 0.037 D 0.062 E 0.061 F 0.044 G 0.075 H 0.030 I 0.053 K 0.051 -L 0.114 M 0.021 N 0.034 P 0.041 Q 0.038 R 0.041 S 0.078 T 0.046 V 0.057 -W 0.018 Y 0.041 -Background letter frequencies (from dataset with add-one prior applied): -A 0.061 C 0.037 D 0.061 E 0.060 F 0.044 G 0.075 H 0.030 I 0.053 K 0.051 -L 0.113 M 0.021 N 0.034 P 0.041 Q 0.039 R 0.041 S 0.078 T 0.046 V 0.057 -W 0.018 Y 0.041 -******************************************************************************** - - -******************************************************************************** -MOTIF 1 width = 30 sites = 24 llr = 854 E-value = 2.2e-094 -******************************************************************************** --------------------------------------------------------------------------------- - Motif 1 Description --------------------------------------------------------------------------------- -Simplified A :::2::::1:2:11:1:1413314:1:1:: -pos.-specific C ::::1::::::::2::132::::1:::::: -probability D ::::21::11:6::::::::::::::2:13 -matrix E ::::::1114::::::::::::::::1111 - F :16:::::::::111:31::::::::::1: - G 861241:42:::221:::2:22::::3::1 - H ::::::::11:4::2::::::::::::::1 - I ::1:::1::::::::1:::1:1::42:11: - K :::::1::3::::::::::::::::::2:: - L ::21:::::2::31::1::5::9:55::4: - M :::::::::::::::::::::::::::::: - N :1:1:::21:::::::::::1:::::1::: - P ::::::5:::2::::::::::::::::::: - Q :::1::::1:::::1:::::1::::::::: - R :::::3:::::::2:::::::::::::1:: - S :1:2::::::2:::13:1::32:3::12:: - T ::::::::::::2::5:1:1:1:1:1:::1 - V :::::::1::3:2:::::12::::1::::: - W ::::::::::::::::13:::::::::::: - Y :::::::::::::14:31:::::::1:::: - - bits 5.8 - 5.2 - 4.6 - 4.0 -Information 3.5 * -content 2.9 * * * -(51.4 bits) 2.3 *** * * ** * * - 1.7 *** ** *** ***** * **** - 1.2 ****************************** - 0.6 ****************************** - 0.0 ------------------------------ - -Multilevel GGFGGRPGKEVDLCYTFCALAALALLGSLD -consensus LAH HSYWCVSS SI -sequence P G - - --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 1 sites sorted by position p-value --------------------------------------------------------------------------------- -Sequence name Start P-value Site -------------- ----- --------- ------------------------------ -BET2_YEAST 223 7.28e-22 WWLCERQLPE GGLNGRPSKLPDVCYSWWVLSSLAIIGRLD WINYEKLTEF -RATRABGERB 227 6.18e-21 WWLCERQLPS GGLNGRPEKLPDVCYSWWVLASLKIIGRLH WIDREKLRSF -CAL1_YEAST 275 9.17e-20 LNASYDQSDD GGFQGRENKFADTCYAFWCLNSLHLLTKDW KMLCQTELVT -PFTB_RAT 237 1.15e-19 EWIARCQNWE GGIGGVPGMEAHGGYTFCGLAALVILKKER SLNLKSLLQW -PFTB_RAT 138 4.30e-19 QFLELCQSPD GGFGGGPGQYPHLAPTYAAVNALCIIGTEE AYNVINREKL -RATRABGERB 179 7.36e-19 EFVLSCMNFD GGFGCRPGSESHAGQIYCCTGFLAITSQLH QVNSDLLGWW -RATRABGERB 131 8.19e-19 AYVQSLQKED GSFAGDIWGEIDTRFSFCAVATLALLGKLD AINVEKAIEF -BET2_YEAST 172 2.10e-18 DFVLKCYNFD GGFGLCPNAESHAAQAFTCLGALAIANKLD MLSDDQLEEI -RATRABGERB 276 1.43e-17 FILACQDEET GGFADRPGDMVDPFHTLFGIAGLSLLGEEQ IKPVSPVFCM -BET2_YEAST 
124 3.41e-17 SFIRGNQLED GSFQGDRFGEVDTRFVYTALSALSILGELT SEVVDPAVDF -RAM1_YEAST 247 5.00e-17 YLKNCQNYEG GFGSCPHVDEAHGGYTFCATASLAILRSMD QINVEKLLEW -BET2_YEAST 272 6.64e-17 FILKCQDEKK GGISDRPENEVDVFHTVFGVAGLSLMGYDN LVPIDPIYCM -RAM1_YEAST 145 1.27e-16 VKLFTISPSG GPFGGGPGQLSHLASTYAAINALSLCDNID GCWDRIDRKG -PFTB_RAT 286 3.17e-16 WVTSRQMRFE GGFQGRCNKLVDGCYSFWQAGLLPLLHRAL HAQGDPALSM -RAM1_YEAST 296 3.47e-16 WSSARQLQEE RGFCGRSNKLVDGCYSFWVGGSAAILEAFG YGQCFNKHAL -PFTB_RAT 348 4.30e-15 YILMCCQCPA GGLLDKPGKSRDFYHTCYCLSGLSIAQHFG SGAMLHDVVM -RATRABGERB 83 2.40e-14 VFIKSCQHEC GGVSASIGHDPHLLYTLSAVQILTLYDSIH VINVDKVVAY -PFTB_RAT 189 2.81e-14 QYLYSLKQPD GSFLMHVGGEVDVRSAYCAASVASLTNIIT PDLFEGTAEW -BET2_YEAST 73 7.78e-14 FVLSCWDDKY GAFAPFPRHDAHLLTTLSAVQILATYDALD VLGKDRKVRL -CAL1_YEAST 205 1.14e-13 LLGYIMSQQC YNGAFGAHNEPHSGYTSCALSTLALLSSLE KLSDKFKEDT -RAM1_YEAST 198 1.33e-13 WLISLKEPNG GFKTCLEVGEVDTRGIYCALSIATLLNILT EELTEGVLNY -RAM1_YEAST 349 3.52e-13 ILYCCQEKEQ PGLRDKPGAHSDFYHTNYCLLGLAVAESSY SCTPNDSPHN -CAL1_YEAST 327 5.47e-13 LLDRTQKTLT GGFSKNDEEDADLYHSCLGSAALALIEGKF NGELCIPQEI -BET2_YEAST 24 3.11e-10 RYIESLDTNK HNFEYWLTEHLRLNGIYWGLTALCVLDSPE TFVKEEVISF --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 1 block diagrams --------------------------------------------------------------------------------- -SEQUENCE NAME POSITION P-VALUE MOTIF DIAGRAM -------------- ---------------- ------------- -BET2_YEAST 3.1e-10 23_[1]_19_[1]_21_[1]_18_[1]_21_ - [1]_19_[1]_24 -RATRABGERB 2.4e-14 82_[1]_18_[1]_18_[1]_18_[1]_19_[1]_26 -CAL1_YEAST 1.1e-13 204_[1]_40_[1]_22_[1]_20 -PFTB_RAT 4.3e-15 137_[1]_21_[1]_18_[1]_19_[1]_32_ - [1]_60 -RAM1_YEAST 1.3e-13 144_[1]_23_[1]_19_[1]_19_[1]_23_ - [1]_53 --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 1 in BLOCKS format --------------------------------------------------------------------------------- -BL MOTIF 1 width=30 seqs=24 -BET2_YEAST ( 223) GGLNGRPSKLPDVCYSWWVLSSLAIIGRLD 1 -RATRABGERB ( 227) GGLNGRPEKLPDVCYSWWVLASLKIIGRLH 1 -CAL1_YEAST ( 275) GGFQGRENKFADTCYAFWCLNSLHLLTKDW 1 -PFTB_RAT ( 237) GGIGGVPGMEAHGGYTFCGLAALVILKKER 1 -PFTB_RAT ( 138) GGFGGGPGQYPHLAPTYAAVNALCIIGTEE 1 -RATRABGERB ( 179) GGFGCRPGSESHAGQIYCCTGFLAITSQLH 1 -RATRABGERB ( 131) GSFAGDIWGEIDTRFSFCAVATLALLGKLD 1 -BET2_YEAST ( 172) GGFGLCPNAESHAAQAFTCLGALAIANKLD 1 -RATRABGERB ( 276) GGFADRPGDMVDPFHTLFGIAGLSLLGEEQ 1 -BET2_YEAST ( 124) GSFQGDRFGEVDTRFVYTALSALSILGELT 1 -RAM1_YEAST ( 247) GFGSCPHVDEAHGGYTFCATASLAILRSMD 1 -BET2_YEAST ( 272) GGISDRPENEVDVFHTVFGVAGLSLMGYDN 1 -RAM1_YEAST ( 145) GPFGGGPGQLSHLASTYAAINALSLCDNID 1 -PFTB_RAT ( 286) GGFQGRCNKLVDGCYSFWQAGLLPLLHRAL 1 -RAM1_YEAST ( 296) RGFCGRSNKLVDGCYSFWVGGSAAILEAFG 1 -PFTB_RAT ( 348) GGLLDKPGKSRDFYHTCYCLSGLSIAQHFG 1 -RATRABGERB ( 83) GGVSASIGHDPHLLYTLSAVQILTLYDSIH 1 -PFTB_RAT ( 189) GSFLMHVGGEVDVRSAYCAASVASLTNIIT 1 -BET2_YEAST ( 73) GAFAPFPRHDAHLLTTLSAVQILATYDALD 1 -CAL1_YEAST ( 205) YNGAFGAHNEPHSGYTSCALSTLALLSSLE 1 -RAM1_YEAST ( 198) GFKTCLEVGEVDTRGIYCALSIATLLNILT 1 -RAM1_YEAST ( 349) PGLRDKPGAHSDFYHTNYCLLGLAVAESSY 1 -CAL1_YEAST ( 327) GGFSKNDEEDADLYHSCLGSAALALIEGKF 1 -BET2_YEAST ( 24) HNFEYWLTEHLRLNGIYWGLTALCVLDSPE 1 -// - --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 1 position-specific scoring 
matrix --------------------------------------------------------------------------------- -log-odds matrix: alength= 20 w= 30 n= 1755 bayes= 6.12445 E= 2.2e-094 - -218 -476 -324 -369 -492 351 -61 -474 -334 -558 -392 -225 -103 -392 -86 -303 -361 -418 -449 -109 - -81 -470 -327 -377 -1 330 -398 -485 -347 -566 -404 54 -81 -396 -330 -9 -350 -421 -467 -482 - -244 -349 -513 -434 367 -26 -384 59 -61 38 -123 -357 -404 -371 -364 -348 -238 -14 -322 -240 - 127 -17 -201 -5 -348 114 -187 -312 -81 -63 -205 120 -263 155 27 92 7 -262 -388 -304 - -26 143 115 -207 -17 219 -252 -171 -9 -112 95 -186 -16 -179 -195 -235 -164 -152 -351 -13 - -146 -17 33 -115 -32 45 38 -310 85 -143 -204 47 -13 -95 271 -53 -143 -50 83 -303 - -47 -38 -90 6 -396 -374 0 15 -233 -169 -288 -259 375 -235 -26 -97 -231 -75 -473 -424 - -148 -438 -204 99 -31 196 38 -302 -85 -335 -202 205 -266 -99 26 -54 6 33 83 -304 - 47 -444 33 57 -349 84 122 -315 209 -342 79 121 -263 109 -118 -53 -144 -265 -389 -305 - -160 -416 82 250 -25 -316 123 -254 -111 61 85 -136 -283 -122 -146 -60 -156 -221 -375 -14 - 161 -320 -465 -373 -228 -392 -333 13 -325 -90 -101 -301 208 -309 -7 90 -181 221 -331 -288 - -438 -650 318 -350 -594 -396 355 -672 -341 -686 -596 -158 -495 -373 4 -357 -386 -623 -637 -514 - 47 -316 -480 -387 82 91 -337 -111 -338 103 -98 -307 -15 -318 -312 -83 170 152 -328 -285 - 94 221 -398 -311 80 90 -301 -129 -266 -30 -111 21 -354 -263 179 -282 -178 -115 -329 139 - -167 -305 -309 -259 149 -77 196 -221 -241 -249 -153 -216 -75 19 -217 -54 -68 -205 -133 341 - 71 -383 -460 -463 -428 -445 -427 86 -370 -442 -260 -234 -417 -351 -356 143 348 -55 -479 -484 - -170 21 -317 -268 232 -354 -39 -217 -250 -54 -152 -36 -288 -261 -224 -116 -223 -78 132 328 - 46 272 -489 -397 84 -402 -334 -118 -347 -90 -105 -314 -378 -325 -320 -1 82 -106 351 88 - 277 240 -625 -594 -564 141 -583 -514 -585 -580 -446 -452 -451 4 -521 -277 -297 107 -614 -619 - 45 -317 -489 -395 -222 -96 -340 81 -346 184 -95 -313 -373 -323 -316 -85 81 179 -327 -285 - 211 -440 -247 -196 -412 97 -258 -374 -166 -146 -278 178 -325 106 -202 153 0 -314 -457 -377 - 213 -308 -476 -384 -5 88 -337 123 -337 -91 -99 -307 -372 -318 -311 118 79 12 -328 -287 - 87 -518 -675 -601 -320 -554 -562 -243 -564 290 -164 -543 -528 -491 -503 -489 -418 -296 -502 -500 - 317 38 -358 -284 -334 -229 -37 -276 -81 -332 -204 -260 -82 -270 -263 99 23 -47 -389 -383 - -408 -489 -735 -670 -387 -688 -678 294 -638 194 -240 -597 -632 -619 -631 -623 -16 84 -602 -556 - 91 0 -492 -398 -220 -404 -343 158 -348 187 109 -317 -375 -324 -318 -313 78 -100 -329 80 - -159 -454 121 100 -362 163 39 -329 20 -356 -221 172 -274 38 24 13 5 -279 -402 -316 - 47 -443 -200 57 -348 -88 38 37 160 -341 -205 47 -262 41 145 120 7 -264 -388 -19 - -28 -364 22 86 69 -344 -250 108 -9 153 95 -185 -17 -174 -192 -72 -165 -153 -352 -294 - -146 -443 171 100 -32 -6 175 -314 -80 -144 -205 47 -262 41 27 -178 126 -263 82 -19 --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 1 position-specific probability matrix --------------------------------------------------------------------------------- -letter-probability matrix: alength= 20 w= 30 nsites= 24 E= 2.2e-094 - 0.000000 0.000000 0.000000 0.000000 0.000000 0.833333 0.041667 0.000000 0.000000 0.000000 0.000000 0.000000 0.041667 0.000000 0.041667 0.000000 0.000000 0.000000 0.000000 0.041667 - 0.041667 0.000000 0.000000 0.000000 0.083333 0.625000 0.000000 0.000000 0.000000 0.000000 0.000000 0.083333 0.041667 0.000000 
0.000000 0.125000 0.000000 0.000000 0.000000 0.000000 - 0.000000 0.000000 0.000000 0.000000 0.583333 0.083333 0.000000 0.083333 0.041667 0.166667 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.041667 0.000000 0.000000 - 0.166667 0.041667 0.000000 0.041667 0.000000 0.208333 0.000000 0.000000 0.000000 0.083333 0.000000 0.083333 0.000000 0.125000 0.041667 0.166667 0.041667 0.000000 0.000000 0.000000 - 0.041667 0.125000 0.166667 0.000000 0.041667 0.416667 0.000000 0.000000 0.041667 0.041667 0.041667 0.000000 0.041667 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.041667 - 0.000000 0.041667 0.083333 0.000000 0.041667 0.125000 0.041667 0.000000 0.083333 0.041667 0.000000 0.041667 0.041667 0.000000 0.333333 0.041667 0.000000 0.041667 0.041667 0.000000 - 0.041667 0.041667 0.041667 0.083333 0.000000 0.000000 0.041667 0.083333 0.000000 0.041667 0.000000 0.000000 0.500000 0.000000 0.041667 0.041667 0.000000 0.041667 0.000000 0.000000 - 0.000000 0.000000 0.000000 0.125000 0.041667 0.375000 0.041667 0.000000 0.000000 0.000000 0.000000 0.166667 0.000000 0.000000 0.041667 0.041667 0.041667 0.083333 0.041667 0.000000 - 0.083333 0.000000 0.083333 0.083333 0.000000 0.166667 0.083333 0.000000 0.250000 0.000000 0.041667 0.083333 0.000000 0.083333 0.000000 0.041667 0.000000 0.000000 0.000000 0.000000 - 0.000000 0.000000 0.125000 0.416667 0.041667 0.000000 0.083333 0.000000 0.000000 0.208333 0.041667 0.000000 0.000000 0.000000 0.000000 0.041667 0.000000 0.000000 0.000000 0.041667 - 0.208333 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.041667 0.000000 0.041667 0.000000 0.000000 0.208333 0.000000 0.041667 0.166667 0.000000 0.291667 0.000000 0.000000 - 0.000000 0.000000 0.583333 0.000000 0.000000 0.000000 0.375000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.041667 0.000000 0.000000 0.000000 0.000000 0.000000 - 0.083333 0.000000 0.000000 0.000000 0.083333 0.166667 0.000000 0.000000 0.000000 0.250000 0.000000 0.000000 0.041667 0.000000 0.000000 0.041667 0.166667 0.166667 0.000000 0.000000 - 0.125000 0.208333 0.000000 0.000000 0.083333 0.166667 0.000000 0.000000 0.000000 0.083333 0.000000 0.041667 0.000000 0.000000 0.166667 0.000000 0.000000 0.000000 0.000000 0.125000 - 0.000000 0.000000 0.000000 0.000000 0.083333 0.083333 0.208333 0.000000 0.000000 0.000000 0.000000 0.000000 0.041667 0.083333 0.000000 0.083333 0.041667 0.000000 0.000000 0.375000 - 0.125000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.125000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.250000 0.458333 0.041667 0.000000 0.000000 - 0.000000 0.083333 0.000000 0.000000 0.291667 0.000000 0.000000 0.000000 0.000000 0.125000 0.000000 0.041667 0.000000 0.000000 0.000000 0.041667 0.000000 0.041667 0.083333 0.291667 - 0.083333 0.291667 0.000000 0.000000 0.083333 0.000000 0.000000 0.000000 0.000000 0.041667 0.000000 0.000000 0.000000 0.000000 0.000000 0.083333 0.083333 0.000000 0.250000 0.083333 - 0.416667 0.208333 0.000000 0.000000 0.000000 0.208333 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.041667 0.000000 0.000000 0.000000 0.125000 0.000000 0.000000 - 0.083333 0.000000 0.000000 0.000000 0.000000 0.041667 0.000000 0.083333 0.000000 0.458333 0.000000 0.000000 0.000000 0.000000 0.000000 0.041667 0.083333 0.208333 0.000000 0.000000 - 0.291667 0.000000 0.000000 0.000000 0.000000 0.166667 0.000000 0.000000 0.000000 0.041667 0.000000 0.125000 0.000000 0.083333 0.000000 0.250000 0.041667 0.000000 0.000000 0.000000 - 0.291667 0.000000 0.000000 0.000000 
0.041667 0.166667 0.000000 0.125000 0.000000 0.041667 0.000000 0.000000 0.000000 0.000000 0.000000 0.208333 0.083333 0.041667 0.000000 0.000000 - 0.125000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.875000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 - 0.416667 0.083333 0.000000 0.000000 0.000000 0.000000 0.041667 0.000000 0.041667 0.000000 0.000000 0.000000 0.041667 0.000000 0.000000 0.250000 0.083333 0.041667 0.000000 0.000000 - 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.416667 0.000000 0.458333 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.041667 0.083333 0.000000 0.000000 - 0.125000 0.041667 0.000000 0.000000 0.000000 0.000000 0.000000 0.166667 0.000000 0.458333 0.041667 0.000000 0.000000 0.000000 0.000000 0.000000 0.083333 0.000000 0.000000 0.083333 - 0.000000 0.000000 0.166667 0.125000 0.000000 0.291667 0.041667 0.000000 0.041667 0.000000 0.000000 0.125000 0.000000 0.041667 0.041667 0.083333 0.041667 0.000000 0.000000 0.000000 - 0.083333 0.000000 0.000000 0.083333 0.000000 0.041667 0.041667 0.083333 0.166667 0.000000 0.000000 0.041667 0.000000 0.041667 0.125000 0.208333 0.041667 0.000000 0.000000 0.041667 - 0.041667 0.000000 0.083333 0.125000 0.083333 0.000000 0.000000 0.125000 0.041667 0.375000 0.041667 0.000000 0.041667 0.000000 0.000000 0.041667 0.000000 0.000000 0.000000 0.000000 - 0.000000 0.000000 0.250000 0.125000 0.041667 0.083333 0.125000 0.000000 0.000000 0.041667 0.000000 0.041667 0.000000 0.041667 0.041667 0.000000 0.125000 0.000000 0.041667 0.041667 --------------------------------------------------------------------------------- - - - - - -Time 32.68 secs. - -******************************************************************************** - - -******************************************************************************** -MOTIF 2 width = 14 sites = 21 llr = 376 E-value = 3.1e-019 -******************************************************************************** --------------------------------------------------------------------------------- - Motif 2 Description --------------------------------------------------------------------------------- -Simplified A ::::111::::1:: -pos.-specific C ::::::::::::61 -probability D 12:11::1:::::: -matrix E 1::61::2:::::: - F 1:::::::5::::: - G ::::1::::::::: - H :::::::::::::: - I 5::::12::4:::: - K ::313:::::11:1 - L 11:::53::24:1: - M :::::::::::::: - N :4:::::::::::: - P :::::::::::::: - Q ::1::::2:::::6 - R ::1:::1::::11: - S ::::1::1:114:: - T ::1::1:::::::: - V :12::21::3:::: - W ::::::::2::::: - Y ::::::::3::::: - - bits 5.8 - 5.2 - 4.6 - 4.0 -Information 3.5 * -content 2.9 * ** -(25.8 bits) 2.3 * ** ** - 1.7 **** * ** ** - 1.2 ************** - 0.6 ************** - 0.0 -------------- - -Multilevel INKEKLLEFILSCQ -consensus YV -sequence WL - - --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 2 sites sorted by position p-value --------------------------------------------------------------------------------- -Sequence name Start P-value Site -------------- ----- --------- -------------- -BET2_YEAST 254 2.24e-13 SLAIIGRLDW INYEKLTEFILKCQ DEKKGGISDR -RATRABGERB 258 1.30e-12 SLKIIGRLHW IDREKLRSFILACQ DEETGGFADR -RATRABGERB 162 4.20e-12 TLALLGKLDA INVEKAIEFVLSCM NFDGGFGCRP -RATRABGERB 66 9.60e-12 VMDLMGQLHR MNKEEILVFIKSCQ HECGGVSASI -RAM1_YEAST 278 5.08e-11 SLAILRSMDQ INVEKLLEWSSARQ LQEERGFCGR -CAL1_YEAST 
190 5.01e-10 CRSKEDFDEY IDTEKLLGYIMSQQ CYNGAFGAHN -BET2_YEAST 55 6.90e-10 ALCVLDSPET FVKEEVISFVLSCW DDKYGAFAPF -RATRABGERB 114 1.57e-09 ILTLYDSIHV INVDKVVAYVQSLQ KEDGSFAGDI -PFTB_RAT 172 2.34e-09 IIGTEEAYNV INREKLLQYLYSLK QPDGSFLMHV -RAM1_YEAST 330 4.59e-09 ILEAFGYGQC FNKHALRDYILYCC QEKEQPGLRD -CAL1_YEAST 126 1.65e-08 LRDYEYFETI LDKRSLARFVSKCQ RPDRGSFVSC -PFTB_RAT 268 1.65e-08 ALVILKKERS LNLKSLLQWVTSRQ MRFEGGFQGR -PFTB_RAT 220 1.65e-08 VASLTNIITP DLFEGTAEWIARCQ NWEGGIGGVP -RAM1_YEAST 229 2.54e-08 IATLLNILTE ELTEGVLNYLKNCQ NYEGGFGSCP -PFTB_RAT 330 4.58e-08 DPALSMSHWM FHQQALQEYILMCC QCPAGGLLDK -CAL1_YEAST 239 5.86e-08 LLSSLEKLSD KFKEDTITWLLHRQ VSSHGCMKFE -PFTB_RAT 121 1.52e-07 LELLDEPIPQ IVATDVCQFLELCQ SPDGGFGGGP -CAL1_YEAST 362 1.91e-07 IEGKFNGELC IPQEIFNDFSKRCC F -BET2_YEAST 107 4.34e-07 TYDALDVLGK DRKVRLISFIRGNQ LEDGSFQGDR -BET2_YEAST 155 5.01e-07 ALSILGELTS EVVDPAVDFVLKCY NFDGGFGLCP -RAM1_YEAST 180 5.78e-07 CDNIDGCWDR IDRKGIYQWLISLK EPNGGFKTCL --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 2 block diagrams --------------------------------------------------------------------------------- -SEQUENCE NAME POSITION P-VALUE MOTIF DIAGRAM -------------- ---------------- ------------- -BET2_YEAST 4.3e-07 54_[2]_38_[2]_34_[2]_85_[2]_58 -RATRABGERB 1.6e-09 65_[2]_34_[2]_34_[2]_82_[2]_60 -RAM1_YEAST 5.8e-07 179_[2]_35_[2]_35_[2]_38_[2]_88 -CAL1_YEAST 5.9e-08 125_[2]_50_[2]_35_[2]_109_[2]_1 -PFTB_RAT 2.3e-09 120_[2]_37_[2]_34_[2]_34_[2]_48_ - [2]_94 --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 2 in BLOCKS format --------------------------------------------------------------------------------- -BL MOTIF 2 width=14 seqs=21 -BET2_YEAST ( 254) INYEKLTEFILKCQ 1 -RATRABGERB ( 258) IDREKLRSFILACQ 1 -RATRABGERB ( 162) INVEKAIEFVLSCM 1 -RATRABGERB ( 66) MNKEEILVFIKSCQ 1 -RAM1_YEAST ( 278) INVEKLLEWSSARQ 1 -CAL1_YEAST ( 190) IDTEKLLGYIMSQQ 1 -BET2_YEAST ( 55) FVKEEVISFVLSCW 1 -RATRABGERB ( 114) INVDKVVAYVQSLQ 1 -PFTB_RAT ( 172) INREKLLQYLYSLK 1 -RAM1_YEAST ( 330) FNKHALRDYILYCC 1 -CAL1_YEAST ( 126) LDKRSLARFVSKCQ 1 -PFTB_RAT ( 268) LNLKSLLQWVTSRQ 1 -PFTB_RAT ( 220) DLFEGTAEWIARCQ 1 -RAM1_YEAST ( 229) ELTEGVLNYLKNCQ 1 -PFTB_RAT ( 330) FHQQALQEYILMCC 1 -CAL1_YEAST ( 239) KFKEDTITWLLHRQ 1 -PFTB_RAT ( 121) IVATDVCQFLELCQ 1 -CAL1_YEAST ( 362) IPQEIFNDFSKRCC 1 -BET2_YEAST ( 107) DRKVRLISFIRGNQ 1 -BET2_YEAST ( 155) EVVDPAVDFVLKCY 1 -RAM1_YEAST ( 180) IDRKGIYQWLISLK 1 -// - --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 2 position-specific scoring matrix --------------------------------------------------------------------------------- -log-odds matrix: alength= 20 w= 14 n= 1835 bayes= 7.42721 E= 3.1e-019 - -180 -316 28 36 140 -376 -308 299 -24 -14 117 -265 -348 -268 -271 -280 -167 -65 -321 -275 - -149 -393 133 -139 -11 -301 50 -227 -104 -40 -165 317 0 -117 32 -189 -143 106 -363 -290 - -9 -383 -234 -146 -8 -309 -205 -210 224 -111 -154 -135 -277 119 160 -196 94 147 -356 0 - -174 -491 57 300 -396 -328 51 -351 95 -378 -243 -145 -289 50 33 -217 12 -38 -433 -348 - 61 -430 48 71 -335 60 -173 -37 243 -328 -192 -95 2 -80 41 27 -130 -250 -375 -291 - 55 -332 -507 -413 10 -422 -361 96 -365 194 -94 -334 -390 -340 -334 -331 92 167 -344 -304 - 61 19 -456 
-363 -205 -378 -316 176 -315 118 -79 32 -352 14 98 -285 19 91 -309 10 - -6 -431 96 174 -336 -74 -174 -302 -67 -329 -193 61 -249 205 41 72 21 -37 -376 -291 - -585 -591 -675 -658 339 -654 -303 -494 -585 -478 -424 -461 -604 -498 -508 -576 -538 -496 362 282 - -333 -431 -610 -551 -351 -574 -548 290 -506 94 -192 -471 -535 -506 -502 6 -316 227 -516 -458 - -7 -410 -200 4 -314 -290 -182 -28 139 137 97 -106 -256 51 38 25 20 -221 -366 -4 - 61 -428 -188 -102 -334 -74 52 -298 141 -130 93 61 -249 -81 112 195 -129 -248 -374 -5 - -318 386 -431 -324 -470 -438 -297 -372 -61 20 -304 47 -439 41 186 -360 -300 -347 -474 -424 - -264 97 -402 -180 -325 -446 -118 -335 29 -335 49 -229 -343 411 -155 -328 -274 -317 47 -50 --------------------------------------------------------------------------------- - --------------------------------------------------------------------------------- - Motif 2 position-specific probability matrix --------------------------------------------------------------------------------- -letter-probability matrix: alength= 20 w= 14 nsites= 21 E= 3.1e-019 - 0.000000 0.000000 0.095238 0.095238 0.142857 0.000000 0.000000 0.476190 0.047619 0.095238 0.047619 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 - 0.000000 0.000000 0.190476 0.000000 0.047619 0.000000 0.047619 0.000000 0.000000 0.095238 0.000000 0.380952 0.047619 0.000000 0.047619 0.000000 0.000000 0.142857 0.000000 0.000000 - 0.047619 0.000000 0.000000 0.000000 0.047619 0.000000 0.000000 0.000000 0.285714 0.047619 0.000000 0.000000 0.000000 0.095238 0.142857 0.000000 0.095238 0.190476 0.000000 0.047619 - 0.000000 0.000000 0.095238 0.571429 0.000000 0.000000 0.047619 0.000000 0.095238 0.000000 0.000000 0.000000 0.000000 0.047619 0.047619 0.000000 0.047619 0.047619 0.000000 0.000000 - 0.095238 0.000000 0.095238 0.095238 0.000000 0.142857 0.000000 0.047619 0.333333 0.000000 0.000000 0.000000 0.047619 0.000000 0.047619 0.095238 0.000000 0.000000 0.000000 0.000000 - 0.095238 0.000000 0.000000 0.000000 0.047619 0.000000 0.000000 0.095238 0.000000 0.476190 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.095238 0.190476 0.000000 0.000000 - 0.095238 0.047619 0.000000 0.000000 0.000000 0.000000 0.000000 0.190476 0.000000 0.285714 0.000000 0.047619 0.000000 0.047619 0.095238 0.000000 0.047619 0.095238 0.000000 0.047619 - 0.047619 0.000000 0.142857 0.238095 0.000000 0.047619 0.000000 0.000000 0.000000 0.000000 0.000000 0.047619 0.000000 0.190476 0.047619 0.142857 0.047619 0.047619 0.000000 0.000000 - 0.000000 0.000000 0.000000 0.000000 0.476190 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.238095 0.285714 - 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.380952 0.000000 0.238095 0.000000 0.000000 0.000000 0.000000 0.000000 0.095238 0.000000 0.285714 0.000000 0.000000 - 0.047619 0.000000 0.000000 0.047619 0.000000 0.000000 0.000000 0.047619 0.142857 0.380952 0.047619 0.000000 0.000000 0.047619 0.047619 0.095238 0.047619 0.000000 0.000000 0.047619 - 0.095238 0.000000 0.000000 0.000000 0.000000 0.047619 0.047619 0.000000 0.142857 0.047619 0.047619 0.047619 0.000000 0.000000 0.095238 0.380952 0.000000 0.000000 0.000000 0.047619 - 0.000000 0.619048 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.142857 0.000000 0.047619 0.000000 0.047619 0.142857 0.000000 0.000000 0.000000 0.000000 0.000000 - 0.000000 0.142857 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.095238 0.000000 0.047619 0.000000 0.000000 
0.619048 0.000000 0.000000 0.000000 0.000000 0.047619 0.047619 --------------------------------------------------------------------------------- - - - - - -Time 51.86 secs. - -******************************************************************************** - - -******************************************************************************** -SUMMARY OF MOTIFS -******************************************************************************** - --------------------------------------------------------------------------------- - Combined block diagrams: non-overlapping sites with p-value < 0.0001 --------------------------------------------------------------------------------- -SEQUENCE NAME COMBINED P-VALUE MOTIF DIAGRAM -------------- ---------------- ------------- -RAM1_YEAST 2.14e-20 144_[1(1.27e-16)]_5_[2(5.78e-07)]_4_[1(1.33e-13)]_1_[2(2.54e-08)]_4_[1(5.00e-17)]_1_[2(5.08e-11)]_4_[1(3.47e-16)]_4_[2(4.59e-09)]_5_[1(3.52e-13)]_35_[2(9.16e-05)]_4 -PFTB_RAT 2.44e-21 120_[2(1.52e-07)]_3_[1(4.30e-19)]_4_[2(2.34e-09)]_3_[1(2.81e-14)]_1_[2(1.65e-08)]_3_[1(1.15e-19)]_1_[2(1.65e-08)]_4_[1(3.17e-16)]_14_[2(4.58e-08)]_4_[1(4.30e-15)]_60 -BET2_YEAST 1.02e-27 6_[2(5.17e-05)]_3_[1(3.11e-10)]_1_[2(6.90e-10)]_4_[1(7.78e-14)]_4_[2(4.34e-07)]_3_[1(3.41e-17)]_1_[2(5.01e-07)]_3_[1(2.10e-18)]_21_[1(7.28e-22)]_1_[2(2.24e-13)]_4_[1(6.64e-17)]_24 -RATRABGERB 4.90e-26 65_[2(9.60e-12)]_3_[1(2.40e-14)]_1_[2(1.57e-09)]_3_[1(8.19e-19)]_1_[2(4.20e-12)]_3_[1(7.36e-19)]_18_[1(6.18e-21)]_1_[2(1.30e-12)]_4_[1(1.43e-17)]_26 -CAL1_YEAST 3.16e-22 125_[2(1.65e-08)]_50_[2(5.01e-10)]_1_[1(1.14e-13)]_4_[2(5.86e-08)]_22_[1(9.17e-20)]_22_[1(5.47e-13)]_5_[2(1.91e-07)]_1 --------------------------------------------------------------------------------- - -******************************************************************************** - - -******************************************************************************** -Stopped because nmotifs = 2 reached. -******************************************************************************** - -CPU: pmgm2 - -******************************************************************************** diff --git a/Tests/Motif/transfac.dat b/Tests/Motif/transfac.dat deleted file mode 100644 index 9287f8622e5..00000000000 --- a/Tests/Motif/transfac.dat +++ /dev/null @@ -1,31 +0,0 @@ -VV EXAMPLE January 15, 2013 -XX -// -ID motif1 -P0 A C G T -01 1 2 2 0 S -02 2 1 2 0 R -03 3 0 1 1 A -04 0 5 0 0 C -05 5 0 0 0 A -06 0 0 4 1 G -07 0 1 4 0 G -08 0 0 0 5 T -09 0 0 5 0 G -10 0 1 2 2 K -11 0 2 0 3 Y -12 1 0 3 1 G -// -ID motif2 -P0 A C G T -01 2 1 2 0 R -02 1 2 2 0 S -03 0 5 0 0 C -04 3 0 1 1 A -05 0 0 4 1 G -06 5 0 0 0 A -07 0 1 4 0 G -08 0 0 5 0 G -09 0 0 0 5 T -10 0 2 0 3 Y -//
fidals__shopelectro-422
helpers.py:58-59: Move selenium timeout to env var. stb2...

The puzzle `371-6673a075` from #371 has to be resolved:

https://github.com/fidals/shopelectro/blob/7d319f81942692c16105a8f8703e483a7067f1ef/shopelectro/tests/helpers.py#L58-L59

The puzzle was created by duker33 on 19-Jun-18. Estimate: 15 minutes. If you have any technical questions, don't ask me; submit new tickets instead. The task will be "done" when the problem is fixed and the text of the puzzle is _removed_ from the source code.

Here is more about [PDD](http://www.yegor256.com/2009/03/04/pdd.html) and [about me](http://www.yegor256.com/2017/04/05/pdd-in-action.html).
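For context, the pattern this record's fix adopts (see the `after_files` and diff below) is to read both Selenium timeouts from environment variables, keeping the previously hard-coded values as defaults. A minimal sketch of that pattern; the `make_waits` helper name is illustrative and not part of the repo:

```
# Sketch: environment-backed Selenium timeouts with the old literals as defaults.
import os

from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait

SELENIUM_WAIT_SECONDS = int(os.environ.get('SELENIUM_WAIT_SECONDS', 60))
SELENIUM_TIMEOUT_SECONDS = int(os.environ.get('SELENIUM_TIMEOUT_SECONDS', 30))


def make_waits(browser: webdriver.Remote) -> WebDriverWait:
    """Apply the configurable timeouts to a browser session (illustrative helper)."""
    browser.set_page_load_timeout(SELENIUM_TIMEOUT_SECONDS)
    return WebDriverWait(browser, SELENIUM_WAIT_SECONDS)
```

This lets CI (drone) tune the waits from its `environment` block without touching test code, which is exactly the motivation stated in the puzzle comment.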
[ { "content": "\"\"\"\nDjango settings for shopelectro project.\n\nGenerated by 'django-admin startproject' using Django 1.9.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nimport os\nfrom datetime import datetime\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', 'so_secret_key')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# http://bit.ly/sorl-thumbnail-docs\nTHUMBNAIL_DEBUG = False\n\nALLOWED_HOSTS = ['*']\n\nif os.environ.get('TEST_ENV', False):\n # disable https in CI\n # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'http')\n\n# Enable in frame loading for Ya.Metric\n# https://docs.djangoproject.com/es/1.10/ref/clickjacking/\n# https://yandex.ru/support/metrika/general/counter-webvisor.xml#download-page\nX_FRAME_OPTIONS = 'ALLOW-FROM http://webvisor.com'\n\n# Application definition\nINSTALLED_APPS = [\n # https://docs.djangoproject.com/en/1.9/ref/contrib/admin/#django.contrib.admin.autodiscover\n 'django.contrib.contenttypes',\n 'django.contrib.auth',\n 'django.contrib.messages',\n 'django.contrib.redirects',\n 'django.contrib.sessions',\n 'django.contrib.sitemaps',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django_user_agents',\n 'generic_admin',\n 'django.contrib.admin.apps.SimpleAdminConfig',\n 'debug_toolbar',\n 'mptt',\n 'widget_tweaks',\n 'sorl.thumbnail',\n 'django_select2',\n 'images',\n 'refarm_redirects',\n 'pages',\n 'catalog',\n 'search',\n 'ecommerce',\n 'shopelectro',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django_user_agents.middleware.UserAgentMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n 'refarm_redirects.middleware.RedirectAllMiddleware',\n]\n\nROOT_URLCONF = 'shopelectro.urls'\n\nTEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [TEMPLATE_DIR],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.request',\n 'django.template.context_processors.static',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'ecommerce.context_processors.cart',\n 'shopelectro.context_processors.shop',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'shopelectro.wsgi.application'\n\n# Password validation\n# 
https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLOCALE_NAME = 'en_US'\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'shopelectro/locale')]\nFORMAT_MODULE_PATH = [\n 'shopelectro.formats',\n]\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nASSETS_DIR = os.path.join(BASE_DIR, 'assets')\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'front/build'),\n ASSETS_DIR,\n]\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\nDATABASE_URL = os.environ[\"POSTGRES_URL\"]\n\n# to activate django connections pool for persistent connections.\n# https://docs.djangoproject.com/en/1.11/ref/databases/#persistent-connections\nCONN_MAX_AGE = None\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.environ['POSTGRES_DB'],\n 'USER': os.environ['POSTGRES_USER'],\n 'PASSWORD': os.environ['POSTGRES_PASSWORD'],\n 'HOST': os.environ['POSTGRES_URL'],\n 'PORT': '5432',\n }\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'pages': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'catalog': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'search': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'ecommerce': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'images': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'shopelectro': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n },\n}\n\nSELENIUM_URL = os.environ.get('SELENIUM_URL', 'http://selenium:4444/wd/hub')\n\nSITE_CREATED = datetime(2013, 1, 1)\n\nLOCALHOST = 'http://127.0.0.1:8000/'\nBASE_URL = 'https://www.shopelectro.ru'\n\nPLACEHOLDER_IMAGE = 'images/logo.png'\nPLACEHOLDER_ALT = 'Логотип компании Shopelectro'\n\n# Autocomplete and search settings\nSEARCH_SEE_ALL_LABEL = 'Смотреть все результаты'\n\n# For sitemaps and sites framework\nSITE_ID = 1\nSITE_DOMAIN_NAME = 'www.shopelectro.ru'\n\n# Used to retrieve instances in ecommerce.Cart\nCART_ID = 'cart'\n\n# Used to define choices attr in definition of Order.payment_type field\nPAYMENT_OPTIONS = (\n ('cash', 'Наличные'),\n ('cashless', 'Безналичные и денежные переводы'),\n ('AC', 'Банковская карта'),\n ('PC', 'Яндекс.Деньги'),\n ('GP', 'Связной (терминал)'),\n ('AB', 'Альфа-Клик'),\n)\n\n# It is fake-pass. 
Correct pass will be created on `docker-compose up` stage from `docker/.env`\nYANDEX_SHOP_PASS = os.environ.get('YANDEX_SHOP_PASS', 'so_secret_pass')\n\n# Used for order's email in ecommerce app\nFAKE_ORDER_NUMBER = 6000\n\n# Subjects for different types of emails sent from SE.\nEMAIL_SUBJECTS = {\n 'call': 'Обратный звонок',\n 'order': 'Заказ №{0.fake_order_number}',\n 'yandex_order': 'Заказ №{0.fake_order_number} | Яндекс.Касса',\n 'one_click': 'Заказ в один клик №{0.fake_order_number}',\n 'ya_feedback_request': 'Оцените нас на Яндекс.Маркете',\n}\n\n# Email configs\n# It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', 'so_secret_pass')\nEMAIL_HOST_USER = '[email protected]'\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.yandex.ru'\nEMAIL_PORT = 587\nEMAIL_SENDER = '[email protected]'\nEMAIL_RECIPIENTS = os.environ.get('EMAIL_RECIPIENTS', '[email protected]').split(',')\n\n# FTP configs\nFTP_USER = os.environ.get('FTP_USER', 'user')\nFTP_PASS = os.environ.get('FTP_PASS', 'pass')\nFTP_IP = os.environ.get('FTP_IP', '0.0.0.0')\n\nENV_TYPE = os.environ.get('ENV_TYPE', 'PROD') # LOCAL | CI | PROD\n\n# 'Prod' <-> 'Product #1 of Category #0 of Category #1' = 0.17\n# About trigram similarity: https://goo.gl/uYFcxN\nTRIGRAM_MIN_SIMILARITY = 0.15\n\n# Used in admin image uploads\nMODEL_TYPES = {\n 'Product': {\n 'app_name': 'shopelectro',\n 'dir_name': 'products',\n },\n 'Category': {\n 'app_name': 'shopelectro',\n 'dir_name': 'categories',\n }\n}\n\n# This need for using {% debug %} variable in templates.\nINTERNAL_IPS = (\n '127.0.0.1',\n)\n\nTOP_PRODUCTS = [291, 438, 1137, 2166, 2725, 2838, 3288, 3884, 3959, 2764]\nCATEGORY_STEP_MULTIPLIERS = [12, 15, 24, 25, 48, 50, 60, 100]\n\n# Reduce retail product prices by PRICE_REDUCER.\n# It is required to make prices on shopelectro.ru and se78.ru unique.\nPRICE_REDUCER = 1\n\nSHOP = {\n 'id': '69886',\n 'scid': '64788',\n 'success_url': BASE_URL + '/shop/order-success/',\n 'fail_url': BASE_URL + '/',\n 'cps_phone': '+78124163200',\n 'cps_email': '[email protected]',\n 'local_delivery_cost': 300,\n 'local_delivery_cost_threshold': 5000,\n}\n\n\ndef get_robots_content():\n with open(os.path.join(TEMPLATE_DIR, 'robots.txt')) as robots_file:\n return robots_file.read()\n\n# used in data-migrations and tests\nCUSTOM_PAGES = {\n 'index': {\n 'slug': '',\n 'name': 'Интернет-магазин элементов питания \"ShopElectro\"',\n 'menu_title': 'Главная',\n 'title': 'Интернет-магазин Элементов питания с доставкой по России',\n },\n 'sitemap': {\n 'slug': 'sitemap',\n 'h1': 'Карта сайта',\n 'name': 'Карта сайта',\n },\n 'order': {\n 'slug': 'order',\n 'name': 'Оформление заказа',\n 'title': 'Корзина Интернет-магазин shopelectro.ru Санкт-Петербург',\n },\n 'search': {\n 'slug': 'search',\n 'name': 'Результаты поиска',\n },\n 'catalog': {\n 'slug': 'catalog',\n 'name': 'Каталог товаров',\n 'menu_title': 'Каталог',\n },\n 'order_success': {\n 'slug': 'order-success',\n 'name': 'Заказ принят',\n },\n 'robots': {\n 'slug': 'robots.txt',\n 'content': get_robots_content(),\n },\n}\n\nTAGS_URL_DELIMITER = '-or-'\nTAG_GROUPS_URL_DELIMITER = '-and-'\n\nTAGS_TITLE_DELIMITER = ' или '\nTAG_GROUPS_TITLE_DELIMITER = ' и '\n\nTAGS_ORDER = ['group__position', 'group__name', 'position', 'name']\n\n# -- App business logic --\n# every product price will be multiplied on this value\n# during import from 1C.\n# Multipliers are related to prices in this order:\n# big/medium/small/retail. 
First three are wholesale prices.\nPRICE_MULTIPLIERS = 1.0, 1.0, 1.0, 1.0\n", "path": "shopelectro/settings/base.py" } ]
[ { "content": "\"\"\"\nDjango settings for shopelectro project.\n\nGenerated by 'django-admin startproject' using Django 1.9.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nimport os\nfrom datetime import datetime\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', 'so_secret_key')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# http://bit.ly/sorl-thumbnail-docs\nTHUMBNAIL_DEBUG = False\n\nALLOWED_HOSTS = ['*']\n\nif os.environ.get('TEST_ENV', False):\n # disable https in CI\n # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'http')\n\n# Enable in frame loading for Ya.Metric\n# https://docs.djangoproject.com/es/1.10/ref/clickjacking/\n# https://yandex.ru/support/metrika/general/counter-webvisor.xml#download-page\nX_FRAME_OPTIONS = 'ALLOW-FROM http://webvisor.com'\n\n# Application definition\nINSTALLED_APPS = [\n # https://docs.djangoproject.com/en/1.9/ref/contrib/admin/#django.contrib.admin.autodiscover\n 'django.contrib.contenttypes',\n 'django.contrib.auth',\n 'django.contrib.messages',\n 'django.contrib.redirects',\n 'django.contrib.sessions',\n 'django.contrib.sitemaps',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django_user_agents',\n 'generic_admin',\n 'django.contrib.admin.apps.SimpleAdminConfig',\n 'debug_toolbar',\n 'mptt',\n 'widget_tweaks',\n 'sorl.thumbnail',\n 'django_select2',\n 'images',\n 'refarm_redirects',\n 'pages',\n 'catalog',\n 'search',\n 'ecommerce',\n 'shopelectro',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django_user_agents.middleware.UserAgentMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n 'refarm_redirects.middleware.RedirectAllMiddleware',\n]\n\nROOT_URLCONF = 'shopelectro.urls'\n\nTEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [TEMPLATE_DIR],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.request',\n 'django.template.context_processors.static',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'ecommerce.context_processors.cart',\n 'shopelectro.context_processors.shop',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'shopelectro.wsgi.application'\n\n# Password validation\n# 
https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLOCALE_NAME = 'en_US'\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'shopelectro/locale')]\nFORMAT_MODULE_PATH = [\n 'shopelectro.formats',\n]\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nASSETS_DIR = os.path.join(BASE_DIR, 'assets')\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'front/build'),\n ASSETS_DIR,\n]\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\nDATABASE_URL = os.environ[\"POSTGRES_URL\"]\n\n# to activate django connections pool for persistent connections.\n# https://docs.djangoproject.com/en/1.11/ref/databases/#persistent-connections\nCONN_MAX_AGE = None\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.environ['POSTGRES_DB'],\n 'USER': os.environ['POSTGRES_USER'],\n 'PASSWORD': os.environ['POSTGRES_PASSWORD'],\n 'HOST': os.environ['POSTGRES_URL'],\n 'PORT': '5432',\n }\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'pages': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'catalog': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'search': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'ecommerce': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'images': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'shopelectro': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n },\n}\n\nSELENIUM_URL = os.environ.get('SELENIUM_URL', 'http://selenium:4444/wd/hub')\nSELENIUM_WAIT_SECONDS = int(os.environ.get('SELENIUM_WAIT_SECONDS', 60))\nSELENIUM_TIMEOUT_SECONDS = int(os.environ.get('SELENIUM_TIMEOUT_SECONDS', 30))\n\nSITE_CREATED = datetime(2013, 1, 1)\n\nLOCALHOST = 'http://127.0.0.1:8000/'\nBASE_URL = 'https://www.shopelectro.ru'\n\nPLACEHOLDER_IMAGE = 'images/logo.png'\nPLACEHOLDER_ALT = 'Логотип компании Shopelectro'\n\n# Autocomplete and search settings\nSEARCH_SEE_ALL_LABEL = 'Смотреть все результаты'\n\n# For sitemaps and sites framework\nSITE_ID = 1\nSITE_DOMAIN_NAME = 'www.shopelectro.ru'\n\n# Used to retrieve instances in ecommerce.Cart\nCART_ID = 'cart'\n\n# Used to define choices attr in definition of Order.payment_type field\nPAYMENT_OPTIONS = (\n ('cash', 'Наличные'),\n ('cashless', 'Безналичные и денежные переводы'),\n ('AC', 'Банковская карта'),\n ('PC', 'Яндекс.Деньги'),\n ('GP', 'Связной (терминал)'),\n ('AB', 'Альфа-Клик'),\n)\n\n# It is fake-pass. 
Correct pass will be created on `docker-compose up` stage from `docker/.env`\nYANDEX_SHOP_PASS = os.environ.get('YANDEX_SHOP_PASS', 'so_secret_pass')\n\n# Used for order's email in ecommerce app\nFAKE_ORDER_NUMBER = 6000\n\n# Subjects for different types of emails sent from SE.\nEMAIL_SUBJECTS = {\n 'call': 'Обратный звонок',\n 'order': 'Заказ №{0.fake_order_number}',\n 'yandex_order': 'Заказ №{0.fake_order_number} | Яндекс.Касса',\n 'one_click': 'Заказ в один клик №{0.fake_order_number}',\n 'ya_feedback_request': 'Оцените нас на Яндекс.Маркете',\n}\n\n# Email configs\n# It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', 'so_secret_pass')\nEMAIL_HOST_USER = '[email protected]'\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.yandex.ru'\nEMAIL_PORT = 587\nEMAIL_SENDER = '[email protected]'\nEMAIL_RECIPIENTS = os.environ.get('EMAIL_RECIPIENTS', '[email protected]').split(',')\n\n# FTP configs\nFTP_USER = os.environ.get('FTP_USER', 'user')\nFTP_PASS = os.environ.get('FTP_PASS', 'pass')\nFTP_IP = os.environ.get('FTP_IP', '0.0.0.0')\n\nENV_TYPE = os.environ.get('ENV_TYPE', 'PROD') # LOCAL | CI | PROD\n\n# 'Prod' <-> 'Product #1 of Category #0 of Category #1' = 0.17\n# About trigram similarity: https://goo.gl/uYFcxN\nTRIGRAM_MIN_SIMILARITY = 0.15\n\n# Used in admin image uploads\nMODEL_TYPES = {\n 'Product': {\n 'app_name': 'shopelectro',\n 'dir_name': 'products',\n },\n 'Category': {\n 'app_name': 'shopelectro',\n 'dir_name': 'categories',\n }\n}\n\n# This need for using {% debug %} variable in templates.\nINTERNAL_IPS = (\n '127.0.0.1',\n)\n\nTOP_PRODUCTS = [291, 438, 1137, 2166, 2725, 2838, 3288, 3884, 3959, 2764]\nCATEGORY_STEP_MULTIPLIERS = [12, 15, 24, 25, 48, 50, 60, 100]\n\n# Reduce retail product prices by PRICE_REDUCER.\n# It is required to make prices on shopelectro.ru and se78.ru unique.\nPRICE_REDUCER = 1\n\nSHOP = {\n 'id': '69886',\n 'scid': '64788',\n 'success_url': BASE_URL + '/shop/order-success/',\n 'fail_url': BASE_URL + '/',\n 'cps_phone': '+78124163200',\n 'cps_email': '[email protected]',\n 'local_delivery_cost': 300,\n 'local_delivery_cost_threshold': 5000,\n}\n\n\ndef get_robots_content():\n with open(os.path.join(TEMPLATE_DIR, 'robots.txt')) as robots_file:\n return robots_file.read()\n\n# used in data-migrations and tests\nCUSTOM_PAGES = {\n 'index': {\n 'slug': '',\n 'name': 'Интернет-магазин элементов питания \"ShopElectro\"',\n 'menu_title': 'Главная',\n 'title': 'Интернет-магазин Элементов питания с доставкой по России',\n },\n 'sitemap': {\n 'slug': 'sitemap',\n 'h1': 'Карта сайта',\n 'name': 'Карта сайта',\n },\n 'order': {\n 'slug': 'order',\n 'name': 'Оформление заказа',\n 'title': 'Корзина Интернет-магазин shopelectro.ru Санкт-Петербург',\n },\n 'search': {\n 'slug': 'search',\n 'name': 'Результаты поиска',\n },\n 'catalog': {\n 'slug': 'catalog',\n 'name': 'Каталог товаров',\n 'menu_title': 'Каталог',\n },\n 'order_success': {\n 'slug': 'order-success',\n 'name': 'Заказ принят',\n },\n 'robots': {\n 'slug': 'robots.txt',\n 'content': get_robots_content(),\n },\n}\n\nTAGS_URL_DELIMITER = '-or-'\nTAG_GROUPS_URL_DELIMITER = '-and-'\n\nTAGS_TITLE_DELIMITER = ' или '\nTAG_GROUPS_TITLE_DELIMITER = ' и '\n\nTAGS_ORDER = ['group__position', 'group__name', 'position', 'name']\n\n# -- App business logic --\n# every product price will be multiplied on this value\n# during import from 1C.\n# Multipliers are related to prices in this order:\n# big/medium/small/retail. 
First three are wholesale prices.\nPRICE_MULTIPLIERS = 1.0, 1.0, 1.0, 1.0\n", "path": "shopelectro/settings/base.py" } ]
diff --git a/.drone.yml b/.drone.yml
index c484de09..4c4cde5e 100644
--- a/.drone.yml
+++ b/.drone.yml
@@ -173,10 +173,13 @@ services:
 
   selenium:
     image: selenium/standalone-chrome-debug:3.10.0
-    environment: # https://github.com/SeleniumHQ/docker-selenium/issues/392
+    environment:
+      # https://github.com/SeleniumHQ/docker-selenium/issues/392
       - DBUS_SESSION_BUS_ADDRESS=/dev/null
       - SCREEN_WIDTH=1366
       - SCREEN_HEIGHT=768
+      - SELENIUM_WAIT_SECONDS=${SELENIUM_WAIT_SECONDS}
+      - SELENIUM_TIMEOUT_SECONDS=${SELENIUM_TIMEOUT_SECONDS}
     shm_size: 4G
     volumes: # https://github.com/SeleniumHQ/docker-selenium#running-the-images
       - /dev/shm:/dev/shm
diff --git a/docker/env_files/app.dist b/docker/env_files/app.dist
index c957a6d2..7cb27a0a 100644
--- a/docker/env_files/app.dist
+++ b/docker/env_files/app.dist
@@ -16,3 +16,6 @@ REDIS_URL=redis
 REDIS_PORT=6379
 RABBITMQ_URL=rabbitmq
 RABBITMQ_PORT=5672
+
+SELENIUM_WAIT_SECONDS=60
+SELENIUM_TIMEOUT_SECONDS=30
diff --git a/shopelectro/settings/base.py b/shopelectro/settings/base.py
index 9c9b4d31..11b21f27 100644
--- a/shopelectro/settings/base.py
+++ b/shopelectro/settings/base.py
@@ -217,6 +217,8 @@
 }
 
 SELENIUM_URL = os.environ.get('SELENIUM_URL', 'http://selenium:4444/wd/hub')
+SELENIUM_WAIT_SECONDS = int(os.environ.get('SELENIUM_WAIT_SECONDS', 60))
+SELENIUM_TIMEOUT_SECONDS = int(os.environ.get('SELENIUM_TIMEOUT_SECONDS', 30))
 
 SITE_CREATED = datetime(2013, 1, 1)
 
diff --git a/shopelectro/tests/helpers.py b/shopelectro/tests/helpers.py
index 30b37f6c..4bf99770 100644
--- a/shopelectro/tests/helpers.py
+++ b/shopelectro/tests/helpers.py
@@ -58,9 +58,9 @@ def setUpClass(cls):
         )
         # @todo #371:15m Move selenium timeout to env var. stb2
         # To be able to change it from drone without touching code.
-        cls.wait = WebDriverWait(cls.browser, 60)
+        cls.wait = WebDriverWait(cls.browser, settings.SELENIUM_WAIT_SECONDS)
         cls.browser.implicitly_wait(30)
-        cls.browser.set_page_load_timeout(30)
+        cls.browser.set_page_load_timeout(settings.SELENIUM_TIMEOUT_SECONDS)
         # Fresh created browser failures on maximizing window.
         # This bug is won't fixed by selenium guys https://goo.gl/6Ttguf
         # Ohh, so selenium is so selenium ...
feast-dev__feast-3904
Table in Postgres OnlineStore is not populated after calling `materialize`

## Expected Behavior

When calling the `materialize` functionality to materialize data from a `SnowflakeSource` offline store to a local `PostgreSQLOnlineStore`, the table is not populated with the data.

## Current Behavior

The feature table in the local Postgres instance is not populated, while no exception is raised, and from the logs it seems like the data should be pushed to Postgres.

## Steps to reproduce

1) Use this feature_store.yaml file:

```
project: my_project
provider: local
registry:
  registry_type: sql
  path: postgresql://postgres:test@0.0.0.0:5432/feature_store
  cache_ttl_seconds: 60
online_store:
  type: postgres
  host: 0.0.0.0
  port: 5432
  database: feature_store
  db_schema: public
  user: postgres
  password: test
offline_store:
  <SNOWFLAKE_INFORMATION>
entity_key_serialization_version: 2
```

2) Spin up this docker-compose file:

```
---
version: "3"

services:
  db:
    restart: always
    image: postgres:15-alpine
    container_name: feast_db
    ports:
      - "5432:5432"
    volumes:
      - ~/feast_postgres_data:/var/lib/postgresql/data
    environment:
      - POSTGRES_DB=feature_store
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=test

volumes:
  feast_postgres_data: null
```

3) Initialize the Entities, SnowflakeSource (or another source), FeatureView, and FeatureService, and apply these, all using the Python SDK:

```
from datetime import timedelta

from feast import (
    Entity,
    FeatureService,
    FeatureView,
    Field,
    SnowflakeSource,
    ValueType,
    FeatureStore,
)
from feast.types import Float32

feature_store = FeatureStore()

entity = Entity(
    name="entity",
    join_keys=["entity_ID"],
    value_type=ValueType.STRING,
)

source = SnowflakeSource(
    name="snowflake_source_name",
    timestamp_field="EVENT_TIMESTAMP",
    schema="TEMP",
    table="TABLE",
)

feature_view = FeatureView(
    name="feature_view_name",
    entities=[entity],
    ttl=timedelta(days=0),
    schema=[
        Field(name="feature_1", dtype=Float32),
        Field(name="feature_2", dtype=Float32),
    ],
    online=True,
    source=source,
    tags={"team": "team"},
)

feature_service = FeatureService(
    name="feature_service",
    features=[feature_view],
)

feature_store.apply(
    [
        entity,
        source,
        feature_view,
        feature_service,
    ]
)
```

4) Run the materialize command using the Python SDK:

```
from datetime import datetime, timedelta

from feast import FeatureStore

feature_store = FeatureStore()

feature_store.materialize(
    start_date=datetime.utcnow() - timedelta(weeks=52),
    end_date=datetime.utcnow(),
    feature_views=["feature_view_name"],
)
```

### Specifications

- Version: 0.35.0
- Platform: Local MacBook M1

## Possible Solution

It seems that a `conn.commit()` statement is missing in the `online_write_batch` method of the `PostgreSQLOnlineStore`. Specifically, on [this line](https://github.com/feast-dev/feast/blob/master/sdk/python/feast/infra/online_stores/contrib/postgres.py#L102). After adding this, the table is populated.

The PR implementing this proposed fix can be found [here](https://github.com/feast-dev/feast/pull/3904).

## Additional notes

When replacing the postgres online store with the following sqlite online store in the config file, everything works without any code changes:

```
online_store:
  type: sqlite
  path: data/online_store.db
```
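The mechanics behind this symptom: psycopg2 connections are not autocommit by default, so every write runs inside an implicit transaction that is discarded if the connection is closed or recycled without a commit; `execute_values` can therefore succeed while nothing ever becomes visible in the table. A minimal standalone sketch of those semantics, assuming the local Postgres from the compose file above (the `demo_features` table is hypothetical, not a real Feast table):

```
# Sketch of the psycopg2 transaction semantics behind the empty-table symptom.
import psycopg2
from psycopg2.extras import execute_values

conn = psycopg2.connect("postgresql://postgres:test@0.0.0.0:5432/feature_store")
with conn.cursor() as cur:
    cur.execute(
        "CREATE TABLE IF NOT EXISTS demo_features "
        "(entity_key BYTEA, feature_name TEXT, value BYTEA)"
    )
    execute_values(
        cur,
        "INSERT INTO demo_features (entity_key, feature_name, value) VALUES %s",
        [(b"k1", "feature_1", b"\x01"), (b"k1", "feature_2", b"\x02")],
    )
# Closing the cursor does not end the transaction; without this commit the
# rows are rolled back when the connection goes away and the table stays empty.
conn.commit()
conn.close()
```

Per the proposed solution above, the same one-line `conn.commit()` at the end of `online_write_batch` is what makes the materialized rows persist.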
[ { "content": "import contextlib\nimport logging\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom typing import Any, Callable, Dict, List, Optional, Sequence, Tuple\n\nimport psycopg2\nimport pytz\nfrom psycopg2 import sql\nfrom psycopg2.extras import execute_values\nfrom psycopg2.pool import SimpleConnectionPool\nfrom pydantic.schema import Literal\n\nfrom feast import Entity\nfrom feast.feature_view import FeatureView\nfrom feast.infra.key_encoding_utils import serialize_entity_key\nfrom feast.infra.online_stores.online_store import OnlineStore\nfrom feast.infra.utils.postgres.connection_utils import _get_conn, _get_connection_pool\nfrom feast.infra.utils.postgres.postgres_config import ConnectionType, PostgreSQLConfig\nfrom feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto\nfrom feast.protos.feast.types.Value_pb2 import Value as ValueProto\nfrom feast.repo_config import RepoConfig\nfrom feast.usage import log_exceptions_and_usage\n\n\nclass PostgreSQLOnlineStoreConfig(PostgreSQLConfig):\n type: Literal[\"postgres\"] = \"postgres\"\n\n\nclass PostgreSQLOnlineStore(OnlineStore):\n _conn: Optional[psycopg2._psycopg.connection] = None\n _conn_pool: Optional[SimpleConnectionPool] = None\n\n @contextlib.contextmanager\n def _get_conn(self, config: RepoConfig):\n assert config.online_store.type == \"postgres\"\n if config.online_store.conn_type == ConnectionType.pool:\n if not self._conn_pool:\n self._conn_pool = _get_connection_pool(config.online_store)\n connection = self._conn_pool.getconn()\n yield connection\n self._conn_pool.putconn(connection)\n else:\n if not self._conn:\n self._conn = _get_conn(config.online_store)\n yield self._conn\n\n @log_exceptions_and_usage(online_store=\"postgres\")\n def online_write_batch(\n self,\n config: RepoConfig,\n table: FeatureView,\n data: List[\n Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]\n ],\n progress: Optional[Callable[[int], Any]],\n ) -> None:\n project = config.project\n\n with self._get_conn(config) as conn, conn.cursor() as cur:\n insert_values = []\n for entity_key, values, timestamp, created_ts in data:\n entity_key_bin = serialize_entity_key(\n entity_key,\n entity_key_serialization_version=config.entity_key_serialization_version,\n )\n timestamp = _to_naive_utc(timestamp)\n if created_ts is not None:\n created_ts = _to_naive_utc(created_ts)\n\n for feature_name, val in values.items():\n insert_values.append(\n (\n entity_key_bin,\n feature_name,\n val.SerializeToString(),\n timestamp,\n created_ts,\n )\n )\n # Control the batch so that we can update the progress\n batch_size = 5000\n for i in range(0, len(insert_values), batch_size):\n cur_batch = insert_values[i : i + batch_size]\n execute_values(\n cur,\n sql.SQL(\n \"\"\"\n INSERT INTO {}\n (entity_key, feature_name, value, event_ts, created_ts)\n VALUES %s\n ON CONFLICT (entity_key, feature_name) DO\n UPDATE SET\n value = EXCLUDED.value,\n event_ts = EXCLUDED.event_ts,\n created_ts = EXCLUDED.created_ts;\n \"\"\",\n ).format(sql.Identifier(_table_id(project, table))),\n cur_batch,\n page_size=batch_size,\n )\n if progress:\n progress(len(cur_batch))\n\n @log_exceptions_and_usage(online_store=\"postgres\")\n def online_read(\n self,\n config: RepoConfig,\n table: FeatureView,\n entity_keys: List[EntityKeyProto],\n requested_features: Optional[List[str]] = None,\n ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:\n result: List[Tuple[Optional[datetime], Optional[Dict[str, 
ValueProto]]]] = []\n\n project = config.project\n with self._get_conn(config) as conn, conn.cursor() as cur:\n # Collecting all the keys to a list allows us to make fewer round trips\n # to PostgreSQL\n keys = []\n for entity_key in entity_keys:\n keys.append(\n serialize_entity_key(\n entity_key,\n entity_key_serialization_version=config.entity_key_serialization_version,\n )\n )\n\n if not requested_features:\n cur.execute(\n sql.SQL(\n \"\"\"\n SELECT entity_key, feature_name, value, event_ts\n FROM {} WHERE entity_key = ANY(%s);\n \"\"\"\n ).format(\n sql.Identifier(_table_id(project, table)),\n ),\n (keys,),\n )\n else:\n cur.execute(\n sql.SQL(\n \"\"\"\n SELECT entity_key, feature_name, value, event_ts\n FROM {} WHERE entity_key = ANY(%s) and feature_name = ANY(%s);\n \"\"\"\n ).format(\n sql.Identifier(_table_id(project, table)),\n ),\n (keys, requested_features),\n )\n\n rows = cur.fetchall()\n\n # Since we don't know the order returned from PostgreSQL we'll need\n # to construct a dict to be able to quickly look up the correct row\n # when we iterate through the keys since they are in the correct order\n values_dict = defaultdict(list)\n for row in rows if rows is not None else []:\n values_dict[row[0].tobytes()].append(row[1:])\n\n for key in keys:\n if key in values_dict:\n value = values_dict[key]\n res = {}\n for feature_name, value_bin, event_ts in value:\n val = ValueProto()\n val.ParseFromString(bytes(value_bin))\n res[feature_name] = val\n result.append((event_ts, res))\n else:\n result.append((None, None))\n\n return result\n\n @log_exceptions_and_usage(online_store=\"postgres\")\n def update(\n self,\n config: RepoConfig,\n tables_to_delete: Sequence[FeatureView],\n tables_to_keep: Sequence[FeatureView],\n entities_to_delete: Sequence[Entity],\n entities_to_keep: Sequence[Entity],\n partial: bool,\n ):\n project = config.project\n schema_name = config.online_store.db_schema or config.online_store.user\n with self._get_conn(config) as conn, conn.cursor() as cur:\n # If a db_schema is provided, then that schema gets created if it doesn't\n # exist. 
Else a schema is created for the feature store user.\n\n cur.execute(\n \"\"\"\n SELECT schema_name\n FROM information_schema.schemata\n WHERE schema_name = %s\n \"\"\",\n (schema_name,),\n )\n schema_exists = cur.fetchone()\n if not schema_exists:\n cur.execute(\n sql.SQL(\"CREATE SCHEMA IF NOT EXISTS {} AUTHORIZATION {}\").format(\n sql.Identifier(schema_name),\n sql.Identifier(config.online_store.user),\n ),\n )\n\n for table in tables_to_delete:\n table_name = _table_id(project, table)\n cur.execute(_drop_table_and_index(table_name))\n\n for table in tables_to_keep:\n table_name = _table_id(project, table)\n cur.execute(\n sql.SQL(\n \"\"\"\n CREATE TABLE IF NOT EXISTS {}\n (\n entity_key BYTEA,\n feature_name TEXT,\n value BYTEA,\n event_ts TIMESTAMPTZ,\n created_ts TIMESTAMPTZ,\n PRIMARY KEY(entity_key, feature_name)\n );\n CREATE INDEX IF NOT EXISTS {} ON {} (entity_key);\n \"\"\"\n ).format(\n sql.Identifier(table_name),\n sql.Identifier(f\"{table_name}_ek\"),\n sql.Identifier(table_name),\n )\n )\n\n conn.commit()\n\n def teardown(\n self,\n config: RepoConfig,\n tables: Sequence[FeatureView],\n entities: Sequence[Entity],\n ):\n project = config.project\n try:\n with self._get_conn(config) as conn, conn.cursor() as cur:\n for table in tables:\n table_name = _table_id(project, table)\n cur.execute(_drop_table_and_index(table_name))\n except Exception:\n logging.exception(\"Teardown failed\")\n raise\n\n\ndef _table_id(project: str, table: FeatureView) -> str:\n return f\"{project}_{table.name}\"\n\n\ndef _drop_table_and_index(table_name):\n return sql.SQL(\n \"\"\"\n DROP TABLE IF EXISTS {};\n DROP INDEX IF EXISTS {};\n \"\"\"\n ).format(\n sql.Identifier(table_name),\n sql.Identifier(f\"{table_name}_ek\"),\n )\n\n\ndef _to_naive_utc(ts: datetime):\n if ts.tzinfo is None:\n return ts\n else:\n return ts.astimezone(pytz.utc).replace(tzinfo=None)\n", "path": "sdk/python/feast/infra/online_stores/contrib/postgres.py" } ]
[ { "content": "import contextlib\nimport logging\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom typing import Any, Callable, Dict, List, Optional, Sequence, Tuple\n\nimport psycopg2\nimport pytz\nfrom psycopg2 import sql\nfrom psycopg2.extras import execute_values\nfrom psycopg2.pool import SimpleConnectionPool\nfrom pydantic.schema import Literal\n\nfrom feast import Entity\nfrom feast.feature_view import FeatureView\nfrom feast.infra.key_encoding_utils import serialize_entity_key\nfrom feast.infra.online_stores.online_store import OnlineStore\nfrom feast.infra.utils.postgres.connection_utils import _get_conn, _get_connection_pool\nfrom feast.infra.utils.postgres.postgres_config import ConnectionType, PostgreSQLConfig\nfrom feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto\nfrom feast.protos.feast.types.Value_pb2 import Value as ValueProto\nfrom feast.repo_config import RepoConfig\nfrom feast.usage import log_exceptions_and_usage\n\n\nclass PostgreSQLOnlineStoreConfig(PostgreSQLConfig):\n type: Literal[\"postgres\"] = \"postgres\"\n\n\nclass PostgreSQLOnlineStore(OnlineStore):\n _conn: Optional[psycopg2._psycopg.connection] = None\n _conn_pool: Optional[SimpleConnectionPool] = None\n\n @contextlib.contextmanager\n def _get_conn(self, config: RepoConfig):\n assert config.online_store.type == \"postgres\"\n if config.online_store.conn_type == ConnectionType.pool:\n if not self._conn_pool:\n self._conn_pool = _get_connection_pool(config.online_store)\n connection = self._conn_pool.getconn()\n yield connection\n self._conn_pool.putconn(connection)\n else:\n if not self._conn:\n self._conn = _get_conn(config.online_store)\n yield self._conn\n\n @log_exceptions_and_usage(online_store=\"postgres\")\n def online_write_batch(\n self,\n config: RepoConfig,\n table: FeatureView,\n data: List[\n Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]\n ],\n progress: Optional[Callable[[int], Any]],\n ) -> None:\n project = config.project\n\n with self._get_conn(config) as conn, conn.cursor() as cur:\n insert_values = []\n for entity_key, values, timestamp, created_ts in data:\n entity_key_bin = serialize_entity_key(\n entity_key,\n entity_key_serialization_version=config.entity_key_serialization_version,\n )\n timestamp = _to_naive_utc(timestamp)\n if created_ts is not None:\n created_ts = _to_naive_utc(created_ts)\n\n for feature_name, val in values.items():\n insert_values.append(\n (\n entity_key_bin,\n feature_name,\n val.SerializeToString(),\n timestamp,\n created_ts,\n )\n )\n # Control the batch so that we can update the progress\n batch_size = 5000\n for i in range(0, len(insert_values), batch_size):\n cur_batch = insert_values[i : i + batch_size]\n execute_values(\n cur,\n sql.SQL(\n \"\"\"\n INSERT INTO {}\n (entity_key, feature_name, value, event_ts, created_ts)\n VALUES %s\n ON CONFLICT (entity_key, feature_name) DO\n UPDATE SET\n value = EXCLUDED.value,\n event_ts = EXCLUDED.event_ts,\n created_ts = EXCLUDED.created_ts;\n \"\"\",\n ).format(sql.Identifier(_table_id(project, table))),\n cur_batch,\n page_size=batch_size,\n )\n conn.commit()\n if progress:\n progress(len(cur_batch))\n\n @log_exceptions_and_usage(online_store=\"postgres\")\n def online_read(\n self,\n config: RepoConfig,\n table: FeatureView,\n entity_keys: List[EntityKeyProto],\n requested_features: Optional[List[str]] = None,\n ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:\n result: List[Tuple[Optional[datetime], 
Optional[Dict[str, ValueProto]]]] = []\n\n project = config.project\n with self._get_conn(config) as conn, conn.cursor() as cur:\n # Collecting all the keys to a list allows us to make fewer round trips\n # to PostgreSQL\n keys = []\n for entity_key in entity_keys:\n keys.append(\n serialize_entity_key(\n entity_key,\n entity_key_serialization_version=config.entity_key_serialization_version,\n )\n )\n\n if not requested_features:\n cur.execute(\n sql.SQL(\n \"\"\"\n SELECT entity_key, feature_name, value, event_ts\n FROM {} WHERE entity_key = ANY(%s);\n \"\"\"\n ).format(\n sql.Identifier(_table_id(project, table)),\n ),\n (keys,),\n )\n else:\n cur.execute(\n sql.SQL(\n \"\"\"\n SELECT entity_key, feature_name, value, event_ts\n FROM {} WHERE entity_key = ANY(%s) and feature_name = ANY(%s);\n \"\"\"\n ).format(\n sql.Identifier(_table_id(project, table)),\n ),\n (keys, requested_features),\n )\n\n rows = cur.fetchall()\n\n # Since we don't know the order returned from PostgreSQL we'll need\n # to construct a dict to be able to quickly look up the correct row\n # when we iterate through the keys since they are in the correct order\n values_dict = defaultdict(list)\n for row in rows if rows is not None else []:\n values_dict[row[0].tobytes()].append(row[1:])\n\n for key in keys:\n if key in values_dict:\n value = values_dict[key]\n res = {}\n for feature_name, value_bin, event_ts in value:\n val = ValueProto()\n val.ParseFromString(bytes(value_bin))\n res[feature_name] = val\n result.append((event_ts, res))\n else:\n result.append((None, None))\n\n return result\n\n @log_exceptions_and_usage(online_store=\"postgres\")\n def update(\n self,\n config: RepoConfig,\n tables_to_delete: Sequence[FeatureView],\n tables_to_keep: Sequence[FeatureView],\n entities_to_delete: Sequence[Entity],\n entities_to_keep: Sequence[Entity],\n partial: bool,\n ):\n project = config.project\n schema_name = config.online_store.db_schema or config.online_store.user\n with self._get_conn(config) as conn, conn.cursor() as cur:\n # If a db_schema is provided, then that schema gets created if it doesn't\n # exist. 
Else a schema is created for the feature store user.\n\n cur.execute(\n \"\"\"\n SELECT schema_name\n FROM information_schema.schemata\n WHERE schema_name = %s\n \"\"\",\n (schema_name,),\n )\n schema_exists = cur.fetchone()\n if not schema_exists:\n cur.execute(\n sql.SQL(\"CREATE SCHEMA IF NOT EXISTS {} AUTHORIZATION {}\").format(\n sql.Identifier(schema_name),\n sql.Identifier(config.online_store.user),\n ),\n )\n\n for table in tables_to_delete:\n table_name = _table_id(project, table)\n cur.execute(_drop_table_and_index(table_name))\n\n for table in tables_to_keep:\n table_name = _table_id(project, table)\n cur.execute(\n sql.SQL(\n \"\"\"\n CREATE TABLE IF NOT EXISTS {}\n (\n entity_key BYTEA,\n feature_name TEXT,\n value BYTEA,\n event_ts TIMESTAMPTZ,\n created_ts TIMESTAMPTZ,\n PRIMARY KEY(entity_key, feature_name)\n );\n CREATE INDEX IF NOT EXISTS {} ON {} (entity_key);\n \"\"\"\n ).format(\n sql.Identifier(table_name),\n sql.Identifier(f\"{table_name}_ek\"),\n sql.Identifier(table_name),\n )\n )\n\n conn.commit()\n\n def teardown(\n self,\n config: RepoConfig,\n tables: Sequence[FeatureView],\n entities: Sequence[Entity],\n ):\n project = config.project\n try:\n with self._get_conn(config) as conn, conn.cursor() as cur:\n for table in tables:\n table_name = _table_id(project, table)\n cur.execute(_drop_table_and_index(table_name))\n except Exception:\n logging.exception(\"Teardown failed\")\n raise\n\n\ndef _table_id(project: str, table: FeatureView) -> str:\n return f\"{project}_{table.name}\"\n\n\ndef _drop_table_and_index(table_name):\n return sql.SQL(\n \"\"\"\n DROP TABLE IF EXISTS {};\n DROP INDEX IF EXISTS {};\n \"\"\"\n ).format(\n sql.Identifier(table_name),\n sql.Identifier(f\"{table_name}_ek\"),\n )\n\n\ndef _to_naive_utc(ts: datetime):\n if ts.tzinfo is None:\n return ts\n else:\n return ts.astimezone(pytz.utc).replace(tzinfo=None)\n", "path": "sdk/python/feast/infra/online_stores/contrib/postgres.py" } ]
diff --git a/sdk/python/feast/infra/online_stores/contrib/postgres.py b/sdk/python/feast/infra/online_stores/contrib/postgres.py index a12e66f1090..49f87ddb0ae 100644 --- a/sdk/python/feast/infra/online_stores/contrib/postgres.py +++ b/sdk/python/feast/infra/online_stores/contrib/postgres.py @@ -99,6 +99,7 @@ def online_write_batch( cur_batch, page_size=batch_size, ) + conn.commit() if progress: progress(len(cur_batch))
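A note on where the added `conn.commit()` sits: committing inside the batch loop makes each 5,000-row batch durable before `progress` is reported, at the cost of one transaction per batch; a single commit after the loop, or enabling connection-level autocommit, would be the obvious alternatives. A quick end-to-end sanity check of the fix, assuming the repro objects above have been applied and materialized (the entity key value here is hypothetical):

```python
from feast import FeatureStore

store = FeatureStore()
response = store.get_online_features(
    features=["feature_view_name:feature_1", "feature_view_name:feature_2"],
    entity_rows=[{"entity_ID": "some_id"}],  # hypothetical entity key value
).to_dict()
print(response)  # non-None feature values mean the online table was written
```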
netbox-community__netbox-15568
Typo in Tag model ### Deployment Type Self-hosted ### NetBox Version v3.7.4 ### Python Version 3.8 ### Steps to Reproduce Typo in help_text where "this" is mistakenly repeated. https://github.com/netbox-community/netbox/blob/69c0aac1051015660133b2ae3c86607dabd8084b/netbox/extras/models/tags.py#L40 ### Expected Behavior The object type(s) to which this tag can be applied. ### Observed Behavior The object type(s) to which this this tag can be applied.
[ { "content": "from django.conf import settings\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.text import slugify\nfrom django.utils.translation import gettext_lazy as _\nfrom taggit.models import TagBase, GenericTaggedItemBase\n\nfrom netbox.models import ChangeLoggedModel\nfrom netbox.models.features import CloningMixin, ExportTemplatesMixin\nfrom utilities.choices import ColorChoices\nfrom utilities.fields import ColorField\n\n__all__ = (\n 'Tag',\n 'TaggedItem',\n)\n\n\n#\n# Tags\n#\n\nclass Tag(CloningMixin, ExportTemplatesMixin, ChangeLoggedModel, TagBase):\n id = models.BigAutoField(\n primary_key=True\n )\n color = ColorField(\n verbose_name=_('color'),\n default=ColorChoices.COLOR_GREY\n )\n description = models.CharField(\n verbose_name=_('description'),\n max_length=200,\n blank=True,\n )\n object_types = models.ManyToManyField(\n to='contenttypes.ContentType',\n related_name='+',\n blank=True,\n help_text=_(\"The object type(s) to which this this tag can be applied.\")\n )\n\n clone_fields = (\n 'color', 'description', 'object_types',\n )\n\n class Meta:\n ordering = ['name']\n verbose_name = _('tag')\n verbose_name_plural = _('tags')\n\n def get_absolute_url(self):\n return reverse('extras:tag', args=[self.pk])\n\n @property\n def docs_url(self):\n return f'{settings.STATIC_URL}docs/models/extras/tag/'\n\n def slugify(self, tag, i=None):\n # Allow Unicode in Tag slugs (avoids empty slugs for Tags with all-Unicode names)\n slug = slugify(tag, allow_unicode=True)\n if i is not None:\n slug += \"_%d\" % i\n return slug\n\n\nclass TaggedItem(GenericTaggedItemBase):\n tag = models.ForeignKey(\n to=Tag,\n related_name=\"%(app_label)s_%(class)s_items\",\n on_delete=models.CASCADE\n )\n\n _netbox_private = True\n\n class Meta:\n indexes = [models.Index(fields=[\"content_type\", \"object_id\"])]\n verbose_name = _('tagged item')\n verbose_name_plural = _('tagged items')\n", "path": "netbox/extras/models/tags.py" } ]
[ { "content": "from django.conf import settings\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.text import slugify\nfrom django.utils.translation import gettext_lazy as _\nfrom taggit.models import TagBase, GenericTaggedItemBase\n\nfrom netbox.models import ChangeLoggedModel\nfrom netbox.models.features import CloningMixin, ExportTemplatesMixin\nfrom utilities.choices import ColorChoices\nfrom utilities.fields import ColorField\n\n__all__ = (\n 'Tag',\n 'TaggedItem',\n)\n\n\n#\n# Tags\n#\n\nclass Tag(CloningMixin, ExportTemplatesMixin, ChangeLoggedModel, TagBase):\n id = models.BigAutoField(\n primary_key=True\n )\n color = ColorField(\n verbose_name=_('color'),\n default=ColorChoices.COLOR_GREY\n )\n description = models.CharField(\n verbose_name=_('description'),\n max_length=200,\n blank=True,\n )\n object_types = models.ManyToManyField(\n to='contenttypes.ContentType',\n related_name='+',\n blank=True,\n help_text=_(\"The object type(s) to which this tag can be applied.\")\n )\n\n clone_fields = (\n 'color', 'description', 'object_types',\n )\n\n class Meta:\n ordering = ['name']\n verbose_name = _('tag')\n verbose_name_plural = _('tags')\n\n def get_absolute_url(self):\n return reverse('extras:tag', args=[self.pk])\n\n @property\n def docs_url(self):\n return f'{settings.STATIC_URL}docs/models/extras/tag/'\n\n def slugify(self, tag, i=None):\n # Allow Unicode in Tag slugs (avoids empty slugs for Tags with all-Unicode names)\n slug = slugify(tag, allow_unicode=True)\n if i is not None:\n slug += \"_%d\" % i\n return slug\n\n\nclass TaggedItem(GenericTaggedItemBase):\n tag = models.ForeignKey(\n to=Tag,\n related_name=\"%(app_label)s_%(class)s_items\",\n on_delete=models.CASCADE\n )\n\n _netbox_private = True\n\n class Meta:\n indexes = [models.Index(fields=[\"content_type\", \"object_id\"])]\n verbose_name = _('tagged item')\n verbose_name_plural = _('tagged items')\n", "path": "netbox/extras/models/tags.py" } ]
diff --git a/netbox/extras/models/tags.py b/netbox/extras/models/tags.py index 3aba6df60ab..97b9087c593 100644 --- a/netbox/extras/models/tags.py +++ b/netbox/extras/models/tags.py @@ -37,7 +37,7 @@ class Tag(CloningMixin, ExportTemplatesMixin, ChangeLoggedModel, TagBase): to='contenttypes.ContentType', related_name='+', blank=True, - help_text=_("The object type(s) to which this this tag can be applied.") + help_text=_("The object type(s) to which this tag can be applied.") ) clone_fields = (
cisagov__manage.get.gov-726
Create a Staging Environment in Cloud.gov
### User Story
Developers and Reviewers require a Staging environment in cloud.gov to support using the stable environment as production.

### Acceptance Criteria
Update the environment creation script to produce and maintain a cloud.gov staging environment.

### Additional Context (optional)
_No response_

### Issue Links (optional)
_No response_
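The application-side piece of this change, visible in the after_files below, is a one-line addition to Django's `ALLOWED_HOSTS`; a minimal sketch of the relevant setting (most sandbox hosts elided):

```python
# settings.py (sketch): Django rejects any request whose Host header is not
# listed here, so each cloud.gov route (including the new staging app)
# must be enumerated.
ALLOWED_HOSTS = [
    "getgov-stable.app.cloud.gov",
    "getgov-staging.app.cloud.gov",  # added for the staging environment
    "get.gov",
    # ...developer sandbox hosts elided
]
```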
[ { "content": "\"\"\"\nDjango settings for .gov registrar project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/4.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/4.0/ref/settings/\n\nIF you'd like to see all of these settings in the running app:\n\n```shell\n$ docker-compose exec app python manage.py shell\n>>> from django.conf import settings\n>>> dir(settings)\n```\n\n\"\"\"\nimport environs\nfrom base64 import b64decode\nfrom cfenv import AppEnv # type: ignore\nfrom pathlib import Path\nfrom typing import Final\n\nfrom botocore.config import Config\n\n# # # ###\n# Setup code goes here #\n# # # ###\n\nenv = environs.Env()\n\n# Get secrets from Cloud.gov user provided service, if exists\n# If not, get secrets from environment variables\nkey_service = AppEnv().get_service(name=\"getgov-credentials\")\nif key_service and key_service.credentials:\n secret = key_service.credentials.get\nelse:\n secret = env\n\n# # # ###\n# Values obtained externally #\n# # # ###\n\npath = Path(__file__)\n\nenv_db_url = env.dj_db_url(\"DATABASE_URL\")\nenv_debug = env.bool(\"DJANGO_DEBUG\", default=False)\nenv_log_level = env.str(\"DJANGO_LOG_LEVEL\", \"DEBUG\")\nenv_base_url = env.str(\"DJANGO_BASE_URL\")\nenv_getgov_public_site_url = env.str(\"GETGOV_PUBLIC_SITE_URL\", \"\")\n\nsecret_login_key = b64decode(secret(\"DJANGO_SECRET_LOGIN_KEY\", \"\"))\nsecret_key = secret(\"DJANGO_SECRET_KEY\")\n\nsecret_aws_ses_key_id = secret(\"AWS_ACCESS_KEY_ID\", None)\nsecret_aws_ses_key = secret(\"AWS_SECRET_ACCESS_KEY\", None)\n\nsecret_registry_cl_id = secret(\"REGISTRY_CL_ID\")\nsecret_registry_password = secret(\"REGISTRY_PASSWORD\")\nsecret_registry_cert = b64decode(secret(\"REGISTRY_CERT\", \"\"))\nsecret_registry_key = b64decode(secret(\"REGISTRY_KEY\", \"\"))\nsecret_registry_key_passphrase = secret(\"REGISTRY_KEY_PASSPHRASE\", \"\")\nsecret_registry_hostname = secret(\"REGISTRY_HOSTNAME\")\n\n# region: Basic Django Config-----------------------------------------------###\n\n# Build paths inside the project like this: BASE_DIR / \"subdir\".\n# (settings.py is in `src/registrar/config/`: BASE_DIR is `src/`)\nBASE_DIR = path.resolve().parent.parent.parent\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env_debug\n\n\n# Applications are modular pieces of code.\n# They are provided by Django, by third-parties, or by yourself.\n# Installing them here makes them available for execution.\n# Do not access INSTALLED_APPS directly. Use `django.apps.apps` instead.\nINSTALLED_APPS = [\n # Django automatic admin interface reads metadata\n # from database models to provide a quick, model-centric\n # interface where trusted users can manage content\n \"django.contrib.admin\",\n # vv Required by django.contrib.admin vv\n # the \"user\" model! 
*\\o/*\n \"django.contrib.auth\",\n # generic interface for Django models\n \"django.contrib.contenttypes\",\n # required for CSRF protection and many other things\n \"django.contrib.sessions\",\n # framework for displaying messages to the user\n \"django.contrib.messages\",\n # ^^ Required by django.contrib.admin ^^\n # collects static files from each of your applications\n # (and any other places you specify) into a single location\n # that can easily be served in production\n \"django.contrib.staticfiles\",\n # application used for integrating with Login.gov\n \"djangooidc\",\n # audit logging of changes to models\n \"auditlog\",\n # library to simplify form templating\n \"widget_tweaks\",\n # library for Finite State Machine statuses\n \"django_fsm\",\n # library for phone numbers\n \"phonenumber_field\",\n # let's be sure to install our own application!\n \"registrar\",\n # Our internal API application\n \"api\",\n # Only for generating documentation, uncomment to run manage.py generate_puml\n # \"puml_generator\",\n]\n\n# Middleware are routines for processing web requests.\n# Adding them here turns them \"on\"; Django will perform the\n# specified routines on each incoming request and outgoing response.\nMIDDLEWARE = [\n # django-allow-cidr: enable use of CIDR IP ranges in ALLOWED_HOSTS\n \"allow_cidr.middleware.AllowCIDRMiddleware\",\n # serve static assets in production\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n # provide security enhancements to the request/response cycle\n \"django.middleware.security.SecurityMiddleware\",\n # store and retrieve arbitrary data on a per-site-visitor basis\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n # add a few conveniences for perfectionists, see documentation\n \"django.middleware.common.CommonMiddleware\",\n # add protection against Cross Site Request Forgeries by adding\n # hidden form fields to POST forms and checking requests for the correct value\n \"django.middleware.csrf.CsrfViewMiddleware\",\n # add `user` (the currently-logged-in user) to incoming HttpRequest objects\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n # Require login for every single request by default\n \"login_required.middleware.LoginRequiredMiddleware\",\n # provide framework for displaying messages to the user, see documentation\n \"django.contrib.messages.middleware.MessageMiddleware\",\n # provide clickjacking protection via the X-Frame-Options header\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n # django-csp: enable use of Content-Security-Policy header\n \"csp.middleware.CSPMiddleware\",\n # django-auditlog: obtain the request User for use in logging\n \"auditlog.middleware.AuditlogMiddleware\",\n]\n\n# application object used by Django’s built-in servers (e.g. `runserver`)\nWSGI_APPLICATION = \"registrar.config.wsgi.application\"\n\n# endregion\n# region: Assets and HTML and Caching---------------------------------------###\n\n# https://docs.djangoproject.com/en/4.0/howto/static-files/\n\n\n# Caching is disabled by default.\n# For a low to medium traffic site, caching causes more\n# problems than it solves. 
Should caching be desired,\n# a reasonable start might be:\n# CACHES = {\n# \"default\": {\n# \"BACKEND\": \"django.core.cache.backends.db.DatabaseCache\",\n# }\n# }\n\n# Absolute path to the directory where `collectstatic`\n# will place static files for deployment.\n# Do not use this directory for permanent storage -\n# it is for Django!\nSTATIC_ROOT = BASE_DIR / \"registrar\" / \"public\"\n\nSTATICFILES_DIRS = [\n BASE_DIR / \"registrar\" / \"assets\",\n]\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [BASE_DIR / \"registrar\" / \"templates\"],\n # look for templates inside installed apps\n # required by django-debug-toolbar\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n # IMPORTANT security setting: escapes HTMLEntities,\n # helping to prevent XSS attacks\n \"autoescape\": True,\n # context processors are callables which return\n # dicts - Django merges them into the context\n # dictionary used to render the templates\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"registrar.context_processors.language_code\",\n \"registrar.context_processors.canonical_path\",\n \"registrar.context_processors.is_demo_site\",\n ],\n },\n },\n]\n\n# Stop using table-based default form renderer which is deprecated\nFORM_RENDERER = \"django.forms.renderers.DjangoDivFormRenderer\"\n\nMESSAGE_STORAGE = \"django.contrib.messages.storage.session.SessionStorage\"\n\n# IS_DEMO_SITE controls whether or not we show our big red \"TEST SITE\" banner\n# underneath the \"this is a real government website\" banner.\nIS_DEMO_SITE = True\n\n# endregion\n# region: Database----------------------------------------------------------###\n\n# Wrap each view in a transaction on the database\n# A decorator can be used for views which have no database activity:\n# from django.db import transaction\n# @transaction.non_atomic_requests\nenv_db_url[\"ATOMIC_REQUESTS\"] = True\n\nDATABASES = {\n # dj-database-url package takes the supplied Postgres connection string\n # and converts it into a dictionary with the correct USER, HOST, etc\n \"default\": env_db_url,\n}\n\n# Specify default field type to use for primary keys\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\n\n# Use our user model instead of the default\nAUTH_USER_MODEL = \"registrar.User\"\n\n# endregion\n# region: Email-------------------------------------------------------------###\n\n# Configuration for accessing AWS SES\nAWS_ACCESS_KEY_ID = secret_aws_ses_key_id\nAWS_SECRET_ACCESS_KEY = secret_aws_ses_key\nAWS_REGION = \"us-gov-west-1\"\n# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/retries.html#standard-retry-mode\nAWS_RETRY_MODE: Final = \"standard\"\n# base 2 exponential backoff with max of 20 seconds:\nAWS_MAX_ATTEMPTS = 3\nBOTO_CONFIG = Config(retries={\"mode\": AWS_RETRY_MODE, \"max_attempts\": AWS_MAX_ATTEMPTS})\n\n# email address to use for various automated correspondence\n# TODO: pick something sensible here\nDEFAULT_FROM_EMAIL = \"[email protected]\"\n\n# connect to an (external) SMTP server for sending email\nEMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n\n# TODO: configure these when the values are known\n# EMAIL_HOST = \"\"\n# EMAIL_HOST_PASSWORD = \"\"\n# EMAIL_HOST_USER = \"\"\n# EMAIL_PORT = 587\n\n# for mail sent with mail_admins or mail_managers\nEMAIL_SUBJECT_PREFIX = 
\"[Attn: .gov admin] \"\n\n# use a TLS (secure) connection when talking to the SMTP server\n# TLS generally uses port 587\nEMAIL_USE_TLS = True\n\n# mutually exclusive with EMAIL_USE_TLS = True\n# SSL generally uses port 465\nEMAIL_USE_SSL = False\n\n# timeout in seconds for blocking operations, like the connection attempt\nEMAIL_TIMEOUT = 30\n\n# email address to use for sending error reports\nSERVER_EMAIL = \"[email protected]\"\n\n# endregion\n# region: Headers-----------------------------------------------------------###\n\n# Content-Security-Policy configuration\n# this can be restrictive because we have few external scripts\nallowed_sources = (\"'self'\",)\nCSP_DEFAULT_SRC = allowed_sources\n# Most things fall back to default-src, but these two do not and should be\n# explicitly set\nCSP_FRAME_ANCESTORS = allowed_sources\nCSP_FORM_ACTION = allowed_sources\n\n\n# Content-Length header is set by django.middleware.common.CommonMiddleware\n\n# X-Frame-Options header is set by\n# django.middleware.clickjacking.XFrameOptionsMiddleware\n# and configured in the Security and Privacy section of this file.\n# Strict-Transport-Security is set by django.middleware.security.SecurityMiddleware\n# and configured in the Security and Privacy section of this file.\n\n# prefer contents of X-Forwarded-Host header to Host header\n# as Host header may contain a proxy rather than the actual client\nUSE_X_FORWARDED_HOST = True\n\n# endregion\n# region: Internationalisation----------------------------------------------###\n\n# https://docs.djangoproject.com/en/4.0/topics/i18n/\n\n# Charset to use for HttpResponse objects; used in Content-Type header\nDEFAULT_CHARSET = \"utf-8\"\n\n# provide fallback language if translation file is missing or\n# user's locale is not supported - requires USE_I18N = True\nLANGUAGE_CODE = \"en-us\"\n\n# allows language cookie to be sent if the user\n# is coming to our site from an external page.\nLANGUAGE_COOKIE_SAMESITE = None\n\n# only send via HTTPS connection\nLANGUAGE_COOKIE_SECURE = True\n\n# to display datetimes in templates\n# and to interpret datetimes entered in forms\nTIME_ZONE = \"UTC\"\n\n# enable Django’s translation system\nUSE_I18N = True\n\n# enable localized formatting of numbers and dates\nUSE_L10N = True\n\n# make datetimes timezone-aware by default\nUSE_TZ = True\n\n# setting for phonenumber library\nPHONENUMBER_DEFAULT_REGION = \"US\"\n\n# endregion\n# region: Logging-----------------------------------------------------------###\n\n# A Python logging configuration consists of four parts:\n# Loggers\n# Handlers\n# Filters\n# Formatters\n# https://docs.djangoproject.com/en/4.1/topics/logging/\n\n# Log a message by doing this:\n#\n# import logging\n# logger = logging.getLogger(__name__)\n#\n# Then:\n#\n# logger.debug(\"We're about to execute function xyz. Wish us luck!\")\n# logger.info(\"Oh! Here's something you might want to know.\")\n# logger.warning(\"Something kinda bad happened.\")\n# logger.error(\"Can't do this important task. 
Something is very wrong.\")\n# logger.critical(\"Going to crash now.\")\n\nLOGGING = {\n \"version\": 1,\n # Don't import Django's existing loggers\n \"disable_existing_loggers\": True,\n # define how to convert log messages into text;\n # each handler has its choice of format\n \"formatters\": {\n \"verbose\": {\n \"format\": \"[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] \"\n \"%(message)s\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n },\n \"simple\": {\n \"format\": \"%(levelname)s %(message)s\",\n },\n \"django.server\": {\n \"()\": \"django.utils.log.ServerFormatter\",\n \"format\": \"[{server_time}] {message}\",\n \"style\": \"{\",\n },\n },\n # define where log messages will be sent;\n # each logger can have one or more handlers\n \"handlers\": {\n \"console\": {\n \"level\": env_log_level,\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n \"django.server\": {\n \"level\": \"INFO\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"django.server\",\n },\n # No file logger is configured,\n # because containerized apps\n # do not log to the file system.\n },\n # define loggers: these are \"sinks\" into which\n # messages are sent for processing\n \"loggers\": {\n # Django's generic logger\n \"django\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django's template processor\n \"django.template\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django's runserver\n \"django.server\": {\n \"handlers\": [\"django.server\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django's runserver requests\n \"django.request\": {\n \"handlers\": [\"django.server\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # OpenID Connect logger\n \"oic\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django wrapper for OpenID Connect\n \"djangooidc\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Our app!\n \"registrar\": {\n \"handlers\": [\"console\"],\n \"level\": \"DEBUG\",\n \"propagate\": False,\n },\n },\n # root logger catches anything, unless\n # defined by a more specific logger\n \"root\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n },\n}\n\n# endregion\n# region: Login-------------------------------------------------------------###\n\n# list of Python classes used when trying to authenticate a user\nAUTHENTICATION_BACKENDS = [\n \"django.contrib.auth.backends.ModelBackend\",\n \"djangooidc.backends.OpenIdConnectBackend\",\n]\n\n# this is where unauthenticated requests are redirected when using\n# the login_required() decorator, LoginRequiredMixin, or AccessMixin\nLOGIN_URL = \"/openid/login\"\n\n# We don't want the OIDC app to be login-required because then it can't handle\n# the initial login requests without erroring.\nLOGIN_REQUIRED_IGNORE_PATHS = [\n r\"/openid/(.+)$\",\n]\n\n# where to go after logging out\nLOGOUT_REDIRECT_URL = \"home\"\n\n# disable dynamic client registration,\n# only the OP inside OIDC_PROVIDERS will be available\nOIDC_ALLOW_DYNAMIC_OP = False\n\n# which provider to use if multiple are available\n# (code does not currently support user selection)\nOIDC_ACTIVE_PROVIDER = \"login.gov\"\n\n\nOIDC_PROVIDERS = {\n \"login.gov\": {\n \"srv_discovery_url\": \"https://idp.int.identitysandbox.gov\",\n \"behaviour\": {\n # the 'code' workflow requires direct connectivity from us to Login.gov\n \"response_type\": \"code\",\n \"scope\": 
[\"email\", \"profile:name\", \"phone\"],\n \"user_info_request\": [\"email\", \"first_name\", \"last_name\", \"phone\"],\n \"acr_value\": \"http://idmanagement.gov/ns/assurance/ial/2\",\n },\n \"client_registration\": {\n \"client_id\": \"cisa_dotgov_registrar\",\n \"redirect_uris\": [f\"{env_base_url}/openid/callback/login/\"],\n \"post_logout_redirect_uris\": [f\"{env_base_url}/openid/callback/logout/\"],\n \"token_endpoint_auth_method\": [\"private_key_jwt\"],\n \"sp_private_key\": secret_login_key,\n },\n }\n}\n\n# endregion\n# region: Routing-----------------------------------------------------------###\n\n# ~ Set by django.middleware.common.CommonMiddleware\n# APPEND_SLASH = True\n# PREPEND_WWW = False\n\n# full Python import path to the root URLconf\nROOT_URLCONF = \"registrar.config.urls\"\n\n# URL to use when referring to static files located in STATIC_ROOT\n# Must be relative and end with \"/\"\nSTATIC_URL = \"public/\"\n\n# Base URL of our separate static public website. Used by the\n# {% public_site_url subdir/path %} template tag\nGETGOV_PUBLIC_SITE_URL = env_getgov_public_site_url\n\n# endregion\n# region: Registry----------------------------------------------------------###\n\n# SECURITY WARNING: keep all registry variables in production secret!\nSECRET_REGISTRY_CL_ID = secret_registry_cl_id\nSECRET_REGISTRY_PASSWORD = secret_registry_password\nSECRET_REGISTRY_CERT = secret_registry_cert\nSECRET_REGISTRY_KEY = secret_registry_key\nSECRET_REGISTRY_KEY_PASSPHRASE = secret_registry_key_passphrase\nSECRET_REGISTRY_HOSTNAME = secret_registry_hostname\n\n# endregion\n# region: Security and Privacy----------------------------------------------###\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = secret_key\n\n# Use this variable for doing SECRET_KEY rotation, see documentation\nSECRET_KEY_FALLBACKS: \"list[str]\" = []\n\n# ~ Set by django.middleware.security.SecurityMiddleware\n# SECURE_CONTENT_TYPE_NOSNIFF = True\n# SECURE_CROSS_ORIGIN_OPENER_POLICY = \"same-origin\"\n# SECURE_REDIRECT_EXEMPT = []\n# SECURE_REFERRER_POLICY = \"same-origin\"\n# SECURE_SSL_HOST = None\n\n# ~ Overridden from django.middleware.security.SecurityMiddleware\n# adds the includeSubDomains directive to the HTTP Strict Transport Security header\nSECURE_HSTS_INCLUDE_SUBDOMAINS = True\n# adds the preload directive to the HTTP Strict Transport Security header\nSECURE_HSTS_PRELOAD = True\n# TODO: set this value to 31536000 (1 year) for production\nSECURE_HSTS_SECONDS = 300\n# redirect all non-HTTPS requests to HTTPS\nSECURE_SSL_REDIRECT = True\n\n# ~ Set by django.middleware.common.CommonMiddleware\n# DISALLOWED_USER_AGENTS = []\n\n# The host/domain names that Django can serve.\n# This is a security measure to prevent HTTP Host header attacks,\n# which are possible even under many seemingly-safe\n# web server configurations.\nALLOWED_HOSTS = [\n \"getgov-stable.app.cloud.gov\",\n \"getgov-gd.app.cloud.gov\",\n \"getgov-rb.app.cloud.gov\",\n \"getgov-ko.app.cloud.gov\",\n \"getgov-ab.app.cloud.gov\",\n \"getgov-bl.app.cloud.gov\",\n \"getgov-rjm.app.cloud.gov\",\n \"get.gov\",\n]\n\n# Extend ALLOWED_HOSTS.\n# IP addresses can also be hosts, which are used by internal\n# load balancers for health checks, etc.\nALLOWED_CIDR_NETS = [\"10.0.0.0/8\"]\n\n# ~ Below are some protections from cross-site request forgery.\n# This is canonically done by including a nonce value\n# in pages sent to the user, which the user is expected\n# to send back. 
The specifics of implementation are\n# intricate and varied.\n\n# Store the token server-side, do not send it\n# to the user via a cookie. This means each page\n# which requires protection must place the token\n# in the HTML explicitly, otherwise the user will\n# get a 403 error when they submit.\nCSRF_USE_SESSIONS = True\n\n# Expiry of CSRF cookie, in seconds.\n# None means \"use session-based CSRF cookies\".\nCSRF_COOKIE_AGE = None\n\n# Prevent JavaScript from reading the CSRF cookie.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_HTTPONLY = True\n\n# Only send the cookie via HTTPS connections.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_SECURE = True\n\n# Protect from non-targeted attacks by obscuring\n# the CSRF cookie name from the default.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_NAME = \"CrSiReFo\"\n\n# Prevents CSRF cookie from being sent if the user\n# is coming to our site from an external page.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_SAMESITE = \"Strict\"\n\n# Change header name to match cookie name.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_HEADER_NAME = \"HTTP_X_CRSIREFO\"\n\n# Max parameters that may be received via GET or POST\n# TODO: 1000 is the default, may need to tune upward for\n# large DNS zone files, if records are represented by\n# individual form fields.\nDATA_UPLOAD_MAX_NUMBER_FIELDS = 1000\n\n# age of session cookies, in seconds (28800 = 8 hours)\nSESSION_COOKIE_AGE = 28800\n\n# instruct the browser to forbid client-side JavaScript\n# from accessing the cookie\nSESSION_COOKIE_HTTPONLY = True\n\n# are we a spring boot application? who knows!\nSESSION_COOKIE_NAME = \"JSESSIONID\"\n\n# Allows session cookie to be sent if the user\n# is coming to our site from an external page\n# unless it is via \"risky\" paths, i.e. 
POST requests\nSESSION_COOKIE_SAMESITE = \"Lax\"\n\n# instruct browser to only send cookie via HTTPS\nSESSION_COOKIE_SECURE = True\n\n# ~ Set by django.middleware.clickjacking.XFrameOptionsMiddleware\n# prevent clickjacking by instructing the browser not to load\n# our site within an iframe\n# X_FRAME_OPTIONS = \"Deny\"\n\n# endregion\n# region: Testing-----------------------------------------------------------###\n\n# Additional directories searched for fixture files.\n# The fixtures directory of each application is searched by default.\n# Must use unix style \"/\" path separators.\nFIXTURE_DIRS: \"list[str]\" = []\n\n# endregion\n\n\n# # # ###\n# Development settings #\n# # # ###\n\nif DEBUG:\n # used by debug() context processor\n INTERNAL_IPS = [\n \"127.0.0.1\",\n \"::1\",\n ]\n\n # allow dev laptop and docker-compose network to connect\n ALLOWED_HOSTS += (\"localhost\", \"app\")\n SECURE_SSL_REDIRECT = False\n SECURE_HSTS_PRELOAD = False\n\n # discover potentially inefficient database queries\n # TODO: use settings overrides to ensure this always is True during tests\n INSTALLED_APPS += (\"nplusone.ext.django\",)\n MIDDLEWARE += (\"nplusone.ext.django.NPlusOneMiddleware\",)\n # turned off for now, because django-auditlog has some issues\n NPLUSONE_RAISE = False\n NPLUSONE_WHITELIST = [\n {\"model\": \"admin.LogEntry\", \"field\": \"user\"},\n ]\n\n # insert the amazing django-debug-toolbar\n INSTALLED_APPS += (\"debug_toolbar\",)\n MIDDLEWARE.insert(0, \"debug_toolbar.middleware.DebugToolbarMiddleware\")\n\n DEBUG_TOOLBAR_CONFIG = {\n # due to Docker, bypass Debug Toolbar's check on INTERNAL_IPS\n \"SHOW_TOOLBAR_CALLBACK\": lambda _: True,\n }\n", "path": "src/registrar/config/settings.py" } ]
[ { "content": "\"\"\"\nDjango settings for .gov registrar project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/4.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/4.0/ref/settings/\n\nIF you'd like to see all of these settings in the running app:\n\n```shell\n$ docker-compose exec app python manage.py shell\n>>> from django.conf import settings\n>>> dir(settings)\n```\n\n\"\"\"\nimport environs\nfrom base64 import b64decode\nfrom cfenv import AppEnv # type: ignore\nfrom pathlib import Path\nfrom typing import Final\n\nfrom botocore.config import Config\n\n# # # ###\n# Setup code goes here #\n# # # ###\n\nenv = environs.Env()\n\n# Get secrets from Cloud.gov user provided service, if exists\n# If not, get secrets from environment variables\nkey_service = AppEnv().get_service(name=\"getgov-credentials\")\nif key_service and key_service.credentials:\n secret = key_service.credentials.get\nelse:\n secret = env\n\n# # # ###\n# Values obtained externally #\n# # # ###\n\npath = Path(__file__)\n\nenv_db_url = env.dj_db_url(\"DATABASE_URL\")\nenv_debug = env.bool(\"DJANGO_DEBUG\", default=False)\nenv_log_level = env.str(\"DJANGO_LOG_LEVEL\", \"DEBUG\")\nenv_base_url = env.str(\"DJANGO_BASE_URL\")\nenv_getgov_public_site_url = env.str(\"GETGOV_PUBLIC_SITE_URL\", \"\")\n\nsecret_login_key = b64decode(secret(\"DJANGO_SECRET_LOGIN_KEY\", \"\"))\nsecret_key = secret(\"DJANGO_SECRET_KEY\")\n\nsecret_aws_ses_key_id = secret(\"AWS_ACCESS_KEY_ID\", None)\nsecret_aws_ses_key = secret(\"AWS_SECRET_ACCESS_KEY\", None)\n\nsecret_registry_cl_id = secret(\"REGISTRY_CL_ID\")\nsecret_registry_password = secret(\"REGISTRY_PASSWORD\")\nsecret_registry_cert = b64decode(secret(\"REGISTRY_CERT\", \"\"))\nsecret_registry_key = b64decode(secret(\"REGISTRY_KEY\", \"\"))\nsecret_registry_key_passphrase = secret(\"REGISTRY_KEY_PASSPHRASE\", \"\")\nsecret_registry_hostname = secret(\"REGISTRY_HOSTNAME\")\n\n# region: Basic Django Config-----------------------------------------------###\n\n# Build paths inside the project like this: BASE_DIR / \"subdir\".\n# (settings.py is in `src/registrar/config/`: BASE_DIR is `src/`)\nBASE_DIR = path.resolve().parent.parent.parent\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env_debug\n\n\n# Applications are modular pieces of code.\n# They are provided by Django, by third-parties, or by yourself.\n# Installing them here makes them available for execution.\n# Do not access INSTALLED_APPS directly. Use `django.apps.apps` instead.\nINSTALLED_APPS = [\n # Django automatic admin interface reads metadata\n # from database models to provide a quick, model-centric\n # interface where trusted users can manage content\n \"django.contrib.admin\",\n # vv Required by django.contrib.admin vv\n # the \"user\" model! 
*\\o/*\n \"django.contrib.auth\",\n # generic interface for Django models\n \"django.contrib.contenttypes\",\n # required for CSRF protection and many other things\n \"django.contrib.sessions\",\n # framework for displaying messages to the user\n \"django.contrib.messages\",\n # ^^ Required by django.contrib.admin ^^\n # collects static files from each of your applications\n # (and any other places you specify) into a single location\n # that can easily be served in production\n \"django.contrib.staticfiles\",\n # application used for integrating with Login.gov\n \"djangooidc\",\n # audit logging of changes to models\n \"auditlog\",\n # library to simplify form templating\n \"widget_tweaks\",\n # library for Finite State Machine statuses\n \"django_fsm\",\n # library for phone numbers\n \"phonenumber_field\",\n # let's be sure to install our own application!\n \"registrar\",\n # Our internal API application\n \"api\",\n # Only for generating documentation, uncomment to run manage.py generate_puml\n # \"puml_generator\",\n]\n\n# Middleware are routines for processing web requests.\n# Adding them here turns them \"on\"; Django will perform the\n# specified routines on each incoming request and outgoing response.\nMIDDLEWARE = [\n # django-allow-cidr: enable use of CIDR IP ranges in ALLOWED_HOSTS\n \"allow_cidr.middleware.AllowCIDRMiddleware\",\n # serve static assets in production\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n # provide security enhancements to the request/response cycle\n \"django.middleware.security.SecurityMiddleware\",\n # store and retrieve arbitrary data on a per-site-visitor basis\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n # add a few conveniences for perfectionists, see documentation\n \"django.middleware.common.CommonMiddleware\",\n # add protection against Cross Site Request Forgeries by adding\n # hidden form fields to POST forms and checking requests for the correct value\n \"django.middleware.csrf.CsrfViewMiddleware\",\n # add `user` (the currently-logged-in user) to incoming HttpRequest objects\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n # Require login for every single request by default\n \"login_required.middleware.LoginRequiredMiddleware\",\n # provide framework for displaying messages to the user, see documentation\n \"django.contrib.messages.middleware.MessageMiddleware\",\n # provide clickjacking protection via the X-Frame-Options header\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n # django-csp: enable use of Content-Security-Policy header\n \"csp.middleware.CSPMiddleware\",\n # django-auditlog: obtain the request User for use in logging\n \"auditlog.middleware.AuditlogMiddleware\",\n]\n\n# application object used by Django’s built-in servers (e.g. `runserver`)\nWSGI_APPLICATION = \"registrar.config.wsgi.application\"\n\n# endregion\n# region: Assets and HTML and Caching---------------------------------------###\n\n# https://docs.djangoproject.com/en/4.0/howto/static-files/\n\n\n# Caching is disabled by default.\n# For a low to medium traffic site, caching causes more\n# problems than it solves. 
Should caching be desired,\n# a reasonable start might be:\n# CACHES = {\n# \"default\": {\n# \"BACKEND\": \"django.core.cache.backends.db.DatabaseCache\",\n# }\n# }\n\n# Absolute path to the directory where `collectstatic`\n# will place static files for deployment.\n# Do not use this directory for permanent storage -\n# it is for Django!\nSTATIC_ROOT = BASE_DIR / \"registrar\" / \"public\"\n\nSTATICFILES_DIRS = [\n BASE_DIR / \"registrar\" / \"assets\",\n]\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [BASE_DIR / \"registrar\" / \"templates\"],\n # look for templates inside installed apps\n # required by django-debug-toolbar\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n # IMPORTANT security setting: escapes HTMLEntities,\n # helping to prevent XSS attacks\n \"autoescape\": True,\n # context processors are callables which return\n # dicts - Django merges them into the context\n # dictionary used to render the templates\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"registrar.context_processors.language_code\",\n \"registrar.context_processors.canonical_path\",\n \"registrar.context_processors.is_demo_site\",\n ],\n },\n },\n]\n\n# Stop using table-based default form renderer which is deprecated\nFORM_RENDERER = \"django.forms.renderers.DjangoDivFormRenderer\"\n\nMESSAGE_STORAGE = \"django.contrib.messages.storage.session.SessionStorage\"\n\n# IS_DEMO_SITE controls whether or not we show our big red \"TEST SITE\" banner\n# underneath the \"this is a real government website\" banner.\nIS_DEMO_SITE = True\n\n# endregion\n# region: Database----------------------------------------------------------###\n\n# Wrap each view in a transaction on the database\n# A decorator can be used for views which have no database activity:\n# from django.db import transaction\n# @transaction.non_atomic_requests\nenv_db_url[\"ATOMIC_REQUESTS\"] = True\n\nDATABASES = {\n # dj-database-url package takes the supplied Postgres connection string\n # and converts it into a dictionary with the correct USER, HOST, etc\n \"default\": env_db_url,\n}\n\n# Specify default field type to use for primary keys\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\n\n# Use our user model instead of the default\nAUTH_USER_MODEL = \"registrar.User\"\n\n# endregion\n# region: Email-------------------------------------------------------------###\n\n# Configuration for accessing AWS SES\nAWS_ACCESS_KEY_ID = secret_aws_ses_key_id\nAWS_SECRET_ACCESS_KEY = secret_aws_ses_key\nAWS_REGION = \"us-gov-west-1\"\n# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/retries.html#standard-retry-mode\nAWS_RETRY_MODE: Final = \"standard\"\n# base 2 exponential backoff with max of 20 seconds:\nAWS_MAX_ATTEMPTS = 3\nBOTO_CONFIG = Config(retries={\"mode\": AWS_RETRY_MODE, \"max_attempts\": AWS_MAX_ATTEMPTS})\n\n# email address to use for various automated correspondence\n# TODO: pick something sensible here\nDEFAULT_FROM_EMAIL = \"[email protected]\"\n\n# connect to an (external) SMTP server for sending email\nEMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n\n# TODO: configure these when the values are known\n# EMAIL_HOST = \"\"\n# EMAIL_HOST_PASSWORD = \"\"\n# EMAIL_HOST_USER = \"\"\n# EMAIL_PORT = 587\n\n# for mail sent with mail_admins or mail_managers\nEMAIL_SUBJECT_PREFIX = 
\"[Attn: .gov admin] \"\n\n# use a TLS (secure) connection when talking to the SMTP server\n# TLS generally uses port 587\nEMAIL_USE_TLS = True\n\n# mutually exclusive with EMAIL_USE_TLS = True\n# SSL generally uses port 465\nEMAIL_USE_SSL = False\n\n# timeout in seconds for blocking operations, like the connection attempt\nEMAIL_TIMEOUT = 30\n\n# email address to use for sending error reports\nSERVER_EMAIL = \"[email protected]\"\n\n# endregion\n# region: Headers-----------------------------------------------------------###\n\n# Content-Security-Policy configuration\n# this can be restrictive because we have few external scripts\nallowed_sources = (\"'self'\",)\nCSP_DEFAULT_SRC = allowed_sources\n# Most things fall back to default-src, but these two do not and should be\n# explicitly set\nCSP_FRAME_ANCESTORS = allowed_sources\nCSP_FORM_ACTION = allowed_sources\n\n\n# Content-Length header is set by django.middleware.common.CommonMiddleware\n\n# X-Frame-Options header is set by\n# django.middleware.clickjacking.XFrameOptionsMiddleware\n# and configured in the Security and Privacy section of this file.\n# Strict-Transport-Security is set by django.middleware.security.SecurityMiddleware\n# and configured in the Security and Privacy section of this file.\n\n# prefer contents of X-Forwarded-Host header to Host header\n# as Host header may contain a proxy rather than the actual client\nUSE_X_FORWARDED_HOST = True\n\n# endregion\n# region: Internationalisation----------------------------------------------###\n\n# https://docs.djangoproject.com/en/4.0/topics/i18n/\n\n# Charset to use for HttpResponse objects; used in Content-Type header\nDEFAULT_CHARSET = \"utf-8\"\n\n# provide fallback language if translation file is missing or\n# user's locale is not supported - requires USE_I18N = True\nLANGUAGE_CODE = \"en-us\"\n\n# allows language cookie to be sent if the user\n# is coming to our site from an external page.\nLANGUAGE_COOKIE_SAMESITE = None\n\n# only send via HTTPS connection\nLANGUAGE_COOKIE_SECURE = True\n\n# to display datetimes in templates\n# and to interpret datetimes entered in forms\nTIME_ZONE = \"UTC\"\n\n# enable Django’s translation system\nUSE_I18N = True\n\n# enable localized formatting of numbers and dates\nUSE_L10N = True\n\n# make datetimes timezone-aware by default\nUSE_TZ = True\n\n# setting for phonenumber library\nPHONENUMBER_DEFAULT_REGION = \"US\"\n\n# endregion\n# region: Logging-----------------------------------------------------------###\n\n# A Python logging configuration consists of four parts:\n# Loggers\n# Handlers\n# Filters\n# Formatters\n# https://docs.djangoproject.com/en/4.1/topics/logging/\n\n# Log a message by doing this:\n#\n# import logging\n# logger = logging.getLogger(__name__)\n#\n# Then:\n#\n# logger.debug(\"We're about to execute function xyz. Wish us luck!\")\n# logger.info(\"Oh! Here's something you might want to know.\")\n# logger.warning(\"Something kinda bad happened.\")\n# logger.error(\"Can't do this important task. 
Something is very wrong.\")\n# logger.critical(\"Going to crash now.\")\n\nLOGGING = {\n \"version\": 1,\n # Don't import Django's existing loggers\n \"disable_existing_loggers\": True,\n # define how to convert log messages into text;\n # each handler has its choice of format\n \"formatters\": {\n \"verbose\": {\n \"format\": \"[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] \"\n \"%(message)s\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n },\n \"simple\": {\n \"format\": \"%(levelname)s %(message)s\",\n },\n \"django.server\": {\n \"()\": \"django.utils.log.ServerFormatter\",\n \"format\": \"[{server_time}] {message}\",\n \"style\": \"{\",\n },\n },\n # define where log messages will be sent;\n # each logger can have one or more handlers\n \"handlers\": {\n \"console\": {\n \"level\": env_log_level,\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n \"django.server\": {\n \"level\": \"INFO\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"django.server\",\n },\n # No file logger is configured,\n # because containerized apps\n # do not log to the file system.\n },\n # define loggers: these are \"sinks\" into which\n # messages are sent for processing\n \"loggers\": {\n # Django's generic logger\n \"django\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django's template processor\n \"django.template\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django's runserver\n \"django.server\": {\n \"handlers\": [\"django.server\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django's runserver requests\n \"django.request\": {\n \"handlers\": [\"django.server\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # OpenID Connect logger\n \"oic\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django wrapper for OpenID Connect\n \"djangooidc\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Our app!\n \"registrar\": {\n \"handlers\": [\"console\"],\n \"level\": \"DEBUG\",\n \"propagate\": False,\n },\n },\n # root logger catches anything, unless\n # defined by a more specific logger\n \"root\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n },\n}\n\n# endregion\n# region: Login-------------------------------------------------------------###\n\n# list of Python classes used when trying to authenticate a user\nAUTHENTICATION_BACKENDS = [\n \"django.contrib.auth.backends.ModelBackend\",\n \"djangooidc.backends.OpenIdConnectBackend\",\n]\n\n# this is where unauthenticated requests are redirected when using\n# the login_required() decorator, LoginRequiredMixin, or AccessMixin\nLOGIN_URL = \"/openid/login\"\n\n# We don't want the OIDC app to be login-required because then it can't handle\n# the initial login requests without erroring.\nLOGIN_REQUIRED_IGNORE_PATHS = [\n r\"/openid/(.+)$\",\n]\n\n# where to go after logging out\nLOGOUT_REDIRECT_URL = \"home\"\n\n# disable dynamic client registration,\n# only the OP inside OIDC_PROVIDERS will be available\nOIDC_ALLOW_DYNAMIC_OP = False\n\n# which provider to use if multiple are available\n# (code does not currently support user selection)\nOIDC_ACTIVE_PROVIDER = \"login.gov\"\n\n\nOIDC_PROVIDERS = {\n \"login.gov\": {\n \"srv_discovery_url\": \"https://idp.int.identitysandbox.gov\",\n \"behaviour\": {\n # the 'code' workflow requires direct connectivity from us to Login.gov\n \"response_type\": \"code\",\n \"scope\": 
[\"email\", \"profile:name\", \"phone\"],\n \"user_info_request\": [\"email\", \"first_name\", \"last_name\", \"phone\"],\n \"acr_value\": \"http://idmanagement.gov/ns/assurance/ial/2\",\n },\n \"client_registration\": {\n \"client_id\": \"cisa_dotgov_registrar\",\n \"redirect_uris\": [f\"{env_base_url}/openid/callback/login/\"],\n \"post_logout_redirect_uris\": [f\"{env_base_url}/openid/callback/logout/\"],\n \"token_endpoint_auth_method\": [\"private_key_jwt\"],\n \"sp_private_key\": secret_login_key,\n },\n }\n}\n\n# endregion\n# region: Routing-----------------------------------------------------------###\n\n# ~ Set by django.middleware.common.CommonMiddleware\n# APPEND_SLASH = True\n# PREPEND_WWW = False\n\n# full Python import path to the root URLconf\nROOT_URLCONF = \"registrar.config.urls\"\n\n# URL to use when referring to static files located in STATIC_ROOT\n# Must be relative and end with \"/\"\nSTATIC_URL = \"public/\"\n\n# Base URL of our separate static public website. Used by the\n# {% public_site_url subdir/path %} template tag\nGETGOV_PUBLIC_SITE_URL = env_getgov_public_site_url\n\n# endregion\n# region: Registry----------------------------------------------------------###\n\n# SECURITY WARNING: keep all registry variables in production secret!\nSECRET_REGISTRY_CL_ID = secret_registry_cl_id\nSECRET_REGISTRY_PASSWORD = secret_registry_password\nSECRET_REGISTRY_CERT = secret_registry_cert\nSECRET_REGISTRY_KEY = secret_registry_key\nSECRET_REGISTRY_KEY_PASSPHRASE = secret_registry_key_passphrase\nSECRET_REGISTRY_HOSTNAME = secret_registry_hostname\n\n# endregion\n# region: Security and Privacy----------------------------------------------###\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = secret_key\n\n# Use this variable for doing SECRET_KEY rotation, see documentation\nSECRET_KEY_FALLBACKS: \"list[str]\" = []\n\n# ~ Set by django.middleware.security.SecurityMiddleware\n# SECURE_CONTENT_TYPE_NOSNIFF = True\n# SECURE_CROSS_ORIGIN_OPENER_POLICY = \"same-origin\"\n# SECURE_REDIRECT_EXEMPT = []\n# SECURE_REFERRER_POLICY = \"same-origin\"\n# SECURE_SSL_HOST = None\n\n# ~ Overridden from django.middleware.security.SecurityMiddleware\n# adds the includeSubDomains directive to the HTTP Strict Transport Security header\nSECURE_HSTS_INCLUDE_SUBDOMAINS = True\n# adds the preload directive to the HTTP Strict Transport Security header\nSECURE_HSTS_PRELOAD = True\n# TODO: set this value to 31536000 (1 year) for production\nSECURE_HSTS_SECONDS = 300\n# redirect all non-HTTPS requests to HTTPS\nSECURE_SSL_REDIRECT = True\n\n# ~ Set by django.middleware.common.CommonMiddleware\n# DISALLOWED_USER_AGENTS = []\n\n# The host/domain names that Django can serve.\n# This is a security measure to prevent HTTP Host header attacks,\n# which are possible even under many seemingly-safe\n# web server configurations.\nALLOWED_HOSTS = [\n \"getgov-stable.app.cloud.gov\",\n \"getgov-staging.app.cloud.gov\",\n \"getgov-gd.app.cloud.gov\",\n \"getgov-rb.app.cloud.gov\",\n \"getgov-ko.app.cloud.gov\",\n \"getgov-ab.app.cloud.gov\",\n \"getgov-bl.app.cloud.gov\",\n \"getgov-rjm.app.cloud.gov\",\n \"get.gov\",\n]\n\n# Extend ALLOWED_HOSTS.\n# IP addresses can also be hosts, which are used by internal\n# load balancers for health checks, etc.\nALLOWED_CIDR_NETS = [\"10.0.0.0/8\"]\n\n# ~ Below are some protections from cross-site request forgery.\n# This is canonically done by including a nonce value\n# in pages sent to the user, which the user is expected\n# to send back. 
The specifics of implementation are\n# intricate and varied.\n\n# Store the token server-side, do not send it\n# to the user via a cookie. This means each page\n# which requires protection must place the token\n# in the HTML explicitly, otherwise the user will\n# get a 403 error when they submit.\nCSRF_USE_SESSIONS = True\n\n# Expiry of CSRF cookie, in seconds.\n# None means \"use session-based CSRF cookies\".\nCSRF_COOKIE_AGE = None\n\n# Prevent JavaScript from reading the CSRF cookie.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_HTTPONLY = True\n\n# Only send the cookie via HTTPS connections.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_SECURE = True\n\n# Protect from non-targeted attacks by obscuring\n# the CSRF cookie name from the default.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_NAME = \"CrSiReFo\"\n\n# Prevents CSRF cookie from being sent if the user\n# is coming to our site from an external page.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_SAMESITE = \"Strict\"\n\n# Change header name to match cookie name.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_HEADER_NAME = \"HTTP_X_CRSIREFO\"\n\n# Max parameters that may be received via GET or POST\n# TODO: 1000 is the default, may need to tune upward for\n# large DNS zone files, if records are represented by\n# individual form fields.\nDATA_UPLOAD_MAX_NUMBER_FIELDS = 1000\n\n# age of session cookies, in seconds (28800 = 8 hours)\nSESSION_COOKIE_AGE = 28800\n\n# instruct the browser to forbid client-side JavaScript\n# from accessing the cookie\nSESSION_COOKIE_HTTPONLY = True\n\n# are we a spring boot application? who knows!\nSESSION_COOKIE_NAME = \"JSESSIONID\"\n\n# Allows session cookie to be sent if the user\n# is coming to our site from an external page\n# unless it is via \"risky\" paths, i.e. 
POST requests\nSESSION_COOKIE_SAMESITE = \"Lax\"\n\n# instruct browser to only send cookie via HTTPS\nSESSION_COOKIE_SECURE = True\n\n# ~ Set by django.middleware.clickjacking.XFrameOptionsMiddleware\n# prevent clickjacking by instructing the browser not to load\n# our site within an iframe\n# X_FRAME_OPTIONS = \"Deny\"\n\n# endregion\n# region: Testing-----------------------------------------------------------###\n\n# Additional directories searched for fixture files.\n# The fixtures directory of each application is searched by default.\n# Must use unix style \"/\" path separators.\nFIXTURE_DIRS: \"list[str]\" = []\n\n# endregion\n\n\n# # # ###\n# Development settings #\n# # # ###\n\nif DEBUG:\n # used by debug() context processor\n INTERNAL_IPS = [\n \"127.0.0.1\",\n \"::1\",\n ]\n\n # allow dev laptop and docker-compose network to connect\n ALLOWED_HOSTS += (\"localhost\", \"app\")\n SECURE_SSL_REDIRECT = False\n SECURE_HSTS_PRELOAD = False\n\n # discover potentially inefficient database queries\n # TODO: use settings overrides to ensure this always is True during tests\n INSTALLED_APPS += (\"nplusone.ext.django\",)\n MIDDLEWARE += (\"nplusone.ext.django.NPlusOneMiddleware\",)\n # turned off for now, because django-auditlog has some issues\n NPLUSONE_RAISE = False\n NPLUSONE_WHITELIST = [\n {\"model\": \"admin.LogEntry\", \"field\": \"user\"},\n ]\n\n # insert the amazing django-debug-toolbar\n INSTALLED_APPS += (\"debug_toolbar\",)\n MIDDLEWARE.insert(0, \"debug_toolbar.middleware.DebugToolbarMiddleware\")\n\n DEBUG_TOOLBAR_CONFIG = {\n # due to Docker, bypass Debug Toolbar's check on INTERNAL_IPS\n \"SHOW_TOOLBAR_CALLBACK\": lambda _: True,\n }\n", "path": "src/registrar/config/settings.py" } ]
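The settings above lean on `CSRF_USE_SESSIONS = True`, which keeps the token server-side, so any protected form has to emit the token explicitly or the submission fails with a 403. A minimal sketch of what that means in practice (the view below is hypothetical and not part of the registrar codebase; only `get_token` and the `csrfmiddlewaretoken` field name come from Django itself):

```python
# Hypothetical illustration only: render a session-stored CSRF token by hand.
from django.http import HttpResponse
from django.middleware.csrf import get_token


def example_form(request):
    # With CSRF_USE_SESSIONS = True, get_token() persists the token in the
    # session rather than in a cookie; a form that omits it gets a 403.
    token = get_token(request)
    return HttpResponse(
        f'<form method="post">'
        f'<input type="hidden" name="csrfmiddlewaretoken" value="{token}">'
        f'<button type="submit">Submit</button>'
        f'</form>'
    )
```

In a template, `{% csrf_token %}` renders the same hidden input, so hand-building the form like this is purely for illustration.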
diff --git a/.github/ISSUE_TEMPLATE/developer-onboarding.md b/.github/ISSUE_TEMPLATE/developer-onboarding.md
index 74af9ef60..92ae9e3a1 100644
--- a/.github/ISSUE_TEMPLATE/developer-onboarding.md
+++ b/.github/ISSUE_TEMPLATE/developer-onboarding.md
@@ -83,6 +83,6 @@ export GPG_TTY
 
 ## Setting up developer sandbox
 
-We have two types of environments: stable, and sandbox. Stable gets deployed via tagged release every sprint, and developer sandboxes are given to get.gov developers to mess around in a production-like environment without disrupting stable. Each sandbox is namespaced and will automatically be deployed too when the appropriate branch syntax is used for that space in an open pull request. There are several things you need to setup to make the sandbox work for a developer.
+We have three types of environments: stable, staging, and sandbox. Stable (production) and staging (pre-prod) get deployed via tagged release, and developer sandboxes are given to get.gov developers to mess around in a production-like environment without disrupting stable or staging. Each sandbox is namespaced and will automatically be deployed, too, when the appropriate branch syntax is used for that space in an open pull request. There are several things you need to set up to make the sandbox work for a developer.
 
 All automation for setting up a developer sandbox is documented in the scripts for [creating a developer sandbox](../../ops/scripts/create_dev_sandbox.sh) and [removing a developer sandbox](../../ops/scripts/destroy_dev_sandbox.sh). A Cloud.gov organization administrator will have to perform the script in order to create the sandbox.
diff --git a/.github/workflows/deploy-staging.yaml b/.github/workflows/deploy-staging.yaml
new file mode 100644
index 000000000..068751c30
--- /dev/null
+++ b/.github/workflows/deploy-staging.yaml
@@ -0,0 +1,41 @@
+# This workflow runs on pushes of tagged commits.
+# "Releases" of tagged commits will deploy selected branch to staging.
+
+name: Build and deploy staging for tagged release
+
+on:
+  push:
+    paths-ignore:
+      - 'docs/**'
+      - '**.md'
+      - '.gitignore'
+
+    tags:
+      - staging-*
+
+jobs:
+  deploy-staging:
+    if: ${{ github.ref_type == 'tag' }}
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Compile USWDS assets
+        working-directory: ./src
+        run: |
+          docker compose run node npm install &&
+          docker compose run node npx gulp copyAssets &&
+          docker compose run node npx gulp compile
+      - name: Collect static assets
+        working-directory: ./src
+        run: docker compose run app python manage.py collectstatic --no-input
+      - name: Deploy to cloud.gov staging
+        uses: 18f/cg-deploy-action@main
+        env:
+          DEPLOY_NOW: thanks
+        with:
+          cf_username: ${{ secrets.CF_STAGING_USERNAME }}
+          cf_password: ${{ secrets.CF_STAGING_PASSWORD }}
+          cf_org: cisa-getgov-prototyping
+          cf_space: staging
+          push_arguments: "-f ops/manifests/manifest-staging.yaml"
diff --git a/.github/workflows/migrate.yaml b/.github/workflows/migrate.yaml
index 574ec9fa0..28447a605 100644
--- a/.github/workflows/migrate.yaml
+++ b/.github/workflows/migrate.yaml
@@ -14,6 +14,7 @@ on:
        description: Which environment should we run migrations for?
        options:
          - stable
+         - staging
          - gd
          - rb
          - ko
diff --git a/.github/workflows/reset-db.yaml b/.github/workflows/reset-db.yaml
index 4b9a9eafb..0d3ed4934 100644
--- a/.github/workflows/reset-db.yaml
+++ b/.github/workflows/reset-db.yaml
@@ -15,6 +15,7 @@ on:
        description: Which environment should we flush and re-load data for?
        options:
          - stable
+         - staging
          - gd
          - rb
          - ko
diff --git a/docs/developer/database-access.md b/docs/developer/database-access.md
index 9d615c477..859ef2fd6 100644
--- a/docs/developer/database-access.md
+++ b/docs/developer/database-access.md
@@ -42,10 +42,9 @@ Optionally, load data from fixtures as well
 cf run-task getgov-ENVIRONMENT --wait --command 'python manage.py load' --name loaddata
 ```
 
-For the `stable` environment, developers don't have credentials so we need to
-run that command using Github Actions. Go to
+For the `stable` or `staging` environments, developers don't have credentials, so we need to run that command using Github Actions. Go to
 <https://github.com/cisagov/getgov/actions/workflows/migrate.yaml> and select
-the "Run workflow" button, making sure that `stable` is selected.
+the "Run workflow" button, making sure to select `stable` or `staging`, depending on which environment you want to update.
 
 ## Getting data for fixtures
 
diff --git a/docs/operations/README.md b/docs/operations/README.md
index e9d67a5af..e4ab64135 100644
--- a/docs/operations/README.md
+++ b/docs/operations/README.md
@@ -35,7 +35,9 @@ Binding the database in `manifest-<ENVIRONMENT>.json` automatically inserts the
 
 # Deploy
 
-We have two types of environments: developer "sandboxes" and `stable`. Developers can deploy locally to their sandbox whenever they want. However, only our CD service can deploy to `stable`, and it does so when we make tagged releases of `main`. This is to ensure that we have a "golden" environment to point to, and can still test things out in a sandbox space. You should make sure all of the USWDS assets are compiled and collected before deploying to your sandbox. To deploy locally to `sandbox`:
+We have three types of environments: developer "sandboxes", `staging`, and `stable`. Developers can deploy locally to their sandbox whenever they want. However, only our CD service can deploy to `staging` and `stable`, and it does so when we make tagged releases of `main`. For `staging`, this is done to ensure there is a non-production test environment that can be used for user testing or for testing code before it is pushed to `stable`. `Staging` can be especially helpful when testing database changes or migrations that could have adverse effects in `stable`. On the other hand, `stable` is used to ensure that we have a "golden" environment to point to. We can refer to `stable` as our production environment and `staging` as our pre-production (pre-prod) environment. As such, code on main should always be tagged for `staging` before it is tagged for `stable`.
+
+You should make sure all of the USWDS assets are compiled and collected before deploying to your sandbox. To deploy locally to `sandbox`:
 
 For ease of use, you can run the `deploy.sh <sandbox name>` script in the `/src` directory to build the assets and deploy to your sandbox. Similarly, you could run `build.sh <sandbox name>` script to just compile and collect the assets without deploying.
diff --git a/ops/manifests/manifest-staging.yaml b/ops/manifests/manifest-staging.yaml new file mode 100644 index 000000000..93c44071c --- /dev/null +++ b/ops/manifests/manifest-staging.yaml @@ -0,0 +1,29 @@ +--- +applications: +- name: getgov-staging + buildpacks: + - python_buildpack + path: ../../src + instances: 1 + memory: 512M + stack: cflinuxfs4 + timeout: 180 + command: ./run.sh + health-check-type: http + health-check-http-endpoint: /health + env: + # Send stdout and stderr straight to the terminal without buffering + PYTHONUNBUFFERED: yup + # Tell Django where to find its configuration + DJANGO_SETTINGS_MODULE: registrar.config.settings + # Tell Django where it is being hosted + DJANGO_BASE_URL: https://getgov-staging.app.cloud.gov + # Tell Django how much stuff to log + DJANGO_LOG_LEVEL: INFO + # default public site location + GETGOV_PUBLIC_SITE_URL: https://beta.get.gov + routes: + - route: getgov-staging.app.cloud.gov + services: + - getgov-credentials + - getgov-staging-database diff --git a/ops/scripts/create_dev_sandbox.sh b/ops/scripts/create_dev_sandbox.sh index df10d8d90..f180ada8d 100755 --- a/ops/scripts/create_dev_sandbox.sh +++ b/ops/scripts/create_dev_sandbox.sh @@ -43,7 +43,7 @@ cp ops/scripts/manifest-sandbox-template.yaml ops/manifests/manifest-$1.yaml sed -i '' "s/ENVIRONMENT/$1/" "ops/manifests/manifest-$1.yaml" echo "Adding new environment to settings.py..." -sed -i '' '/getgov-stable.app.cloud.gov/ {a\ +sed -i '' '/getgov-staging.app.cloud.gov/ {a\ '\"getgov-$1.app.cloud.gov\"', }' src/registrar/config/settings.py @@ -65,7 +65,7 @@ done echo "Creating new cloud.gov credentials for $1..." django_key=$(python3 -c 'from django.core.management.utils import get_random_secret_key; print(get_random_secret_key())') openssl req -nodes -x509 -days 365 -newkey rsa:2048 -keyout private-$1.pem -out public-$1.crt -login_key=$(base64 private-$1.pem) +login_key=$(base64 -i private-$1.pem) jq -n --arg django_key "$django_key" --arg login_key "$login_key" '{"DJANGO_SECRET_KEY":$django_key,"DJANGO_SECRET_LOGIN_KEY":$login_key}' > credentials-$1.json cf cups getgov-credentials -p credentials-$1.json @@ -105,11 +105,11 @@ echo echo "Moving on to setup Github automation..." echo "Adding new environment to Github Actions..." -sed -i '' '/ - stable/ {a\ +sed -i '' '/ - staging/ {a\ - '"$1"' }' .github/workflows/reset-db.yaml -sed -i '' '/ - stable/ {a\ +sed -i '' '/ - staging/ {a\ - '"$1"' }' .github/workflows/migrate.yaml diff --git a/src/registrar/config/settings.py b/src/registrar/config/settings.py index 15f8b45a9..4710b0c65 100644 --- a/src/registrar/config/settings.py +++ b/src/registrar/config/settings.py @@ -564,6 +564,7 @@ # web server configurations. ALLOWED_HOSTS = [ "getgov-stable.app.cloud.gov", + "getgov-staging.app.cloud.gov", "getgov-gd.app.cloud.gov", "getgov-rb.app.cloud.gov", "getgov-ko.app.cloud.gov",
yt-project__yt-2226
get_yt_version not working in Python 3 ### Bug report Running `yt.get_yt_version` shows the following behavior. ``` >>> import yt >>> yt.get_yt_version() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/britton/Documents/work/yt/yt/yt/funcs.py", line 652, in get_yt_version return version[:12].strip().decode('utf-8') AttributeError: 'str' object has no attribute 'decode' ``` This is on the yt dev tip with Python 3.7.1. I can take care of this, but not at the moment. I'm making this issue so I don't forget.
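The traceback above is the Python 2/3 `bytes`-versus-`str` split in miniature: under Python 3, GitPython's `rev_parse` already hands back a `str`, which has no `decode` method, while the Python 2 code path this was written for received `bytes`. A quick, self-contained illustration (the hash below is a made-up stand-in for a real revision id; no yt or git checkout is needed to reproduce the error class):

```python
# Stand-in for what get_git_version() returns: str on Python 3, bytes on Python 2.
version_py3 = "f7a8b9c0d1e2f3a4"
try:
    version_py3[:12].strip().decode("utf-8")
except AttributeError as err:
    print(err)  # 'str' object has no attribute 'decode'

version_py2 = b"f7a8b9c0d1e2f3a4"  # what the old code path expected
print(version_py2[:12].strip().decode("utf-8"))  # works: bytes -> str
```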
[ { "content": "\"\"\"\nUseful functions. If non-original, see function for citation.\n\n\n\n\"\"\"\nfrom __future__ import print_function\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, yt Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nimport errno\nfrom yt.extern.six import string_types\nfrom yt.extern.six.moves import input, builtins\nimport time\nimport inspect\nimport traceback\nimport sys\nimport pdb\nimport os\nimport re\nimport contextlib\nimport warnings\nimport struct\nimport subprocess\nimport numpy as np\nimport itertools\nimport base64\nimport numpy\nimport matplotlib\nimport getpass\nfrom math import floor, ceil\nfrom numbers import Number as numeric_type\n\nfrom yt.extern.six.moves import urllib\nfrom yt.utilities.logger import ytLogger as mylog\nfrom yt.utilities.lru_cache import lru_cache\nfrom yt.utilities.exceptions import \\\n YTInvalidWidthError, \\\n YTEquivalentDimsError\nfrom yt.extern.tqdm import tqdm\nfrom yt.units.yt_array import YTArray, YTQuantity\nfrom functools import wraps\n\n# Some functions for handling sequences and other types\n\ndef iterable(obj):\n \"\"\"\n Grabbed from Python Cookbook / matplotlib.cbook. Returns true/false for\n *obj* iterable.\n \"\"\"\n try: len(obj)\n except: return False\n return True\n\ndef ensure_list(obj):\n \"\"\"\n This function ensures that *obj* is a list. Typically used to convert a\n string to a list, for instance ensuring the *fields* as an argument is a\n list.\n \"\"\"\n if obj is None:\n return [obj]\n if not isinstance(obj, list):\n return [obj]\n return obj\n\ndef ensure_numpy_array(obj):\n \"\"\"\n This function ensures that *obj* is a numpy array. Typically used to\n convert scalar, list or tuple argument passed to functions using Cython.\n \"\"\"\n if isinstance(obj, np.ndarray):\n if obj.shape == ():\n return np.array([obj])\n # We cast to ndarray to catch ndarray subclasses\n return np.array(obj)\n elif isinstance(obj, (list, tuple)):\n return np.asarray(obj)\n else:\n return np.asarray([obj])\n\ndef ensure_tuple(obj):\n \"\"\"\n This function ensures that *obj* is a tuple. 
Typically used to convert\n scalar, list, or array arguments specified by a user in a context where\n we assume a tuple internally\n \"\"\"\n if isinstance(obj, tuple):\n return obj\n elif isinstance(obj, (list, np.ndarray)):\n return tuple(obj)\n else:\n return (obj,)\n\ndef read_struct(f, fmt):\n \"\"\"\n This reads a struct, and only that struct, from an open file.\n \"\"\"\n s = f.read(struct.calcsize(fmt))\n return struct.unpack(fmt, s)\n\ndef just_one(obj):\n # If we have an iterable, sometimes we only want one item\n if hasattr(obj,'flat'):\n if isinstance(obj, YTArray):\n return YTQuantity(obj.flat[0], obj.units, registry=obj.units.registry)\n return obj.flat[0]\n elif iterable(obj):\n return obj[0]\n return obj\n\n\ndef compare_dicts(dict1, dict2):\n if not set(dict1) <= set(dict2):\n return False\n for key in dict1.keys():\n if dict1[key] is not None and dict2[key] is not None:\n if isinstance(dict1[key], dict):\n if compare_dicts(dict1[key], dict2[key]):\n continue\n else:\n return False\n try:\n comparison = (dict1[key] == dict2[key]).all()\n except AttributeError:\n comparison = (dict1[key] == dict2[key])\n if not comparison:\n return False\n return True\n\n# Taken from\n# http://www.goldb.org/goldblog/2008/02/06/PythonConvertSecsIntoHumanReadableTimeStringHHMMSS.aspx\ndef humanize_time(secs):\n \"\"\"\n Takes *secs* and returns a nicely formatted string\n \"\"\"\n mins, secs = divmod(secs, 60)\n hours, mins = divmod(mins, 60)\n return '%02d:%02d:%02d' % (hours, mins, secs)\n\n#\n# Some function wrappers that come in handy once in a while\n#\n\n# we use the resource module to get the memory page size\n\ntry:\n import resource\nexcept ImportError:\n pass\n\ndef get_memory_usage(subtract_share = False):\n \"\"\"\n Returning resident size in megabytes\n \"\"\"\n pid = os.getpid()\n try:\n pagesize = resource.getpagesize()\n except NameError:\n return -1024\n status_file = \"/proc/%s/statm\" % (pid)\n if not os.path.isfile(status_file):\n return -1024\n line = open(status_file).read()\n size, resident, share, text, library, data, dt = [int(i) for i in line.split()]\n if subtract_share: resident -= share\n return resident * pagesize / (1024 * 1024) # return in megs\n\ndef time_execution(func):\n r\"\"\"\n Decorator for seeing how long a given function takes, depending on whether\n or not the global 'yt.timefunctions' config parameter is set.\n \"\"\"\n @wraps(func)\n def wrapper(*arg, **kw):\n t1 = time.time()\n res = func(*arg, **kw)\n t2 = time.time()\n mylog.debug('%s took %0.3f s', func.__name__, (t2-t1))\n return res\n from yt.config import ytcfg\n if ytcfg.getboolean(\"yt\",\"timefunctions\") is True:\n return wrapper\n else:\n return func\n\ndef print_tb(func):\n \"\"\"\n This function is used as a decorate on a function to have the calling stack\n printed whenever that function is entered.\n\n This can be used like so:\n\n .. code-block:: python\n\n @print_tb\n def some_deeply_nested_function(...):\n\n \"\"\"\n @wraps(func)\n def run_func(*args, **kwargs):\n traceback.print_stack()\n return func(*args, **kwargs)\n return run_func\n\ndef rootonly(func):\n \"\"\"\n This is a decorator that, when used, will only call the function on the\n root processor and then broadcast the results of the function to all other\n processors.\n\n This can be used like so:\n\n .. 
code-block:: python\n\n @rootonly\n def some_root_only_function(...):\n\n \"\"\"\n from yt.config import ytcfg\n @wraps(func)\n def check_parallel_rank(*args, **kwargs):\n if ytcfg.getint(\"yt\",\"__topcomm_parallel_rank\") > 0:\n return\n return func(*args, **kwargs)\n return check_parallel_rank\n\ndef rootloginfo(*args):\n from yt.config import ytcfg\n if ytcfg.getint(\"yt\", \"__topcomm_parallel_rank\") > 0: return\n mylog.info(*args)\n\ndef deprecate(replacement):\n def real_deprecate(func):\n \"\"\"\n This decorator issues a deprecation warning.\n\n This can be used like so:\n\n .. code-block:: python\n\n @deprecate(\"new_function\")\n def some_really_old_function(...):\n\n \"\"\"\n @wraps(func)\n def run_func(*args, **kwargs):\n message = \"%s has been deprecated and may be removed without notice!\"\n if replacement is not None:\n message += \" Use %s instead.\" % replacement\n warnings.warn(message % func.__name__, DeprecationWarning,\n stacklevel=2)\n func(*args, **kwargs)\n return run_func\n return real_deprecate\n\ndef pdb_run(func):\n \"\"\"\n This decorator inserts a pdb session on top of the call-stack into a\n function.\n\n This can be used like so:\n\n .. code-block:: python\n\n @pdb_run\n def some_function_to_debug(...):\n\n \"\"\"\n @wraps(func)\n def wrapper(*args, **kw):\n pdb.runcall(func, *args, **kw)\n return wrapper\n\n__header = \"\"\"\n== Welcome to the embedded IPython Shell ==\n\n You are currently inside the function:\n %(fname)s\n\n Defined in:\n %(filename)s:%(lineno)s\n\"\"\"\n\ndef insert_ipython(num_up=1):\n \"\"\"\n Placed inside a function, this will insert an IPython interpreter at that\n current location. This will enabled detailed inspection of the current\n execution environment, as well as (optional) modification of that environment.\n *num_up* refers to how many frames of the stack get stripped off, and\n defaults to 1 so that this function itself is stripped off.\n \"\"\"\n import IPython\n from IPython.terminal.embed import InteractiveShellEmbed\n try:\n from traitlets.config.loader import Config\n except ImportError:\n from IPython.config.loader import Config\n\n frame = inspect.stack()[num_up]\n loc = frame[0].f_locals.copy()\n glo = frame[0].f_globals\n dd = dict(fname = frame[3], filename = frame[1],\n lineno = frame[2])\n cfg = Config()\n cfg.InteractiveShellEmbed.local_ns = loc\n cfg.InteractiveShellEmbed.global_ns = glo\n IPython.embed(config=cfg, banner2 = __header % dd)\n ipshell = InteractiveShellEmbed(config=cfg)\n\n del ipshell\n\n\n#\n# Our progress bar types and how to get one\n#\n\nclass TqdmProgressBar(object):\n # This is a drop in replacement for pbar\n # called tqdm\n def __init__(self,title, maxval):\n self._pbar = tqdm(leave=True, total=maxval, desc=title)\n self.i = 0\n def update(self, i=None):\n if i is None:\n i = self.i + 1\n n = i - self.i\n self.i = i\n self._pbar.update(n)\n def finish(self):\n self._pbar.close()\n\nclass DummyProgressBar(object):\n # This progressbar gets handed if we don't\n # want ANY output\n def __init__(self, *args, **kwargs):\n return\n def update(self, *args, **kwargs):\n return\n def finish(self, *args, **kwargs):\n return\n\nclass ParallelProgressBar(object):\n # This is just a simple progress bar\n # that prints on start/stop\n def __init__(self, title, maxval):\n self.title = title\n mylog.info(\"Starting '%s'\", title)\n def update(self, *args, **kwargs):\n return\n def finish(self):\n mylog.info(\"Finishing '%s'\", self.title)\n\nclass GUIProgressBar(object):\n def __init__(self, title, 
maxval):\n import wx\n self.maxval = maxval\n self.last = 0\n self._pbar = wx.ProgressDialog(\"Working...\",\n title, maximum=maxval,\n style=wx.PD_REMAINING_TIME|wx.PD_ELAPSED_TIME|wx.PD_APP_MODAL)\n def update(self, val):\n # An update is only meaningful if it's on the order of 1/100 or greater\n if ceil(100*self.last / self.maxval) + 1 == \\\n floor(100*val / self.maxval) or val == self.maxval:\n self._pbar.Update(val)\n self.last = val\n def finish(self):\n self._pbar.Destroy()\n\ndef get_pbar(title, maxval, parallel=False):\n \"\"\"\n This returns a progressbar of the most appropriate type, given a *title*\n and a *maxval*.\n \"\"\"\n maxval = max(maxval, 1)\n from yt.config import ytcfg\n if ytcfg.getboolean(\"yt\", \"suppressStreamLogging\") or \\\n ytcfg.getboolean(\"yt\", \"__withintesting\") or \\\n maxval == 1: \\\n return DummyProgressBar()\n elif ytcfg.getboolean(\"yt\", \"__parallel\"):\n # If parallel is True, update progress on root only.\n if parallel:\n if is_root():\n return TqdmProgressBar(title, maxval)\n else:\n return DummyProgressBar()\n else:\n return ParallelProgressBar(title, maxval)\n pbar = TqdmProgressBar(title,maxval)\n return pbar\n\ndef only_on_root(func, *args, **kwargs):\n \"\"\"\n This function accepts a *func*, a set of *args* and *kwargs* and then only\n on the root processor calls the function. All other processors get \"None\"\n handed back.\n \"\"\"\n from yt.config import ytcfg\n if kwargs.pop(\"global_rootonly\", False):\n cfg_option = \"__global_parallel_rank\"\n else:\n cfg_option = \"__topcomm_parallel_rank\"\n if not ytcfg.getboolean(\"yt\",\"__parallel\"):\n return func(*args,**kwargs)\n if ytcfg.getint(\"yt\", cfg_option) > 0: return\n return func(*args, **kwargs)\n\ndef is_root():\n \"\"\"\n This function returns True if it is on the root processor of the\n topcomm and False otherwise.\n \"\"\"\n from yt.config import ytcfg\n cfg_option = \"__topcomm_parallel_rank\"\n if not ytcfg.getboolean(\"yt\",\"__parallel\"):\n return True\n if ytcfg.getint(\"yt\", cfg_option) > 0:\n return False\n return True\n\n\n#\n# Our signal and traceback handling functions\n#\n\ndef signal_print_traceback(signo, frame):\n print(traceback.print_stack(frame))\n\ndef signal_problem(signo, frame):\n raise RuntimeError()\n\ndef signal_ipython(signo, frame):\n insert_ipython(2)\n\ndef paste_traceback(exc_type, exc, tb):\n \"\"\"\n This is a traceback handler that knows how to paste to the pastebin.\n Should only be used in sys.excepthook.\n \"\"\"\n sys.__excepthook__(exc_type, exc, tb)\n from yt.extern.six.moves import StringIO, xmlrpc_client\n p = xmlrpc_client.ServerProxy(\n \"http://paste.yt-project.org/xmlrpc/\",\n allow_none=True)\n s = StringIO()\n traceback.print_exception(exc_type, exc, tb, file=s)\n s = s.getvalue()\n ret = p.pastes.newPaste('pytb', s, None, '', '', True)\n print()\n print(\"Traceback pasted to http://paste.yt-project.org/show/%s\" % (ret))\n print()\n\ndef paste_traceback_detailed(exc_type, exc, tb):\n \"\"\"\n This is a traceback handler that knows how to paste to the pastebin.\n Should only be used in sys.excepthook.\n \"\"\"\n import cgitb\n from yt.extern.six.moves import StringIO, xmlrpc_client\n s = StringIO()\n handler = cgitb.Hook(format=\"text\", file = s)\n handler(exc_type, exc, tb)\n s = s.getvalue()\n print(s)\n p = xmlrpc_client.ServerProxy(\n \"http://paste.yt-project.org/xmlrpc/\",\n allow_none=True)\n ret = p.pastes.newPaste('text', s, None, '', '', True)\n print()\n print(\"Traceback pasted to 
http://paste.yt-project.org/show/%s\" % (ret))\n print()\n\n_ss = \"fURbBUUBE0cLXgETJnZgJRMXVhVGUQpQAUBuehQMUhJWRFFRAV1ERAtBXw1dAxMLXT4zXBFfABNN\\nC0ZEXw1YUURHCxMXVlFERwxWCQw=\\n\"\ndef _rdbeta(key):\n enc_s = base64.decodestring(_ss)\n dec_s = ''.join([ chr(ord(a) ^ ord(b)) for a, b in zip(enc_s, itertools.cycle(key)) ])\n print(dec_s)\n\n#\n# Some exceptions\n#\n\nclass NoCUDAException(Exception):\n pass\n\nclass YTEmptyClass(object):\n pass\n\ndef update_hg_or_git(path):\n if os.path.exists(os.sep.join([path, '.hg'])):\n update_hg(path)\n elif os.path.exists(os.sep.join([path, '.git'])):\n update_git(path)\n\ndef update_git(path):\n try:\n import git\n except ImportError:\n print(\"Updating and precise version information requires \")\n print(\"gitpython to be installed.\")\n print(\"Try: pip install gitpython\")\n return -1\n with open(os.path.join(path, \"yt_updater.log\"), \"a\") as f:\n repo = git.Repo(path)\n if repo.is_dirty(untracked_files=True):\n print(\"Changes have been made to the yt source code so I won't \")\n print(\"update the code. You will have to do this yourself.\")\n print(\"Here's a set of sample commands:\")\n print(\"\")\n print(\" $ cd %s\" % (path))\n print(\" $ git stash\")\n print(\" $ git checkout master\")\n print(\" $ git pull\")\n print(\" $ git stash pop\")\n print(\" $ %s setup.py develop\" % (sys.executable))\n print(\"\")\n return 1\n if repo.active_branch.name != 'master':\n print(\"yt repository is not tracking the master branch so I won't \")\n print(\"update the code. You will have to do this yourself.\")\n print(\"Here's a set of sample commands:\")\n print(\"\")\n print(\" $ cd %s\" % (path))\n print(\" $ git checkout master\")\n print(\" $ git pull\")\n print(\" $ %s setup.py develop\" % (sys.executable))\n print(\"\")\n return 1\n print(\"Updating the repository\")\n f.write(\"Updating the repository\\n\\n\")\n old_version = repo.git.rev_parse('HEAD', short=12)\n try:\n remote = repo.remotes.yt_upstream\n except AttributeError:\n remote = repo.create_remote(\n 'yt_upstream', url='https://github.com/yt-project/yt')\n remote.fetch()\n master = repo.heads.master\n master.set_tracking_branch(remote.refs.master)\n master.checkout()\n remote.pull()\n new_version = repo.git.rev_parse('HEAD', short=12)\n f.write('Updated from %s to %s\\n\\n' % (old_version, new_version))\n rebuild_modules(path, f)\n print('Updated successfully')\n\ndef update_hg(path):\n try:\n import hglib\n except ImportError:\n print(\"Updating requires python-hglib to be installed.\")\n print(\"Try: pip install python-hglib\")\n return -1\n f = open(os.path.join(path, \"yt_updater.log\"), \"a\")\n with hglib.open(path) as repo:\n repo.pull(b'https://bitbucket.org/yt_analysis/yt')\n ident = repo.identify().decode(\"utf-8\")\n if \"+\" in ident:\n print(\"Changes have been made to the yt source code so I won't \")\n print(\"update the code. 
You will have to do this yourself.\")\n print(\"Here's a set of sample commands:\")\n print(\"\")\n print(\" $ cd %s\" % (path))\n print(\" $ hg up -C yt # This will delete any unsaved changes\")\n print(\" $ %s setup.py develop\" % (sys.executable))\n print(\"\")\n return 1\n print(\"Updating the repository\")\n f.write(\"Updating the repository\\n\\n\")\n books = repo.bookmarks()[0]\n books = [b[0].decode('utf8') for b in books]\n if 'master' in books:\n repo.update('master', check=True)\n else:\n repo.update('yt', check=True)\n f.write(\"Updated from %s to %s\\n\\n\" % (ident, repo.identify()))\n rebuild_modules(path, f)\n print(\"Updated successfully.\")\n\ndef rebuild_modules(path, f):\n f.write(\"Rebuilding modules\\n\\n\")\n p = subprocess.Popen([sys.executable, \"setup.py\", \"build_ext\", \"-i\"],\n cwd=path, stdout = subprocess.PIPE,\n stderr = subprocess.STDOUT)\n stdout, stderr = p.communicate()\n f.write(stdout.decode('utf-8'))\n f.write(\"\\n\\n\")\n if p.returncode:\n print(\"BROKEN: See %s\" % (os.path.join(path, \"yt_updater.log\")))\n sys.exit(1)\n f.write(\"Successful!\\n\")\n\n\ndef get_hg_or_git_version(path):\n if os.path.exists(os.sep.join([path, '.hg'])):\n return get_hg_version(path)\n elif os.path.exists(os.sep.join([path, '.git'])):\n return get_git_version(path)\n return None\n\ndef get_git_version(path):\n try:\n import git\n except ImportError:\n print(\"Updating and precise version information requires \")\n print(\"gitpython to be installed.\")\n print(\"Try: pip install gitpython\")\n return None\n try:\n repo = git.Repo(path)\n return repo.git.rev_parse('HEAD', short=12)\n except git.InvalidGitRepositoryError:\n # path is not a git repository\n return None\n\ndef get_hg_version(path):\n try:\n import hglib\n except ImportError:\n print(\"Updating and precise version information requires \")\n print(\"python-hglib to be installed.\")\n print(\"Try: pip install python-hglib\")\n return None\n try:\n with hglib.open(path) as repo:\n return repo.identify().decode('utf-8')\n except hglib.error.ServerError:\n # path is not an hg repository\n return None\n\ndef get_yt_version():\n try:\n from yt.__hg_version__ import hg_version\n return hg_version\n except ImportError:\n pass\n import pkg_resources\n yt_provider = pkg_resources.get_provider(\"yt\")\n path = os.path.dirname(yt_provider.module_path)\n version = get_git_version(path)\n if version is None:\n return version\n else:\n return version[:12].strip().decode('utf-8')\n\ndef get_version_stack():\n version_info = {}\n version_info['yt'] = get_yt_version()\n version_info['numpy'] = numpy.version.version\n version_info['matplotlib'] = matplotlib.__version__\n return version_info\n\ndef get_script_contents():\n top_frame = inspect.stack()[-1]\n finfo = inspect.getframeinfo(top_frame[0])\n if finfo[2] != \"<module>\": return None\n if not os.path.exists(finfo[0]): return None\n try:\n contents = open(finfo[0]).read()\n except:\n contents = None\n return contents\n\ndef download_file(url, filename):\n requests = get_requests()\n if requests is None:\n return simple_download_file(url, filename)\n else:\n return fancy_download_file(url, filename, requests)\n\ndef fancy_download_file(url, filename, requests=None):\n response = requests.get(url, stream=True)\n total_length = response.headers.get('content-length')\n\n with open(filename, 'wb') as fh:\n if total_length is None:\n fh.write(response.content)\n else:\n blocksize = 4 * 1024 ** 2\n iterations = int(float(total_length)/float(blocksize))\n\n pbar = get_pbar(\n 
'Downloading %s to %s ' % os.path.split(filename)[::-1],\n iterations)\n iteration = 0\n for chunk in response.iter_content(chunk_size=blocksize):\n fh.write(chunk)\n iteration += 1\n pbar.update(iteration)\n pbar.finish()\n return filename\n\ndef simple_download_file(url, filename):\n class MyURLopener(urllib.request.FancyURLopener):\n def http_error_default(self, url, fp, errcode, errmsg, headers):\n raise RuntimeError(\"Attempt to download file from %s failed with error %s: %s.\" % \\\n (url, errcode, errmsg))\n fn, h = MyURLopener().retrieve(url, filename)\n return fn\n\n# This code snippet is modified from Georg Brandl\ndef bb_apicall(endpoint, data, use_pass = True):\n uri = 'https://api.bitbucket.org/1.0/%s/' % endpoint\n # since bitbucket doesn't return the required WWW-Authenticate header when\n # making a request without Authorization, we cannot use the standard urllib2\n # auth handlers; we have to add the requisite header from the start\n if data is not None:\n data = urllib.parse.urlencode(data)\n req = urllib.request.Request(uri, data)\n if use_pass:\n username = input(\"Bitbucket Username? \")\n password = getpass.getpass()\n upw = '%s:%s' % (username, password)\n req.add_header('Authorization', 'Basic %s' % base64.b64encode(upw).strip())\n return urllib.request.urlopen(req).read()\n\ndef get_yt_supp():\n import hglib\n supp_path = os.path.join(os.environ[\"YT_DEST\"], \"src\",\n \"yt-supplemental\")\n # Now we check that the supplemental repository is checked out.\n if not os.path.isdir(supp_path):\n print()\n print(\"*** The yt-supplemental repository is not checked ***\")\n print(\"*** out. I can do this for you, but because this ***\")\n print(\"*** is a delicate act, I require you to respond ***\")\n print(\"*** to the prompt with the word 'yes'. ***\")\n print()\n response = input(\"Do you want me to try to check it out? \")\n if response != \"yes\":\n print()\n print(\"Okay, I understand. You can check it out yourself.\")\n print(\"This command will do it:\")\n print()\n print(\"$ hg clone http://bitbucket.org/yt_analysis/yt-supplemental/ \", end=' ')\n print(\"%s\" % (supp_path))\n print()\n sys.exit(1)\n rv = hglib.clone(\"http://bitbucket.org/yt_analysis/yt-supplemental/\", \n supp_path)\n if rv:\n print(\"Something has gone wrong. Quitting.\")\n sys.exit(1)\n # Now we think we have our supplemental repository.\n return supp_path\n\ndef fix_length(length, ds):\n registry = ds.unit_registry\n if isinstance(length, YTArray):\n if registry is not None:\n length.units.registry = registry\n return length.in_units(\"code_length\")\n if isinstance(length, numeric_type):\n return YTArray(length, 'code_length', registry=registry)\n length_valid_tuple = isinstance(length, (list, tuple)) and len(length) == 2\n unit_is_string = isinstance(length[1], string_types)\n length_is_number = (isinstance(length[0], numeric_type) and not\n isinstance(length[0], YTArray))\n if length_valid_tuple and unit_is_string and length_is_number:\n return YTArray(*length, registry=registry)\n else:\n raise RuntimeError(\"Length %s is invalid\" % str(length))\n\[email protected]\ndef parallel_profile(prefix):\n r\"\"\"A context manager for profiling parallel code execution using cProfile\n\n This is a simple context manager that automatically profiles the execution\n of a snippet of code.\n\n Parameters\n ----------\n prefix : string\n A string name to prefix outputs with.\n\n Examples\n --------\n\n >>> with parallel_profile('my_profile'):\n ... 
yt.PhasePlot(ds.all_data(), 'density', 'temperature', 'cell_mass')\n \"\"\"\n import cProfile\n from yt.config import ytcfg\n fn = \"%s_%04i_%04i.cprof\" % (prefix,\n ytcfg.getint(\"yt\", \"__topcomm_parallel_size\"),\n ytcfg.getint(\"yt\", \"__topcomm_parallel_rank\"))\n p = cProfile.Profile()\n p.enable()\n yield fn\n p.disable()\n p.dump_stats(fn)\n\ndef get_num_threads():\n from .config import ytcfg\n nt = ytcfg.getint(\"yt\",\"numthreads\")\n if nt < 0:\n return os.environ.get(\"OMP_NUM_THREADS\", 0)\n return nt\n\ndef fix_axis(axis, ds):\n return ds.coordinates.axis_id.get(axis, axis)\n\ndef get_image_suffix(name):\n suffix = os.path.splitext(name)[1]\n return suffix if suffix in ['.png', '.eps', '.ps', '.pdf'] else ''\n\ndef get_output_filename(name, keyword, suffix):\n r\"\"\"Return an appropriate filename for output.\n\n With a name provided by the user, this will decide how to \n appropriately name the output file by the following rules:\n\n 1. if name is None, the filename will be the keyword plus \n the suffix.\n 2. if name ends with \"/\", assume name is a directory and \n the file will be named name/(keyword+suffix). If the\n directory does not exist, first try to create it and\n raise an exception if an error occurs.\n 3. if name does not end in the suffix, add the suffix.\n \n Parameters\n ----------\n name : str\n A filename given by the user.\n keyword : str\n A default filename prefix if name is None.\n suffix : str\n Suffix that must appear at end of the filename.\n This will be added if not present.\n\n Examples\n --------\n\n >>> print get_output_filename(None, \"Projection_x\", \".png\")\n Projection_x.png\n >>> print get_output_filename(\"my_file\", \"Projection_x\", \".png\")\n my_file.png\n >>> print get_output_filename(\"my_file/\", \"Projection_x\", \".png\")\n my_file/Projection_x.png\n \n \"\"\"\n if name is None:\n name = keyword\n name = os.path.expanduser(name)\n if name[-1] == os.sep and not os.path.isdir(name):\n ensure_dir(name)\n if os.path.isdir(name):\n name = os.path.join(name, keyword)\n if not name.endswith(suffix):\n name += suffix\n return name\n\ndef ensure_dir_exists(path):\n r\"\"\"Create all directories in path recursively in a parallel safe manner\"\"\"\n my_dir = os.path.dirname(path)\n # If path is a file in the current directory, like \"test.txt\", then my_dir\n # would be an empty string, resulting in FileNotFoundError when passed to\n # ensure_dir. Let's avoid that.\n if my_dir:\n ensure_dir(my_dir)\n\ndef ensure_dir(path):\n r\"\"\"Parallel safe directory maker.\"\"\"\n if os.path.exists(path):\n return path\n\n try:\n os.makedirs(path)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n return path\n\ndef validate_width_tuple(width):\n if not iterable(width) or len(width) != 2:\n raise YTInvalidWidthError(\n \"width (%s) is not a two element tuple\" % width)\n is_numeric = isinstance(width[0], numeric_type)\n length_has_units = isinstance(width[0], YTArray)\n unit_is_string = isinstance(width[1], string_types)\n if not is_numeric or length_has_units and unit_is_string:\n msg = \"width (%s) is invalid. 
\" % str(width)\n msg += \"Valid widths look like this: (12, 'au')\"\n raise YTInvalidWidthError(msg)\n\n_first_cap_re = re.compile('(.)([A-Z][a-z]+)')\n_all_cap_re = re.compile('([a-z0-9])([A-Z])')\n\n@lru_cache(maxsize=128, typed=False)\ndef camelcase_to_underscore(name):\n s1 = _first_cap_re.sub(r'\\1_\\2', name)\n return _all_cap_re.sub(r'\\1_\\2', s1).lower()\n\ndef set_intersection(some_list):\n if len(some_list) == 0: return set([])\n # This accepts a list of iterables, which we get the intersection of.\n s = set(some_list[0])\n for l in some_list[1:]:\n s.intersection_update(l)\n return s\n\[email protected]\ndef memory_checker(interval = 15, dest = None):\n r\"\"\"This is a context manager that monitors memory usage.\n\n Parameters\n ----------\n interval : int\n The number of seconds between printing the current memory usage in\n gigabytes of the current Python interpreter.\n\n Examples\n --------\n\n >>> with memory_checker(10):\n ... arr = np.zeros(1024*1024*1024, dtype=\"float64\")\n ... time.sleep(15)\n ... del arr\n \"\"\"\n import threading\n if dest is None:\n dest = sys.stdout\n class MemoryChecker(threading.Thread):\n def __init__(self, event, interval):\n self.event = event\n self.interval = interval\n threading.Thread.__init__(self)\n\n def run(self):\n while not self.event.wait(self.interval):\n print(\"MEMORY: %0.3e gb\" % (get_memory_usage()/1024.), file=dest)\n\n e = threading.Event()\n mem_check = MemoryChecker(e, interval)\n mem_check.start()\n try:\n yield\n finally:\n e.set()\n\n\ndef deprecated_class(cls):\n @wraps(cls)\n def _func(*args, **kwargs):\n # Note we use SyntaxWarning because by default, DeprecationWarning is\n # not shown.\n warnings.warn(\n \"This usage is deprecated. Please use %s instead.\" % cls.__name__,\n SyntaxWarning, stacklevel=2)\n return cls(*args, **kwargs)\n return _func\n\ndef enable_plugins():\n \"\"\"Forces the plugins file to be parsed.\n\n This plugin file is a means of creating custom fields, quantities,\n data objects, colormaps, and other code classes and objects to be used\n in yt scripts without modifying the yt source directly.\n\n The file must be located at ``$HOME/.config/yt/my_plugins.py``.\n\n Warning: when you use this function, your script will only be reproducible\n if you also provide the ``my_plugins.py`` file.\n \"\"\"\n import yt\n from yt.fields.my_plugin_fields import my_plugins_fields\n from yt.config import ytcfg, CONFIG_DIR\n my_plugin_name = ytcfg.get(\"yt\", \"pluginfilename\")\n\n # In the following order if pluginfilename is: an absolute path, located in\n # the CONFIG_DIR, located in an obsolete config dir.\n _fn = None\n old_config_dir = os.path.join(os.path.expanduser('~'), '.yt')\n for base_prefix in ('', CONFIG_DIR, old_config_dir):\n if os.path.isfile(os.path.join(base_prefix, my_plugin_name)):\n _fn = os.path.join(base_prefix, my_plugin_name)\n break\n\n if _fn is not None and os.path.isfile(_fn):\n if _fn.startswith(old_config_dir):\n mylog.warn(\n 'Your plugin file is located in a deprecated directory. 
'\n 'Please move it from %s to %s',\n os.path.join(old_config_dir, my_plugin_name),\n os.path.join(CONFIG_DIR, my_plugin_name))\n mylog.info(\"Loading plugins from %s\", _fn)\n ytdict = yt.__dict__\n execdict = ytdict.copy()\n execdict['add_field'] = my_plugins_fields.add_field\n with open(_fn) as f:\n code = compile(f.read(), _fn, 'exec')\n exec(code, execdict, execdict)\n ytnamespace = list(ytdict.keys())\n for k in execdict.keys():\n if k not in ytnamespace:\n if callable(execdict[k]):\n setattr(yt, k, execdict[k])\n\ndef fix_unitary(u):\n if u == '1':\n return 'unitary'\n else:\n return u\n\ndef get_hash(infile, algorithm='md5', BLOCKSIZE=65536):\n \"\"\"Generate file hash without reading in the entire file at once.\n\n Original code licensed under MIT. Source:\n http://pythoncentral.io/hashing-files-with-python/\n\n Parameters\n ----------\n infile : str\n File of interest (including the path).\n algorithm : str (optional)\n Hash algorithm of choice. Defaults to 'md5'.\n BLOCKSIZE : int (optional)\n How much data in bytes to read in at once.\n\n Returns\n -------\n hash : str\n The hash of the file.\n\n Examples\n --------\n >>> import yt.funcs as funcs\n >>> funcs.get_hash('/path/to/test.png')\n 'd38da04859093d430fa4084fd605de60'\n\n \"\"\"\n import hashlib\n\n try:\n hasher = getattr(hashlib, algorithm)()\n except:\n raise NotImplementedError(\"'%s' not available! Available algorithms: %s\" %\n (algorithm, hashlib.algorithms))\n\n filesize = os.path.getsize(infile)\n iterations = int(float(filesize)/float(BLOCKSIZE))\n\n pbar = get_pbar('Generating %s hash' % algorithm, iterations)\n\n iter = 0\n with open(infile,'rb') as f:\n buf = f.read(BLOCKSIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = f.read(BLOCKSIZE)\n iter += 1\n pbar.update(iter)\n pbar.finish()\n\n return hasher.hexdigest()\n\ndef get_brewer_cmap(cmap):\n \"\"\"Returns a colorbrewer colormap from palettable\"\"\"\n try:\n import brewer2mpl\n except ImportError:\n brewer2mpl = None\n try:\n import palettable\n except ImportError:\n palettable = None\n if palettable is not None:\n bmap = palettable.colorbrewer.get_map(*cmap)\n elif brewer2mpl is not None:\n warnings.warn(\"Using brewer2mpl colormaps is deprecated. \"\n \"Please install the successor to brewer2mpl, \"\n \"palettable, with `pip install palettable`. \"\n \"Colormap tuple names remain unchanged.\")\n bmap = brewer2mpl.get_map(*cmap)\n else:\n raise RuntimeError(\n \"Please install palettable to use colorbrewer colormaps\")\n return bmap.get_mpl_colormap(N=cmap[2])\n\ndef get_requests():\n try:\n import requests\n except ImportError:\n requests = None\n return requests\n\[email protected]\ndef dummy_context_manager(*args, **kwargs):\n yield\n\ndef matplotlib_style_context(style_name=None, after_reset=False):\n \"\"\"Returns a context manager for controlling matplotlib style.\n\n Arguments are passed to matplotlib.style.context() if specified. 
Defaults\n to setting \"classic\" style, after resetting to the default config parameters.\n\n On older matplotlib versions (<=1.5.0) where matplotlib.style isn't\n available, returns a dummy context manager.\n \"\"\"\n if style_name is None:\n style_name = {\n 'mathtext.fontset': 'cm',\n 'mathtext.fallback_to_cm': True,\n }\n try:\n import matplotlib.style\n return matplotlib.style.context(style_name, after_reset=after_reset)\n except ImportError:\n pass\n return dummy_context_manager()\n\ninteractivity = False\n\n\"\"\"Sets the condition that interactive backends can be used.\"\"\"\ndef toggle_interactivity():\n global interactivity\n interactivity = not interactivity\n if interactivity is True:\n if '__IPYTHON__' in dir(builtins):\n import IPython\n shell = IPython.get_ipython()\n shell.magic('matplotlib')\n else:\n import matplotlib\n matplotlib.interactive(True)\n\ndef get_interactivity():\n return interactivity\n\ndef setdefaultattr(obj, name, value):\n \"\"\"Set attribute with *name* on *obj* with *value* if it doesn't exist yet\n\n Analogous to dict.setdefault\n \"\"\"\n if not hasattr(obj, name):\n setattr(obj, name, value)\n return getattr(obj, name)\n\ndef parse_h5_attr(f, attr):\n \"\"\"A Python3-safe function for getting hdf5 attributes.\n\n If an attribute is supposed to be a string, this will return it as such.\n \"\"\"\n val = f.attrs.get(attr, None)\n if isinstance(val, bytes):\n return val.decode('utf8')\n else:\n return val\n\ndef issue_deprecation_warning(msg, stacklevel=3):\n from numpy import VisibleDeprecationWarning\n warnings.warn(msg, VisibleDeprecationWarning, stacklevel=stacklevel)\n\ndef obj_length(v):\n if iterable(v):\n return len(v)\n else:\n # If something isn't iterable, we return 0 \n # to signify zero length (aka a scalar).\n return 0\n\ndef handle_mks_cgs(values, field_units):\n try:\n values = values.to(field_units)\n except YTEquivalentDimsError as e:\n values = values.to_equivalent(e.new_units, e.base)\n return values\n\ndef validate_3d_array(obj):\n if not iterable(obj) or len(obj) != 3:\n raise TypeError(\"Expected an array of size (3,), received '%s' of \"\n \"length %s\" % (str(type(obj)).split(\"'\")[1], len(obj)))\n\ndef validate_float(obj):\n \"\"\"Validates if the passed argument is a float value.\n\n Raises an exception if `obj` is a single float value\n or a YTQunatity of size 1.\n\n Parameters\n ----------\n obj : Any\n Any argument which needs to be checked for a single float value.\n\n Raises\n ------\n TypeError\n Raised if `obj` is not a single float value or YTQunatity\n\n Examples\n --------\n >>> validate_float(1)\n >>> validate_float(1.50)\n >>> validate_float(YTQuantity(1,\"cm\"))\n >>> validate_float((1,\"cm\"))\n >>> validate_float([1, 1, 1])\n Traceback (most recent call last):\n ...\n TypeError: Expected a numeric value (or size-1 array), received 'list' of length 3\n\n >>> validate_float([YTQuantity(1, \"cm\"), YTQuantity(2,\"cm\")])\n Traceback (most recent call last):\n ...\n TypeError: Expected a numeric value (or size-1 array), received 'list' of length 2\n \"\"\"\n if isinstance(obj, tuple):\n if len(obj) != 2 or not isinstance(obj[0], numeric_type)\\\n or not isinstance(obj[1], string_types):\n raise TypeError(\"Expected a numeric value (or tuple of format \"\n \"(float, String)), received an inconsistent tuple \"\n \"'%s'.\" % str(obj))\n else:\n return\n if iterable(obj) and (len(obj) != 1 or not isinstance(obj[0], numeric_type)):\n raise TypeError(\"Expected a numeric value (or size-1 array), \"\n \"received '%s' of 
length %s\"\n % (str(type(obj)).split(\"'\")[1], len(obj)))\n\n\ndef validate_iterable(obj):\n if obj is not None and not iterable(obj):\n raise TypeError(\"Expected an iterable object,\"\n \" received '%s'\" % str(type(obj)).split(\"'\")[1])\n\ndef validate_object(obj, data_type):\n if obj is not None and not isinstance(obj, data_type):\n raise TypeError(\"Expected an object of '%s' type, received '%s'\"\n % (str(data_type).split(\"'\")[1],\n str(type(obj)).split(\"'\")[1]))\n\ndef validate_axis(ds, axis):\n if ds is not None:\n valid_axis = ds.coordinates.axis_name.keys()\n else:\n valid_axis = [0, 1, 2, 'x', 'y', 'z', 'X', 'Y', 'Z']\n if axis not in valid_axis:\n raise TypeError(\"Expected axis of int or char type (can be %s), \"\n \"received '%s'.\" % (list(valid_axis), axis))\n\ndef validate_center(center):\n if isinstance(center, string_types):\n c = center.lower()\n if c not in [\"c\", \"center\", \"m\", \"max\", \"min\"] \\\n and not c.startswith(\"max_\") and not c.startswith(\"min_\"):\n raise TypeError(\"Expected 'center' to be in ['c', 'center', \"\n \"'m', 'max', 'min'] or the prefix to be \"\n \"'max_'/'min_', received '%s'.\" % center)\n elif not isinstance(center, (numeric_type, YTQuantity)) \\\n and not iterable(center):\n raise TypeError(\"Expected 'center' to be a numeric object of type \"\n \"list/tuple/np.ndarray/YTArray/YTQuantity, \"\n \"received '%s'.\" % str(type(center)).split(\"'\")[1])\n", "path": "yt/funcs.py" } ]
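Given the `get_yt_version` shown above, one minimal Python-2/3-compatible fix is to decode only when the value actually supports it. This is a sketch assuming it lives in `yt/funcs.py` alongside `get_git_version` and the module-level `os` import, and is not necessarily the exact patch that landed:

```python
def get_yt_version():
    try:
        from yt.__hg_version__ import hg_version
        return hg_version
    except ImportError:
        pass
    import pkg_resources
    yt_provider = pkg_resources.get_provider("yt")
    path = os.path.dirname(yt_provider.module_path)
    version = get_git_version(path)
    if version is None:
        return version
    version = version[:12].strip()
    # GitPython returns str on Python 3; only decode the Python 2 bytes case.
    if hasattr(version, "decode"):
        version = version.decode("utf-8")
    return version
```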
[ { "content": "\"\"\"\nUseful functions. If non-original, see function for citation.\n\n\n\n\"\"\"\nfrom __future__ import print_function\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, yt Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nimport errno\nfrom yt.extern.six import string_types\nfrom yt.extern.six.moves import input, builtins\nimport time\nimport inspect\nimport traceback\nimport sys\nimport pdb\nimport os\nimport re\nimport contextlib\nimport warnings\nimport struct\nimport subprocess\nimport numpy as np\nimport itertools\nimport base64\nimport numpy\nimport matplotlib\nimport getpass\nfrom math import floor, ceil\nfrom numbers import Number as numeric_type\n\nfrom yt.extern.six.moves import urllib\nfrom yt.utilities.logger import ytLogger as mylog\nfrom yt.utilities.lru_cache import lru_cache\nfrom yt.utilities.exceptions import \\\n YTInvalidWidthError, \\\n YTEquivalentDimsError\nfrom yt.extern.tqdm import tqdm\nfrom yt.units.yt_array import YTArray, YTQuantity\nfrom functools import wraps\n\n# Some functions for handling sequences and other types\n\ndef iterable(obj):\n \"\"\"\n Grabbed from Python Cookbook / matplotlib.cbook. Returns true/false for\n *obj* iterable.\n \"\"\"\n try: len(obj)\n except: return False\n return True\n\ndef ensure_list(obj):\n \"\"\"\n This function ensures that *obj* is a list. Typically used to convert a\n string to a list, for instance ensuring the *fields* as an argument is a\n list.\n \"\"\"\n if obj is None:\n return [obj]\n if not isinstance(obj, list):\n return [obj]\n return obj\n\ndef ensure_numpy_array(obj):\n \"\"\"\n This function ensures that *obj* is a numpy array. Typically used to\n convert scalar, list or tuple argument passed to functions using Cython.\n \"\"\"\n if isinstance(obj, np.ndarray):\n if obj.shape == ():\n return np.array([obj])\n # We cast to ndarray to catch ndarray subclasses\n return np.array(obj)\n elif isinstance(obj, (list, tuple)):\n return np.asarray(obj)\n else:\n return np.asarray([obj])\n\ndef ensure_tuple(obj):\n \"\"\"\n This function ensures that *obj* is a tuple. 
Typically used to convert\n scalar, list, or array arguments specified by a user in a context where\n we assume a tuple internally\n \"\"\"\n if isinstance(obj, tuple):\n return obj\n elif isinstance(obj, (list, np.ndarray)):\n return tuple(obj)\n else:\n return (obj,)\n\ndef read_struct(f, fmt):\n \"\"\"\n This reads a struct, and only that struct, from an open file.\n \"\"\"\n s = f.read(struct.calcsize(fmt))\n return struct.unpack(fmt, s)\n\ndef just_one(obj):\n # If we have an iterable, sometimes we only want one item\n if hasattr(obj,'flat'):\n if isinstance(obj, YTArray):\n return YTQuantity(obj.flat[0], obj.units, registry=obj.units.registry)\n return obj.flat[0]\n elif iterable(obj):\n return obj[0]\n return obj\n\n\ndef compare_dicts(dict1, dict2):\n if not set(dict1) <= set(dict2):\n return False\n for key in dict1.keys():\n if dict1[key] is not None and dict2[key] is not None:\n if isinstance(dict1[key], dict):\n if compare_dicts(dict1[key], dict2[key]):\n continue\n else:\n return False\n try:\n comparison = (dict1[key] == dict2[key]).all()\n except AttributeError:\n comparison = (dict1[key] == dict2[key])\n if not comparison:\n return False\n return True\n\n# Taken from\n# http://www.goldb.org/goldblog/2008/02/06/PythonConvertSecsIntoHumanReadableTimeStringHHMMSS.aspx\ndef humanize_time(secs):\n \"\"\"\n Takes *secs* and returns a nicely formatted string\n \"\"\"\n mins, secs = divmod(secs, 60)\n hours, mins = divmod(mins, 60)\n return '%02d:%02d:%02d' % (hours, mins, secs)\n\n#\n# Some function wrappers that come in handy once in a while\n#\n\n# we use the resource module to get the memory page size\n\ntry:\n import resource\nexcept ImportError:\n pass\n\ndef get_memory_usage(subtract_share = False):\n \"\"\"\n Returning resident size in megabytes\n \"\"\"\n pid = os.getpid()\n try:\n pagesize = resource.getpagesize()\n except NameError:\n return -1024\n status_file = \"/proc/%s/statm\" % (pid)\n if not os.path.isfile(status_file):\n return -1024\n line = open(status_file).read()\n size, resident, share, text, library, data, dt = [int(i) for i in line.split()]\n if subtract_share: resident -= share\n return resident * pagesize / (1024 * 1024) # return in megs\n\ndef time_execution(func):\n r\"\"\"\n Decorator for seeing how long a given function takes, depending on whether\n or not the global 'yt.timefunctions' config parameter is set.\n \"\"\"\n @wraps(func)\n def wrapper(*arg, **kw):\n t1 = time.time()\n res = func(*arg, **kw)\n t2 = time.time()\n mylog.debug('%s took %0.3f s', func.__name__, (t2-t1))\n return res\n from yt.config import ytcfg\n if ytcfg.getboolean(\"yt\",\"timefunctions\") is True:\n return wrapper\n else:\n return func\n\ndef print_tb(func):\n \"\"\"\n This function is used as a decorate on a function to have the calling stack\n printed whenever that function is entered.\n\n This can be used like so:\n\n .. code-block:: python\n\n @print_tb\n def some_deeply_nested_function(...):\n\n \"\"\"\n @wraps(func)\n def run_func(*args, **kwargs):\n traceback.print_stack()\n return func(*args, **kwargs)\n return run_func\n\ndef rootonly(func):\n \"\"\"\n This is a decorator that, when used, will only call the function on the\n root processor and then broadcast the results of the function to all other\n processors.\n\n This can be used like so:\n\n .. 
code-block:: python\n\n @rootonly\n def some_root_only_function(...):\n\n \"\"\"\n from yt.config import ytcfg\n @wraps(func)\n def check_parallel_rank(*args, **kwargs):\n if ytcfg.getint(\"yt\",\"__topcomm_parallel_rank\") > 0:\n return\n return func(*args, **kwargs)\n return check_parallel_rank\n\ndef rootloginfo(*args):\n from yt.config import ytcfg\n if ytcfg.getint(\"yt\", \"__topcomm_parallel_rank\") > 0: return\n mylog.info(*args)\n\ndef deprecate(replacement):\n def real_deprecate(func):\n \"\"\"\n This decorator issues a deprecation warning.\n\n This can be used like so:\n\n .. code-block:: python\n\n @deprecate(\"new_function\")\n def some_really_old_function(...):\n\n \"\"\"\n @wraps(func)\n def run_func(*args, **kwargs):\n message = \"%s has been deprecated and may be removed without notice!\"\n if replacement is not None:\n message += \" Use %s instead.\" % replacement\n warnings.warn(message % func.__name__, DeprecationWarning,\n stacklevel=2)\n func(*args, **kwargs)\n return run_func\n return real_deprecate\n\ndef pdb_run(func):\n \"\"\"\n This decorator inserts a pdb session on top of the call-stack into a\n function.\n\n This can be used like so:\n\n .. code-block:: python\n\n @pdb_run\n def some_function_to_debug(...):\n\n \"\"\"\n @wraps(func)\n def wrapper(*args, **kw):\n pdb.runcall(func, *args, **kw)\n return wrapper\n\n__header = \"\"\"\n== Welcome to the embedded IPython Shell ==\n\n You are currently inside the function:\n %(fname)s\n\n Defined in:\n %(filename)s:%(lineno)s\n\"\"\"\n\ndef insert_ipython(num_up=1):\n \"\"\"\n Placed inside a function, this will insert an IPython interpreter at that\n current location. This will enabled detailed inspection of the current\n execution environment, as well as (optional) modification of that environment.\n *num_up* refers to how many frames of the stack get stripped off, and\n defaults to 1 so that this function itself is stripped off.\n \"\"\"\n import IPython\n from IPython.terminal.embed import InteractiveShellEmbed\n try:\n from traitlets.config.loader import Config\n except ImportError:\n from IPython.config.loader import Config\n\n frame = inspect.stack()[num_up]\n loc = frame[0].f_locals.copy()\n glo = frame[0].f_globals\n dd = dict(fname = frame[3], filename = frame[1],\n lineno = frame[2])\n cfg = Config()\n cfg.InteractiveShellEmbed.local_ns = loc\n cfg.InteractiveShellEmbed.global_ns = glo\n IPython.embed(config=cfg, banner2 = __header % dd)\n ipshell = InteractiveShellEmbed(config=cfg)\n\n del ipshell\n\n\n#\n# Our progress bar types and how to get one\n#\n\nclass TqdmProgressBar(object):\n # This is a drop in replacement for pbar\n # called tqdm\n def __init__(self,title, maxval):\n self._pbar = tqdm(leave=True, total=maxval, desc=title)\n self.i = 0\n def update(self, i=None):\n if i is None:\n i = self.i + 1\n n = i - self.i\n self.i = i\n self._pbar.update(n)\n def finish(self):\n self._pbar.close()\n\nclass DummyProgressBar(object):\n # This progressbar gets handed if we don't\n # want ANY output\n def __init__(self, *args, **kwargs):\n return\n def update(self, *args, **kwargs):\n return\n def finish(self, *args, **kwargs):\n return\n\nclass ParallelProgressBar(object):\n # This is just a simple progress bar\n # that prints on start/stop\n def __init__(self, title, maxval):\n self.title = title\n mylog.info(\"Starting '%s'\", title)\n def update(self, *args, **kwargs):\n return\n def finish(self):\n mylog.info(\"Finishing '%s'\", self.title)\n\nclass GUIProgressBar(object):\n def __init__(self, title, 
maxval):\n import wx\n self.maxval = maxval\n self.last = 0\n self._pbar = wx.ProgressDialog(\"Working...\",\n title, maximum=maxval,\n style=wx.PD_REMAINING_TIME|wx.PD_ELAPSED_TIME|wx.PD_APP_MODAL)\n def update(self, val):\n # An update is only meaningful if it's on the order of 1/100 or greater\n if ceil(100*self.last / self.maxval) + 1 == \\\n floor(100*val / self.maxval) or val == self.maxval:\n self._pbar.Update(val)\n self.last = val\n def finish(self):\n self._pbar.Destroy()\n\ndef get_pbar(title, maxval, parallel=False):\n \"\"\"\n This returns a progressbar of the most appropriate type, given a *title*\n and a *maxval*.\n \"\"\"\n maxval = max(maxval, 1)\n from yt.config import ytcfg\n if ytcfg.getboolean(\"yt\", \"suppressStreamLogging\") or \\\n ytcfg.getboolean(\"yt\", \"__withintesting\") or \\\n maxval == 1: \\\n return DummyProgressBar()\n elif ytcfg.getboolean(\"yt\", \"__parallel\"):\n # If parallel is True, update progress on root only.\n if parallel:\n if is_root():\n return TqdmProgressBar(title, maxval)\n else:\n return DummyProgressBar()\n else:\n return ParallelProgressBar(title, maxval)\n pbar = TqdmProgressBar(title,maxval)\n return pbar\n\ndef only_on_root(func, *args, **kwargs):\n \"\"\"\n This function accepts a *func*, a set of *args* and *kwargs* and then only\n on the root processor calls the function. All other processors get \"None\"\n handed back.\n \"\"\"\n from yt.config import ytcfg\n if kwargs.pop(\"global_rootonly\", False):\n cfg_option = \"__global_parallel_rank\"\n else:\n cfg_option = \"__topcomm_parallel_rank\"\n if not ytcfg.getboolean(\"yt\",\"__parallel\"):\n return func(*args,**kwargs)\n if ytcfg.getint(\"yt\", cfg_option) > 0: return\n return func(*args, **kwargs)\n\ndef is_root():\n \"\"\"\n This function returns True if it is on the root processor of the\n topcomm and False otherwise.\n \"\"\"\n from yt.config import ytcfg\n cfg_option = \"__topcomm_parallel_rank\"\n if not ytcfg.getboolean(\"yt\",\"__parallel\"):\n return True\n if ytcfg.getint(\"yt\", cfg_option) > 0:\n return False\n return True\n\n\n#\n# Our signal and traceback handling functions\n#\n\ndef signal_print_traceback(signo, frame):\n print(traceback.print_stack(frame))\n\ndef signal_problem(signo, frame):\n raise RuntimeError()\n\ndef signal_ipython(signo, frame):\n insert_ipython(2)\n\ndef paste_traceback(exc_type, exc, tb):\n \"\"\"\n This is a traceback handler that knows how to paste to the pastebin.\n Should only be used in sys.excepthook.\n \"\"\"\n sys.__excepthook__(exc_type, exc, tb)\n from yt.extern.six.moves import StringIO, xmlrpc_client\n p = xmlrpc_client.ServerProxy(\n \"http://paste.yt-project.org/xmlrpc/\",\n allow_none=True)\n s = StringIO()\n traceback.print_exception(exc_type, exc, tb, file=s)\n s = s.getvalue()\n ret = p.pastes.newPaste('pytb', s, None, '', '', True)\n print()\n print(\"Traceback pasted to http://paste.yt-project.org/show/%s\" % (ret))\n print()\n\ndef paste_traceback_detailed(exc_type, exc, tb):\n \"\"\"\n This is a traceback handler that knows how to paste to the pastebin.\n Should only be used in sys.excepthook.\n \"\"\"\n import cgitb\n from yt.extern.six.moves import StringIO, xmlrpc_client\n s = StringIO()\n handler = cgitb.Hook(format=\"text\", file = s)\n handler(exc_type, exc, tb)\n s = s.getvalue()\n print(s)\n p = xmlrpc_client.ServerProxy(\n \"http://paste.yt-project.org/xmlrpc/\",\n allow_none=True)\n ret = p.pastes.newPaste('text', s, None, '', '', True)\n print()\n print(\"Traceback pasted to 
http://paste.yt-project.org/show/%s\" % (ret))\n print()\n\n_ss = \"fURbBUUBE0cLXgETJnZgJRMXVhVGUQpQAUBuehQMUhJWRFFRAV1ERAtBXw1dAxMLXT4zXBFfABNN\\nC0ZEXw1YUURHCxMXVlFERwxWCQw=\\n\"\ndef _rdbeta(key):\n enc_s = base64.decodestring(_ss)\n dec_s = ''.join([ chr(ord(a) ^ ord(b)) for a, b in zip(enc_s, itertools.cycle(key)) ])\n print(dec_s)\n\n#\n# Some exceptions\n#\n\nclass NoCUDAException(Exception):\n pass\n\nclass YTEmptyClass(object):\n pass\n\ndef update_hg_or_git(path):\n if os.path.exists(os.sep.join([path, '.hg'])):\n update_hg(path)\n elif os.path.exists(os.sep.join([path, '.git'])):\n update_git(path)\n\ndef update_git(path):\n try:\n import git\n except ImportError:\n print(\"Updating and precise version information requires \")\n print(\"gitpython to be installed.\")\n print(\"Try: pip install gitpython\")\n return -1\n with open(os.path.join(path, \"yt_updater.log\"), \"a\") as f:\n repo = git.Repo(path)\n if repo.is_dirty(untracked_files=True):\n print(\"Changes have been made to the yt source code so I won't \")\n print(\"update the code. You will have to do this yourself.\")\n print(\"Here's a set of sample commands:\")\n print(\"\")\n print(\" $ cd %s\" % (path))\n print(\" $ git stash\")\n print(\" $ git checkout master\")\n print(\" $ git pull\")\n print(\" $ git stash pop\")\n print(\" $ %s setup.py develop\" % (sys.executable))\n print(\"\")\n return 1\n if repo.active_branch.name != 'master':\n print(\"yt repository is not tracking the master branch so I won't \")\n print(\"update the code. You will have to do this yourself.\")\n print(\"Here's a set of sample commands:\")\n print(\"\")\n print(\" $ cd %s\" % (path))\n print(\" $ git checkout master\")\n print(\" $ git pull\")\n print(\" $ %s setup.py develop\" % (sys.executable))\n print(\"\")\n return 1\n print(\"Updating the repository\")\n f.write(\"Updating the repository\\n\\n\")\n old_version = repo.git.rev_parse('HEAD', short=12)\n try:\n remote = repo.remotes.yt_upstream\n except AttributeError:\n remote = repo.create_remote(\n 'yt_upstream', url='https://github.com/yt-project/yt')\n remote.fetch()\n master = repo.heads.master\n master.set_tracking_branch(remote.refs.master)\n master.checkout()\n remote.pull()\n new_version = repo.git.rev_parse('HEAD', short=12)\n f.write('Updated from %s to %s\\n\\n' % (old_version, new_version))\n rebuild_modules(path, f)\n print('Updated successfully')\n\ndef update_hg(path):\n try:\n import hglib\n except ImportError:\n print(\"Updating requires python-hglib to be installed.\")\n print(\"Try: pip install python-hglib\")\n return -1\n f = open(os.path.join(path, \"yt_updater.log\"), \"a\")\n with hglib.open(path) as repo:\n repo.pull(b'https://bitbucket.org/yt_analysis/yt')\n ident = repo.identify().decode(\"utf-8\")\n if \"+\" in ident:\n print(\"Changes have been made to the yt source code so I won't \")\n print(\"update the code. 
You will have to do this yourself.\")\n print(\"Here's a set of sample commands:\")\n print(\"\")\n print(\" $ cd %s\" % (path))\n print(\" $ hg up -C yt # This will delete any unsaved changes\")\n print(\" $ %s setup.py develop\" % (sys.executable))\n print(\"\")\n return 1\n print(\"Updating the repository\")\n f.write(\"Updating the repository\\n\\n\")\n books = repo.bookmarks()[0]\n books = [b[0].decode('utf8') for b in books]\n if 'master' in books:\n repo.update('master', check=True)\n else:\n repo.update('yt', check=True)\n f.write(\"Updated from %s to %s\\n\\n\" % (ident, repo.identify()))\n rebuild_modules(path, f)\n print(\"Updated successfully.\")\n\ndef rebuild_modules(path, f):\n f.write(\"Rebuilding modules\\n\\n\")\n p = subprocess.Popen([sys.executable, \"setup.py\", \"build_ext\", \"-i\"],\n cwd=path, stdout = subprocess.PIPE,\n stderr = subprocess.STDOUT)\n stdout, stderr = p.communicate()\n f.write(stdout.decode('utf-8'))\n f.write(\"\\n\\n\")\n if p.returncode:\n print(\"BROKEN: See %s\" % (os.path.join(path, \"yt_updater.log\")))\n sys.exit(1)\n f.write(\"Successful!\\n\")\n\n\ndef get_hg_or_git_version(path):\n if os.path.exists(os.sep.join([path, '.hg'])):\n return get_hg_version(path)\n elif os.path.exists(os.sep.join([path, '.git'])):\n return get_git_version(path)\n return None\n\ndef get_git_version(path):\n try:\n import git\n except ImportError:\n print(\"Updating and precise version information requires \")\n print(\"gitpython to be installed.\")\n print(\"Try: pip install gitpython\")\n return None\n try:\n repo = git.Repo(path)\n return repo.git.rev_parse('HEAD', short=12)\n except git.InvalidGitRepositoryError:\n # path is not a git repository\n return None\n\ndef get_hg_version(path):\n try:\n import hglib\n except ImportError:\n print(\"Updating and precise version information requires \")\n print(\"python-hglib to be installed.\")\n print(\"Try: pip install python-hglib\")\n return None\n try:\n with hglib.open(path) as repo:\n return repo.identify().decode('utf-8')\n except hglib.error.ServerError:\n # path is not an hg repository\n return None\n\ndef get_yt_version():\n try:\n from yt.__hg_version__ import hg_version\n return hg_version\n except ImportError:\n pass\n import pkg_resources\n yt_provider = pkg_resources.get_provider(\"yt\")\n path = os.path.dirname(yt_provider.module_path)\n version = get_git_version(path)\n if version is None:\n return version\n else:\n v_str = version[:12].strip()\n if hasattr(v_str, 'decode'):\n v_str = v_str.decode('utf-8')\n return v_str\n\ndef get_version_stack():\n version_info = {}\n version_info['yt'] = get_yt_version()\n version_info['numpy'] = numpy.version.version\n version_info['matplotlib'] = matplotlib.__version__\n return version_info\n\ndef get_script_contents():\n top_frame = inspect.stack()[-1]\n finfo = inspect.getframeinfo(top_frame[0])\n if finfo[2] != \"<module>\": return None\n if not os.path.exists(finfo[0]): return None\n try:\n contents = open(finfo[0]).read()\n except:\n contents = None\n return contents\n\ndef download_file(url, filename):\n requests = get_requests()\n if requests is None:\n return simple_download_file(url, filename)\n else:\n return fancy_download_file(url, filename, requests)\n\ndef fancy_download_file(url, filename, requests=None):\n response = requests.get(url, stream=True)\n total_length = response.headers.get('content-length')\n\n with open(filename, 'wb') as fh:\n if total_length is None:\n fh.write(response.content)\n else:\n blocksize = 4 * 1024 ** 2\n iterations = 
int(float(total_length)/float(blocksize))\n\n pbar = get_pbar(\n 'Downloading %s to %s ' % os.path.split(filename)[::-1],\n iterations)\n iteration = 0\n for chunk in response.iter_content(chunk_size=blocksize):\n fh.write(chunk)\n iteration += 1\n pbar.update(iteration)\n pbar.finish()\n return filename\n\ndef simple_download_file(url, filename):\n class MyURLopener(urllib.request.FancyURLopener):\n def http_error_default(self, url, fp, errcode, errmsg, headers):\n raise RuntimeError(\"Attempt to download file from %s failed with error %s: %s.\" % \\\n (url, errcode, errmsg))\n fn, h = MyURLopener().retrieve(url, filename)\n return fn\n\n# This code snippet is modified from Georg Brandl\ndef bb_apicall(endpoint, data, use_pass = True):\n uri = 'https://api.bitbucket.org/1.0/%s/' % endpoint\n # since bitbucket doesn't return the required WWW-Authenticate header when\n # making a request without Authorization, we cannot use the standard urllib2\n # auth handlers; we have to add the requisite header from the start\n if data is not None:\n data = urllib.parse.urlencode(data)\n req = urllib.request.Request(uri, data)\n if use_pass:\n username = input(\"Bitbucket Username? \")\n password = getpass.getpass()\n upw = '%s:%s' % (username, password)\n req.add_header('Authorization', 'Basic %s' % base64.b64encode(upw).strip())\n return urllib.request.urlopen(req).read()\n\ndef get_yt_supp():\n import hglib\n supp_path = os.path.join(os.environ[\"YT_DEST\"], \"src\",\n \"yt-supplemental\")\n # Now we check that the supplemental repository is checked out.\n if not os.path.isdir(supp_path):\n print()\n print(\"*** The yt-supplemental repository is not checked ***\")\n print(\"*** out. I can do this for you, but because this ***\")\n print(\"*** is a delicate act, I require you to respond ***\")\n print(\"*** to the prompt with the word 'yes'. ***\")\n print()\n response = input(\"Do you want me to try to check it out? \")\n if response != \"yes\":\n print()\n print(\"Okay, I understand. You can check it out yourself.\")\n print(\"This command will do it:\")\n print()\n print(\"$ hg clone http://bitbucket.org/yt_analysis/yt-supplemental/ \", end=' ')\n print(\"%s\" % (supp_path))\n print()\n sys.exit(1)\n rv = hglib.clone(\"http://bitbucket.org/yt_analysis/yt-supplemental/\", \n supp_path)\n if rv:\n print(\"Something has gone wrong. 
Quitting.\")\n sys.exit(1)\n # Now we think we have our supplemental repository.\n return supp_path\n\ndef fix_length(length, ds):\n registry = ds.unit_registry\n if isinstance(length, YTArray):\n if registry is not None:\n length.units.registry = registry\n return length.in_units(\"code_length\")\n if isinstance(length, numeric_type):\n return YTArray(length, 'code_length', registry=registry)\n length_valid_tuple = isinstance(length, (list, tuple)) and len(length) == 2\n unit_is_string = isinstance(length[1], string_types)\n length_is_number = (isinstance(length[0], numeric_type) and not\n isinstance(length[0], YTArray))\n if length_valid_tuple and unit_is_string and length_is_number:\n return YTArray(*length, registry=registry)\n else:\n raise RuntimeError(\"Length %s is invalid\" % str(length))\n\[email protected]\ndef parallel_profile(prefix):\n r\"\"\"A context manager for profiling parallel code execution using cProfile\n\n This is a simple context manager that automatically profiles the execution\n of a snippet of code.\n\n Parameters\n ----------\n prefix : string\n A string name to prefix outputs with.\n\n Examples\n --------\n\n >>> with parallel_profile('my_profile'):\n ... yt.PhasePlot(ds.all_data(), 'density', 'temperature', 'cell_mass')\n \"\"\"\n import cProfile\n from yt.config import ytcfg\n fn = \"%s_%04i_%04i.cprof\" % (prefix,\n ytcfg.getint(\"yt\", \"__topcomm_parallel_size\"),\n ytcfg.getint(\"yt\", \"__topcomm_parallel_rank\"))\n p = cProfile.Profile()\n p.enable()\n yield fn\n p.disable()\n p.dump_stats(fn)\n\ndef get_num_threads():\n from .config import ytcfg\n nt = ytcfg.getint(\"yt\",\"numthreads\")\n if nt < 0:\n return os.environ.get(\"OMP_NUM_THREADS\", 0)\n return nt\n\ndef fix_axis(axis, ds):\n return ds.coordinates.axis_id.get(axis, axis)\n\ndef get_image_suffix(name):\n suffix = os.path.splitext(name)[1]\n return suffix if suffix in ['.png', '.eps', '.ps', '.pdf'] else ''\n\ndef get_output_filename(name, keyword, suffix):\n r\"\"\"Return an appropriate filename for output.\n\n With a name provided by the user, this will decide how to \n appropriately name the output file by the following rules:\n\n 1. if name is None, the filename will be the keyword plus \n the suffix.\n 2. if name ends with \"/\", assume name is a directory and \n the file will be named name/(keyword+suffix). If the\n directory does not exist, first try to create it and\n raise an exception if an error occurs.\n 3. 
if name does not end in the suffix, add the suffix.\n \n Parameters\n ----------\n name : str\n A filename given by the user.\n keyword : str\n A default filename prefix if name is None.\n suffix : str\n Suffix that must appear at end of the filename.\n This will be added if not present.\n\n Examples\n --------\n\n >>> print get_output_filename(None, \"Projection_x\", \".png\")\n Projection_x.png\n >>> print get_output_filename(\"my_file\", \"Projection_x\", \".png\")\n my_file.png\n >>> print get_output_filename(\"my_file/\", \"Projection_x\", \".png\")\n my_file/Projection_x.png\n \n \"\"\"\n if name is None:\n name = keyword\n name = os.path.expanduser(name)\n if name[-1] == os.sep and not os.path.isdir(name):\n ensure_dir(name)\n if os.path.isdir(name):\n name = os.path.join(name, keyword)\n if not name.endswith(suffix):\n name += suffix\n return name\n\ndef ensure_dir_exists(path):\n r\"\"\"Create all directories in path recursively in a parallel safe manner\"\"\"\n my_dir = os.path.dirname(path)\n # If path is a file in the current directory, like \"test.txt\", then my_dir\n # would be an empty string, resulting in FileNotFoundError when passed to\n # ensure_dir. Let's avoid that.\n if my_dir:\n ensure_dir(my_dir)\n\ndef ensure_dir(path):\n r\"\"\"Parallel safe directory maker.\"\"\"\n if os.path.exists(path):\n return path\n\n try:\n os.makedirs(path)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n return path\n\ndef validate_width_tuple(width):\n if not iterable(width) or len(width) != 2:\n raise YTInvalidWidthError(\n \"width (%s) is not a two element tuple\" % width)\n is_numeric = isinstance(width[0], numeric_type)\n length_has_units = isinstance(width[0], YTArray)\n unit_is_string = isinstance(width[1], string_types)\n if not is_numeric or length_has_units and unit_is_string:\n msg = \"width (%s) is invalid. \" % str(width)\n msg += \"Valid widths look like this: (12, 'au')\"\n raise YTInvalidWidthError(msg)\n\n_first_cap_re = re.compile('(.)([A-Z][a-z]+)')\n_all_cap_re = re.compile('([a-z0-9])([A-Z])')\n\n@lru_cache(maxsize=128, typed=False)\ndef camelcase_to_underscore(name):\n s1 = _first_cap_re.sub(r'\\1_\\2', name)\n return _all_cap_re.sub(r'\\1_\\2', s1).lower()\n\ndef set_intersection(some_list):\n if len(some_list) == 0: return set([])\n # This accepts a list of iterables, which we get the intersection of.\n s = set(some_list[0])\n for l in some_list[1:]:\n s.intersection_update(l)\n return s\n\[email protected]\ndef memory_checker(interval = 15, dest = None):\n r\"\"\"This is a context manager that monitors memory usage.\n\n Parameters\n ----------\n interval : int\n The number of seconds between printing the current memory usage in\n gigabytes of the current Python interpreter.\n\n Examples\n --------\n\n >>> with memory_checker(10):\n ... arr = np.zeros(1024*1024*1024, dtype=\"float64\")\n ... time.sleep(15)\n ... 
del arr\n \"\"\"\n import threading\n if dest is None:\n dest = sys.stdout\n class MemoryChecker(threading.Thread):\n def __init__(self, event, interval):\n self.event = event\n self.interval = interval\n threading.Thread.__init__(self)\n\n def run(self):\n while not self.event.wait(self.interval):\n print(\"MEMORY: %0.3e gb\" % (get_memory_usage()/1024.), file=dest)\n\n e = threading.Event()\n mem_check = MemoryChecker(e, interval)\n mem_check.start()\n try:\n yield\n finally:\n e.set()\n\n\ndef deprecated_class(cls):\n @wraps(cls)\n def _func(*args, **kwargs):\n # Note we use SyntaxWarning because by default, DeprecationWarning is\n # not shown.\n warnings.warn(\n \"This usage is deprecated. Please use %s instead.\" % cls.__name__,\n SyntaxWarning, stacklevel=2)\n return cls(*args, **kwargs)\n return _func\n\ndef enable_plugins():\n \"\"\"Forces the plugins file to be parsed.\n\n This plugin file is a means of creating custom fields, quantities,\n data objects, colormaps, and other code classes and objects to be used\n in yt scripts without modifying the yt source directly.\n\n The file must be located at ``$HOME/.config/yt/my_plugins.py``.\n\n Warning: when you use this function, your script will only be reproducible\n if you also provide the ``my_plugins.py`` file.\n \"\"\"\n import yt\n from yt.fields.my_plugin_fields import my_plugins_fields\n from yt.config import ytcfg, CONFIG_DIR\n my_plugin_name = ytcfg.get(\"yt\", \"pluginfilename\")\n\n # In the following order if pluginfilename is: an absolute path, located in\n # the CONFIG_DIR, located in an obsolete config dir.\n _fn = None\n old_config_dir = os.path.join(os.path.expanduser('~'), '.yt')\n for base_prefix in ('', CONFIG_DIR, old_config_dir):\n if os.path.isfile(os.path.join(base_prefix, my_plugin_name)):\n _fn = os.path.join(base_prefix, my_plugin_name)\n break\n\n if _fn is not None and os.path.isfile(_fn):\n if _fn.startswith(old_config_dir):\n mylog.warn(\n 'Your plugin file is located in a deprecated directory. '\n 'Please move it from %s to %s',\n os.path.join(old_config_dir, my_plugin_name),\n os.path.join(CONFIG_DIR, my_plugin_name))\n mylog.info(\"Loading plugins from %s\", _fn)\n ytdict = yt.__dict__\n execdict = ytdict.copy()\n execdict['add_field'] = my_plugins_fields.add_field\n with open(_fn) as f:\n code = compile(f.read(), _fn, 'exec')\n exec(code, execdict, execdict)\n ytnamespace = list(ytdict.keys())\n for k in execdict.keys():\n if k not in ytnamespace:\n if callable(execdict[k]):\n setattr(yt, k, execdict[k])\n\ndef fix_unitary(u):\n if u == '1':\n return 'unitary'\n else:\n return u\n\ndef get_hash(infile, algorithm='md5', BLOCKSIZE=65536):\n \"\"\"Generate file hash without reading in the entire file at once.\n\n Original code licensed under MIT. Source:\n http://pythoncentral.io/hashing-files-with-python/\n\n Parameters\n ----------\n infile : str\n File of interest (including the path).\n algorithm : str (optional)\n Hash algorithm of choice. Defaults to 'md5'.\n BLOCKSIZE : int (optional)\n How much data in bytes to read in at once.\n\n Returns\n -------\n hash : str\n The hash of the file.\n\n Examples\n --------\n >>> import yt.funcs as funcs\n >>> funcs.get_hash('/path/to/test.png')\n 'd38da04859093d430fa4084fd605de60'\n\n \"\"\"\n import hashlib\n\n try:\n hasher = getattr(hashlib, algorithm)()\n except:\n raise NotImplementedError(\"'%s' not available! 
Available algorithms: %s\" %\n (algorithm, hashlib.algorithms))\n\n filesize = os.path.getsize(infile)\n iterations = int(float(filesize)/float(BLOCKSIZE))\n\n pbar = get_pbar('Generating %s hash' % algorithm, iterations)\n\n iter = 0\n with open(infile,'rb') as f:\n buf = f.read(BLOCKSIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = f.read(BLOCKSIZE)\n iter += 1\n pbar.update(iter)\n pbar.finish()\n\n return hasher.hexdigest()\n\ndef get_brewer_cmap(cmap):\n \"\"\"Returns a colorbrewer colormap from palettable\"\"\"\n try:\n import brewer2mpl\n except ImportError:\n brewer2mpl = None\n try:\n import palettable\n except ImportError:\n palettable = None\n if palettable is not None:\n bmap = palettable.colorbrewer.get_map(*cmap)\n elif brewer2mpl is not None:\n warnings.warn(\"Using brewer2mpl colormaps is deprecated. \"\n \"Please install the successor to brewer2mpl, \"\n \"palettable, with `pip install palettable`. \"\n \"Colormap tuple names remain unchanged.\")\n bmap = brewer2mpl.get_map(*cmap)\n else:\n raise RuntimeError(\n \"Please install palettable to use colorbrewer colormaps\")\n return bmap.get_mpl_colormap(N=cmap[2])\n\ndef get_requests():\n try:\n import requests\n except ImportError:\n requests = None\n return requests\n\[email protected]\ndef dummy_context_manager(*args, **kwargs):\n yield\n\ndef matplotlib_style_context(style_name=None, after_reset=False):\n \"\"\"Returns a context manager for controlling matplotlib style.\n\n Arguments are passed to matplotlib.style.context() if specified. Defaults\n to setting \"classic\" style, after resetting to the default config parameters.\n\n On older matplotlib versions (<=1.5.0) where matplotlib.style isn't\n available, returns a dummy context manager.\n \"\"\"\n if style_name is None:\n style_name = {\n 'mathtext.fontset': 'cm',\n 'mathtext.fallback_to_cm': True,\n }\n try:\n import matplotlib.style\n return matplotlib.style.context(style_name, after_reset=after_reset)\n except ImportError:\n pass\n return dummy_context_manager()\n\ninteractivity = False\n\n\"\"\"Sets the condition that interactive backends can be used.\"\"\"\ndef toggle_interactivity():\n global interactivity\n interactivity = not interactivity\n if interactivity is True:\n if '__IPYTHON__' in dir(builtins):\n import IPython\n shell = IPython.get_ipython()\n shell.magic('matplotlib')\n else:\n import matplotlib\n matplotlib.interactive(True)\n\ndef get_interactivity():\n return interactivity\n\ndef setdefaultattr(obj, name, value):\n \"\"\"Set attribute with *name* on *obj* with *value* if it doesn't exist yet\n\n Analogous to dict.setdefault\n \"\"\"\n if not hasattr(obj, name):\n setattr(obj, name, value)\n return getattr(obj, name)\n\ndef parse_h5_attr(f, attr):\n \"\"\"A Python3-safe function for getting hdf5 attributes.\n\n If an attribute is supposed to be a string, this will return it as such.\n \"\"\"\n val = f.attrs.get(attr, None)\n if isinstance(val, bytes):\n return val.decode('utf8')\n else:\n return val\n\ndef issue_deprecation_warning(msg, stacklevel=3):\n from numpy import VisibleDeprecationWarning\n warnings.warn(msg, VisibleDeprecationWarning, stacklevel=stacklevel)\n\ndef obj_length(v):\n if iterable(v):\n return len(v)\n else:\n # If something isn't iterable, we return 0 \n # to signify zero length (aka a scalar).\n return 0\n\ndef handle_mks_cgs(values, field_units):\n try:\n values = values.to(field_units)\n except YTEquivalentDimsError as e:\n values = values.to_equivalent(e.new_units, e.base)\n return values\n\ndef 
validate_3d_array(obj):\n if not iterable(obj) or len(obj) != 3:\n raise TypeError(\"Expected an array of size (3,), received '%s' of \"\n \"length %s\" % (str(type(obj)).split(\"'\")[1], len(obj)))\n\ndef validate_float(obj):\n \"\"\"Validates if the passed argument is a float value.\n\n Raises an exception if `obj` is a single float value\n or a YTQunatity of size 1.\n\n Parameters\n ----------\n obj : Any\n Any argument which needs to be checked for a single float value.\n\n Raises\n ------\n TypeError\n Raised if `obj` is not a single float value or YTQunatity\n\n Examples\n --------\n >>> validate_float(1)\n >>> validate_float(1.50)\n >>> validate_float(YTQuantity(1,\"cm\"))\n >>> validate_float((1,\"cm\"))\n >>> validate_float([1, 1, 1])\n Traceback (most recent call last):\n ...\n TypeError: Expected a numeric value (or size-1 array), received 'list' of length 3\n\n >>> validate_float([YTQuantity(1, \"cm\"), YTQuantity(2,\"cm\")])\n Traceback (most recent call last):\n ...\n TypeError: Expected a numeric value (or size-1 array), received 'list' of length 2\n \"\"\"\n if isinstance(obj, tuple):\n if len(obj) != 2 or not isinstance(obj[0], numeric_type)\\\n or not isinstance(obj[1], string_types):\n raise TypeError(\"Expected a numeric value (or tuple of format \"\n \"(float, String)), received an inconsistent tuple \"\n \"'%s'.\" % str(obj))\n else:\n return\n if iterable(obj) and (len(obj) != 1 or not isinstance(obj[0], numeric_type)):\n raise TypeError(\"Expected a numeric value (or size-1 array), \"\n \"received '%s' of length %s\"\n % (str(type(obj)).split(\"'\")[1], len(obj)))\n\n\ndef validate_iterable(obj):\n if obj is not None and not iterable(obj):\n raise TypeError(\"Expected an iterable object,\"\n \" received '%s'\" % str(type(obj)).split(\"'\")[1])\n\ndef validate_object(obj, data_type):\n if obj is not None and not isinstance(obj, data_type):\n raise TypeError(\"Expected an object of '%s' type, received '%s'\"\n % (str(data_type).split(\"'\")[1],\n str(type(obj)).split(\"'\")[1]))\n\ndef validate_axis(ds, axis):\n if ds is not None:\n valid_axis = ds.coordinates.axis_name.keys()\n else:\n valid_axis = [0, 1, 2, 'x', 'y', 'z', 'X', 'Y', 'Z']\n if axis not in valid_axis:\n raise TypeError(\"Expected axis of int or char type (can be %s), \"\n \"received '%s'.\" % (list(valid_axis), axis))\n\ndef validate_center(center):\n if isinstance(center, string_types):\n c = center.lower()\n if c not in [\"c\", \"center\", \"m\", \"max\", \"min\"] \\\n and not c.startswith(\"max_\") and not c.startswith(\"min_\"):\n raise TypeError(\"Expected 'center' to be in ['c', 'center', \"\n \"'m', 'max', 'min'] or the prefix to be \"\n \"'max_'/'min_', received '%s'.\" % center)\n elif not isinstance(center, (numeric_type, YTQuantity)) \\\n and not iterable(center):\n raise TypeError(\"Expected 'center' to be a numeric object of type \"\n \"list/tuple/np.ndarray/YTArray/YTQuantity, \"\n \"received '%s'.\" % str(type(center)).split(\"'\")[1])\n", "path": "yt/funcs.py" } ]
diff --git a/yt/funcs.py b/yt/funcs.py index eefe83cd1b2..b8b1d6e7f68 100644 --- a/yt/funcs.py +++ b/yt/funcs.py @@ -649,7 +649,10 @@ def get_yt_version(): if version is None: return version else: - return version[:12].strip().decode('utf-8') + v_str = version[:12].strip() + if hasattr(v_str, 'decode'): + v_str = v_str.decode('utf-8') + return v_str def get_version_stack(): version_info = {}
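The diff above guards the `.decode()` call so the version id is handled whether the VCS layer hands back `bytes` or an already-decoded `str`. A minimal standalone sketch of the same idiom, with made-up inputs rather than real repository hashes:

```python
def normalize_version(raw_version):
    """Return the first 12 characters of a version id as text.

    Accepts either bytes (as some VCS bindings return) or str.
    """
    v_str = raw_version[:12].strip()
    # On Python 3 only bytes carries .decode(); calling it unconditionally
    # on a str is exactly the failure mode the patch above removes.
    if hasattr(v_str, "decode"):
        v_str = v_str.decode("utf-8")
    return v_str


# Both input forms normalize to the same text value:
assert normalize_version(b"abc123def456XYZ") == "abc123def456"
assert normalize_version("abc123def456XYZ") == "abc123def456"
```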
ansible-collections__community.general-7875
community.general.incus connection not working as inventory_hostname treated as literal
### Summary
In my environment I am connecting to an incus server via a remote client on OSX. Ansible, running on the OSX machine, is utilizing roles, and gets the inventory_hostname from the filename under the host_vars directory. I suspect this environment is causing inventory_hostname to be treated as a literal.

A very similar bug was fixed in community.general.lxd and can be found here: https://github.com/ansible-collections/community.general/pull/4912

I have already implemented the solution and will submit a pull request.

### Issue Type
Bug Report

### Component Name
incus.py connection plugin

### Ansible Version
```console (paste below)
ansible [core 2.16.2]
  config file = /Users/travis/workspace/IZUMANETWORKS/siteinfra/Ansible/work/ansible.cfg
  configured module search path = ['/Users/travis/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
  ansible python module location = /opt/homebrew/lib/python3.11/site-packages/ansible
  ansible collection location = /Users/travis/.ansible/collections:/usr/share/ansible/collections
  executable location = /opt/homebrew/bin/ansible
  python version = 3.11.7 (main, Dec 4 2023, 18:10:11) [Clang 15.0.0 (clang-1500.1.0.2.5)] (/opt/homebrew/opt/[email protected]/bin/python3.11)
  jinja version = 3.1.2
  libyaml = True
```

### Community.general Version
```console (paste below)
$ ansible-galaxy collection list community.general

# /Users/travis/.ansible/collections/ansible_collections
Collection        Version
----------------- -------
community.general 8.2.0
```

### Configuration
```console (paste below)
$ ansible-config dump --only-changed
CONFIG_FILE() = /Users/travis/workspace/IZUMANETWORKS/siteinfra/Ansible/work/ansible.cfg
DEFAULT_HASH_BEHAVIOUR(/Users/travis/workspace/IZUMANETWORKS/siteinfra/Ansible/work/ansible.cfg) = merge
DEFAULT_HOST_LIST(/Users/travis/workspace/IZUMANETWORKS/siteinfra/Ansible/work/ansible.cfg) = ['/Users/travis/workspace/IZUMANETWORKS/siteinfra/Ansible/work/inventory.ini']
EDITOR(env: EDITOR) = emacs
HOST_KEY_CHECKING(/Users/travis/workspace/IZUMANETWORKS/siteinfra/Ansible/work/ansible.cfg) = False
```

### OS / Environment
client: OSX
server: Ubuntu 22.04

### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
# host_var file named IzumaMercury.yaml
ansible_connection: community.general.incus
ansible_user: root
ansible_become: no
ansible_incus_remote: IzumaExplorer
```

### Expected Results
ansible-playbook -i inventories/tests/moffett.yaml setup_izuma_networks_vm_controllers_workers.yml

PLAY [vm_controllers] ****************************************************************************************************

TASK [Gathering Facts] ***************************************************************************************************
ok: [IzumaMercury]

### Actual Results
```console (paste below)
ansible-playbook -i inventories/tests/moffett.yaml setup_izuma_networks_vm_controllers_workers.yml

PLAY [vm_controllers] ****************************************************************************************************

TASK [Gathering Facts] ***************************************************************************************************
[WARNING]: The "community.general.incus" connection plugin has an improperly configured remote target value, forcing
"inventory_hostname" templated value instead of the string
fatal: [IzumaMercury]: UNREACHABLE! => {"changed": false, "msg": "instance not found: inventory_hostname", "unreachable": true}
```

### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
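The root cause reported here is that the plugin's `remote_addr` option lists only `ansible_host` and `ansible_incus_host` under `vars:`, so when neither is set, the documented default `inventory_hostname` is used as a literal string instead of being resolved (the one-line `vars:` addition in the diff further down fixes this). A rough sketch of that resolution order — this only loosely mimics Ansible's option lookup, it is not the real implementation, and the host data is invented:

```python
def resolve_remote_addr(host_vars, option_vars, default="inventory_hostname"):
    """Loosely mimic how a connection plugin option gets its value:
    try each variable listed under the option's ``vars:`` in turn,
    then fall back to the documented default -- as a literal string.
    """
    for name in option_vars:
        if name in host_vars:
            return host_vars[name]
    return default


host_vars = {"inventory_hostname": "IzumaMercury"}  # no ansible_host set

# Before the fix, inventory_hostname is not in the option's vars list,
# so the literal default string leaks through as the instance name:
print(resolve_remote_addr(host_vars, ["ansible_host", "ansible_incus_host"]))
# -> inventory_hostname

# With inventory_hostname added to the vars list, the real host name wins:
print(resolve_remote_addr(
    host_vars,
    ["inventory_hostname", "ansible_host", "ansible_incus_host"],
))
# -> IzumaMercury
```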
[ { "content": "# -*- coding: utf-8 -*-\n# Based on lxd.py (c) 2016, Matt Clay <[email protected]>\n# (c) 2023, Stephane Graber <[email protected]>\n# Copyright (c) 2023 Ansible Project\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = \"\"\"\n author: Stéphane Graber (@stgraber)\n name: incus\n short_description: Run tasks in Incus instances via the Incus CLI.\n description:\n - Run commands or put/fetch files to an existing Incus instance using Incus CLI.\n version_added: \"8.2.0\"\n options:\n remote_addr:\n description:\n - The instance identifier.\n default: inventory_hostname\n vars:\n - name: ansible_host\n - name: ansible_incus_host\n executable:\n description:\n - The shell to use for execution inside the instance.\n default: /bin/sh\n vars:\n - name: ansible_executable\n - name: ansible_incus_executable\n remote:\n description:\n - The name of the Incus remote to use (per C(incus remote list)).\n - Remotes are used to access multiple servers from a single client.\n default: local\n vars:\n - name: ansible_incus_remote\n project:\n description:\n - The name of the Incus project to use (per C(incus project list)).\n - Projects are used to divide the instances running on a server.\n default: default\n vars:\n - name: ansible_incus_project\n\"\"\"\n\nimport os\nfrom subprocess import call, Popen, PIPE\n\nfrom ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound\nfrom ansible.module_utils.common.process import get_bin_path\nfrom ansible.module_utils._text import to_bytes, to_text\nfrom ansible.plugins.connection import ConnectionBase\n\n\nclass Connection(ConnectionBase):\n \"\"\" Incus based connections \"\"\"\n\n transport = \"incus\"\n has_pipelining = True\n default_user = 'root'\n\n def __init__(self, play_context, new_stdin, *args, **kwargs):\n super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)\n\n self._incus_cmd = get_bin_path(\"incus\")\n\n if not self._incus_cmd:\n raise AnsibleError(\"incus command not found in PATH\")\n\n def _connect(self):\n \"\"\"connect to Incus (nothing to do here) \"\"\"\n super(Connection, self)._connect()\n\n if not self._connected:\n self._display.vvv(u\"ESTABLISH Incus CONNECTION FOR USER: root\",\n host=self._instance())\n self._connected = True\n\n def _instance(self):\n # Return only the leading part of the FQDN as the instance name\n # as Incus instance names cannot be a FQDN.\n return self.get_option('remote_addr').split(\".\")[0]\n\n def exec_command(self, cmd, in_data=None, sudoable=True):\n \"\"\" execute a command on the Incus host \"\"\"\n super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)\n\n self._display.vvv(u\"EXEC {0}\".format(cmd),\n host=self._instance())\n\n local_cmd = [\n self._incus_cmd,\n \"--project\", self.get_option(\"project\"),\n \"exec\",\n \"%s:%s\" % (self.get_option(\"remote\"), self._instance()),\n \"--\",\n self._play_context.executable, \"-c\", cmd]\n\n local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]\n in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')\n\n process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n stdout, stderr = process.communicate(in_data)\n\n stdout = to_text(stdout)\n stderr = to_text(stderr)\n\n if stderr == \"Error: Instance is 
not running.\\n\":\n raise AnsibleConnectionFailure(\"instance not running: %s\" %\n self._instance())\n\n if stderr == \"Error: Instance not found\\n\":\n raise AnsibleConnectionFailure(\"instance not found: %s\" %\n self._instance())\n\n return process.returncode, stdout, stderr\n\n def put_file(self, in_path, out_path):\n \"\"\" put a file from local to Incus \"\"\"\n super(Connection, self).put_file(in_path, out_path)\n\n self._display.vvv(u\"PUT {0} TO {1}\".format(in_path, out_path),\n host=self._instance())\n\n if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):\n raise AnsibleFileNotFound(\"input path is not a file: %s\" % in_path)\n\n local_cmd = [\n self._incus_cmd,\n \"--project\", self.get_option(\"project\"),\n \"file\", \"push\", \"--quiet\",\n in_path,\n \"%s:%s/%s\" % (self.get_option(\"remote\"),\n self._instance(),\n out_path)]\n\n local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]\n\n call(local_cmd)\n\n def fetch_file(self, in_path, out_path):\n \"\"\" fetch a file from Incus to local \"\"\"\n super(Connection, self).fetch_file(in_path, out_path)\n\n self._display.vvv(u\"FETCH {0} TO {1}\".format(in_path, out_path),\n host=self._instance())\n\n local_cmd = [\n self._incus_cmd,\n \"--project\", self.get_option(\"project\"),\n \"file\", \"pull\", \"--quiet\",\n \"%s:%s/%s\" % (self.get_option(\"remote\"),\n self._instance(),\n in_path),\n out_path]\n\n local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]\n\n call(local_cmd)\n\n def close(self):\n \"\"\" close the connection (nothing to do here) \"\"\"\n super(Connection, self).close()\n\n self._connected = False\n", "path": "plugins/connection/incus.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n# Based on lxd.py (c) 2016, Matt Clay <[email protected]>\n# (c) 2023, Stephane Graber <[email protected]>\n# Copyright (c) 2023 Ansible Project\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = \"\"\"\n author: Stéphane Graber (@stgraber)\n name: incus\n short_description: Run tasks in Incus instances via the Incus CLI.\n description:\n - Run commands or put/fetch files to an existing Incus instance using Incus CLI.\n version_added: \"8.2.0\"\n options:\n remote_addr:\n description:\n - The instance identifier.\n default: inventory_hostname\n vars:\n - name: inventory_hostname\n - name: ansible_host\n - name: ansible_incus_host\n executable:\n description:\n - The shell to use for execution inside the instance.\n default: /bin/sh\n vars:\n - name: ansible_executable\n - name: ansible_incus_executable\n remote:\n description:\n - The name of the Incus remote to use (per C(incus remote list)).\n - Remotes are used to access multiple servers from a single client.\n default: local\n vars:\n - name: ansible_incus_remote\n project:\n description:\n - The name of the Incus project to use (per C(incus project list)).\n - Projects are used to divide the instances running on a server.\n default: default\n vars:\n - name: ansible_incus_project\n\"\"\"\n\nimport os\nfrom subprocess import call, Popen, PIPE\n\nfrom ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound\nfrom ansible.module_utils.common.process import get_bin_path\nfrom ansible.module_utils._text import to_bytes, to_text\nfrom ansible.plugins.connection import ConnectionBase\n\n\nclass Connection(ConnectionBase):\n \"\"\" Incus based connections \"\"\"\n\n transport = \"incus\"\n has_pipelining = True\n default_user = 'root'\n\n def __init__(self, play_context, new_stdin, *args, **kwargs):\n super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)\n\n self._incus_cmd = get_bin_path(\"incus\")\n\n if not self._incus_cmd:\n raise AnsibleError(\"incus command not found in PATH\")\n\n def _connect(self):\n \"\"\"connect to Incus (nothing to do here) \"\"\"\n super(Connection, self)._connect()\n\n if not self._connected:\n self._display.vvv(u\"ESTABLISH Incus CONNECTION FOR USER: root\",\n host=self._instance())\n self._connected = True\n\n def _instance(self):\n # Return only the leading part of the FQDN as the instance name\n # as Incus instance names cannot be a FQDN.\n return self.get_option('remote_addr').split(\".\")[0]\n\n def exec_command(self, cmd, in_data=None, sudoable=True):\n \"\"\" execute a command on the Incus host \"\"\"\n super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)\n\n self._display.vvv(u\"EXEC {0}\".format(cmd),\n host=self._instance())\n\n local_cmd = [\n self._incus_cmd,\n \"--project\", self.get_option(\"project\"),\n \"exec\",\n \"%s:%s\" % (self.get_option(\"remote\"), self._instance()),\n \"--\",\n self._play_context.executable, \"-c\", cmd]\n\n local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]\n in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')\n\n process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n stdout, stderr = process.communicate(in_data)\n\n stdout = to_text(stdout)\n stderr = to_text(stderr)\n\n if 
stderr == \"Error: Instance is not running.\\n\":\n raise AnsibleConnectionFailure(\"instance not running: %s\" %\n self._instance())\n\n if stderr == \"Error: Instance not found\\n\":\n raise AnsibleConnectionFailure(\"instance not found: %s\" %\n self._instance())\n\n return process.returncode, stdout, stderr\n\n def put_file(self, in_path, out_path):\n \"\"\" put a file from local to Incus \"\"\"\n super(Connection, self).put_file(in_path, out_path)\n\n self._display.vvv(u\"PUT {0} TO {1}\".format(in_path, out_path),\n host=self._instance())\n\n if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):\n raise AnsibleFileNotFound(\"input path is not a file: %s\" % in_path)\n\n local_cmd = [\n self._incus_cmd,\n \"--project\", self.get_option(\"project\"),\n \"file\", \"push\", \"--quiet\",\n in_path,\n \"%s:%s/%s\" % (self.get_option(\"remote\"),\n self._instance(),\n out_path)]\n\n local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]\n\n call(local_cmd)\n\n def fetch_file(self, in_path, out_path):\n \"\"\" fetch a file from Incus to local \"\"\"\n super(Connection, self).fetch_file(in_path, out_path)\n\n self._display.vvv(u\"FETCH {0} TO {1}\".format(in_path, out_path),\n host=self._instance())\n\n local_cmd = [\n self._incus_cmd,\n \"--project\", self.get_option(\"project\"),\n \"file\", \"pull\", \"--quiet\",\n \"%s:%s/%s\" % (self.get_option(\"remote\"),\n self._instance(),\n in_path),\n out_path]\n\n local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]\n\n call(local_cmd)\n\n def close(self):\n \"\"\" close the connection (nothing to do here) \"\"\"\n super(Connection, self).close()\n\n self._connected = False\n", "path": "plugins/connection/incus.py" } ]
diff --git a/changelogs/fragments/7874-incus_connection_treats_inventory_hostname_as_literal_in_remotes.yml b/changelogs/fragments/7874-incus_connection_treats_inventory_hostname_as_literal_in_remotes.yml new file mode 100644 index 00000000000..83d302e9b9a --- /dev/null +++ b/changelogs/fragments/7874-incus_connection_treats_inventory_hostname_as_literal_in_remotes.yml @@ -0,0 +1,2 @@ +bugfixes: + - "incus connection plugin - treats ``inventory_hostname`` as a variable instead of a literal in remote connections (https://github.com/ansible-collections/community.general/issues/7874)." diff --git a/plugins/connection/incus.py b/plugins/connection/incus.py index f346d06170f..81d6f971c70 100644 --- a/plugins/connection/incus.py +++ b/plugins/connection/incus.py @@ -21,6 +21,7 @@ - The instance identifier. default: inventory_hostname vars: + - name: inventory_hostname - name: ansible_host - name: ansible_incus_host executable:
pennersr__django-allauth-2987
Update Facebook complete login
Users may want to create a custom provider which extends FacebookProvider and sets a custom provider_id. For example:
```
from allauth.socialaccount.providers.facebook.provider import FacebookProvider


class FacebookGroupProvider(FacebookProvider):
    id = "facebook_group"
    name = "Facebook Group"


provider_classes = [FacebookGroupProvider]
```
The problem is that in this case the social account will be created with FacebookProvider.id instead of the custom provider_id:
https://github.com/pennersr/django-allauth/blob/master/allauth/socialaccount/providers/facebook/views.py#L66
https://github.com/pennersr/django-allauth/blob/master/allauth/socialaccount/providers/facebook/views.py#L38
https://github.com/pennersr/django-allauth/blob/master/allauth/socialaccount/providers/facebook/views.py#L49
https://github.com/pennersr/django-allauth/blob/master/allauth/socialaccount/providers/base.py#L63
https://github.com/pennersr/django-allauth/blob/master/allauth/socialaccount/providers/base.py#L86
I think we don't need to update the [login_by_token](https://github.com/pennersr/django-allauth/blob/master/allauth/socialaccount/providers/facebook/views.py#L73) function for a custom provider, because that function is only used for the Facebook login flow.
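The fix (shown in the diff further down) resolves the provider from `app.provider` — the provider id the SocialApp was actually configured with — rather than the hard-coded base class id. A toy sketch of why that matters; the registry and classes here are simplified stand-ins, not allauth's real internals:

```python
# Simplified stand-ins for allauth's provider registry and SocialApp.
registry = {}


class FacebookProvider:
    id = "facebook"


class FacebookGroupProvider(FacebookProvider):
    id = "facebook_group"


for cls in (FacebookProvider, FacebookGroupProvider):
    registry[cls.id] = cls


class SocialApp:
    def __init__(self, provider):
        self.provider = provider  # the provider id this app was configured with


app = SocialApp(provider="facebook_group")

# Hard-coding the base class id ignores how the app was configured:
assert registry[FacebookProvider.id] is FacebookProvider

# Resolving by app.provider honors the subclass -- the behavior the
# one-line change to fb_complete_login restores:
assert registry[app.provider] is FacebookGroupProvider
```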
[ { "content": "import hashlib\nimport hmac\nimport logging\nimport requests\nfrom datetime import timedelta\n\nfrom django.utils import timezone\n\nfrom allauth.socialaccount import app_settings, providers\nfrom allauth.socialaccount.helpers import (\n complete_social_login,\n render_authentication_error,\n)\nfrom allauth.socialaccount.models import SocialLogin, SocialToken\nfrom allauth.socialaccount.providers.oauth2.views import (\n OAuth2Adapter,\n OAuth2CallbackView,\n OAuth2LoginView,\n)\n\nfrom .forms import FacebookConnectForm\nfrom .provider import GRAPH_API_URL, GRAPH_API_VERSION, FacebookProvider\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef compute_appsecret_proof(app, token):\n # Generate an appsecret_proof parameter to secure the Graph API call\n # see https://developers.facebook.com/docs/graph-api/securing-requests\n msg = token.token.encode(\"utf-8\")\n key = app.secret.encode(\"utf-8\")\n appsecret_proof = hmac.new(key, msg, digestmod=hashlib.sha256).hexdigest()\n return appsecret_proof\n\n\ndef fb_complete_login(request, app, token):\n provider = providers.registry.by_id(FacebookProvider.id, request)\n resp = requests.get(\n GRAPH_API_URL + \"/me\",\n params={\n \"fields\": \",\".join(provider.get_fields()),\n \"access_token\": token.token,\n \"appsecret_proof\": compute_appsecret_proof(app, token),\n },\n )\n resp.raise_for_status()\n extra_data = resp.json()\n login = provider.sociallogin_from_response(request, extra_data)\n return login\n\n\nclass FacebookOAuth2Adapter(OAuth2Adapter):\n provider_id = FacebookProvider.id\n provider_default_auth_url = \"https://www.facebook.com/{}/dialog/oauth\".format(\n GRAPH_API_VERSION\n )\n\n settings = app_settings.PROVIDERS.get(provider_id, {})\n scope_delimiter = \",\"\n authorize_url = settings.get(\"AUTHORIZE_URL\", provider_default_auth_url)\n access_token_url = GRAPH_API_URL + \"/oauth/access_token\"\n expires_in_key = \"expires_in\"\n\n def complete_login(self, request, app, access_token, **kwargs):\n return fb_complete_login(request, app, access_token)\n\n\noauth2_login = OAuth2LoginView.adapter_view(FacebookOAuth2Adapter)\noauth2_callback = OAuth2CallbackView.adapter_view(FacebookOAuth2Adapter)\n\n\ndef login_by_token(request):\n ret = None\n auth_exception = None\n if request.method == \"POST\":\n form = FacebookConnectForm(request.POST)\n if form.is_valid():\n try:\n provider = providers.registry.by_id(FacebookProvider.id, request)\n login_options = provider.get_fb_login_options(request)\n app = provider.get_app(request)\n access_token = form.cleaned_data[\"access_token\"]\n expires_at = None\n if login_options.get(\"auth_type\") == \"reauthenticate\":\n info = requests.get(\n GRAPH_API_URL + \"/oauth/access_token_info\",\n params={\n \"client_id\": app.client_id,\n \"access_token\": access_token,\n },\n ).json()\n nonce = provider.get_nonce(request, pop=True)\n ok = nonce and nonce == info.get(\"auth_nonce\")\n else:\n ok = True\n if ok and provider.get_settings().get(\"EXCHANGE_TOKEN\"):\n resp = requests.get(\n GRAPH_API_URL + \"/oauth/access_token\",\n params={\n \"grant_type\": \"fb_exchange_token\",\n \"client_id\": app.client_id,\n \"client_secret\": app.secret,\n \"fb_exchange_token\": access_token,\n },\n ).json()\n access_token = resp[\"access_token\"]\n expires_in = resp.get(\"expires_in\")\n if expires_in:\n expires_at = timezone.now() + timedelta(seconds=int(expires_in))\n if ok:\n token = SocialToken(\n app=app, token=access_token, expires_at=expires_at\n )\n login = fb_complete_login(request, app, 
token)\n login.token = token\n login.state = SocialLogin.state_from_request(request)\n ret = complete_social_login(request, login)\n except requests.RequestException as e:\n logger.exception(\"Error accessing FB user profile\")\n auth_exception = e\n if not ret:\n ret = render_authentication_error(\n request, FacebookProvider.id, exception=auth_exception\n )\n return ret\n", "path": "allauth/socialaccount/providers/facebook/views.py" } ]
[ { "content": "import hashlib\nimport hmac\nimport logging\nimport requests\nfrom datetime import timedelta\n\nfrom django.utils import timezone\n\nfrom allauth.socialaccount import app_settings, providers\nfrom allauth.socialaccount.helpers import (\n complete_social_login,\n render_authentication_error,\n)\nfrom allauth.socialaccount.models import SocialLogin, SocialToken\nfrom allauth.socialaccount.providers.oauth2.views import (\n OAuth2Adapter,\n OAuth2CallbackView,\n OAuth2LoginView,\n)\n\nfrom .forms import FacebookConnectForm\nfrom .provider import GRAPH_API_URL, GRAPH_API_VERSION, FacebookProvider\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef compute_appsecret_proof(app, token):\n # Generate an appsecret_proof parameter to secure the Graph API call\n # see https://developers.facebook.com/docs/graph-api/securing-requests\n msg = token.token.encode(\"utf-8\")\n key = app.secret.encode(\"utf-8\")\n appsecret_proof = hmac.new(key, msg, digestmod=hashlib.sha256).hexdigest()\n return appsecret_proof\n\n\ndef fb_complete_login(request, app, token):\n provider = providers.registry.by_id(app.provider, request)\n resp = requests.get(\n GRAPH_API_URL + \"/me\",\n params={\n \"fields\": \",\".join(provider.get_fields()),\n \"access_token\": token.token,\n \"appsecret_proof\": compute_appsecret_proof(app, token),\n },\n )\n resp.raise_for_status()\n extra_data = resp.json()\n login = provider.sociallogin_from_response(request, extra_data)\n return login\n\n\nclass FacebookOAuth2Adapter(OAuth2Adapter):\n provider_id = FacebookProvider.id\n provider_default_auth_url = \"https://www.facebook.com/{}/dialog/oauth\".format(\n GRAPH_API_VERSION\n )\n\n settings = app_settings.PROVIDERS.get(provider_id, {})\n scope_delimiter = \",\"\n authorize_url = settings.get(\"AUTHORIZE_URL\", provider_default_auth_url)\n access_token_url = GRAPH_API_URL + \"/oauth/access_token\"\n expires_in_key = \"expires_in\"\n\n def complete_login(self, request, app, access_token, **kwargs):\n return fb_complete_login(request, app, access_token)\n\n\noauth2_login = OAuth2LoginView.adapter_view(FacebookOAuth2Adapter)\noauth2_callback = OAuth2CallbackView.adapter_view(FacebookOAuth2Adapter)\n\n\ndef login_by_token(request):\n ret = None\n auth_exception = None\n if request.method == \"POST\":\n form = FacebookConnectForm(request.POST)\n if form.is_valid():\n try:\n provider = providers.registry.by_id(FacebookProvider.id, request)\n login_options = provider.get_fb_login_options(request)\n app = provider.get_app(request)\n access_token = form.cleaned_data[\"access_token\"]\n expires_at = None\n if login_options.get(\"auth_type\") == \"reauthenticate\":\n info = requests.get(\n GRAPH_API_URL + \"/oauth/access_token_info\",\n params={\n \"client_id\": app.client_id,\n \"access_token\": access_token,\n },\n ).json()\n nonce = provider.get_nonce(request, pop=True)\n ok = nonce and nonce == info.get(\"auth_nonce\")\n else:\n ok = True\n if ok and provider.get_settings().get(\"EXCHANGE_TOKEN\"):\n resp = requests.get(\n GRAPH_API_URL + \"/oauth/access_token\",\n params={\n \"grant_type\": \"fb_exchange_token\",\n \"client_id\": app.client_id,\n \"client_secret\": app.secret,\n \"fb_exchange_token\": access_token,\n },\n ).json()\n access_token = resp[\"access_token\"]\n expires_in = resp.get(\"expires_in\")\n if expires_in:\n expires_at = timezone.now() + timedelta(seconds=int(expires_in))\n if ok:\n token = SocialToken(\n app=app, token=access_token, expires_at=expires_at\n )\n login = fb_complete_login(request, app, token)\n 
login.token = token\n login.state = SocialLogin.state_from_request(request)\n ret = complete_social_login(request, login)\n except requests.RequestException as e:\n logger.exception(\"Error accessing FB user profile\")\n auth_exception = e\n if not ret:\n ret = render_authentication_error(\n request, FacebookProvider.id, exception=auth_exception\n )\n return ret\n", "path": "allauth/socialaccount/providers/facebook/views.py" } ]
diff --git a/allauth/socialaccount/providers/facebook/views.py b/allauth/socialaccount/providers/facebook/views.py index d74bd36f78..002fd3cdc4 100644 --- a/allauth/socialaccount/providers/facebook/views.py +++ b/allauth/socialaccount/providers/facebook/views.py @@ -35,7 +35,7 @@ def compute_appsecret_proof(app, token): def fb_complete_login(request, app, token): - provider = providers.registry.by_id(FacebookProvider.id, request) + provider = providers.registry.by_id(app.provider, request) resp = requests.get( GRAPH_API_URL + "/me", params={
googleapis__python-bigquery-802
ChunkedEncodingError is not retried when fetching data with list_rows()
Original issue: https://github.com/googleapis/python-bigquery-storage/issues/242

A user reported that they saw an error in production when fetching table data with `Client.list_rows()`. That method uses the [default retry object](https://github.com/googleapis/python-bigquery/blob/7e0e2bafc4c3f98a4246100f504fd78a01a28e7d/google/cloud/bigquery/retry.py#L49), which currently does not consider `requests.exceptions.ChunkedEncodingError` retryable. (It does retry `requests.exceptions.ConnectionError`, but `ChunkedEncodingError` is not a subclass of that.)
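Until the default predicate covers it, a caller can work around this by passing a custom retry whose predicate also treats `ChunkedEncodingError` as transient. A sketch that mirrors, in trimmed form, the predicate from `retry.py` shown below — the table id is a placeholder:

```python
import requests.exceptions
from google.api_core import exceptions, retry
from google.cloud import bigquery

# Same shape as the library's retryable-type tuple, with one extra entry:
_RETRYABLE_TYPES = (
    ConnectionError,
    exceptions.TooManyRequests,
    exceptions.InternalServerError,
    exceptions.BadGateway,
    requests.exceptions.ConnectionError,
    requests.exceptions.ChunkedEncodingError,  # the class this issue asks to add
)

_RETRYABLE_REASONS = frozenset(
    ["rateLimitExceeded", "backendError", "internalError", "badGateway"]
)


def _should_retry(exc):
    # Structured API errors are judged by their "reason"; unstructured
    # transport-level failures by exception type.
    if not hasattr(exc, "errors") or len(exc.errors) == 0:
        return isinstance(exc, _RETRYABLE_TYPES)
    return exc.errors[0]["reason"] in _RETRYABLE_REASONS


patched_retry = retry.Retry(predicate=_should_retry)

client = bigquery.Client()  # needs application default credentials
rows = client.list_rows("my-project.my_dataset.my_table", retry=patched_retry)
```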
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom google.api_core import exceptions\nfrom google.api_core import retry\nfrom google.auth import exceptions as auth_exceptions\nimport requests.exceptions\n\n\n_RETRYABLE_REASONS = frozenset(\n [\"rateLimitExceeded\", \"backendError\", \"internalError\", \"badGateway\"]\n)\n\n_UNSTRUCTURED_RETRYABLE_TYPES = (\n ConnectionError,\n exceptions.TooManyRequests,\n exceptions.InternalServerError,\n exceptions.BadGateway,\n requests.exceptions.ConnectionError,\n auth_exceptions.TransportError,\n)\n\n\ndef _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n\n We retry if and only if the 'reason' is 'backendError'\n or 'rateLimitExceeded'.\n \"\"\"\n if not hasattr(exc, \"errors\") or len(exc.errors) == 0:\n # Check for unstructured error returns, e.g. from GFE\n return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)\n\n reason = exc.errors[0][\"reason\"]\n return reason in _RETRYABLE_REASONS\n\n\nDEFAULT_RETRY = retry.Retry(predicate=_should_retry)\n\"\"\"The default retry object.\n\nAny method with a ``retry`` parameter will be retried automatically,\nwith reasonable defaults. To disable retry, pass ``retry=None``.\nTo modify the default retry behavior, call a ``with_XXX`` method\non ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,\npass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.\n\"\"\"\n", "path": "google/cloud/bigquery/retry.py" } ]
[ { "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom google.api_core import exceptions\nfrom google.api_core import retry\nfrom google.auth import exceptions as auth_exceptions\nimport requests.exceptions\n\n\n_RETRYABLE_REASONS = frozenset(\n [\"rateLimitExceeded\", \"backendError\", \"internalError\", \"badGateway\"]\n)\n\n_UNSTRUCTURED_RETRYABLE_TYPES = (\n ConnectionError,\n exceptions.TooManyRequests,\n exceptions.InternalServerError,\n exceptions.BadGateway,\n requests.exceptions.ChunkedEncodingError,\n requests.exceptions.ConnectionError,\n auth_exceptions.TransportError,\n)\n\n\ndef _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n\n We retry if and only if the 'reason' is 'backendError'\n or 'rateLimitExceeded'.\n \"\"\"\n if not hasattr(exc, \"errors\") or len(exc.errors) == 0:\n # Check for unstructured error returns, e.g. from GFE\n return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)\n\n reason = exc.errors[0][\"reason\"]\n return reason in _RETRYABLE_REASONS\n\n\nDEFAULT_RETRY = retry.Retry(predicate=_should_retry)\n\"\"\"The default retry object.\n\nAny method with a ``retry`` parameter will be retried automatically,\nwith reasonable defaults. To disable retry, pass ``retry=None``.\nTo modify the default retry behavior, call a ``with_XXX`` method\non ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,\npass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.\n\"\"\"\n", "path": "google/cloud/bigquery/retry.py" } ]
diff --git a/google/cloud/bigquery/retry.py b/google/cloud/bigquery/retry.py index 5e9075fe1..2df4de08b 100644 --- a/google/cloud/bigquery/retry.py +++ b/google/cloud/bigquery/retry.py @@ -27,6 +27,7 @@ exceptions.TooManyRequests, exceptions.InternalServerError, exceptions.BadGateway, + requests.exceptions.ChunkedEncodingError, requests.exceptions.ConnectionError, auth_exceptions.TransportError, ) diff --git a/tests/unit/test_retry.py b/tests/unit/test_retry.py index 0bef1e5e1..6fb7f93fd 100644 --- a/tests/unit/test_retry.py +++ b/tests/unit/test_retry.py @@ -51,6 +51,10 @@ def test_w_unstructured_requests_connectionerror(self): exc = requests.exceptions.ConnectionError() self.assertTrue(self._call_fut(exc)) + def test_w_unstructured_requests_chunked_encoding_error(self): + exc = requests.exceptions.ChunkedEncodingError() + self.assertTrue(self._call_fut(exc)) + def test_w_auth_transporterror(self): from google.auth.exceptions import TransportError
hylang__hy-320
hy raises ImportError out of the box
This is on Python 2.6. May be related to #37. I think `hy` should probably install the `importlib` dependency at installation time, or the docs should state clearly that `importlib` needs to be installed ahead of time. Or, worst case, state that Python 2.6 is not supported.

```
(env)09:52:13 Python (master) > hy
Traceback (most recent call last):
  File "/Users/jacobsen/env/bin/hy", line 9, in <module>
    load_entry_point('hy==0.9.10', 'console_scripts', 'hy')()
  File "/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/pkg_resources.py", line 343, in load_entry_point
    return get_distribution(dist).load_entry_point(group, name)
  File "/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/pkg_resources.py", line 2354, in load_entry_point
    return ep.load()
  File "/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/pkg_resources.py", line 2060, in load
    entry = __import__(self.module_name, globals(),globals(), ['__name__'])
  File "/Users/jacobsen/Programming/Python/hy/hy/__init__.py", line 37, in <module>
    import hy.importer  # NOQA
  File "/Users/jacobsen/Programming/Python/hy/hy/importer.py", line 22, in <module>
    from hy.compiler import hy_compile
  File "/Users/jacobsen/Programming/Python/hy/hy/compiler.py", line 44, in <module>
    import importlib
ImportError: No module named importlib
(env)09:52:13 Python (master) > pip install importlib
Downloading/unpacking importlib
  Downloading importlib-1.0.2.tar.bz2
  Running setup.py egg_info for package importlib
Installing collected packages: importlib
  Running setup.py install for importlib
Successfully installed importlib
Cleaning up...
(env)09:52:21 Python (master) > hy
hy 0.9.10
=>
```
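The traceback bottoms out at `import importlib` in `hy/compiler.py`, and `importlib` only entered the standard library in Python 2.7, so the dependency has to be declared conditionally at install time. Below is a minimal sketch of that version-gated pattern in `setup.py`; it mirrors the eventual patch (see the pr_diff below), and the `importlib>=1.0.2` pin is taken from the `importlib-1.0.2.tar.bz2` that pip fetched in the transcript above.

```
import sys

# Declare backport dependencies only on interpreters that need them.
install_requires = ['rply>=0.6.2']
if sys.version_info[:2] < (2, 7):
    # argparse and importlib are stdlib from 2.7 onward; on 2.6 both
    # must come from their PyPI backports.
    install_requires.append('argparse>=1.2.1')
    install_requires.append('importlib>=1.0.2')
```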
[ { "content": "#!/usr/bin/env python\n# Copyright (c) 2012, 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\nPKG = \"hy\"\nVERSIONFILE = os.path.join(PKG, \"version.py\")\nverstr = \"unknown\"\ntry:\n verstrline = open(VERSIONFILE, \"rt\").read()\nexcept EnvironmentError:\n pass # Okay, there is no version file.\nelse:\n VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n mo = re.search(VSRE, verstrline, re.M)\n if mo:\n __version__ = mo.group(1)\n else:\n msg = \"if %s.py exists, it is required to be well-formed\" % VERSIONFILE\n raise RuntimeError(msg)\n\nlong_description = \"\"\"Hy is a Python <--> Lisp layer. It helps\nmake things work nicer, and lets Python and the Hy lisp variant play\nnice together. \"\"\"\n\ninstall_requires = ['rply>=0.6.2']\nif sys.version_info[:2] < (2, 7):\n install_requires.append('argparse>=1.2.1')\nif os.name == 'nt':\n install_requires.append('pyreadline==2.0')\n\nsetup(\n name=PKG,\n version=__version__,\n install_requires=install_requires,\n dependency_links=['https://github.com/hylang/rply/zipball/master#egg=rply-0.6.2'],\n entry_points={\n 'console_scripts': [\n 'hy = hy.cmdline:hy_main',\n 'hyc = hy.cmdline:hyc_main'\n ]\n },\n packages=find_packages(exclude=['tests*']),\n package_data={\n 'hy.contrib': ['*.hy'],\n 'hy.core': ['*.hy'],\n },\n author=\"Paul Tagliamonte\",\n author_email=\"[email protected]\",\n long_description=long_description,\n description='Lisp and Python love each other.',\n license=\"Expat\",\n url=\"http://hylang.org/\",\n platforms=['any'],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". Ugh.\n \"Operating System :: OS Independent\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.2\",\n \"Programming Language :: Python :: 3.3\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n ]\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n# Copyright (c) 2012, 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\nPKG = \"hy\"\nVERSIONFILE = os.path.join(PKG, \"version.py\")\nverstr = \"unknown\"\ntry:\n verstrline = open(VERSIONFILE, \"rt\").read()\nexcept EnvironmentError:\n pass # Okay, there is no version file.\nelse:\n VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n mo = re.search(VSRE, verstrline, re.M)\n if mo:\n __version__ = mo.group(1)\n else:\n msg = \"if %s.py exists, it is required to be well-formed\" % VERSIONFILE\n raise RuntimeError(msg)\n\nlong_description = \"\"\"Hy is a Python <--> Lisp layer. It helps\nmake things work nicer, and lets Python and the Hy lisp variant play\nnice together. \"\"\"\n\ninstall_requires = ['rply>=0.6.2']\nif sys.version_info[:2] < (2, 7):\n install_requires.append('argparse>=1.2.1')\n install_requires.append('importlib>=1.0.2')\nif os.name == 'nt':\n install_requires.append('pyreadline==2.0')\n\nsetup(\n name=PKG,\n version=__version__,\n install_requires=install_requires,\n dependency_links=['https://github.com/hylang/rply/zipball/master#egg=rply-0.6.2'],\n entry_points={\n 'console_scripts': [\n 'hy = hy.cmdline:hy_main',\n 'hyc = hy.cmdline:hyc_main'\n ]\n },\n packages=find_packages(exclude=['tests*']),\n package_data={\n 'hy.contrib': ['*.hy'],\n 'hy.core': ['*.hy'],\n },\n author=\"Paul Tagliamonte\",\n author_email=\"[email protected]\",\n long_description=long_description,\n description='Lisp and Python love each other.',\n license=\"Expat\",\n url=\"http://hylang.org/\",\n platforms=['any'],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". Ugh.\n \"Operating System :: OS Independent\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.2\",\n \"Programming Language :: Python :: 3.3\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n ]\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index f3e181456..24621eed5 100755 --- a/setup.py +++ b/setup.py @@ -48,6 +48,7 @@ install_requires = ['rply>=0.6.2'] if sys.version_info[:2] < (2, 7): install_requires.append('argparse>=1.2.1') + install_requires.append('importlib>=1.0.2') if os.name == 'nt': install_requires.append('pyreadline==2.0')
DDMAL__CantusDB-566
On Chant Detail page, in the Source Navigation sidebar, indicate current chant
Currently, on the chant detail page, there is a sidebar that lists the chants on the current folio as well as on nearby folios. In the sidebar, there is no indication of which chant is currently being displayed:
![Screen Shot 2022-12-15 at 1 19 53 PM](https://user-images.githubusercontent.com/58090591/207939496-fdcb9a9a-1fa2-4dc3-a447-7c0d89b771bf.png)
It might be helpful if we somehow visually indicated the chant that is currently being displayed. Possible options:
1. Bold it
2. Make it not a link
3. Add a little arrow pointing to the chant
4. A combination of 2 and 3, or maybe all three of the options above.

For example, here's a quick mockup of option 4:
![Screen Shot 2022-12-15 at 1 34 09 PM](https://user-images.githubusercontent.com/58090591/207940198-55b1d25e-f5f8-4bb7-af0a-9299d4d6c081.png)
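Options 1–3 all reduce to the same comparison: while rendering the sidebar list, test each listed chant against the chant currently on display and style the match differently. A small, self-contained Python sketch of that comparison follows; the `Chant` dataclass and sample data are illustrative stand-ins, not CantusDB models. (The project's actual fix, visible in the pr_diff further down, does the equivalent in the Django template by comparing `c_sequence` values within the current folio.)

```
from dataclasses import dataclass


@dataclass
class Chant:
    # Stand-in for the CantusDB model; only the fields the sidebar needs.
    c_sequence: int
    incipit: str


displayed = Chant(c_sequence=2, incipit="Gaude mater ecclesia")
folio_chants = [
    Chant(c_sequence=1, incipit="Adorna thalamum"),
    displayed,
    Chant(c_sequence=3, incipit="Ave maris stella"),
]

# Options 1 and 3 combined: arrow marker plus bold for the current chant.
for chant in folio_chants:
    is_current = chant.c_sequence == displayed.c_sequence
    marker = "-> " if is_current else "   "
    incipit = f"**{chant.incipit}**" if is_current else chant.incipit
    print(f"{marker}{chant.c_sequence} {incipit}")
```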
[ { "content": "from latin_syllabification import syllabify_word\nfrom itertools import zip_longest\n\n\"\"\"\nsome useful info taken from the text entry guideline:\nthe symbols are present only in the MS spelling, not std spelling\nvertical stroke | identifies sections within a chant, it's meant to help align text with melody\n should be surrounded by spaces\ntilda ~ identifies \"Psalm incipits\" or any text that doesn't align with pitches\n immediately before text, no spaces\nIPSUM (same), it looks like: | space ~Ipsum space [actual psalm text from the antiphon]\n this doesn't affect alignment, just treat the part between ~ and next | as one syllable in one word\n\n MISSING TEXT WITH READABLE PITCHES\nthe number sign # identifies missing text, it could be missing complete word(s) or syllable(s)\n complete words missing: space # space\n partially visible word (syllables missing): - (one hyphen represents missing syllables) then space # space for missing section of text\n volpiano for the section with missing text: -- between neumes, begin and end section with --- (always treat the section as a word?)\n\n READABLE TEXT UNDER MISSING PITCHES\nfor a complete word:\n enclose affected text in {} and don't syllabize them,\n volpiano use 6------6 to represent missing pitches, --- before and after each 6\n\nfor select syllables:\n enclose affected syllable(s) in {},\n volpiano use 6------6 to represent missing pitches, no --- before and after each 6???\nIn either case, the 6*6 align with {*}\n\n MISSING BOTH PITCHES AND TEXT\nno text, no pitches visible:\n {#} indicates missing text,\n if partial word readable, use - for the missing sylable(s) and then enter space {#} space for the remainder of missing text\n volpiano use 6------6 as described above\n\nno pitches, partial text visible:\n enclose affected text in {}, use - for the missing portions of words\n use # within {} to indicate location of missing text\n volpiano use 6------6 as described above\n\nthere should never be a space in volpiano. hyphens do the separation and spacing.\n\"\"\"\n\n\ndef syllabize_text(text, pre_syllabized=False):\n # vertical stroke | identifies sections within a chant, it should be surrounded by spaces\n # if it's missing spaces, add them\n if \"|\" in text:\n substrs_around_barline = text.split(\"|\")\n # this may introduce extra spaces. those will be removed in the next part\n text = \" | \".join(substrs_around_barline)\n\n # curly braces {} identifies text affected by missing pitches, insert spaces before { and after }\n # so that the text in braces are are treated as a separate word\n if \"{\" in text:\n substrs_around_brace_start = text.split(\"{\")\n # this may introduce extra spaces. those will be removed in the next part\n text = \" {\".join(substrs_around_brace_start)\n if \"}\" in text:\n substrs_around_brace_end = text.split(\"}\")\n # this may introduce extra spaces. those will be removed in the next part\n text = \"} \".join(substrs_around_brace_end)\n\n words_text = text.split(\" \")\n # initialize the first word with a space, which aligns with the clef at the beginning of melody\n # syls_text is a list of lists (words). 
each word is a list of syllables\n syls_text = [[\" \"]]\n\n if pre_syllabized:\n # if the chant has its syllabized_full_text hardcoded in the DB\n for word in words_text:\n # this `if` eliminates the extra spaces\n if word:\n syls = [syl + \"-\" for syl in word.split(\"-\")]\n syls[-1] = syls[-1][:-1]\n syls_text.append(syls)\n else:\n for word in words_text:\n if word:\n syls = [syl + \"-\" for syl in syllabify_word(word)]\n syls[-1] = syls[-1][:-1]\n syls_text.append(syls)\n return syls_text\n\n\ndef syllabize_melody(volpiano):\n # the clef in volpiano should be 1--- with three dashes, if missing any dash, insert it\n if volpiano[1] != \"-\":\n volpiano = volpiano[:1] + \"-\" + volpiano[1:]\n if volpiano[2] != \"-\":\n volpiano = volpiano[:2] + \"-\" + volpiano[2:]\n if volpiano[3] != \"-\":\n volpiano = volpiano[:3] + \"-\" + volpiano[3:]\n\n # before splitting on \"---\", note that some volpianos use \"6------6\" to identify missing content\n # the \"6------6\" should not be split\n if \"6------6\" in volpiano:\n # temporarily replace 6-----6 by *\n volpiano = volpiano.replace(\"6------6\", \"******\")\n # split volpiano into melody words\n words_melody = [word + \"---\" for word in volpiano.split(\"---\")]\n # remove the trailing \"---\" (added in previous line) from the last word\n words_melody[-1] = words_melody[-1][:-3]\n\n # split melody words into syllables\n # `syls_melody` would be a list of lists (words), each word is a list of syllables\n syls_melody = []\n for word in words_melody[:-1]:\n # to accommodate for text like `mar{tirum et}`, we appended space before all curly braces,\n # so that the text in curly braces can nicely align with the `---6------6---` in melody\n # (they're both treated as a single word)\n # however, there are cases like `an{#}` (originally no space before curly brace,\n # while the corresp `6------6` in melody has only two leading dashes because it corresponds to only a syllable)\n # in order to accommodate both cases, we change the syllable-level `6------6` into word-level\n # i.e., make it a single word on its own\n # example: see 219427 and 619450\n # this variable is for capturing the syllable-level `6------6` (below referred to as `gap`),\n syl_level_gap = None\n # `syls` contains the melody syllables in each melody word\n syls = []\n # the last 3 charactors (---) are discarded\n for i, syl in enumerate(word[:-3].split(\"--\")):\n if \"******\" in syl:\n # if the syllable contains `6------6`\n # (it may not be exactly `6------6`, it could also be sth like `6------677`)\n if i == 0:\n # if `6------6` is the first syllable in the word, it must be a word-level gap\n # just put it directly into the list for the current word\n syl = syl.replace(\"******\", \"6------6\")\n syls.append(syl + \"--\")\n else:\n # if the gap is not the first syllable in the word,\n # the word must be sth like `---k--6------677---` (syl-level gap)\n # we save it and later add it directly to the `syls_melody` list\n syl_level_gap = syl.replace(\"******\", \"6------6\")\n else:\n # for normal syls, directly add them to the list for the current word\n syls.append(syl + \"--\")\n # this next line is equivalent to removing the trailing \"--\" and\n # then adding the \"---\" back to the end of each word\n syls[-1] = syls[-1] + \"-\"\n syls_melody.append(syls)\n if syl_level_gap:\n # if there is syl-level gap, add it to `syls_melody` like a word\n syls_melody.append([syl_level_gap])\n\n # for the last word in melody\n last_word = words_melody[-1]\n if \"******\" in last_word:\n # 
change * back to 6------6\n word = word.replace(\"******\", \"6------6\")\n syls_melody.append([word])\n else:\n syls = [syl + \"--\" for syl in last_word.split(\"--\")]\n # remove the trailing \"--\" (added in previous line) from the last syllable\n syls[-1] = syls[-1][:-2]\n syls_melody.append(syls)\n # print(syls_melody)\n return syls_melody\n\n\ndef find_next_barline(syls_text, tilda_idx):\n # set default to beyond the last word, in case the barline is missing, all words after ~ will be combined\n barline_idx = len(syls_text)\n # the barline is a word on its own, so start from the next word\n for i, word in enumerate(syls_text[tilda_idx + 1 :]):\n if word == [\"|\"]:\n barline_idx = tilda_idx + 1 + i\n break\n return barline_idx\n\n\ndef find_next_barline_mel(syls_melody, tilda_idx):\n # set default to beyond the last word, in case the barline is missing, all words after ~ will be combined\n barline_idx_mel = len(syls_melody)\n # the barline is a word on its own, so start from the next word\n for i, word in enumerate(syls_melody[tilda_idx + 1 :]):\n if word == [\"3---\"] or word == [\"4---\"] or word == [\"3\"] or word == [\"4\"]:\n barline_idx_mel = tilda_idx + 1 + i\n break\n return barline_idx_mel\n\n\ndef find_next_brace_end(syls_text, brace_start_idx):\n # set default to the last word, in case the brace end is missing, all words after { will be combined\n brace_end_idx = len(syls_text) - 1\n # there are cases where there's only one word in the braces, so start from the same word as the brace start\n for i, word in enumerate(syls_text[brace_start_idx:]):\n if word[-1][-1] == \"}\":\n brace_end_idx = brace_start_idx + i\n break\n return brace_end_idx\n\n\ndef postprocess(syls_text, syls_melody):\n # process the braces {} before processing ~ and |, because some chants have ~ inside {}, in this case,\n # processing {} will solve both\n brace_start_idx = []\n brace_end_idx = []\n for i, word in enumerate(syls_text):\n if word[0][0] == \"{\":\n brace_start_idx.append(i)\n if word[-1][-1] == \"}\":\n brace_end_idx.append(i)\n for idx in brace_start_idx:\n next_brace_end = find_next_brace_end(syls_text, idx)\n rebuilt_words = []\n for word in syls_text[idx : next_brace_end + 1]:\n word = [syl.strip(\"-\") for syl in word]\n rebuilt_word = \"\".join(word)\n rebuilt_words.append(rebuilt_word)\n syls_text[idx] = [\" \".join(rebuilt_words)]\n for i in range(idx + 1, next_brace_end + 1):\n syls_text[i] = [\"*\"]\n syls_text = [word for word in syls_text if word != [\"*\"]]\n\n # process the text between ~ and |\n barline_idx = []\n tilda_idx = []\n for i, word in enumerate(syls_text):\n if word == [\"|\"]:\n barline_idx.append(i)\n if word[0][0] == \"~\":\n tilda_idx.append(i)\n\n # when the text between ~ and | is combined into one word (since they aren't syllabized, they're actually one syllable),\n # the corresponding melody needs to be combined into one syllable, so that they align beautifully.\n # if multiple words are present between ~ and |, they are combined into one word.\n # this causes a change in the index of every word after them.\n # this becomes a problem when there are multiple ~ in the text (more than one region require combination).\n # `melody_offset` measures the change in indexing, so that we always index the correct words for combination\n melody_offset = 0\n for idx in tilda_idx:\n # combine text words\n rebuilt_words = []\n next_barline = find_next_barline(syls_text, idx)\n for word in syls_text[idx:next_barline]:\n word = [syl.strip(\"-\") for syl in word]\n 
rebuilt_word = \"\".join(word)\n rebuilt_words.append(rebuilt_word)\n syls_text[idx] = [\" \".join(rebuilt_words)]\n for i in range(idx + 1, next_barline):\n syls_text[i] = [\"*\"]\n\n # combine melody words\n # based on the tilda index in text, find the index of melody words to combine\n # most of the time, only one melody word needs to be combined\n # but some situations require combination of multiple melody words, see 399083\n next_barline_mel = find_next_barline_mel(syls_melody, idx - melody_offset)\n melody_words_to_combine = syls_melody[idx - melody_offset : next_barline_mel]\n # combine the melody words into one word (a list of melody syls)\n melody_words_combined = [\n syl for word in melody_words_to_combine for syl in word\n ]\n try:\n # combine the melody syls into one syl\n syls_melody[idx - melody_offset] = [\"\".join(melody_words_combined)]\n except IndexError:\n # sometimes the melody is shorter than text, so the tilda in text doesn't have melody\n print(\"MELODY SHORTER THAN TEXT, DIDNT REACH TILDA\")\n break\n # for the melody words that have been merged into some word before them,\n # mark them differently so that they still occupy the index and do not appear in the results\n for i in range(idx - melody_offset + 1, next_barline_mel):\n syls_melody[i] = [\"*\"]\n\n # this is crucial for getting the index correct. melody offset updating depends on the number of melody words\n # and text words that have been merged, and also the current melody offset\n melody_offset = (\n next_barline - idx - 1 - (next_barline_mel - idx - 1 + melody_offset)\n )\n\n # remove the previously merged words (marked *) from the final results\n syls_melody = [word for word in syls_melody if word != [\"*\"]]\n syls_text = [word for word in syls_text if word != [\"*\"]]\n return syls_text, syls_melody\n\n\ndef align(syls_text, syls_melody):\n # if melody has more words than text, fill spaces to the end of the text\n # if melody has fewer words than text, discard the extra text (loop through melody length below)\n if len(syls_melody) > len(syls_text):\n syls_text = syls_text + [\" \"] * (len(syls_melody) - len(syls_text))\n\n list_of_zips = []\n for i in range(len(syls_melody)):\n # when the melody ends with ---, the last melody word would be an empty string [\"\"]\n # this is usually ok because it happens only at the end of a chant,\n # where there's no text to go with this empty melody word.\n # it becomes a problem when the melody has fewer words than text, in which case the empty\n # melody word would align with an extra text word that should not appear in the alignment\n # see 560782 and 674219\n # if the melody word is empty, ignore it during alignment\n if syls_melody[i] == [\"\"]:\n continue\n\n # if the melody word is a barline, but there's no barline in text to align with it\n # see 270470 270305\n if syls_melody[i] == [\"3---\"] and syls_text[i] != [\"|\"]:\n # insert a barline or space (as a word) to text\n # syls_text.insert(i, [\"|\"])\n syls_text.insert(i, [\" \"])\n\n # if the melody word has more syllables, add space to the text\n if len(syls_melody[i]) > len(syls_text[i]):\n word_zip = zip_longest(syls_melody[i], syls_text[i], fillvalue=\" \")\n else:\n # when the text word has more syllables, there are two options:\n # 1. hide the extra syllables (more like old cantus)\n # word_zip = zip(syls_melody[i], syls_text[i])\n\n # 2. 
append dashes to the melody word, so that the text word and melody word have the same number of syllables\n # the second option may cause gaps in staff lines, if the appended content takes up less horizontal space than the extra text syllables\n word_zip = zip_longest(syls_melody[i], syls_text[i], fillvalue=\"------\")\n list_of_zips.append(word_zip)\n\n return list_of_zips\n", "path": "django/cantusdb_project/align_text_mel.py" } ]
[ { "content": "from latin_syllabification import syllabify_word\nfrom itertools import zip_longest\n\n\"\"\"\nsome useful info taken from the text entry guideline:\nthe symbols are present only in the MS spelling, not std spelling\nvertical stroke | identifies sections within a chant, it's meant to help align text with melody\n should be surrounded by spaces\ntilda ~ identifies \"Psalm incipits\" or any text that doesn't align with pitches\n immediately before text, no spaces\nIPSUM (same), it looks like: | space ~Ipsum space [actual psalm text from the antiphon]\n this doesn't affect alignment, just treat the part between ~ and next | as one syllable in one word\n\n MISSING TEXT WITH READABLE PITCHES\nthe number sign # identifies missing text, it could be missing complete word(s) or syllable(s)\n complete words missing: space # space\n partially visible word (syllables missing): - (one hyphen represents missing syllables) then space # space for missing section of text\n volpiano for the section with missing text: -- between neumes, begin and end section with --- (always treat the section as a word?)\n\n READABLE TEXT UNDER MISSING PITCHES\nfor a complete word:\n enclose affected text in {} and don't syllabize them,\n volpiano use 6------6 to represent missing pitches, --- before and after each 6\n\nfor select syllables:\n enclose affected syllable(s) in {},\n volpiano use 6------6 to represent missing pitches, no --- before and after each 6???\nIn either case, the 6*6 align with {*}\n\n MISSING BOTH PITCHES AND TEXT\nno text, no pitches visible:\n {#} indicates missing text,\n if partial word readable, use - for the missing sylable(s) and then enter space {#} space for the remainder of missing text\n volpiano use 6------6 as described above\n\nno pitches, partial text visible:\n enclose affected text in {}, use - for the missing portions of words\n use # within {} to indicate location of missing text\n volpiano use 6------6 as described above\n\nthere should never be a space in volpiano. hyphens do the separation and spacing.\n\"\"\"\n\n\ndef syllabize_text(text, pre_syllabized=False):\n # vertical stroke | identifies sections within a chant, it should be surrounded by spaces\n # if it's missing spaces, add them\n if \"|\" in text:\n substrs_around_barline = text.split(\"|\")\n # this may introduce extra spaces. those will be removed in the next part\n text = \" | \".join(substrs_around_barline)\n\n # curly braces {} identifies text affected by missing pitches, insert spaces before { and after }\n # so that the text in braces are are treated as a separate word\n if \"{\" in text:\n substrs_around_brace_start = text.split(\"{\")\n # this may introduce extra spaces. those will be removed in the next part\n text = \" {\".join(substrs_around_brace_start)\n if \"}\" in text:\n substrs_around_brace_end = text.split(\"}\")\n # this may introduce extra spaces. those will be removed in the next part\n text = \"} \".join(substrs_around_brace_end)\n\n words_text = text.split(\" \")\n # initialize the first word with a space, which aligns with the clef at the beginning of melody\n # syls_text is a list of lists (words). 
each word is a list of syllables\n syls_text = [[\" \"]]\n\n if pre_syllabized:\n # if the chant has its syllabized_full_text hardcoded in the DB\n for word in words_text:\n # this `if` eliminates the extra spaces\n if word:\n syls = [syl + \"-\" for syl in word.split(\"-\")]\n syls[-1] = syls[-1][:-1]\n syls_text.append(syls)\n else:\n for word in words_text:\n if word:\n syls = [syl + \"-\" for syl in syllabify_word(word)]\n syls[-1] = syls[-1][:-1]\n syls_text.append(syls)\n return syls_text\n\n\ndef syllabize_melody(volpiano):\n # the clef in volpiano should be 1--- with three dashes, if missing any dash, insert it\n if volpiano[1] != \"-\":\n volpiano = volpiano[:1] + \"-\" + volpiano[1:]\n if volpiano[2] != \"-\":\n volpiano = volpiano[:2] + \"-\" + volpiano[2:]\n if volpiano[3] != \"-\":\n volpiano = volpiano[:3] + \"-\" + volpiano[3:]\n\n # before splitting on \"---\", note that some volpianos use \"6------6\" to identify missing content\n # the \"6------6\" should not be split\n if \"6------6\" in volpiano:\n # temporarily replace 6-----6 by *\n volpiano = volpiano.replace(\"6------6\", \"******\")\n # split volpiano into melody words\n words_melody = [word + \"---\" for word in volpiano.split(\"---\")]\n # remove the trailing \"---\" (added in previous line) from the last word\n words_melody[-1] = words_melody[-1][:-3]\n\n # split melody words into syllables\n # `syls_melody` would be a list of lists (words), each word is a list of syllables\n syls_melody = []\n for word in words_melody[:-1]:\n # to accommodate for text like `mar{tirum et}`, we appended space before all curly braces,\n # so that the text in curly braces can nicely align with the `---6------6---` in melody\n # (they're both treated as a single word)\n # however, there are cases like `an{#}` (originally no space before curly brace,\n # while the corresp `6------6` in melody has only two leading dashes because it corresponds to only a syllable)\n # in order to accommodate both cases, we change the syllable-level `6------6` into word-level\n # i.e., make it a single word on its own\n # example: see 219427 and 619450\n # this variable is for capturing the syllable-level `6------6` (below referred to as `gap`),\n syl_level_gap = None\n # `syls` contains the melody syllables in each melody word\n syls = []\n # the last 3 charactors (---) are discarded\n for i, syl in enumerate(word[:-3].split(\"--\")):\n if \"******\" in syl:\n # if the syllable contains `6------6`\n # (it may not be exactly `6------6`, it could also be sth like `6------677`)\n if i == 0:\n # if `6------6` is the first syllable in the word, it must be a word-level gap\n # just put it directly into the list for the current word\n syl = syl.replace(\"******\", \"6------6\")\n syls.append(syl + \"--\")\n else:\n # if the gap is not the first syllable in the word,\n # the word must be sth like `---k--6------677---` (syl-level gap)\n # we save it and later add it directly to the `syls_melody` list\n syl_level_gap = syl.replace(\"******\", \"6------6\")\n else:\n # for normal syls, directly add them to the list for the current word\n syls.append(syl + \"--\")\n # this next line is equivalent to removing the trailing \"--\" and\n # then adding the \"---\" back to the end of each word\n syls[-1] = syls[-1] + \"-\"\n syls_melody.append(syls)\n if syl_level_gap:\n # if there is syl-level gap, add it to `syls_melody` like a word\n syls_melody.append([syl_level_gap])\n\n # for the last word in melody\n last_word = words_melody[-1]\n if \"******\" in last_word:\n # 
change * back to 6------6\n word = word.replace(\"******\", \"6------6\")\n syls_melody.append([word])\n else:\n syls = [syl + \"--\" for syl in last_word.split(\"--\")]\n # remove the trailing \"--\" (added in previous line) from the last syllable\n syls[-1] = syls[-1][:-2]\n syls_melody.append(syls)\n return syls_melody\n\n\ndef find_next_barline(syls_text, tilda_idx):\n # set default to beyond the last word, in case the barline is missing, all words after ~ will be combined\n barline_idx = len(syls_text)\n # the barline is a word on its own, so start from the next word\n for i, word in enumerate(syls_text[tilda_idx + 1 :]):\n if word == [\"|\"]:\n barline_idx = tilda_idx + 1 + i\n break\n return barline_idx\n\n\ndef find_next_barline_mel(syls_melody, tilda_idx):\n # set default to beyond the last word, in case the barline is missing, all words after ~ will be combined\n barline_idx_mel = len(syls_melody)\n # the barline is a word on its own, so start from the next word\n for i, word in enumerate(syls_melody[tilda_idx + 1 :]):\n if word == [\"3---\"] or word == [\"4---\"] or word == [\"3\"] or word == [\"4\"]:\n barline_idx_mel = tilda_idx + 1 + i\n break\n return barline_idx_mel\n\n\ndef find_next_brace_end(syls_text, brace_start_idx):\n # set default to the last word, in case the brace end is missing, all words after { will be combined\n brace_end_idx = len(syls_text) - 1\n # there are cases where there's only one word in the braces, so start from the same word as the brace start\n for i, word in enumerate(syls_text[brace_start_idx:]):\n if word[-1][-1] == \"}\":\n brace_end_idx = brace_start_idx + i\n break\n return brace_end_idx\n\n\ndef postprocess(syls_text, syls_melody):\n # process the braces {} before processing ~ and |, because some chants have ~ inside {}, in this case,\n # processing {} will solve both\n brace_start_idx = []\n brace_end_idx = []\n for i, word in enumerate(syls_text):\n if word[0][0] == \"{\":\n brace_start_idx.append(i)\n if word[-1][-1] == \"}\":\n brace_end_idx.append(i)\n for idx in brace_start_idx:\n next_brace_end = find_next_brace_end(syls_text, idx)\n rebuilt_words = []\n for word in syls_text[idx : next_brace_end + 1]:\n word = [syl.strip(\"-\") for syl in word]\n rebuilt_word = \"\".join(word)\n rebuilt_words.append(rebuilt_word)\n syls_text[idx] = [\" \".join(rebuilt_words)]\n for i in range(idx + 1, next_brace_end + 1):\n syls_text[i] = [\"*\"]\n syls_text = [word for word in syls_text if word != [\"*\"]]\n\n # process the text between ~ and |\n barline_idx = []\n tilda_idx = []\n for i, word in enumerate(syls_text):\n if word == [\"|\"]:\n barline_idx.append(i)\n if word[0][0] == \"~\":\n tilda_idx.append(i)\n\n # when the text between ~ and | is combined into one word (since they aren't syllabized, they're actually one syllable),\n # the corresponding melody needs to be combined into one syllable, so that they align beautifully.\n # if multiple words are present between ~ and |, they are combined into one word.\n # this causes a change in the index of every word after them.\n # this becomes a problem when there are multiple ~ in the text (more than one region require combination).\n # `melody_offset` measures the change in indexing, so that we always index the correct words for combination\n melody_offset = 0\n for idx in tilda_idx:\n # combine text words\n rebuilt_words = []\n next_barline = find_next_barline(syls_text, idx)\n for word in syls_text[idx:next_barline]:\n word = [syl.strip(\"-\") for syl in word]\n rebuilt_word = \"\".join(word)\n 
rebuilt_words.append(rebuilt_word)\n syls_text[idx] = [\" \".join(rebuilt_words)]\n for i in range(idx + 1, next_barline):\n syls_text[i] = [\"*\"]\n\n # combine melody words\n # based on the tilda index in text, find the index of melody words to combine\n # most of the time, only one melody word needs to be combined\n # but some situations require combination of multiple melody words, see 399083\n next_barline_mel = find_next_barline_mel(syls_melody, idx - melody_offset)\n melody_words_to_combine = syls_melody[idx - melody_offset : next_barline_mel]\n # combine the melody words into one word (a list of melody syls)\n melody_words_combined = [\n syl for word in melody_words_to_combine for syl in word\n ]\n try:\n # combine the melody syls into one syl\n syls_melody[idx - melody_offset] = [\"\".join(melody_words_combined)]\n except IndexError:\n # sometimes the melody is shorter than text, so the tilda in text doesn't have melody\n print(\"MELODY SHORTER THAN TEXT, DIDNT REACH TILDA\")\n break\n # for the melody words that have been merged into some word before them,\n # mark them differently so that they still occupy the index and do not appear in the results\n for i in range(idx - melody_offset + 1, next_barline_mel):\n syls_melody[i] = [\"*\"]\n\n # this is crucial for getting the index correct. melody offset updating depends on the number of melody words\n # and text words that have been merged, and also the current melody offset\n melody_offset = (\n next_barline - idx - 1 - (next_barline_mel - idx - 1 + melody_offset)\n )\n\n # remove the previously merged words (marked *) from the final results\n syls_melody = [word for word in syls_melody if word != [\"*\"]]\n syls_text = [word for word in syls_text if word != [\"*\"]]\n return syls_text, syls_melody\n\n\ndef align(syls_text, syls_melody):\n # if melody has more words than text, fill spaces to the end of the text\n # if melody has fewer words than text, discard the extra text (loop through melody length below)\n if len(syls_melody) > len(syls_text):\n syls_text = syls_text + [\" \"] * (len(syls_melody) - len(syls_text))\n\n list_of_zips = []\n for i in range(len(syls_melody)):\n # when the melody ends with ---, the last melody word would be an empty string [\"\"]\n # this is usually ok because it happens only at the end of a chant,\n # where there's no text to go with this empty melody word.\n # it becomes a problem when the melody has fewer words than text, in which case the empty\n # melody word would align with an extra text word that should not appear in the alignment\n # see 560782 and 674219\n # if the melody word is empty, ignore it during alignment\n if syls_melody[i] == [\"\"]:\n continue\n\n # if the melody word is a barline, but there's no barline in text to align with it\n # see 270470 270305\n if syls_melody[i] == [\"3---\"] and syls_text[i] != [\"|\"]:\n # insert a barline or space (as a word) to text\n # syls_text.insert(i, [\"|\"])\n syls_text.insert(i, [\" \"])\n\n # if the melody word has more syllables, add space to the text\n if len(syls_melody[i]) > len(syls_text[i]):\n word_zip = zip_longest(syls_melody[i], syls_text[i], fillvalue=\" \")\n else:\n # when the text word has more syllables, there are two options:\n # 1. hide the extra syllables (more like old cantus)\n # word_zip = zip(syls_melody[i], syls_text[i])\n\n # 2. 
append dashes to the melody word, so that the text word and melody word have the same number of syllables\n # the second option may cause gaps in staff lines, if the appended content takes up less horizontal space than the extra text syllables\n word_zip = zip_longest(syls_melody[i], syls_text[i], fillvalue=\"------\")\n list_of_zips.append(word_zip)\n\n return list_of_zips\n", "path": "django/cantusdb_project/align_text_mel.py" } ]
diff --git a/django/cantusdb_project/align_text_mel.py b/django/cantusdb_project/align_text_mel.py index b7e1e1db6..fcac31f7e 100644 --- a/django/cantusdb_project/align_text_mel.py +++ b/django/cantusdb_project/align_text_mel.py @@ -155,7 +155,6 @@ def syllabize_melody(volpiano): # remove the trailing "--" (added in previous line) from the last syllable syls[-1] = syls[-1][:-2] syls_melody.append(syls) - # print(syls_melody) return syls_melody diff --git a/django/cantusdb_project/articles/templates/article_detail.html b/django/cantusdb_project/articles/templates/article_detail.html index 1966b801d..d2da28206 100644 --- a/django/cantusdb_project/articles/templates/article_detail.html +++ b/django/cantusdb_project/articles/templates/article_detail.html @@ -1,17 +1,18 @@ {% extends "base.html" %} {% block content %} +<title>{{ article.title }} | Cantus Manuscript Database</title> <div class="mr-3 p-3 col-md-12 bg-white rounded"> <object align="right" class="search-bar"> {% include "global_search_bar.html" %} </object> <h3> - {{article.title}} + {{ article.title }} </h3> <div class="row"> <div class="col"> <div class="container"> <div class="container text-wrap"> - <small>Submitted by <a href="{{ article.author.get_absolute_url }}">{{ article.author }}</a> on {{ article.date_created|date:"D, m/d/Y - H:i" }}</small> + <small>Submitted by <a href="{% url 'user-detail' article.author.id %}">{{ article.author }}</a> on {{ article.date_created|date:"D, m/d/Y - H:i" }}</small> <div style="padding-top: 1em;"> {{ article.body.html|safe }} </div> diff --git a/django/cantusdb_project/articles/templates/article_list.html b/django/cantusdb_project/articles/templates/article_list.html index 3c6aa2eec..ad2120c6b 100644 --- a/django/cantusdb_project/articles/templates/article_list.html +++ b/django/cantusdb_project/articles/templates/article_list.html @@ -1,5 +1,6 @@ {% extends "base.html" %} {% block content %} +<title>What's New | Cantus Manuscript Database</title> <div class="mr-3 p-3 col-md-10 mx-auto bg-white rounded"> <object align="right" class="search-bar"> {% include "global_search_bar.html" %} @@ -10,7 +11,7 @@ <h3>What's New</h3> <div class="col"> <small>{{ article.date_created|date:"D, m/d/Y - H:i" }}</small> <h4> - <a href="{{ article.get_absolute_url }}">{{ article.title }}</a> + <a href="{% url 'article-detail' article.id %}">{{ article.title }}</a> </h4> <div class="container"> {{ article.body|safe|truncatechars_html:3000 }} diff --git a/django/cantusdb_project/main_app/templates/century_detail.html b/django/cantusdb_project/main_app/templates/century_detail.html index 2e17c511d..57a33dcec 100644 --- a/django/cantusdb_project/main_app/templates/century_detail.html +++ b/django/cantusdb_project/main_app/templates/century_detail.html @@ -9,7 +9,7 @@ <h3>{{ century.name }}</h3> <ul> {% for source in century.sources.all|dictsort:"title" %} <li> - <a href="{{ source.get_absolute_url }}"> + <a href="{% url 'source-detail' source.id %}"> {{ source.title }} </a> </li> diff --git a/django/cantusdb_project/main_app/templates/chant_create.html b/django/cantusdb_project/main_app/templates/chant_create.html index b7b7713c8..c8770693d 100644 --- a/django/cantusdb_project/main_app/templates/chant_create.html +++ b/django/cantusdb_project/main_app/templates/chant_create.html @@ -11,7 +11,7 @@ <h3>Create Chant</h3> <div class="alert alert-success alert-dismissible"> {% for message in messages %} <a href="#" class="close" data-dismiss="alert" aria-label="close">&times;</a> - <a href="{{ 
previous_chant.get_absolute_url }}" style="color:#155724" target="_blank">{{ message }}</a> + <a href="{% url 'chant-detail' previous_chant.id %}" style="color:#155724" target="_blank">{{ message }}</a> {% endfor %} </div> {% endif %} @@ -232,12 +232,12 @@ <h3>Create Chant</h3> <div class="card mb-3 w-100"> <div class="card-header"> - <h5><a id="source" href="{{ source.get_absolute_url }}">{{ source.title }}</a></h5> + <h5><a id="source" href="{% url 'source-detail' source.id %}">{{ source.title }}</a></h5> </div> <div class="card-body" style="font-size: 15px"> <b>Suggestions based on the previous chant:</b><br> {% if previous_chant %} - {{ previous_chant.folio }} {{ previous_chant.c_sequence}} <a href="{{ previous_chant.get_absolute_url }}" target="_blank">{{ previous_chant.incipit }}</a><br> + {{ previous_chant.folio }} {{ previous_chant.c_sequence}} <a href="{% url 'chant-detail' previous_chant.id %}" target="_blank">{{ previous_chant.incipit }}</a><br> Cantus ID: <a href="http://cantusindex.org/id/{{ previous_chant.cantus_id }}" target="_blank">{{ previous_chant.cantus_id }}</a><br> {% if suggested_chants %} {% for chant in suggested_chants %} diff --git a/django/cantusdb_project/main_app/templates/chant_detail.html b/django/cantusdb_project/main_app/templates/chant_detail.html index bd8cc0564..3592c0642 100644 --- a/django/cantusdb_project/main_app/templates/chant_detail.html +++ b/django/cantusdb_project/main_app/templates/chant_detail.html @@ -30,7 +30,7 @@ <h3>{{ chant.incipit }}</h3> <div class="col"> <dt>Source</dt> <dd> - <a href="{{ chant.source.get_absolute_url }}">{{ chant.source.title }}</a> + <a href="{% url 'source-detail' chant.source.id %}">{{ chant.source.title }}</a> </dd> </div> {% endif %} @@ -64,7 +64,7 @@ <h3>{{ chant.incipit }}</h3> <div class="col-2"> <dt>Feast</dt> <dd> - <a href="{{ chant.feast.get_absolute_url }}">{{ chant.feast.name }}</a> + <a href="{% url 'feast-detail' chant.feast.id %}" title="{{ chant.feast.description }}">{{ chant.feast.name }}</a> </dd> </div> {% endif %} @@ -73,7 +73,7 @@ <h3>{{ chant.incipit }}</h3> <div class="col-2"> <dt>Office/Mass</dt> <dd> - <a href="{{ chant.office.get_absolute_url }}" title="{{ chant.office.description }}">{{ chant.office.name }}</a> + <a href="{% url 'office-detail' chant.office.id %}" title="{{ chant.office.description }}">{{ chant.office.name }}</a> </dd> </div> {% endif %} @@ -82,7 +82,7 @@ <h3>{{ chant.incipit }}</h3> <div class="col-2"> <dt>Genre</dt> <dd> - <a href="{{ chant.genre.get_absolute_url }}" title="{{ chant.genre.description }}">{{ chant.genre.name }}</a> + <a href="{% url 'genre-detail' chant.genre.id %}" title="{{ chant.genre.description }}">{{ chant.genre.name }}</a> </dd> </div> {% endif %} @@ -285,7 +285,7 @@ <h4>List of melodies</h4> <b>Source navigation</b> <br> {% if source %} - <a href="{{ source.get_absolute_url }}"> <b>{{ source.siglum }}</b> </a> + <a href="{% url 'source-detail' source.id %}" title="{{ source.title }}"> <b>{{ source.siglum }}</b> </a> {% else %} This chant is not associated with any source. 
{% endif %} @@ -325,10 +325,26 @@ <h4>List of melodies</h4> <table class="table table-sm small table-bordered"> {% for chant in chants %} <tr> - <td>{{ chant.c_sequence }}</td> - <td>{{ chant.office.name|default_if_none:"" }} <b>{{ chant.genre.name|default_if_none:"" }}</b> {{ chant.position|default_if_none:"" }} </td> - <td><a href="{{ chant.get_absolute_url }}">{{ chant.incipit|default_if_none:"" }}</a></td> - <td><a href="{{ chant.get_ci_url }}" target="_blank">{{ chant.cantus_id|default_if_none:"" }}</a></td> + <td> + {{ chant.c_sequence }} + </td> + <td> + <span title="{{ chant.office.description }}"> + {{ chant.office.name|default_if_none:"" }} + </span> + <b title="{{ chant.genre.description }}">{{ chant.genre.name|default_if_none:"" }}</b> + {{ chant.position|default_if_none:"" }} + </td> + <td> + <a href="{% url 'chant-detail' chant.id %}"> + {{ chant.incipit|default_if_none:"" }} + </a> + </td> + <td> + <a href="{{ chant.get_ci_url }}" target="_blank"> + {{ chant.cantus_id|default_if_none:"" }} + </a> + </td> </tr> {% endfor %} </table> @@ -337,34 +353,71 @@ <h4>List of melodies</h4> <a id="previousToggle" href="#" onclick="togglePrevious(); return false;">Display previous chants ▾</a> {% endif %} <br> - - {% for feast, chants in feasts_current_folio %} - Folio: <b>{{ chant.folio }}</b> - Feast: <b>{{ feast.name }}</b> - <table class="table table-sm small table-bordered"> - {% for chant in chants %} - <tr> - <td>{{ chant.c_sequence }}</td> - <td>{{ chant.office.name|default_if_none:"" }} <b>{{ chant.genre.name|default_if_none:"" }}</b> {{ chant.position|default_if_none:"" }} </td> - <td><a href="{{ chant.get_absolute_url }}">{{ chant.incipit|default_if_none:"" }}</a></td> - <td><a href="{{ chant.get_ci_url }}" target="_blank">{{ chant.cantus_id|default_if_none:"" }}</a></td> - </tr> - {% endfor %} - </table> - {% endfor %} + {% with chant.c_sequence as current_seq %} + {% for feast, chants in feasts_current_folio %} + Folio: <b>{{ chant.folio }}</b> - Feast: <b title="{{ feast.description }}">{{ feast.name }}</b> + <table class="table table-sm small table-bordered"> + {% for chant in chants %} + <tr> + <td> + {{ chant.c_sequence }} + </td> + <td> + <span title="{{ chant.office.description }}"> + {{ chant.office.name|default_if_none:"" }} + </span> + <b title="{{ chant.genre.description }}">{{ chant.genre.name|default_if_none:"" }}</b> + {{ chant.position|default_if_none:"" }} + </td> + <td> + <a href="{% url 'chant-detail' chant.id %}"> + {% if chant.c_sequence == current_seq %} + <b>{{ chant.incipit|default_if_none:"" }}</b> + {% else %} + {{ chant.incipit|default_if_none:"" }} + {% endif %} + </a> + </td> + <td> + <a href="{{ chant.get_ci_url }}" target="_blank"> + {{ chant.cantus_id|default_if_none:"" }} + </a> + </td> + </tr> + {% endfor %} + </table> + {% endfor %} + {% endwith %} {% if next_folio %} <a id="nextToggle" href="#" onclick="toggleNext(); return false;">Display next chants ▾</a> <br> <div id="nextDiv" style="display:none"> {% for feast, chants in feasts_next_folio %} - Folio: <b>{{ next_folio }}</b> - Feast: <b>{{ feast.name }}</b> + Folio: <b>{{ next_folio }}</b> - Feast: <b title="{{ feast.description }}">{{ feast.name }}</b> <table class="table table-sm small table-bordered"> {% for chant in chants %} <tr> - <td>{{ chant.c_sequence }}</td> - <td>{{ chant.office.name|default_if_none:"" }} <b>{{ chant.genre.name|default_if_none:"" }}</b> {{ chant.position|default_if_none:"" }} </td> - <td><a href="{{ chant.get_absolute_url }}">{{ chant.incipit|default_if_none:"" 
}}</a></td> - <td><a href="{{ chant.get_ci_url }}" target="_blank">{{ chant.cantus_id|default_if_none:"" }}</a></td> + <td> + {{ chant.c_sequence }} + </td> + <td> + <span title="{{ chant.office.description }}"> + {{ chant.office.name|default_if_none:"" }} + </span> + <b title="{{ chant.genre.description }}">{{ chant.genre.name|default_if_none:"" }}</b> + {{ chant.position|default_if_none:"" }} + </td> + <td> + <a href="{% url 'chant-detail' chant.id %}"> + {{ chant.incipit|default_if_none:"" }} + </a> + </td> + <td> + <a href="{{ chant.get_ci_url }}" target="_blank"> + {{ chant.cantus_id|default_if_none:"" }} + </a> + </td> </tr> {% endfor %} </table> @@ -381,12 +434,12 @@ <h4>List of melodies</h4> <div class="card-header"> <small><a href="/sources?segment={{ source.segment.id }}">{{ source.segment.name }}</a></small> <br> - {{ source.siglum }} + <span title="{{ source.title }}">{{ source.siglum }}</span> </div> <div class=" card-body"> <small> {% if source.provenance.name %} - Provenance: <b><a href="{{ source.provenance.get_absolute_url }}">{{source.provenance.name}}</a></b> + Provenance: <b><a href="{% url 'provenance-detail' source.provenance.id %}">{{source.provenance.name}}</a></b> <br> {% endif %} @@ -394,7 +447,7 @@ <h4>List of melodies</h4> Date: {% for century in source.century.all %} <b> - <a href="{{ century.get_absolute_url }}">{{ century.name }}</a> + <a href="{% url 'century-detail' century.id %}">{{ century.name }}</a> </b> {% endfor %} | @@ -409,7 +462,7 @@ <h4>List of melodies</h4> {% if source.notation.all %} <b> - <a href="{{ source.notation.all.first.get_absolute_url }}">{{ source.notation.all.first.name }}</a> + <a href="{% url 'notation-detail' source.notation.all.first.id %}">{{ source.notation.all.first.name }}</a> </b> <br> {% endif %} @@ -419,7 +472,7 @@ <h4>List of melodies</h4> <ul> {% for editor in source.inventoried_by.all %} <li> - <a href={{ editor.get_absolute_url }}><b>{{ editor.full_name }}</b></a> + <a href="{% url 'user-detail' editor.id %}"><b>{{ editor.full_name }}</b></a> <br> {{ editor.institution|default_if_none:"" }} </li> @@ -433,7 +486,7 @@ <h4>List of melodies</h4> <ul> {% for editor in source.proofreaders.all %} <li> - <a href={{ editor.get_absolute_url }}><b>{{ editor.full_name }}</b></a> + <a href="{% url 'user-detail' editor.id %}"><b>{{ editor.full_name }}</b></a> <br> </li> {% endfor %} diff --git a/django/cantusdb_project/main_app/templates/chant_edit.html b/django/cantusdb_project/main_app/templates/chant_edit.html index 1e3247a18..9b0dafbaf 100644 --- a/django/cantusdb_project/main_app/templates/chant_edit.html +++ b/django/cantusdb_project/main_app/templates/chant_edit.html @@ -227,7 +227,7 @@ <h3>Full text &amp; Volpiano edit form</h3> <small><b>&plus; Add new source</b></small> </a> <br> - <a href="{{ source.get_absolute_url }}" style="display: inline-block; margin-top:5px;"> + <a href="{% url 'source-detail' source.id %}" title="{{ source.title }}" style="display: inline-block; margin-top:5px;"> {{ source.siglum }} </a> {% endif %} @@ -240,7 +240,7 @@ <h3>Full text &amp; Volpiano edit form</h3> <div class="card mb-3 w-100"> <div class="card-header"> - <h4><a href="{{ source.get_absolute_url }}">{{ source.siglum }}</a></h4> + <h4><a href="{% url 'source-detail' source.id %}" title="{{ source.title }}">{{ source.siglum }}</a></h4> </div> <div class="card-body"> <small> @@ -276,17 +276,40 @@ <h4><a href="{{ source.get_absolute_url }}">{{ source.siglum }}</a></h4> {% comment %} render if the user has selected a specific folio {% 
endcomment %} {% if feasts_current_folio %} {% for feast, chants in feasts_current_folio %} - <small>Folio: <b>{{ chant.folio }}</b> - Feast: <b>{{ feast.name }}</b></small> + <small>Folio: <b>{{ chant.folio }}</b> - Feast: <b title="{{ chant.feast.description }}">{{ feast.name }}</b></small> <table class="table table-sm small table-bordered"> {% for chant in chants %} <tr> - <td class="h-25" style="width: 5%">{{ chant.c_sequence }}</td> - <td class="h-25" style="width: 20%">{{ chant.office.name|default_if_none:"" }} <b>{{ chant.genre.name|default_if_none:"" }}</b> {{ chant.position|default_if_none:"" }} </td> - <td class="h-25" style="width: 40%; overflow: hidden; white-space: nowrap; text-overflow: ellipsis; max-width: 0"><a href="{{ chant.get_absolute_url }}" target="_blank">{{ chant.incipit|default_if_none:"" }}</a></td> - <td class="h-25" style="width: 20%"><a href="{{ chant.get_ci_url }}" target="_blank">{{ chant.cantus_id|default_if_none:"" }}</a></td> - <td class="h-25" style="width: 5%">{{ chant.mode|default:"" }}</td> + <td class="h-25" style="width: 5%"> + {{ chant.c_sequence }} + </td> + <td class="h-25" style="width: 20%"> + <span title="{{ chant.office.description }}"> + {{ chant.office.name|default_if_none:"" }} + </span> + <b title="{{ chant.genre.description }}"> + {{ chant.genre.name|default_if_none:"" }} + </b> + {{ chant.position|default_if_none:"" }} + </td> + <td class="h-25" style="width: 40%; overflow: hidden; white-space: nowrap; text-overflow: ellipsis; max-width: 0"> + <a href="{% url 'chant-detail' chant.id %}" target="_blank"> + {{ chant.incipit|default_if_none:"" }} + </a> + </td> + <td class="h-25" style="width: 20%"> + <a href="{{ chant.get_ci_url }}" target="_blank"> + {{ chant.cantus_id|default_if_none:"" }} + </a> + </td> + <td class="h-25" style="width: 5%"> + {{ chant.mode|default:"" }} + </td> <td class="h-25" style="width: 10%"> - <a href="{% url 'source-edit-chants' source.id %}?pk={{ chant.pk }}&folio={{ chant.folio }}">EDIT</button></td> + <a href="{% url 'source-edit-chants' source.id %}?pk={{ chant.pk }}&folio={{ chant.folio }}"> + EDIT + </a> + </td> </tr> {% endfor %} </table> @@ -294,17 +317,39 @@ <h4><a href="{{ source.get_absolute_url }}">{{ source.siglum }}</a></h4> {% comment %} render if the user has selected a specific feast {% endcomment %} {% elif folios_current_feast %} {% for folio, chants in folios_current_feast %} - <small>Folio: <b>{{ folio }}</b> - Feast: <b>{{ chant.feast }}</b></small> + <small>Folio: <b>{{ folio }}</b> - Feast: <b title="{{ chant.feast.description }}">{{ chant.feast }}</b></small> <table class="table table-sm small table-bordered"> {% for chant in chants %} <tr> - <td class="h-25" style="width: 5%">{{ chant.c_sequence }}</td> - <td class="h-25" style="width: 20%">{{ chant.office.name|default_if_none:"" }} <b>{{ chant.genre.name|default_if_none:"" }}</b> {{ chant.position|default_if_none:"" }} </td> - <td class="h-25" style="width: 40%; overflow: hidden; white-space: nowrap; text-overflow: ellipsis; max-width: 0"><a href="{{ chant.get_absolute_url }}" target="_blank">{{ chant.incipit|default_if_none:"" }}</a></td> - <td class="h-25" style="width: 20%"><a href="{{ chant.get_ci_url }}" target="_blank">{{ chant.cantus_id|default_if_none:"" }}</a></td> - <td class="h-25" style="width: 5%">{{ chant.mode|default:"" }}</td> + <td class="h-25" style="width: 5%"> + {{ chant.c_sequence }} + </td> + <td class="h-25" style="width: 20%"> + <span title="{{ chant.office.description }}"> + {{ 
chant.office.name|default_if_none:"" }} + </span> + <b title="{{ chant.genre.description }}"> + {{ chant.genre.name|default_if_none:"" }} + </b> + {{ chant.position|default_if_none:"" }} + </td> + <td class="h-25" style="width: 40%; overflow: hidden; white-space: nowrap; text-overflow: ellipsis; max-width: 0"> + <a href="{% url 'chant-detail' chant.id %}" target="_blank"> + {{ chant.incipit|default_if_none:"" }} + </a> + </td> + <td class="h-25" style="width: 20%"> + <a href="{{ chant.get_ci_url }}" target="_blank"> + {{ chant.cantus_id|default_if_none:"" }} + </a> + </td> + <td class="h-25" style="width: 5%"> + {{ chant.mode|default:"" }} + </td> <td class="h-25" style="width: 10%"> - <a href="{% url 'source-edit-chants' source.id %}?pk={{ chant.pk }}&feast={{ chant.feast.id }}">EDIT</button> + <a href="{% url 'source-edit-chants' source.id %}?pk={{ chant.pk }}&feast={{ chant.feast.id }}"> + EDIT + </a> </td> </tr> {% endfor %} diff --git a/django/cantusdb_project/main_app/templates/chant_list.html b/django/cantusdb_project/main_app/templates/chant_list.html index 0345ada3c..9eb75782a 100644 --- a/django/cantusdb_project/main_app/templates/chant_list.html +++ b/django/cantusdb_project/main_app/templates/chant_list.html @@ -72,11 +72,11 @@ <h3>Browse Chants</h3> <tbody> {% for chant in chants %} <tr> - <td class="text-wrap" style="text-align:center">{{ chant.siglum|default:"" }}</td> + <td class="text-wrap" style="text-align:center" title="{{ chant.source.title }}">{{ chant.source.siglum|default:"" }}</td> <td class="text-wrap" style="text-align:center"><b>{{ chant.folio|default:"" }}</b></td> <td class="text-wrap" style="text-align:center">{{ chant.c_sequence|default_if_none:"" }}</td> {# default_if_none: sometimes, c_sequence is 0, and should still be displayed #} <td class="text-wrap"> - <a href="{{ chant.get_absolute_url }}"><b>{{ chant.incipit|default:"" }}</b></a> + <a href="{% url 'chant-detail' chant.id %}"><b>{{ chant.incipit|default:"" }}</b></a> <p>{{ chant.manuscript_full_text_std_spelling|default:"" }}<br> {% if chant.volpiano %} <span style="font-family: volpiano; font-size:25px">{{ chant.volpiano|default:"" }}</span> @@ -84,13 +84,19 @@ <h3>Browse Chants</h3> </p> </td> <td class="text-wrap" style="text-align:center"> - <a href="{{ chant.feast.get_absolute_url }}">{{ chant.feast.name|default:"" }}</a> + {% if chant.feast %} + <a href="{% url 'feast-detail' chant.feast.id %}" title="{{ chant.feast.description }}">{{ chant.feast.name|default:"" }}</a> + {% endif %} </td> <td class="text-wrap" style="text-align:center"> - <a href="{{ chant.office.get_absolute_url }}" title="{{ chant.office.description }}">{{ chant.office.name|default:"" }}</a> + {% if chant.office %} + <a href="{% url 'office-detail' chant.office.id %}" title="{{ chant.office.description }}">{{ chant.office.name|default:"" }}</a> + {% endif %} </td> <td class="text-wrap" style="text-align:center"> - <a href="{{ chant.genre.get_absolute_url }}" title="{{ chant.genre.description }}">{{ chant.genre.name|default:"" }}</a> + {% if chant.genre %} + <a href="{% url 'genre-detail' chant.genre.id %}" title="{{ chant.genre.description }}">{{ chant.genre.name|default:"" }}</a> + {% endif %} </td> <td class="text-wrap" style="text-align:center">{{ chant.position|default:"" }}</td> <td class="text-wrap" style="text-align:center"> @@ -117,7 +123,7 @@ <h3>Browse Chants</h3> <div class="card mb-3 w-100"> <div class="card-header"> - <h4><a href="{{ source.get_absolute_url }}">{{ source.siglum }}</a></h4> + <h4><a href="{% url 
'source-detail' source.id %}" title="{{ source.title }}">{{ source.siglum }}</a></h4> </div> <div class="card-body"> diff --git a/django/cantusdb_project/main_app/templates/chant_proofread.html b/django/cantusdb_project/main_app/templates/chant_proofread.html index ee8c25df5..baf6a9a68 100644 --- a/django/cantusdb_project/main_app/templates/chant_proofread.html +++ b/django/cantusdb_project/main_app/templates/chant_proofread.html @@ -260,7 +260,7 @@ <h3>Proofreading tool</h3> <div class="card mb-3 w-100"> <div class="card-header"> - <h4><a href="{{ source.get_absolute_url }}">{{ source.siglum }}</a></h4> + <h4><a href="{% url 'source-detail' source.id %}" title="{{ source.title }}">{{ source.siglum }}</a></h4> </div> <div class="card-body"> <small> @@ -296,13 +296,34 @@ <h4><a href="{{ source.get_absolute_url }}">{{ source.siglum }}</a></h4> <table class="table table-sm small table-bordered"> {% for chant in chants %} <tr> - <td class="h-25" style="width: 5%">{{ chant.c_sequence }}</td> - <td class="h-25" style="width: 20%">{{ chant.office.name|default_if_none:"" }} <b>{{ chant.genre.name|default_if_none:"" }}</b> {{ chant.position|default_if_none:"" }} </td> - <td class="h-25" style="width: 40%; overflow: hidden; white-space: nowrap; text-overflow: ellipsis; max-width: 0"><a href="{{ chant.get_absolute_url }}">{{ chant.incipit|default_if_none:"" }}</a></td> - <td class="h-25" style="width: 20%"><a href="{{ chant.get_ci_url }}" target="_blank">{{ chant.cantus_id|default_if_none:"" }}</a></td> - <td class="h-25" style="width: 5%">{{ chant.mode|default:"" }}</td> + <td class="h-25" style="width: 5%"> + {{ chant.c_sequence }} + </td> + <td class="h-25" style="width: 20%"> + <span title="{{ chant.office.description }}">{{ chant.office.name|default_if_none:"" }}</span> + <b title="{{ chant.genre.description }}"> + {{ chant.genre.name|default_if_none:"" }} + </b> + {{ chant.position|default_if_none:"" }} + </td> + <td class="h-25" style="width: 40%; overflow: hidden; white-space: nowrap; text-overflow: ellipsis; max-width: 0"> + <a href="{% url 'chant-detail' chant.id %}"> + {{ chant.incipit|default_if_none:"" }} + </a> + </td> + <td class="h-25" style="width: 20%"> + <a href="{{ chant.get_ci_url }}" target="_blank"> + {{ chant.cantus_id|default_if_none:"" }} + </a> + </td> + <td class="h-25" style="width: 5%"> + {{ chant.mode|default:"" }} + </td> <td class="h-25" style="width: 10%"> - <a href="{% url 'chant-proofread' source.id %}?pk={{ chant.pk }}&folio={{ chant.folio }}">EDIT</button></td> + <a href="{% url 'chant-proofread' source.id %}?pk={{ chant.pk }}&folio={{ chant.folio }}"> + EDIT + </a> + </td> </tr> {% endfor %} </table> @@ -314,13 +335,33 @@ <h4><a href="{{ source.get_absolute_url }}">{{ source.siglum }}</a></h4> <table class="table table-sm small table-bordered"> {% for chant in chants %} <tr> - <td class="h-25" style="width: 5%">{{ chant.c_sequence }}</td> - <td class="h-25" style="width: 20%">{{ chant.office.name|default_if_none:"" }} <b>{{ chant.genre.name|default_if_none:"" }}</b> {{ chant.position|default_if_none:"" }} </td> - <td class="h-25" style="width: 40%; overflow: hidden; white-space: nowrap; text-overflow: ellipsis; max-width: 0"><a href="{{ chant.get_absolute_url }}">{{ chant.incipit|default_if_none:"" }}</a></td> - <td class="h-25" style="width: 20%"><a href="{{ chant.get_ci_url }}" target="_blank">{{ chant.cantus_id|default_if_none:"" }}</a></td> - <td class="h-25" style="width: 5%">{{ chant.mode|default:"" }}</td> + <td class="h-25" style="width: 5%"> + {{ 
chant.c_sequence }} + </td> + <td class="h-25" style="width: 20%"> + <span title="{{ chant.office.description }}">{{ chant.office.name|default_if_none:"" }}</span> + <b title="{{ chant.genre.description }}"> + {{ chant.genre.name|default_if_none:"" }} + </b> + {{ chant.position|default_if_none:"" }} + </td> + <td class="h-25" style="width: 40%; overflow: hidden; white-space: nowrap; text-overflow: ellipsis; max-width: 0"> + <a href="{% url 'chant-detail' chant.id %}"> + {{ chant.incipit|default_if_none:"" }} + </a> + </td> + <td class="h-25" style="width: 20%"> + <a href="{{ chant.get_ci_url }}" target="_blank"> + {{ chant.cantus_id|default_if_none:"" }} + </a> + </td> + <td class="h-25" style="width: 5%"> + {{ chant.mode|default:"" }} + </td> <td class="h-25" style="width: 10%"> - <a href="{% url 'chant-proofread' source.id %}?pk={{ chant.pk }}&feast={{ chant.feast.id }}">EDIT</button> + <a href="{% url 'chant-proofread' source.id %}?pk={{ chant.pk }}&feast={{ chant.feast.id }}"> + EDIT + </a> </td> </tr> {% endfor %} diff --git a/django/cantusdb_project/main_app/templates/chant_search.html b/django/cantusdb_project/main_app/templates/chant_search.html index 980bd0472..04caba705 100644 --- a/django/cantusdb_project/main_app/templates/chant_search.html +++ b/django/cantusdb_project/main_app/templates/chant_search.html @@ -10,7 +10,7 @@ <h3>Search Chants</h3> {% if source %} <p> - <b>Searching in source: <a href="{{ source.get_absolute_url }}" target="_blank">{{ source.title }}</a></b> + <b>Searching in source: <a href="{% url 'source-detail' source.id %}" target="_blank">{{ source.title }}</a></b> </p> {% elif keyword %} <p> @@ -209,7 +209,9 @@ <h3>Search Chants</h3> {% for chant in chants %} <tr> <td class="text-wrap" style="text-align:left"> - <a href="{{ chant.source.get_absolute_url }}">{{ chant.source.siglum }}</a> + {% if chant.source %} + <a href="{% url 'source-detail' chant.source.id %}" title="{{ chant.source.title }}">{{ chant.source.siglum }}</a> + {% endif %} </td> <td class="text-wrap" style="text-align:center"> {{ chant.folio|default:""|truncatechars_html:140 }} @@ -217,12 +219,12 @@ <h3>Search Chants</h3> <td class="text-wrap" style="text-align:left"> {% comment %} this is used for distinguishing chants from sequences, - if the object is chant, use chant.get_absolute_url, - otherwise, use sequence.get_absolute_url + if the object is chant, use chant-detail view, + otherwise, use sequence-detail view. 
the combined queryset turned all objects into chants so this is the only way to make the distinction {% endcomment %} {% if chant.search_vector %} - <b><a href="{{ chant.get_absolute_url }}">{{ chant.incipit|default:"" }}</a></b> + <b><a href="{% url 'chant-detail' chant.id %}">{{ chant.incipit|default:"" }}</a></b> {% else %} <b><a href="{% url 'sequence-detail' chant.id %}">{{ chant.incipit|default:"" }}</a></b> {% endif %} @@ -231,13 +233,19 @@ <h3>Search Chants</h3> </p> </td> <td class="text-wrap" style="text-align:center"> - <a href="{{ chant.feast.get_absolute_url }}">{{ chant.feast.name|default:"" }}</a> + {% if chant.feast %} + <a href="{% url 'feast-detail' chant.feast.id %}" title="{{ chant.feast.description }}">{{ chant.feast.name|default:"" }}</a> + {% endif %} </td> <td class="text-wrap" style="text-align:center"> - <a href="{{ chant.office.get_absolute_url }}" title="{{ chant.office.description }}">{{ chant.office.name|default:"" }}</a> + {% if chant.office %} + <a href="{% url 'office-detail' chant.office.id %}" title="{{ chant.office.description }}">{{ chant.office.name|default:"" }}</a> + {% endif %} </td> <td class="text-wrap" style="text-align:center"> - <a href="{{ chant.genre.get_absolute_url }}" title="{{ chant.genre.description }}">{{ chant.genre.name|default:"" }}</a> + {% if chant.genre %} + <a href="{% url 'genre-detail' chant.genre.id %}" title="{{ chant.genre.description }}">{{ chant.genre.name|default:"" }}</a> + {% endif %} </td> <td class="text-wrap" style="text-align:center"> {{ chant.position|default:"" }} diff --git a/django/cantusdb_project/main_app/templates/chant_seq_by_cantus_id.html b/django/cantusdb_project/main_app/templates/chant_seq_by_cantus_id.html index d893edb19..9a407786d 100644 --- a/django/cantusdb_project/main_app/templates/chant_seq_by_cantus_id.html +++ b/django/cantusdb_project/main_app/templates/chant_seq_by_cantus_id.html @@ -31,26 +31,58 @@ <h3> {% for chant in chants %} <tr> <td class="text-wrap"> - <a href="{{ chant.source.get_absolute_url }}">{{ chant.source.siglum }}</a> + {% if chant.source %} + <a href="{% url 'source-detail' chant.source.id %}" title="{{ chant.source.title }}">{{ chant.source.siglum }}</a> + {% endif %} </td> <td class="text-wrap">{{ chant.folio }}</td> {% comment %} this is used for distinguishing chants from sequences, - if the object is chant, use chant.get_absolute_url, - otherwise, use sequence.get_absolute_url + if the object is chant, use chant-detail view, + otherwise, use sequence-detail view. 
the combined queryset turned all objects into chants so this is the only way to make the distinction {% endcomment %} {% if chant.search_vector %} - <td class="text-wrap"><a href="{{ chant.get_absolute_url }}">{{ chant.incipit }}</a></td> + <td class="text-wrap"><a href="{% url 'chant-detail' chant.id %}">{{ chant.incipit }}</a></td> {% else %} <td class="text-wrap"><a href="{% url 'sequence-detail' chant.id %}">{{ chant.incipit }}</a></td> {% endif %} - <td class="text-wrap" title="{{ chant.office.description }}">{{ chant.office.name|default:"" }} </td> - <td class="text-wrap" title="{{ chant.genre.description }}">{{ chant.genre.name|default:"" }} </td> - <td class="text-wrap">{{ chant.position|default:"" }}</td> - <td class="text-wrap"><a href="{{ chant.feast.get_absolute_url }}">{{ chant.feast.name|default:"" }}</a></td> - <td class="text-wrap">{{ chant.mode|default:"" }}</td> - <td class="text-wrap" style="font-family: volpiano; font-size:30px">{% if chant.volpiano %}<a href="{{ chant.get_absolute_url }}" style="text-decoration: none" title="Chant has volpiano melody">1</a>{% endif %}</td> - <td class="text-wrap">{% if chant.image_link %}<a href="{{ chant.image_link }}" target="_blank">Image</a>{% endif %}</td> + <td class="text-wrap"> + {% if chant.office %} + <a href="{% url 'office-detail' chant.office.id %}" title="{{ chant.office.description }}"> + {{ chant.office.name }} + </a> + {% endif %} + </td> + <td class="text-wrap"> + {% if chant.genre %} + <a href="{% url 'genre-detail' chant.genre.id %}" title="{{ chant.genre.description }}"> + {{ chant.genre.name }} + </a> + {% endif %} + </td> + <td class="text-wrap"> + {{ chant.position|default:"" }} + </td> + <td class="text-wrap"> + {% if chant.feast %} + <a href="{% url 'feast-detail' chant.feast.id %}" title="{{ chant.feast.description }}"> + {{ chant.feast.name }} + </a> + {% endif %} + </td> + <td class="text-wrap"> + {{ chant.mode|default:"" }} + </td> + <td class="text-wrap" style="font-family: volpiano; font-size:30px"> + {% if chant.volpiano %} + <a href="{% url 'chant-detail' chant.id %}" style="text-decoration: none" title="Chant has volpiano melody">1</a> + {% endif %} + </td> + <td class="text-wrap"> + {% if chant.image_link %} + <a href="{{ chant.image_link }}" target="_blank">Image</a> + {% endif %} + </td> </tr> {% endfor %} </tbody> diff --git a/django/cantusdb_project/main_app/templates/feast_detail.html b/django/cantusdb_project/main_app/templates/feast_detail.html index c3609112e..af363b7f5 100644 --- a/django/cantusdb_project/main_app/templates/feast_detail.html +++ b/django/cantusdb_project/main_app/templates/feast_detail.html @@ -57,7 +57,11 @@ <h4>The most frequent chants</h4> {% comment %} use `urlencode` filter because 1 chant and 2 sequences have forward slash in their cantus_id (data error) {% endcomment %} <td><a href="{% url 'chant-by-cantus-id' cantus_id|urlencode:"" %}">{{ cantus_id|default:"" }}</a></td> <td>{{ incipit }}</td> - <td><a href="{{ genre.get_absolute_url }}" title="{{ genre.description }}">{{ genre.name }}</a></td> + <td> + {% if genre %} + <a href="{% url 'genre-detail' genre.id %}" title="{{ genre.description }}">{{ genre.name }}</a> + {% endif %} + </td> <td>{{ count }}</td> </tr> {% endfor %} @@ -78,7 +82,7 @@ <h4>Sources containing the feast</h4> <tbody> {% for source, count in sources_zip %} <tr> - <td><a href="{% url 'chant-list' %}?source={{ source.id }}">{{ source.siglum }}</a></td> + <td><a href="{% url 'chant-list' %}?source={{ source.id }}" title="{{ source.title }}">{{ 
source.siglum }}</a></td> <td><a href="{% url 'chant-list' %}?source={{ source.id }}&feast={{ feast.id }}">{{ count }}</a></td> </tr> {% endfor %} diff --git a/django/cantusdb_project/main_app/templates/feast_list.html b/django/cantusdb_project/main_app/templates/feast_list.html index 46529b85f..2dd3f3ec0 100644 --- a/django/cantusdb_project/main_app/templates/feast_list.html +++ b/django/cantusdb_project/main_app/templates/feast_list.html @@ -69,7 +69,9 @@ <h3>List of Feasts</h3> {% for feast in feasts %} <tr> <td class="text-wrap"> - <a href="{{ feast.get_absolute_url }}"><b>{{ feast.name }}</b></a> + <a href="{% url 'feast-detail' feast.id %}" title="{{ feast.description }}"> + <b>{{ feast.name }}</b> + </a> </td> <td class="text-wrap">{{ feast.description|default:"" }}</td> <td class="text-wrap"> diff --git a/django/cantusdb_project/main_app/templates/full_index.html b/django/cantusdb_project/main_app/templates/full_index.html index d59519585..4a43434e8 100644 --- a/django/cantusdb_project/main_app/templates/full_index.html +++ b/django/cantusdb_project/main_app/templates/full_index.html @@ -22,7 +22,7 @@ <body> <title>Inventory | Cantus Manuscript Database</title> <h3>CANTUS Manuscript Inventory: - <a href="{{ source.get_absolute_url }}" target="_href">{{ source.title }}</a> + <a href="{% url 'source-detail' source.id %}" target="_href">{{ source.title }}</a> </h3> This source inventory contains {{ chants.count }} chants. <table class="table table-sm small table-bordered table-striped table-hover"> @@ -50,7 +50,7 @@ <h3>CANTUS Manuscript Inventory: `default`: use the given default if the value evaluates to False `default_if_none`: use the given default only if the value is None {% endcomment %} - <td>{{ chant.source.siglum|default_if_none:"" }}</td> + <td title="{{ chant.source.title }}">{{ chant.source.siglum|default_if_none:"" }}</td> <td>{{ chant.marginalia|default_if_none:"" }}</td> <td>{{ chant.folio|default_if_none:"" }}</td> <td> @@ -60,11 +60,23 @@ <h3>CANTUS Manuscript Inventory: {{ chant.s_sequence|default_if_none:"" }} {% endif %} </td> - <td>{{ chant.feast.name|default_if_none:"" }}</td> - <td><span title="{{ chant.office.description }}">{{ chant.office.name|default_if_none:"" }}</span></td> - <td><span title="{{ chant.genre.description }}">{{ chant.genre.name|default_if_none:"" }}</span></td> + <td> + <span title="{{ chant.feast.description }}"> + {{ chant.feast.name|default_if_none:"" }} + </span> + </td> + <td> + <span title="{{ chant.office.description }}"> + {{ chant.office.name|default_if_none:"" }} + </span> + </td> + <td> + <span title="{{ chant.genre.description }}"> + {{ chant.genre.name|default_if_none:"" }} + </span> + </td> <td>{{ chant.position|default_if_none:"" }}</td> - <td><a href="{{ chant.get_absolute_url }}" target="_blank">{{ chant.incipit|default_if_none:"" }}</a></td> + <td><a href="{% url 'chant-detail' chant.id %}" target="_blank">{{ chant.incipit|default_if_none:"" }}</a></td> <td>{{ chant.cantus_id|default_if_none:"" }}</td> <td>{{ chant.mode|default_if_none:"" }}</td> <td>{{ chant.differentia|default_if_none:"" }}</td> diff --git a/django/cantusdb_project/main_app/templates/genre_list.html b/django/cantusdb_project/main_app/templates/genre_list.html index ea9041425..5ed47b2bd 100644 --- a/django/cantusdb_project/main_app/templates/genre_list.html +++ b/django/cantusdb_project/main_app/templates/genre_list.html @@ -43,7 +43,7 @@ <h3>List of Genres</h3> {% for genre in genres %} <tr> <td style="text-align:center"> - <a href="{{ 
genre.get_absolute_url }}"><b>{{ genre.name }}</b></a> + <a href="{% url 'genre-detail' genre.id %}"><b>{{ genre.name }}</b></a> </td> <td style="text-align:center"> {{ genre.description|default:"" }} diff --git a/django/cantusdb_project/main_app/templates/indexer_list.html b/django/cantusdb_project/main_app/templates/indexer_list.html index 7d1eda1f3..6545f2116 100644 --- a/django/cantusdb_project/main_app/templates/indexer_list.html +++ b/django/cantusdb_project/main_app/templates/indexer_list.html @@ -31,13 +31,13 @@ <h3>List of Indexers</h3> {% for indexer in indexers %} <tr> <td class="text-center text-wrap"> - <a href="{{ indexer.get_absolute_url }}">{{ indexer.full_name }}</a> + <a href="{% url 'user-detail' indexer.id %}">{{ indexer.full_name }}</a> </td> <td class="text-center text-wrap">{{ indexer.institution|default:"" }}</td> <td class="text-center text-wrap">{{ indexer.city|default:"" }}</td> <td class="text-center text-wrap">{{ indexer.country|default:"" }}</td> <td class="text-center text-wrap"> - <a href="{{ indexer.get_absolute_url }}#sources">{{ indexer.source_count }} source{{ indexer.source_count|pluralize }}</a> + <a href="{% url 'user-detail' indexer.id %}#sources">{{ indexer.source_count }} source{{ indexer.source_count|pluralize }}</a> </td> </tr> {% endfor %} diff --git a/django/cantusdb_project/main_app/templates/melody_search.html b/django/cantusdb_project/main_app/templates/melody_search.html index d71d127c8..74fed1cc6 100644 --- a/django/cantusdb_project/main_app/templates/melody_search.html +++ b/django/cantusdb_project/main_app/templates/melody_search.html @@ -82,7 +82,7 @@ <h3>Search by melody</h3> </form> {% if source %} - <b>Searching in source: <a href="{{ source.get_absolute_url }}" target="_blank">{{ source.title }}</a></b> + <b>Searching in source: <a href="{% url 'source-detail' source.id %}" target="_blank">{{ source.title }}</a></b> {% endif %} <div id="resultsDiv"></div> diff --git a/django/cantusdb_project/main_app/templates/notation_detail.html b/django/cantusdb_project/main_app/templates/notation_detail.html index b07210d79..173ad9176 100644 --- a/django/cantusdb_project/main_app/templates/notation_detail.html +++ b/django/cantusdb_project/main_app/templates/notation_detail.html @@ -9,7 +9,7 @@ <h3>{{ notation.name }}</h3> <ul> {% for source in notation.sources.all|dictsort:"title" %} <li> - <a href="{{ source.get_absolute_url }}"> + <a href="{% url 'source-detail' source.id %}"> {{ source.title }} </a> </li> diff --git a/django/cantusdb_project/main_app/templates/office_list.html b/django/cantusdb_project/main_app/templates/office_list.html index 499ccaf86..67f6a3cc1 100644 --- a/django/cantusdb_project/main_app/templates/office_list.html +++ b/django/cantusdb_project/main_app/templates/office_list.html @@ -18,7 +18,7 @@ <h3>Office/Mass abbreviations</h3> {% for office in offices %} <tr> <td class="text-wrap"> - <a href="{{ office.get_absolute_url }}"><b>{{ office.name }}</b></a> + <a href="{% url 'office-detail' office.id %}"><b>{{ office.name }}</b></a> </td> <td class="text-wrap">{{ office.description }}</td> </tr> diff --git a/django/cantusdb_project/main_app/templates/provenance_detail.html b/django/cantusdb_project/main_app/templates/provenance_detail.html index 5df454afd..a539135b0 100644 --- a/django/cantusdb_project/main_app/templates/provenance_detail.html +++ b/django/cantusdb_project/main_app/templates/provenance_detail.html @@ -9,7 +9,7 @@ <h3>{{ provenance.name }}</h3> <ul> {% for source in provenance.sources.all|dictsort:"title" 
%} <li> - <a href="{{ source.get_absolute_url }}"> + <a href="{% url 'source-detail' source.id %}"> {{ source.title }} </a> </li> diff --git a/django/cantusdb_project/main_app/templates/sequence_detail.html b/django/cantusdb_project/main_app/templates/sequence_detail.html index 838732dbc..5f5c0f25a 100644 --- a/django/cantusdb_project/main_app/templates/sequence_detail.html +++ b/django/cantusdb_project/main_app/templates/sequence_detail.html @@ -26,14 +26,16 @@ <h3>{{ sequence.title }}</h3> <dd> {{ sequence.siglum }} </dd> - <dt> - Source - </dt> - <dd> - <a href="{{ sequence.source.get_absolute_url }}"> - {{ sequence.source.title }} - </a> - </dd> + {% if sequence.source %} + <dt> + Source + </dt> + <dd> + <a href="{% url 'source-detail' sequence.source.id %}"> + {{ sequence.source.title }} + </a> + </dd> + {% endif %} <div class="row"> <div class="col"> {% if sequence.folio %} @@ -80,7 +82,7 @@ <h3>{{ sequence.title }}</h3> Genre </dt> <dd> - <a href="{{ sequence.genre.get_absolute_url }}" title="{{ sequence.genre.description }}">{{ sequence.genre.name }}</a> + <a href="{% url 'genre-detail' sequence.genre.id %}" title="{{ sequence.genre.description }}">{{ sequence.genre.name }}</a> </dd> </div> {% endif %} @@ -185,12 +187,14 @@ <h4>Concordances</h4> {% for sequence in concordances %} <tr> <td class="text-wrap" style="text-align:center"> - <a href="{{ sequence.source.get_absolute_url }}"><b>{{ sequence.siglum }}</b></a> - <br> - <b>{{ sequence.folio }}</b> {{ sequence.s_sequence }} + {% if sequence.source %} + <a href="{% url 'source-detail' sequence.source.id %}" title="{{ sequence.source.title }}"><b>{{ sequence.siglum }}</b></a> + <br> + <b>{{ sequence.folio }}</b> {{ sequence.s_sequence }} + {% endif %} </td> <td class="text-wrap" style="text-align:center"> - <a href={{ sequence.get_absolute_url}} >{{ sequence.incipit|default:"" }}</a> + <a href="{% url 'sequence-detail' sequence.id %}">{{ sequence.incipit|default:"" }}</a> </td> <td class="text-wrap" style="text-align:center"> {{ sequence.rubrics|default:"" }} diff --git a/django/cantusdb_project/main_app/templates/sequence_list.html b/django/cantusdb_project/main_app/templates/sequence_list.html index 2980aae86..0427cdc1d 100644 --- a/django/cantusdb_project/main_app/templates/sequence_list.html +++ b/django/cantusdb_project/main_app/templates/sequence_list.html @@ -45,13 +45,15 @@ <h3>Clavis Sequentiarum (Calvin Bower)</h3> {% for sequence in sequences %} <tr style="text-align:center"> <td class="text-wrap"> - <a href={{ sequence.source.get_absolute_url }}> - <b>{{ sequence.source.siglum|default:"" }}</b><br> - </a> + {% if sequence.source %} + <a href="{% url 'source-detail' sequence.source.id %}" title="{{ sequence.source.title }}"> + <b>{{ sequence.source.siglum }}</b><br> + </a> + {% endif %} <b>{{ sequence.folio|default:"" }}</b> {{ sequence.s_sequence|default:"" }} </td> <td class="text-wrap"> - <a href="{{ sequence.get_absolute_url }}"> + <a href="{% url 'sequence-detail' sequence.id %}"> {{ sequence.incipit|default:"" }} </a> </td> diff --git a/django/cantusdb_project/main_app/templates/source_detail.html b/django/cantusdb_project/main_app/templates/source_detail.html index 6fd28d1a3..f657c0001 100644 --- a/django/cantusdb_project/main_app/templates/source_detail.html +++ b/django/cantusdb_project/main_app/templates/source_detail.html @@ -49,7 +49,7 @@ <h3>{{ source.title }}</h3> <dt>Other Editors</dt> <dd> {% for editor in source.other_editors.all %} - <a href={{ editor.get_absolute_url }}>{{ editor.full_name }}</a><br> 
+ <a href="{% url 'user-detail' editor.id %}">{{ editor.full_name }}</a><br> {% endfor %} </dd> {% endif %} @@ -58,7 +58,7 @@ <h3>{{ source.title }}</h3> <dt>Full Text Entered by</dt> <dd> {% for editor in source.full_text_entered_by.all %} - <a href={{ editor.get_absolute_url }}>{{ editor.full_name }}</a><br> + <a href="{% url 'user-detail' editor.id %}">{{ editor.full_name }}</a><br> {% endfor %} </dd> {% endif %} @@ -67,7 +67,7 @@ <h3>{{ source.title }}</h3> <dt>Melodies Entered by</dt> <dd> {% for editor in source.melodies_entered_by.all %} - <a href={{ editor.get_absolute_url }}>{{ editor.full_name }}</a><br> + <a href="{% url 'user-detail' editor.id %}">{{ editor.full_name }}</a><br> {% endfor %} </dd> {% endif %} @@ -110,12 +110,12 @@ <h4>Sequences in this source</h4> {% for sequence in sequences %} <tr> <td class="text-wrap" style="text-align:center"> - <a href="{{ source.get_absolute_url }}">{{ source.siglum }}</a> + <a href="{% url 'source-detail' source.id %}" title="{{ source.title }}">{{ source.siglum }}</a> <br> <b>{{ sequence.folio }}</b> {{ sequence.s_sequence }} </td> <td class="text-wrap" style="text-align:center"> - <a href={{ sequence.get_absolute_url }} >{{ sequence.incipit|default:"" }}</a> + <a href="{% url 'sequence-detail' sequence.id %}" >{{ sequence.incipit|default:"" }}</a> </td> <td class="text-wrap" style="text-align:center"> {{ sequence.rubrics|default:"" }} @@ -185,13 +185,13 @@ <h4>{{ source.siglum }}</h4> <div class=" card-body"> <small> {% if source.provenance.name %} - Provenance: <b><a href="{{ source.provenance.get_absolute_url }}">{{source.provenance.name}}</a></b> + Provenance: <b><a href="{% url 'provenance-detail' source.provenance.id %}">{{source.provenance.name}}</a></b> <br> {% endif %} {% if source.date %} Date: {% for century in source.century.all %} - <b><a href="{{ century.get_absolute_url }}"> + <b><a href="{% url 'century-detail' century.id %}"> {{ century.name }} </a></b> {% endfor %} @@ -204,7 +204,7 @@ <h4>{{ source.siglum }}</h4> <br> {% endif %} {% if source.notation.all %} - Notation: <b><a href="{{ source.notation.all.first.get_absolute_url }}"> + Notation: <b><a href="{% url 'notation-detail' source.notation.all.first.id %}"> {{ source.notation.all.first.name }} </a></b> <br> @@ -214,7 +214,7 @@ <h4>{{ source.siglum }}</h4> <ul> {% for editor in source.inventoried_by.all %} <li> - <a href={{ editor.get_absolute_url }}><b>{{ editor.full_name }}</b></a><br> + <a href={% url 'user-detail' editor.id %}><b>{{ editor.full_name }}</b></a><br> {{ editor.institution|default_if_none:"" }} </li> {% endfor %} @@ -226,7 +226,7 @@ <h4>{{ source.siglum }}</h4> <ul> {% for editor in source.proofreaders.all %} <li> - <a href={{ editor.get_absolute_url }}><b>{{ editor.full_name }}</b></a><br> + <a href={% url 'user-detail' editor.id %}><b>{{ editor.full_name }}</b></a><br> </li> {% endfor %} </ul> diff --git a/django/cantusdb_project/main_app/templates/source_list.html b/django/cantusdb_project/main_app/templates/source_list.html index 409fa96cd..a3ed3a580 100644 --- a/django/cantusdb_project/main_app/templates/source_list.html +++ b/django/cantusdb_project/main_app/templates/source_list.html @@ -81,14 +81,18 @@ <h3>Browse Sources</h3> {% for source in sources %} <tr> <td class="text-wrap" style="text-align:center"> - <a href="{{ source.get_absolute_url }}"><b>{{ source.siglum }}</b></a> + <a href="{% url 'source-detail' source.id %}" title="{{ source.title }}"><b>{{ source.siglum }}</b></a> </td> <td class="text-wrap" style="text-align:center"> {{ 
source.summary|default:""|truncatechars_html:140 }} </td> <td class="text-wrap" style="text-align:center"> - <b><a href="{{ source.century.first.get_absolute_url }}">{{ source.century.first.name }}</a></b><br> - <a href="{{ source.provenance.get_absolute_url }}">{{ source.provenance.name|default:"" }}</a> + {% if source.century.all %} + <b><a href="{% url 'century-detail' source.century.first.id %}">{{ source.century.first.name }}</a></b><br> + {% endif %} + {% if source.provenance %} + <a href="{% url 'provenance-detail' source.provenance.id %}">{{ source.provenance.name }}</a> + {% endif %} </td> <td class="text-wrap" style="text-align:center"> {% if source.image_link %} diff --git a/django/cantusdb_project/main_app/templates/user_detail.html b/django/cantusdb_project/main_app/templates/user_detail.html index 765f19533..eb326d170 100644 --- a/django/cantusdb_project/main_app/templates/user_detail.html +++ b/django/cantusdb_project/main_app/templates/user_detail.html @@ -34,7 +34,7 @@ <h5>Inventoried</h5> <ul> {% for source in inventoried_sources %} <li> - <a href="{{ source.get_absolute_url }}">{{ source.siglum }}</a> ({{ source.title }}) + <a href="{% url 'source-detail' source.id %}" title="{{ source.title }}">{{ source.siglum }}</a> ({{ source.title }}) </li> {% endfor %} </ul> @@ -44,7 +44,7 @@ <h5>Entered Full Text</h5> <ul> {% for source in full_text_sources %} <li> - <a href="{{ source.get_absolute_url }}">{{ source.siglum }}</a> ({{ source.title }}) + <a href="{% url 'source-detail' source.id %}" title="{{ source.title }}">{{ source.siglum }}</a> ({{ source.title }}) </li> {% endfor %} </ul> @@ -54,7 +54,7 @@ <h5>Entered Melodies</h5> <ul> {% for source in melody_sources %} <li> - <a href="{{ source.get_absolute_url }}">{{ source.siglum }}</a> ({{ source.title }}) + <a href="{% url 'source-detail' source.id %}" title="{{ source.title }}">{{ source.siglum }}</a> ({{ source.title }}) </li> {% endfor %} </ul> @@ -64,7 +64,7 @@ <h5>Proofread</h5> <ul> {% for source in proofread_sources %} <li> - <a href="{{ source.get_absolute_url }}">{{ source.siglum }}</a> ({{ source.title }}) + <a href="{% url 'source-detail' source.id %}" title="{{ source.title }}">{{ source.siglum }}</a> ({{ source.title }}) </li> {% endfor %} </ul> @@ -74,7 +74,7 @@ <h5>Edited</h5> <ul> {% for source in edited_sources %} <li> - <a href="{{ source.get_absolute_url }}">{{ source.siglum }}</a> ({{ source.title }}) + <a href="{% url 'source-detail' source.id %}" title="{{ source.title }}">{{ source.siglum }}</a> ({{ source.title }}) </li> {% endfor %} </ul> diff --git a/django/cantusdb_project/main_app/templates/user_list.html b/django/cantusdb_project/main_app/templates/user_list.html index 36023958b..408556e36 100644 --- a/django/cantusdb_project/main_app/templates/user_list.html +++ b/django/cantusdb_project/main_app/templates/user_list.html @@ -30,7 +30,7 @@ <h3>All Users</h3> {% for user in users %} <tr> <td class="text-center text-wrap"> - <a href="{{ user.get_absolute_url }}">{{ user.full_name|default:"" }}</a> + <a href="{% url 'user-detail' user.id %}">{{ user.full_name|default:"" }}</a> </td> <td>{{ user.institution|default:"" }}</td> <td>{{ user.city|default:"" }}</td> diff --git a/django/cantusdb_project/main_app/tests/make_fakes.py b/django/cantusdb_project/main_app/tests/make_fakes.py index d35e37eb0..189f398f6 100644 --- a/django/cantusdb_project/main_app/tests/make_fakes.py +++ b/django/cantusdb_project/main_app/tests/make_fakes.py @@ -266,7 +266,7 @@ def make_fake_source(published=None, title=None, 
segment=None, segment_name=None # tuples and we only need the first element of each tuple if title is None: - title = make_fake_text(LONG_CHAR_FIELD_MAX) + title = faker.sentence() if siglum is None: siglum = make_fake_text(SHORT_CHAR_FIELD_MAX) if description is None: diff --git a/django/cantusdb_project/main_app/tests/test_views.py b/django/cantusdb_project/main_app/tests/test_views.py index 4ee7583ad..e1b3732ce 100644 --- a/django/cantusdb_project/main_app/tests/test_views.py +++ b/django/cantusdb_project/main_app/tests/test_views.py @@ -752,6 +752,7 @@ def test_keyword_search_contains(self): def test_source_link_column(self): siglum = "Sigl-01" source = make_fake_source(published=True, siglum=siglum) + source_title = source.title url = source.get_absolute_url() fulltext = "manuscript full text" search_term = "full" @@ -764,8 +765,9 @@ def test_source_link_column(self): ) html = str(response.content) self.assertIn(siglum, html) + self.assertIn(source_title, html) self.assertIn(url, html) - self.assertIn(f'<a href="{url}">{siglum}</a>', html) + self.assertIn(f'<a href="{url}" title="{source_title}">{siglum}</a>', html) def test_folio_column(self): source = make_fake_source(published=True) @@ -786,6 +788,7 @@ def test_feast_column(self): source = make_fake_source(published=True) feast = make_fake_feast() feast_name = feast.name + feast_description = feast.description url = feast.get_absolute_url() fulltext = "manuscript full text" search_term = "full" @@ -799,8 +802,9 @@ def test_feast_column(self): ) html = str(response.content) self.assertIn(feast_name, html) + self.assertIn(feast_description, html) self.assertIn(url, html) - self.assertIn(f'<a href="{url}">{feast_name}</a>', html) + self.assertIn(f'<a href="{url}" title="{feast_description}">{feast_name}</a>', html) def test_office_column(self): source = make_fake_source(published=True) diff --git a/django/cantusdb_project/requirements.txt b/django/cantusdb_project/requirements.txt index 981588198..5b4d3ba28 100644 --- a/django/cantusdb_project/requirements.txt +++ b/django/cantusdb_project/requirements.txt @@ -13,6 +13,7 @@ Django==4.1.7 django-autocomplete-light==3.5.1 django-extra-views==0.13.0 django-quill-editor==0.1.40 +django_debug_toolbar==3.8.1 Faker==4.1.0 gunicorn==20.0.4 idna==2.10
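The template changes above all follow one pattern: `{{ obj.get_absolute_url }}` is replaced with an explicit named-route `{% url %}` tag, lookups through nullable relations (`chant.feast`, `chant.office`, `chant.genre`, `source.provenance`) gain an `{% if %}` guard, and `title` attributes surface descriptions as hover text. For reference, a minimal sketch of the Python-side equivalent of the `{% url %}` tag using `django.urls.reverse` (the route name `'chant-detail'` is taken from the diff; `chant` stands for any object with an `id`, and this only resolves inside a configured Django project whose URLconf defines that route):

```python
from django.urls import reverse

def chant_detail_url(chant):
    # Same resolution as {% url 'chant-detail' chant.id %} in a template:
    # look up the named route in the URLconf and fill in its arguments,
    # instead of relying on the model defining get_absolute_url().
    return reverse("chant-detail", args=[chant.id])
```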
matrix-org__synapse-6682
http requests for uris with non-ascii characters cause CRITICAL errors ``` 2019-11-22 13:15:59,276 - twisted - 172 - CRITICAL - - Capture point (most recent call last): File "/usr/lib/python3.5/runpy.py", line 184, in _run_module_as_main "__main__", mod_spec) File "/usr/lib/python3.5/runpy.py", line 85, in _run_code exec(code, run_globals) File "/opt/synapse/synapse/synapse/app/homeserver.py", line 659, in <module> main() File "/opt/synapse/synapse/synapse/app/homeserver.py", line 655, in main run(hs) File "/opt/synapse/synapse/synapse/app/homeserver.py", line 646, in run logger=logger, File "/opt/synapse/synapse/synapse/app/_base.py", line 139, in start_reactor run() File "/opt/synapse/synapse/synapse/app/_base.py", line 114, in run run_command() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/base.py", line 1267, in run self.mainLoop() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/base.py", line 1279, in mainLoop self.doIteration(t) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/epollreactor.py", line 235, in doPoll log.callWithLogger(selectable, _drdw, selectable, fd, event) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/python/log.py", line 103, in callWithLogger return callWithContext({"system": lp}, func, *args, **kw) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/python/log.py", line 86, in callWithContext return context.call({ILogContext: newCtx}, func, *args, **kw) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/python/context.py", line 122, in callWithContext return self.currentContext().callWithContext(ctx, func, *args, **kw) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/python/context.py", line 85, in callWithContext return func(*args,**kw) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/posixbase.py", line 614, in _doReadOrWrite why = selectable.doRead() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/tcp.py", line 243, in doRead return self._dataReceived(data) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/internet/tcp.py", line 249, in _dataReceived rval = self.protocol.dataReceived(data) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/web/http.py", line 2912, in dataReceived return self._channel.dataReceived(data) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/web/http.py", line 2211, in dataReceived return basic.LineReceiver.dataReceived(self, data) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/protocols/basic.py", line 579, in dataReceived why = self.rawDataReceived(data) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/web/http.py", line 2218, in rawDataReceived self._transferDecoder.dataReceived(data) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/web/http.py", line 1699, in dataReceived finishCallback(data[contentLength:]) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/web/http.py", line 2115, in _finishRequestBody self.allContentReceived() File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/web/http.py", line 2190, in allContentReceived req.requestReceived(command, path, version) File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/web/http.py", line 917, in requestReceived self.process() Traceback (most recent call last): File "/opt/synapse/env3/lib/python3.5/site-packages/twisted/web/server.py", line 199, in process self.render(resrc) File "/opt/synapse/synapse/synapse/http/site.py", line 130, in render 
self._started_processing(servlet_name) File "/opt/synapse/synapse/synapse/http/site.py", line 233, in _started_processing self.get_redacted_uri(), File "/opt/synapse/synapse/synapse/http/site.py", line 91, in get_redacted_uri uri = self.uri.decode("ascii") UnicodeDecodeError: 'ascii' codec can't decode byte 0xc8 in position 25: ordinal not in range(128) ```
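The failure is easy to reproduce outside of twisted: `Request.uri` is the raw bytes read off the wire, and a strict ASCII decode raises as soon as any byte above 0x7F appears. A minimal standalone sketch (plain Python, no synapse imports; the path is made up, but the offending byte matches the traceback):

```python
# Request.uri arrives as raw bytes from the wire; 0xc8 is not valid ASCII.
uri = b"/_matrix/client/r0/rooms/!\xc8room:example.org/state"

try:
    uri.decode("ascii")
except UnicodeDecodeError as e:
    print(e)  # 'ascii' codec can't decode byte 0xc8 in position 26: ...

# errors="replace" never raises: undecodable bytes become U+FFFD,
# the Unicode replacement character, which is good enough for logging.
print(uri.decode("ascii", errors="replace"))
```

The patch below applies exactly this `errors="replace"` decode inside `get_redacted_uri`.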
[ { "content": "# Copyright 2016 OpenMarket Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport contextlib\nimport logging\nimport time\n\nfrom twisted.web.server import Request, Site\n\nfrom synapse.http import redact_uri\nfrom synapse.http.request_metrics import RequestMetrics, requests_counter\nfrom synapse.logging.context import LoggingContext, PreserveLoggingContext\n\nlogger = logging.getLogger(__name__)\n\n_next_request_seq = 0\n\n\nclass SynapseRequest(Request):\n \"\"\"Class which encapsulates an HTTP request to synapse.\n\n All of the requests processed in synapse are of this type.\n\n It extends twisted's twisted.web.server.Request, and adds:\n * Unique request ID\n * A log context associated with the request\n * Redaction of access_token query-params in __repr__\n * Logging at start and end\n * Metrics to record CPU, wallclock and DB time by endpoint.\n\n It also provides a method `processing`, which returns a context manager. If this\n method is called, the request won't be logged until the context manager is closed;\n this is useful for asynchronous request handlers which may go on processing the\n request even after the client has disconnected.\n\n Attributes:\n logcontext(LoggingContext) : the log context for this request\n \"\"\"\n\n def __init__(self, channel, *args, **kw):\n Request.__init__(self, channel, *args, **kw)\n self.site = channel.site\n self._channel = channel # this is used by the tests\n self.authenticated_entity = None\n self.start_time = 0\n\n # we can't yet create the logcontext, as we don't know the method.\n self.logcontext = None\n\n global _next_request_seq\n self.request_seq = _next_request_seq\n _next_request_seq += 1\n\n # whether an asynchronous request handler has called processing()\n self._is_processing = False\n\n # the time when the asynchronous request handler completed its processing\n self._processing_finished_time = None\n\n # what time we finished sending the response to the client (or the connection\n # dropped)\n self.finish_time = None\n\n def __repr__(self):\n # We overwrite this so that we don't log ``access_token``\n return \"<%s at 0x%x method=%r uri=%r clientproto=%r site=%r>\" % (\n self.__class__.__name__,\n id(self),\n self.get_method(),\n self.get_redacted_uri(),\n self.clientproto.decode(\"ascii\", errors=\"replace\"),\n self.site.site_tag,\n )\n\n def get_request_id(self):\n return \"%s-%i\" % (self.get_method(), self.request_seq)\n\n def get_redacted_uri(self):\n uri = self.uri\n if isinstance(uri, bytes):\n uri = self.uri.decode(\"ascii\")\n return redact_uri(uri)\n\n def get_method(self):\n \"\"\"Gets the method associated with the request (or placeholder if not\n method has yet been received).\n\n Note: This is necessary as the placeholder value in twisted is str\n rather than bytes, so we need to sanitise `self.method`.\n\n Returns:\n str\n \"\"\"\n method = self.method\n if isinstance(method, bytes):\n method = self.method.decode(\"ascii\")\n return method\n\n def get_user_agent(self):\n return 
self.requestHeaders.getRawHeaders(b\"User-Agent\", [None])[-1]\n\n def render(self, resrc):\n # this is called once a Resource has been found to serve the request; in our\n # case the Resource in question will normally be a JsonResource.\n\n # create a LogContext for this request\n request_id = self.get_request_id()\n logcontext = self.logcontext = LoggingContext(request_id)\n logcontext.request = request_id\n\n # override the Server header which is set by twisted\n self.setHeader(\"Server\", self.site.server_version_string)\n\n with PreserveLoggingContext(self.logcontext):\n # we start the request metrics timer here with an initial stab\n # at the servlet name. For most requests that name will be\n # JsonResource (or a subclass), and JsonResource._async_render\n # will update it once it picks a servlet.\n servlet_name = resrc.__class__.__name__\n self._started_processing(servlet_name)\n\n Request.render(self, resrc)\n\n # record the arrival of the request *after*\n # dispatching to the handler, so that the handler\n # can update the servlet name in the request\n # metrics\n requests_counter.labels(self.get_method(), self.request_metrics.name).inc()\n\n @contextlib.contextmanager\n def processing(self):\n \"\"\"Record the fact that we are processing this request.\n\n Returns a context manager; the correct way to use this is:\n\n @defer.inlineCallbacks\n def handle_request(request):\n with request.processing(\"FooServlet\"):\n yield really_handle_the_request()\n\n Once the context manager is closed, the completion of the request will be logged,\n and the various metrics will be updated.\n \"\"\"\n if self._is_processing:\n raise RuntimeError(\"Request is already processing\")\n self._is_processing = True\n\n try:\n yield\n except Exception:\n # this should already have been caught, and sent back to the client as a 500.\n logger.exception(\"Asynchronous messge handler raised an uncaught exception\")\n finally:\n # the request handler has finished its work and either sent the whole response\n # back, or handed over responsibility to a Producer.\n\n self._processing_finished_time = time.time()\n self._is_processing = False\n\n # if we've already sent the response, log it now; otherwise, we wait for the\n # response to be sent.\n if self.finish_time is not None:\n self._finished_processing()\n\n def finish(self):\n \"\"\"Called when all response data has been written to this Request.\n\n Overrides twisted.web.server.Request.finish to record the finish time and do\n logging.\n \"\"\"\n self.finish_time = time.time()\n Request.finish(self)\n if not self._is_processing:\n with PreserveLoggingContext(self.logcontext):\n self._finished_processing()\n\n def connectionLost(self, reason):\n \"\"\"Called when the client connection is closed before the response is written.\n\n Overrides twisted.web.server.Request.connectionLost to record the finish time and\n do logging.\n \"\"\"\n self.finish_time = time.time()\n Request.connectionLost(self, reason)\n\n # we only get here if the connection to the client drops before we send\n # the response.\n #\n # It's useful to log it here so that we can get an idea of when\n # the client disconnects.\n with PreserveLoggingContext(self.logcontext):\n logger.warning(\n \"Error processing request %r: %s %s\", self, reason.type, reason.value\n )\n\n if not self._is_processing:\n self._finished_processing()\n\n def _started_processing(self, servlet_name):\n \"\"\"Record the fact that we are processing this request.\n\n This will log the request's arrival. 
Once the request completes,\n be sure to call finished_processing.\n\n Args:\n servlet_name (str): the name of the servlet which will be\n processing this request. This is used in the metrics.\n\n It is possible to update this afterwards by updating\n self.request_metrics.name.\n \"\"\"\n self.start_time = time.time()\n self.request_metrics = RequestMetrics()\n self.request_metrics.start(\n self.start_time, name=servlet_name, method=self.get_method()\n )\n\n self.site.access_logger.info(\n \"%s - %s - Received request: %s %s\",\n self.getClientIP(),\n self.site.site_tag,\n self.get_method(),\n self.get_redacted_uri(),\n )\n\n def _finished_processing(self):\n \"\"\"Log the completion of this request and update the metrics\n \"\"\"\n\n if self.logcontext is None:\n # this can happen if the connection closed before we read the\n # headers (so render was never called). In that case we'll already\n # have logged a warning, so just bail out.\n return\n\n usage = self.logcontext.get_resource_usage()\n\n if self._processing_finished_time is None:\n # we completed the request without anything calling processing()\n self._processing_finished_time = time.time()\n\n # the time between receiving the request and the request handler finishing\n processing_time = self._processing_finished_time - self.start_time\n\n # the time between the request handler finishing and the response being sent\n # to the client (nb may be negative)\n response_send_time = self.finish_time - self._processing_finished_time\n\n # need to decode as it could be raw utf-8 bytes\n # from a IDN servname in an auth header\n authenticated_entity = self.authenticated_entity\n if authenticated_entity is not None and isinstance(authenticated_entity, bytes):\n authenticated_entity = authenticated_entity.decode(\"utf-8\", \"replace\")\n\n # ...or could be raw utf-8 bytes in the User-Agent header.\n # N.B. if you don't do this, the logger explodes cryptically\n # with maximum recursion trying to log errors about\n # the charset problem.\n # c.f. 
https://github.com/matrix-org/synapse/issues/3471\n user_agent = self.get_user_agent()\n if user_agent is not None:\n user_agent = user_agent.decode(\"utf-8\", \"replace\")\n else:\n user_agent = \"-\"\n\n code = str(self.code)\n if not self.finished:\n # we didn't send the full response before we gave up (presumably because\n # the connection dropped)\n code += \"!\"\n\n self.site.access_logger.info(\n \"%s - %s - {%s}\"\n \" Processed request: %.3fsec/%.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)\"\n ' %sB %s \"%s %s %s\" \"%s\" [%d dbevts]',\n self.getClientIP(),\n self.site.site_tag,\n authenticated_entity,\n processing_time,\n response_send_time,\n usage.ru_utime,\n usage.ru_stime,\n usage.db_sched_duration_sec,\n usage.db_txn_duration_sec,\n int(usage.db_txn_count),\n self.sentLength,\n code,\n self.get_method(),\n self.get_redacted_uri(),\n self.clientproto.decode(\"ascii\", errors=\"replace\"),\n user_agent,\n usage.evt_db_fetch_count,\n )\n\n try:\n self.request_metrics.stop(self.finish_time, self.code, self.sentLength)\n except Exception as e:\n logger.warning(\"Failed to stop metrics: %r\", e)\n\n\nclass XForwardedForRequest(SynapseRequest):\n def __init__(self, *args, **kw):\n SynapseRequest.__init__(self, *args, **kw)\n\n \"\"\"\n Add a layer on top of another request that only uses the value of an\n X-Forwarded-For header as the result of C{getClientIP}.\n \"\"\"\n\n def getClientIP(self):\n \"\"\"\n @return: The client address (the first address) in the value of the\n I{X-Forwarded-For header}. If the header is not present, return\n C{b\"-\"}.\n \"\"\"\n return (\n self.requestHeaders.getRawHeaders(b\"x-forwarded-for\", [b\"-\"])[0]\n .split(b\",\")[0]\n .strip()\n .decode(\"ascii\")\n )\n\n\nclass SynapseSite(Site):\n \"\"\"\n Subclass of a twisted http Site that does access logging with python's\n standard logging\n \"\"\"\n\n def __init__(\n self,\n logger_name,\n site_tag,\n config,\n resource,\n server_version_string,\n *args,\n **kwargs\n ):\n Site.__init__(self, resource, *args, **kwargs)\n\n self.site_tag = site_tag\n\n proxied = config.get(\"x_forwarded\", False)\n self.requestFactory = XForwardedForRequest if proxied else SynapseRequest\n self.access_logger = logging.getLogger(logger_name)\n self.server_version_string = server_version_string.encode(\"ascii\")\n\n def log(self, request):\n pass\n", "path": "synapse/http/site.py" } ]
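The fix that follows changes a single line: the strict `uri.decode("ascii")` in `get_redacted_uri` gains `errors="replace"`, so logging a request can no longer raise. Pulled out as a standalone sketch (with `redact_uri` stubbed, since only the decode step is at issue here):

```python
def redact_uri(uri):
    # Stand-in for synapse.http.redact_uri, which redacts access_token
    # query parameters; its internals don't matter for this fix.
    return uri


def get_redacted_uri(uri):
    if isinstance(uri, bytes):
        # errors="replace" maps undecodable bytes to U+FFFD instead of
        # raising UnicodeDecodeError, so malformed URIs still get logged.
        uri = uri.decode("ascii", errors="replace")
    return redact_uri(uri)


assert get_redacted_uri(b"/download/\xc8") == "/download/\ufffd"
assert get_redacted_uri("/download/ok") == "/download/ok"
```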
[ { "content": "# Copyright 2016 OpenMarket Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport contextlib\nimport logging\nimport time\n\nfrom twisted.web.server import Request, Site\n\nfrom synapse.http import redact_uri\nfrom synapse.http.request_metrics import RequestMetrics, requests_counter\nfrom synapse.logging.context import LoggingContext, PreserveLoggingContext\n\nlogger = logging.getLogger(__name__)\n\n_next_request_seq = 0\n\n\nclass SynapseRequest(Request):\n \"\"\"Class which encapsulates an HTTP request to synapse.\n\n All of the requests processed in synapse are of this type.\n\n It extends twisted's twisted.web.server.Request, and adds:\n * Unique request ID\n * A log context associated with the request\n * Redaction of access_token query-params in __repr__\n * Logging at start and end\n * Metrics to record CPU, wallclock and DB time by endpoint.\n\n It also provides a method `processing`, which returns a context manager. If this\n method is called, the request won't be logged until the context manager is closed;\n this is useful for asynchronous request handlers which may go on processing the\n request even after the client has disconnected.\n\n Attributes:\n logcontext(LoggingContext) : the log context for this request\n \"\"\"\n\n def __init__(self, channel, *args, **kw):\n Request.__init__(self, channel, *args, **kw)\n self.site = channel.site\n self._channel = channel # this is used by the tests\n self.authenticated_entity = None\n self.start_time = 0\n\n # we can't yet create the logcontext, as we don't know the method.\n self.logcontext = None\n\n global _next_request_seq\n self.request_seq = _next_request_seq\n _next_request_seq += 1\n\n # whether an asynchronous request handler has called processing()\n self._is_processing = False\n\n # the time when the asynchronous request handler completed its processing\n self._processing_finished_time = None\n\n # what time we finished sending the response to the client (or the connection\n # dropped)\n self.finish_time = None\n\n def __repr__(self):\n # We overwrite this so that we don't log ``access_token``\n return \"<%s at 0x%x method=%r uri=%r clientproto=%r site=%r>\" % (\n self.__class__.__name__,\n id(self),\n self.get_method(),\n self.get_redacted_uri(),\n self.clientproto.decode(\"ascii\", errors=\"replace\"),\n self.site.site_tag,\n )\n\n def get_request_id(self):\n return \"%s-%i\" % (self.get_method(), self.request_seq)\n\n def get_redacted_uri(self):\n uri = self.uri\n if isinstance(uri, bytes):\n uri = self.uri.decode(\"ascii\", errors=\"replace\")\n return redact_uri(uri)\n\n def get_method(self):\n \"\"\"Gets the method associated with the request (or placeholder if not\n method has yet been received).\n\n Note: This is necessary as the placeholder value in twisted is str\n rather than bytes, so we need to sanitise `self.method`.\n\n Returns:\n str\n \"\"\"\n method = self.method\n if isinstance(method, bytes):\n method = self.method.decode(\"ascii\")\n return method\n\n def get_user_agent(self):\n 
return self.requestHeaders.getRawHeaders(b\"User-Agent\", [None])[-1]\n\n def render(self, resrc):\n # this is called once a Resource has been found to serve the request; in our\n # case the Resource in question will normally be a JsonResource.\n\n # create a LogContext for this request\n request_id = self.get_request_id()\n logcontext = self.logcontext = LoggingContext(request_id)\n logcontext.request = request_id\n\n # override the Server header which is set by twisted\n self.setHeader(\"Server\", self.site.server_version_string)\n\n with PreserveLoggingContext(self.logcontext):\n # we start the request metrics timer here with an initial stab\n # at the servlet name. For most requests that name will be\n # JsonResource (or a subclass), and JsonResource._async_render\n # will update it once it picks a servlet.\n servlet_name = resrc.__class__.__name__\n self._started_processing(servlet_name)\n\n Request.render(self, resrc)\n\n # record the arrival of the request *after*\n # dispatching to the handler, so that the handler\n # can update the servlet name in the request\n # metrics\n requests_counter.labels(self.get_method(), self.request_metrics.name).inc()\n\n @contextlib.contextmanager\n def processing(self):\n \"\"\"Record the fact that we are processing this request.\n\n Returns a context manager; the correct way to use this is:\n\n @defer.inlineCallbacks\n def handle_request(request):\n with request.processing(\"FooServlet\"):\n yield really_handle_the_request()\n\n Once the context manager is closed, the completion of the request will be logged,\n and the various metrics will be updated.\n \"\"\"\n if self._is_processing:\n raise RuntimeError(\"Request is already processing\")\n self._is_processing = True\n\n try:\n yield\n except Exception:\n # this should already have been caught, and sent back to the client as a 500.\n logger.exception(\"Asynchronous messge handler raised an uncaught exception\")\n finally:\n # the request handler has finished its work and either sent the whole response\n # back, or handed over responsibility to a Producer.\n\n self._processing_finished_time = time.time()\n self._is_processing = False\n\n # if we've already sent the response, log it now; otherwise, we wait for the\n # response to be sent.\n if self.finish_time is not None:\n self._finished_processing()\n\n def finish(self):\n \"\"\"Called when all response data has been written to this Request.\n\n Overrides twisted.web.server.Request.finish to record the finish time and do\n logging.\n \"\"\"\n self.finish_time = time.time()\n Request.finish(self)\n if not self._is_processing:\n with PreserveLoggingContext(self.logcontext):\n self._finished_processing()\n\n def connectionLost(self, reason):\n \"\"\"Called when the client connection is closed before the response is written.\n\n Overrides twisted.web.server.Request.connectionLost to record the finish time and\n do logging.\n \"\"\"\n self.finish_time = time.time()\n Request.connectionLost(self, reason)\n\n # we only get here if the connection to the client drops before we send\n # the response.\n #\n # It's useful to log it here so that we can get an idea of when\n # the client disconnects.\n with PreserveLoggingContext(self.logcontext):\n logger.warning(\n \"Error processing request %r: %s %s\", self, reason.type, reason.value\n )\n\n if not self._is_processing:\n self._finished_processing()\n\n def _started_processing(self, servlet_name):\n \"\"\"Record the fact that we are processing this request.\n\n This will log the request's arrival. 
Once the request completes,\n be sure to call finished_processing.\n\n Args:\n servlet_name (str): the name of the servlet which will be\n processing this request. This is used in the metrics.\n\n It is possible to update this afterwards by updating\n self.request_metrics.name.\n \"\"\"\n self.start_time = time.time()\n self.request_metrics = RequestMetrics()\n self.request_metrics.start(\n self.start_time, name=servlet_name, method=self.get_method()\n )\n\n self.site.access_logger.info(\n \"%s - %s - Received request: %s %s\",\n self.getClientIP(),\n self.site.site_tag,\n self.get_method(),\n self.get_redacted_uri(),\n )\n\n def _finished_processing(self):\n \"\"\"Log the completion of this request and update the metrics\n \"\"\"\n\n if self.logcontext is None:\n # this can happen if the connection closed before we read the\n # headers (so render was never called). In that case we'll already\n # have logged a warning, so just bail out.\n return\n\n usage = self.logcontext.get_resource_usage()\n\n if self._processing_finished_time is None:\n # we completed the request without anything calling processing()\n self._processing_finished_time = time.time()\n\n # the time between receiving the request and the request handler finishing\n processing_time = self._processing_finished_time - self.start_time\n\n # the time between the request handler finishing and the response being sent\n # to the client (nb may be negative)\n response_send_time = self.finish_time - self._processing_finished_time\n\n # need to decode as it could be raw utf-8 bytes\n # from a IDN servname in an auth header\n authenticated_entity = self.authenticated_entity\n if authenticated_entity is not None and isinstance(authenticated_entity, bytes):\n authenticated_entity = authenticated_entity.decode(\"utf-8\", \"replace\")\n\n # ...or could be raw utf-8 bytes in the User-Agent header.\n # N.B. if you don't do this, the logger explodes cryptically\n # with maximum recursion trying to log errors about\n # the charset problem.\n # c.f. 
https://github.com/matrix-org/synapse/issues/3471\n user_agent = self.get_user_agent()\n if user_agent is not None:\n user_agent = user_agent.decode(\"utf-8\", \"replace\")\n else:\n user_agent = \"-\"\n\n code = str(self.code)\n if not self.finished:\n # we didn't send the full response before we gave up (presumably because\n # the connection dropped)\n code += \"!\"\n\n self.site.access_logger.info(\n \"%s - %s - {%s}\"\n \" Processed request: %.3fsec/%.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)\"\n ' %sB %s \"%s %s %s\" \"%s\" [%d dbevts]',\n self.getClientIP(),\n self.site.site_tag,\n authenticated_entity,\n processing_time,\n response_send_time,\n usage.ru_utime,\n usage.ru_stime,\n usage.db_sched_duration_sec,\n usage.db_txn_duration_sec,\n int(usage.db_txn_count),\n self.sentLength,\n code,\n self.get_method(),\n self.get_redacted_uri(),\n self.clientproto.decode(\"ascii\", errors=\"replace\"),\n user_agent,\n usage.evt_db_fetch_count,\n )\n\n try:\n self.request_metrics.stop(self.finish_time, self.code, self.sentLength)\n except Exception as e:\n logger.warning(\"Failed to stop metrics: %r\", e)\n\n\nclass XForwardedForRequest(SynapseRequest):\n def __init__(self, *args, **kw):\n SynapseRequest.__init__(self, *args, **kw)\n\n \"\"\"\n Add a layer on top of another request that only uses the value of an\n X-Forwarded-For header as the result of C{getClientIP}.\n \"\"\"\n\n def getClientIP(self):\n \"\"\"\n @return: The client address (the first address) in the value of the\n I{X-Forwarded-For header}. If the header is not present, return\n C{b\"-\"}.\n \"\"\"\n return (\n self.requestHeaders.getRawHeaders(b\"x-forwarded-for\", [b\"-\"])[0]\n .split(b\",\")[0]\n .strip()\n .decode(\"ascii\")\n )\n\n\nclass SynapseSite(Site):\n \"\"\"\n Subclass of a twisted http Site that does access logging with python's\n standard logging\n \"\"\"\n\n def __init__(\n self,\n logger_name,\n site_tag,\n config,\n resource,\n server_version_string,\n *args,\n **kwargs\n ):\n Site.__init__(self, resource, *args, **kwargs)\n\n self.site_tag = site_tag\n\n proxied = config.get(\"x_forwarded\", False)\n self.requestFactory = XForwardedForRequest if proxied else SynapseRequest\n self.access_logger = logging.getLogger(logger_name)\n self.server_version_string = server_version_string.encode(\"ascii\")\n\n def log(self, request):\n pass\n", "path": "synapse/http/site.py" } ]
diff --git a/changelog.d/6682.bugfix b/changelog.d/6682.bugfix new file mode 100644 index 000000000000..d48ea31477f5 --- /dev/null +++ b/changelog.d/6682.bugfix @@ -0,0 +1,2 @@ +Fix "CRITICAL" errors being logged when a request is received for a uri containing non-ascii characters. + diff --git a/synapse/http/site.py b/synapse/http/site.py index 9f2d035fa0e5..911251c0bcff 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -88,7 +88,7 @@ def get_request_id(self): def get_redacted_uri(self): uri = self.uri if isinstance(uri, bytes): - uri = self.uri.decode("ascii") + uri = self.uri.decode("ascii", errors="replace") return redact_uri(uri) def get_method(self):
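The one-line change above is the whole fix: `get_redacted_uri` now decodes the raw uri bytes with the `replace` error policy instead of the strict default. A minimal sketch of the difference — the byte string below is a made-up non-ascii uri for illustration, not taken from the report:

```py
raw_uri = b"/_matrix/client/r0/login?next=caf\xe9"  # hypothetical non-ascii uri

try:
    raw_uri.decode("ascii")  # pre-fix behaviour: strict decode raises
except UnicodeDecodeError as err:
    print("strict decode fails:", err)

# Post-fix behaviour: undecodable bytes become U+FFFD and logging proceeds.
print(raw_uri.decode("ascii", errors="replace"))
```

Either way `redact_uri` then receives a well-formed text string, so the access log line can always be produced instead of surfacing a "CRITICAL" error.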
Lightning-AI__torchmetrics-2346
High memory usage of Perplexity metric
## 🐛 Bug

I ran out of memory (GPU) when computing the perplexity metric and would like to propose a small optimization to decrease its memory utilization.

### To Reproduce

For instance, when running the following code, PyTorch tries to allocate 1024 GB of GPU memory on my system.

```py
from torchmetrics.text import Perplexity
import torch

gen = torch.manual_seed(42)
preds = torch.rand(512, 1024, 12, generator=gen).cuda()
target = torch.randint(12, (512, 1024), generator=gen).cuda()

perp = Perplexity().cuda()
print(perp(preds, target))
```

### Memory Inefficiency

I think the inefficiency is in this line:

https://github.com/Lightning-AI/torchmetrics/blob/a68455afb9041d1d32c1d6546897fee416abdc41/src/torchmetrics/functional/text/perplexity.py#L94

`probs[:, target]` results in a large temporary tensor with `(512*1024)^2` elements. Afterwards only the diagonal values are used.

### Potential Solution

In contrast,

```py
probs = probs[torch.arange(target.numel()), target][mask]
```

would only require memory of the size of `target`.

Would you consider accepting a pull request with this optimization? Or was the previous implementation chosen for another reason?

### Environment

- TorchMetrics v1.2.1 (installed with pip) and Master branch.
- Python 3.10.12
- Pytorch 2.2.0
- CUDA 12.1
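To make the quadratic intermediate concrete, here is a small CPU-sized sketch (shapes shrunk from the report so it runs anywhere; the tensors and names are illustrative, not from the library internals):

```py
import torch

gen = torch.manual_seed(42)
preds = torch.rand(4, 8, 12, generator=gen)        # [batch, seq_len, vocab]
target = torch.randint(12, (4, 8), generator=gen)

probs = torch.nn.functional.softmax(preds.reshape(-1, preds.shape[-1]), dim=1)
flat = target.reshape(-1)                          # N = 4 * 8 = 32 tokens

# Existing indexing: materializes an (N, N) intermediate, then keeps its
# diagonal -- 32 x 32 here, (512*1024)^2 float32 values in the report.
quadratic = probs[:, flat]

# Proposed indexing: gathers one probability per token, O(N) memory.
linear = probs[torch.arange(flat.numel()), flat]

assert torch.allclose(quadratic.diagonal(), linear)
```

Both expressions select the same per-token probabilities; only the size of the temporary differs.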
[ { "content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Optional, Tuple\n\nimport torch\nfrom torch import Tensor\n\n\ndef _check_shape_and_type_consistency(preds: Tensor, target: Tensor) -> None:\n \"\"\"Check shape and type consistency of input vectors.\n\n Args:\n preds:\n Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,\n vocab_size]. Scores will be normalized internally using softmax.\n target:\n Ground truth values with a shape [batch_size, seq_len].\n\n Raises:\n ValueError:\n If ``preds`` tensor has no 3 dimensions.\n ValueError:\n If ``target`` tensor has no 2 dimensions.\n ValueError:\n If the first two dimensions of ``preds`` and ``target`` do not equal.\n TypeError:\n If ``preds`` dtype is not one of ``(torch.float16, torch.float32, torch.float64)``\n TypeError:\n If ``target`` is not of a type LongTensor (torch.int64)\n\n \"\"\"\n if len(preds.shape) != 3:\n raise ValueError(\n \"Input tensor `preds` is expected to have 3 dimensions, [batch_size, seq_len, vocab_size],\"\n f\" but got {len(preds.shape)}.\"\n )\n if len(target.shape) != 2:\n raise ValueError(\n \"Input tensor `target` is expected to have 2 dimensions, [batch_size, seq_len],\"\n f\" but got {len(target.shape)}.\"\n )\n if preds.shape[:2] != target.shape:\n raise ValueError(\n \"Input tensors `preds` and `target` are expected to have equaling first two dimensions,\"\n f\" [batch_size, seq_len], but got {preds.shape[:2]} and {target.shape}.\"\n )\n if not preds.is_floating_point():\n raise TypeError(f\"Input tensor `preds` is expected to be of floating point type but got {preds.dtype}.\")\n if target.dtype != torch.int64:\n raise TypeError(f\"Input tensor `target` is expected to be of a type {torch.int64} but got {target.dtype}.\")\n\n\ndef _perplexity_update(preds: Tensor, target: Tensor, ignore_index: Optional[int] = None) -> Tuple[Tensor, Tensor]:\n \"\"\"Compute intermediate statistics for Perplexity.\n\n Args:\n preds:\n Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,\n vocab_size]. Scores will be normalized internally using softmax.\n target:\n Ground truth values with a shape [batch_size, seq_len].\n ignore_index:\n Integer specifying a target class to ignore. 
If given, this class index does not contribute\n to the returned score.\n\n Returns:\n Log probabilities, summed over all samples\n Number of samples\n\n \"\"\"\n _check_shape_and_type_consistency(preds, target)\n\n probs = torch.nn.functional.softmax(preds.reshape(-1, preds.shape[-1]), dim=1)\n target = target.reshape(-1)\n\n if ignore_index is not None:\n mask = target.ne(ignore_index)\n target = target.where(target != ignore_index, torch.tensor(0, device=target.device))\n else:\n mask = torch.ones_like(target, dtype=torch.bool)\n\n probs = probs[:, target].diagonal()[mask]\n total_log_probs = -probs.log().sum()\n count = mask.sum()\n\n return total_log_probs, count\n\n\ndef _perplexity_compute(total: Tensor, count: Tensor) -> Tensor:\n \"\"\"Compute the Perplexity.\n\n Args:\n total: Log probabilities, summed over all samples\n count: Number of samples\n Returns:\n Perplexity\n\n \"\"\"\n return torch.exp(total / count)\n\n\ndef perplexity(preds: Tensor, target: Tensor, ignore_index: Optional[int] = None) -> Tensor:\n \"\"\"Perplexity measures how well a language model predicts a text sample.\n\n This metric is calculated as the average number of bits per word a model needs to represent the sample.\n\n Args:\n preds:\n Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,\n vocab_size], which is the output of a language model. Scores will be normalized internally using softmax.\n target:\n Ground truth values with a shape [batch_size, seq_len].\n ignore_index:\n Integer specifying a target class to ignore. If given, this class index does not contribute\n to the returned score.\n\n Returns:\n Perplexity value\n\n Examples:\n >>> import torch\n >>> gen = torch.manual_seed(42)\n >>> preds = torch.rand(2, 8, 5, generator=gen)\n >>> target = torch.randint(5, (2, 8), generator=gen)\n >>> target[0, 6:] = -100\n >>> perplexity(preds, target, ignore_index=-100)\n tensor(5.8540)\n\n \"\"\"\n total, count = _perplexity_update(preds, target, ignore_index)\n return _perplexity_compute(total, count)\n", "path": "src/torchmetrics/functional/text/perplexity.py" } ]
[ { "content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Optional, Tuple\n\nimport torch\nfrom torch import Tensor\n\n\ndef _check_shape_and_type_consistency(preds: Tensor, target: Tensor) -> None:\n \"\"\"Check shape and type consistency of input vectors.\n\n Args:\n preds:\n Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,\n vocab_size]. Scores will be normalized internally using softmax.\n target:\n Ground truth values with a shape [batch_size, seq_len].\n\n Raises:\n ValueError:\n If ``preds`` tensor has no 3 dimensions.\n ValueError:\n If ``target`` tensor has no 2 dimensions.\n ValueError:\n If the first two dimensions of ``preds`` and ``target`` do not equal.\n TypeError:\n If ``preds`` dtype is not one of ``(torch.float16, torch.float32, torch.float64)``\n TypeError:\n If ``target`` is not of a type LongTensor (torch.int64)\n\n \"\"\"\n if len(preds.shape) != 3:\n raise ValueError(\n \"Input tensor `preds` is expected to have 3 dimensions, [batch_size, seq_len, vocab_size],\"\n f\" but got {len(preds.shape)}.\"\n )\n if len(target.shape) != 2:\n raise ValueError(\n \"Input tensor `target` is expected to have 2 dimensions, [batch_size, seq_len],\"\n f\" but got {len(target.shape)}.\"\n )\n if preds.shape[:2] != target.shape:\n raise ValueError(\n \"Input tensors `preds` and `target` are expected to have equaling first two dimensions,\"\n f\" [batch_size, seq_len], but got {preds.shape[:2]} and {target.shape}.\"\n )\n if not preds.is_floating_point():\n raise TypeError(f\"Input tensor `preds` is expected to be of floating point type but got {preds.dtype}.\")\n if target.dtype != torch.int64:\n raise TypeError(f\"Input tensor `target` is expected to be of a type {torch.int64} but got {target.dtype}.\")\n\n\ndef _perplexity_update(preds: Tensor, target: Tensor, ignore_index: Optional[int] = None) -> Tuple[Tensor, Tensor]:\n \"\"\"Compute intermediate statistics for Perplexity.\n\n Args:\n preds:\n Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,\n vocab_size]. Scores will be normalized internally using softmax.\n target:\n Ground truth values with a shape [batch_size, seq_len].\n ignore_index:\n Integer specifying a target class to ignore. 
If given, this class index does not contribute\n to the returned score.\n\n Returns:\n Log probabilities, summed over all samples\n Number of samples\n\n \"\"\"\n _check_shape_and_type_consistency(preds, target)\n\n probs = torch.nn.functional.softmax(preds.reshape(-1, preds.shape[-1]), dim=1)\n target = target.reshape(-1)\n\n if ignore_index is not None:\n mask = target.ne(ignore_index)\n target = target.where(target != ignore_index, torch.tensor(0, device=target.device))\n else:\n mask = torch.ones_like(target, dtype=torch.bool)\n\n probs = probs[torch.arange(target.numel()), target][mask]\n total_log_probs = -probs.log().sum()\n count = mask.sum()\n\n return total_log_probs, count\n\n\ndef _perplexity_compute(total: Tensor, count: Tensor) -> Tensor:\n \"\"\"Compute the Perplexity.\n\n Args:\n total: Log probabilities, summed over all samples\n count: Number of samples\n Returns:\n Perplexity\n\n \"\"\"\n return torch.exp(total / count)\n\n\ndef perplexity(preds: Tensor, target: Tensor, ignore_index: Optional[int] = None) -> Tensor:\n \"\"\"Perplexity measures how well a language model predicts a text sample.\n\n This metric is calculated as the average number of bits per word a model needs to represent the sample.\n\n Args:\n preds:\n Logits or a unnormalized score assigned to each token in a sequence with shape [batch_size, seq_len,\n vocab_size], which is the output of a language model. Scores will be normalized internally using softmax.\n target:\n Ground truth values with a shape [batch_size, seq_len].\n ignore_index:\n Integer specifying a target class to ignore. If given, this class index does not contribute\n to the returned score.\n\n Returns:\n Perplexity value\n\n Examples:\n >>> import torch\n >>> gen = torch.manual_seed(42)\n >>> preds = torch.rand(2, 8, 5, generator=gen)\n >>> target = torch.randint(5, (2, 8), generator=gen)\n >>> target[0, 6:] = -100\n >>> perplexity(preds, target, ignore_index=-100)\n tensor(5.8540)\n\n \"\"\"\n total, count = _perplexity_update(preds, target, ignore_index)\n return _perplexity_compute(total, count)\n", "path": "src/torchmetrics/functional/text/perplexity.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index ad35113a0f6..b7d3e419e16 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Fixed +- Fixed high memory consumption in `Perplexity` metric ([#2346](https://github.com/Lightning-AI/torchmetrics/pull/2346)) + + - Fixed cached network in `FeatureShare` not being moved to the correct device ([#2348](https://github.com/Lightning-AI/torchmetrics/pull/2348)) diff --git a/src/torchmetrics/functional/text/perplexity.py b/src/torchmetrics/functional/text/perplexity.py index cb0bafd5082..39f832905cf 100644 --- a/src/torchmetrics/functional/text/perplexity.py +++ b/src/torchmetrics/functional/text/perplexity.py @@ -91,7 +91,7 @@ def _perplexity_update(preds: Tensor, target: Tensor, ignore_index: Optional[int else: mask = torch.ones_like(target, dtype=torch.bool) - probs = probs[:, target].diagonal()[mask] + probs = probs[torch.arange(target.numel()), target][mask] total_log_probs = -probs.log().sum() count = mask.sum()
RedHatInsights__insights-core-1112
hostname parser doesn't handle sos_commands/general/hostname sos_commands/general/hostname contains the hostname with a newline at the end, which results in a file with two lines. The hostname parser specifically checks for one line but doesn't gracefully handle the problem. We can update the parser to handle this case and/or investigate whether proc/sys/kernel/hostname is a valid substitute to put in sos_archive.py instead.
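A minimal sketch of the failure mode and of the filtering fix adopted below (the string mirrors the empty-line shape the parser can receive; the exact plumbing in `insights` differs):

```py
# A stray newline around the hostname means the wrapped file content
# arrives as two lines -- one of them empty -- so the parser's strict
# len(content) == 1 check fails and every attribute comes back None.
content = "\nrhel7.example.com\n".splitlines()
assert content == ["", "rhel7.example.com"]

# Dropping falsy entries, as the parser fix does, restores the one-line case.
content = list(filter(None, content))
assert content == ["rhel7.example.com"]
```

(On Python 3, `filter` returns a lazy iterator, so materializing it with `list` keeps a subsequent `len(content)` check meaningful there.)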
[ { "content": "\"\"\"\nhostname - command ``/bin/hostname``\n====================================\n\nThis parser simply reads the output of ``/bin/hostname``, which is the\nconfigured fully qualified domain name of the client system. It then\nsplits it into ``hostname`` and ``domain`` and stores these as attributes,\nalong with the unmodified name in the ``fqdn`` attribute.\n\nExamples:\n\n >>> hostname = shared[Hostname]\n >>> hostname.fqdn\n 'www.example.com'\n >>> hostname.hostname\n 'www'\n >>> hostname.domain\n 'example.com'\n\n\"\"\"\n\nfrom .. import Parser, parser\nfrom insights.specs import Specs\n\n\n@parser(Specs.hostname)\nclass Hostname(Parser):\n \"\"\"Class for parsing ``hostname`` command output.\n\n Attributes:\n fqdn: The fully qualified domain name of the host. The same to\n ``hostname`` when domain part is not set.\n hostname: The hostname.\n domain: The domain get from the fqdn.\n \"\"\"\n def parse_content(self, content):\n raw = None\n if len(content) == 1:\n raw = content[0].strip()\n self.fqdn = raw\n self.hostname = raw.split(\".\")[0] if raw else None\n self.domain = \".\".join(raw.split(\".\")[1:]) if raw else None\n\n def __str__(self):\n return \"<hostname: {h}, domain: {d}>\".format(h=self.hostname, d=self.domain)\n", "path": "insights/parsers/hostname.py" } ]
[ { "content": "\"\"\"\nhostname - command ``/bin/hostname``\n====================================\n\nThis parser simply reads the output of ``/bin/hostname``, which is the\nconfigured fully qualified domain name of the client system. It then\nsplits it into ``hostname`` and ``domain`` and stores these as attributes,\nalong with the unmodified name in the ``fqdn`` attribute.\n\nExamples:\n\n >>> hostname = shared[Hostname]\n >>> hostname.fqdn\n 'www.example.com'\n >>> hostname.hostname\n 'www'\n >>> hostname.domain\n 'example.com'\n\n\"\"\"\n\nfrom .. import Parser, parser\nfrom insights.specs import Specs\n\n\n@parser(Specs.hostname)\nclass Hostname(Parser):\n \"\"\"Class for parsing ``hostname`` command output.\n\n Attributes:\n fqdn: The fully qualified domain name of the host. The same to\n ``hostname`` when domain part is not set.\n hostname: The hostname.\n domain: The domain get from the fqdn.\n \"\"\"\n def parse_content(self, content):\n content = filter(None, content)\n raw = None\n if len(content) == 1:\n raw = content[0].strip()\n self.fqdn = raw\n self.hostname = raw.split(\".\")[0] if raw else None\n self.domain = \".\".join(raw.split(\".\")[1:]) if raw else None\n\n def __str__(self):\n return \"<hostname: {h}, domain: {d}>\".format(h=self.hostname, d=self.domain)\n", "path": "insights/parsers/hostname.py" } ]
diff --git a/insights/parsers/hostname.py b/insights/parsers/hostname.py index 1af7ed8e60..ecb3658983 100644 --- a/insights/parsers/hostname.py +++ b/insights/parsers/hostname.py @@ -34,6 +34,7 @@ class Hostname(Parser): domain: The domain get from the fqdn. """ def parse_content(self, content): + content = filter(None, content) raw = None if len(content) == 1: raw = content[0].strip() diff --git a/insights/parsers/tests/test_hostname.py b/insights/parsers/tests/test_hostname.py index 8b25de81c9..7b3bade535 100644 --- a/insights/parsers/tests/test_hostname.py +++ b/insights/parsers/tests/test_hostname.py @@ -2,7 +2,14 @@ from insights.tests import context_wrap HOSTNAME = "rhel7.example.com" +HOSTNAME_MULTILINE = """ +rhel7.example.com +""" + HOSTNAME_SHORT = "rhel7" +HOSTNAME_SHORT_MULTILINE = """ +rhel7 +""" def test_hostname(): @@ -12,11 +19,22 @@ def test_hostname(): assert data.domain == "example.com" assert "{0}".format(data) == "<hostname: rhel7, domain: example.com>" + data = Hostname(context_wrap(HOSTNAME_MULTILINE, strip=False)) + assert data.fqdn == "rhel7.example.com" + assert data.hostname == "rhel7" + assert data.domain == "example.com" + assert "{0}".format(data) == "<hostname: rhel7, domain: example.com>" + data = Hostname(context_wrap(HOSTNAME_SHORT)) assert data.fqdn == "rhel7" assert data.hostname == "rhel7" assert data.domain == "" + data = Hostname(context_wrap(HOSTNAME_SHORT_MULTILINE, strip=False)) + assert data.fqdn == "rhel7" + assert data.hostname == "rhel7" + assert data.domain == "" + data = Hostname(context_wrap("")) assert data.fqdn is None assert data.hostname is None diff --git a/insights/tests/__init__.py b/insights/tests/__init__.py index 3d220710c4..186cfe2d42 100644 --- a/insights/tests/__init__.py +++ b/insights/tests/__init__.py @@ -77,9 +77,12 @@ def context_wrap(lines, release=DEFAULT_RELEASE, version="-1.-1", machine_id="machine_id", + strip=True, **kwargs): if isinstance(lines, basestring): - lines = lines.strip().splitlines() + if strip: + lines = lines.strip() + lines = lines.splitlines() return Context(content=lines, path=path, hostname=hostname, release=release, version=version.split("."),
beetbox__beets-2631
Broken pipe on piping `list` output on Python 3
### Problem

I'm trying to write a quick dirty script to play the first result of a search, looking much like you'd expect:

```sh
mpv --no-audio-display "$(beet ls -p $@ | head -n 1)"
```

However, there's something in the formatting of the output that `head` doesn't like:

```sh
$ beet ls -p home
/media/beets/5 Seconds of Summer/5 Seconds of Summer/09 Long Way Home.mp3
/media/beets/5 Seconds of Summer/LIVESOS/07 Long Way Home.mp3
/media/beets/5 Seconds of Summer/Sounds Good Feels Good/12 Broken Home.mp3
/media/beets/American Authors/What We Live For/07 Go Big or Go Home.mp3
/media/beets/Boys Like Girls/Crazy World/09 Take Me Home.mp3
...
```

However

```sh
beet -vv ls -p home | head -n 1
user configuration: /home/shagaru/.config/beets/config.yaml
data directory: /home/shagaru/.config/beets
plugin paths:
Sending event: pluginload
library database: /media/Music/library.db
library directory: /media/Music
Sending event: library_opened
/media/beets/5 Seconds of Summer/5 Seconds of Summer/09 Long Way Home.mp3
Exception ignored in: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='UTF-8'>
BrokenPipeError: [Errno 32] Broken pipe
```

Trying to cheat and run it through `echo` instead produces the following:

```sh
$ echo -e $(beet ls -p home) | head -n 1
/media/beets/5 Seconds of Summer/5 Seconds of Summer/09 Long Way Home.mp3
/media/beets/5 Seconds of Summer/LIVESOS/07 Long Way Home.mp3
/media/beets/5 Seconds of Summer/Sounds Good Feels Good/12 Broken Home.mp3
/media/beets/American Authors/What We Live For/07 Go Big or Go Home.mp3
/media/beets/Boys Like Girls/Crazy World/09 Take Me Home.mp3
...
```

as one string, so it seems like the proper line-ending characters aren't being properly generated. This may be expected of Python output, I don't know; I haven't written using it. End question I suppose is there any way around how Python handles printing lines short of rewriting that entire part of the program?

### Setup

* OS: GNU/Linux
* Python version: 3.5.2
* beets version: 1.4.4
* Turning off plugins made problem go away (yes/no): no

My configuration (output of `beet config`) shouldn't matter, because it's a question of how the program functions inherently.
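For context on the traceback itself: Python 3 turns the SIGPIPE that a downstream `head` sends into a `BrokenPipeError` the next time beets flushes stdout, and anything unhandled at interpreter shutdown is printed as the `Exception ignored in:` noise above. A generic way a CLI tool can opt back into the traditional unix behaviour is sketched below — this is a common workaround, not necessarily the change this PR makes, and it is POSIX-only:

```py
import signal
import sys

# Restore the default SIGPIPE disposition so the process exits quietly when
# the reader goes away, instead of raising BrokenPipeError mid-write.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)

for i in range(100000):
    sys.stdout.write("line %d\n" % i)  # piping to `head -n 1` now just stops us
```

Alternatively, the writer can catch `BrokenPipeError` around its stdout writes and exit cleanly, which keeps the behaviour portable to platforms without SIGPIPE.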
[ { "content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"This module contains all of the core logic for beets' command-line\ninterface. To invoke the CLI, just call beets.ui.main(). The actual\nCLI commands are implemented in the ui.commands module.\n\"\"\"\n\nfrom __future__ import division, absolute_import, print_function\n\nimport optparse\nimport textwrap\nimport sys\nfrom difflib import SequenceMatcher\nimport sqlite3\nimport errno\nimport re\nimport struct\nimport traceback\nimport os.path\nfrom six.moves import input\n\nfrom beets import logging\nfrom beets import library\nfrom beets import plugins\nfrom beets import util\nfrom beets.util.functemplate import Template\nfrom beets import config\nfrom beets.util import confit, as_string\nfrom beets.autotag import mb\nfrom beets.dbcore import query as db_query\nfrom beets.dbcore import db\nimport six\n\n# On Windows platforms, use colorama to support \"ANSI\" terminal colors.\nif sys.platform == 'win32':\n try:\n import colorama\n except ImportError:\n pass\n else:\n colorama.init()\n\n\nlog = logging.getLogger('beets')\nif not log.handlers:\n log.addHandler(logging.StreamHandler())\nlog.propagate = False # Don't propagate to root handler.\n\n\nPF_KEY_QUERIES = {\n 'comp': u'comp:true',\n 'singleton': u'singleton:true',\n}\n\n\nclass UserError(Exception):\n \"\"\"UI exception. Commands should throw this in order to display\n nonrecoverable errors to the user.\n \"\"\"\n\n\n# Encoding utilities.\n\n\ndef _in_encoding():\n \"\"\"Get the encoding to use for *inputting* strings from the console.\n \"\"\"\n return _stream_encoding(sys.stdin)\n\n\ndef _out_encoding():\n \"\"\"Get the encoding to use for *outputting* strings to the console.\n \"\"\"\n return _stream_encoding(sys.stdout)\n\n\ndef _stream_encoding(stream, default='utf-8'):\n \"\"\"A helper for `_in_encoding` and `_out_encoding`: get the stream's\n preferred encoding, using a configured override or a default\n fallback if neither is not specified.\n \"\"\"\n # Configured override?\n encoding = config['terminal_encoding'].get()\n if encoding:\n return encoding\n\n # For testing: When sys.stdout or sys.stdin is a StringIO under the\n # test harness, it doesn't have an `encoding` attribute. 
Just use\n # UTF-8.\n if not hasattr(stream, 'encoding'):\n return default\n\n # Python's guessed output stream encoding, or UTF-8 as a fallback\n # (e.g., when piped to a file).\n return stream.encoding or default\n\n\ndef decargs(arglist):\n \"\"\"Given a list of command-line argument bytestrings, attempts to\n decode them to Unicode strings when running under Python 2.\n \"\"\"\n if six.PY2:\n return [s.decode(util.arg_encoding()) for s in arglist]\n else:\n return arglist\n\n\ndef print_(*strings, **kwargs):\n \"\"\"Like print, but rather than raising an error when a character\n is not in the terminal's encoding's character set, just silently\n replaces it.\n\n The arguments must be Unicode strings: `unicode` on Python 2; `str` on\n Python 3.\n\n The `end` keyword argument behaves similarly to the built-in `print`\n (it defaults to a newline).\n \"\"\"\n if not strings:\n strings = [u'']\n assert isinstance(strings[0], six.text_type)\n\n txt = u' '.join(strings)\n txt += kwargs.get('end', u'\\n')\n\n # Encode the string and write it to stdout.\n if six.PY2:\n # On Python 2, sys.stdout expects bytes.\n out = txt.encode(_out_encoding(), 'replace')\n sys.stdout.write(out)\n else:\n # On Python 3, sys.stdout expects text strings and uses the\n # exception-throwing encoding error policy. To avoid throwing\n # errors and use our configurable encoding override, we use the\n # underlying bytes buffer instead.\n if hasattr(sys.stdout, 'buffer'):\n out = txt.encode(_out_encoding(), 'replace')\n sys.stdout.buffer.write(out)\n sys.stdout.buffer.flush()\n else:\n # In our test harnesses (e.g., DummyOut), sys.stdout.buffer\n # does not exist. We instead just record the text string.\n sys.stdout.write(txt)\n\n\n# Configuration wrappers.\n\ndef _bool_fallback(a, b):\n \"\"\"Given a boolean or None, return the original value or a fallback.\n \"\"\"\n if a is None:\n assert isinstance(b, bool)\n return b\n else:\n assert isinstance(a, bool)\n return a\n\n\ndef should_write(write_opt=None):\n \"\"\"Decide whether a command that updates metadata should also write\n tags, using the importer configuration as the default.\n \"\"\"\n return _bool_fallback(write_opt, config['import']['write'].get(bool))\n\n\ndef should_move(move_opt=None):\n \"\"\"Decide whether a command that updates metadata should also move\n files when they're inside the library, using the importer\n configuration as the default.\n\n Specifically, commands should move files after metadata updates only\n when the importer is configured *either* to move *or* to copy files.\n They should avoid moving files when the importer is configured not\n to touch any filenames.\n \"\"\"\n return _bool_fallback(\n move_opt,\n config['import']['move'].get(bool) or\n config['import']['copy'].get(bool)\n )\n\n\n# Input prompts.\n\ndef input_(prompt=None):\n \"\"\"Like `input`, but decodes the result to a Unicode string.\n Raises a UserError if stdin is not available. The prompt is sent to\n stdout rather than stderr. 
A printed between the prompt and the\n input cursor.\n \"\"\"\n # raw_input incorrectly sends prompts to stderr, not stdout, so we\n # use print_() explicitly to display prompts.\n # http://bugs.python.org/issue1927\n if prompt:\n print_(prompt, end=u' ')\n\n try:\n resp = input()\n except EOFError:\n raise UserError(u'stdin stream ended while input required')\n\n if six.PY2:\n return resp.decode(_in_encoding(), 'ignore')\n else:\n return resp\n\n\ndef input_options(options, require=False, prompt=None, fallback_prompt=None,\n numrange=None, default=None, max_width=72):\n \"\"\"Prompts a user for input. The sequence of `options` defines the\n choices the user has. A single-letter shortcut is inferred for each\n option; the user's choice is returned as that single, lower-case\n letter. The options should be provided as lower-case strings unless\n a particular shortcut is desired; in that case, only that letter\n should be capitalized.\n\n By default, the first option is the default. `default` can be provided to\n override this. If `require` is provided, then there is no default. The\n prompt and fallback prompt are also inferred but can be overridden.\n\n If numrange is provided, it is a pair of `(high, low)` (both ints)\n indicating that, in addition to `options`, the user may enter an\n integer in that inclusive range.\n\n `max_width` specifies the maximum number of columns in the\n automatically generated prompt string.\n \"\"\"\n # Assign single letters to each option. Also capitalize the options\n # to indicate the letter.\n letters = {}\n display_letters = []\n capitalized = []\n first = True\n for option in options:\n # Is a letter already capitalized?\n for letter in option:\n if letter.isalpha() and letter.upper() == letter:\n found_letter = letter\n break\n else:\n # Infer a letter.\n for letter in option:\n if not letter.isalpha():\n continue # Don't use punctuation.\n if letter not in letters:\n found_letter = letter\n break\n else:\n raise ValueError(u'no unambiguous lettering found')\n\n letters[found_letter.lower()] = option\n index = option.index(found_letter)\n\n # Mark the option's shortcut letter for display.\n if not require and (\n (default is None and not numrange and first) or\n (isinstance(default, six.string_types) and\n found_letter.lower() == default.lower())):\n # The first option is the default; mark it.\n show_letter = '[%s]' % found_letter.upper()\n is_default = True\n else:\n show_letter = found_letter.upper()\n is_default = False\n\n # Colorize the letter shortcut.\n show_letter = colorize('action_default' if is_default else 'action',\n show_letter)\n\n # Insert the highlighted letter back into the word.\n capitalized.append(\n option[:index] + show_letter + option[index + 1:]\n )\n display_letters.append(found_letter.upper())\n\n first = False\n\n # The default is just the first option if unspecified.\n if require:\n default = None\n elif default is None:\n if numrange:\n default = numrange[0]\n else:\n default = display_letters[0].lower()\n\n # Make a prompt if one is not provided.\n if not prompt:\n prompt_parts = []\n prompt_part_lengths = []\n if numrange:\n if isinstance(default, int):\n default_name = six.text_type(default)\n default_name = colorize('action_default', default_name)\n tmpl = '# selection (default %s)'\n prompt_parts.append(tmpl % default_name)\n prompt_part_lengths.append(len(tmpl % six.text_type(default)))\n else:\n prompt_parts.append('# selection')\n prompt_part_lengths.append(len(prompt_parts[-1]))\n prompt_parts += capitalized\n 
prompt_part_lengths += [len(s) for s in options]\n\n # Wrap the query text.\n prompt = ''\n line_length = 0\n for i, (part, length) in enumerate(zip(prompt_parts,\n prompt_part_lengths)):\n # Add punctuation.\n if i == len(prompt_parts) - 1:\n part += '?'\n else:\n part += ','\n length += 1\n\n # Choose either the current line or the beginning of the next.\n if line_length + length + 1 > max_width:\n prompt += '\\n'\n line_length = 0\n\n if line_length != 0:\n # Not the beginning of the line; need a space.\n part = ' ' + part\n length += 1\n\n prompt += part\n line_length += length\n\n # Make a fallback prompt too. This is displayed if the user enters\n # something that is not recognized.\n if not fallback_prompt:\n fallback_prompt = u'Enter one of '\n if numrange:\n fallback_prompt += u'%i-%i, ' % numrange\n fallback_prompt += ', '.join(display_letters) + ':'\n\n resp = input_(prompt)\n while True:\n resp = resp.strip().lower()\n\n # Try default option.\n if default is not None and not resp:\n resp = default\n\n # Try an integer input if available.\n if numrange:\n try:\n resp = int(resp)\n except ValueError:\n pass\n else:\n low, high = numrange\n if low <= resp <= high:\n return resp\n else:\n resp = None\n\n # Try a normal letter input.\n if resp:\n resp = resp[0]\n if resp in letters:\n return resp\n\n # Prompt for new input.\n resp = input_(fallback_prompt)\n\n\ndef input_yn(prompt, require=False):\n \"\"\"Prompts the user for a \"yes\" or \"no\" response. The default is\n \"yes\" unless `require` is `True`, in which case there is no default.\n \"\"\"\n sel = input_options(\n ('y', 'n'), require, prompt, u'Enter Y or N:'\n )\n return sel == u'y'\n\n\ndef input_select_objects(prompt, objs, rep):\n \"\"\"Prompt to user to choose all, none, or some of the given objects.\n Return the list of selected objects.\n\n `prompt` is the prompt string to use for each question (it should be\n phrased as an imperative verb). `rep` is a function to call on each\n object to print it out when confirming objects individually.\n \"\"\"\n choice = input_options(\n (u'y', u'n', u's'), False,\n u'%s? (Yes/no/select)' % prompt)\n print() # Blank line.\n\n if choice == u'y': # Yes.\n return objs\n\n elif choice == u's': # Select.\n out = []\n for obj in objs:\n rep(obj)\n if input_yn(u'%s? 
(yes/no)' % prompt, True):\n out.append(obj)\n print() # go to a new line\n return out\n\n else: # No.\n return []\n\n\n# Human output formatting.\n\ndef human_bytes(size):\n \"\"\"Formats size, a number of bytes, in a human-readable way.\"\"\"\n powers = [u'', u'K', u'M', u'G', u'T', u'P', u'E', u'Z', u'Y', u'H']\n unit = 'B'\n for power in powers:\n if size < 1024:\n return u\"%3.1f %s%s\" % (size, power, unit)\n size /= 1024.0\n unit = u'iB'\n return u\"big\"\n\n\ndef human_seconds(interval):\n \"\"\"Formats interval, a number of seconds, as a human-readable time\n interval using English words.\n \"\"\"\n units = [\n (1, u'second'),\n (60, u'minute'),\n (60, u'hour'),\n (24, u'day'),\n (7, u'week'),\n (52, u'year'),\n (10, u'decade'),\n ]\n for i in range(len(units) - 1):\n increment, suffix = units[i]\n next_increment, _ = units[i + 1]\n interval /= float(increment)\n if interval < next_increment:\n break\n else:\n # Last unit.\n increment, suffix = units[-1]\n interval /= float(increment)\n\n return u\"%3.1f %ss\" % (interval, suffix)\n\n\ndef human_seconds_short(interval):\n \"\"\"Formats a number of seconds as a short human-readable M:SS\n string.\n \"\"\"\n interval = int(interval)\n return u'%i:%02i' % (interval // 60, interval % 60)\n\n\n# Colorization.\n\n# ANSI terminal colorization code heavily inspired by pygments:\n# http://dev.pocoo.org/hg/pygments-main/file/b2deea5b5030/pygments/console.py\n# (pygments is by Tim Hatch, Armin Ronacher, et al.)\nCOLOR_ESCAPE = \"\\x1b[\"\nDARK_COLORS = {\n \"black\": 0,\n \"darkred\": 1,\n \"darkgreen\": 2,\n \"brown\": 3,\n \"darkyellow\": 3,\n \"darkblue\": 4,\n \"purple\": 5,\n \"darkmagenta\": 5,\n \"teal\": 6,\n \"darkcyan\": 6,\n \"lightgray\": 7\n}\nLIGHT_COLORS = {\n \"darkgray\": 0,\n \"red\": 1,\n \"green\": 2,\n \"yellow\": 3,\n \"blue\": 4,\n \"fuchsia\": 5,\n \"magenta\": 5,\n \"turquoise\": 6,\n \"cyan\": 6,\n \"white\": 7\n}\nRESET_COLOR = COLOR_ESCAPE + \"39;49;00m\"\n\n# These abstract COLOR_NAMES are lazily mapped on to the actual color in COLORS\n# as they are defined in the configuration files, see function: colorize\nCOLOR_NAMES = ['text_success', 'text_warning', 'text_error', 'text_highlight',\n 'text_highlight_minor', 'action_default', 'action']\nCOLORS = None\n\n\ndef _colorize(color, text):\n \"\"\"Returns a string that prints the given text in the given color\n in a terminal that is ANSI color-aware. The color must be something\n in DARK_COLORS or LIGHT_COLORS.\n \"\"\"\n if color in DARK_COLORS:\n escape = COLOR_ESCAPE + \"%im\" % (DARK_COLORS[color] + 30)\n elif color in LIGHT_COLORS:\n escape = COLOR_ESCAPE + \"%i;01m\" % (LIGHT_COLORS[color] + 30)\n else:\n raise ValueError(u'no such color %s', color)\n return escape + text + RESET_COLOR\n\n\ndef colorize(color_name, text):\n \"\"\"Colorize text if colored output is enabled. 
(Like _colorize but\n conditional.)\n \"\"\"\n if config['ui']['color']:\n global COLORS\n if not COLORS:\n COLORS = dict((name,\n config['ui']['colors'][name].as_str())\n for name in COLOR_NAMES)\n # In case a 3rd party plugin is still passing the actual color ('red')\n # instead of the abstract color name ('text_error')\n color = COLORS.get(color_name)\n if not color:\n log.debug(u'Invalid color_name: {0}', color_name)\n color = color_name\n return _colorize(color, text)\n else:\n return text\n\n\ndef _colordiff(a, b, highlight='text_highlight',\n minor_highlight='text_highlight_minor'):\n \"\"\"Given two values, return the same pair of strings except with\n their differences highlighted in the specified color. Strings are\n highlighted intelligently to show differences; other values are\n stringified and highlighted in their entirety.\n \"\"\"\n if not isinstance(a, six.string_types) \\\n or not isinstance(b, six.string_types):\n # Non-strings: use ordinary equality.\n a = six.text_type(a)\n b = six.text_type(b)\n if a == b:\n return a, b\n else:\n return colorize(highlight, a), colorize(highlight, b)\n\n if isinstance(a, bytes) or isinstance(b, bytes):\n # A path field.\n a = util.displayable_path(a)\n b = util.displayable_path(b)\n\n a_out = []\n b_out = []\n\n matcher = SequenceMatcher(lambda x: False, a, b)\n for op, a_start, a_end, b_start, b_end in matcher.get_opcodes():\n if op == 'equal':\n # In both strings.\n a_out.append(a[a_start:a_end])\n b_out.append(b[b_start:b_end])\n elif op == 'insert':\n # Right only.\n b_out.append(colorize(highlight, b[b_start:b_end]))\n elif op == 'delete':\n # Left only.\n a_out.append(colorize(highlight, a[a_start:a_end]))\n elif op == 'replace':\n # Right and left differ. Colorise with second highlight if\n # it's just a case change.\n if a[a_start:a_end].lower() != b[b_start:b_end].lower():\n color = highlight\n else:\n color = minor_highlight\n a_out.append(colorize(color, a[a_start:a_end]))\n b_out.append(colorize(color, b[b_start:b_end]))\n else:\n assert(False)\n\n return u''.join(a_out), u''.join(b_out)\n\n\ndef colordiff(a, b, highlight='text_highlight'):\n \"\"\"Colorize differences between two values if color is enabled.\n (Like _colordiff but conditional.)\n \"\"\"\n if config['ui']['color']:\n return _colordiff(a, b, highlight)\n else:\n return six.text_type(a), six.text_type(b)\n\n\ndef get_path_formats(subview=None):\n \"\"\"Get the configuration's path formats as a list of query/template\n pairs.\n \"\"\"\n path_formats = []\n subview = subview or config['paths']\n for query, view in subview.items():\n query = PF_KEY_QUERIES.get(query, query) # Expand common queries.\n path_formats.append((query, Template(view.as_str())))\n return path_formats\n\n\ndef get_replacements():\n \"\"\"Confit validation function that reads regex/string pairs.\n \"\"\"\n replacements = []\n for pattern, repl in config['replace'].get(dict).items():\n repl = repl or ''\n try:\n replacements.append((re.compile(pattern), repl))\n except re.error:\n raise UserError(\n u'malformed regular expression in replace: {0}'.format(\n pattern\n )\n )\n return replacements\n\n\ndef term_width():\n \"\"\"Get the width (columns) of the terminal.\"\"\"\n fallback = config['ui']['terminal_width'].get(int)\n\n # The fcntl and termios modules are not available on non-Unix\n # platforms, so we fall back to a constant.\n try:\n import fcntl\n import termios\n except ImportError:\n return fallback\n\n try:\n buf = fcntl.ioctl(0, termios.TIOCGWINSZ, ' ' * 4)\n except IOError:\n 
return fallback\n try:\n height, width = struct.unpack('hh', buf)\n except struct.error:\n return fallback\n return width\n\n\nFLOAT_EPSILON = 0.01\n\n\ndef _field_diff(field, old, new):\n \"\"\"Given two Model objects, format their values for `field` and\n highlight changes among them. Return a human-readable string. If the\n value has not changed, return None instead.\n \"\"\"\n oldval = old.get(field)\n newval = new.get(field)\n\n # If no change, abort.\n if isinstance(oldval, float) and isinstance(newval, float) and \\\n abs(oldval - newval) < FLOAT_EPSILON:\n return None\n elif oldval == newval:\n return None\n\n # Get formatted values for output.\n oldstr = old.formatted().get(field, u'')\n newstr = new.formatted().get(field, u'')\n\n # For strings, highlight changes. For others, colorize the whole\n # thing.\n if isinstance(oldval, six.string_types):\n oldstr, newstr = colordiff(oldval, newstr)\n else:\n oldstr = colorize('text_error', oldstr)\n newstr = colorize('text_error', newstr)\n\n return u'{0} -> {1}'.format(oldstr, newstr)\n\n\ndef show_model_changes(new, old=None, fields=None, always=False):\n \"\"\"Given a Model object, print a list of changes from its pristine\n version stored in the database. Return a boolean indicating whether\n any changes were found.\n\n `old` may be the \"original\" object to avoid using the pristine\n version from the database. `fields` may be a list of fields to\n restrict the detection to. `always` indicates whether the object is\n always identified, regardless of whether any changes are present.\n \"\"\"\n old = old or new._db._get(type(new), new.id)\n\n # Build up lines showing changed fields.\n changes = []\n for field in old:\n # Subset of the fields. Never show mtime.\n if field == 'mtime' or (fields and field not in fields):\n continue\n\n # Detect and show difference for this field.\n line = _field_diff(field, old, new)\n if line:\n changes.append(u' {0}: {1}'.format(field, line))\n\n # New fields.\n for field in set(new) - set(old):\n if fields and field not in fields:\n continue\n\n changes.append(u' {0}: {1}'.format(\n field,\n colorize('text_highlight', new.formatted()[field])\n ))\n\n # Print changes.\n if changes or always:\n print_(format(old))\n if changes:\n print_(u'\\n'.join(changes))\n\n return bool(changes)\n\n\ndef show_path_changes(path_changes):\n \"\"\"Given a list of tuples (source, destination) that indicate the\n path changes, log the changes as INFO-level output to the beets log.\n The output is guaranteed to be unicode.\n\n Every pair is shown on a single line if the terminal width permits it,\n else it is split over two lines. 
E.g.,\n\n Source -> Destination\n\n vs.\n\n Source\n -> Destination\n \"\"\"\n sources, destinations = zip(*path_changes)\n\n # Ensure unicode output\n sources = list(map(util.displayable_path, sources))\n destinations = list(map(util.displayable_path, destinations))\n\n # Calculate widths for terminal split\n col_width = (term_width() - len(' -> ')) // 2\n max_width = len(max(sources + destinations, key=len))\n\n if max_width > col_width:\n # Print every change over two lines\n for source, dest in zip(sources, destinations):\n log.info(u'{0} \\n -> {1}', source, dest)\n else:\n # Print every change on a single line, and add a header\n title_pad = max_width - len('Source ') + len(' -> ')\n\n log.info(u'Source {0} Destination', ' ' * title_pad)\n for source, dest in zip(sources, destinations):\n pad = max_width - len(source)\n log.info(u'{0} {1} -> {2}', source, ' ' * pad, dest)\n\n\n# Helper functions for option parsing.\n\ndef _store_dict(option, opt_str, value, parser):\n \"\"\"Custom action callback to parse options which have ``key=value``\n pairs as values. All such pairs passed for this option are\n aggregated into a dictionary.\n \"\"\"\n dest = option.dest\n option_values = getattr(parser.values, dest, None)\n\n if option_values is None:\n # This is the first supplied ``key=value`` pair of option.\n # Initialize empty dictionary and get a reference to it.\n setattr(parser.values, dest, dict())\n option_values = getattr(parser.values, dest)\n\n try:\n key, value = map(lambda s: util.text_string(s), value.split('='))\n if not (key and value):\n raise ValueError\n except ValueError:\n raise UserError(\n \"supplied argument `{0}' is not of the form `key=value'\"\n .format(value))\n\n option_values[key] = value\n\n\nclass CommonOptionsParser(optparse.OptionParser, object):\n \"\"\"Offers a simple way to add common formatting options.\n\n Options available include:\n - matching albums instead of tracks: add_album_option()\n - showing paths instead of items/albums: add_path_option()\n - changing the format of displayed items/albums: add_format_option()\n\n The last one can have several behaviors:\n - against a special target\n - with a certain format\n - autodetected target with the album option\n\n Each method is fully documented in the related method.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(CommonOptionsParser, self).__init__(*args, **kwargs)\n self._album_flags = False\n # this serves both as an indicator that we offer the feature AND allows\n # us to check whether it has been specified on the CLI - bypassing the\n # fact that arguments may be in any order\n\n def add_album_option(self, flags=('-a', '--album')):\n \"\"\"Add a -a/--album option to match albums instead of tracks.\n\n If used then the format option can auto-detect whether we're setting\n the format for items or albums.\n Sets the album property on the options extracted from the CLI.\n \"\"\"\n album = optparse.Option(*flags, action='store_true',\n help=u'match albums instead of tracks')\n self.add_option(album)\n self._album_flags = set(flags)\n\n def _set_format(self, option, opt_str, value, parser, target=None,\n fmt=None, store_true=False):\n \"\"\"Internal callback that sets the correct format while parsing CLI\n arguments.\n \"\"\"\n if store_true:\n setattr(parser.values, option.dest, True)\n\n # Use the explicitly specified format, or the string from the option.\n if fmt:\n value = fmt\n elif value:\n value, = decargs([value])\n else:\n value = u''\n\n parser.values.format = value\n if target:\n 
config[target._format_config_key].set(value)\n else:\n if self._album_flags:\n if parser.values.album:\n target = library.Album\n else:\n # the option is either missing either not parsed yet\n if self._album_flags & set(parser.rargs):\n target = library.Album\n else:\n target = library.Item\n config[target._format_config_key].set(value)\n else:\n config[library.Item._format_config_key].set(value)\n config[library.Album._format_config_key].set(value)\n\n def add_path_option(self, flags=('-p', '--path')):\n \"\"\"Add a -p/--path option to display the path instead of the default\n format.\n\n By default this affects both items and albums. If add_album_option()\n is used then the target will be autodetected.\n\n Sets the format property to u'$path' on the options extracted from the\n CLI.\n \"\"\"\n path = optparse.Option(*flags, nargs=0, action='callback',\n callback=self._set_format,\n callback_kwargs={'fmt': u'$path',\n 'store_true': True},\n help=u'print paths for matched items or albums')\n self.add_option(path)\n\n def add_format_option(self, flags=('-f', '--format'), target=None):\n \"\"\"Add -f/--format option to print some LibModel instances with a\n custom format.\n\n `target` is optional and can be one of ``library.Item``, 'item',\n ``library.Album`` and 'album'.\n\n Several behaviors are available:\n - if `target` is given then the format is only applied to that\n LibModel\n - if the album option is used then the target will be autodetected\n - otherwise the format is applied to both items and albums.\n\n Sets the format property on the options extracted from the CLI.\n \"\"\"\n kwargs = {}\n if target:\n if isinstance(target, six.string_types):\n target = {'item': library.Item,\n 'album': library.Album}[target]\n kwargs['target'] = target\n\n opt = optparse.Option(*flags, action='callback',\n callback=self._set_format,\n callback_kwargs=kwargs,\n help=u'print with custom format')\n self.add_option(opt)\n\n def add_all_common_options(self):\n \"\"\"Add album, path and format options.\n \"\"\"\n self.add_album_option()\n self.add_path_option()\n self.add_format_option()\n\n\n# Subcommand parsing infrastructure.\n#\n# This is a fairly generic subcommand parser for optparse. It is\n# maintained externally here:\n# http://gist.github.com/462717\n# There you will also find a better description of the code and a more\n# succinct example program.\n\nclass Subcommand(object):\n \"\"\"A subcommand of a root command-line application that may be\n invoked by a SubcommandOptionParser.\n \"\"\"\n def __init__(self, name, parser=None, help='', aliases=(), hide=False):\n \"\"\"Creates a new subcommand. name is the primary way to invoke\n the subcommand; aliases are alternate names. parser is an\n OptionParser responsible for parsing the subcommand's options.\n help is a short description of the command. 
If no parser is\n given, it defaults to a new, empty CommonOptionsParser.\n \"\"\"\n self.name = name\n self.parser = parser or CommonOptionsParser()\n self.aliases = aliases\n self.help = help\n self.hide = hide\n self._root_parser = None\n\n def print_help(self):\n self.parser.print_help()\n\n def parse_args(self, args):\n return self.parser.parse_args(args)\n\n @property\n def root_parser(self):\n return self._root_parser\n\n @root_parser.setter\n def root_parser(self, root_parser):\n self._root_parser = root_parser\n self.parser.prog = '{0} {1}'.format(\n as_string(root_parser.get_prog_name()), self.name)\n\n\nclass SubcommandsOptionParser(CommonOptionsParser):\n \"\"\"A variant of OptionParser that parses subcommands and their\n arguments.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Create a new subcommand-aware option parser. All of the\n options to OptionParser.__init__ are supported in addition\n to subcommands, a sequence of Subcommand objects.\n \"\"\"\n # A more helpful default usage.\n if 'usage' not in kwargs:\n kwargs['usage'] = u\"\"\"\n %prog COMMAND [ARGS...]\n %prog help COMMAND\"\"\"\n kwargs['add_help_option'] = False\n\n # Super constructor.\n super(SubcommandsOptionParser, self).__init__(*args, **kwargs)\n\n # Our root parser needs to stop on the first unrecognized argument.\n self.disable_interspersed_args()\n\n self.subcommands = []\n\n def add_subcommand(self, *cmds):\n \"\"\"Adds a Subcommand object to the parser's list of commands.\n \"\"\"\n for cmd in cmds:\n cmd.root_parser = self\n self.subcommands.append(cmd)\n\n # Add the list of subcommands to the help message.\n def format_help(self, formatter=None):\n # Get the original help message, to which we will append.\n out = super(SubcommandsOptionParser, self).format_help(formatter)\n if formatter is None:\n formatter = self.formatter\n\n # Subcommands header.\n result = [\"\\n\"]\n result.append(formatter.format_heading('Commands'))\n formatter.indent()\n\n # Generate the display names (including aliases).\n # Also determine the help position.\n disp_names = []\n help_position = 0\n subcommands = [c for c in self.subcommands if not c.hide]\n subcommands.sort(key=lambda c: c.name)\n for subcommand in subcommands:\n name = subcommand.name\n if subcommand.aliases:\n name += ' (%s)' % ', '.join(subcommand.aliases)\n disp_names.append(name)\n\n # Set the help position based on the max width.\n proposed_help_position = len(name) + formatter.current_indent + 2\n if proposed_help_position <= formatter.max_help_position:\n help_position = max(help_position, proposed_help_position)\n\n # Add each subcommand to the output.\n for subcommand, name in zip(subcommands, disp_names):\n # Lifted directly from optparse.py.\n name_width = help_position - formatter.current_indent - 2\n if len(name) > name_width:\n name = \"%*s%s\\n\" % (formatter.current_indent, \"\", name)\n indent_first = help_position\n else:\n name = \"%*s%-*s \" % (formatter.current_indent, \"\",\n name_width, name)\n indent_first = 0\n result.append(name)\n help_width = formatter.width - help_position\n help_lines = textwrap.wrap(subcommand.help, help_width)\n help_line = help_lines[0] if help_lines else ''\n result.append(\"%*s%s\\n\" % (indent_first, \"\", help_line))\n result.extend([\"%*s%s\\n\" % (help_position, \"\", line)\n for line in help_lines[1:]])\n formatter.dedent()\n\n # Concatenate the original help message with the subcommand\n # list.\n return out + \"\".join(result)\n\n def _subcommand_for_name(self, name):\n \"\"\"Return the 
subcommand in self.subcommands matching the\n given name. The name may either be the name of a subcommand or\n an alias. If no subcommand matches, returns None.\n \"\"\"\n for subcommand in self.subcommands:\n if name == subcommand.name or \\\n name in subcommand.aliases:\n return subcommand\n return None\n\n def parse_global_options(self, args):\n \"\"\"Parse options up to the subcommand argument. Returns a tuple\n of the options object and the remaining arguments.\n \"\"\"\n options, subargs = self.parse_args(args)\n\n # Force the help command\n if options.help:\n subargs = ['help']\n elif options.version:\n subargs = ['version']\n return options, subargs\n\n def parse_subcommand(self, args):\n \"\"\"Given the `args` left unused by a `parse_global_options`,\n return the invoked subcommand, the subcommand options, and the\n subcommand arguments.\n \"\"\"\n # Help is default command\n if not args:\n args = ['help']\n\n cmdname = args.pop(0)\n subcommand = self._subcommand_for_name(cmdname)\n if not subcommand:\n raise UserError(u\"unknown command '{0}'\".format(cmdname))\n\n suboptions, subargs = subcommand.parse_args(args)\n return subcommand, suboptions, subargs\n\n\noptparse.Option.ALWAYS_TYPED_ACTIONS += ('callback',)\n\n\n# The main entry point and bootstrapping.\n\ndef _load_plugins(config):\n \"\"\"Load the plugins specified in the configuration.\n \"\"\"\n paths = config['pluginpath'].as_str_seq(split=False)\n paths = [util.normpath(p) for p in paths]\n log.debug(u'plugin paths: {0}', util.displayable_path(paths))\n\n # On Python 3, the search paths need to be unicode.\n paths = [util.py3_path(p) for p in paths]\n\n # Extend the `beetsplug` package to include the plugin paths.\n import beetsplug\n beetsplug.__path__ = paths + beetsplug.__path__\n\n # For backwards compatibility, also support plugin paths that\n # *contain* a `beetsplug` package.\n sys.path += paths\n\n plugins.load_plugins(config['plugins'].as_str_seq())\n plugins.send(\"pluginload\")\n return plugins\n\n\ndef _setup(options, lib=None):\n \"\"\"Prepare and global state and updates it with command line options.\n\n Returns a list of subcommands, a list of plugins, and a library instance.\n \"\"\"\n # Configure the MusicBrainz API.\n mb.configure()\n\n config = _configure(options)\n\n plugins = _load_plugins(config)\n\n # Get the default subcommands.\n from beets.ui.commands import default_commands\n\n subcommands = list(default_commands)\n subcommands.extend(plugins.commands())\n\n if lib is None:\n lib = _open_library(config)\n plugins.send(\"library_opened\", lib=lib)\n library.Item._types.update(plugins.types(library.Item))\n library.Album._types.update(plugins.types(library.Album))\n\n return subcommands, plugins, lib\n\n\ndef _configure(options):\n \"\"\"Amend the global configuration object with command line options.\n \"\"\"\n # Add any additional config files specified with --config. 
This\n # special handling lets specified plugins get loaded before we\n # finish parsing the command line.\n if getattr(options, 'config', None) is not None:\n overlay_path = options.config\n del options.config\n config.set_file(overlay_path)\n else:\n overlay_path = None\n config.set_args(options)\n\n # Configure the logger.\n if config['verbose'].get(int):\n log.set_global_level(logging.DEBUG)\n else:\n log.set_global_level(logging.INFO)\n\n if overlay_path:\n log.debug(u'overlaying configuration: {0}',\n util.displayable_path(overlay_path))\n\n config_path = config.user_config_path()\n if os.path.isfile(config_path):\n log.debug(u'user configuration: {0}',\n util.displayable_path(config_path))\n else:\n log.debug(u'no user configuration found at {0}',\n util.displayable_path(config_path))\n\n log.debug(u'data directory: {0}',\n util.displayable_path(config.config_dir()))\n return config\n\n\ndef _open_library(config):\n \"\"\"Create a new library instance from the configuration.\n \"\"\"\n dbpath = util.bytestring_path(config['library'].as_filename())\n try:\n lib = library.Library(\n dbpath,\n config['directory'].as_filename(),\n get_path_formats(),\n get_replacements(),\n )\n lib.get_item(0) # Test database connection.\n except (sqlite3.OperationalError, sqlite3.DatabaseError):\n log.debug(u'{}', traceback.format_exc())\n raise UserError(u\"database file {0} could not be opened\".format(\n util.displayable_path(dbpath)\n ))\n log.debug(u'library database: {0}\\n'\n u'library directory: {1}',\n util.displayable_path(lib.path),\n util.displayable_path(lib.directory))\n return lib\n\n\ndef _raw_main(args, lib=None):\n \"\"\"A helper function for `main` without top-level exception\n handling.\n \"\"\"\n parser = SubcommandsOptionParser()\n parser.add_format_option(flags=('--format-item',), target=library.Item)\n parser.add_format_option(flags=('--format-album',), target=library.Album)\n parser.add_option('-l', '--library', dest='library',\n help=u'library database file to use')\n parser.add_option('-d', '--directory', dest='directory',\n help=u\"destination music directory\")\n parser.add_option('-v', '--verbose', dest='verbose', action='count',\n help=u'log more details (use twice for even more)')\n parser.add_option('-c', '--config', dest='config',\n help=u'path to configuration file')\n parser.add_option('-h', '--help', dest='help', action='store_true',\n help=u'show this help message and exit')\n parser.add_option('--version', dest='version', action='store_true',\n help=optparse.SUPPRESS_HELP)\n\n options, subargs = parser.parse_global_options(args)\n\n # Special case for the `config --edit` command: bypass _setup so\n # that an invalid configuration does not prevent the editor from\n # starting.\n if subargs and subargs[0] == 'config' \\\n and ('-e' in subargs or '--edit' in subargs):\n from beets.ui.commands import config_edit\n return config_edit()\n\n test_lib = bool(lib)\n subcommands, plugins, lib = _setup(options, lib)\n parser.add_subcommand(*subcommands)\n\n subcommand, suboptions, subargs = parser.parse_subcommand(subargs)\n subcommand.func(lib, suboptions, subargs)\n\n plugins.send('cli_exit', lib=lib)\n if not test_lib:\n # Clean up the library unless it came from the test harness.\n lib._close()\n\n\ndef main(args=None):\n \"\"\"Run the main command-line interface for beets. 
Includes top-level\n exception handlers that print friendly error messages.\n \"\"\"\n try:\n _raw_main(args)\n except UserError as exc:\n message = exc.args[0] if exc.args else None\n log.error(u'error: {0}', message)\n sys.exit(1)\n except util.HumanReadableException as exc:\n exc.log(log)\n sys.exit(1)\n except library.FileOperationError as exc:\n # These errors have reasonable human-readable descriptions, but\n # we still want to log their tracebacks for debugging.\n log.debug('{}', traceback.format_exc())\n log.error('{}', exc)\n sys.exit(1)\n except confit.ConfigError as exc:\n log.error(u'configuration error: {0}', exc)\n sys.exit(1)\n except db_query.InvalidQueryError as exc:\n log.error(u'invalid query: {0}', exc)\n sys.exit(1)\n except IOError as exc:\n if exc.errno == errno.EPIPE:\n # \"Broken pipe\". End silently.\n pass\n else:\n raise\n except KeyboardInterrupt:\n # Silently ignore ^C except in verbose mode.\n log.debug(u'{}', traceback.format_exc())\n except db.DBAccessError as exc:\n log.error(\n u'database access error: {0}\\n'\n u'the library file might have a permissions problem',\n exc\n )\n sys.exit(1)\n", "path": "beets/ui/__init__.py" } ]
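Context for this record (my commentary, not part of the dataset): in the before version above, `main()` swallows EPIPE with a bare `pass`. On Python 3 that is not enough, because the interpreter flushes and closes `sys.stdout` during shutdown; that final flush hits the dead pipe again and Python prints an "Exception ignored ... BrokenPipeError" report to stderr. The sketch below is a minimal, self-contained reproduction (script and helper names are illustrative); it already applies the same `sys.stderr.close()` mitigation that the after version and diff below adopt.

```python
# Illustrative reproduction sketch, not from the record.
# Run as:  python emit.py | head -n 1
# If the `sys.stderr.close()` line is replaced with a bare `pass`,
# Python 3 still prints "Exception ignored ... BrokenPipeError" at
# interpreter shutdown, because the final flush of sys.stdout hits
# the already-closed pipe again.
import errno
import sys


def emit():
    try:
        for i in range(100000):
            print(i)
    except IOError as exc:  # BrokenPipeError subclasses OSError/IOError
        if exc.errno == errno.EPIPE:
            # Closing stderr keeps the interpreter's shutdown-time
            # "Exception ignored" report from being displayed, since
            # that report is written to stderr.
            sys.stderr.close()
        else:
            raise


if __name__ == '__main__':
    emit()
```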
[ { "content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"This module contains all of the core logic for beets' command-line\ninterface. To invoke the CLI, just call beets.ui.main(). The actual\nCLI commands are implemented in the ui.commands module.\n\"\"\"\n\nfrom __future__ import division, absolute_import, print_function\n\nimport optparse\nimport textwrap\nimport sys\nfrom difflib import SequenceMatcher\nimport sqlite3\nimport errno\nimport re\nimport struct\nimport traceback\nimport os.path\nfrom six.moves import input\n\nfrom beets import logging\nfrom beets import library\nfrom beets import plugins\nfrom beets import util\nfrom beets.util.functemplate import Template\nfrom beets import config\nfrom beets.util import confit, as_string\nfrom beets.autotag import mb\nfrom beets.dbcore import query as db_query\nfrom beets.dbcore import db\nimport six\n\n# On Windows platforms, use colorama to support \"ANSI\" terminal colors.\nif sys.platform == 'win32':\n try:\n import colorama\n except ImportError:\n pass\n else:\n colorama.init()\n\n\nlog = logging.getLogger('beets')\nif not log.handlers:\n log.addHandler(logging.StreamHandler())\nlog.propagate = False # Don't propagate to root handler.\n\n\nPF_KEY_QUERIES = {\n 'comp': u'comp:true',\n 'singleton': u'singleton:true',\n}\n\n\nclass UserError(Exception):\n \"\"\"UI exception. Commands should throw this in order to display\n nonrecoverable errors to the user.\n \"\"\"\n\n\n# Encoding utilities.\n\n\ndef _in_encoding():\n \"\"\"Get the encoding to use for *inputting* strings from the console.\n \"\"\"\n return _stream_encoding(sys.stdin)\n\n\ndef _out_encoding():\n \"\"\"Get the encoding to use for *outputting* strings to the console.\n \"\"\"\n return _stream_encoding(sys.stdout)\n\n\ndef _stream_encoding(stream, default='utf-8'):\n \"\"\"A helper for `_in_encoding` and `_out_encoding`: get the stream's\n preferred encoding, using a configured override or a default\n fallback if neither is not specified.\n \"\"\"\n # Configured override?\n encoding = config['terminal_encoding'].get()\n if encoding:\n return encoding\n\n # For testing: When sys.stdout or sys.stdin is a StringIO under the\n # test harness, it doesn't have an `encoding` attribute. 
Just use\n # UTF-8.\n if not hasattr(stream, 'encoding'):\n return default\n\n # Python's guessed output stream encoding, or UTF-8 as a fallback\n # (e.g., when piped to a file).\n return stream.encoding or default\n\n\ndef decargs(arglist):\n \"\"\"Given a list of command-line argument bytestrings, attempts to\n decode them to Unicode strings when running under Python 2.\n \"\"\"\n if six.PY2:\n return [s.decode(util.arg_encoding()) for s in arglist]\n else:\n return arglist\n\n\ndef print_(*strings, **kwargs):\n \"\"\"Like print, but rather than raising an error when a character\n is not in the terminal's encoding's character set, just silently\n replaces it.\n\n The arguments must be Unicode strings: `unicode` on Python 2; `str` on\n Python 3.\n\n The `end` keyword argument behaves similarly to the built-in `print`\n (it defaults to a newline).\n \"\"\"\n if not strings:\n strings = [u'']\n assert isinstance(strings[0], six.text_type)\n\n txt = u' '.join(strings)\n txt += kwargs.get('end', u'\\n')\n\n # Encode the string and write it to stdout.\n if six.PY2:\n # On Python 2, sys.stdout expects bytes.\n out = txt.encode(_out_encoding(), 'replace')\n sys.stdout.write(out)\n else:\n # On Python 3, sys.stdout expects text strings and uses the\n # exception-throwing encoding error policy. To avoid throwing\n # errors and use our configurable encoding override, we use the\n # underlying bytes buffer instead.\n if hasattr(sys.stdout, 'buffer'):\n out = txt.encode(_out_encoding(), 'replace')\n sys.stdout.buffer.write(out)\n sys.stdout.buffer.flush()\n else:\n # In our test harnesses (e.g., DummyOut), sys.stdout.buffer\n # does not exist. We instead just record the text string.\n sys.stdout.write(txt)\n\n\n# Configuration wrappers.\n\ndef _bool_fallback(a, b):\n \"\"\"Given a boolean or None, return the original value or a fallback.\n \"\"\"\n if a is None:\n assert isinstance(b, bool)\n return b\n else:\n assert isinstance(a, bool)\n return a\n\n\ndef should_write(write_opt=None):\n \"\"\"Decide whether a command that updates metadata should also write\n tags, using the importer configuration as the default.\n \"\"\"\n return _bool_fallback(write_opt, config['import']['write'].get(bool))\n\n\ndef should_move(move_opt=None):\n \"\"\"Decide whether a command that updates metadata should also move\n files when they're inside the library, using the importer\n configuration as the default.\n\n Specifically, commands should move files after metadata updates only\n when the importer is configured *either* to move *or* to copy files.\n They should avoid moving files when the importer is configured not\n to touch any filenames.\n \"\"\"\n return _bool_fallback(\n move_opt,\n config['import']['move'].get(bool) or\n config['import']['copy'].get(bool)\n )\n\n\n# Input prompts.\n\ndef input_(prompt=None):\n \"\"\"Like `input`, but decodes the result to a Unicode string.\n Raises a UserError if stdin is not available. The prompt is sent to\n stdout rather than stderr. 
A printed between the prompt and the\n input cursor.\n \"\"\"\n # raw_input incorrectly sends prompts to stderr, not stdout, so we\n # use print_() explicitly to display prompts.\n # http://bugs.python.org/issue1927\n if prompt:\n print_(prompt, end=u' ')\n\n try:\n resp = input()\n except EOFError:\n raise UserError(u'stdin stream ended while input required')\n\n if six.PY2:\n return resp.decode(_in_encoding(), 'ignore')\n else:\n return resp\n\n\ndef input_options(options, require=False, prompt=None, fallback_prompt=None,\n numrange=None, default=None, max_width=72):\n \"\"\"Prompts a user for input. The sequence of `options` defines the\n choices the user has. A single-letter shortcut is inferred for each\n option; the user's choice is returned as that single, lower-case\n letter. The options should be provided as lower-case strings unless\n a particular shortcut is desired; in that case, only that letter\n should be capitalized.\n\n By default, the first option is the default. `default` can be provided to\n override this. If `require` is provided, then there is no default. The\n prompt and fallback prompt are also inferred but can be overridden.\n\n If numrange is provided, it is a pair of `(high, low)` (both ints)\n indicating that, in addition to `options`, the user may enter an\n integer in that inclusive range.\n\n `max_width` specifies the maximum number of columns in the\n automatically generated prompt string.\n \"\"\"\n # Assign single letters to each option. Also capitalize the options\n # to indicate the letter.\n letters = {}\n display_letters = []\n capitalized = []\n first = True\n for option in options:\n # Is a letter already capitalized?\n for letter in option:\n if letter.isalpha() and letter.upper() == letter:\n found_letter = letter\n break\n else:\n # Infer a letter.\n for letter in option:\n if not letter.isalpha():\n continue # Don't use punctuation.\n if letter not in letters:\n found_letter = letter\n break\n else:\n raise ValueError(u'no unambiguous lettering found')\n\n letters[found_letter.lower()] = option\n index = option.index(found_letter)\n\n # Mark the option's shortcut letter for display.\n if not require and (\n (default is None and not numrange and first) or\n (isinstance(default, six.string_types) and\n found_letter.lower() == default.lower())):\n # The first option is the default; mark it.\n show_letter = '[%s]' % found_letter.upper()\n is_default = True\n else:\n show_letter = found_letter.upper()\n is_default = False\n\n # Colorize the letter shortcut.\n show_letter = colorize('action_default' if is_default else 'action',\n show_letter)\n\n # Insert the highlighted letter back into the word.\n capitalized.append(\n option[:index] + show_letter + option[index + 1:]\n )\n display_letters.append(found_letter.upper())\n\n first = False\n\n # The default is just the first option if unspecified.\n if require:\n default = None\n elif default is None:\n if numrange:\n default = numrange[0]\n else:\n default = display_letters[0].lower()\n\n # Make a prompt if one is not provided.\n if not prompt:\n prompt_parts = []\n prompt_part_lengths = []\n if numrange:\n if isinstance(default, int):\n default_name = six.text_type(default)\n default_name = colorize('action_default', default_name)\n tmpl = '# selection (default %s)'\n prompt_parts.append(tmpl % default_name)\n prompt_part_lengths.append(len(tmpl % six.text_type(default)))\n else:\n prompt_parts.append('# selection')\n prompt_part_lengths.append(len(prompt_parts[-1]))\n prompt_parts += capitalized\n 
prompt_part_lengths += [len(s) for s in options]\n\n # Wrap the query text.\n prompt = ''\n line_length = 0\n for i, (part, length) in enumerate(zip(prompt_parts,\n prompt_part_lengths)):\n # Add punctuation.\n if i == len(prompt_parts) - 1:\n part += '?'\n else:\n part += ','\n length += 1\n\n # Choose either the current line or the beginning of the next.\n if line_length + length + 1 > max_width:\n prompt += '\\n'\n line_length = 0\n\n if line_length != 0:\n # Not the beginning of the line; need a space.\n part = ' ' + part\n length += 1\n\n prompt += part\n line_length += length\n\n # Make a fallback prompt too. This is displayed if the user enters\n # something that is not recognized.\n if not fallback_prompt:\n fallback_prompt = u'Enter one of '\n if numrange:\n fallback_prompt += u'%i-%i, ' % numrange\n fallback_prompt += ', '.join(display_letters) + ':'\n\n resp = input_(prompt)\n while True:\n resp = resp.strip().lower()\n\n # Try default option.\n if default is not None and not resp:\n resp = default\n\n # Try an integer input if available.\n if numrange:\n try:\n resp = int(resp)\n except ValueError:\n pass\n else:\n low, high = numrange\n if low <= resp <= high:\n return resp\n else:\n resp = None\n\n # Try a normal letter input.\n if resp:\n resp = resp[0]\n if resp in letters:\n return resp\n\n # Prompt for new input.\n resp = input_(fallback_prompt)\n\n\ndef input_yn(prompt, require=False):\n \"\"\"Prompts the user for a \"yes\" or \"no\" response. The default is\n \"yes\" unless `require` is `True`, in which case there is no default.\n \"\"\"\n sel = input_options(\n ('y', 'n'), require, prompt, u'Enter Y or N:'\n )\n return sel == u'y'\n\n\ndef input_select_objects(prompt, objs, rep):\n \"\"\"Prompt to user to choose all, none, or some of the given objects.\n Return the list of selected objects.\n\n `prompt` is the prompt string to use for each question (it should be\n phrased as an imperative verb). `rep` is a function to call on each\n object to print it out when confirming objects individually.\n \"\"\"\n choice = input_options(\n (u'y', u'n', u's'), False,\n u'%s? (Yes/no/select)' % prompt)\n print() # Blank line.\n\n if choice == u'y': # Yes.\n return objs\n\n elif choice == u's': # Select.\n out = []\n for obj in objs:\n rep(obj)\n if input_yn(u'%s? 
(yes/no)' % prompt, True):\n out.append(obj)\n print() # go to a new line\n return out\n\n else: # No.\n return []\n\n\n# Human output formatting.\n\ndef human_bytes(size):\n \"\"\"Formats size, a number of bytes, in a human-readable way.\"\"\"\n powers = [u'', u'K', u'M', u'G', u'T', u'P', u'E', u'Z', u'Y', u'H']\n unit = 'B'\n for power in powers:\n if size < 1024:\n return u\"%3.1f %s%s\" % (size, power, unit)\n size /= 1024.0\n unit = u'iB'\n return u\"big\"\n\n\ndef human_seconds(interval):\n \"\"\"Formats interval, a number of seconds, as a human-readable time\n interval using English words.\n \"\"\"\n units = [\n (1, u'second'),\n (60, u'minute'),\n (60, u'hour'),\n (24, u'day'),\n (7, u'week'),\n (52, u'year'),\n (10, u'decade'),\n ]\n for i in range(len(units) - 1):\n increment, suffix = units[i]\n next_increment, _ = units[i + 1]\n interval /= float(increment)\n if interval < next_increment:\n break\n else:\n # Last unit.\n increment, suffix = units[-1]\n interval /= float(increment)\n\n return u\"%3.1f %ss\" % (interval, suffix)\n\n\ndef human_seconds_short(interval):\n \"\"\"Formats a number of seconds as a short human-readable M:SS\n string.\n \"\"\"\n interval = int(interval)\n return u'%i:%02i' % (interval // 60, interval % 60)\n\n\n# Colorization.\n\n# ANSI terminal colorization code heavily inspired by pygments:\n# http://dev.pocoo.org/hg/pygments-main/file/b2deea5b5030/pygments/console.py\n# (pygments is by Tim Hatch, Armin Ronacher, et al.)\nCOLOR_ESCAPE = \"\\x1b[\"\nDARK_COLORS = {\n \"black\": 0,\n \"darkred\": 1,\n \"darkgreen\": 2,\n \"brown\": 3,\n \"darkyellow\": 3,\n \"darkblue\": 4,\n \"purple\": 5,\n \"darkmagenta\": 5,\n \"teal\": 6,\n \"darkcyan\": 6,\n \"lightgray\": 7\n}\nLIGHT_COLORS = {\n \"darkgray\": 0,\n \"red\": 1,\n \"green\": 2,\n \"yellow\": 3,\n \"blue\": 4,\n \"fuchsia\": 5,\n \"magenta\": 5,\n \"turquoise\": 6,\n \"cyan\": 6,\n \"white\": 7\n}\nRESET_COLOR = COLOR_ESCAPE + \"39;49;00m\"\n\n# These abstract COLOR_NAMES are lazily mapped on to the actual color in COLORS\n# as they are defined in the configuration files, see function: colorize\nCOLOR_NAMES = ['text_success', 'text_warning', 'text_error', 'text_highlight',\n 'text_highlight_minor', 'action_default', 'action']\nCOLORS = None\n\n\ndef _colorize(color, text):\n \"\"\"Returns a string that prints the given text in the given color\n in a terminal that is ANSI color-aware. The color must be something\n in DARK_COLORS or LIGHT_COLORS.\n \"\"\"\n if color in DARK_COLORS:\n escape = COLOR_ESCAPE + \"%im\" % (DARK_COLORS[color] + 30)\n elif color in LIGHT_COLORS:\n escape = COLOR_ESCAPE + \"%i;01m\" % (LIGHT_COLORS[color] + 30)\n else:\n raise ValueError(u'no such color %s', color)\n return escape + text + RESET_COLOR\n\n\ndef colorize(color_name, text):\n \"\"\"Colorize text if colored output is enabled. 
(Like _colorize but\n conditional.)\n \"\"\"\n if config['ui']['color']:\n global COLORS\n if not COLORS:\n COLORS = dict((name,\n config['ui']['colors'][name].as_str())\n for name in COLOR_NAMES)\n # In case a 3rd party plugin is still passing the actual color ('red')\n # instead of the abstract color name ('text_error')\n color = COLORS.get(color_name)\n if not color:\n log.debug(u'Invalid color_name: {0}', color_name)\n color = color_name\n return _colorize(color, text)\n else:\n return text\n\n\ndef _colordiff(a, b, highlight='text_highlight',\n minor_highlight='text_highlight_minor'):\n \"\"\"Given two values, return the same pair of strings except with\n their differences highlighted in the specified color. Strings are\n highlighted intelligently to show differences; other values are\n stringified and highlighted in their entirety.\n \"\"\"\n if not isinstance(a, six.string_types) \\\n or not isinstance(b, six.string_types):\n # Non-strings: use ordinary equality.\n a = six.text_type(a)\n b = six.text_type(b)\n if a == b:\n return a, b\n else:\n return colorize(highlight, a), colorize(highlight, b)\n\n if isinstance(a, bytes) or isinstance(b, bytes):\n # A path field.\n a = util.displayable_path(a)\n b = util.displayable_path(b)\n\n a_out = []\n b_out = []\n\n matcher = SequenceMatcher(lambda x: False, a, b)\n for op, a_start, a_end, b_start, b_end in matcher.get_opcodes():\n if op == 'equal':\n # In both strings.\n a_out.append(a[a_start:a_end])\n b_out.append(b[b_start:b_end])\n elif op == 'insert':\n # Right only.\n b_out.append(colorize(highlight, b[b_start:b_end]))\n elif op == 'delete':\n # Left only.\n a_out.append(colorize(highlight, a[a_start:a_end]))\n elif op == 'replace':\n # Right and left differ. Colorise with second highlight if\n # it's just a case change.\n if a[a_start:a_end].lower() != b[b_start:b_end].lower():\n color = highlight\n else:\n color = minor_highlight\n a_out.append(colorize(color, a[a_start:a_end]))\n b_out.append(colorize(color, b[b_start:b_end]))\n else:\n assert(False)\n\n return u''.join(a_out), u''.join(b_out)\n\n\ndef colordiff(a, b, highlight='text_highlight'):\n \"\"\"Colorize differences between two values if color is enabled.\n (Like _colordiff but conditional.)\n \"\"\"\n if config['ui']['color']:\n return _colordiff(a, b, highlight)\n else:\n return six.text_type(a), six.text_type(b)\n\n\ndef get_path_formats(subview=None):\n \"\"\"Get the configuration's path formats as a list of query/template\n pairs.\n \"\"\"\n path_formats = []\n subview = subview or config['paths']\n for query, view in subview.items():\n query = PF_KEY_QUERIES.get(query, query) # Expand common queries.\n path_formats.append((query, Template(view.as_str())))\n return path_formats\n\n\ndef get_replacements():\n \"\"\"Confit validation function that reads regex/string pairs.\n \"\"\"\n replacements = []\n for pattern, repl in config['replace'].get(dict).items():\n repl = repl or ''\n try:\n replacements.append((re.compile(pattern), repl))\n except re.error:\n raise UserError(\n u'malformed regular expression in replace: {0}'.format(\n pattern\n )\n )\n return replacements\n\n\ndef term_width():\n \"\"\"Get the width (columns) of the terminal.\"\"\"\n fallback = config['ui']['terminal_width'].get(int)\n\n # The fcntl and termios modules are not available on non-Unix\n # platforms, so we fall back to a constant.\n try:\n import fcntl\n import termios\n except ImportError:\n return fallback\n\n try:\n buf = fcntl.ioctl(0, termios.TIOCGWINSZ, ' ' * 4)\n except IOError:\n 
return fallback\n try:\n height, width = struct.unpack('hh', buf)\n except struct.error:\n return fallback\n return width\n\n\nFLOAT_EPSILON = 0.01\n\n\ndef _field_diff(field, old, new):\n \"\"\"Given two Model objects, format their values for `field` and\n highlight changes among them. Return a human-readable string. If the\n value has not changed, return None instead.\n \"\"\"\n oldval = old.get(field)\n newval = new.get(field)\n\n # If no change, abort.\n if isinstance(oldval, float) and isinstance(newval, float) and \\\n abs(oldval - newval) < FLOAT_EPSILON:\n return None\n elif oldval == newval:\n return None\n\n # Get formatted values for output.\n oldstr = old.formatted().get(field, u'')\n newstr = new.formatted().get(field, u'')\n\n # For strings, highlight changes. For others, colorize the whole\n # thing.\n if isinstance(oldval, six.string_types):\n oldstr, newstr = colordiff(oldval, newstr)\n else:\n oldstr = colorize('text_error', oldstr)\n newstr = colorize('text_error', newstr)\n\n return u'{0} -> {1}'.format(oldstr, newstr)\n\n\ndef show_model_changes(new, old=None, fields=None, always=False):\n \"\"\"Given a Model object, print a list of changes from its pristine\n version stored in the database. Return a boolean indicating whether\n any changes were found.\n\n `old` may be the \"original\" object to avoid using the pristine\n version from the database. `fields` may be a list of fields to\n restrict the detection to. `always` indicates whether the object is\n always identified, regardless of whether any changes are present.\n \"\"\"\n old = old or new._db._get(type(new), new.id)\n\n # Build up lines showing changed fields.\n changes = []\n for field in old:\n # Subset of the fields. Never show mtime.\n if field == 'mtime' or (fields and field not in fields):\n continue\n\n # Detect and show difference for this field.\n line = _field_diff(field, old, new)\n if line:\n changes.append(u' {0}: {1}'.format(field, line))\n\n # New fields.\n for field in set(new) - set(old):\n if fields and field not in fields:\n continue\n\n changes.append(u' {0}: {1}'.format(\n field,\n colorize('text_highlight', new.formatted()[field])\n ))\n\n # Print changes.\n if changes or always:\n print_(format(old))\n if changes:\n print_(u'\\n'.join(changes))\n\n return bool(changes)\n\n\ndef show_path_changes(path_changes):\n \"\"\"Given a list of tuples (source, destination) that indicate the\n path changes, log the changes as INFO-level output to the beets log.\n The output is guaranteed to be unicode.\n\n Every pair is shown on a single line if the terminal width permits it,\n else it is split over two lines. 
E.g.,\n\n Source -> Destination\n\n vs.\n\n Source\n -> Destination\n \"\"\"\n sources, destinations = zip(*path_changes)\n\n # Ensure unicode output\n sources = list(map(util.displayable_path, sources))\n destinations = list(map(util.displayable_path, destinations))\n\n # Calculate widths for terminal split\n col_width = (term_width() - len(' -> ')) // 2\n max_width = len(max(sources + destinations, key=len))\n\n if max_width > col_width:\n # Print every change over two lines\n for source, dest in zip(sources, destinations):\n log.info(u'{0} \\n -> {1}', source, dest)\n else:\n # Print every change on a single line, and add a header\n title_pad = max_width - len('Source ') + len(' -> ')\n\n log.info(u'Source {0} Destination', ' ' * title_pad)\n for source, dest in zip(sources, destinations):\n pad = max_width - len(source)\n log.info(u'{0} {1} -> {2}', source, ' ' * pad, dest)\n\n\n# Helper functions for option parsing.\n\ndef _store_dict(option, opt_str, value, parser):\n \"\"\"Custom action callback to parse options which have ``key=value``\n pairs as values. All such pairs passed for this option are\n aggregated into a dictionary.\n \"\"\"\n dest = option.dest\n option_values = getattr(parser.values, dest, None)\n\n if option_values is None:\n # This is the first supplied ``key=value`` pair of option.\n # Initialize empty dictionary and get a reference to it.\n setattr(parser.values, dest, dict())\n option_values = getattr(parser.values, dest)\n\n try:\n key, value = map(lambda s: util.text_string(s), value.split('='))\n if not (key and value):\n raise ValueError\n except ValueError:\n raise UserError(\n \"supplied argument `{0}' is not of the form `key=value'\"\n .format(value))\n\n option_values[key] = value\n\n\nclass CommonOptionsParser(optparse.OptionParser, object):\n \"\"\"Offers a simple way to add common formatting options.\n\n Options available include:\n - matching albums instead of tracks: add_album_option()\n - showing paths instead of items/albums: add_path_option()\n - changing the format of displayed items/albums: add_format_option()\n\n The last one can have several behaviors:\n - against a special target\n - with a certain format\n - autodetected target with the album option\n\n Each method is fully documented in the related method.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(CommonOptionsParser, self).__init__(*args, **kwargs)\n self._album_flags = False\n # this serves both as an indicator that we offer the feature AND allows\n # us to check whether it has been specified on the CLI - bypassing the\n # fact that arguments may be in any order\n\n def add_album_option(self, flags=('-a', '--album')):\n \"\"\"Add a -a/--album option to match albums instead of tracks.\n\n If used then the format option can auto-detect whether we're setting\n the format for items or albums.\n Sets the album property on the options extracted from the CLI.\n \"\"\"\n album = optparse.Option(*flags, action='store_true',\n help=u'match albums instead of tracks')\n self.add_option(album)\n self._album_flags = set(flags)\n\n def _set_format(self, option, opt_str, value, parser, target=None,\n fmt=None, store_true=False):\n \"\"\"Internal callback that sets the correct format while parsing CLI\n arguments.\n \"\"\"\n if store_true:\n setattr(parser.values, option.dest, True)\n\n # Use the explicitly specified format, or the string from the option.\n if fmt:\n value = fmt\n elif value:\n value, = decargs([value])\n else:\n value = u''\n\n parser.values.format = value\n if target:\n 
config[target._format_config_key].set(value)\n else:\n if self._album_flags:\n if parser.values.album:\n target = library.Album\n else:\n # the option is either missing either not parsed yet\n if self._album_flags & set(parser.rargs):\n target = library.Album\n else:\n target = library.Item\n config[target._format_config_key].set(value)\n else:\n config[library.Item._format_config_key].set(value)\n config[library.Album._format_config_key].set(value)\n\n def add_path_option(self, flags=('-p', '--path')):\n \"\"\"Add a -p/--path option to display the path instead of the default\n format.\n\n By default this affects both items and albums. If add_album_option()\n is used then the target will be autodetected.\n\n Sets the format property to u'$path' on the options extracted from the\n CLI.\n \"\"\"\n path = optparse.Option(*flags, nargs=0, action='callback',\n callback=self._set_format,\n callback_kwargs={'fmt': u'$path',\n 'store_true': True},\n help=u'print paths for matched items or albums')\n self.add_option(path)\n\n def add_format_option(self, flags=('-f', '--format'), target=None):\n \"\"\"Add -f/--format option to print some LibModel instances with a\n custom format.\n\n `target` is optional and can be one of ``library.Item``, 'item',\n ``library.Album`` and 'album'.\n\n Several behaviors are available:\n - if `target` is given then the format is only applied to that\n LibModel\n - if the album option is used then the target will be autodetected\n - otherwise the format is applied to both items and albums.\n\n Sets the format property on the options extracted from the CLI.\n \"\"\"\n kwargs = {}\n if target:\n if isinstance(target, six.string_types):\n target = {'item': library.Item,\n 'album': library.Album}[target]\n kwargs['target'] = target\n\n opt = optparse.Option(*flags, action='callback',\n callback=self._set_format,\n callback_kwargs=kwargs,\n help=u'print with custom format')\n self.add_option(opt)\n\n def add_all_common_options(self):\n \"\"\"Add album, path and format options.\n \"\"\"\n self.add_album_option()\n self.add_path_option()\n self.add_format_option()\n\n\n# Subcommand parsing infrastructure.\n#\n# This is a fairly generic subcommand parser for optparse. It is\n# maintained externally here:\n# http://gist.github.com/462717\n# There you will also find a better description of the code and a more\n# succinct example program.\n\nclass Subcommand(object):\n \"\"\"A subcommand of a root command-line application that may be\n invoked by a SubcommandOptionParser.\n \"\"\"\n def __init__(self, name, parser=None, help='', aliases=(), hide=False):\n \"\"\"Creates a new subcommand. name is the primary way to invoke\n the subcommand; aliases are alternate names. parser is an\n OptionParser responsible for parsing the subcommand's options.\n help is a short description of the command. 
If no parser is\n given, it defaults to a new, empty CommonOptionsParser.\n \"\"\"\n self.name = name\n self.parser = parser or CommonOptionsParser()\n self.aliases = aliases\n self.help = help\n self.hide = hide\n self._root_parser = None\n\n def print_help(self):\n self.parser.print_help()\n\n def parse_args(self, args):\n return self.parser.parse_args(args)\n\n @property\n def root_parser(self):\n return self._root_parser\n\n @root_parser.setter\n def root_parser(self, root_parser):\n self._root_parser = root_parser\n self.parser.prog = '{0} {1}'.format(\n as_string(root_parser.get_prog_name()), self.name)\n\n\nclass SubcommandsOptionParser(CommonOptionsParser):\n \"\"\"A variant of OptionParser that parses subcommands and their\n arguments.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Create a new subcommand-aware option parser. All of the\n options to OptionParser.__init__ are supported in addition\n to subcommands, a sequence of Subcommand objects.\n \"\"\"\n # A more helpful default usage.\n if 'usage' not in kwargs:\n kwargs['usage'] = u\"\"\"\n %prog COMMAND [ARGS...]\n %prog help COMMAND\"\"\"\n kwargs['add_help_option'] = False\n\n # Super constructor.\n super(SubcommandsOptionParser, self).__init__(*args, **kwargs)\n\n # Our root parser needs to stop on the first unrecognized argument.\n self.disable_interspersed_args()\n\n self.subcommands = []\n\n def add_subcommand(self, *cmds):\n \"\"\"Adds a Subcommand object to the parser's list of commands.\n \"\"\"\n for cmd in cmds:\n cmd.root_parser = self\n self.subcommands.append(cmd)\n\n # Add the list of subcommands to the help message.\n def format_help(self, formatter=None):\n # Get the original help message, to which we will append.\n out = super(SubcommandsOptionParser, self).format_help(formatter)\n if formatter is None:\n formatter = self.formatter\n\n # Subcommands header.\n result = [\"\\n\"]\n result.append(formatter.format_heading('Commands'))\n formatter.indent()\n\n # Generate the display names (including aliases).\n # Also determine the help position.\n disp_names = []\n help_position = 0\n subcommands = [c for c in self.subcommands if not c.hide]\n subcommands.sort(key=lambda c: c.name)\n for subcommand in subcommands:\n name = subcommand.name\n if subcommand.aliases:\n name += ' (%s)' % ', '.join(subcommand.aliases)\n disp_names.append(name)\n\n # Set the help position based on the max width.\n proposed_help_position = len(name) + formatter.current_indent + 2\n if proposed_help_position <= formatter.max_help_position:\n help_position = max(help_position, proposed_help_position)\n\n # Add each subcommand to the output.\n for subcommand, name in zip(subcommands, disp_names):\n # Lifted directly from optparse.py.\n name_width = help_position - formatter.current_indent - 2\n if len(name) > name_width:\n name = \"%*s%s\\n\" % (formatter.current_indent, \"\", name)\n indent_first = help_position\n else:\n name = \"%*s%-*s \" % (formatter.current_indent, \"\",\n name_width, name)\n indent_first = 0\n result.append(name)\n help_width = formatter.width - help_position\n help_lines = textwrap.wrap(subcommand.help, help_width)\n help_line = help_lines[0] if help_lines else ''\n result.append(\"%*s%s\\n\" % (indent_first, \"\", help_line))\n result.extend([\"%*s%s\\n\" % (help_position, \"\", line)\n for line in help_lines[1:]])\n formatter.dedent()\n\n # Concatenate the original help message with the subcommand\n # list.\n return out + \"\".join(result)\n\n def _subcommand_for_name(self, name):\n \"\"\"Return the 
subcommand in self.subcommands matching the\n given name. The name may either be the name of a subcommand or\n an alias. If no subcommand matches, returns None.\n \"\"\"\n for subcommand in self.subcommands:\n if name == subcommand.name or \\\n name in subcommand.aliases:\n return subcommand\n return None\n\n def parse_global_options(self, args):\n \"\"\"Parse options up to the subcommand argument. Returns a tuple\n of the options object and the remaining arguments.\n \"\"\"\n options, subargs = self.parse_args(args)\n\n # Force the help command\n if options.help:\n subargs = ['help']\n elif options.version:\n subargs = ['version']\n return options, subargs\n\n def parse_subcommand(self, args):\n \"\"\"Given the `args` left unused by a `parse_global_options`,\n return the invoked subcommand, the subcommand options, and the\n subcommand arguments.\n \"\"\"\n # Help is default command\n if not args:\n args = ['help']\n\n cmdname = args.pop(0)\n subcommand = self._subcommand_for_name(cmdname)\n if not subcommand:\n raise UserError(u\"unknown command '{0}'\".format(cmdname))\n\n suboptions, subargs = subcommand.parse_args(args)\n return subcommand, suboptions, subargs\n\n\noptparse.Option.ALWAYS_TYPED_ACTIONS += ('callback',)\n\n\n# The main entry point and bootstrapping.\n\ndef _load_plugins(config):\n \"\"\"Load the plugins specified in the configuration.\n \"\"\"\n paths = config['pluginpath'].as_str_seq(split=False)\n paths = [util.normpath(p) for p in paths]\n log.debug(u'plugin paths: {0}', util.displayable_path(paths))\n\n # On Python 3, the search paths need to be unicode.\n paths = [util.py3_path(p) for p in paths]\n\n # Extend the `beetsplug` package to include the plugin paths.\n import beetsplug\n beetsplug.__path__ = paths + beetsplug.__path__\n\n # For backwards compatibility, also support plugin paths that\n # *contain* a `beetsplug` package.\n sys.path += paths\n\n plugins.load_plugins(config['plugins'].as_str_seq())\n plugins.send(\"pluginload\")\n return plugins\n\n\ndef _setup(options, lib=None):\n \"\"\"Prepare and global state and updates it with command line options.\n\n Returns a list of subcommands, a list of plugins, and a library instance.\n \"\"\"\n # Configure the MusicBrainz API.\n mb.configure()\n\n config = _configure(options)\n\n plugins = _load_plugins(config)\n\n # Get the default subcommands.\n from beets.ui.commands import default_commands\n\n subcommands = list(default_commands)\n subcommands.extend(plugins.commands())\n\n if lib is None:\n lib = _open_library(config)\n plugins.send(\"library_opened\", lib=lib)\n library.Item._types.update(plugins.types(library.Item))\n library.Album._types.update(plugins.types(library.Album))\n\n return subcommands, plugins, lib\n\n\ndef _configure(options):\n \"\"\"Amend the global configuration object with command line options.\n \"\"\"\n # Add any additional config files specified with --config. 
This\n # special handling lets specified plugins get loaded before we\n # finish parsing the command line.\n if getattr(options, 'config', None) is not None:\n overlay_path = options.config\n del options.config\n config.set_file(overlay_path)\n else:\n overlay_path = None\n config.set_args(options)\n\n # Configure the logger.\n if config['verbose'].get(int):\n log.set_global_level(logging.DEBUG)\n else:\n log.set_global_level(logging.INFO)\n\n if overlay_path:\n log.debug(u'overlaying configuration: {0}',\n util.displayable_path(overlay_path))\n\n config_path = config.user_config_path()\n if os.path.isfile(config_path):\n log.debug(u'user configuration: {0}',\n util.displayable_path(config_path))\n else:\n log.debug(u'no user configuration found at {0}',\n util.displayable_path(config_path))\n\n log.debug(u'data directory: {0}',\n util.displayable_path(config.config_dir()))\n return config\n\n\ndef _open_library(config):\n \"\"\"Create a new library instance from the configuration.\n \"\"\"\n dbpath = util.bytestring_path(config['library'].as_filename())\n try:\n lib = library.Library(\n dbpath,\n config['directory'].as_filename(),\n get_path_formats(),\n get_replacements(),\n )\n lib.get_item(0) # Test database connection.\n except (sqlite3.OperationalError, sqlite3.DatabaseError):\n log.debug(u'{}', traceback.format_exc())\n raise UserError(u\"database file {0} could not be opened\".format(\n util.displayable_path(dbpath)\n ))\n log.debug(u'library database: {0}\\n'\n u'library directory: {1}',\n util.displayable_path(lib.path),\n util.displayable_path(lib.directory))\n return lib\n\n\ndef _raw_main(args, lib=None):\n \"\"\"A helper function for `main` without top-level exception\n handling.\n \"\"\"\n parser = SubcommandsOptionParser()\n parser.add_format_option(flags=('--format-item',), target=library.Item)\n parser.add_format_option(flags=('--format-album',), target=library.Album)\n parser.add_option('-l', '--library', dest='library',\n help=u'library database file to use')\n parser.add_option('-d', '--directory', dest='directory',\n help=u\"destination music directory\")\n parser.add_option('-v', '--verbose', dest='verbose', action='count',\n help=u'log more details (use twice for even more)')\n parser.add_option('-c', '--config', dest='config',\n help=u'path to configuration file')\n parser.add_option('-h', '--help', dest='help', action='store_true',\n help=u'show this help message and exit')\n parser.add_option('--version', dest='version', action='store_true',\n help=optparse.SUPPRESS_HELP)\n\n options, subargs = parser.parse_global_options(args)\n\n # Special case for the `config --edit` command: bypass _setup so\n # that an invalid configuration does not prevent the editor from\n # starting.\n if subargs and subargs[0] == 'config' \\\n and ('-e' in subargs or '--edit' in subargs):\n from beets.ui.commands import config_edit\n return config_edit()\n\n test_lib = bool(lib)\n subcommands, plugins, lib = _setup(options, lib)\n parser.add_subcommand(*subcommands)\n\n subcommand, suboptions, subargs = parser.parse_subcommand(subargs)\n subcommand.func(lib, suboptions, subargs)\n\n plugins.send('cli_exit', lib=lib)\n if not test_lib:\n # Clean up the library unless it came from the test harness.\n lib._close()\n\n\ndef main(args=None):\n \"\"\"Run the main command-line interface for beets. 
Includes top-level\n exception handlers that print friendly error messages.\n \"\"\"\n try:\n _raw_main(args)\n except UserError as exc:\n message = exc.args[0] if exc.args else None\n log.error(u'error: {0}', message)\n sys.exit(1)\n except util.HumanReadableException as exc:\n exc.log(log)\n sys.exit(1)\n except library.FileOperationError as exc:\n # These errors have reasonable human-readable descriptions, but\n # we still want to log their tracebacks for debugging.\n log.debug('{}', traceback.format_exc())\n log.error('{}', exc)\n sys.exit(1)\n except confit.ConfigError as exc:\n log.error(u'configuration error: {0}', exc)\n sys.exit(1)\n except db_query.InvalidQueryError as exc:\n log.error(u'invalid query: {0}', exc)\n sys.exit(1)\n except IOError as exc:\n if exc.errno == errno.EPIPE:\n # \"Broken pipe\". End silently.\n sys.stderr.close()\n else:\n raise\n except KeyboardInterrupt:\n # Silently ignore ^C except in verbose mode.\n log.debug(u'{}', traceback.format_exc())\n except db.DBAccessError as exc:\n log.error(\n u'database access error: {0}\\n'\n u'the library file might have a permissions problem',\n exc\n )\n sys.exit(1)\n", "path": "beets/ui/__init__.py" } ]
diff --git a/beets/ui/__init__.py b/beets/ui/__init__.py
index 6ba80d22f0..af2b79a198 100644
--- a/beets/ui/__init__.py
+++ b/beets/ui/__init__.py
@@ -1276,7 +1276,7 @@ def main(args=None):
     except IOError as exc:
         if exc.errno == errno.EPIPE:
             # "Broken pipe". End silently.
-            pass
+            sys.stderr.close()
         else:
             raise
     except KeyboardInterrupt:
diff --git a/docs/changelog.rst b/docs/changelog.rst
index 35785cc3ca..281f6a32ea 100644
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -8,6 +8,11 @@ Changelog goes here!
 
 Fixes:
 
+* Prevent Python from warning about a ``BrokenPipeError`` being ignored even
+  though we do take it into account. This was an issue when using beets in
+  simple shell scripts.
+  Thanks to :user:`Azphreal`.
+  :bug:`2622` :bug:`2631`
 * :doc:`/plugins/replaygain`: Fix a regression in the previous release
   related to the new R128 tags.
   :bug:`2615` :bug:`2623`
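A brief note on the design choice (my commentary, not part of the record): closing stderr is the smallest change that silences the shutdown-time report while keeping the intended silent exit. The CPython documentation's note on SIGPIPE suggests a related pattern for scripts, sketched below; it repoints the dangling stream at the null device so the interpreter's final flush has somewhere harmless to write.

```python
# Alternative mitigation, adapted from the CPython docs' SIGPIPE note
# (not what the beets patch does): redirect stdout to os.devnull before
# exiting, so the shutdown flush cannot raise BrokenPipeError at all.
import os
import sys


def exit_on_broken_pipe():
    devnull = os.open(os.devnull, os.O_WRONLY)
    os.dup2(devnull, sys.stdout.fileno())
    sys.exit(1)  # exiting non-zero after a broken pipe is conventional
```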
projectmesa__mesa-2125
Fix error in failing flocking benchmark
Our benchmarks are failing: https://github.com/projectmesa/mesa/actions/workflows/benchmarks.yml

```bash
08:41:17 starting benchmarks.
08:41:35 Schelling (small) timings: Init 0.00771 s; Run 0.0472 s
08:41:55 Schelling (large) timings: Init 0.05062 s; Run 0.4629 s
08:42:01 WolfSheep (small) timings: Init 0.00333 s; Run 0.0124 s
08:42:15 WolfSheep (large) timings: Init 0.05334 s; Run 0.2206 s
  File "/home/runner/work/mesa/mesa/benchmarks/global_benchmark.py", line 62, in <module>
    results = run_experiments(model, config)
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/runner/work/mesa/mesa/benchmarks/global_benchmark.py", line 47, in run_experiments
    init_time, run_time = run_model(model_class, seed, config["parameters"])
                          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/runner/work/mesa/mesa/benchmarks/global_benchmark.py", line 21, in run_model
    model = model_class(simulator=simulator, seed=seed, **parameters)
            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/runner/work/mesa/mesa/benchmarks/Flocking/flocking.py", line 139, in __init__
    boid = Boid(
           ^^^^^
TypeError: Boid.__init__() got an unexpected keyword argument 'pos'
Error: Process completed with exit code 1.
```

Most likely something was changed in [benchmarks/Flocking/flocking.py](https://github.com/projectmesa/mesa/blob/main/benchmarks/Flocking/flocking.py) that needs to be fixed/reverted.
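The traceback above is Python's generic complaint about a keyword argument that the target `__init__` never declared; nothing Mesa-specific is involved. A stripped-down sketch (class and arguments are illustrative, not the benchmark code):

```python
# Illustrative only: reproduces the class of error seen in the log above.
class Boid:
    def __init__(self, unique_id, model):  # note: no `pos` parameter
        self.unique_id = unique_id
        self.model = model


Boid(unique_id=1, model=None, pos=(0.0, 0.0))
# TypeError: Boid.__init__() got an unexpected keyword argument 'pos'
```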
[ { "content": "\"\"\"\nFlockers\n=============================================================\nA Mesa implementation of Craig Reynolds's Boids flocker model.\nUses numpy arrays to represent vectors.\n\"\"\"\n\nimport numpy as np\n\nimport mesa\n\n\nclass Boid(mesa.Agent):\n \"\"\"\n A Boid-style flocker agent.\n\n The agent follows three behaviors to flock:\n - Cohesion: steering towards neighboring agents.\n - Separation: avoiding getting too close to any other agent.\n - Alignment: try to fly in the same direction as the neighbors.\n\n Boids have a vision that defines the radius in which they look for their\n neighbors to flock with. Their speed (a scalar) and direction (a vector)\n define their movement. Separation is their desired minimum distance from\n any other Boid.\n \"\"\"\n\n def __init__(\n self,\n unique_id,\n model,\n speed,\n direction,\n vision,\n separation,\n cohere=0.03,\n separate=0.015,\n match=0.05,\n ):\n \"\"\"\n Create a new Boid flocker agent.\n\n Args:\n unique_id: Unique agent identifier.\n speed: Distance to move per step.\n direction: numpy vector for the Boid's direction of movement.\n vision: Radius to look around for nearby Boids.\n separation: Minimum distance to maintain from other Boids.\n cohere: the relative importance of matching neighbors' positions\n separate: the relative importance of avoiding close neighbors\n match: the relative importance of matching neighbors' directions\n\n \"\"\"\n super().__init__(unique_id, model)\n self.speed = speed\n self.direction = direction\n self.vision = vision\n self.separation = separation\n self.cohere_factor = cohere\n self.separate_factor = separate\n self.match_factor = match\n\n def step(self):\n \"\"\"\n Get the Boid's neighbors, compute the new vector, and move accordingly.\n \"\"\"\n\n neighbors = self.model.space.get_neighbors(self.pos, self.vision, False)\n n = 0\n match_vector, separation_vector, cohere = np.zeros((3, 2))\n for neighbor in neighbors:\n n += 1\n heading = self.model.space.get_heading(self.pos, neighbor.pos)\n cohere += heading\n if self.model.space.get_distance(self.pos, neighbor.pos) < self.separation:\n separation_vector -= heading\n match_vector += neighbor.direction\n n = max(n, 1)\n cohere = cohere * self.cohere_factor\n separation_vector = separation_vector * self.separate_factor\n match_vector = match_vector * self.match_factor\n self.direction += (cohere + separation_vector + match_vector) / n\n self.direction /= np.linalg.norm(self.direction)\n new_pos = self.pos + self.direction * self.speed\n self.model.space.move_agent(self, new_pos)\n\n\nclass BoidFlockers(mesa.Model):\n \"\"\"\n Flocker model class. 
Handles agent creation, placement and scheduling.\n \"\"\"\n\n def __init__(\n self,\n seed=None,\n population=100,\n width=100,\n height=100,\n vision=10,\n speed=1,\n separation=1,\n cohere=0.03,\n separate=0.015,\n match=0.05,\n simulator=None,\n ):\n \"\"\"\n Create a new Flockers model.\n\n Args:\n population: Number of Boids\n width, height: Size of the space.\n speed: How fast should the Boids move.\n vision: How far around should each Boid look for its neighbors\n separation: What's the minimum distance each Boid will attempt to\n keep from any other\n cohere, separate, match: factors for the relative importance of\n the three drives.\n \"\"\"\n super().__init__(seed=seed)\n self.population = population\n self.width = width\n self.height = height\n self.simulator = simulator\n\n self.schedule = mesa.time.RandomActivation(self)\n self.space = mesa.space.ContinuousSpace(self.width, self.height, True)\n self.factors = {\n \"cohere\": cohere,\n \"separate\": separate,\n \"match\": match,\n }\n\n for i in range(self.population):\n x = self.random.random() * self.space.x_max\n y = self.random.random() * self.space.y_max\n pos = np.array((x, y))\n direction = np.random.random(2) * 2 - 1\n boid = Boid(\n unique_id=i,\n model=self,\n pos=pos,\n speed=speed,\n direction=direction,\n vision=vision,\n separation=separation,\n **self.factors,\n )\n self.space.place_agent(boid, pos)\n self.schedule.add(boid)\n\n def step(self):\n self.schedule.step()\n\n\nif __name__ == \"__main__\":\n import time\n\n # model = BoidFlockers(seed=15, population=200, width=100, height=100, vision=5)\n model = BoidFlockers(seed=15, population=400, width=100, height=100, vision=15)\n\n start_time = time.perf_counter()\n for _ in range(100):\n model.step()\n\n print(time.perf_counter() - start_time)\n", "path": "benchmarks/Flocking/flocking.py" } ]
[ { "content": "\"\"\"\nFlockers\n=============================================================\nA Mesa implementation of Craig Reynolds's Boids flocker model.\nUses numpy arrays to represent vectors.\n\"\"\"\n\nimport numpy as np\n\nimport mesa\n\n\nclass Boid(mesa.Agent):\n \"\"\"\n A Boid-style flocker agent.\n\n The agent follows three behaviors to flock:\n - Cohesion: steering towards neighboring agents.\n - Separation: avoiding getting too close to any other agent.\n - Alignment: try to fly in the same direction as the neighbors.\n\n Boids have a vision that defines the radius in which they look for their\n neighbors to flock with. Their speed (a scalar) and direction (a vector)\n define their movement. Separation is their desired minimum distance from\n any other Boid.\n \"\"\"\n\n def __init__(\n self,\n unique_id,\n model,\n speed,\n direction,\n vision,\n separation,\n cohere=0.03,\n separate=0.015,\n match=0.05,\n ):\n \"\"\"\n Create a new Boid flocker agent.\n\n Args:\n unique_id: Unique agent identifier.\n speed: Distance to move per step.\n direction: numpy vector for the Boid's direction of movement.\n vision: Radius to look around for nearby Boids.\n separation: Minimum distance to maintain from other Boids.\n cohere: the relative importance of matching neighbors' positions\n separate: the relative importance of avoiding close neighbors\n match: the relative importance of matching neighbors' directions\n\n \"\"\"\n super().__init__(unique_id, model)\n self.speed = speed\n self.direction = direction\n self.vision = vision\n self.separation = separation\n self.cohere_factor = cohere\n self.separate_factor = separate\n self.match_factor = match\n\n def step(self):\n \"\"\"\n Get the Boid's neighbors, compute the new vector, and move accordingly.\n \"\"\"\n\n neighbors = self.model.space.get_neighbors(self.pos, self.vision, False)\n n = 0\n match_vector, separation_vector, cohere = np.zeros((3, 2))\n for neighbor in neighbors:\n n += 1\n heading = self.model.space.get_heading(self.pos, neighbor.pos)\n cohere += heading\n if self.model.space.get_distance(self.pos, neighbor.pos) < self.separation:\n separation_vector -= heading\n match_vector += neighbor.direction\n n = max(n, 1)\n cohere = cohere * self.cohere_factor\n separation_vector = separation_vector * self.separate_factor\n match_vector = match_vector * self.match_factor\n self.direction += (cohere + separation_vector + match_vector) / n\n self.direction /= np.linalg.norm(self.direction)\n new_pos = self.pos + self.direction * self.speed\n self.model.space.move_agent(self, new_pos)\n\n\nclass BoidFlockers(mesa.Model):\n \"\"\"\n Flocker model class. 
Handles agent creation, placement and scheduling.\n \"\"\"\n\n def __init__(\n self,\n seed=None,\n population=100,\n width=100,\n height=100,\n vision=10,\n speed=1,\n separation=1,\n cohere=0.03,\n separate=0.015,\n match=0.05,\n simulator=None,\n ):\n \"\"\"\n Create a new Flockers model.\n\n Args:\n population: Number of Boids\n width, height: Size of the space.\n speed: How fast should the Boids move.\n vision: How far around should each Boid look for its neighbors\n separation: What's the minimum distance each Boid will attempt to\n keep from any other\n cohere, separate, match: factors for the relative importance of\n the three drives.\n \"\"\"\n super().__init__(seed=seed)\n self.population = population\n self.width = width\n self.height = height\n self.simulator = simulator\n\n self.schedule = mesa.time.RandomActivation(self)\n self.space = mesa.space.ContinuousSpace(self.width, self.height, True)\n self.factors = {\n \"cohere\": cohere,\n \"separate\": separate,\n \"match\": match,\n }\n\n for i in range(self.population):\n x = self.random.random() * self.space.x_max\n y = self.random.random() * self.space.y_max\n pos = np.array((x, y))\n direction = np.random.random(2) * 2 - 1\n boid = Boid(\n unique_id=i,\n model=self,\n speed=speed,\n direction=direction,\n vision=vision,\n separation=separation,\n **self.factors,\n )\n self.space.place_agent(boid, pos)\n self.schedule.add(boid)\n\n def step(self):\n self.schedule.step()\n\n\nif __name__ == \"__main__\":\n import time\n\n # model = BoidFlockers(seed=15, population=200, width=100, height=100, vision=5)\n model = BoidFlockers(seed=15, population=400, width=100, height=100, vision=15)\n\n start_time = time.perf_counter()\n for _ in range(100):\n model.step()\n\n print(time.perf_counter() - start_time)\n", "path": "benchmarks/Flocking/flocking.py" } ]
diff --git a/benchmarks/Flocking/flocking.py b/benchmarks/Flocking/flocking.py index fc19c6c3820..ff9f97c0c96 100644 --- a/benchmarks/Flocking/flocking.py +++ b/benchmarks/Flocking/flocking.py @@ -139,7 +139,6 @@ def __init__( boid = Boid( unique_id=i, model=self, - pos=pos, speed=speed, direction=direction, vision=vision,
Pyomo__pyomo-2633
Fixed Vars unpickle as stale
## Summary

I'm not sure if this is a bug, but it seems unexpected? Anyway, if you pickle a model that has a fixed variable (not stale), when you unpickle it, it comes back as stale.

### Steps to reproduce the issue

```
from pyomo.environ import *
import pickle

m = ConcreteModel()
m.x = Var(domain=Binary)
m.x.fix(1)

unpickle = pickle.loads(pickle.dumps(m))

m.x.pprint()
unpickle.x.pprint()
```

```
x : Size=1, Index=None
    Key  : Lower : Value : Upper : Fixed : Stale : Domain
    None :     0 :     1 :     1 :  True : False : Binary
x : Size=1, Index=None
    Key  : Lower : Value : Upper : Fixed : Stale : Domain
    None :     0 :     1 :     1 :  True :  True : Binary
```

### Error Message

It seems like these models should be identical, even up to stale-ness, right?

### Information on your system

Pyomo version: main
Python version: 3.8
Operating system: linux
How Pyomo was installed (PyPI, conda, source): source
Solver (if applicable):
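The root cause, confirmed by the diff below, is a missing `return` in `_StaleFlagManager.stale_mapper`: on the decode path for a non-stale value, `self.get_flag(0)` was computed but its result discarded, so the restored flag came back as `None` and compared unequal to the current global flag. A stripped-down sketch of the pattern (this is an analogue, not Pyomo's actual class):

```python
class FlagManager:
    """Stripped-down analogue of _StaleFlagManager for illustration."""
    def __init__(self):
        self._current = 1

    def decode(self, was_stale):
        if was_stale:
            return 0
        else:
            self._current  # BUG: value computed but not returned -> caller gets None
            # fix (per the diff below): return self._current

    def is_stale(self, val):
        return val != self._current

mgr = FlagManager()
restored = mgr.decode(was_stale=False)   # variable was NOT stale before pickling
print(mgr.is_stale(restored))            # True: None != 1, so it reappears as stale
```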
[ { "content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright (c) 2008-2022\n# National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nclass _StaleFlagManager(object):\n def __init__(self):\n self._current = 0\n self.mark_all_as_stale()\n\n def stale_mapper(self, encode, value):\n if encode:\n return self.is_stale(value)\n else:\n if value:\n return 0\n else:\n self.get_flag(0)\n\n def _get_flag(self, current_flag):\n \"\"\"Return the current global stale flag value\"\"\"\n return self._current\n\n def _get_flag_delayed(self, current_flag):\n \"\"\"Implement the \"delayed\" advancement of the global stale flag value\n\n This will continue to return the current value of the state flag\n until the first non-stale variable is updated (that it, it is\n passed the current stale flag when called). This allows for\n updating stale variable values without incrementing the global\n stale flag, but will mark everything as stale as soon as a\n non-stale variable value is changed.\n\n \"\"\"\n if current_flag == self._current:\n self._current += 1\n setattr(self, 'get_flag', getattr(self, '_get_flag'))\n return self._current\n\n def is_stale(self, val):\n \"\"\"Return ``True`` if the passed value indicated a stale variable\"\"\"\n return val != self._current\n\n def mark_all_as_stale(self, delayed=False):\n \"\"\"Advance the global stale flag, marking all variables as stale\n\n This is generally called immediately before and after a batch\n variable update (i.e. loading values from a solver result or\n stored solution). Before the batch update\n :meth:`mark_all_as_stale` is called with ``delayed=False``,\n which immediately marks all variables as stale. After the batch\n update, :meth:`mark_all_as_stale` is typically called with\n ``delayed=True``. This allows additional stale variables to be\n updated without advancing the global flag, but as soon as any\n non-stale variable has its value changed, then the flag is\n advanced and all other variables become stale.\n\n \"\"\"\n if delayed:\n setattr(self, 'get_flag', getattr(self, '_get_flag_delayed'))\n else:\n setattr(self, 'get_flag', getattr(self, '_get_flag'))\n self._current += 1\n\nStaleFlagManager = _StaleFlagManager()\n", "path": "pyomo/core/staleflag.py" } ]
[ { "content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright (c) 2008-2022\n# National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nclass _StaleFlagManager(object):\n def __init__(self):\n self._current = 0\n self.mark_all_as_stale()\n\n def stale_mapper(self, encode, value):\n if encode:\n return self.is_stale(value)\n else:\n if value:\n return 0\n else:\n return self.get_flag(0)\n\n def _get_flag(self, current_flag):\n \"\"\"Return the current global stale flag value\"\"\"\n return self._current\n\n def _get_flag_delayed(self, current_flag):\n \"\"\"Implement the \"delayed\" advancement of the global stale flag value\n\n This will continue to return the current value of the state flag\n until the first non-stale variable is updated (that it, it is\n passed the current stale flag when called). This allows for\n updating stale variable values without incrementing the global\n stale flag, but will mark everything as stale as soon as a\n non-stale variable value is changed.\n\n \"\"\"\n if current_flag == self._current:\n self._current += 1\n setattr(self, 'get_flag', getattr(self, '_get_flag'))\n return self._current\n\n def is_stale(self, val):\n \"\"\"Return ``True`` if the passed value indicated a stale variable\"\"\"\n return val != self._current\n\n def mark_all_as_stale(self, delayed=False):\n \"\"\"Advance the global stale flag, marking all variables as stale\n\n This is generally called immediately before and after a batch\n variable update (i.e. loading values from a solver result or\n stored solution). Before the batch update\n :meth:`mark_all_as_stale` is called with ``delayed=False``,\n which immediately marks all variables as stale. After the batch\n update, :meth:`mark_all_as_stale` is typically called with\n ``delayed=True``. This allows additional stale variables to be\n updated without advancing the global flag, but as soon as any\n non-stale variable has its value changed, then the flag is\n advanced and all other variables become stale.\n\n \"\"\"\n if delayed:\n setattr(self, 'get_flag', getattr(self, '_get_flag_delayed'))\n else:\n setattr(self, 'get_flag', getattr(self, '_get_flag'))\n self._current += 1\n\nStaleFlagManager = _StaleFlagManager()\n", "path": "pyomo/core/staleflag.py" } ]
diff --git a/pyomo/core/staleflag.py b/pyomo/core/staleflag.py index 1247df2247b..01cc70406a3 100644 --- a/pyomo/core/staleflag.py +++ b/pyomo/core/staleflag.py @@ -21,7 +21,7 @@ def stale_mapper(self, encode, value): if value: return 0 else: - self.get_flag(0) + return self.get_flag(0) def _get_flag(self, current_flag): """Return the current global stale flag value""" diff --git a/pyomo/core/tests/unit/test_var.py b/pyomo/core/tests/unit/test_var.py index 65081809c74..331334ef694 100644 --- a/pyomo/core/tests/unit/test_var.py +++ b/pyomo/core/tests/unit/test_var.py @@ -1598,5 +1598,27 @@ def test_stale(self): self.assertFalse(m.x.stale) self.assertFalse(m.y.stale) + def test_stale_clone(self): + m = ConcreteModel() + m.x = Var(initialize=0) + self.assertFalse(m.x.stale) + m.y = Var() + self.assertTrue(m.y.stale) + m.z = Var(initialize=0) + self.assertFalse(m.z.stale) + + i = m.clone() + self.assertFalse(i.x.stale) + self.assertTrue(i.y.stale) + self.assertFalse(i.z.stale) + + StaleFlagManager.mark_all_as_stale(delayed=True) + m.z = 5 + i = m.clone() + self.assertTrue(i.x.stale) + self.assertTrue(i.y.stale) + self.assertFalse(i.z.stale) + + if __name__ == "__main__": unittest.main()
aws__aws-cli-357
pip install awscli fails
I tried `pip install awscli` from https://github.com/aws/aws-cli/blob/develop/README.rst and failed: http://sprunge.us/NfbW

/home/hendry/.pip/pip.log = http://ix.io/7SC

Hilarious how bad Python packaging is. I'm running Archlinux with Python 3.3.2.
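The diff below shows the fix was a one-line dependency bump in both `requirements.txt` and `setup.py`: the pinned `rsa==3.1.1` did not install on the reporter's Python 3.3 environment (an inference from pairing the report with the fix; the diff itself only records the version change). The relevant fragment of `setup.py` after the fix:

```python
# setup.py (after the fix): the only change is the rsa pin.
requires = ['botocore>=0.16.0,<0.17.0',
            'bcdoc>=0.9.0,<0.10.0',
            'six>=1.1.0',
            'colorama==0.2.5',
            'docutils>=0.10',
            'rsa==3.1.2']  # was rsa==3.1.1; bumping it resolves the install failure
```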
[ { "content": "#!/usr/bin/env python\nimport sys\n\nfrom setuptools import setup, find_packages\n\nimport awscli\n\n\nrequires = ['botocore>=0.16.0,<0.17.0',\n 'bcdoc>=0.9.0,<0.10.0',\n 'six>=1.1.0',\n 'colorama==0.2.5',\n 'docutils>=0.10',\n 'rsa==3.1.1']\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n # was not in stdlib until 2.7.\n requires.append('argparse>=1.1')\n\n\nsetup_options = dict(\n name='awscli',\n version=awscli.__version__,\n description='Universal Command Line Environment for AWS.',\n long_description=open('README.rst').read(),\n author='Mitch Garnaat',\n author_email='[email protected]',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh'],\n packages=find_packages('.', exclude=['tests*']),\n package_dir={'awscli': 'awscli'},\n package_data={'awscli': ['data/*.json', 'examples/*/*']},\n install_requires=requires,\n license=\"Apache License 2.0\",\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n ),\n)\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'includes': ['ConfigParser', 'urllib', 'httplib',\n 'docutils.readers.standalone',\n 'docutils.parsers.rst',\n 'docutils.languages.en',\n 'xml.etree.ElementTree', 'HTMLParser',\n 'awscli.handlers'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\nimport sys\n\nfrom setuptools import setup, find_packages\n\nimport awscli\n\n\nrequires = ['botocore>=0.16.0,<0.17.0',\n 'bcdoc>=0.9.0,<0.10.0',\n 'six>=1.1.0',\n 'colorama==0.2.5',\n 'docutils>=0.10',\n 'rsa==3.1.2']\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n # was not in stdlib until 2.7.\n requires.append('argparse>=1.1')\n\n\nsetup_options = dict(\n name='awscli',\n version=awscli.__version__,\n description='Universal Command Line Environment for AWS.',\n long_description=open('README.rst').read(),\n author='Mitch Garnaat',\n author_email='[email protected]',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh'],\n packages=find_packages('.', exclude=['tests*']),\n package_dir={'awscli': 'awscli'},\n package_data={'awscli': ['data/*.json', 'examples/*/*']},\n install_requires=requires,\n license=\"Apache License 2.0\",\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n ),\n)\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'includes': ['ConfigParser', 'urllib', 'httplib',\n 'docutils.readers.standalone',\n 'docutils.parsers.rst',\n 'docutils.languages.en',\n 'xml.etree.ElementTree', 'HTMLParser',\n 'awscli.handlers'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py" } ]
diff --git a/requirements.txt b/requirements.txt index 6b6a74447dcd..fd6551a87ad9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,4 +11,4 @@ nose==1.3.0 colorama==0.2.5 mock==1.0.1 httpretty==0.6.1 -rsa==3.1.1 +rsa==3.1.2 diff --git a/setup.py b/setup.py index 9df11b894ad8..d0c1630346d9 100644 --- a/setup.py +++ b/setup.py @@ -11,7 +11,7 @@ 'six>=1.1.0', 'colorama==0.2.5', 'docutils>=0.10', - 'rsa==3.1.1'] + 'rsa==3.1.2'] if sys.version_info[:2] == (2, 6): # For python2.6 we have to require argparse since it
boto__boto-215
RDS call modify_dbinstance with multi_az = True doesn't actually set an instance to MultiAZ
Making a call to a non-multiaz instance with multi_az=True doesn't actually switch the parameter. I assume this is also true for creating one from scratch, but I haven't tested that yet.
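Although the report concerns RDS, the diff attached to this record fixes the same class of bug in the EMR connection: a query parameter sent under the wrong key (`Placement` instead of the nested `Placement.AvailabilityZone`), which the service silently ignores rather than rejecting. A minimal sketch of the pattern; the `MultiAZ` handling here is illustrative of how such a flag would need to be serialized, not boto's actual RDS code:

```python
def build_params(availability_zone=None, multi_az=None):
    """Build a flat query-parameter dict for an AWS Query API call (sketch)."""
    params = {}
    if availability_zone:
        # Wrong key -- the service silently drops it:
        #   params['Placement'] = availability_zone
        # Correct nested key (per the diff below):
        params['Placement.AvailabilityZone'] = availability_zone
    if multi_az is not None:
        # Booleans must go over the wire as lowercase strings, and the key
        # must match the API's expected name exactly (illustrative only).
        params['MultiAZ'] = str(multi_az).lower()
    return params

print(build_params(availability_zone='us-east-1a', multi_az=True))
# {'Placement.AvailabilityZone': 'us-east-1a', 'MultiAZ': 'true'}
```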
[ { "content": "# Copyright (c) 2010 Spotify AB\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\n\"\"\"\nRepresents a connection to the EMR service\n\"\"\"\nimport types\n\nimport boto\nfrom boto.ec2.regioninfo import RegionInfo\nfrom boto.emr.emrobject import JobFlow, RunJobFlowResponse\nfrom boto.emr.step import JarStep\nfrom boto.connection import AWSQueryConnection\nfrom boto.exception import EmrResponseError\n\nclass EmrConnection(AWSQueryConnection):\n\n APIVersion = boto.config.get('Boto', 'emr_version', '2009-03-31')\n DefaultRegionName = boto.config.get('Boto', 'emr_region_name', 'us-east-1')\n DefaultRegionEndpoint = boto.config.get('Boto', 'emr_region_endpoint',\n 'elasticmapreduce.amazonaws.com')\n ResponseError = EmrResponseError\n\n # Constants for AWS Console debugging\n DebuggingJar = 's3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar'\n DebuggingArgs = 's3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch'\n\n def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,\n is_secure=True, port=None, proxy=None, proxy_port=None,\n proxy_user=None, proxy_pass=None, debug=0,\n https_connection_factory=None, region=None, path='/'):\n if not region:\n region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint)\n self.region = region\n AWSQueryConnection.__init__(self, aws_access_key_id,\n aws_secret_access_key,\n is_secure, port, proxy, proxy_port,\n proxy_user, proxy_pass,\n self.region.endpoint, debug,\n https_connection_factory, path)\n\n def _required_auth_capability(self):\n return ['emr']\n\n def describe_jobflow(self, jobflow_id):\n \"\"\"\n Describes a single Elastic MapReduce job flow\n\n :type jobflow_id: str\n :param jobflow_id: The job flow id of interest\n \"\"\"\n jobflows = self.describe_jobflows(jobflow_ids=[jobflow_id])\n if jobflows:\n return jobflows[0]\n\n def describe_jobflows(self, states=None, jobflow_ids=None,\n created_after=None, created_before=None):\n \"\"\"\n Retrieve all the Elastic MapReduce job flows on your account\n\n :type states: list\n :param states: A list of strings with job flow states wanted\n\n :type jobflow_ids: list\n :param jobflow_ids: A list of job flow IDs\n :type created_after: datetime\n :param created_after: Bound on job flow creation time\n\n :type created_before: datetime\n :param created_before: Bound on job flow creation time\n \"\"\"\n params = {}\n\n if states:\n self.build_list_params(params, states, 'JobFlowStates.member')\n if jobflow_ids:\n 
self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')\n if created_after:\n params['CreatedAfter'] = created_after.strftime('%Y-%m-%dT%H:%M:%S')\n if created_before:\n params['CreatedBefore'] = created_before.strftime('%Y-%m-%dT%H:%M:%S')\n\n return self.get_list('DescribeJobFlows', params, [('member', JobFlow)])\n\n def terminate_jobflow(self, jobflow_id):\n \"\"\"\n Terminate an Elastic MapReduce job flow\n\n :type jobflow_id: str\n :param jobflow_id: A jobflow id \n \"\"\"\n self.terminate_jobflows([jobflow_id]) \n\n def terminate_jobflows(self, jobflow_ids):\n \"\"\"\n Terminate an Elastic MapReduce job flow\n\n :type jobflow_ids: list\n :param jobflow_ids: A list of job flow IDs\n \"\"\"\n params = {}\n self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')\n return self.get_status('TerminateJobFlows', params)\n\n def add_jobflow_steps(self, jobflow_id, steps):\n \"\"\"\n Adds steps to a jobflow\n\n :type jobflow_id: str\n :param jobflow_id: The job flow id\n :type steps: list(boto.emr.Step)\n :param steps: A list of steps to add to the job\n \"\"\"\n if type(steps) != types.ListType:\n steps = [steps]\n params = {}\n params['JobFlowId'] = jobflow_id\n\n # Step args\n step_args = [self._build_step_args(step) for step in steps]\n params.update(self._build_step_list(step_args))\n\n return self.get_object('AddJobFlowSteps', params, RunJobFlowResponse)\n\n def run_jobflow(self, name, log_uri, ec2_keyname=None, availability_zone=None,\n master_instance_type='m1.small',\n slave_instance_type='m1.small', num_instances=1,\n action_on_failure='TERMINATE_JOB_FLOW', keep_alive=False,\n enable_debugging=False,\n hadoop_version='0.18',\n steps=[],\n bootstrap_actions=[]):\n \"\"\"\n Runs a job flow\n\n :type name: str\n :param name: Name of the job flow\n :type log_uri: str\n :param log_uri: URI of the S3 bucket to place logs\n :type ec2_keyname: str\n :param ec2_keyname: EC2 key used for the instances\n :type availability_zone: str\n :param availability_zone: EC2 availability zone of the cluster\n :type master_instance_type: str\n :param master_instance_type: EC2 instance type of the master\n :type slave_instance_type: str\n :param slave_instance_type: EC2 instance type of the slave nodes\n :type num_instances: int\n :param num_instances: Number of instances in the Hadoop cluster\n :type action_on_failure: str\n :param action_on_failure: Action to take if a step terminates\n :type keep_alive: bool\n :param keep_alive: Denotes whether the cluster should stay alive upon completion\n :type enable_debugging: bool\n :param enable_debugging: Denotes whether AWS console debugging should be enabled.\n :type steps: list(boto.emr.Step)\n :param steps: List of steps to add with the job\n\n :rtype: str\n :return: The jobflow id\n \"\"\"\n params = {}\n if action_on_failure:\n params['ActionOnFailure'] = action_on_failure\n params['Name'] = name\n params['LogUri'] = log_uri\n\n # Instance args\n instance_params = self._build_instance_args(ec2_keyname, availability_zone,\n master_instance_type, slave_instance_type,\n num_instances, keep_alive, hadoop_version)\n params.update(instance_params)\n\n # Debugging step from EMR API docs\n if enable_debugging:\n debugging_step = JarStep(name='Setup Hadoop Debugging',\n action_on_failure='TERMINATE_JOB_FLOW',\n main_class=None,\n jar=self.DebuggingJar,\n step_args=self.DebuggingArgs)\n steps.insert(0, debugging_step)\n\n # Step args\n if steps:\n step_args = [self._build_step_args(step) for step in steps]\n 
params.update(self._build_step_list(step_args))\n\n if bootstrap_actions:\n bootstrap_action_args = [self._build_bootstrap_action_args(bootstrap_action) for bootstrap_action in bootstrap_actions]\n params.update(self._build_bootstrap_action_list(bootstrap_action_args))\n\n response = self.get_object('RunJobFlow', params, RunJobFlowResponse)\n return response.jobflowid\n\n def _build_bootstrap_action_args(self, bootstrap_action):\n bootstrap_action_params = {}\n bootstrap_action_params['ScriptBootstrapAction.Path'] = bootstrap_action.path\n\n try:\n bootstrap_action_params['Name'] = bootstrap_action.name\n except AttributeError:\n pass\n\n args = bootstrap_action.args()\n if args:\n self.build_list_params(bootstrap_action_params, args, 'ScriptBootstrapAction.Args.member')\n\n return bootstrap_action_params\n\n def _build_step_args(self, step):\n step_params = {}\n step_params['ActionOnFailure'] = step.action_on_failure\n step_params['HadoopJarStep.Jar'] = step.jar()\n\n main_class = step.main_class()\n if main_class:\n step_params['HadoopJarStep.MainClass'] = main_class\n\n args = step.args()\n if args:\n self.build_list_params(step_params, args, 'HadoopJarStep.Args.member')\n\n step_params['Name'] = step.name\n return step_params\n\n def _build_bootstrap_action_list(self, bootstrap_actions):\n if type(bootstrap_actions) != types.ListType:\n bootstrap_actions = [bootstrap_actions]\n\n params = {}\n for i, bootstrap_action in enumerate(bootstrap_actions):\n for key, value in bootstrap_action.iteritems():\n params['BootstrapActions.member.%s.%s' % (i + 1, key)] = value\n return params\n\n def _build_step_list(self, steps):\n if type(steps) != types.ListType:\n steps = [steps]\n\n params = {}\n for i, step in enumerate(steps):\n for key, value in step.iteritems():\n params['Steps.member.%s.%s' % (i+1, key)] = value\n return params\n\n def _build_instance_args(self, ec2_keyname, availability_zone, master_instance_type,\n slave_instance_type, num_instances, keep_alive, hadoop_version):\n params = {\n 'Instances.MasterInstanceType' : master_instance_type,\n 'Instances.SlaveInstanceType' : slave_instance_type,\n 'Instances.InstanceCount' : num_instances,\n 'Instances.KeepJobFlowAliveWhenNoSteps' : str(keep_alive).lower(),\n 'Instances.HadoopVersion' : hadoop_version\n }\n\n if ec2_keyname:\n params['Instances.Ec2KeyName'] = ec2_keyname\n if availability_zone:\n params['Placement'] = availability_zone\n\n return params\n\n", "path": "boto/emr/connection.py" } ]
[ { "content": "# Copyright (c) 2010 Spotify AB\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\n\"\"\"\nRepresents a connection to the EMR service\n\"\"\"\nimport types\n\nimport boto\nfrom boto.ec2.regioninfo import RegionInfo\nfrom boto.emr.emrobject import JobFlow, RunJobFlowResponse\nfrom boto.emr.step import JarStep\nfrom boto.connection import AWSQueryConnection\nfrom boto.exception import EmrResponseError\n\nclass EmrConnection(AWSQueryConnection):\n\n APIVersion = boto.config.get('Boto', 'emr_version', '2009-03-31')\n DefaultRegionName = boto.config.get('Boto', 'emr_region_name', 'us-east-1')\n DefaultRegionEndpoint = boto.config.get('Boto', 'emr_region_endpoint',\n 'elasticmapreduce.amazonaws.com')\n ResponseError = EmrResponseError\n\n # Constants for AWS Console debugging\n DebuggingJar = 's3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar'\n DebuggingArgs = 's3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch'\n\n def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,\n is_secure=True, port=None, proxy=None, proxy_port=None,\n proxy_user=None, proxy_pass=None, debug=0,\n https_connection_factory=None, region=None, path='/'):\n if not region:\n region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint)\n self.region = region\n AWSQueryConnection.__init__(self, aws_access_key_id,\n aws_secret_access_key,\n is_secure, port, proxy, proxy_port,\n proxy_user, proxy_pass,\n self.region.endpoint, debug,\n https_connection_factory, path)\n\n def _required_auth_capability(self):\n return ['emr']\n\n def describe_jobflow(self, jobflow_id):\n \"\"\"\n Describes a single Elastic MapReduce job flow\n\n :type jobflow_id: str\n :param jobflow_id: The job flow id of interest\n \"\"\"\n jobflows = self.describe_jobflows(jobflow_ids=[jobflow_id])\n if jobflows:\n return jobflows[0]\n\n def describe_jobflows(self, states=None, jobflow_ids=None,\n created_after=None, created_before=None):\n \"\"\"\n Retrieve all the Elastic MapReduce job flows on your account\n\n :type states: list\n :param states: A list of strings with job flow states wanted\n\n :type jobflow_ids: list\n :param jobflow_ids: A list of job flow IDs\n :type created_after: datetime\n :param created_after: Bound on job flow creation time\n\n :type created_before: datetime\n :param created_before: Bound on job flow creation time\n \"\"\"\n params = {}\n\n if states:\n self.build_list_params(params, states, 'JobFlowStates.member')\n if jobflow_ids:\n 
self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')\n if created_after:\n params['CreatedAfter'] = created_after.strftime('%Y-%m-%dT%H:%M:%S')\n if created_before:\n params['CreatedBefore'] = created_before.strftime('%Y-%m-%dT%H:%M:%S')\n\n return self.get_list('DescribeJobFlows', params, [('member', JobFlow)])\n\n def terminate_jobflow(self, jobflow_id):\n \"\"\"\n Terminate an Elastic MapReduce job flow\n\n :type jobflow_id: str\n :param jobflow_id: A jobflow id \n \"\"\"\n self.terminate_jobflows([jobflow_id]) \n\n def terminate_jobflows(self, jobflow_ids):\n \"\"\"\n Terminate an Elastic MapReduce job flow\n\n :type jobflow_ids: list\n :param jobflow_ids: A list of job flow IDs\n \"\"\"\n params = {}\n self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')\n return self.get_status('TerminateJobFlows', params)\n\n def add_jobflow_steps(self, jobflow_id, steps):\n \"\"\"\n Adds steps to a jobflow\n\n :type jobflow_id: str\n :param jobflow_id: The job flow id\n :type steps: list(boto.emr.Step)\n :param steps: A list of steps to add to the job\n \"\"\"\n if type(steps) != types.ListType:\n steps = [steps]\n params = {}\n params['JobFlowId'] = jobflow_id\n\n # Step args\n step_args = [self._build_step_args(step) for step in steps]\n params.update(self._build_step_list(step_args))\n\n return self.get_object('AddJobFlowSteps', params, RunJobFlowResponse)\n\n def run_jobflow(self, name, log_uri, ec2_keyname=None, availability_zone=None,\n master_instance_type='m1.small',\n slave_instance_type='m1.small', num_instances=1,\n action_on_failure='TERMINATE_JOB_FLOW', keep_alive=False,\n enable_debugging=False,\n hadoop_version='0.18',\n steps=[],\n bootstrap_actions=[]):\n \"\"\"\n Runs a job flow\n\n :type name: str\n :param name: Name of the job flow\n :type log_uri: str\n :param log_uri: URI of the S3 bucket to place logs\n :type ec2_keyname: str\n :param ec2_keyname: EC2 key used for the instances\n :type availability_zone: str\n :param availability_zone: EC2 availability zone of the cluster\n :type master_instance_type: str\n :param master_instance_type: EC2 instance type of the master\n :type slave_instance_type: str\n :param slave_instance_type: EC2 instance type of the slave nodes\n :type num_instances: int\n :param num_instances: Number of instances in the Hadoop cluster\n :type action_on_failure: str\n :param action_on_failure: Action to take if a step terminates\n :type keep_alive: bool\n :param keep_alive: Denotes whether the cluster should stay alive upon completion\n :type enable_debugging: bool\n :param enable_debugging: Denotes whether AWS console debugging should be enabled.\n :type steps: list(boto.emr.Step)\n :param steps: List of steps to add with the job\n\n :rtype: str\n :return: The jobflow id\n \"\"\"\n params = {}\n if action_on_failure:\n params['ActionOnFailure'] = action_on_failure\n params['Name'] = name\n params['LogUri'] = log_uri\n\n # Instance args\n instance_params = self._build_instance_args(ec2_keyname, availability_zone,\n master_instance_type, slave_instance_type,\n num_instances, keep_alive, hadoop_version)\n params.update(instance_params)\n\n # Debugging step from EMR API docs\n if enable_debugging:\n debugging_step = JarStep(name='Setup Hadoop Debugging',\n action_on_failure='TERMINATE_JOB_FLOW',\n main_class=None,\n jar=self.DebuggingJar,\n step_args=self.DebuggingArgs)\n steps.insert(0, debugging_step)\n\n # Step args\n if steps:\n step_args = [self._build_step_args(step) for step in steps]\n 
params.update(self._build_step_list(step_args))\n\n if bootstrap_actions:\n bootstrap_action_args = [self._build_bootstrap_action_args(bootstrap_action) for bootstrap_action in bootstrap_actions]\n params.update(self._build_bootstrap_action_list(bootstrap_action_args))\n\n response = self.get_object('RunJobFlow', params, RunJobFlowResponse)\n return response.jobflowid\n\n def _build_bootstrap_action_args(self, bootstrap_action):\n bootstrap_action_params = {}\n bootstrap_action_params['ScriptBootstrapAction.Path'] = bootstrap_action.path\n\n try:\n bootstrap_action_params['Name'] = bootstrap_action.name\n except AttributeError:\n pass\n\n args = bootstrap_action.args()\n if args:\n self.build_list_params(bootstrap_action_params, args, 'ScriptBootstrapAction.Args.member')\n\n return bootstrap_action_params\n\n def _build_step_args(self, step):\n step_params = {}\n step_params['ActionOnFailure'] = step.action_on_failure\n step_params['HadoopJarStep.Jar'] = step.jar()\n\n main_class = step.main_class()\n if main_class:\n step_params['HadoopJarStep.MainClass'] = main_class\n\n args = step.args()\n if args:\n self.build_list_params(step_params, args, 'HadoopJarStep.Args.member')\n\n step_params['Name'] = step.name\n return step_params\n\n def _build_bootstrap_action_list(self, bootstrap_actions):\n if type(bootstrap_actions) != types.ListType:\n bootstrap_actions = [bootstrap_actions]\n\n params = {}\n for i, bootstrap_action in enumerate(bootstrap_actions):\n for key, value in bootstrap_action.iteritems():\n params['BootstrapActions.member.%s.%s' % (i + 1, key)] = value\n return params\n\n def _build_step_list(self, steps):\n if type(steps) != types.ListType:\n steps = [steps]\n\n params = {}\n for i, step in enumerate(steps):\n for key, value in step.iteritems():\n params['Steps.member.%s.%s' % (i+1, key)] = value\n return params\n\n def _build_instance_args(self, ec2_keyname, availability_zone, master_instance_type,\n slave_instance_type, num_instances, keep_alive, hadoop_version):\n params = {\n 'Instances.MasterInstanceType' : master_instance_type,\n 'Instances.SlaveInstanceType' : slave_instance_type,\n 'Instances.InstanceCount' : num_instances,\n 'Instances.KeepJobFlowAliveWhenNoSteps' : str(keep_alive).lower(),\n 'Instances.HadoopVersion' : hadoop_version\n }\n\n if ec2_keyname:\n params['Instances.Ec2KeyName'] = ec2_keyname\n if availability_zone:\n params['Placement.AvailabilityZone'] = availability_zone\n\n return params\n\n", "path": "boto/emr/connection.py" } ]
diff --git a/boto/emr/connection.py b/boto/emr/connection.py index f0145e33de..4409d69ed8 100644 --- a/boto/emr/connection.py +++ b/boto/emr/connection.py @@ -274,7 +274,7 @@ def _build_instance_args(self, ec2_keyname, availability_zone, master_instance_t if ec2_keyname: params['Instances.Ec2KeyName'] = ec2_keyname if availability_zone: - params['Placement'] = availability_zone + params['Placement.AvailabilityZone'] = availability_zone return params
microsoft__playwright-python-625
Release-blocker: uncomment cors tests
Search for this issue URL in the code.
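The issue body implies the disabled CORS tests are marked with a skip that embeds this issue's URL, so they can be found again by grepping for it. A hypothetical sketch of that convention (the test name, fixtures, and URL placeholder are assumptions, not taken from the repo):

```python
import pytest

# Hypothetical illustration: a test disabled with the blocking issue's URL
# as the skip reason, rediscovered later by searching the codebase for it.
@pytest.mark.skip(reason="https://github.com/microsoft/playwright-python/issues/<this-issue>")
async def test_cors_fetch(page, server):
    ...
```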
[ { "content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport base64\nimport inspect\nimport sys\nfrom pathlib import Path\nfrom types import SimpleNamespace\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union, cast\n\nfrom playwright._impl._accessibility import Accessibility\nfrom playwright._impl._api_structures import (\n FilePayload,\n FloatRect,\n PdfMargins,\n Position,\n ViewportSize,\n)\nfrom playwright._impl._api_types import Error\nfrom playwright._impl._connection import (\n ChannelOwner,\n from_channel,\n from_nullable_channel,\n)\nfrom playwright._impl._console_message import ConsoleMessage\nfrom playwright._impl._download import Download\nfrom playwright._impl._element_handle import ElementHandle\nfrom playwright._impl._event_context_manager import EventContextManagerImpl\nfrom playwright._impl._file_chooser import FileChooser\nfrom playwright._impl._frame import Frame\nfrom playwright._impl._helper import (\n ColorScheme,\n DocumentLoadState,\n KeyboardModifier,\n MouseButton,\n RouteHandler,\n RouteHandlerEntry,\n TimeoutSettings,\n URLMatch,\n URLMatcher,\n URLMatchRequest,\n URLMatchResponse,\n is_safe_close_error,\n locals_to_params,\n make_dirs_for_file,\n parse_error,\n serialize_error,\n)\nfrom playwright._impl._input import Keyboard, Mouse, Touchscreen\nfrom playwright._impl._js_handle import (\n JSHandle,\n Serializable,\n parse_result,\n serialize_argument,\n)\nfrom playwright._impl._network import Request, Response, Route, serialize_headers\nfrom playwright._impl._video import Video\nfrom playwright._impl._wait_helper import WaitHelper\n\nif sys.version_info >= (3, 8): # pragma: no cover\n from typing import Literal\nelse: # pragma: no cover\n from typing_extensions import Literal\n\nif TYPE_CHECKING: # pragma: no cover\n from playwright._impl._browser_context import BrowserContext\n\n\nclass Page(ChannelOwner):\n\n Events = SimpleNamespace(\n Close=\"close\",\n Crash=\"crash\",\n Console=\"console\",\n Dialog=\"dialog\",\n Download=\"download\",\n FileChooser=\"filechooser\",\n DOMContentLoaded=\"domcontentloaded\",\n PageError=\"pageerror\",\n Request=\"request\",\n Response=\"response\",\n RequestFailed=\"requestfailed\",\n RequestFinished=\"requestfinished\",\n FrameAttached=\"frameattached\",\n FrameDetached=\"framedetached\",\n FrameNavigated=\"framenavigated\",\n Load=\"load\",\n Popup=\"popup\",\n WebSocket=\"websocket\",\n Worker=\"worker\",\n )\n accessibility: Accessibility\n keyboard: Keyboard\n mouse: Mouse\n touchscreen: Touchscreen\n\n def __init__(\n self, parent: ChannelOwner, type: str, guid: str, initializer: Dict\n ) -> None:\n super().__init__(parent, type, guid, initializer)\n self.accessibility = Accessibility(self._channel)\n self.keyboard = Keyboard(self._channel)\n self.mouse = Mouse(self._channel)\n self.touchscreen = Touchscreen(self._channel)\n\n self._main_frame: Frame = from_channel(initializer[\"mainFrame\"])\n self._main_frame._page = 
self\n self._frames = [self._main_frame]\n self._viewport_size: Optional[ViewportSize] = initializer.get(\"viewportSize\")\n self._is_closed = False\n self._workers: List[\"Worker\"] = []\n self._bindings: Dict[str, Any] = {}\n self._routes: List[RouteHandlerEntry] = []\n self._owned_context: Optional[\"BrowserContext\"] = None\n self._timeout_settings: TimeoutSettings = TimeoutSettings(None)\n self._video: Optional[Video] = None\n self._opener = cast(\"Page\", from_nullable_channel(initializer.get(\"opener\")))\n\n self._channel.on(\n \"bindingCall\",\n lambda params: self._on_binding(from_channel(params[\"binding\"])),\n )\n self._channel.on(\"close\", lambda _: self._on_close())\n self._channel.on(\n \"console\",\n lambda params: self.emit(\n Page.Events.Console, from_channel(params[\"message\"])\n ),\n )\n self._channel.on(\"crash\", lambda _: self._on_crash())\n self._channel.on(\"dialog\", lambda params: self._on_dialog(params))\n self._channel.on(\n \"domcontentloaded\", lambda _: self.emit(Page.Events.DOMContentLoaded)\n )\n self._channel.on(\"download\", lambda params: self._on_download(params))\n self._channel.on(\n \"fileChooser\",\n lambda params: self.emit(\n Page.Events.FileChooser,\n FileChooser(\n self, from_channel(params[\"element\"]), params[\"isMultiple\"]\n ),\n ),\n )\n self._channel.on(\n \"frameAttached\",\n lambda params: self._on_frame_attached(from_channel(params[\"frame\"])),\n )\n self._channel.on(\n \"frameDetached\",\n lambda params: self._on_frame_detached(from_channel(params[\"frame\"])),\n )\n self._channel.on(\"load\", lambda _: self.emit(Page.Events.Load))\n self._channel.on(\n \"pageError\",\n lambda params: self.emit(\n Page.Events.PageError, parse_error(params[\"error\"][\"error\"])\n ),\n )\n self._channel.on(\n \"request\",\n lambda params: self.emit(\n Page.Events.Request, from_channel(params[\"request\"])\n ),\n )\n self._channel.on(\n \"requestFailed\",\n lambda params: self._on_request_failed(\n from_channel(params[\"request\"]),\n params[\"responseEndTiming\"],\n params[\"failureText\"],\n ),\n )\n self._channel.on(\n \"requestFinished\",\n lambda params: self._on_request_finished(\n from_channel(params[\"request\"]), params[\"responseEndTiming\"]\n ),\n )\n self._channel.on(\n \"response\",\n lambda params: self.emit(\n Page.Events.Response, from_channel(params[\"response\"])\n ),\n )\n self._channel.on(\n \"route\",\n lambda params: self._on_route(\n from_channel(params[\"route\"]), from_channel(params[\"request\"])\n ),\n )\n self._channel.on(\"video\", lambda params: self._on_video(params))\n self._channel.on(\n \"webSocket\",\n lambda params: self.emit(\n Page.Events.WebSocket, from_channel(params[\"webSocket\"])\n ),\n )\n self._channel.on(\n \"worker\", lambda params: self._on_worker(from_channel(params[\"worker\"]))\n )\n\n def __repr__(self) -> str:\n return f\"<Page url={self.url!r}>\"\n\n def _set_browser_context(self, context: \"BrowserContext\") -> None:\n self._browser_context = context\n self._timeout_settings = TimeoutSettings(context._timeout_settings)\n\n def _on_request_failed(\n self,\n request: Request,\n response_end_timing: float,\n failure_text: str = None,\n ) -> None:\n request._failure_text = failure_text\n if request._timing:\n request._timing[\"responseEnd\"] = response_end_timing\n self.emit(Page.Events.RequestFailed, request)\n\n def _on_request_finished(\n self, request: Request, response_end_timing: float\n ) -> None:\n if request._timing:\n request._timing[\"responseEnd\"] = response_end_timing\n 
self.emit(Page.Events.RequestFinished, request)\n\n def _on_frame_attached(self, frame: Frame) -> None:\n frame._page = self\n self._frames.append(frame)\n self.emit(Page.Events.FrameAttached, frame)\n\n def _on_frame_detached(self, frame: Frame) -> None:\n self._frames.remove(frame)\n frame._detached = True\n self.emit(Page.Events.FrameDetached, frame)\n\n def _on_route(self, route: Route, request: Request) -> None:\n for handler_entry in self._routes:\n if handler_entry.matcher.matches(request.url):\n result = cast(Any, handler_entry.handler)(route, request)\n if inspect.iscoroutine(result):\n asyncio.create_task(result)\n return\n self._browser_context._on_route(route, request)\n\n def _on_binding(self, binding_call: \"BindingCall\") -> None:\n func = self._bindings.get(binding_call._initializer[\"name\"])\n if func:\n asyncio.create_task(binding_call.call(func))\n self._browser_context._on_binding(binding_call)\n\n def _on_worker(self, worker: \"Worker\") -> None:\n self._workers.append(worker)\n worker._page = self\n self.emit(Page.Events.Worker, worker)\n\n def _on_close(self) -> None:\n self._is_closed = True\n self._browser_context._pages.remove(self)\n self.emit(Page.Events.Close)\n\n def _on_crash(self) -> None:\n self.emit(Page.Events.Crash)\n\n def _on_dialog(self, params: Any) -> None:\n dialog = from_channel(params[\"dialog\"])\n if self.listeners(Page.Events.Dialog):\n self.emit(Page.Events.Dialog, dialog)\n else:\n asyncio.create_task(dialog.dismiss())\n\n def _on_download(self, params: Any) -> None:\n url = params[\"url\"]\n suggested_filename = params[\"suggestedFilename\"]\n artifact = from_channel(params[\"artifact\"])\n self.emit(\n Page.Events.Download, Download(self, url, suggested_filename, artifact)\n )\n\n def _on_video(self, params: Any) -> None:\n artifact = from_channel(params[\"artifact\"])\n cast(Video, self.video)._artifact_ready(artifact)\n\n def _add_event_handler(self, event: str, k: Any, v: Any) -> None:\n if event == Page.Events.FileChooser and len(self.listeners(event)) == 0:\n self._channel.send_no_reply(\n \"setFileChooserInterceptedNoReply\", {\"intercepted\": True}\n )\n super()._add_event_handler(event, k, v)\n\n def remove_listener(self, event: str, f: Any) -> None:\n super().remove_listener(event, f)\n if event == Page.Events.FileChooser and len(self.listeners(event)) == 0:\n self._channel.send_no_reply(\n \"setFileChooserInterceptedNoReply\", {\"intercepted\": False}\n )\n\n @property\n def context(self) -> \"BrowserContext\":\n return self._browser_context\n\n async def opener(self) -> Optional[\"Page\"]:\n if self._opener and self._opener.is_closed():\n return None\n return self._opener\n\n @property\n def main_frame(self) -> Frame:\n return self._main_frame\n\n def frame(self, name: str = None, url: URLMatch = None) -> Optional[Frame]:\n matcher = URLMatcher(url) if url else None\n for frame in self._frames:\n if name and frame.name == name:\n return frame\n if url and matcher and matcher.matches(frame.url):\n return frame\n return None\n\n @property\n def frames(self) -> List[Frame]:\n return self._frames.copy()\n\n def set_default_navigation_timeout(self, timeout: float) -> None:\n self._timeout_settings.set_navigation_timeout(timeout)\n self._channel.send_no_reply(\n \"setDefaultNavigationTimeoutNoReply\", dict(timeout=timeout)\n )\n\n def set_default_timeout(self, timeout: float) -> None:\n self._timeout_settings.set_timeout(timeout)\n self._channel.send_no_reply(\"setDefaultTimeoutNoReply\", dict(timeout=timeout))\n\n async def 
query_selector(self, selector: str) -> Optional[ElementHandle]:\n return await self._main_frame.query_selector(selector)\n\n async def query_selector_all(self, selector: str) -> List[ElementHandle]:\n return await self._main_frame.query_selector_all(selector)\n\n async def wait_for_selector(\n self,\n selector: str,\n timeout: float = None,\n state: Literal[\"attached\", \"detached\", \"hidden\", \"visible\"] = None,\n ) -> Optional[ElementHandle]:\n return await self._main_frame.wait_for_selector(**locals_to_params(locals()))\n\n async def is_checked(self, selector: str, timeout: float = None) -> bool:\n return await self._main_frame.is_checked(**locals_to_params(locals()))\n\n async def is_disabled(self, selector: str, timeout: float = None) -> bool:\n return await self._main_frame.is_disabled(**locals_to_params(locals()))\n\n async def is_editable(self, selector: str, timeout: float = None) -> bool:\n return await self._main_frame.is_editable(**locals_to_params(locals()))\n\n async def is_enabled(self, selector: str, timeout: float = None) -> bool:\n return await self._main_frame.is_enabled(**locals_to_params(locals()))\n\n async def is_hidden(self, selector: str, timeout: float = None) -> bool:\n return await self._main_frame.is_hidden(**locals_to_params(locals()))\n\n async def is_visible(self, selector: str, timeout: float = None) -> bool:\n return await self._main_frame.is_visible(**locals_to_params(locals()))\n\n async def dispatch_event(\n self, selector: str, type: str, eventInit: Dict = None, timeout: float = None\n ) -> None:\n return await self._main_frame.dispatch_event(**locals_to_params(locals()))\n\n async def evaluate(self, expression: str, arg: Serializable = None) -> Any:\n return await self._main_frame.evaluate(expression, arg)\n\n async def evaluate_handle(\n self, expression: str, arg: Serializable = None\n ) -> JSHandle:\n return await self._main_frame.evaluate_handle(expression, arg)\n\n async def eval_on_selector(\n self,\n selector: str,\n expression: str,\n arg: Serializable = None,\n ) -> Any:\n return await self._main_frame.eval_on_selector(selector, expression, arg)\n\n async def eval_on_selector_all(\n self,\n selector: str,\n expression: str,\n arg: Serializable = None,\n ) -> Any:\n return await self._main_frame.eval_on_selector_all(selector, expression, arg)\n\n async def add_script_tag(\n self,\n url: str = None,\n path: Union[str, Path] = None,\n content: str = None,\n type: str = None,\n ) -> ElementHandle:\n return await self._main_frame.add_script_tag(**locals_to_params(locals()))\n\n async def add_style_tag(\n self, url: str = None, path: Union[str, Path] = None, content: str = None\n ) -> ElementHandle:\n return await self._main_frame.add_style_tag(**locals_to_params(locals()))\n\n async def expose_function(self, name: str, callback: Callable) -> None:\n await self.expose_binding(name, lambda source, *args: callback(*args))\n\n async def expose_binding(\n self, name: str, callback: Callable, handle: bool = None\n ) -> None:\n if name in self._bindings:\n raise Error(f'Function \"{name}\" has been already registered')\n if name in self._browser_context._bindings:\n raise Error(\n f'Function \"{name}\" has been already registered in the browser context'\n )\n self._bindings[name] = callback\n await self._channel.send(\n \"exposeBinding\", dict(name=name, needsHandle=handle or False)\n )\n\n async def set_extra_http_headers(self, headers: Dict[str, str]) -> None:\n await self._channel.send(\n \"setExtraHTTPHeaders\", 
dict(headers=serialize_headers(headers))\n )\n\n @property\n def url(self) -> str:\n return self._main_frame.url\n\n async def content(self) -> str:\n return await self._main_frame.content()\n\n async def set_content(\n self,\n html: str,\n timeout: float = None,\n waitUntil: DocumentLoadState = None,\n ) -> None:\n return await self._main_frame.set_content(**locals_to_params(locals()))\n\n async def goto(\n self,\n url: str,\n timeout: float = None,\n waitUntil: DocumentLoadState = None,\n referer: str = None,\n ) -> Optional[Response]:\n return await self._main_frame.goto(**locals_to_params(locals()))\n\n async def reload(\n self,\n timeout: float = None,\n waitUntil: DocumentLoadState = None,\n ) -> Optional[Response]:\n return from_nullable_channel(\n await self._channel.send(\"reload\", locals_to_params(locals()))\n )\n\n async def wait_for_load_state(\n self, state: DocumentLoadState = None, timeout: float = None\n ) -> None:\n return await self._main_frame.wait_for_load_state(**locals_to_params(locals()))\n\n async def wait_for_url(\n self,\n url: URLMatch,\n wait_until: DocumentLoadState = None,\n timeout: float = None,\n ) -> None:\n return await self._main_frame.wait_for_url(**locals_to_params(locals()))\n\n async def wait_for_event(\n self, event: str, predicate: Callable = None, timeout: float = None\n ) -> Any:\n async with self.expect_event(event, predicate, timeout) as event_info:\n pass\n return await event_info\n\n async def go_back(\n self,\n timeout: float = None,\n waitUntil: DocumentLoadState = None,\n ) -> Optional[Response]:\n return from_nullable_channel(\n await self._channel.send(\"goBack\", locals_to_params(locals()))\n )\n\n async def go_forward(\n self,\n timeout: float = None,\n waitUntil: DocumentLoadState = None,\n ) -> Optional[Response]:\n return from_nullable_channel(\n await self._channel.send(\"goForward\", locals_to_params(locals()))\n )\n\n async def emulate_media(\n self,\n media: Literal[\"print\", \"screen\"] = None,\n colorScheme: ColorScheme = None,\n ) -> None:\n await self._channel.send(\"emulateMedia\", locals_to_params(locals()))\n\n async def set_viewport_size(self, viewportSize: ViewportSize) -> None:\n self._viewport_size = viewportSize\n await self._channel.send(\"setViewportSize\", locals_to_params(locals()))\n\n @property\n def viewport_size(self) -> Optional[ViewportSize]:\n return self._viewport_size\n\n async def bring_to_front(self) -> None:\n await self._channel.send(\"bringToFront\")\n\n async def add_init_script(\n self, script: str = None, path: Union[str, Path] = None\n ) -> None:\n if path:\n with open(path, \"r\") as file:\n script = file.read()\n if not isinstance(script, str):\n raise Error(\"Either path or script parameter must be specified\")\n await self._channel.send(\"addInitScript\", dict(source=script))\n\n async def route(self, url: URLMatch, handler: RouteHandler) -> None:\n self._routes.append(RouteHandlerEntry(URLMatcher(url), handler))\n if len(self._routes) == 1:\n await self._channel.send(\n \"setNetworkInterceptionEnabled\", dict(enabled=True)\n )\n\n async def unroute(\n self, url: URLMatch, handler: Optional[RouteHandler] = None\n ) -> None:\n self._routes = list(\n filter(\n lambda r: r.matcher.match != url or (handler and r.handler != handler),\n self._routes,\n )\n )\n if len(self._routes) == 0:\n await self._channel.send(\n \"setNetworkInterceptionEnabled\", dict(enabled=False)\n )\n\n async def screenshot(\n self,\n timeout: float = None,\n type: Literal[\"jpeg\", \"png\"] = None,\n path: Union[str, 
Path] = None,\n quality: int = None,\n omitBackground: bool = None,\n fullPage: bool = None,\n clip: FloatRect = None,\n ) -> bytes:\n params = locals_to_params(locals())\n if \"path\" in params:\n del params[\"path\"]\n encoded_binary = await self._channel.send(\"screenshot\", params)\n decoded_binary = base64.b64decode(encoded_binary)\n if path:\n make_dirs_for_file(path)\n with open(path, \"wb\") as fd:\n fd.write(decoded_binary)\n return decoded_binary\n\n async def title(self) -> str:\n return await self._main_frame.title()\n\n async def close(self, runBeforeUnload: bool = None) -> None:\n try:\n await self._channel.send(\"close\", locals_to_params(locals()))\n if self._owned_context:\n await self._owned_context.close()\n except Exception as e:\n if not is_safe_close_error(e):\n raise e\n\n def is_closed(self) -> bool:\n return self._is_closed\n\n async def click(\n self,\n selector: str,\n modifiers: List[KeyboardModifier] = None,\n position: Position = None,\n delay: float = None,\n button: MouseButton = None,\n clickCount: int = None,\n timeout: float = None,\n force: bool = None,\n noWaitAfter: bool = None,\n ) -> None:\n return await self._main_frame.click(**locals_to_params(locals()))\n\n async def dblclick(\n self,\n selector: str,\n modifiers: List[KeyboardModifier] = None,\n position: Position = None,\n delay: float = None,\n button: MouseButton = None,\n timeout: float = None,\n force: bool = None,\n noWaitAfter: bool = None,\n ) -> None:\n return await self._main_frame.dblclick(**locals_to_params(locals()))\n\n async def tap(\n self,\n selector: str,\n modifiers: List[KeyboardModifier] = None,\n position: Position = None,\n timeout: float = None,\n force: bool = None,\n noWaitAfter: bool = None,\n ) -> None:\n return await self._main_frame.tap(**locals_to_params(locals()))\n\n async def fill(\n self, selector: str, value: str, timeout: float = None, noWaitAfter: bool = None\n ) -> None:\n return await self._main_frame.fill(**locals_to_params(locals()))\n\n async def focus(self, selector: str, timeout: float = None) -> None:\n return await self._main_frame.focus(**locals_to_params(locals()))\n\n async def text_content(self, selector: str, timeout: float = None) -> Optional[str]:\n return await self._main_frame.text_content(**locals_to_params(locals()))\n\n async def inner_text(self, selector: str, timeout: float = None) -> str:\n return await self._main_frame.inner_text(**locals_to_params(locals()))\n\n async def inner_html(self, selector: str, timeout: float = None) -> str:\n return await self._main_frame.inner_html(**locals_to_params(locals()))\n\n async def get_attribute(\n self, selector: str, name: str, timeout: float = None\n ) -> Optional[str]:\n return await self._main_frame.get_attribute(**locals_to_params(locals()))\n\n async def hover(\n self,\n selector: str,\n modifiers: List[KeyboardModifier] = None,\n position: Position = None,\n timeout: float = None,\n force: bool = None,\n ) -> None:\n return await self._main_frame.hover(**locals_to_params(locals()))\n\n async def select_option(\n self,\n selector: str,\n value: Union[str, List[str]] = None,\n index: Union[int, List[int]] = None,\n label: Union[str, List[str]] = None,\n element: Union[\"ElementHandle\", List[\"ElementHandle\"]] = None,\n timeout: float = None,\n noWaitAfter: bool = None,\n ) -> List[str]:\n params = locals_to_params(locals())\n return await self._main_frame.select_option(**params)\n\n async def set_input_files(\n self,\n selector: str,\n files: Union[str, Path, FilePayload, List[Union[str, 
Path]], List[FilePayload]],\n timeout: float = None,\n noWaitAfter: bool = None,\n ) -> None:\n return await self._main_frame.set_input_files(**locals_to_params(locals()))\n\n async def type(\n self,\n selector: str,\n text: str,\n delay: float = None,\n timeout: float = None,\n noWaitAfter: bool = None,\n ) -> None:\n return await self._main_frame.type(**locals_to_params(locals()))\n\n async def press(\n self,\n selector: str,\n key: str,\n delay: float = None,\n timeout: float = None,\n noWaitAfter: bool = None,\n ) -> None:\n return await self._main_frame.press(**locals_to_params(locals()))\n\n async def check(\n self,\n selector: str,\n position: Position = None,\n timeout: float = None,\n force: bool = None,\n noWaitAfter: bool = None,\n ) -> None:\n return await self._main_frame.check(**locals_to_params(locals()))\n\n async def uncheck(\n self,\n selector: str,\n position: Position = None,\n timeout: float = None,\n force: bool = None,\n noWaitAfter: bool = None,\n ) -> None:\n return await self._main_frame.uncheck(**locals_to_params(locals()))\n\n async def wait_for_timeout(self, timeout: float) -> None:\n await self._main_frame.wait_for_timeout(timeout)\n\n async def wait_for_function(\n self,\n expression: str,\n arg: Serializable = None,\n timeout: float = None,\n polling: Union[float, Literal[\"raf\"]] = None,\n ) -> JSHandle:\n return await self._main_frame.wait_for_function(**locals_to_params(locals()))\n\n @property\n def workers(self) -> List[\"Worker\"]:\n return self._workers.copy()\n\n async def pause(self) -> None:\n await self._browser_context._pause()\n\n async def pdf(\n self,\n scale: float = None,\n displayHeaderFooter: bool = None,\n headerTemplate: str = None,\n footerTemplate: str = None,\n printBackground: bool = None,\n landscape: bool = None,\n pageRanges: str = None,\n format: str = None,\n width: Union[str, float] = None,\n height: Union[str, float] = None,\n preferCSSPageSize: bool = None,\n margin: PdfMargins = None,\n path: Union[str, Path] = None,\n ) -> bytes:\n params = locals_to_params(locals())\n if \"path\" in params:\n del params[\"path\"]\n encoded_binary = await self._channel.send(\"pdf\", params)\n decoded_binary = base64.b64decode(encoded_binary)\n if path:\n make_dirs_for_file(path)\n with open(path, \"wb\") as fd:\n fd.write(decoded_binary)\n return decoded_binary\n\n @property\n def video(\n self,\n ) -> Optional[Video]:\n if \"recordVideo\" not in self._browser_context._options:\n return None\n if not self._video:\n self._video = Video(self)\n return self._video\n\n def expect_event(\n self,\n event: str,\n predicate: Callable = None,\n timeout: float = None,\n ) -> EventContextManagerImpl:\n if timeout is None:\n timeout = self._timeout_settings.timeout()\n wait_helper = WaitHelper(self, f\"page.expect_event({event})\")\n wait_helper.reject_on_timeout(\n timeout, f'Timeout while waiting for event \"{event}\"'\n )\n if event != Page.Events.Crash:\n wait_helper.reject_on_event(self, Page.Events.Crash, Error(\"Page crashed\"))\n if event != Page.Events.Close:\n wait_helper.reject_on_event(self, Page.Events.Close, Error(\"Page closed\"))\n wait_helper.wait_for_event(self, event, predicate)\n return EventContextManagerImpl(wait_helper.result())\n\n def expect_console_message(\n self,\n predicate: Callable[[ConsoleMessage], bool] = None,\n timeout: float = None,\n ) -> EventContextManagerImpl[ConsoleMessage]:\n return self.expect_event(Page.Events.Console, predicate, timeout)\n\n def expect_download(\n self,\n predicate: Callable[[Download], 
bool] = None,\n timeout: float = None,\n ) -> EventContextManagerImpl[Download]:\n return self.expect_event(Page.Events.Download, predicate, timeout)\n\n def expect_file_chooser(\n self,\n predicate: Callable[[FileChooser], bool] = None,\n timeout: float = None,\n ) -> EventContextManagerImpl[FileChooser]:\n return self.expect_event(Page.Events.FileChooser, predicate, timeout)\n\n def expect_navigation(\n self,\n url: URLMatch = None,\n wait_until: DocumentLoadState = None,\n timeout: float = None,\n ) -> EventContextManagerImpl[Response]:\n return self.main_frame.expect_navigation(url, wait_until, timeout)\n\n def expect_popup(\n self,\n predicate: Callable[[\"Page\"], bool] = None,\n timeout: float = None,\n ) -> EventContextManagerImpl[\"Page\"]:\n return self.expect_event(Page.Events.Popup, predicate, timeout)\n\n def expect_request(\n self,\n url_or_predicate: URLMatchRequest,\n timeout: float = None,\n ) -> EventContextManagerImpl[Request]:\n matcher = None if callable(url_or_predicate) else URLMatcher(url_or_predicate)\n predicate = url_or_predicate if callable(url_or_predicate) else None\n\n def my_predicate(request: Request) -> bool:\n if matcher:\n return matcher.matches(request.url)\n if predicate:\n return url_or_predicate(request)\n return True\n\n return self.expect_event(\n Page.Events.Request, predicate=my_predicate, timeout=timeout\n )\n\n def expect_response(\n self,\n url_or_predicate: URLMatchResponse,\n timeout: float = None,\n ) -> EventContextManagerImpl[Response]:\n matcher = None if callable(url_or_predicate) else URLMatcher(url_or_predicate)\n predicate = url_or_predicate if callable(url_or_predicate) else None\n\n def my_predicate(response: Response) -> bool:\n if matcher:\n return matcher.matches(response.url)\n if predicate:\n return predicate(response)\n return True\n\n return self.expect_event(\n Page.Events.Response, predicate=my_predicate, timeout=timeout\n )\n\n def expect_worker(\n self,\n predicate: Callable[[\"Worker\"], bool] = None,\n timeout: float = None,\n ) -> EventContextManagerImpl[\"Worker\"]:\n return self.expect_event(\"worker\", predicate, timeout)\n\n\nclass Worker(ChannelOwner):\n Events = SimpleNamespace(Close=\"close\")\n\n def __init__(\n self, parent: ChannelOwner, type: str, guid: str, initializer: Dict\n ) -> None:\n super().__init__(parent, type, guid, initializer)\n self._channel.on(\"close\", lambda _: self._on_close())\n self._page: Optional[Page] = None\n self._context: Optional[\"BrowserContext\"] = None\n\n def __repr__(self) -> str:\n return f\"<Worker url={self.url!r}>\"\n\n def _on_close(self) -> None:\n if self._page:\n self._page._workers.remove(self)\n if self._context:\n self._context._service_workers.remove(self)\n self.emit(Worker.Events.Close, self)\n\n @property\n def url(self) -> str:\n return self._initializer[\"url\"]\n\n async def evaluate(self, expression: str, arg: Serializable = None) -> Any:\n return parse_result(\n await self._channel.send(\n \"evaluateExpression\",\n dict(\n expression=expression,\n arg=serialize_argument(arg),\n ),\n )\n )\n\n async def evaluate_handle(\n self, expression: str, arg: Serializable = None\n ) -> JSHandle:\n return from_channel(\n await self._channel.send(\n \"evaluateExpressionHandle\",\n dict(\n expression=expression,\n arg=serialize_argument(arg),\n ),\n )\n )\n\n\nclass BindingCall(ChannelOwner):\n def __init__(\n self, parent: ChannelOwner, type: str, guid: str, initializer: Dict\n ) -> None:\n super().__init__(parent, type, guid, initializer)\n\n async def call(self, 
func: Callable) -> None:\n try:\n frame = from_channel(self._initializer[\"frame\"])\n source = dict(context=frame._page.context, page=frame._page, frame=frame)\n if self._initializer.get(\"handle\"):\n result = func(source, from_channel(self._initializer[\"handle\"]))\n else:\n func_args = list(map(parse_result, self._initializer[\"args\"]))\n result = func(source, *func_args)\n if inspect.iscoroutine(result):\n result = await result\n await self._channel.send(\"resolve\", dict(result=serialize_argument(result)))\n except Exception as e:\n tb = sys.exc_info()[2]\n asyncio.create_task(\n self._channel.send(\n \"reject\", dict(error=dict(error=serialize_error(e, tb)))\n )\n )\n", "path": "playwright/_impl/_page.py" } ]
[ { "content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport base64\nimport inspect\nimport sys\nfrom pathlib import Path\nfrom types import SimpleNamespace\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union, cast\n\nfrom playwright._impl._accessibility import Accessibility\nfrom playwright._impl._api_structures import (\n FilePayload,\n FloatRect,\n PdfMargins,\n Position,\n ViewportSize,\n)\nfrom playwright._impl._api_types import Error\nfrom playwright._impl._connection import (\n ChannelOwner,\n from_channel,\n from_nullable_channel,\n)\nfrom playwright._impl._console_message import ConsoleMessage\nfrom playwright._impl._download import Download\nfrom playwright._impl._element_handle import ElementHandle\nfrom playwright._impl._event_context_manager import EventContextManagerImpl\nfrom playwright._impl._file_chooser import FileChooser\nfrom playwright._impl._frame import Frame\nfrom playwright._impl._helper import (\n ColorScheme,\n DocumentLoadState,\n KeyboardModifier,\n MouseButton,\n RouteHandler,\n RouteHandlerEntry,\n TimeoutSettings,\n URLMatch,\n URLMatcher,\n URLMatchRequest,\n URLMatchResponse,\n is_safe_close_error,\n locals_to_params,\n make_dirs_for_file,\n parse_error,\n serialize_error,\n)\nfrom playwright._impl._input import Keyboard, Mouse, Touchscreen\nfrom playwright._impl._js_handle import (\n JSHandle,\n Serializable,\n parse_result,\n serialize_argument,\n)\nfrom playwright._impl._network import Request, Response, Route, serialize_headers\nfrom playwright._impl._video import Video\nfrom playwright._impl._wait_helper import WaitHelper\n\nif sys.version_info >= (3, 8): # pragma: no cover\n from typing import Literal\nelse: # pragma: no cover\n from typing_extensions import Literal\n\nif TYPE_CHECKING: # pragma: no cover\n from playwright._impl._browser_context import BrowserContext\n\n\nclass Page(ChannelOwner):\n\n Events = SimpleNamespace(\n Close=\"close\",\n Crash=\"crash\",\n Console=\"console\",\n Dialog=\"dialog\",\n Download=\"download\",\n FileChooser=\"filechooser\",\n DOMContentLoaded=\"domcontentloaded\",\n PageError=\"pageerror\",\n Request=\"request\",\n Response=\"response\",\n RequestFailed=\"requestfailed\",\n RequestFinished=\"requestfinished\",\n FrameAttached=\"frameattached\",\n FrameDetached=\"framedetached\",\n FrameNavigated=\"framenavigated\",\n Load=\"load\",\n Popup=\"popup\",\n WebSocket=\"websocket\",\n Worker=\"worker\",\n )\n accessibility: Accessibility\n keyboard: Keyboard\n mouse: Mouse\n touchscreen: Touchscreen\n\n def __init__(\n self, parent: ChannelOwner, type: str, guid: str, initializer: Dict\n ) -> None:\n super().__init__(parent, type, guid, initializer)\n self.accessibility = Accessibility(self._channel)\n self.keyboard = Keyboard(self._channel)\n self.mouse = Mouse(self._channel)\n self.touchscreen = Touchscreen(self._channel)\n\n self._main_frame: Frame = from_channel(initializer[\"mainFrame\"])\n self._main_frame._page = 
self\n self._frames = [self._main_frame]\n self._viewport_size: Optional[ViewportSize] = initializer.get(\"viewportSize\")\n self._is_closed = False\n self._workers: List[\"Worker\"] = []\n self._bindings: Dict[str, Any] = {}\n self._routes: List[RouteHandlerEntry] = []\n self._owned_context: Optional[\"BrowserContext\"] = None\n self._timeout_settings: TimeoutSettings = TimeoutSettings(None)\n self._video: Optional[Video] = None\n self._opener = cast(\"Page\", from_nullable_channel(initializer.get(\"opener\")))\n\n self._channel.on(\n \"bindingCall\",\n lambda params: self._on_binding(from_channel(params[\"binding\"])),\n )\n self._channel.on(\"close\", lambda _: self._on_close())\n self._channel.on(\n \"console\",\n lambda params: self.emit(\n Page.Events.Console, from_channel(params[\"message\"])\n ),\n )\n self._channel.on(\"crash\", lambda _: self._on_crash())\n self._channel.on(\"dialog\", lambda params: self._on_dialog(params))\n self._channel.on(\n \"domcontentloaded\", lambda _: self.emit(Page.Events.DOMContentLoaded)\n )\n self._channel.on(\"download\", lambda params: self._on_download(params))\n self._channel.on(\n \"fileChooser\",\n lambda params: self.emit(\n Page.Events.FileChooser,\n FileChooser(\n self, from_channel(params[\"element\"]), params[\"isMultiple\"]\n ),\n ),\n )\n self._channel.on(\n \"frameAttached\",\n lambda params: self._on_frame_attached(from_channel(params[\"frame\"])),\n )\n self._channel.on(\n \"frameDetached\",\n lambda params: self._on_frame_detached(from_channel(params[\"frame\"])),\n )\n self._channel.on(\"load\", lambda _: self.emit(Page.Events.Load))\n self._channel.on(\n \"pageError\",\n lambda params: self.emit(\n Page.Events.PageError, parse_error(params[\"error\"][\"error\"])\n ),\n )\n self._channel.on(\n \"request\",\n lambda params: self.emit(\n Page.Events.Request, from_channel(params[\"request\"])\n ),\n )\n self._channel.on(\n \"requestFailed\",\n lambda params: self._on_request_failed(\n from_channel(params[\"request\"]),\n params[\"responseEndTiming\"],\n params[\"failureText\"],\n ),\n )\n self._channel.on(\n \"requestFinished\",\n lambda params: self._on_request_finished(\n from_channel(params[\"request\"]), params[\"responseEndTiming\"]\n ),\n )\n self._channel.on(\n \"response\",\n lambda params: self.emit(\n Page.Events.Response, from_channel(params[\"response\"])\n ),\n )\n self._channel.on(\n \"route\",\n lambda params: self._on_route(\n from_channel(params[\"route\"]), from_channel(params[\"request\"])\n ),\n )\n self._channel.on(\"video\", lambda params: self._on_video(params))\n self._channel.on(\n \"webSocket\",\n lambda params: self.emit(\n Page.Events.WebSocket, from_channel(params[\"webSocket\"])\n ),\n )\n self._channel.on(\n \"worker\", lambda params: self._on_worker(from_channel(params[\"worker\"]))\n )\n\n def __repr__(self) -> str:\n return f\"<Page url={self.url!r}>\"\n\n def _set_browser_context(self, context: \"BrowserContext\") -> None:\n self._browser_context = context\n self._timeout_settings = TimeoutSettings(context._timeout_settings)\n\n def _on_request_failed(\n self,\n request: Request,\n response_end_timing: float,\n failure_text: str = None,\n ) -> None:\n request._failure_text = failure_text\n if request._timing:\n request._timing[\"responseEnd\"] = response_end_timing\n self.emit(Page.Events.RequestFailed, request)\n\n def _on_request_finished(\n self, request: Request, response_end_timing: float\n ) -> None:\n if request._timing:\n request._timing[\"responseEnd\"] = response_end_timing\n 
self.emit(Page.Events.RequestFinished, request)\n\n def _on_frame_attached(self, frame: Frame) -> None:\n frame._page = self\n self._frames.append(frame)\n self.emit(Page.Events.FrameAttached, frame)\n\n def _on_frame_detached(self, frame: Frame) -> None:\n self._frames.remove(frame)\n frame._detached = True\n self.emit(Page.Events.FrameDetached, frame)\n\n def _on_route(self, route: Route, request: Request) -> None:\n for handler_entry in self._routes:\n if handler_entry.matcher.matches(request.url):\n result = cast(Any, handler_entry.handler)(route, request)\n if inspect.iscoroutine(result):\n asyncio.create_task(result)\n return\n self._browser_context._on_route(route, request)\n\n def _on_binding(self, binding_call: \"BindingCall\") -> None:\n func = self._bindings.get(binding_call._initializer[\"name\"])\n if func:\n asyncio.create_task(binding_call.call(func))\n self._browser_context._on_binding(binding_call)\n\n def _on_worker(self, worker: \"Worker\") -> None:\n self._workers.append(worker)\n worker._page = self\n self.emit(Page.Events.Worker, worker)\n\n def _on_close(self) -> None:\n self._is_closed = True\n self._browser_context._pages.remove(self)\n self.emit(Page.Events.Close)\n\n def _on_crash(self) -> None:\n self.emit(Page.Events.Crash)\n\n def _on_dialog(self, params: Any) -> None:\n dialog = from_channel(params[\"dialog\"])\n if self.listeners(Page.Events.Dialog):\n self.emit(Page.Events.Dialog, dialog)\n else:\n asyncio.create_task(dialog.dismiss())\n\n def _on_download(self, params: Any) -> None:\n url = params[\"url\"]\n suggested_filename = params[\"suggestedFilename\"]\n artifact = from_channel(params[\"artifact\"])\n self.emit(\n Page.Events.Download, Download(self, url, suggested_filename, artifact)\n )\n\n def _on_video(self, params: Any) -> None:\n artifact = from_channel(params[\"artifact\"])\n cast(Video, self.video)._artifact_ready(artifact)\n\n def _add_event_handler(self, event: str, k: Any, v: Any) -> None:\n if event == Page.Events.FileChooser and len(self.listeners(event)) == 0:\n self._channel.send_no_reply(\n \"setFileChooserInterceptedNoReply\", {\"intercepted\": True}\n )\n super()._add_event_handler(event, k, v)\n\n def remove_listener(self, event: str, f: Any) -> None:\n super().remove_listener(event, f)\n if event == Page.Events.FileChooser and len(self.listeners(event)) == 0:\n self._channel.send_no_reply(\n \"setFileChooserInterceptedNoReply\", {\"intercepted\": False}\n )\n\n @property\n def context(self) -> \"BrowserContext\":\n return self._browser_context\n\n async def opener(self) -> Optional[\"Page\"]:\n if self._opener and self._opener.is_closed():\n return None\n return self._opener\n\n @property\n def main_frame(self) -> Frame:\n return self._main_frame\n\n def frame(self, name: str = None, url: URLMatch = None) -> Optional[Frame]:\n matcher = URLMatcher(url) if url else None\n for frame in self._frames:\n if name and frame.name == name:\n return frame\n if url and matcher and matcher.matches(frame.url):\n return frame\n return None\n\n @property\n def frames(self) -> List[Frame]:\n return self._frames.copy()\n\n def set_default_navigation_timeout(self, timeout: float) -> None:\n self._timeout_settings.set_navigation_timeout(timeout)\n self._channel.send_no_reply(\n \"setDefaultNavigationTimeoutNoReply\", dict(timeout=timeout)\n )\n\n def set_default_timeout(self, timeout: float) -> None:\n self._timeout_settings.set_timeout(timeout)\n self._channel.send_no_reply(\"setDefaultTimeoutNoReply\", dict(timeout=timeout))\n\n async def 
query_selector(self, selector: str) -> Optional[ElementHandle]:\n return await self._main_frame.query_selector(selector)\n\n async def query_selector_all(self, selector: str) -> List[ElementHandle]:\n return await self._main_frame.query_selector_all(selector)\n\n async def wait_for_selector(\n self,\n selector: str,\n timeout: float = None,\n state: Literal[\"attached\", \"detached\", \"hidden\", \"visible\"] = None,\n ) -> Optional[ElementHandle]:\n return await self._main_frame.wait_for_selector(**locals_to_params(locals()))\n\n async def is_checked(self, selector: str, timeout: float = None) -> bool:\n return await self._main_frame.is_checked(**locals_to_params(locals()))\n\n async def is_disabled(self, selector: str, timeout: float = None) -> bool:\n return await self._main_frame.is_disabled(**locals_to_params(locals()))\n\n async def is_editable(self, selector: str, timeout: float = None) -> bool:\n return await self._main_frame.is_editable(**locals_to_params(locals()))\n\n async def is_enabled(self, selector: str, timeout: float = None) -> bool:\n return await self._main_frame.is_enabled(**locals_to_params(locals()))\n\n async def is_hidden(self, selector: str, timeout: float = None) -> bool:\n return await self._main_frame.is_hidden(**locals_to_params(locals()))\n\n async def is_visible(self, selector: str, timeout: float = None) -> bool:\n return await self._main_frame.is_visible(**locals_to_params(locals()))\n\n async def dispatch_event(\n self, selector: str, type: str, eventInit: Dict = None, timeout: float = None\n ) -> None:\n return await self._main_frame.dispatch_event(**locals_to_params(locals()))\n\n async def evaluate(self, expression: str, arg: Serializable = None) -> Any:\n return await self._main_frame.evaluate(expression, arg)\n\n async def evaluate_handle(\n self, expression: str, arg: Serializable = None\n ) -> JSHandle:\n return await self._main_frame.evaluate_handle(expression, arg)\n\n async def eval_on_selector(\n self,\n selector: str,\n expression: str,\n arg: Serializable = None,\n ) -> Any:\n return await self._main_frame.eval_on_selector(selector, expression, arg)\n\n async def eval_on_selector_all(\n self,\n selector: str,\n expression: str,\n arg: Serializable = None,\n ) -> Any:\n return await self._main_frame.eval_on_selector_all(selector, expression, arg)\n\n async def add_script_tag(\n self,\n url: str = None,\n path: Union[str, Path] = None,\n content: str = None,\n type: str = None,\n ) -> ElementHandle:\n return await self._main_frame.add_script_tag(**locals_to_params(locals()))\n\n async def add_style_tag(\n self, url: str = None, path: Union[str, Path] = None, content: str = None\n ) -> ElementHandle:\n return await self._main_frame.add_style_tag(**locals_to_params(locals()))\n\n async def expose_function(self, name: str, callback: Callable) -> None:\n await self.expose_binding(name, lambda source, *args: callback(*args))\n\n async def expose_binding(\n self, name: str, callback: Callable, handle: bool = None\n ) -> None:\n if name in self._bindings:\n raise Error(f'Function \"{name}\" has been already registered')\n if name in self._browser_context._bindings:\n raise Error(\n f'Function \"{name}\" has been already registered in the browser context'\n )\n self._bindings[name] = callback\n await self._channel.send(\n \"exposeBinding\", dict(name=name, needsHandle=handle or False)\n )\n\n async def set_extra_http_headers(self, headers: Dict[str, str]) -> None:\n await self._channel.send(\n \"setExtraHTTPHeaders\", 
dict(headers=serialize_headers(headers))\n )\n\n @property\n def url(self) -> str:\n return self._main_frame.url\n\n async def content(self) -> str:\n return await self._main_frame.content()\n\n async def set_content(\n self,\n html: str,\n timeout: float = None,\n waitUntil: DocumentLoadState = None,\n ) -> None:\n return await self._main_frame.set_content(**locals_to_params(locals()))\n\n async def goto(\n self,\n url: str,\n timeout: float = None,\n waitUntil: DocumentLoadState = None,\n referer: str = None,\n ) -> Optional[Response]:\n return await self._main_frame.goto(**locals_to_params(locals()))\n\n async def reload(\n self,\n timeout: float = None,\n waitUntil: DocumentLoadState = None,\n ) -> Optional[Response]:\n return from_nullable_channel(\n await self._channel.send(\"reload\", locals_to_params(locals()))\n )\n\n async def wait_for_load_state(\n self, state: DocumentLoadState = None, timeout: float = None\n ) -> None:\n return await self._main_frame.wait_for_load_state(**locals_to_params(locals()))\n\n async def wait_for_url(\n self,\n url: URLMatch,\n wait_until: DocumentLoadState = None,\n timeout: float = None,\n ) -> None:\n return await self._main_frame.wait_for_url(**locals_to_params(locals()))\n\n async def wait_for_event(\n self, event: str, predicate: Callable = None, timeout: float = None\n ) -> Any:\n async with self.expect_event(event, predicate, timeout) as event_info:\n pass\n return await event_info\n\n async def go_back(\n self,\n timeout: float = None,\n waitUntil: DocumentLoadState = None,\n ) -> Optional[Response]:\n return from_nullable_channel(\n await self._channel.send(\"goBack\", locals_to_params(locals()))\n )\n\n async def go_forward(\n self,\n timeout: float = None,\n waitUntil: DocumentLoadState = None,\n ) -> Optional[Response]:\n return from_nullable_channel(\n await self._channel.send(\"goForward\", locals_to_params(locals()))\n )\n\n async def emulate_media(\n self,\n media: Literal[\"print\", \"screen\"] = None,\n colorScheme: ColorScheme = None,\n ) -> None:\n await self._channel.send(\"emulateMedia\", locals_to_params(locals()))\n\n async def set_viewport_size(self, viewportSize: ViewportSize) -> None:\n self._viewport_size = viewportSize\n await self._channel.send(\"setViewportSize\", locals_to_params(locals()))\n\n @property\n def viewport_size(self) -> Optional[ViewportSize]:\n return self._viewport_size\n\n async def bring_to_front(self) -> None:\n await self._channel.send(\"bringToFront\")\n\n async def add_init_script(\n self, script: str = None, path: Union[str, Path] = None\n ) -> None:\n if path:\n with open(path, \"r\") as file:\n script = file.read()\n if not isinstance(script, str):\n raise Error(\"Either path or script parameter must be specified\")\n await self._channel.send(\"addInitScript\", dict(source=script))\n\n async def route(self, url: URLMatch, handler: RouteHandler) -> None:\n self._routes.append(RouteHandlerEntry(URLMatcher(url), handler))\n if len(self._routes) == 1:\n await self._channel.send(\n \"setNetworkInterceptionEnabled\", dict(enabled=True)\n )\n\n async def unroute(\n self, url: URLMatch, handler: Optional[RouteHandler] = None\n ) -> None:\n self._routes = list(\n filter(\n lambda r: r.matcher.match != url or (handler and r.handler != handler),\n self._routes,\n )\n )\n if len(self._routes) == 0:\n await self._channel.send(\n \"setNetworkInterceptionEnabled\", dict(enabled=False)\n )\n\n async def screenshot(\n self,\n timeout: float = None,\n type: Literal[\"jpeg\", \"png\"] = None,\n path: Union[str, 
Path] = None,\n quality: int = None,\n omitBackground: bool = None,\n fullPage: bool = None,\n clip: FloatRect = None,\n ) -> bytes:\n params = locals_to_params(locals())\n if \"path\" in params:\n del params[\"path\"]\n encoded_binary = await self._channel.send(\"screenshot\", params)\n decoded_binary = base64.b64decode(encoded_binary)\n if path:\n make_dirs_for_file(path)\n with open(path, \"wb\") as fd:\n fd.write(decoded_binary)\n return decoded_binary\n\n async def title(self) -> str:\n return await self._main_frame.title()\n\n async def close(self, runBeforeUnload: bool = None) -> None:\n try:\n await self._channel.send(\"close\", locals_to_params(locals()))\n if self._owned_context:\n await self._owned_context.close()\n except Exception as e:\n if not is_safe_close_error(e):\n raise e\n\n def is_closed(self) -> bool:\n return self._is_closed\n\n async def click(\n self,\n selector: str,\n modifiers: List[KeyboardModifier] = None,\n position: Position = None,\n delay: float = None,\n button: MouseButton = None,\n clickCount: int = None,\n timeout: float = None,\n force: bool = None,\n noWaitAfter: bool = None,\n ) -> None:\n return await self._main_frame.click(**locals_to_params(locals()))\n\n async def dblclick(\n self,\n selector: str,\n modifiers: List[KeyboardModifier] = None,\n position: Position = None,\n delay: float = None,\n button: MouseButton = None,\n timeout: float = None,\n force: bool = None,\n noWaitAfter: bool = None,\n ) -> None:\n return await self._main_frame.dblclick(**locals_to_params(locals()))\n\n async def tap(\n self,\n selector: str,\n modifiers: List[KeyboardModifier] = None,\n position: Position = None,\n timeout: float = None,\n force: bool = None,\n noWaitAfter: bool = None,\n ) -> None:\n return await self._main_frame.tap(**locals_to_params(locals()))\n\n async def fill(\n self, selector: str, value: str, timeout: float = None, noWaitAfter: bool = None\n ) -> None:\n return await self._main_frame.fill(**locals_to_params(locals()))\n\n async def focus(self, selector: str, timeout: float = None) -> None:\n return await self._main_frame.focus(**locals_to_params(locals()))\n\n async def text_content(self, selector: str, timeout: float = None) -> Optional[str]:\n return await self._main_frame.text_content(**locals_to_params(locals()))\n\n async def inner_text(self, selector: str, timeout: float = None) -> str:\n return await self._main_frame.inner_text(**locals_to_params(locals()))\n\n async def inner_html(self, selector: str, timeout: float = None) -> str:\n return await self._main_frame.inner_html(**locals_to_params(locals()))\n\n async def get_attribute(\n self, selector: str, name: str, timeout: float = None\n ) -> Optional[str]:\n return await self._main_frame.get_attribute(**locals_to_params(locals()))\n\n async def hover(\n self,\n selector: str,\n modifiers: List[KeyboardModifier] = None,\n position: Position = None,\n timeout: float = None,\n force: bool = None,\n ) -> None:\n return await self._main_frame.hover(**locals_to_params(locals()))\n\n async def select_option(\n self,\n selector: str,\n value: Union[str, List[str]] = None,\n index: Union[int, List[int]] = None,\n label: Union[str, List[str]] = None,\n element: Union[\"ElementHandle\", List[\"ElementHandle\"]] = None,\n timeout: float = None,\n noWaitAfter: bool = None,\n ) -> List[str]:\n params = locals_to_params(locals())\n return await self._main_frame.select_option(**params)\n\n async def set_input_files(\n self,\n selector: str,\n files: Union[str, Path, FilePayload, List[Union[str, 
Path]], List[FilePayload]],\n timeout: float = None,\n noWaitAfter: bool = None,\n ) -> None:\n return await self._main_frame.set_input_files(**locals_to_params(locals()))\n\n async def type(\n self,\n selector: str,\n text: str,\n delay: float = None,\n timeout: float = None,\n noWaitAfter: bool = None,\n ) -> None:\n return await self._main_frame.type(**locals_to_params(locals()))\n\n async def press(\n self,\n selector: str,\n key: str,\n delay: float = None,\n timeout: float = None,\n noWaitAfter: bool = None,\n ) -> None:\n return await self._main_frame.press(**locals_to_params(locals()))\n\n async def check(\n self,\n selector: str,\n position: Position = None,\n timeout: float = None,\n force: bool = None,\n noWaitAfter: bool = None,\n ) -> None:\n return await self._main_frame.check(**locals_to_params(locals()))\n\n async def uncheck(\n self,\n selector: str,\n position: Position = None,\n timeout: float = None,\n force: bool = None,\n noWaitAfter: bool = None,\n ) -> None:\n return await self._main_frame.uncheck(**locals_to_params(locals()))\n\n async def wait_for_timeout(self, timeout: float) -> None:\n await self._main_frame.wait_for_timeout(timeout)\n\n async def wait_for_function(\n self,\n expression: str,\n arg: Serializable = None,\n timeout: float = None,\n polling: Union[float, Literal[\"raf\"]] = None,\n ) -> JSHandle:\n return await self._main_frame.wait_for_function(**locals_to_params(locals()))\n\n @property\n def workers(self) -> List[\"Worker\"]:\n return self._workers.copy()\n\n async def pause(self) -> None:\n await self._browser_context._pause()\n\n async def pdf(\n self,\n scale: float = None,\n displayHeaderFooter: bool = None,\n headerTemplate: str = None,\n footerTemplate: str = None,\n printBackground: bool = None,\n landscape: bool = None,\n pageRanges: str = None,\n format: str = None,\n width: Union[str, float] = None,\n height: Union[str, float] = None,\n preferCSSPageSize: bool = None,\n margin: PdfMargins = None,\n path: Union[str, Path] = None,\n ) -> bytes:\n params = locals_to_params(locals())\n if \"path\" in params:\n del params[\"path\"]\n encoded_binary = await self._channel.send(\"pdf\", params)\n decoded_binary = base64.b64decode(encoded_binary)\n if path:\n make_dirs_for_file(path)\n with open(path, \"wb\") as fd:\n fd.write(decoded_binary)\n return decoded_binary\n\n @property\n def video(\n self,\n ) -> Optional[Video]:\n if not self._video:\n self._video = Video(self)\n return self._video\n\n def expect_event(\n self,\n event: str,\n predicate: Callable = None,\n timeout: float = None,\n ) -> EventContextManagerImpl:\n if timeout is None:\n timeout = self._timeout_settings.timeout()\n wait_helper = WaitHelper(self, f\"page.expect_event({event})\")\n wait_helper.reject_on_timeout(\n timeout, f'Timeout while waiting for event \"{event}\"'\n )\n if event != Page.Events.Crash:\n wait_helper.reject_on_event(self, Page.Events.Crash, Error(\"Page crashed\"))\n if event != Page.Events.Close:\n wait_helper.reject_on_event(self, Page.Events.Close, Error(\"Page closed\"))\n wait_helper.wait_for_event(self, event, predicate)\n return EventContextManagerImpl(wait_helper.result())\n\n def expect_console_message(\n self,\n predicate: Callable[[ConsoleMessage], bool] = None,\n timeout: float = None,\n ) -> EventContextManagerImpl[ConsoleMessage]:\n return self.expect_event(Page.Events.Console, predicate, timeout)\n\n def expect_download(\n self,\n predicate: Callable[[Download], bool] = None,\n timeout: float = None,\n ) -> 
EventContextManagerImpl[Download]:\n return self.expect_event(Page.Events.Download, predicate, timeout)\n\n def expect_file_chooser(\n self,\n predicate: Callable[[FileChooser], bool] = None,\n timeout: float = None,\n ) -> EventContextManagerImpl[FileChooser]:\n return self.expect_event(Page.Events.FileChooser, predicate, timeout)\n\n def expect_navigation(\n self,\n url: URLMatch = None,\n wait_until: DocumentLoadState = None,\n timeout: float = None,\n ) -> EventContextManagerImpl[Response]:\n return self.main_frame.expect_navigation(url, wait_until, timeout)\n\n def expect_popup(\n self,\n predicate: Callable[[\"Page\"], bool] = None,\n timeout: float = None,\n ) -> EventContextManagerImpl[\"Page\"]:\n return self.expect_event(Page.Events.Popup, predicate, timeout)\n\n def expect_request(\n self,\n url_or_predicate: URLMatchRequest,\n timeout: float = None,\n ) -> EventContextManagerImpl[Request]:\n matcher = None if callable(url_or_predicate) else URLMatcher(url_or_predicate)\n predicate = url_or_predicate if callable(url_or_predicate) else None\n\n def my_predicate(request: Request) -> bool:\n if matcher:\n return matcher.matches(request.url)\n if predicate:\n return url_or_predicate(request)\n return True\n\n return self.expect_event(\n Page.Events.Request, predicate=my_predicate, timeout=timeout\n )\n\n def expect_response(\n self,\n url_or_predicate: URLMatchResponse,\n timeout: float = None,\n ) -> EventContextManagerImpl[Response]:\n matcher = None if callable(url_or_predicate) else URLMatcher(url_or_predicate)\n predicate = url_or_predicate if callable(url_or_predicate) else None\n\n def my_predicate(response: Response) -> bool:\n if matcher:\n return matcher.matches(response.url)\n if predicate:\n return predicate(response)\n return True\n\n return self.expect_event(\n Page.Events.Response, predicate=my_predicate, timeout=timeout\n )\n\n def expect_worker(\n self,\n predicate: Callable[[\"Worker\"], bool] = None,\n timeout: float = None,\n ) -> EventContextManagerImpl[\"Worker\"]:\n return self.expect_event(\"worker\", predicate, timeout)\n\n\nclass Worker(ChannelOwner):\n Events = SimpleNamespace(Close=\"close\")\n\n def __init__(\n self, parent: ChannelOwner, type: str, guid: str, initializer: Dict\n ) -> None:\n super().__init__(parent, type, guid, initializer)\n self._channel.on(\"close\", lambda _: self._on_close())\n self._page: Optional[Page] = None\n self._context: Optional[\"BrowserContext\"] = None\n\n def __repr__(self) -> str:\n return f\"<Worker url={self.url!r}>\"\n\n def _on_close(self) -> None:\n if self._page:\n self._page._workers.remove(self)\n if self._context:\n self._context._service_workers.remove(self)\n self.emit(Worker.Events.Close, self)\n\n @property\n def url(self) -> str:\n return self._initializer[\"url\"]\n\n async def evaluate(self, expression: str, arg: Serializable = None) -> Any:\n return parse_result(\n await self._channel.send(\n \"evaluateExpression\",\n dict(\n expression=expression,\n arg=serialize_argument(arg),\n ),\n )\n )\n\n async def evaluate_handle(\n self, expression: str, arg: Serializable = None\n ) -> JSHandle:\n return from_channel(\n await self._channel.send(\n \"evaluateExpressionHandle\",\n dict(\n expression=expression,\n arg=serialize_argument(arg),\n ),\n )\n )\n\n\nclass BindingCall(ChannelOwner):\n def __init__(\n self, parent: ChannelOwner, type: str, guid: str, initializer: Dict\n ) -> None:\n super().__init__(parent, type, guid, initializer)\n\n async def call(self, func: Callable) -> None:\n try:\n frame = 
from_channel(self._initializer[\"frame\"])\n source = dict(context=frame._page.context, page=frame._page, frame=frame)\n if self._initializer.get(\"handle\"):\n result = func(source, from_channel(self._initializer[\"handle\"]))\n else:\n func_args = list(map(parse_result, self._initializer[\"args\"]))\n result = func(source, *func_args)\n if inspect.iscoroutine(result):\n result = await result\n await self._channel.send(\"resolve\", dict(result=serialize_argument(result)))\n except Exception as e:\n tb = sys.exc_info()[2]\n asyncio.create_task(\n self._channel.send(\n \"reject\", dict(error=dict(error=serialize_error(e, tb)))\n )\n )\n", "path": "playwright/_impl/_page.py" } ]
diff --git a/playwright/_impl/_page.py b/playwright/_impl/_page.py index 340a473dc..5c71a692c 100644 --- a/playwright/_impl/_page.py +++ b/playwright/_impl/_page.py @@ -779,8 +779,6 @@ async def pdf( def video( self, ) -> Optional[Video]: - if "recordVideo" not in self._browser_context._options: - return None if not self._video: self._video = Video(self) return self._video diff --git a/tests/async/test_interception.py b/tests/async/test_interception.py index fa1667cf2..9304bc544 100644 --- a/tests/async/test_interception.py +++ b/tests/async/test_interception.py @@ -580,8 +580,6 @@ async def handle_route(route, request): assert "failed" in exc.value.message -# RELEASE BLOCKER: Temporary upstream issue https://github.com/microsoft/playwright-python/issues/608 [email protected]_browser("chromium") async def test_page_route_should_support_cors_with_POST(page, server): await page.goto(server.EMPTY_PAGE) await page.route( @@ -609,8 +607,6 @@ async def test_page_route_should_support_cors_with_POST(page, server): assert resp == ["electric", "gas"] -# RELEASE BLOCKER: Temporary upstream issue https://github.com/microsoft/playwright-python/issues/608 [email protected]_browser("chromium") async def test_page_route_should_support_cors_for_different_methods(page, server): await page.goto(server.EMPTY_PAGE) await page.route( diff --git a/tests/async/test_video.py b/tests/async/test_video.py index 7d8a900e9..d55e07135 100644 --- a/tests/async/test_video.py +++ b/tests/async/test_video.py @@ -14,10 +14,6 @@ import os -import pytest - -from playwright.async_api import Error - async def test_should_expose_video_path(browser, tmpdir, server): page = await browser.new_page(record_video_dir=tmpdir) @@ -36,10 +32,8 @@ async def test_short_video_should_throw(browser, tmpdir, server): assert os.path.exists(path) -# RELEASE BLOCKER: Temporary upstream issue https://github.com/microsoft/playwright-python/issues/608 [email protected]() async def test_short_video_should_throw_persistent_context( - browser_type, tmpdir, launch_arguments + browser_type, tmpdir, launch_arguments, server ): context = await browser_type.launch_persistent_context( str(tmpdir), @@ -48,7 +42,8 @@ async def test_short_video_should_throw_persistent_context( record_video_dir=str(tmpdir) + "1", ) page = context.pages[0] + await page.goto(server.PREFIX + "/grid.html") await context.close() - with pytest.raises(Error) as exc_info: - await page.video.path() - assert "Page closed" in exc_info.value.message + + path = await page.video.path() + assert str(tmpdir) in str(path) diff --git a/tests/sync/test_video.py b/tests/sync/test_video.py index 2d66ae428..b1bbfa348 100644 --- a/tests/sync/test_video.py +++ b/tests/sync/test_video.py @@ -14,8 +14,6 @@ import os -import pytest - def test_should_expose_video_path(browser, tmpdir, server): page = browser.new_page( @@ -46,8 +44,6 @@ def test_record_video_to_path(browser, tmpdir, server): assert os.path.exists(path) -# RELEASE BLOCKER: Temporary upstream issue https://github.com/microsoft/playwright-python/issues/608 [email protected]() def test_record_video_to_path_persistent( browser_type, tmpdir, server, launch_arguments ): @@ -60,3 +56,16 @@ def test_record_video_to_path_persistent( assert str(tmpdir) in str(path) context.close() assert os.path.exists(path) + + +def test_record_video_can_get_video_path_immediately( + browser_type, tmpdir, launch_arguments +): + context = browser_type.launch_persistent_context( + tmpdir, **launch_arguments, record_video_dir=tmpdir + ) + page = context.pages[0] + path = 
page.video.path() + assert str(tmpdir) in str(path) + context.close() + assert os.path.exists(path)
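The diff above removes the `recordVideo` guard from the `Page.video` property, so `page.video` now always returns a `Video` object instead of `None` when the context's options dictionary lacks a `recordVideo` key (which appears to have been the case for persistent contexts, breaking `page.video.path()`). A minimal sketch of the new behaviour, mirroring the sync test added in the diff (the `browser_type`, `tmpdir`, and `launch_arguments` fixture names are assumptions taken from that test):

```python
import os

# Sketch based on test_record_video_can_get_video_path_immediately above;
# browser_type, tmpdir and launch_arguments are pytest fixtures, not real globals.
context = browser_type.launch_persistent_context(
    tmpdir, **launch_arguments, record_video_dir=tmpdir
)
page = context.pages[0]
path = page.video.path()         # available immediately; no longer None
assert str(tmpdir) in str(path)
context.close()
assert os.path.exists(path)      # the video file exists once the context closes
```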
dotkom__onlineweb4-1220
Tags with a '.' will crash
Ref. http://moonshine.online.ntnu.no/article/10/online-far-ny-nettside
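A minimal sketch of the sanitization the fix below applies in `TagAdmin.save_model` (the tag name here is an illustrative assumption):

```python
# The fix strips dots from a tag's name before the tag is first saved.
name = "web 2.0"                  # hypothetical tag name containing a dot
sanitized = name.replace('.', '')
assert sanitized == "web 20"
```

Note that the `if not change:` guard in the fix means only newly created tags are sanitized; tags that already exist with a '.' in their name keep it.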
[ { "content": "from django.contrib import admin\nfrom apps.article.models import Article, Tag, ArticleTag\nfrom django.conf import settings\nfrom filebrowser.settings import VERSIONS, ADMIN_THUMBNAIL\n\n\nclass ArticleTagAdmin(admin.ModelAdmin):\n model = ArticleTag\n\n\nclass ArticleTagInline(admin.TabularInline):\n model = ArticleTag\n max_num = 99\n extra = 0\n\n\nclass TagAdmin(admin.ModelAdmin):\n def save_model(self, request, obj, form, change):\n obj.changed_by = request.user\n if not change:\n obj.created_by = request.user\n obj.save()\n\n\nclass ArticleAdmin(admin.ModelAdmin):\n inlines = (ArticleTagInline,)\n list_display = (\"heading\", \"created_by\", \"changed_by\")\n\n # set the created and changed by fields\n def save_model(self, request, obj, form, change):\n if (obj.image):\n obj.image.version_generate(ADMIN_THUMBNAIL).url\n\n # Itterate the different versions (by key)\n for ver in VERSIONS.keys():\n # Check if the key start with article_ (if it does, we want to crop to that size)\n if ver.startswith('article_'):\n obj.image.version_generate(ver).url\n\n obj.changed_by = request.user\n\n if not change:\n obj.created_by = request.user\n obj.save()\n\n def save_formset(self, request, form, formset, change):\n instances = formset.save(commit=False)\n for instances in instances:\n instances.save()\n\nadmin.site.register(Article, ArticleAdmin)\nadmin.site.register(Tag, TagAdmin)\n", "path": "apps/article/admin.py" } ]
[ { "content": "from django.contrib import admin\nfrom apps.article.models import Article, Tag, ArticleTag\nfrom django.conf import settings\nfrom filebrowser.settings import VERSIONS, ADMIN_THUMBNAIL\n\n\nclass ArticleTagAdmin(admin.ModelAdmin):\n model = ArticleTag\n\n\nclass ArticleTagInline(admin.TabularInline):\n model = ArticleTag\n max_num = 99\n extra = 0\n\n\nclass TagAdmin(admin.ModelAdmin):\n def save_model(self, request, obj, form, change):\n obj.changed_by = request.user\n if not change:\n obj.name = obj.name.replace('.', '')\n obj.created_by = request.user\n obj.save()\n\n\nclass ArticleAdmin(admin.ModelAdmin):\n inlines = (ArticleTagInline,)\n list_display = (\"heading\", \"created_by\", \"changed_by\")\n\n # set the created and changed by fields\n def save_model(self, request, obj, form, change):\n if (obj.image):\n obj.image.version_generate(ADMIN_THUMBNAIL).url\n\n # Itterate the different versions (by key)\n for ver in VERSIONS.keys():\n # Check if the key start with article_ (if it does, we want to crop to that size)\n if ver.startswith('article_'):\n obj.image.version_generate(ver).url\n\n obj.changed_by = request.user\n\n if not change:\n obj.created_by = request.user\n obj.save()\n\n def save_formset(self, request, form, formset, change):\n instances = formset.save(commit=False)\n for instances in instances:\n instances.save()\n\nadmin.site.register(Article, ArticleAdmin)\nadmin.site.register(Tag, TagAdmin)\n", "path": "apps/article/admin.py" } ]
diff --git a/apps/article/admin.py b/apps/article/admin.py index ea7facf14..205fab1f4 100755 --- a/apps/article/admin.py +++ b/apps/article/admin.py @@ -18,6 +18,7 @@ class TagAdmin(admin.ModelAdmin): def save_model(self, request, obj, form, change): obj.changed_by = request.user if not change: + obj.name = obj.name.replace('.', '') obj.created_by = request.user obj.save()
gammapy__gammapy-5146
Incorrect return type for `Geom.energy_mask`
`Geom.energy_mask` returns a `Map` with the same geometry as the parent `Geom`. The current documentation incorrectly states that the return type is `np.array`:
https://github.com/gammapy/gammapy/blob/e5aecb334e7aebe304affd96b545a636019f7626/gammapy/maps/geom.py#L628
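As the `energy_mask` implementation in `before_files` below shows, the method ends with `return Map.from_geom(geom=self, data=data, dtype=data.dtype)`, so the documented return type should be a `Map`, not `~numpy.ndarray`. A minimal sketch of the observed behaviour (the axis and geometry parameters are illustrative assumptions):

```python
import astropy.units as u
from gammapy.maps import MapAxis, WcsGeom

# Build a small WCS geometry with an energy axis.
energy_axis = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=3)
geom = WcsGeom.create(npix=(2, 2), axes=[energy_axis])

mask = geom.energy_mask(energy_min=3 * u.TeV)
print(type(mask))        # a Map subclass (WcsNDMap), not a plain numpy array
print(mask.data.dtype)   # bool
```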
[ { "content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport abc\nimport copy\nimport html\nimport inspect\nimport logging\nimport numpy as np\nfrom astropy import units as u\nfrom astropy.io import fits\nfrom .io import find_bands_hdu, find_hdu\nfrom .utils import INVALID_INDEX\n\n__all__ = [\"Geom\"]\n\nlog = logging.getLogger(__name__)\n\n\ndef get_shape(param):\n if param is None:\n return tuple()\n\n if not isinstance(param, tuple):\n param = [param]\n\n return max([np.array(p, ndmin=1).shape for p in param])\n\n\ndef pix_tuple_to_idx(pix):\n \"\"\"Convert a tuple of pixel coordinate arrays to a tuple of pixel indices.\n\n Pixel coordinates are rounded to the closest integer value.\n\n Parameters\n ----------\n pix : tuple\n Tuple of pixel coordinates with one element for each dimension.\n\n Returns\n -------\n idx : `~numpy.ndarray`\n Array of pixel indices.\n \"\"\"\n idx = []\n for p in pix:\n p = np.array(p, ndmin=1)\n if np.issubdtype(p.dtype, np.integer):\n idx += [p]\n else:\n with np.errstate(invalid=\"ignore\"):\n p_idx = np.rint(p).astype(int)\n p_idx[~np.isfinite(p)] = INVALID_INDEX.int\n idx += [p_idx]\n\n return tuple(idx)\n\n\nclass Geom(abc.ABC):\n \"\"\"Map geometry base class.\n\n See also: `~gammapy.maps.WcsGeom` and `~gammapy.maps.HpxGeom`.\n \"\"\"\n\n # workaround for the lru_cache pickle issue\n # see e.g. https://github.com/cloudpipe/cloudpickle/issues/178\n def __getstate__(self):\n state = self.__dict__.copy()\n for key, value in state.items():\n func = getattr(value, \"__wrapped__\", None)\n if func is not None:\n state[key] = func\n return state\n\n def _repr_html_(self):\n try:\n return self.to_html()\n except AttributeError:\n return f\"<pre>{html.escape(str(self))}</pre>\"\n\n @property\n @abc.abstractmethod\n def data_shape(self):\n \"\"\"Shape of the Numpy data array matching this geometry.\"\"\"\n pass\n\n def data_nbytes(self, dtype=\"float32\"):\n \"\"\"Estimate memory usage in megabytes of the Numpy data array\n matching this geometry depending on the given type.\n\n Parameters\n ----------\n dtype : str, optional\n The desired data-type for the array. Default is \"float32\".\n\n Returns\n -------\n memory : `~astropy.units.Quantity`\n Estimated memory usage in megabytes (MB).\n \"\"\"\n return (np.empty(self.data_shape, dtype).nbytes * u.byte).to(\"MB\")\n\n @property\n @abc.abstractmethod\n def is_allsky(self):\n pass\n\n @property\n @abc.abstractmethod\n def center_coord(self):\n pass\n\n @property\n @abc.abstractmethod\n def center_pix(self):\n pass\n\n @property\n @abc.abstractmethod\n def center_skydir(self):\n pass\n\n @classmethod\n def from_hdulist(cls, hdulist, hdu=None, hdu_bands=None):\n \"\"\"Load a geometry object from a FITS HDUList.\n\n Parameters\n ----------\n hdulist : `~astropy.io.fits.HDUList`\n HDU list containing HDUs for map data and bands.\n hdu : str or int, optional\n Name or index of the HDU with the map data. Default is None.\n hdu_bands : str, optional\n Name or index of the HDU with the BANDS table. If not\n defined this will be inferred from the FITS header of the\n map HDU. 
Default is None.\n\n Returns\n -------\n geom : `~Geom`\n Geometry object.\n \"\"\"\n if hdu is None:\n hdu = find_hdu(hdulist)\n else:\n hdu = hdulist[hdu]\n\n if hdu_bands is None:\n hdu_bands = find_bands_hdu(hdulist, hdu)\n\n if hdu_bands is not None:\n hdu_bands = hdulist[hdu_bands]\n\n return cls.from_header(hdu.header, hdu_bands)\n\n def to_bands_hdu(self, hdu_bands=None, format=\"gadf\"):\n table_hdu = self.axes.to_table_hdu(format=format, hdu_bands=hdu_bands)\n cols = table_hdu.columns.columns\n cols.extend(self._make_bands_cols())\n return fits.BinTableHDU.from_columns(\n cols, header=table_hdu.header, name=table_hdu.name\n )\n\n @abc.abstractmethod\n def _make_bands_cols(self):\n pass\n\n @abc.abstractmethod\n def get_idx(self, idx=None, local=False, flat=False):\n \"\"\"Get tuple of pixel indices for this geometry.\n\n Returns all pixels in the geometry by default. Pixel indices\n for a single image plane can be accessed by setting ``idx``\n to the index tuple of a plane.\n\n Parameters\n ----------\n idx : tuple, optional\n A tuple of indices with one index for each non-spatial\n dimension. If defined only pixels for the image plane with\n this index will be returned. If none then all pixels\n will be returned. Default is None.\n local : bool, optional\n Flag to return local or global pixel indices. Local\n indices run from 0 to the number of pixels in a given\n image plane. Default is False.\n flat : bool, optional\n Return a flattened array containing only indices for\n pixels contained in the geometry. Default is False.\n\n Returns\n -------\n idx : tuple\n Tuple of pixel index vectors with one vector for each\n dimension.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def get_coord(self, idx=None, flat=False):\n \"\"\"Get the coordinate array for this geometry.\n\n Returns a coordinate array with the same shape as the data\n array. Pixels outside the geometry are set to NaN.\n Coordinates for a single image plane can be accessed by\n setting ``idx`` to the index tuple of a plane.\n\n Parameters\n ----------\n idx : tuple, optional\n A tuple of indices with one index for each non-spatial\n dimension. If defined only coordinates for the image\n plane with this index will be returned. If none then\n coordinates for all pixels will be returned. Default is None.\n flat : bool, optional\n Return a flattened array containing only coordinates for\n pixels contained in the geometry. Default is False.\n\n Returns\n -------\n coords : tuple\n Tuple of coordinate vectors with one vector for each\n dimension.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def coord_to_pix(self, coords):\n \"\"\"Convert map coordinates to pixel coordinates.\n\n Parameters\n ----------\n coords : tuple\n Coordinate values in each dimension of the map. This can\n either be a tuple of numpy arrays or a MapCoord object.\n If passed as a tuple then the ordering should be\n (longitude, latitude, c_0, ..., c_N) where c_i is the\n coordinate vector for axis i.\n\n Returns\n -------\n pix : tuple\n Tuple of pixel coordinates in image and band dimensions.\n \"\"\"\n pass\n\n def coord_to_idx(self, coords, clip=False):\n \"\"\"Convert map coordinates to pixel indices.\n\n Parameters\n ----------\n coords : tuple or `~MapCoord`\n Coordinate values in each dimension of the map. 
This can\n either be a tuple of numpy arrays or a MapCoord object.\n If passed as a tuple then the ordering should be\n (longitude, latitude, c_0, ..., c_N) where c_i is the\n coordinate vector for axis i.\n clip : bool\n Choose whether to clip indices to the valid range of the\n geometry. If False then indices for coordinates outside\n the geometry range will be set -1. Default is False.\n\n Returns\n -------\n pix : tuple\n Tuple of pixel indices in image and band dimensions.\n Elements set to -1 correspond to coordinates outside the\n map.\n \"\"\"\n pix = self.coord_to_pix(coords)\n return self.pix_to_idx(pix, clip=clip)\n\n @abc.abstractmethod\n def pix_to_coord(self, pix):\n \"\"\"Convert pixel coordinates to map coordinates.\n\n Parameters\n ----------\n pix : tuple\n Tuple of pixel coordinates.\n\n Returns\n -------\n coords : tuple\n Tuple of map coordinates.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def pix_to_idx(self, pix, clip=False):\n \"\"\"Convert pixel coordinates to pixel indices.\n\n Returns -1 for pixel coordinates that lie outside the map.\n\n Parameters\n ----------\n pix : tuple\n Tuple of pixel coordinates.\n clip : bool\n Choose whether to clip indices to the valid range of the\n geometry. If False then indices for coordinates outside\n the geometry range will be set -1. Default is False.\n\n Returns\n -------\n idx : tuple\n Tuple of pixel indices.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def contains(self, coords):\n \"\"\"Check if a given map coordinate is contained in the geometry.\n\n Parameters\n ----------\n coords : tuple or `~gammapy.maps.MapCoord`\n Tuple of map coordinates.\n\n Returns\n -------\n containment : `~numpy.ndarray`\n Bool array.\n \"\"\"\n pass\n\n def contains_pix(self, pix):\n \"\"\"Check if a given pixel coordinate is contained in the geometry.\n\n Parameters\n ----------\n pix : tuple\n Tuple of pixel coordinates.\n\n Returns\n -------\n containment : `~numpy.ndarray`\n Bool array.\n \"\"\"\n idx = self.pix_to_idx(pix)\n return np.all(np.stack([t != INVALID_INDEX.int for t in idx]), axis=0)\n\n def slice_by_idx(self, slices):\n \"\"\"Create a new geometry by slicing the non-spatial axes.\n\n Parameters\n ----------\n slices : dict\n Dictionary of axes names and integers or `slice` object pairs. Contains one\n element for each non-spatial dimension. For integer indexing the\n corresponding axes is dropped from the map. Axes not specified in the\n dict are kept unchanged.\n\n Returns\n -------\n geom : `~Geom`\n Sliced geometry.\n \"\"\"\n axes = self.axes.slice_by_idx(slices)\n return self._init_copy(axes=axes)\n\n def rename_axes(self, names, new_names):\n \"\"\"Rename axes contained in the geometry.\n\n Parameters\n ----------\n names : list or str\n Names of the axes.\n new_names : list or str\n New names of the axes. 
The list must be of same length than `names`.\n\n Returns\n -------\n geom : `~Geom`\n Renamed geometry.\n \"\"\"\n axes = self.axes.rename_axes(names=names, new_names=new_names)\n return self._init_copy(axes=axes)\n\n @property\n def as_energy_true(self):\n \"\"\"If the geom contains an axis named 'energy' rename it to 'energy_true'.\"\"\"\n return self.rename_axes(names=\"energy\", new_names=\"energy_true\")\n\n @property\n def has_energy_axis(self):\n \"\"\"Whether geom has an energy axis (either 'energy' or 'energy_true').\"\"\"\n return (\"energy\" in self.axes.names) ^ (\"energy_true\" in self.axes.names)\n\n @abc.abstractmethod\n def to_image(self):\n \"\"\"Create a 2D image geometry (drop non-spatial dimensions).\n\n Returns\n -------\n geom : `~Geom`\n Image geometry.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def to_cube(self, axes):\n \"\"\"Append non-spatial axes to create a higher-dimensional geometry.\n\n This will result in a new geometry with\n N+M dimensions where N is the number of current dimensions and\n M is the number of axes in the list.\n\n Parameters\n ----------\n axes : list\n Axes that will be appended to this geometry.\n\n Returns\n -------\n geom : `~Geom`\n Map geometry.\n \"\"\"\n pass\n\n def squash(self, axis_name):\n \"\"\"Squash geom axis.\n\n Parameters\n ----------\n axis_name : str\n Axis to squash.\n\n Returns\n -------\n geom : `Geom`\n Geom with squashed axis.\n \"\"\"\n axes = self.axes.squash(axis_name=axis_name)\n return self.to_image().to_cube(axes=axes)\n\n def drop(self, axis_name):\n \"\"\"Drop an axis from the geom.\n\n Parameters\n ----------\n axis_name : str\n Name of the axis to remove.\n\n Returns\n -------\n geom : `Geom`\n New geom with the axis removed.\n \"\"\"\n axes = self.axes.drop(axis_name=axis_name)\n return self.to_image().to_cube(axes=axes)\n\n def pad(self, pad_width, axis_name):\n \"\"\"\n Pad the geometry at the edges.\n\n Parameters\n ----------\n pad_width : {sequence, array_like, int}\n Number of values padded to the edges of each axis.\n axis_name : str\n Name of the axis to pad.\n\n Returns\n -------\n geom : `~Geom`\n Padded geometry.\n \"\"\"\n if axis_name is None:\n return self._pad_spatial(pad_width)\n else:\n axes = self.axes.pad(axis_name=axis_name, pad_width=pad_width)\n return self.to_image().to_cube(axes)\n\n @abc.abstractmethod\n def _pad_spatial(self, pad_width):\n pass\n\n @abc.abstractmethod\n def crop(self, crop_width):\n \"\"\"\n Crop the geometry at the edges.\n\n Parameters\n ----------\n crop_width : {sequence, array_like, int}\n Number of values cropped from the edges of each axis.\n\n Returns\n -------\n geom : `~Geom`\n Cropped geometry.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def downsample(self, factor, axis_name):\n \"\"\"Downsample the spatial dimension of the geometry by a given factor.\n\n Parameters\n ----------\n factor : int\n Downsampling factor.\n axis_name : str\n Axis to downsample.\n\n Returns\n -------\n geom : `~Geom`\n Downsampled geometry.\n\n \"\"\"\n pass\n\n @abc.abstractmethod\n def upsample(self, factor, axis_name=None):\n \"\"\"Upsample the spatial dimension of the geometry by a given factor.\n\n Parameters\n ----------\n factor : int\n Upsampling factor.\n axis_name : str\n Axis to upsample.\n\n Returns\n -------\n geom : `~Geom`\n Upsampled geometry.\n\n \"\"\"\n pass\n\n def resample_axis(self, axis):\n \"\"\"Resample geom to a new axis binning.\n\n This method groups the existing bins into a new binning.\n\n Parameters\n ----------\n axis : `MapAxis`\n New map 
axis.\n\n Returns\n -------\n map : `Geom`\n Geom with resampled axis.\n \"\"\"\n axes = self.axes.resample(axis=axis)\n return self._init_copy(axes=axes)\n\n def replace_axis(self, axis):\n \"\"\"Replace axis with a new one.\n\n Parameters\n ----------\n axis : `MapAxis`\n New map axis.\n\n Returns\n -------\n map : `Geom`\n Geom with replaced axis.\n \"\"\"\n axes = self.axes.replace(axis=axis)\n return self._init_copy(axes=axes)\n\n @abc.abstractmethod\n def solid_angle(self):\n \"\"\"Solid angle as a `~astropy.units.Quantity` object (in ``sr``).\"\"\"\n pass\n\n @property\n def is_image(self):\n \"\"\"Whether the geom is an image without extra dimensions.\"\"\"\n if self.axes is None:\n return True\n return len(self.axes) == 0\n\n @property\n def is_flat(self):\n \"\"\"Whether the geom non-spatial axes have length 1, equivalent to an image.\"\"\"\n if self.is_image:\n return True\n else:\n valid = True\n for axis in self.axes:\n valid = valid and (axis.nbin == 1)\n return valid\n\n def _init_copy(self, **kwargs):\n \"\"\"Init map geom instance by copying missing init arguments from self.\"\"\"\n argnames = inspect.getfullargspec(self.__init__).args\n argnames.remove(\"self\")\n\n for arg in argnames:\n value = getattr(self, \"_\" + arg)\n if arg not in kwargs:\n kwargs[arg] = copy.deepcopy(value)\n\n return self.__class__(**kwargs)\n\n def copy(self, **kwargs):\n \"\"\"Copy and overwrite given attributes.\n\n Parameters\n ----------\n **kwargs : dict\n Keyword arguments to overwrite in the map geometry constructor.\n\n Returns\n -------\n copy : `Geom`\n Copied map geometry.\n \"\"\"\n return self._init_copy(**kwargs)\n\n def energy_mask(self, energy_min=None, energy_max=None, round_to_edge=False):\n \"\"\"Create a mask for a given energy range.\n\n The energy bin must be fully contained to be included in the mask.\n\n Parameters\n ----------\n energy_min, energy_max : `~astropy.units.Quantity`\n Energy range.\n\n Returns\n -------\n mask : `~numpy.ndarray`\n Energy mask.\n \"\"\"\n from . import Map\n\n # get energy axes and values\n energy_axis = self.axes[\"energy\"]\n\n if round_to_edge:\n energy_min, energy_max = energy_axis.round([energy_min, energy_max])\n\n # TODO: make this more general\n shape = (-1, 1) if self.is_hpx else (-1, 1, 1)\n energy_edges = energy_axis.edges.reshape(shape)\n\n # set default values\n energy_min = energy_min if energy_min is not None else energy_edges[0]\n energy_max = energy_max if energy_max is not None else energy_edges[-1]\n\n mask = (energy_edges[:-1] >= energy_min) & (energy_edges[1:] <= energy_max)\n data = np.broadcast_to(mask, shape=self.data_shape)\n return Map.from_geom(geom=self, data=data, dtype=data.dtype)\n", "path": "gammapy/maps/geom.py" } ]
[ { "content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport abc\nimport copy\nimport html\nimport inspect\nimport logging\nimport numpy as np\nfrom astropy import units as u\nfrom astropy.io import fits\nfrom .io import find_bands_hdu, find_hdu\nfrom .utils import INVALID_INDEX\n\n__all__ = [\"Geom\"]\n\nlog = logging.getLogger(__name__)\n\n\ndef get_shape(param):\n if param is None:\n return tuple()\n\n if not isinstance(param, tuple):\n param = [param]\n\n return max([np.array(p, ndmin=1).shape for p in param])\n\n\ndef pix_tuple_to_idx(pix):\n \"\"\"Convert a tuple of pixel coordinate arrays to a tuple of pixel indices.\n\n Pixel coordinates are rounded to the closest integer value.\n\n Parameters\n ----------\n pix : tuple\n Tuple of pixel coordinates with one element for each dimension.\n\n Returns\n -------\n idx : `~numpy.ndarray`\n Array of pixel indices.\n \"\"\"\n idx = []\n for p in pix:\n p = np.array(p, ndmin=1)\n if np.issubdtype(p.dtype, np.integer):\n idx += [p]\n else:\n with np.errstate(invalid=\"ignore\"):\n p_idx = np.rint(p).astype(int)\n p_idx[~np.isfinite(p)] = INVALID_INDEX.int\n idx += [p_idx]\n\n return tuple(idx)\n\n\nclass Geom(abc.ABC):\n \"\"\"Map geometry base class.\n\n See also: `~gammapy.maps.WcsGeom` and `~gammapy.maps.HpxGeom`.\n \"\"\"\n\n # workaround for the lru_cache pickle issue\n # see e.g. https://github.com/cloudpipe/cloudpickle/issues/178\n def __getstate__(self):\n state = self.__dict__.copy()\n for key, value in state.items():\n func = getattr(value, \"__wrapped__\", None)\n if func is not None:\n state[key] = func\n return state\n\n def _repr_html_(self):\n try:\n return self.to_html()\n except AttributeError:\n return f\"<pre>{html.escape(str(self))}</pre>\"\n\n @property\n @abc.abstractmethod\n def data_shape(self):\n \"\"\"Shape of the Numpy data array matching this geometry.\"\"\"\n pass\n\n def data_nbytes(self, dtype=\"float32\"):\n \"\"\"Estimate memory usage in megabytes of the Numpy data array\n matching this geometry depending on the given type.\n\n Parameters\n ----------\n dtype : str, optional\n The desired data-type for the array. Default is \"float32\".\n\n Returns\n -------\n memory : `~astropy.units.Quantity`\n Estimated memory usage in megabytes (MB).\n \"\"\"\n return (np.empty(self.data_shape, dtype).nbytes * u.byte).to(\"MB\")\n\n @property\n @abc.abstractmethod\n def is_allsky(self):\n pass\n\n @property\n @abc.abstractmethod\n def center_coord(self):\n pass\n\n @property\n @abc.abstractmethod\n def center_pix(self):\n pass\n\n @property\n @abc.abstractmethod\n def center_skydir(self):\n pass\n\n @classmethod\n def from_hdulist(cls, hdulist, hdu=None, hdu_bands=None):\n \"\"\"Load a geometry object from a FITS HDUList.\n\n Parameters\n ----------\n hdulist : `~astropy.io.fits.HDUList`\n HDU list containing HDUs for map data and bands.\n hdu : str or int, optional\n Name or index of the HDU with the map data. Default is None.\n hdu_bands : str, optional\n Name or index of the HDU with the BANDS table. If not\n defined this will be inferred from the FITS header of the\n map HDU. 
Default is None.\n\n Returns\n -------\n geom : `~Geom`\n Geometry object.\n \"\"\"\n if hdu is None:\n hdu = find_hdu(hdulist)\n else:\n hdu = hdulist[hdu]\n\n if hdu_bands is None:\n hdu_bands = find_bands_hdu(hdulist, hdu)\n\n if hdu_bands is not None:\n hdu_bands = hdulist[hdu_bands]\n\n return cls.from_header(hdu.header, hdu_bands)\n\n def to_bands_hdu(self, hdu_bands=None, format=\"gadf\"):\n table_hdu = self.axes.to_table_hdu(format=format, hdu_bands=hdu_bands)\n cols = table_hdu.columns.columns\n cols.extend(self._make_bands_cols())\n return fits.BinTableHDU.from_columns(\n cols, header=table_hdu.header, name=table_hdu.name\n )\n\n @abc.abstractmethod\n def _make_bands_cols(self):\n pass\n\n @abc.abstractmethod\n def get_idx(self, idx=None, local=False, flat=False):\n \"\"\"Get tuple of pixel indices for this geometry.\n\n Returns all pixels in the geometry by default. Pixel indices\n for a single image plane can be accessed by setting ``idx``\n to the index tuple of a plane.\n\n Parameters\n ----------\n idx : tuple, optional\n A tuple of indices with one index for each non-spatial\n dimension. If defined only pixels for the image plane with\n this index will be returned. If none then all pixels\n will be returned. Default is None.\n local : bool, optional\n Flag to return local or global pixel indices. Local\n indices run from 0 to the number of pixels in a given\n image plane. Default is False.\n flat : bool, optional\n Return a flattened array containing only indices for\n pixels contained in the geometry. Default is False.\n\n Returns\n -------\n idx : tuple\n Tuple of pixel index vectors with one vector for each\n dimension.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def get_coord(self, idx=None, flat=False):\n \"\"\"Get the coordinate array for this geometry.\n\n Returns a coordinate array with the same shape as the data\n array. Pixels outside the geometry are set to NaN.\n Coordinates for a single image plane can be accessed by\n setting ``idx`` to the index tuple of a plane.\n\n Parameters\n ----------\n idx : tuple, optional\n A tuple of indices with one index for each non-spatial\n dimension. If defined only coordinates for the image\n plane with this index will be returned. If none then\n coordinates for all pixels will be returned. Default is None.\n flat : bool, optional\n Return a flattened array containing only coordinates for\n pixels contained in the geometry. Default is False.\n\n Returns\n -------\n coords : tuple\n Tuple of coordinate vectors with one vector for each\n dimension.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def coord_to_pix(self, coords):\n \"\"\"Convert map coordinates to pixel coordinates.\n\n Parameters\n ----------\n coords : tuple\n Coordinate values in each dimension of the map. This can\n either be a tuple of numpy arrays or a MapCoord object.\n If passed as a tuple then the ordering should be\n (longitude, latitude, c_0, ..., c_N) where c_i is the\n coordinate vector for axis i.\n\n Returns\n -------\n pix : tuple\n Tuple of pixel coordinates in image and band dimensions.\n \"\"\"\n pass\n\n def coord_to_idx(self, coords, clip=False):\n \"\"\"Convert map coordinates to pixel indices.\n\n Parameters\n ----------\n coords : tuple or `~MapCoord`\n Coordinate values in each dimension of the map. 
This can\n either be a tuple of numpy arrays or a MapCoord object.\n If passed as a tuple then the ordering should be\n (longitude, latitude, c_0, ..., c_N) where c_i is the\n coordinate vector for axis i.\n clip : bool\n Choose whether to clip indices to the valid range of the\n geometry. If False then indices for coordinates outside\n the geometry range will be set -1. Default is False.\n\n Returns\n -------\n pix : tuple\n Tuple of pixel indices in image and band dimensions.\n Elements set to -1 correspond to coordinates outside the\n map.\n \"\"\"\n pix = self.coord_to_pix(coords)\n return self.pix_to_idx(pix, clip=clip)\n\n @abc.abstractmethod\n def pix_to_coord(self, pix):\n \"\"\"Convert pixel coordinates to map coordinates.\n\n Parameters\n ----------\n pix : tuple\n Tuple of pixel coordinates.\n\n Returns\n -------\n coords : tuple\n Tuple of map coordinates.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def pix_to_idx(self, pix, clip=False):\n \"\"\"Convert pixel coordinates to pixel indices.\n\n Returns -1 for pixel coordinates that lie outside the map.\n\n Parameters\n ----------\n pix : tuple\n Tuple of pixel coordinates.\n clip : bool\n Choose whether to clip indices to the valid range of the\n geometry. If False then indices for coordinates outside\n the geometry range will be set -1. Default is False.\n\n Returns\n -------\n idx : tuple\n Tuple of pixel indices.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def contains(self, coords):\n \"\"\"Check if a given map coordinate is contained in the geometry.\n\n Parameters\n ----------\n coords : tuple or `~gammapy.maps.MapCoord`\n Tuple of map coordinates.\n\n Returns\n -------\n containment : `~numpy.ndarray`\n Bool array.\n \"\"\"\n pass\n\n def contains_pix(self, pix):\n \"\"\"Check if a given pixel coordinate is contained in the geometry.\n\n Parameters\n ----------\n pix : tuple\n Tuple of pixel coordinates.\n\n Returns\n -------\n containment : `~numpy.ndarray`\n Bool array.\n \"\"\"\n idx = self.pix_to_idx(pix)\n return np.all(np.stack([t != INVALID_INDEX.int for t in idx]), axis=0)\n\n def slice_by_idx(self, slices):\n \"\"\"Create a new geometry by slicing the non-spatial axes.\n\n Parameters\n ----------\n slices : dict\n Dictionary of axes names and integers or `slice` object pairs. Contains one\n element for each non-spatial dimension. For integer indexing the\n corresponding axes is dropped from the map. Axes not specified in the\n dict are kept unchanged.\n\n Returns\n -------\n geom : `~Geom`\n Sliced geometry.\n \"\"\"\n axes = self.axes.slice_by_idx(slices)\n return self._init_copy(axes=axes)\n\n def rename_axes(self, names, new_names):\n \"\"\"Rename axes contained in the geometry.\n\n Parameters\n ----------\n names : list or str\n Names of the axes.\n new_names : list or str\n New names of the axes. 
The list must be of same length than `names`.\n\n Returns\n -------\n geom : `~Geom`\n Renamed geometry.\n \"\"\"\n axes = self.axes.rename_axes(names=names, new_names=new_names)\n return self._init_copy(axes=axes)\n\n @property\n def as_energy_true(self):\n \"\"\"If the geom contains an axis named 'energy' rename it to 'energy_true'.\"\"\"\n return self.rename_axes(names=\"energy\", new_names=\"energy_true\")\n\n @property\n def has_energy_axis(self):\n \"\"\"Whether geom has an energy axis (either 'energy' or 'energy_true').\"\"\"\n return (\"energy\" in self.axes.names) ^ (\"energy_true\" in self.axes.names)\n\n @abc.abstractmethod\n def to_image(self):\n \"\"\"Create a 2D image geometry (drop non-spatial dimensions).\n\n Returns\n -------\n geom : `~Geom`\n Image geometry.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def to_cube(self, axes):\n \"\"\"Append non-spatial axes to create a higher-dimensional geometry.\n\n This will result in a new geometry with\n N+M dimensions where N is the number of current dimensions and\n M is the number of axes in the list.\n\n Parameters\n ----------\n axes : list\n Axes that will be appended to this geometry.\n\n Returns\n -------\n geom : `~Geom`\n Map geometry.\n \"\"\"\n pass\n\n def squash(self, axis_name):\n \"\"\"Squash geom axis.\n\n Parameters\n ----------\n axis_name : str\n Axis to squash.\n\n Returns\n -------\n geom : `Geom`\n Geom with squashed axis.\n \"\"\"\n axes = self.axes.squash(axis_name=axis_name)\n return self.to_image().to_cube(axes=axes)\n\n def drop(self, axis_name):\n \"\"\"Drop an axis from the geom.\n\n Parameters\n ----------\n axis_name : str\n Name of the axis to remove.\n\n Returns\n -------\n geom : `Geom`\n New geom with the axis removed.\n \"\"\"\n axes = self.axes.drop(axis_name=axis_name)\n return self.to_image().to_cube(axes=axes)\n\n def pad(self, pad_width, axis_name):\n \"\"\"\n Pad the geometry at the edges.\n\n Parameters\n ----------\n pad_width : {sequence, array_like, int}\n Number of values padded to the edges of each axis.\n axis_name : str\n Name of the axis to pad.\n\n Returns\n -------\n geom : `~Geom`\n Padded geometry.\n \"\"\"\n if axis_name is None:\n return self._pad_spatial(pad_width)\n else:\n axes = self.axes.pad(axis_name=axis_name, pad_width=pad_width)\n return self.to_image().to_cube(axes)\n\n @abc.abstractmethod\n def _pad_spatial(self, pad_width):\n pass\n\n @abc.abstractmethod\n def crop(self, crop_width):\n \"\"\"\n Crop the geometry at the edges.\n\n Parameters\n ----------\n crop_width : {sequence, array_like, int}\n Number of values cropped from the edges of each axis.\n\n Returns\n -------\n geom : `~Geom`\n Cropped geometry.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def downsample(self, factor, axis_name):\n \"\"\"Downsample the spatial dimension of the geometry by a given factor.\n\n Parameters\n ----------\n factor : int\n Downsampling factor.\n axis_name : str\n Axis to downsample.\n\n Returns\n -------\n geom : `~Geom`\n Downsampled geometry.\n\n \"\"\"\n pass\n\n @abc.abstractmethod\n def upsample(self, factor, axis_name=None):\n \"\"\"Upsample the spatial dimension of the geometry by a given factor.\n\n Parameters\n ----------\n factor : int\n Upsampling factor.\n axis_name : str\n Axis to upsample.\n\n Returns\n -------\n geom : `~Geom`\n Upsampled geometry.\n\n \"\"\"\n pass\n\n def resample_axis(self, axis):\n \"\"\"Resample geom to a new axis binning.\n\n This method groups the existing bins into a new binning.\n\n Parameters\n ----------\n axis : `MapAxis`\n New map 
axis.\n\n Returns\n -------\n map : `Geom`\n Geom with resampled axis.\n \"\"\"\n axes = self.axes.resample(axis=axis)\n return self._init_copy(axes=axes)\n\n def replace_axis(self, axis):\n \"\"\"Replace axis with a new one.\n\n Parameters\n ----------\n axis : `MapAxis`\n New map axis.\n\n Returns\n -------\n map : `Geom`\n Geom with replaced axis.\n \"\"\"\n axes = self.axes.replace(axis=axis)\n return self._init_copy(axes=axes)\n\n @abc.abstractmethod\n def solid_angle(self):\n \"\"\"Solid angle as a `~astropy.units.Quantity` object (in ``sr``).\"\"\"\n pass\n\n @property\n def is_image(self):\n \"\"\"Whether the geom is an image without extra dimensions.\"\"\"\n if self.axes is None:\n return True\n return len(self.axes) == 0\n\n @property\n def is_flat(self):\n \"\"\"Whether the geom non-spatial axes have length 1, equivalent to an image.\"\"\"\n if self.is_image:\n return True\n else:\n valid = True\n for axis in self.axes:\n valid = valid and (axis.nbin == 1)\n return valid\n\n def _init_copy(self, **kwargs):\n \"\"\"Init map geom instance by copying missing init arguments from self.\"\"\"\n argnames = inspect.getfullargspec(self.__init__).args\n argnames.remove(\"self\")\n\n for arg in argnames:\n value = getattr(self, \"_\" + arg)\n if arg not in kwargs:\n kwargs[arg] = copy.deepcopy(value)\n\n return self.__class__(**kwargs)\n\n def copy(self, **kwargs):\n \"\"\"Copy and overwrite given attributes.\n\n Parameters\n ----------\n **kwargs : dict\n Keyword arguments to overwrite in the map geometry constructor.\n\n Returns\n -------\n copy : `Geom`\n Copied map geometry.\n \"\"\"\n return self._init_copy(**kwargs)\n\n def energy_mask(self, energy_min=None, energy_max=None, round_to_edge=False):\n \"\"\"Create a mask for a given energy range.\n\n The energy bin must be fully contained to be included in the mask.\n\n Parameters\n ----------\n energy_min, energy_max : `~astropy.units.Quantity`\n Energy range.\n\n Returns\n -------\n mask : `~gammapy.maps.Map`\n Map containing the energy mask. The geometry of the map\n is the same as the geometry of the instance which called this method.\n \"\"\"\n from . import Map\n\n # get energy axes and values\n energy_axis = self.axes[\"energy\"]\n\n if round_to_edge:\n energy_min, energy_max = energy_axis.round([energy_min, energy_max])\n\n # TODO: make this more general\n shape = (-1, 1) if self.is_hpx else (-1, 1, 1)\n energy_edges = energy_axis.edges.reshape(shape)\n\n # set default values\n energy_min = energy_min if energy_min is not None else energy_edges[0]\n energy_max = energy_max if energy_max is not None else energy_edges[-1]\n\n mask = (energy_edges[:-1] >= energy_min) & (energy_edges[1:] <= energy_max)\n data = np.broadcast_to(mask, shape=self.data_shape)\n return Map.from_geom(geom=self, data=data, dtype=data.dtype)\n", "path": "gammapy/maps/geom.py" } ]
diff --git a/gammapy/maps/geom.py b/gammapy/maps/geom.py index 5afe261137..5d8cefde7c 100644 --- a/gammapy/maps/geom.py +++ b/gammapy/maps/geom.py @@ -625,8 +625,9 @@ def energy_mask(self, energy_min=None, energy_max=None, round_to_edge=False): Returns ------- - mask : `~numpy.ndarray` - Energy mask. + mask : `~gammapy.maps.Map` + Map containing the energy mask. The geometry of the map + is the same as the geometry of the instance which called this method. """ from . import Map
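The diff above only corrects the documented return type — `energy_mask` already builds a `Map` from a boolean array. The containment test itself is plain NumPy broadcasting over the energy bin edges; the following is a minimal, self-contained sketch of that logic. The bin edges, energy range, and the WCS-style `(energy, y, x)` data shape are made-up values for illustration (the HEALPix branch in the code reshapes to `(-1, 1)` instead):

```python
import numpy as np

# hypothetical energy bin edges (TeV), reshaped for broadcasting over (energy, y, x)
energy_edges = np.array([0.1, 1.0, 10.0, 100.0]).reshape(-1, 1, 1)
data_shape = (3, 2, 2)  # 3 energy bins on a 2x2 spatial grid

energy_min, energy_max = 0.5, 50.0

# a bin is kept only if it is fully contained in [energy_min, energy_max]
mask = (energy_edges[:-1] >= energy_min) & (energy_edges[1:] <= energy_max)
data = np.broadcast_to(mask, data_shape)

print(data[:, 0, 0])  # [False  True False] -- only the 1-10 TeV bin survives
```

Keeping only fully contained bins is exactly what the docstring means by "The energy bin must be fully contained to be included in the mask."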
conan-io__conan-center-index-7286
[package] aws-c-event-stream/0.2.7: conflicting openssl versions
```
ERROR: Conflict in s2n/1.0.11: 's2n/1.0.11' requires 'openssl/1.1.1k' while 'aws-c-cal/0.5.11' requires 'openssl/1.1.1l'. To fix this conflict you need to override the package 'openssl' in your root package.
```

Seems like it was introduced by #7260.

### Package and Environment Details (include every applicable attribute)
* Package Name/Version: **aws-c-event-stream/0.2.7**
* Conan version: **conan 1.39.0**

### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)
```
[settings]
arch=x86_64
arch_build=x86_64
build_type=Release
compiler=gcc
compiler.libcxx=libstdc++
compiler.version=7
os=Linux
os_build=Linux
[options]
[build_requires]
[env]
```

### Steps to reproduce (Include if Applicable)

conan install --build missing aws-c-event-stream/0.2.7@
[ { "content": "from conans import ConanFile, CMake, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\n\nrequired_conan_version = \">=1.33.0\"\n\nclass S2n(ConanFile):\n name = \"s2n\"\n description = \"An implementation of the TLS/SSL protocols\"\n topics = (\"conan\", \"aws\", \"amazon\", \"cloud\", )\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/aws/s2n-tls\"\n license = \"Apache-2.0\",\n exports_sources = \"CMakeLists.txt\"\n generators = \"cmake\", \"cmake_find_package\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.cppstd\n del self.settings.compiler.libcxx\n\n def requirements(self):\n self.requires(\"openssl/1.1.1k\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n def validate(self):\n if self.settings.os == \"Windows\":\n raise ConanInvalidConfiguration(\"Not supported (yet)\")\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions[\"BUILD_TESTING\"] = False\n self._cmake.definitions[\"UNSAFE_TREAT_WARNINGS_AS_ERRORS\"] = False\n self._cmake.configure()\n return self._cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"s2n\"))\n\n def package_info(self):\n self.cpp_info.filenames[\"cmake_find_package\"] = \"s2n\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"s2n\"\n self.cpp_info.names[\"cmake_find_package\"] = \"AWS\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"AWS\"\n self.cpp_info.components[\"s2n-lib\"].names[\"cmake_find_package\"] = \"s2n\"\n self.cpp_info.components[\"s2n-lib\"].names[\"cmake_find_package_multi\"] = \"s2n\"\n self.cpp_info.components[\"s2n-lib\"].libs = [\"s2n\"]\n self.cpp_info.components[\"s2n-lib\"].requires = [\"openssl::crypto\"]\n if self.settings.os in (\"FreeBSD\", \"Linux\"):\n self.cpp_info.components[\"s2n-lib\"].system_libs = [\"m\", \"pthread\"]\n", "path": "recipes/s2n/all/conanfile.py" } ]
[ { "content": "from conans import ConanFile, CMake, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\n\nrequired_conan_version = \">=1.33.0\"\n\nclass S2n(ConanFile):\n name = \"s2n\"\n description = \"An implementation of the TLS/SSL protocols\"\n topics = (\"conan\", \"aws\", \"amazon\", \"cloud\", )\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/aws/s2n-tls\"\n license = \"Apache-2.0\",\n exports_sources = \"CMakeLists.txt\"\n generators = \"cmake\", \"cmake_find_package\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.cppstd\n del self.settings.compiler.libcxx\n\n def requirements(self):\n self.requires(\"openssl/1.1.1l\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n def validate(self):\n if self.settings.os == \"Windows\":\n raise ConanInvalidConfiguration(\"Not supported (yet)\")\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions[\"BUILD_TESTING\"] = False\n self._cmake.definitions[\"UNSAFE_TREAT_WARNINGS_AS_ERRORS\"] = False\n self._cmake.configure()\n return self._cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"s2n\"))\n\n def package_info(self):\n self.cpp_info.filenames[\"cmake_find_package\"] = \"s2n\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"s2n\"\n self.cpp_info.names[\"cmake_find_package\"] = \"AWS\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"AWS\"\n self.cpp_info.components[\"s2n-lib\"].names[\"cmake_find_package\"] = \"s2n\"\n self.cpp_info.components[\"s2n-lib\"].names[\"cmake_find_package_multi\"] = \"s2n\"\n self.cpp_info.components[\"s2n-lib\"].libs = [\"s2n\"]\n self.cpp_info.components[\"s2n-lib\"].requires = [\"openssl::crypto\"]\n if self.settings.os in (\"FreeBSD\", \"Linux\"):\n self.cpp_info.components[\"s2n-lib\"].system_libs = [\"m\", \"pthread\"]\n", "path": "recipes/s2n/all/conanfile.py" } ]
diff --git a/recipes/s2n/all/conanfile.py b/recipes/s2n/all/conanfile.py index 12cf8ff20e2f6..dbe79bb113a09 100644 --- a/recipes/s2n/all/conanfile.py +++ b/recipes/s2n/all/conanfile.py @@ -36,7 +36,7 @@ def configure(self): del self.settings.compiler.libcxx def requirements(self): - self.requires("openssl/1.1.1k") + self.requires("openssl/1.1.1l") def source(self): tools.get(**self.conan_data["sources"][self.version],
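The recipe change above is the upstream fix: it re-pins s2n to the same openssl reference that aws-c-cal already uses, so the dependency graph converges again. Until a consumer picks up the fixed recipe revision, the error message itself names the workaround — overriding openssl from the root recipe. A minimal Conan 1.x sketch of such a consumer follows; the class name is made up and the package references are taken from the report:

```python
from conans import ConanFile

class AwsConsumerConan(ConanFile):
    settings = "os", "arch", "compiler", "build_type"

    def requirements(self):
        self.requires("aws-c-event-stream/0.2.7")
        # force a single openssl version for the whole graph, resolving the
        # 1.1.1k vs 1.1.1l diamond between s2n and aws-c-cal
        self.requires("openssl/1.1.1l", override=True)
```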
ansible__ansible-modules-core-4989
fstab parameter of mount module won't work
##### Issue Type:
- Bug Report

##### Ansible Version:
ansible 2.0.0.2
config file = /etc/ansible/ansible.cfg
configured module search path = /usr/share/ansible

##### Ansible Configuration:

##### Environment:
Ubuntu 14.04 64Bit

##### Summary:
Setting fstab=**/tmp/fstab** in the mount module results in 'can't find mount point in **/etc/fstab**'.

##### Steps To Reproduce:
```
./test-module -m ../lib/ansible/modules/core/system/mount.py -a "src=/dev/sda9 name=/tmp/mnt fstype=ext3 state=mounted **fstab=/tmp/fstab**"
```

##### Expected Results:
```
changed: ok
```
and a line added to /tmp/fstab; 'mount -a' should then show the mounted device.

##### Actual Results:
```
*********************************** PARSED OUTPUT
{
    "failed": true,
    "invocation": {
        "module_args": {
            "dump": null,
            "fstab": **"/tmp/fstab"**,
            "fstype": "ext3",
            "name": "/tmp/mnt",
            "opts": null,
            "passno": null,
            "src": "/dev/sda9",
            "state": "mounted"
        }
    },
    "msg": "Error mounting /tmp/mnt: mount: /tmp/mnt could not be found in **/etc/fstab or /etc/mtab**\n"
}
```
[ { "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Red Hat, inc\n# Written by Seth Vidal\n# based on the mount modules from salt and puppet\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.basic import get_platform\nfrom ansible.module_utils.ismount import ismount\nfrom ansible.module_utils.pycompat24 import get_exception\nfrom ansible.module_utils.six import iteritems\nimport os\nimport re\n\n\nDOCUMENTATION = '''\n---\nmodule: mount\nshort_description: Control active and configured mount points\ndescription:\n - This module controls active and configured mount points in C(/etc/fstab).\nauthor:\n - Ansible Core Team\n - Seth Vidal\nversion_added: \"0.6\"\noptions:\n name:\n description:\n - Path to the mount point (e.g. C(/mnt/files))\n required: true\n src:\n description:\n - Device to be mounted on I(name). Required when I(state) set to\n C(present) or C(mounted).\n required: false\n default: null\n fstype:\n description:\n - Filesystem type. Required when I(state) is C(present) or C(mounted).\n required: false\n default: null\n opts:\n description:\n - Mount options (see fstab(5), or vfstab(4) on Solaris).\n required: false\n default: null\n dump:\n description:\n - Dump (see fstab(5)). Note that if set to C(null) and I(state) set to\n C(present), it will cease to work and duplicate entries will be made\n with subsequent runs.\n - Has no effect on Solaris systems.\n required: false\n default: 0\n passno:\n description:\n - Passno (see fstab(5)). Note that if set to C(null) and I(state) set to\n C(present), it will cease to work and duplicate entries will be made\n with subsequent runs.\n - Deprecated on Solaris systems.\n required: false\n default: 0\n state:\n description:\n - If C(mounted) or C(unmounted), the device will be actively mounted or\n unmounted as needed and appropriately configured in I(fstab).\n - C(absent) and C(present) only deal with I(fstab) but will not affect\n current mounting.\n - If specifying C(mounted) and the mount point is not present, the mount\n point will be created. Similarly.\n - Specifying C(absent) will remove the mount point directory.\n required: true\n choices: [\"present\", \"absent\", \"mounted\", \"unmounted\"]\n fstab:\n description:\n - File to use instead of C(/etc/fstab). You shouldn't use that option\n unless you really know what you are doing. 
This might be useful if\n you need to configure mountpoints in a chroot environment.\n required: false\n default: /etc/fstab (/etc/vfstab on Solaris)\n boot:\n version_added: 2.2\n description:\n - Determines if the filesystem should be mounted on boot.\n - Only applies to Solaris systems.\n required: false\n default: yes\n choices: [\"yes\", \"no\"]\n'''\n\nEXAMPLES = '''\n- name: Mount DVD read-only\n mount:\n name: /mnt/dvd\n src: /dev/sr0\n fstype: iso9660\n opts: ro\n state: present\n\n- name: Mount up device by label\n mount:\n name: /srv/disk\n src: LABEL=SOME_LABEL\n fstype: ext4\n state: present\n\n- name: Mount up device by UUID\n mount:\n name: /home\n src: UUID=b3e48f45-f933-4c8e-a700-22a159ec9077\n fstype: xfs\n opts: noatime\n state: present\n'''\n\n\ndef write_fstab(lines, dest):\n fs_w = open(dest, 'w')\n\n for l in lines:\n fs_w.write(l)\n\n fs_w.flush()\n fs_w.close()\n\n\ndef _escape_fstab(v):\n \"\"\"Escape invalid characters in fstab fields.\n\n space (040)\n ampersand (046)\n backslash (134)\n \"\"\"\n\n if isinstance(v, int):\n return v\n else:\n return(\n v.\n replace('\\\\', '\\\\134').\n replace(' ', '\\\\040').\n replace('&', '\\\\046'))\n\n\ndef set_mount(module, **kwargs):\n \"\"\"Set/change a mount point location in fstab.\"\"\"\n\n # solaris kwargs:\n # name, src, fstype, opts, boot, passno, state, fstab=/etc/vfstab\n # linux:\n # kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab\n if get_platform() == 'SunOS':\n args = dict(\n opts='-',\n passno='-',\n fstab='/etc/vfstab',\n boot='yes'\n )\n new_line = (\n '%(src)s - %(name)s %(fstype)s %(passno)s %(boot)s %(opts)s\\n')\n else:\n args = dict(\n opts='defaults',\n dump='0',\n passno='0',\n fstab='/etc/fstab'\n )\n new_line = (\n '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\\n')\n args.update(kwargs)\n\n to_write = []\n exists = False\n changed = False\n escaped_args = dict([(k, _escape_fstab(v)) for k, v in iteritems(args)])\n for line in open(args['fstab'], 'r').readlines():\n if not line.strip():\n to_write.append(line)\n\n continue\n\n if line.strip().startswith('#'):\n to_write.append(line)\n\n continue\n\n if len(line.split()) != 6 and get_platform() != 'SunOS':\n # Not sure what this is or why it is here but it is not our fault\n # so leave it be\n to_write.append(line)\n\n continue\n\n ld = {}\n if get_platform() == 'SunOS':\n (\n ld['src'],\n dash,\n ld['name'],\n ld['fstype'],\n ld['passno'],\n ld['boot'],\n ld['opts']\n ) = line.split()\n else:\n (\n ld['src'],\n ld['name'],\n ld['fstype'],\n ld['opts'],\n ld['dump'],\n ld['passno']\n ) = line.split()\n\n if ld['name'] != escaped_args['name']:\n to_write.append(line)\n\n continue\n\n # It exists - now see if what we have is different\n exists = True\n\n if get_platform() == 'SunOS':\n for t in ('src', 'fstype', 'passno', 'boot', 'opts'):\n if ld[t] != escaped_args[t]:\n changed = True\n ld[t] = escaped_args[t]\n else:\n for t in ('src', 'fstype', 'opts', 'dump', 'passno'):\n if ld[t] != escaped_args[t]:\n changed = True\n ld[t] = escaped_args[t]\n\n if changed:\n to_write.append(new_line % ld)\n else:\n to_write.append(line)\n\n if not exists:\n to_write.append(new_line % escaped_args)\n changed = True\n\n if changed and not module.check_mode:\n write_fstab(to_write, args['fstab'])\n\n return (args['name'], changed)\n\n\ndef unset_mount(module, **kwargs):\n \"\"\"Remove a mount point from fstab.\"\"\"\n\n # solaris kwargs:\n # name, src, fstype, opts, boot, passno, state, fstab=/etc/vfstab\n # linux kwargs:\n # 
name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab\n if get_platform() == 'SunOS':\n args = dict(\n opts='-',\n passno='-',\n fstab='/etc/vfstab',\n boot='yes'\n )\n else:\n args = dict(\n opts='default',\n dump='0',\n passno='0',\n fstab='/etc/fstab'\n )\n args.update(kwargs)\n\n to_write = []\n changed = False\n escaped_name = _escape_fstab(args['name'])\n for line in open(args['fstab'], 'r').readlines():\n if not line.strip():\n to_write.append(line)\n\n continue\n\n if line.strip().startswith('#'):\n to_write.append(line)\n\n continue\n\n if len(line.split()) != 6 and get_platform() != 'SunOS':\n # Not sure what this is or why it is here but it is not our fault\n # so leave it be\n to_write.append(line)\n\n continue\n\n ld = {}\n\n if get_platform() == 'SunOS':\n (\n ld['src'],\n dash,\n ld['name'],\n ld['fstype'],\n ld['passno'],\n ld['boot'],\n ld['opts']\n ) = line.split()\n else:\n (\n ld['src'],\n ld['name'],\n ld['fstype'],\n ld['opts'],\n ld['dump'],\n ld['passno']\n ) = line.split()\n\n if ld['name'] != escaped_name:\n to_write.append(line)\n\n continue\n\n # If we got here we found a match - continue and mark changed\n changed = True\n\n if changed and not module.check_mode:\n write_fstab(to_write, args['fstab'])\n\n return (args['name'], changed)\n\n\ndef mount(module, **kwargs):\n \"\"\"Mount up a path or remount if needed.\"\"\"\n\n # solaris kwargs:\n # name, src, fstype, opts, boot, passno, state, fstab=/etc/vfstab\n # linux kwargs:\n # name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab\n if get_platform() == 'SunOS':\n args = dict(\n opts='-',\n passno='-',\n fstab='/etc/vfstab',\n boot='yes'\n )\n else:\n args = dict(\n opts='default',\n dump='0',\n passno='0',\n fstab='/etc/fstab'\n )\n args.update(kwargs)\n\n mount_bin = module.get_bin_path('mount', required=True)\n name = kwargs['name']\n cmd = [mount_bin]\n\n if ismount(name):\n cmd += ['-o', 'remount']\n\n if get_platform().lower() == 'freebsd':\n cmd += ['-F', args['fstab']]\n elif get_platform().lower() == 'linux':\n cmd += ['-T', args['fstab']]\n\n cmd += [name]\n\n rc, out, err = module.run_command(cmd)\n\n if rc == 0:\n return 0, ''\n else:\n return rc, out+err\n\n\ndef umount(module, **kwargs):\n \"\"\"Unmount a path.\"\"\"\n\n umount_bin = module.get_bin_path('umount', required=True)\n name = kwargs['name']\n cmd = [umount_bin, name]\n\n rc, out, err = module.run_command(cmd)\n\n if rc == 0:\n return 0, ''\n else:\n return rc, out+err\n\n\n# Note if we wanted to put this into module_utils we'd have to get permission\n# from @jupeter -- https://github.com/ansible/ansible-modules-core/pull/2923\n# @jtyr -- https://github.com/ansible/ansible-modules-core/issues/4439\n# and @abadger to relicense from GPLv3+\ndef is_bind_mounted(module, dest, src=None, fstype=None):\n \"\"\"Return whether the dest is bind mounted\n\n :arg module: The AnsibleModule (used for helper functions)\n :arg dest: The directory to be mounted under. This is the primary means\n of identifying whether the destination is mounted.\n :kwarg src: The source directory. If specified, this is used to help\n ensure that we are detecting that the correct source is mounted there.\n :kwarg fstype: The filesystem type. 
If specified this is also used to\n help ensure that we are detecting the right mount.\n :returns: True if the dest is mounted with src otherwise False.\n \"\"\"\n\n is_mounted = False\n bin_path = module.get_bin_path('mount', required=True)\n cmd = '%s -l' % bin_path\n\n if get_platform().lower() == 'linux':\n bin_path = module.get_bin_path('findmnt', required=True)\n cmd = '%s -nr %s' % (bin_path, dest)\n\n rc, out, err = module.run_command(cmd)\n mounts = []\n\n if len(out):\n mounts = out.strip().split('\\n')\n\n mount_pattern = re.compile('\\[(.*)\\]')\n\n for mnt in mounts:\n arguments = mnt.split()\n\n if get_platform().lower() == 'linux':\n result = mount_pattern.search(arguments[1])\n\n if len(result.groups()) == 1:\n if arguments[0] == dest:\n is_mounted = True\n elif (\n (arguments[0] == src or src is None) and\n arguments[2] == dest and\n (arguments[4] == fstype or fstype is None)):\n is_mounted = True\n\n if is_mounted:\n break\n\n return is_mounted\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n boot=dict(default='yes', choices=['yes', 'no']),\n dump=dict(),\n fstab=dict(default='/etc/fstab'),\n fstype=dict(),\n name=dict(required=True, type='path'),\n opts=dict(),\n passno=dict(type='str'),\n src=dict(type='path'),\n state=dict(\n required=True,\n choices=['present', 'absent', 'mounted', 'unmounted']),\n ),\n supports_check_mode=True,\n required_if=(\n ['state', 'mounted', ['src', 'fstype']],\n ['state', 'present', ['src', 'fstype']]\n )\n )\n\n changed = False\n args = {\n 'name': module.params['name']\n }\n\n if module.params['src'] is not None:\n args['src'] = module.params['src']\n if module.params['fstype'] is not None:\n args['fstype'] = module.params['fstype']\n if module.params['passno'] is not None:\n args['passno'] = module.params['passno']\n if module.params['opts'] is not None:\n args['opts'] = module.params['opts']\n if module.params['dump'] is not None:\n args['dump'] = module.params['dump']\n if get_platform() == 'SunOS' and module.params['fstab'] == '/etc/fstab':\n args['fstab'] = '/etc/vfstab'\n elif module.params['fstab'] is not None:\n args['fstab'] = module.params['fstab']\n\n # If fstab file does not exist, we first need to create it. This mainly\n # happens when fstab option is passed to the module.\n if not os.path.exists(args['fstab']):\n if not os.path.exists(os.path.dirname(args['fstab'])):\n os.makedirs(os.path.dirname(args['fstab']))\n\n open(args['fstab'], 'a').close()\n\n # absent:\n # Remove from fstab and unmounted.\n # unmounted:\n # Do not change fstab state, but unmount.\n # present:\n # Add to fstab, do not change mount state.\n # mounted:\n # Add to fstab if not there and make sure it is mounted. 
If it has\n # changed in fstab then remount it.\n\n state = module.params['state']\n name = module.params['name']\n\n if state == 'absent':\n name, changed = unset_mount(module, **args)\n\n if changed and not module.check_mode:\n if ismount(name) or is_bind_mounted(module, name):\n res, msg = umount(module, **args)\n\n if res:\n module.fail_json(\n msg=\"Error unmounting %s: %s\" % (name, msg))\n\n if os.path.exists(name):\n try:\n os.rmdir(name)\n except (OSError, IOError):\n e = get_exception()\n module.fail_json(msg=\"Error rmdir %s: %s\" % (name, str(e)))\n elif state == 'unmounted':\n if ismount(name) or is_bind_mounted(module, name):\n if not module.check_mode:\n res, msg = umount(module, **args)\n\n if res:\n module.fail_json(\n msg=\"Error unmounting %s: %s\" % (name, msg))\n\n changed = True\n elif state == 'mounted':\n if not os.path.exists(name) and not module.check_mode:\n try:\n os.makedirs(name)\n except (OSError, IOError):\n e = get_exception()\n module.fail_json(\n msg=\"Error making dir %s: %s\" % (name, str(e)))\n\n name, changed = set_mount(module, **args)\n res = 0\n\n if ismount(name):\n if not module.check_mode:\n res, msg = mount(module, **args)\n changed = True\n elif 'bind' in args.get('opts', []):\n changed = True\n\n if is_bind_mounted(module, name, args['src'], args['fstype']):\n changed = False\n\n if changed and not module.check_mode:\n res, msg = mount(module, **args)\n else:\n changed = True\n\n if not module.check_mode:\n res, msg = mount(module, **args)\n\n if res:\n module.fail_json(msg=\"Error mounting %s: %s\" % (name, msg))\n elif state == 'present':\n name, changed = set_mount(module, **args)\n else:\n module.fail_json(msg='Unexpected position reached')\n\n module.exit_json(changed=changed, **args)\n\n\nif __name__ == '__main__':\n main()\n", "path": "system/mount.py" } ]
[ { "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Red Hat, inc\n# Written by Seth Vidal\n# based on the mount modules from salt and puppet\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.basic import get_platform\nfrom ansible.module_utils.ismount import ismount\nfrom ansible.module_utils.pycompat24 import get_exception\nfrom ansible.module_utils.six import iteritems\nimport os\nimport re\n\n\nDOCUMENTATION = '''\n---\nmodule: mount\nshort_description: Control active and configured mount points\ndescription:\n - This module controls active and configured mount points in C(/etc/fstab).\nauthor:\n - Ansible Core Team\n - Seth Vidal\nversion_added: \"0.6\"\noptions:\n name:\n description:\n - Path to the mount point (e.g. C(/mnt/files))\n required: true\n src:\n description:\n - Device to be mounted on I(name). Required when I(state) set to\n C(present) or C(mounted).\n required: false\n default: null\n fstype:\n description:\n - Filesystem type. Required when I(state) is C(present) or C(mounted).\n required: false\n default: null\n opts:\n description:\n - Mount options (see fstab(5), or vfstab(4) on Solaris).\n required: false\n default: null\n dump:\n description:\n - Dump (see fstab(5)). Note that if set to C(null) and I(state) set to\n C(present), it will cease to work and duplicate entries will be made\n with subsequent runs.\n - Has no effect on Solaris systems.\n required: false\n default: 0\n passno:\n description:\n - Passno (see fstab(5)). Note that if set to C(null) and I(state) set to\n C(present), it will cease to work and duplicate entries will be made\n with subsequent runs.\n - Deprecated on Solaris systems.\n required: false\n default: 0\n state:\n description:\n - If C(mounted) or C(unmounted), the device will be actively mounted or\n unmounted as needed and appropriately configured in I(fstab).\n - C(absent) and C(present) only deal with I(fstab) but will not affect\n current mounting.\n - If specifying C(mounted) and the mount point is not present, the mount\n point will be created. Similarly.\n - Specifying C(absent) will remove the mount point directory.\n required: true\n choices: [\"present\", \"absent\", \"mounted\", \"unmounted\"]\n fstab:\n description:\n - File to use instead of C(/etc/fstab). You shouldn't use that option\n unless you really know what you are doing. 
This might be useful if\n you need to configure mountpoints in a chroot environment.\n required: false\n default: /etc/fstab (/etc/vfstab on Solaris)\n boot:\n version_added: 2.2\n description:\n - Determines if the filesystem should be mounted on boot.\n - Only applies to Solaris systems.\n required: false\n default: yes\n choices: [\"yes\", \"no\"]\n'''\n\nEXAMPLES = '''\n- name: Mount DVD read-only\n mount:\n name: /mnt/dvd\n src: /dev/sr0\n fstype: iso9660\n opts: ro\n state: present\n\n- name: Mount up device by label\n mount:\n name: /srv/disk\n src: LABEL=SOME_LABEL\n fstype: ext4\n state: present\n\n- name: Mount up device by UUID\n mount:\n name: /home\n src: UUID=b3e48f45-f933-4c8e-a700-22a159ec9077\n fstype: xfs\n opts: noatime\n state: present\n'''\n\n\ndef write_fstab(lines, dest):\n fs_w = open(dest, 'w')\n\n for l in lines:\n fs_w.write(l)\n\n fs_w.flush()\n fs_w.close()\n\n\ndef _escape_fstab(v):\n \"\"\"Escape invalid characters in fstab fields.\n\n space (040)\n ampersand (046)\n backslash (134)\n \"\"\"\n\n if isinstance(v, int):\n return v\n else:\n return(\n v.\n replace('\\\\', '\\\\134').\n replace(' ', '\\\\040').\n replace('&', '\\\\046'))\n\n\ndef set_mount(module, **kwargs):\n \"\"\"Set/change a mount point location in fstab.\"\"\"\n\n # solaris kwargs:\n # name, src, fstype, opts, boot, passno, state, fstab=/etc/vfstab\n # linux:\n # kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab\n if get_platform() == 'SunOS':\n args = dict(\n opts='-',\n passno='-',\n fstab='/etc/vfstab',\n boot='yes'\n )\n new_line = (\n '%(src)s - %(name)s %(fstype)s %(passno)s %(boot)s %(opts)s\\n')\n else:\n args = dict(\n opts='defaults',\n dump='0',\n passno='0',\n fstab='/etc/fstab'\n )\n new_line = (\n '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\\n')\n args.update(kwargs)\n\n to_write = []\n exists = False\n changed = False\n escaped_args = dict([(k, _escape_fstab(v)) for k, v in iteritems(args)])\n for line in open(args['fstab'], 'r').readlines():\n if not line.strip():\n to_write.append(line)\n\n continue\n\n if line.strip().startswith('#'):\n to_write.append(line)\n\n continue\n\n if len(line.split()) != 6 and get_platform() != 'SunOS':\n # Not sure what this is or why it is here but it is not our fault\n # so leave it be\n to_write.append(line)\n\n continue\n\n ld = {}\n if get_platform() == 'SunOS':\n (\n ld['src'],\n dash,\n ld['name'],\n ld['fstype'],\n ld['passno'],\n ld['boot'],\n ld['opts']\n ) = line.split()\n else:\n (\n ld['src'],\n ld['name'],\n ld['fstype'],\n ld['opts'],\n ld['dump'],\n ld['passno']\n ) = line.split()\n\n if ld['name'] != escaped_args['name']:\n to_write.append(line)\n\n continue\n\n # It exists - now see if what we have is different\n exists = True\n\n if get_platform() == 'SunOS':\n for t in ('src', 'fstype', 'passno', 'boot', 'opts'):\n if ld[t] != escaped_args[t]:\n changed = True\n ld[t] = escaped_args[t]\n else:\n for t in ('src', 'fstype', 'opts', 'dump', 'passno'):\n if ld[t] != escaped_args[t]:\n changed = True\n ld[t] = escaped_args[t]\n\n if changed:\n to_write.append(new_line % ld)\n else:\n to_write.append(line)\n\n if not exists:\n to_write.append(new_line % escaped_args)\n changed = True\n\n if changed and not module.check_mode:\n write_fstab(to_write, args['fstab'])\n\n return (args['name'], changed)\n\n\ndef unset_mount(module, **kwargs):\n \"\"\"Remove a mount point from fstab.\"\"\"\n\n # solaris kwargs:\n # name, src, fstype, opts, boot, passno, state, fstab=/etc/vfstab\n # linux kwargs:\n # 
name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab\n if get_platform() == 'SunOS':\n args = dict(\n opts='-',\n passno='-',\n fstab='/etc/vfstab',\n boot='yes'\n )\n else:\n args = dict(\n opts='default',\n dump='0',\n passno='0',\n fstab='/etc/fstab'\n )\n args.update(kwargs)\n\n to_write = []\n changed = False\n escaped_name = _escape_fstab(args['name'])\n for line in open(args['fstab'], 'r').readlines():\n if not line.strip():\n to_write.append(line)\n\n continue\n\n if line.strip().startswith('#'):\n to_write.append(line)\n\n continue\n\n if len(line.split()) != 6 and get_platform() != 'SunOS':\n # Not sure what this is or why it is here but it is not our fault\n # so leave it be\n to_write.append(line)\n\n continue\n\n ld = {}\n\n if get_platform() == 'SunOS':\n (\n ld['src'],\n dash,\n ld['name'],\n ld['fstype'],\n ld['passno'],\n ld['boot'],\n ld['opts']\n ) = line.split()\n else:\n (\n ld['src'],\n ld['name'],\n ld['fstype'],\n ld['opts'],\n ld['dump'],\n ld['passno']\n ) = line.split()\n\n if ld['name'] != escaped_name:\n to_write.append(line)\n\n continue\n\n # If we got here we found a match - continue and mark changed\n changed = True\n\n if changed and not module.check_mode:\n write_fstab(to_write, args['fstab'])\n\n return (args['name'], changed)\n\n\ndef mount(module, **kwargs):\n \"\"\"Mount up a path or remount if needed.\"\"\"\n\n # solaris kwargs:\n # name, src, fstype, opts, boot, passno, state, fstab=/etc/vfstab\n # linux kwargs:\n # name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab\n if get_platform() == 'SunOS':\n args = dict(\n opts='-',\n passno='-',\n fstab='/etc/vfstab',\n boot='yes'\n )\n else:\n args = dict(\n opts='default',\n dump='0',\n passno='0',\n fstab='/etc/fstab'\n )\n args.update(kwargs)\n\n mount_bin = module.get_bin_path('mount', required=True)\n name = kwargs['name']\n cmd = [mount_bin]\n\n if ismount(name):\n cmd += ['-o', 'remount']\n\n if get_platform().lower() == 'freebsd':\n cmd += ['-F', args['fstab']]\n elif get_platform().lower() == 'linux' and args['fstab'] != '/etc/fstab':\n cmd += ['-T', args['fstab']]\n\n cmd += [name]\n\n rc, out, err = module.run_command(cmd)\n\n if rc == 0:\n return 0, ''\n else:\n return rc, out+err\n\n\ndef umount(module, **kwargs):\n \"\"\"Unmount a path.\"\"\"\n\n umount_bin = module.get_bin_path('umount', required=True)\n name = kwargs['name']\n cmd = [umount_bin, name]\n\n rc, out, err = module.run_command(cmd)\n\n if rc == 0:\n return 0, ''\n else:\n return rc, out+err\n\n\n# Note if we wanted to put this into module_utils we'd have to get permission\n# from @jupeter -- https://github.com/ansible/ansible-modules-core/pull/2923\n# @jtyr -- https://github.com/ansible/ansible-modules-core/issues/4439\n# and @abadger to relicense from GPLv3+\ndef is_bind_mounted(module, dest, src=None, fstype=None):\n \"\"\"Return whether the dest is bind mounted\n\n :arg module: The AnsibleModule (used for helper functions)\n :arg dest: The directory to be mounted under. This is the primary means\n of identifying whether the destination is mounted.\n :kwarg src: The source directory. If specified, this is used to help\n ensure that we are detecting that the correct source is mounted there.\n :kwarg fstype: The filesystem type. 
If specified this is also used to\n help ensure that we are detecting the right mount.\n :returns: True if the dest is mounted with src otherwise False.\n \"\"\"\n\n is_mounted = False\n bin_path = module.get_bin_path('mount', required=True)\n cmd = '%s -l' % bin_path\n\n if get_platform().lower() == 'linux':\n bin_path = module.get_bin_path('findmnt', required=True)\n cmd = '%s -nr %s' % (bin_path, dest)\n\n rc, out, err = module.run_command(cmd)\n mounts = []\n\n if len(out):\n mounts = out.strip().split('\\n')\n\n mount_pattern = re.compile('\\[(.*)\\]')\n\n for mnt in mounts:\n arguments = mnt.split()\n\n if get_platform().lower() == 'linux':\n result = mount_pattern.search(arguments[1])\n\n if len(result.groups()) == 1:\n if arguments[0] == dest:\n is_mounted = True\n elif (\n (arguments[0] == src or src is None) and\n arguments[2] == dest and\n (arguments[4] == fstype or fstype is None)):\n is_mounted = True\n\n if is_mounted:\n break\n\n return is_mounted\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n boot=dict(default='yes', choices=['yes', 'no']),\n dump=dict(),\n fstab=dict(default='/etc/fstab'),\n fstype=dict(),\n name=dict(required=True, type='path'),\n opts=dict(),\n passno=dict(type='str'),\n src=dict(type='path'),\n state=dict(\n required=True,\n choices=['present', 'absent', 'mounted', 'unmounted']),\n ),\n supports_check_mode=True,\n required_if=(\n ['state', 'mounted', ['src', 'fstype']],\n ['state', 'present', ['src', 'fstype']]\n )\n )\n\n changed = False\n args = {\n 'name': module.params['name']\n }\n\n if module.params['src'] is not None:\n args['src'] = module.params['src']\n if module.params['fstype'] is not None:\n args['fstype'] = module.params['fstype']\n if module.params['passno'] is not None:\n args['passno'] = module.params['passno']\n if module.params['opts'] is not None:\n args['opts'] = module.params['opts']\n if module.params['dump'] is not None:\n args['dump'] = module.params['dump']\n if get_platform() == 'SunOS' and module.params['fstab'] == '/etc/fstab':\n args['fstab'] = '/etc/vfstab'\n elif module.params['fstab'] is not None:\n args['fstab'] = module.params['fstab']\n\n # If fstab file does not exist, we first need to create it. This mainly\n # happens when fstab option is passed to the module.\n if not os.path.exists(args['fstab']):\n if not os.path.exists(os.path.dirname(args['fstab'])):\n os.makedirs(os.path.dirname(args['fstab']))\n\n open(args['fstab'], 'a').close()\n\n # absent:\n # Remove from fstab and unmounted.\n # unmounted:\n # Do not change fstab state, but unmount.\n # present:\n # Add to fstab, do not change mount state.\n # mounted:\n # Add to fstab if not there and make sure it is mounted. 
If it has\n # changed in fstab then remount it.\n\n state = module.params['state']\n name = module.params['name']\n\n if state == 'absent':\n name, changed = unset_mount(module, **args)\n\n if changed and not module.check_mode:\n if ismount(name) or is_bind_mounted(module, name):\n res, msg = umount(module, **args)\n\n if res:\n module.fail_json(\n msg=\"Error unmounting %s: %s\" % (name, msg))\n\n if os.path.exists(name):\n try:\n os.rmdir(name)\n except (OSError, IOError):\n e = get_exception()\n module.fail_json(msg=\"Error rmdir %s: %s\" % (name, str(e)))\n elif state == 'unmounted':\n if ismount(name) or is_bind_mounted(module, name):\n if not module.check_mode:\n res, msg = umount(module, **args)\n\n if res:\n module.fail_json(\n msg=\"Error unmounting %s: %s\" % (name, msg))\n\n changed = True\n elif state == 'mounted':\n if not os.path.exists(name) and not module.check_mode:\n try:\n os.makedirs(name)\n except (OSError, IOError):\n e = get_exception()\n module.fail_json(\n msg=\"Error making dir %s: %s\" % (name, str(e)))\n\n name, changed = set_mount(module, **args)\n res = 0\n\n if ismount(name):\n if not module.check_mode:\n res, msg = mount(module, **args)\n changed = True\n elif 'bind' in args.get('opts', []):\n changed = True\n\n if is_bind_mounted(module, name, args['src'], args['fstype']):\n changed = False\n\n if changed and not module.check_mode:\n res, msg = mount(module, **args)\n else:\n changed = True\n\n if not module.check_mode:\n res, msg = mount(module, **args)\n\n if res:\n module.fail_json(msg=\"Error mounting %s: %s\" % (name, msg))\n elif state == 'present':\n name, changed = set_mount(module, **args)\n else:\n module.fail_json(msg='Unexpected position reached')\n\n module.exit_json(changed=changed, **args)\n\n\nif __name__ == '__main__':\n main()\n", "path": "system/mount.py" } ]
diff --git a/system/mount.py b/system/mount.py index 38785cd9303..1033c3f80ab 100644 --- a/system/mount.py +++ b/system/mount.py @@ -375,7 +375,7 @@ def mount(module, **kwargs): if get_platform().lower() == 'freebsd': cmd += ['-F', args['fstab']] - elif get_platform().lower() == 'linux': + elif get_platform().lower() == 'linux' and args['fstab'] != '/etc/fstab': cmd += ['-T', args['fstab']] cmd += [name]
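The one-line guard above means `-T` (util-linux mount's flag for reading an alternative fstab, which the module already relied on) is only passed when the caller actually asked for a non-default file, leaving the plain `mount <name>` invocation untouched otherwise. A condensed, runnable sketch of the fixed command construction — the function name is illustrative and the argument names are shortened from the module:

```python
def build_mount_cmd(name, fstab, platform, remount=False):
    cmd = ['mount']
    if remount:
        cmd += ['-o', 'remount']
    if platform == 'freebsd':
        cmd += ['-F', fstab]
    elif platform == 'linux' and fstab != '/etc/fstab':
        cmd += ['-T', fstab]
    return cmd + [name]

# the report's scenario: a custom fstab is now actually handed to mount
print(build_mount_cmd('/tmp/mnt', '/tmp/fstab', 'linux'))
# ['mount', '-T', '/tmp/fstab', '/tmp/mnt']
```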
zalando__patroni-2309
Patroni fails to completely strip enclosing brackets around IPv6 addresses as part of a list when port is specified **Describe the bug** When specifying PostgreSQL listen IPv6 addresses (postgresql.listen) in the Patroni configuration, Patroni fails to strip all enclosing brackets around IPv6 addresses, leaving the leading bracket on the last IPv6 address in the list. Example: _/etc/patroni/patroni.yml_: ``` ... postgresql: config_dir: /var/lib/pgsql/14/data listen: '127.0.0.1, 2001:db8::11, [2001:db8::1:2]:5678' ... ``` Resulting _/var/lib/pgsql/14/data/postgresql.conf_: ``` ... listen_addresses = '127.0.0.1, 2001:db8::11, [2001:db8::1:2' port = '5678' ... ``` However if only a single IPv6 address is specified, it works correctly: _/etc/patroni/patroni.yml_: ``` ... postgresql: config_dir: /var/lib/pgsql/14/data listen: '[2001:db8::1:2]:5678' ... ``` Resulting _/var/lib/pgsql/14/data/postgresql.conf_: ``` ... listen_addresses = 2001:db8::1:2' port = '5678' ... ``` **To Reproduce** Steps to reproduce the behavior: Configure Patroni with postgresql.listen with a list of IPv6 addresses, with the last address in the list being a bracket-enclosed IPv6 address with a port **Expected behavior** Patroni should correctly strip all enclosing brackets from IPv6 addresses **Environment** - Patroni version: 2.1.3 - PostgreSQL version: 14.1 - DCS (and its version): etcd 3.4.18 **Patroni configuration file** ``` ... postgresql: config_dir: /var/lib/pgsql/14/data listen: '127.0.0.1, 2001:db8::11, [2001:db8::1:2]:5678' ... ``` **Have you tried to use GitHub issue search?** Yes
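The output shown is consistent with splitting the port off with a single right-to-left split: for `'127.0.0.1, 2001:db8::11, [2001:db8::1:2]:5678'` that leaves `'127.0.0.1, 2001:db8::11, [2001:db8::1:2'`, and stripping brackets from that whole string does nothing because the `[` now sits in its middle rather than at an end. The parser has to take the port from the last element and then strip brackets from every address individually. A minimal sketch of such a parser — not Patroni's actual implementation; the function name and default port are illustrative:

```python
def split_listen(value, default_port=5432):
    """Split a listen string like '127.0.0.1, [::1]:5678' into (hosts, port).

    Illustrative sketch only: a port may only follow the last element,
    and IPv6 addresses may be wrapped in brackets.
    """
    hosts = [h.strip() for h in value.split(',')]
    port = default_port
    last = hosts[-1]
    if ']' in last:                       # bracketed IPv6, possibly [addr]:port
        addr, _, rest = last.partition(']')
        if rest.startswith(':'):
            port = int(rest[1:])
        hosts[-1] = addr
    elif last.count(':') == 1:            # IPv4/hostname with a port
        addr, _, p = last.partition(':')
        hosts[-1], port = addr, int(p)
    # strip enclosing brackets from every element, not from the joined string
    return ', '.join(h.strip('[]') for h in hosts), port

print(split_listen('127.0.0.1, 2001:db8::11, [2001:db8::1:2]:5678'))
# ('127.0.0.1, 2001:db8::11, 2001:db8::1:2', 5678)
```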
[ { "content": "import errno\nimport json.decoder as json_decoder\nimport logging\nimport os\nimport platform\nimport random\nimport re\nimport socket\nimport sys\nimport tempfile\nimport time\n\nfrom dateutil import tz\n\nfrom .exceptions import PatroniException\nfrom .version import __version__\n\ntzutc = tz.tzutc()\n\nlogger = logging.getLogger(__name__)\n\nUSER_AGENT = 'Patroni/{0} Python/{1} {2}'.format(__version__, platform.python_version(), platform.system())\nOCT_RE = re.compile(r'^[-+]?0[0-7]*')\nDEC_RE = re.compile(r'^[-+]?(0|[1-9][0-9]*)')\nHEX_RE = re.compile(r'^[-+]?0x[0-9a-fA-F]+')\nDBL_RE = re.compile(r'^[-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?')\n\n\ndef deep_compare(obj1, obj2):\n \"\"\"\n >>> deep_compare({'1': None}, {})\n False\n >>> deep_compare({'1': {}}, {'1': None})\n False\n >>> deep_compare({'1': [1]}, {'1': [2]})\n False\n >>> deep_compare({'1': 2}, {'1': '2'})\n True\n >>> deep_compare({'1': {'2': [3, 4]}}, {'1': {'2': [3, 4]}})\n True\n \"\"\"\n\n if set(list(obj1.keys())) != set(list(obj2.keys())): # Objects have different sets of keys\n return False\n\n for key, value in obj1.items():\n if isinstance(value, dict):\n if not (isinstance(obj2[key], dict) and deep_compare(value, obj2[key])):\n return False\n elif str(value) != str(obj2[key]):\n return False\n return True\n\n\ndef patch_config(config, data):\n \"\"\"recursively 'patch' `config` with `data`\n :returns: `!True` if the `config` was changed\"\"\"\n is_changed = False\n for name, value in data.items():\n if value is None:\n if config.pop(name, None) is not None:\n is_changed = True\n elif name in config:\n if isinstance(value, dict):\n if isinstance(config[name], dict):\n if patch_config(config[name], value):\n is_changed = True\n else:\n config[name] = value\n is_changed = True\n elif str(config[name]) != str(value):\n config[name] = value\n is_changed = True\n else:\n config[name] = value\n is_changed = True\n return is_changed\n\n\ndef parse_bool(value):\n \"\"\"\n >>> parse_bool(1)\n True\n >>> parse_bool('off')\n False\n >>> parse_bool('foo')\n \"\"\"\n value = str(value).lower()\n if value in ('on', 'true', 'yes', '1'):\n return True\n if value in ('off', 'false', 'no', '0'):\n return False\n\n\ndef strtol(value, strict=True):\n \"\"\"As most as possible close equivalent of strtol(3) function (with base=0),\n used by postgres to parse parameter values.\n >>> strtol(0) == (0, '')\n True\n >>> strtol(1) == (1, '')\n True\n >>> strtol(9) == (9, '')\n True\n >>> strtol(' +0x400MB') == (1024, 'MB')\n True\n >>> strtol(' -070d') == (-56, 'd')\n True\n >>> strtol(' d ') == (None, 'd')\n True\n >>> strtol(' 1 d ') == (1, ' d')\n True\n >>> strtol('9s', False) == (9, 's')\n True\n >>> strtol(' s ', False) == (1, 's')\n True\n \"\"\"\n value = str(value).strip()\n for regex, base in ((HEX_RE, 16), (OCT_RE, 8), (DEC_RE, 10)):\n match = regex.match(value)\n if match:\n end = match.end()\n return int(value[:end], base), value[end:]\n return (None if strict else 1), value\n\n\ndef strtod(value):\n \"\"\"As most as possible close equivalent of strtod(3) function used by postgres to parse parameter values.\n >>> strtod(' A ') == (None, 'A')\n True\n \"\"\"\n value = str(value).strip()\n match = DBL_RE.match(value)\n if match:\n end = match.end()\n return float(value[:end]), value[end:]\n return None, value\n\n\ndef rint(value):\n \"\"\"\n >>> rint(0.5) == 0\n True\n >>> rint(0.501) == 1\n True\n >>> rint(1.5) == 2\n True\n \"\"\"\n\n ret = round(value)\n return 2.0 * round(value / 2.0) if abs(ret - value) == 0.5 
else ret\n\n\ndef convert_to_base_unit(value, unit, base_unit):\n convert = {\n 'B': {'B': 1, 'kB': 1024, 'MB': 1024 * 1024, 'GB': 1024 * 1024 * 1024, 'TB': 1024 * 1024 * 1024 * 1024},\n 'kB': {'B': 1.0 / 1024, 'kB': 1, 'MB': 1024, 'GB': 1024 * 1024, 'TB': 1024 * 1024 * 1024},\n 'MB': {'B': 1.0 / (1024 * 1024), 'kB': 1.0 / 1024, 'MB': 1, 'GB': 1024, 'TB': 1024 * 1024},\n 'ms': {'us': 1.0 / 1000, 'ms': 1, 's': 1000, 'min': 1000 * 60, 'h': 1000 * 60 * 60, 'd': 1000 * 60 * 60 * 24},\n 's': {'us': 1.0 / (1000 * 1000), 'ms': 1.0 / 1000, 's': 1, 'min': 60, 'h': 60 * 60, 'd': 60 * 60 * 24},\n 'min': {'us': 1.0 / (1000 * 1000 * 60), 'ms': 1.0 / (1000 * 60), 's': 1.0 / 60, 'min': 1, 'h': 60, 'd': 60 * 24}\n }\n\n round_order = {\n 'TB': 'GB', 'GB': 'MB', 'MB': 'kB', 'kB': 'B',\n 'd': 'h', 'h': 'min', 'min': 's', 's': 'ms', 'ms': 'us'\n }\n\n if base_unit and base_unit not in convert:\n base_value, base_unit = strtol(base_unit, False)\n else:\n base_value = 1\n\n if base_unit in convert and unit in convert[base_unit]:\n value *= convert[base_unit][unit] / float(base_value)\n\n if unit in round_order:\n multiplier = convert[base_unit][round_order[unit]]\n value = rint(value / float(multiplier)) * multiplier\n\n return value\n\n\ndef parse_int(value, base_unit=None):\n \"\"\"\n >>> parse_int('1') == 1\n True\n >>> parse_int(' 0x400 MB ', '16384kB') == 64\n True\n >>> parse_int('1MB', 'kB') == 1024\n True\n >>> parse_int('1000 ms', 's') == 1\n True\n >>> parse_int('1TB', 'GB') is None\n True\n >>> parse_int(0) == 0\n True\n >>> parse_int('6GB', '16MB') == 384\n True\n >>> parse_int('4097.4kB', 'kB') == 4097\n True\n >>> parse_int('4097.5kB', 'kB') == 4098\n True\n \"\"\"\n\n val, unit = strtol(value)\n if val is None and unit.startswith('.') or unit and unit[0] in ('.', 'e', 'E'):\n val, unit = strtod(value)\n\n if val is not None:\n unit = unit.strip()\n if not unit:\n return int(rint(val))\n\n val = convert_to_base_unit(val, unit, base_unit)\n if val is not None:\n return int(rint(val))\n\n\ndef parse_real(value, base_unit=None):\n \"\"\"\n >>> parse_real(' +0.0005 ') == 0.0005\n True\n >>> parse_real('0.0005ms', 'ms') == 0.0\n True\n >>> parse_real('0.00051ms', 'ms') == 0.001\n True\n \"\"\"\n val, unit = strtod(value)\n\n if val is not None:\n unit = unit.strip()\n if not unit:\n return val\n\n return convert_to_base_unit(val, unit, base_unit)\n\n\ndef compare_values(vartype, unit, old_value, new_value):\n \"\"\"\n >>> compare_values('enum', None, 'remote_write', 'REMOTE_WRITE')\n True\n >>> compare_values('real', None, '1e-06', 0.000001)\n True\n \"\"\"\n\n converters = {\n 'bool': lambda v1, v2: parse_bool(v1),\n 'integer': parse_int,\n 'real': parse_real,\n 'enum': lambda v1, v2: str(v1).lower(),\n 'string': lambda v1, v2: str(v1)\n }\n\n convert = converters.get(vartype) or converters['string']\n old_value = convert(old_value, None)\n new_value = convert(new_value, unit)\n\n return old_value is not None and new_value is not None and old_value == new_value\n\n\ndef _sleep(interval):\n time.sleep(interval)\n\n\nclass RetryFailedError(PatroniException):\n\n \"\"\"Raised when retrying an operation ultimately failed, after retrying the maximum number of attempts.\"\"\"\n\n\nclass Retry(object):\n\n \"\"\"Helper for retrying a method in the face of retry-able exceptions\"\"\"\n\n def __init__(self, max_tries=1, delay=0.1, backoff=2, max_jitter=0.8, max_delay=3600,\n sleep_func=_sleep, deadline=None, retry_exceptions=PatroniException):\n \"\"\"Create a :class:`Retry` instance for retrying function 
calls\n\n :param max_tries: How many times to retry the command. -1 means infinite tries.\n :param delay: Initial delay between retry attempts.\n :param backoff: Backoff multiplier between retry attempts. Defaults to 2 for exponential backoff.\n :param max_jitter: Additional max jitter period to wait between retry attempts to avoid slamming the server.\n :param max_delay: Maximum delay in seconds, regardless of other backoff settings. Defaults to one hour.\n :param retry_exceptions: single exception or tuple\"\"\"\n\n self.max_tries = max_tries\n self.delay = delay\n self.backoff = backoff\n self.max_jitter = int(max_jitter * 100)\n self.max_delay = float(max_delay)\n self._attempts = 0\n self._cur_delay = delay\n self.deadline = deadline\n self._cur_stoptime = None\n self.sleep_func = sleep_func\n self.retry_exceptions = retry_exceptions\n\n def reset(self):\n \"\"\"Reset the attempt counter\"\"\"\n self._attempts = 0\n self._cur_delay = self.delay\n self._cur_stoptime = None\n\n def copy(self):\n \"\"\"Return a clone of this retry manager\"\"\"\n return Retry(max_tries=self.max_tries, delay=self.delay, backoff=self.backoff,\n max_jitter=self.max_jitter / 100.0, max_delay=self.max_delay, sleep_func=self.sleep_func,\n deadline=self.deadline, retry_exceptions=self.retry_exceptions)\n\n @property\n def sleeptime(self):\n return self._cur_delay + (random.randint(0, self.max_jitter) / 100.0)\n\n def update_delay(self):\n self._cur_delay = min(self._cur_delay * self.backoff, self.max_delay)\n\n @property\n def stoptime(self):\n return self._cur_stoptime\n\n def __call__(self, func, *args, **kwargs):\n \"\"\"Call a function with arguments until it completes without throwing a `retry_exceptions`\n\n :param func: Function to call\n :param args: Positional arguments to call the function with\n :params kwargs: Keyword arguments to call the function with\n\n The function will be called until it doesn't throw one of the retryable exceptions\"\"\"\n self.reset()\n\n while True:\n try:\n if self.deadline is not None and self._cur_stoptime is None:\n self._cur_stoptime = time.time() + self.deadline\n return func(*args, **kwargs)\n except self.retry_exceptions as e:\n # Note: max_tries == -1 means infinite tries.\n if self._attempts == self.max_tries:\n logger.warning('Retry got exception: %s', e)\n raise RetryFailedError(\"Too many retry attempts\")\n self._attempts += 1\n sleeptime = hasattr(e, 'sleeptime') and e.sleeptime or self.sleeptime\n\n if self._cur_stoptime is not None and time.time() + sleeptime >= self._cur_stoptime:\n logger.warning('Retry got exception: %s', e)\n raise RetryFailedError(\"Exceeded retry deadline\")\n logger.debug('Retry got exception: %s', e)\n self.sleep_func(sleeptime)\n self.update_delay()\n\n\ndef polling_loop(timeout, interval=1):\n \"\"\"Returns an iterator that returns values until timeout has passed. 
Timeout is measured from start of iteration.\"\"\"\n start_time = time.time()\n iteration = 0\n end_time = start_time + timeout\n while time.time() < end_time:\n yield iteration\n iteration += 1\n time.sleep(interval)\n\n\ndef split_host_port(value, default_port):\n t = value.rsplit(':', 1)\n if ':' in t[0]:\n t[0] = t[0].strip('[]')\n t.append(default_port)\n return t[0], int(t[1])\n\n\ndef uri(proto, netloc, path='', user=None):\n host, port = netloc if isinstance(netloc, (list, tuple)) else split_host_port(netloc, 0)\n if host and ':' in host and host[0] != '[' and host[-1] != ']':\n host = '[{0}]'.format(host)\n port = ':{0}'.format(port) if port else ''\n path = '/{0}'.format(path) if path and not path.startswith('/') else path\n user = '{0}@'.format(user) if user else ''\n return '{0}://{1}{2}{3}{4}'.format(proto, user, host, port, path)\n\n\ndef iter_response_objects(response):\n prev = ''\n decoder = json_decoder.JSONDecoder()\n for chunk in response.read_chunked(decode_content=False):\n if isinstance(chunk, bytes):\n chunk = chunk.decode('utf-8')\n chunk = prev + chunk\n\n length = len(chunk)\n idx = json_decoder.WHITESPACE.match(chunk, 0).end()\n while idx < length:\n try:\n message, idx = decoder.raw_decode(chunk, idx)\n except ValueError: # malformed or incomplete JSON, unlikely to happen\n break\n else:\n yield message\n idx = json_decoder.WHITESPACE.match(chunk, idx).end()\n prev = chunk[idx:]\n\n\ndef is_standby_cluster(config):\n # Check whether or not provided configuration describes a standby cluster\n return isinstance(config, dict) and (config.get('host') or config.get('port') or config.get('restore_command'))\n\n\ndef cluster_as_json(cluster):\n leader_name = cluster.leader.name if cluster.leader else None\n cluster_lsn = cluster.last_lsn or 0\n\n ret = {'members': []}\n for m in cluster.members:\n if m.name == leader_name:\n config = cluster.config.data if cluster.config and cluster.config.modify_index else {}\n role = 'standby_leader' if is_standby_cluster(config.get('standby_cluster')) else 'leader'\n elif m.name in cluster.sync.members:\n role = 'sync_standby'\n else:\n role = 'replica'\n\n member = {'name': m.name, 'role': role, 'state': m.data.get('state', ''), 'api_url': m.api_url}\n conn_kwargs = m.conn_kwargs()\n if conn_kwargs.get('host'):\n member['host'] = conn_kwargs['host']\n if conn_kwargs.get('port'):\n member['port'] = int(conn_kwargs['port'])\n optional_attributes = ('timeline', 'pending_restart', 'scheduled_restart', 'tags')\n member.update({n: m.data[n] for n in optional_attributes if n in m.data})\n\n if m.name != leader_name:\n lsn = m.data.get('xlog_location')\n if lsn is None:\n member['lag'] = 'unknown'\n elif cluster_lsn >= lsn:\n member['lag'] = cluster_lsn - lsn\n else:\n member['lag'] = 0\n\n ret['members'].append(member)\n\n # sort members by name for consistency\n ret['members'].sort(key=lambda m: m['name'])\n if cluster.is_paused():\n ret['pause'] = True\n if cluster.failover and cluster.failover.scheduled_at:\n ret['scheduled_switchover'] = {'at': cluster.failover.scheduled_at.isoformat()}\n if cluster.failover.leader:\n ret['scheduled_switchover']['from'] = cluster.failover.leader\n if cluster.failover.candidate:\n ret['scheduled_switchover']['to'] = cluster.failover.candidate\n return ret\n\n\ndef is_subpath(d1, d2):\n real_d1 = os.path.realpath(d1) + os.path.sep\n real_d2 = os.path.realpath(os.path.join(real_d1, d2))\n return os.path.commonprefix([real_d1, real_d2 + os.path.sep]) == real_d1\n\n\ndef validate_directory(d, msg=\"{} 
{}\"):\n if not os.path.exists(d):\n try:\n os.makedirs(d)\n except OSError as e:\n logger.error(e)\n if e.errno != errno.EEXIST:\n raise PatroniException(msg.format(d, \"couldn't create the directory\"))\n elif os.path.isdir(d):\n try:\n fd, tmpfile = tempfile.mkstemp(dir=d)\n os.close(fd)\n os.remove(tmpfile)\n except OSError:\n raise PatroniException(msg.format(d, \"the directory is not writable\"))\n else:\n raise PatroniException(msg.format(d, \"is not a directory\"))\n\n\ndef data_directory_is_empty(data_dir):\n if not os.path.exists(data_dir):\n return True\n return all(os.name != 'nt' and (n.startswith('.') or n == 'lost+found') for n in os.listdir(data_dir))\n\n\ndef keepalive_intvl(timeout, idle, cnt=3):\n return max(1, int(float(timeout - idle) / cnt))\n\n\ndef keepalive_socket_options(timeout, idle, cnt=3):\n yield (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)\n\n if sys.platform.startswith('linux'):\n yield (socket.SOL_TCP, 18, int(timeout * 1000)) # TCP_USER_TIMEOUT\n TCP_KEEPIDLE = getattr(socket, 'TCP_KEEPIDLE', None)\n TCP_KEEPINTVL = getattr(socket, 'TCP_KEEPINTVL', None)\n TCP_KEEPCNT = getattr(socket, 'TCP_KEEPCNT', None)\n elif sys.platform.startswith('darwin'):\n TCP_KEEPIDLE = 0x10 # (named \"TCP_KEEPALIVE\" in C)\n TCP_KEEPINTVL = 0x101\n TCP_KEEPCNT = 0x102\n else:\n return\n\n intvl = keepalive_intvl(timeout, idle, cnt)\n yield (socket.IPPROTO_TCP, TCP_KEEPIDLE, idle)\n yield (socket.IPPROTO_TCP, TCP_KEEPINTVL, intvl)\n yield (socket.IPPROTO_TCP, TCP_KEEPCNT, cnt)\n\n\ndef enable_keepalive(sock, timeout, idle, cnt=3):\n SIO_KEEPALIVE_VALS = getattr(socket, 'SIO_KEEPALIVE_VALS', None)\n if SIO_KEEPALIVE_VALS is not None: # Windows\n intvl = keepalive_intvl(timeout, idle, cnt)\n return sock.ioctl(SIO_KEEPALIVE_VALS, (1, idle * 1000, intvl * 1000))\n\n for opt in keepalive_socket_options(timeout, idle, cnt):\n sock.setsockopt(*opt)\n\n\ndef find_executable(executable, path=None):\n _, ext = os.path.splitext(executable)\n\n if (sys.platform == 'win32') and (ext != '.exe'):\n executable = executable + '.exe'\n\n if os.path.isfile(executable):\n return executable\n\n if path is None:\n path = os.environ.get('PATH', os.defpath)\n\n for p in path.split(os.pathsep):\n f = os.path.join(p, executable)\n if os.path.isfile(f):\n return f\n", "path": "patroni/utils.py" } ]
[ { "content": "import errno\nimport json.decoder as json_decoder\nimport logging\nimport os\nimport platform\nimport random\nimport re\nimport socket\nimport sys\nimport tempfile\nimport time\n\nfrom dateutil import tz\n\nfrom .exceptions import PatroniException\nfrom .version import __version__\n\ntzutc = tz.tzutc()\n\nlogger = logging.getLogger(__name__)\n\nUSER_AGENT = 'Patroni/{0} Python/{1} {2}'.format(__version__, platform.python_version(), platform.system())\nOCT_RE = re.compile(r'^[-+]?0[0-7]*')\nDEC_RE = re.compile(r'^[-+]?(0|[1-9][0-9]*)')\nHEX_RE = re.compile(r'^[-+]?0x[0-9a-fA-F]+')\nDBL_RE = re.compile(r'^[-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?')\n\n\ndef deep_compare(obj1, obj2):\n \"\"\"\n >>> deep_compare({'1': None}, {})\n False\n >>> deep_compare({'1': {}}, {'1': None})\n False\n >>> deep_compare({'1': [1]}, {'1': [2]})\n False\n >>> deep_compare({'1': 2}, {'1': '2'})\n True\n >>> deep_compare({'1': {'2': [3, 4]}}, {'1': {'2': [3, 4]}})\n True\n \"\"\"\n\n if set(list(obj1.keys())) != set(list(obj2.keys())): # Objects have different sets of keys\n return False\n\n for key, value in obj1.items():\n if isinstance(value, dict):\n if not (isinstance(obj2[key], dict) and deep_compare(value, obj2[key])):\n return False\n elif str(value) != str(obj2[key]):\n return False\n return True\n\n\ndef patch_config(config, data):\n \"\"\"recursively 'patch' `config` with `data`\n :returns: `!True` if the `config` was changed\"\"\"\n is_changed = False\n for name, value in data.items():\n if value is None:\n if config.pop(name, None) is not None:\n is_changed = True\n elif name in config:\n if isinstance(value, dict):\n if isinstance(config[name], dict):\n if patch_config(config[name], value):\n is_changed = True\n else:\n config[name] = value\n is_changed = True\n elif str(config[name]) != str(value):\n config[name] = value\n is_changed = True\n else:\n config[name] = value\n is_changed = True\n return is_changed\n\n\ndef parse_bool(value):\n \"\"\"\n >>> parse_bool(1)\n True\n >>> parse_bool('off')\n False\n >>> parse_bool('foo')\n \"\"\"\n value = str(value).lower()\n if value in ('on', 'true', 'yes', '1'):\n return True\n if value in ('off', 'false', 'no', '0'):\n return False\n\n\ndef strtol(value, strict=True):\n \"\"\"As most as possible close equivalent of strtol(3) function (with base=0),\n used by postgres to parse parameter values.\n >>> strtol(0) == (0, '')\n True\n >>> strtol(1) == (1, '')\n True\n >>> strtol(9) == (9, '')\n True\n >>> strtol(' +0x400MB') == (1024, 'MB')\n True\n >>> strtol(' -070d') == (-56, 'd')\n True\n >>> strtol(' d ') == (None, 'd')\n True\n >>> strtol(' 1 d ') == (1, ' d')\n True\n >>> strtol('9s', False) == (9, 's')\n True\n >>> strtol(' s ', False) == (1, 's')\n True\n \"\"\"\n value = str(value).strip()\n for regex, base in ((HEX_RE, 16), (OCT_RE, 8), (DEC_RE, 10)):\n match = regex.match(value)\n if match:\n end = match.end()\n return int(value[:end], base), value[end:]\n return (None if strict else 1), value\n\n\ndef strtod(value):\n \"\"\"As most as possible close equivalent of strtod(3) function used by postgres to parse parameter values.\n >>> strtod(' A ') == (None, 'A')\n True\n \"\"\"\n value = str(value).strip()\n match = DBL_RE.match(value)\n if match:\n end = match.end()\n return float(value[:end]), value[end:]\n return None, value\n\n\ndef rint(value):\n \"\"\"\n >>> rint(0.5) == 0\n True\n >>> rint(0.501) == 1\n True\n >>> rint(1.5) == 2\n True\n \"\"\"\n\n ret = round(value)\n return 2.0 * round(value / 2.0) if abs(ret - value) == 0.5 
else ret\n\n\ndef convert_to_base_unit(value, unit, base_unit):\n convert = {\n 'B': {'B': 1, 'kB': 1024, 'MB': 1024 * 1024, 'GB': 1024 * 1024 * 1024, 'TB': 1024 * 1024 * 1024 * 1024},\n 'kB': {'B': 1.0 / 1024, 'kB': 1, 'MB': 1024, 'GB': 1024 * 1024, 'TB': 1024 * 1024 * 1024},\n 'MB': {'B': 1.0 / (1024 * 1024), 'kB': 1.0 / 1024, 'MB': 1, 'GB': 1024, 'TB': 1024 * 1024},\n 'ms': {'us': 1.0 / 1000, 'ms': 1, 's': 1000, 'min': 1000 * 60, 'h': 1000 * 60 * 60, 'd': 1000 * 60 * 60 * 24},\n 's': {'us': 1.0 / (1000 * 1000), 'ms': 1.0 / 1000, 's': 1, 'min': 60, 'h': 60 * 60, 'd': 60 * 60 * 24},\n 'min': {'us': 1.0 / (1000 * 1000 * 60), 'ms': 1.0 / (1000 * 60), 's': 1.0 / 60, 'min': 1, 'h': 60, 'd': 60 * 24}\n }\n\n round_order = {\n 'TB': 'GB', 'GB': 'MB', 'MB': 'kB', 'kB': 'B',\n 'd': 'h', 'h': 'min', 'min': 's', 's': 'ms', 'ms': 'us'\n }\n\n if base_unit and base_unit not in convert:\n base_value, base_unit = strtol(base_unit, False)\n else:\n base_value = 1\n\n if base_unit in convert and unit in convert[base_unit]:\n value *= convert[base_unit][unit] / float(base_value)\n\n if unit in round_order:\n multiplier = convert[base_unit][round_order[unit]]\n value = rint(value / float(multiplier)) * multiplier\n\n return value\n\n\ndef parse_int(value, base_unit=None):\n \"\"\"\n >>> parse_int('1') == 1\n True\n >>> parse_int(' 0x400 MB ', '16384kB') == 64\n True\n >>> parse_int('1MB', 'kB') == 1024\n True\n >>> parse_int('1000 ms', 's') == 1\n True\n >>> parse_int('1TB', 'GB') is None\n True\n >>> parse_int(0) == 0\n True\n >>> parse_int('6GB', '16MB') == 384\n True\n >>> parse_int('4097.4kB', 'kB') == 4097\n True\n >>> parse_int('4097.5kB', 'kB') == 4098\n True\n \"\"\"\n\n val, unit = strtol(value)\n if val is None and unit.startswith('.') or unit and unit[0] in ('.', 'e', 'E'):\n val, unit = strtod(value)\n\n if val is not None:\n unit = unit.strip()\n if not unit:\n return int(rint(val))\n\n val = convert_to_base_unit(val, unit, base_unit)\n if val is not None:\n return int(rint(val))\n\n\ndef parse_real(value, base_unit=None):\n \"\"\"\n >>> parse_real(' +0.0005 ') == 0.0005\n True\n >>> parse_real('0.0005ms', 'ms') == 0.0\n True\n >>> parse_real('0.00051ms', 'ms') == 0.001\n True\n \"\"\"\n val, unit = strtod(value)\n\n if val is not None:\n unit = unit.strip()\n if not unit:\n return val\n\n return convert_to_base_unit(val, unit, base_unit)\n\n\ndef compare_values(vartype, unit, old_value, new_value):\n \"\"\"\n >>> compare_values('enum', None, 'remote_write', 'REMOTE_WRITE')\n True\n >>> compare_values('real', None, '1e-06', 0.000001)\n True\n \"\"\"\n\n converters = {\n 'bool': lambda v1, v2: parse_bool(v1),\n 'integer': parse_int,\n 'real': parse_real,\n 'enum': lambda v1, v2: str(v1).lower(),\n 'string': lambda v1, v2: str(v1)\n }\n\n convert = converters.get(vartype) or converters['string']\n old_value = convert(old_value, None)\n new_value = convert(new_value, unit)\n\n return old_value is not None and new_value is not None and old_value == new_value\n\n\ndef _sleep(interval):\n time.sleep(interval)\n\n\nclass RetryFailedError(PatroniException):\n\n \"\"\"Raised when retrying an operation ultimately failed, after retrying the maximum number of attempts.\"\"\"\n\n\nclass Retry(object):\n\n \"\"\"Helper for retrying a method in the face of retry-able exceptions\"\"\"\n\n def __init__(self, max_tries=1, delay=0.1, backoff=2, max_jitter=0.8, max_delay=3600,\n sleep_func=_sleep, deadline=None, retry_exceptions=PatroniException):\n \"\"\"Create a :class:`Retry` instance for retrying function 
calls\n\n :param max_tries: How many times to retry the command. -1 means infinite tries.\n :param delay: Initial delay between retry attempts.\n :param backoff: Backoff multiplier between retry attempts. Defaults to 2 for exponential backoff.\n :param max_jitter: Additional max jitter period to wait between retry attempts to avoid slamming the server.\n :param max_delay: Maximum delay in seconds, regardless of other backoff settings. Defaults to one hour.\n :param retry_exceptions: single exception or tuple\"\"\"\n\n self.max_tries = max_tries\n self.delay = delay\n self.backoff = backoff\n self.max_jitter = int(max_jitter * 100)\n self.max_delay = float(max_delay)\n self._attempts = 0\n self._cur_delay = delay\n self.deadline = deadline\n self._cur_stoptime = None\n self.sleep_func = sleep_func\n self.retry_exceptions = retry_exceptions\n\n def reset(self):\n \"\"\"Reset the attempt counter\"\"\"\n self._attempts = 0\n self._cur_delay = self.delay\n self._cur_stoptime = None\n\n def copy(self):\n \"\"\"Return a clone of this retry manager\"\"\"\n return Retry(max_tries=self.max_tries, delay=self.delay, backoff=self.backoff,\n max_jitter=self.max_jitter / 100.0, max_delay=self.max_delay, sleep_func=self.sleep_func,\n deadline=self.deadline, retry_exceptions=self.retry_exceptions)\n\n @property\n def sleeptime(self):\n return self._cur_delay + (random.randint(0, self.max_jitter) / 100.0)\n\n def update_delay(self):\n self._cur_delay = min(self._cur_delay * self.backoff, self.max_delay)\n\n @property\n def stoptime(self):\n return self._cur_stoptime\n\n def __call__(self, func, *args, **kwargs):\n \"\"\"Call a function with arguments until it completes without throwing a `retry_exceptions`\n\n :param func: Function to call\n :param args: Positional arguments to call the function with\n :params kwargs: Keyword arguments to call the function with\n\n The function will be called until it doesn't throw one of the retryable exceptions\"\"\"\n self.reset()\n\n while True:\n try:\n if self.deadline is not None and self._cur_stoptime is None:\n self._cur_stoptime = time.time() + self.deadline\n return func(*args, **kwargs)\n except self.retry_exceptions as e:\n # Note: max_tries == -1 means infinite tries.\n if self._attempts == self.max_tries:\n logger.warning('Retry got exception: %s', e)\n raise RetryFailedError(\"Too many retry attempts\")\n self._attempts += 1\n sleeptime = hasattr(e, 'sleeptime') and e.sleeptime or self.sleeptime\n\n if self._cur_stoptime is not None and time.time() + sleeptime >= self._cur_stoptime:\n logger.warning('Retry got exception: %s', e)\n raise RetryFailedError(\"Exceeded retry deadline\")\n logger.debug('Retry got exception: %s', e)\n self.sleep_func(sleeptime)\n self.update_delay()\n\n\ndef polling_loop(timeout, interval=1):\n \"\"\"Returns an iterator that returns values until timeout has passed. 
Timeout is measured from start of iteration.\"\"\"\n start_time = time.time()\n iteration = 0\n end_time = start_time + timeout\n while time.time() < end_time:\n yield iteration\n iteration += 1\n time.sleep(interval)\n\n\ndef split_host_port(value, default_port):\n t = value.rsplit(':', 1)\n if ':' in t[0]:\n t[0] = ','.join([h.strip().strip('[]') for h in t[0].split(',')])\n t.append(default_port)\n return t[0], int(t[1])\n\n\ndef uri(proto, netloc, path='', user=None):\n host, port = netloc if isinstance(netloc, (list, tuple)) else split_host_port(netloc, 0)\n if host and ':' in host and host[0] != '[' and host[-1] != ']':\n host = '[{0}]'.format(host)\n port = ':{0}'.format(port) if port else ''\n path = '/{0}'.format(path) if path and not path.startswith('/') else path\n user = '{0}@'.format(user) if user else ''\n return '{0}://{1}{2}{3}{4}'.format(proto, user, host, port, path)\n\n\ndef iter_response_objects(response):\n prev = ''\n decoder = json_decoder.JSONDecoder()\n for chunk in response.read_chunked(decode_content=False):\n if isinstance(chunk, bytes):\n chunk = chunk.decode('utf-8')\n chunk = prev + chunk\n\n length = len(chunk)\n idx = json_decoder.WHITESPACE.match(chunk, 0).end()\n while idx < length:\n try:\n message, idx = decoder.raw_decode(chunk, idx)\n except ValueError: # malformed or incomplete JSON, unlikely to happen\n break\n else:\n yield message\n idx = json_decoder.WHITESPACE.match(chunk, idx).end()\n prev = chunk[idx:]\n\n\ndef is_standby_cluster(config):\n # Check whether or not provided configuration describes a standby cluster\n return isinstance(config, dict) and (config.get('host') or config.get('port') or config.get('restore_command'))\n\n\ndef cluster_as_json(cluster):\n leader_name = cluster.leader.name if cluster.leader else None\n cluster_lsn = cluster.last_lsn or 0\n\n ret = {'members': []}\n for m in cluster.members:\n if m.name == leader_name:\n config = cluster.config.data if cluster.config and cluster.config.modify_index else {}\n role = 'standby_leader' if is_standby_cluster(config.get('standby_cluster')) else 'leader'\n elif m.name in cluster.sync.members:\n role = 'sync_standby'\n else:\n role = 'replica'\n\n member = {'name': m.name, 'role': role, 'state': m.data.get('state', ''), 'api_url': m.api_url}\n conn_kwargs = m.conn_kwargs()\n if conn_kwargs.get('host'):\n member['host'] = conn_kwargs['host']\n if conn_kwargs.get('port'):\n member['port'] = int(conn_kwargs['port'])\n optional_attributes = ('timeline', 'pending_restart', 'scheduled_restart', 'tags')\n member.update({n: m.data[n] for n in optional_attributes if n in m.data})\n\n if m.name != leader_name:\n lsn = m.data.get('xlog_location')\n if lsn is None:\n member['lag'] = 'unknown'\n elif cluster_lsn >= lsn:\n member['lag'] = cluster_lsn - lsn\n else:\n member['lag'] = 0\n\n ret['members'].append(member)\n\n # sort members by name for consistency\n ret['members'].sort(key=lambda m: m['name'])\n if cluster.is_paused():\n ret['pause'] = True\n if cluster.failover and cluster.failover.scheduled_at:\n ret['scheduled_switchover'] = {'at': cluster.failover.scheduled_at.isoformat()}\n if cluster.failover.leader:\n ret['scheduled_switchover']['from'] = cluster.failover.leader\n if cluster.failover.candidate:\n ret['scheduled_switchover']['to'] = cluster.failover.candidate\n return ret\n\n\ndef is_subpath(d1, d2):\n real_d1 = os.path.realpath(d1) + os.path.sep\n real_d2 = os.path.realpath(os.path.join(real_d1, d2))\n return os.path.commonprefix([real_d1, real_d2 + os.path.sep]) == 
real_d1\n\n\ndef validate_directory(d, msg=\"{} {}\"):\n if not os.path.exists(d):\n try:\n os.makedirs(d)\n except OSError as e:\n logger.error(e)\n if e.errno != errno.EEXIST:\n raise PatroniException(msg.format(d, \"couldn't create the directory\"))\n elif os.path.isdir(d):\n try:\n fd, tmpfile = tempfile.mkstemp(dir=d)\n os.close(fd)\n os.remove(tmpfile)\n except OSError:\n raise PatroniException(msg.format(d, \"the directory is not writable\"))\n else:\n raise PatroniException(msg.format(d, \"is not a directory\"))\n\n\ndef data_directory_is_empty(data_dir):\n if not os.path.exists(data_dir):\n return True\n return all(os.name != 'nt' and (n.startswith('.') or n == 'lost+found') for n in os.listdir(data_dir))\n\n\ndef keepalive_intvl(timeout, idle, cnt=3):\n return max(1, int(float(timeout - idle) / cnt))\n\n\ndef keepalive_socket_options(timeout, idle, cnt=3):\n yield (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)\n\n if sys.platform.startswith('linux'):\n yield (socket.SOL_TCP, 18, int(timeout * 1000)) # TCP_USER_TIMEOUT\n TCP_KEEPIDLE = getattr(socket, 'TCP_KEEPIDLE', None)\n TCP_KEEPINTVL = getattr(socket, 'TCP_KEEPINTVL', None)\n TCP_KEEPCNT = getattr(socket, 'TCP_KEEPCNT', None)\n elif sys.platform.startswith('darwin'):\n TCP_KEEPIDLE = 0x10 # (named \"TCP_KEEPALIVE\" in C)\n TCP_KEEPINTVL = 0x101\n TCP_KEEPCNT = 0x102\n else:\n return\n\n intvl = keepalive_intvl(timeout, idle, cnt)\n yield (socket.IPPROTO_TCP, TCP_KEEPIDLE, idle)\n yield (socket.IPPROTO_TCP, TCP_KEEPINTVL, intvl)\n yield (socket.IPPROTO_TCP, TCP_KEEPCNT, cnt)\n\n\ndef enable_keepalive(sock, timeout, idle, cnt=3):\n SIO_KEEPALIVE_VALS = getattr(socket, 'SIO_KEEPALIVE_VALS', None)\n if SIO_KEEPALIVE_VALS is not None: # Windows\n intvl = keepalive_intvl(timeout, idle, cnt)\n return sock.ioctl(SIO_KEEPALIVE_VALS, (1, idle * 1000, intvl * 1000))\n\n for opt in keepalive_socket_options(timeout, idle, cnt):\n sock.setsockopt(*opt)\n\n\ndef find_executable(executable, path=None):\n _, ext = os.path.splitext(executable)\n\n if (sys.platform == 'win32') and (ext != '.exe'):\n executable = executable + '.exe'\n\n if os.path.isfile(executable):\n return executable\n\n if path is None:\n path = os.environ.get('PATH', os.defpath)\n\n for p in path.split(os.pathsep):\n f = os.path.join(p, executable)\n if os.path.isfile(f):\n return f\n", "path": "patroni/utils.py" } ]
diff --git a/patroni/utils.py b/patroni/utils.py index 1cfad9196..01953d05d 100644 --- a/patroni/utils.py +++ b/patroni/utils.py @@ -362,7 +362,7 @@ def polling_loop(timeout, interval=1): def split_host_port(value, default_port): t = value.rsplit(':', 1) if ':' in t[0]: - t[0] = t[0].strip('[]') + t[0] = ','.join([h.strip().strip('[]') for h in t[0].split(',')]) t.append(default_port) return t[0], int(t[1])
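For quick verification, the patched `split_host_port` from the record above can be exercised standalone against the reporter's listen values (the function body is copied verbatim from the after_files; the outputs in the comments are traced by hand, not captured from a run):

```python
def split_host_port(value, default_port):
    t = value.rsplit(':', 1)
    if ':' in t[0]:
        # Strip enclosing brackets from every comma-separated host, not
        # just from a single bracket pair around the whole string.
        t[0] = ','.join([h.strip().strip('[]') for h in t[0].split(',')])
    t.append(default_port)
    return t[0], int(t[1])

print(split_host_port('127.0.0.1, 2001:db8::11, [2001:db8::1:2]:5678', 5432))
# -> ('127.0.0.1,2001:db8::11,2001:db8::1:2', 5678)
print(split_host_port('[2001:db8::1:2]:5678', 5432))
# -> ('2001:db8::1:2', 5678)
```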
wemake-services__wemake-python-styleguide-1261
Bump flake8-builtins
New version of `flake8-builtins` is released: `1.5.2`

We need to update our dependency here: https://github.com/wemake-services/wemake-python-styleguide/blob/master/pyproject.toml#L63

Here's how to do it: https://github.com/wemake-services/wemake-python-styleguide/blob/master/CONTRIBUTING.md#dependencies
[ { "content": "\"\"\"\nProvides configuration options for ``wemake-python-styleguide``.\n\nWe do not like our linter to be highly configurable.\nSince, people may take the wrong path or make wrong decisions.\nWe try to make all defaults as reasonable as possible.\n\nHowever, you can currently adjust some complexity options. Why?\nBecause we are not quite sure about the ideal values.\n\nAll options are configurable via ``flake8`` CLI.\n\n.. code:: ini\n\n flake8 --max-returns=2 --max-arguments=4\n\nOr you can provide options in ``setup.cfg`` or similar supported files.\n\n.. code:: ini\n\n [flake8]\n max-returns = 2\n max-arguments = 4\n\nWe use ``setup.cfg`` as a default way to provide configuration.\n\nYou can also show all options that ``flake8`` supports by running:\n\n.. code:: bash\n\n flake8 --help\n\n.. rubric:: General options\n\n- ``min-name-length`` - minimum number of chars to define a valid\n variable and module name, defaults to\n :str:`wemake_python_styleguide.options.defaults.MIN_NAME_LENGTH`\n- ``max-name-length`` - maximum number of chars to define a valid\n variable and module name, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_NAME_LENGTH`\n- ``i-control-code`` - whether you control ones who use your code,\n more rules are enforced when you do control it,\n opposite to ``--i-dont-control-code``, defaults to\n :str:`wemake_python_styleguide.options.defaults.I_CONTROL_CODE`\n- ``i-dont-control-code`` - whether you control ones who use your code,\n more rules are enforced when you do control it,\n opposite to ``--i-control-code``, defaults to\n :str:`wemake_python_styleguide.options.defaults.I_CONTROL_CODE`\n- ``nested-classes-whitelist`` - list of nested classes' names we allow to use,\n defaults to\n :str:`wemake_python_styleguide.options.defaults.NESTED_CLASSES_WHITELIST`\n- ``max-noqa-comments`` - maximum number of `noqa` allowed in a module,\n defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_NOQA_COMMENTS`\n- ``allowed-domain-names`` - list of allowed domain names, defaults to\n :str:`wemake_python_styleguide.options.defaults.ALLOWED_DOMAIN_NAMES`\n- ``forbidden-domain-names`` - list of forbidden domain names, defaults to\n :str:`wemake_python_styleguide.options.defaults.FORBIDDEN_DOMAIN_NAMES`\n\n.. 
rubric:: Complexity options\n\n- ``max-returns`` - maximum allowed number of ``return``\n statements in one function, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_RETURNS`\n- ``max-local-variables`` - maximum allowed number of local\n variables in one function, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_LOCAL_VARIABLES`\n- ``max-expressions`` - maximum allowed number of expressions\n in one function, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_EXPRESSIONS`\n- ``max-arguments`` - maximum allowed number of arguments in one function,\n defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_ARGUMENTS`\n- ``max-module-members`` - maximum number of classes and functions\n in a single module, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_MODULE_MEMBERS`\n- ``max-methods`` - maximum number of methods in a single class,\n defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_METHODS`\n- ``max-line-complexity`` - maximum line complexity measured in number of\n ``ast`` nodes per line, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_LINE_COMPLEXITY`\n- ``max-jones-score`` - maximum Jones score for a module, which is equal\n to the median of all lines complexity sum, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_JONES_SCORE`\n- ``max-imports`` - maximum number of imports in a single module,\n defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_IMPORTS`\n- ``max-imported-names`` - maximum number of imported names\n in a single module, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_IMPORTED_NAMES`\n- ``max-base-classes`` - maximum number of parent classes inside a class\n definition, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_BASE_CLASSES`\n- ``max-decorators`` - maximum number of decorators for single function\n or class definition, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_DECORATORS`\n- ``max-string-usages`` - maximum number of repeated string constants\n in your modules, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_DECORATORS`\n- ``max-awaits`` - maximum allowed number of ``await``\n expressions in one function, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_AWAITS`\n- ``max-try-body-length`` - maximum amount of ``try`` node body length,\n defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_TRY_BODY_LENGTH`\n- ``max-module-expressions`` - maximum number of expression\n usages in a module, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_MODULE_EXPRESSIONS`\n- ``max-function-expressions`` - maximum number of expression\n usages in a function or method, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_FUNCTION_EXPRESSIONS`\n- ``max-asserts`` - maximum number of ``assert`` statements in a function,\n default to\n :str:`wemake_python_styleguide.options.defaults.MAX_ASSERTS`\n- ``max-access-level`` - maximum number of access level in an expression,\n defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_ACCESS_LEVEL`\n- ``max-attributes`` - maximum number of public instance attributes,\n defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_ATTRIBUTES`\n- ``max-cognitive-score`` - maximum amount of cognitive complexity\n per function, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_COGNITIVE_SCORE`\n- ``max-cognitive-average`` - maximum amount of 
cognitive complexity\n per module, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_COGNITIVE_AVERAGE`\n :str:`wemake_python_styleguide.options.defaults.NESTED_CLASSES_WHITELIST`\n- ``max-call-level`` - maximum number of call chains, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_CALL_LEVEL`\n- ``max-annotation-complexity`` - maximum number of nested annotations,\n defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_ANN_COMPLEXITY`\n- ``max-import-from-members`` - maximum number of names that can be imported\n from module, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_IMPORT_FROM_MEMBERS`\n\n\"\"\"\n\nfrom typing import ClassVar, Mapping, Optional, Sequence, Union\n\nimport attr\nfrom flake8.options.manager import OptionManager\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.options import defaults\n\nConfigValuesTypes = Union[str, int, bool, Sequence[str]]\n\n\n@final\[email protected](frozen=True, slots=True)\nclass _Option(object):\n \"\"\"Represents ``flake8`` option object.\"\"\"\n\n long_option_name: str\n default: ConfigValuesTypes\n help: str\n type: Optional[str] = 'int' # noqa: A003\n parse_from_config: bool = True\n action: str = 'store'\n comma_separated_list: bool = False\n dest: Optional[str] = None\n\n def __attrs_post_init__(self):\n \"\"\"Is called after regular init is done.\"\"\"\n object.__setattr__( # noqa: WPS609\n self, 'help', ' '.join(\n (self.help, 'Defaults to: %default'), # noqa: WPS323\n ),\n )\n\n def asdict_no_none(self) -> Mapping[str, ConfigValuesTypes]:\n \"\"\"We need this method to return options, but filter out ``None``.\"\"\"\n return {\n key: opt\n for key, opt in attr.asdict(self).items()\n if opt is not None\n }\n\n\n@final\nclass Configuration(object):\n \"\"\"Simple configuration store with all options.\"\"\"\n\n _options: ClassVar[Sequence[_Option]] = [\n # General:\n\n _Option(\n '--min-name-length',\n defaults.MIN_NAME_LENGTH,\n 'Minimum required length of variable and module names.',\n ),\n\n _Option(\n '--max-name-length',\n defaults.MAX_NAME_LENGTH,\n 'Maximum possible length of the variable and module names.',\n ),\n\n _Option(\n '--i-control-code',\n defaults.I_CONTROL_CODE,\n 'Whether you control ones who use your code.',\n action='store_true',\n type=None,\n dest='i_control_code',\n ),\n\n _Option(\n '--i-dont-control-code',\n defaults.I_CONTROL_CODE,\n 'Whether you control ones who use your code.',\n action='store_false',\n type=None,\n dest='i_control_code',\n parse_from_config=False,\n ),\n\n _Option(\n '--max-noqa-comments',\n defaults.MAX_NOQA_COMMENTS,\n 'Maximum amount of `noqa` comments per module.',\n ),\n\n _Option(\n '--nested-classes-whitelist',\n defaults.NESTED_CLASSES_WHITELIST,\n 'List of nested classes names we allow to use.',\n type='string',\n comma_separated_list=True,\n ),\n _Option(\n '--allowed-domain-names',\n defaults.ALLOWED_DOMAIN_NAMES,\n \"Domain names that are removed from variable names' blacklist.\",\n type='string',\n comma_separated_list=True,\n ),\n _Option(\n '--forbidden-domain-names',\n defaults.FORBIDDEN_DOMAIN_NAMES,\n \"Domain names that extends variable names' blacklist.\",\n type='string',\n comma_separated_list=True,\n ),\n\n # Complexity:\n\n _Option(\n '--max-returns',\n defaults.MAX_RETURNS,\n 'Maximum allowed number of return statements in one function.',\n ),\n\n _Option(\n '--max-local-variables',\n defaults.MAX_LOCAL_VARIABLES,\n 'Maximum allowed number of local variables in one function.',\n 
),\n\n _Option(\n '--max-expressions',\n defaults.MAX_EXPRESSIONS,\n 'Maximum allowed number of expressions in one function.',\n ),\n\n _Option(\n '--max-arguments',\n defaults.MAX_ARGUMENTS,\n 'Maximum allowed number of arguments in one function.',\n ),\n\n _Option(\n '--max-module-members',\n defaults.MAX_MODULE_MEMBERS,\n 'Maximum number of classes and functions in a single module.',\n ),\n\n _Option(\n '--max-methods',\n defaults.MAX_METHODS,\n 'Maximum number of methods in a single class.',\n ),\n\n _Option(\n '--max-line-complexity',\n defaults.MAX_LINE_COMPLEXITY,\n 'Maximum line complexity, measured in `ast` nodes.',\n ),\n\n _Option(\n '--max-jones-score',\n defaults.MAX_JONES_SCORE,\n 'Maximum median module complexity, based on sum of lines.',\n ),\n\n _Option(\n '--max-imports',\n defaults.MAX_IMPORTS,\n 'Maximum number of imports in a single module.',\n ),\n\n _Option(\n '--max-imported-names',\n defaults.MAX_IMPORTED_NAMES,\n 'Maximum number of imported names in a single module.',\n ),\n\n _Option(\n '--max-base-classes',\n defaults.MAX_BASE_CLASSES,\n 'Maximum number of base classes.',\n ),\n\n _Option(\n '--max-decorators',\n defaults.MAX_DECORATORS,\n 'Maximum number of decorators.',\n ),\n\n _Option(\n '--max-string-usages',\n defaults.MAX_STRING_USAGES,\n 'Maximum number of string constant usages.',\n ),\n\n _Option(\n '--max-awaits',\n defaults.MAX_AWAITS,\n 'Maximum allowed number of await expressions in one function.',\n ),\n\n _Option(\n '--max-try-body-length',\n defaults.MAX_TRY_BODY_LENGTH,\n 'Maximum amount of try block node body length.',\n ),\n\n _Option(\n '--max-module-expressions',\n defaults.MAX_MODULE_EXPRESSIONS,\n 'Maximum amount of expression usages in a module.',\n ),\n\n _Option(\n '--max-function-expressions',\n defaults.MAX_FUNCTION_EXPRESSIONS,\n 'Maximum amount of expression usages in a function or method.',\n ),\n\n _Option(\n '--max-asserts',\n defaults.MAX_ASSERTS,\n 'Maximum allowed number of assert statements in one function.',\n ),\n\n _Option(\n '--max-access-level',\n defaults.MAX_ACCESS_LEVEL,\n 'Maximum number of access level in an expression.',\n ),\n\n _Option(\n '--max-attributes',\n defaults.MAX_ATTRIBUTES,\n 'Maximum number of public instance attributes.',\n ),\n\n _Option(\n '--max-cognitive-score',\n defaults.MAX_COGNITIVE_SCORE,\n 'Maximum amount of cognitive complexity per function.',\n ),\n\n _Option(\n '--max-cognitive-average',\n defaults.MAX_COGNITIVE_AVERAGE,\n 'Maximum amount of average cognitive complexity per module.',\n ),\n\n _Option(\n '--max-call-level',\n defaults.MAX_CALL_LEVEL,\n 'Maximum number of call chains.',\n ),\n _Option(\n '--max-annotation-complexity',\n defaults.MAX_ANN_COMPLEXITY,\n 'Maximum number of nested annotations.',\n ),\n _Option(\n '--max-import-from-members',\n defaults.MAX_IMPORT_FROM_MEMBERS,\n 'Maximum number of names that can be imported from module.',\n ),\n ]\n\n def register_options(self, parser: OptionManager) -> None:\n \"\"\"Registers options for our plugin.\"\"\"\n for option in self._options:\n parser.add_option(**option.asdict_no_none())\n", "path": "wemake_python_styleguide/options/config.py" } ]
[ { "content": "\"\"\"\nProvides configuration options for ``wemake-python-styleguide``.\n\nWe do not like our linter to be highly configurable.\nSince, people may take the wrong path or make wrong decisions.\nWe try to make all defaults as reasonable as possible.\n\nHowever, you can currently adjust some complexity options. Why?\nBecause we are not quite sure about the ideal values.\n\nAll options are configurable via ``flake8`` CLI.\n\n.. code:: ini\n\n flake8 --max-returns=2 --max-arguments=4\n\nOr you can provide options in ``setup.cfg`` or similar supported files.\n\n.. code:: ini\n\n [flake8]\n max-returns = 2\n max-arguments = 4\n\nWe use ``setup.cfg`` as a default way to provide configuration.\n\nYou can also show all options that ``flake8`` supports by running:\n\n.. code:: bash\n\n flake8 --help\n\n.. rubric:: General options\n\n- ``min-name-length`` - minimum number of chars to define a valid\n variable and module name, defaults to\n :str:`wemake_python_styleguide.options.defaults.MIN_NAME_LENGTH`\n- ``max-name-length`` - maximum number of chars to define a valid\n variable and module name, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_NAME_LENGTH`\n- ``i-control-code`` - whether you control ones who use your code,\n more rules are enforced when you do control it,\n opposite to ``--i-dont-control-code``, defaults to\n :str:`wemake_python_styleguide.options.defaults.I_CONTROL_CODE`\n- ``i-dont-control-code`` - whether you control ones who use your code,\n more rules are enforced when you do control it,\n opposite to ``--i-control-code``, defaults to\n :str:`wemake_python_styleguide.options.defaults.I_CONTROL_CODE`\n- ``nested-classes-whitelist`` - list of nested classes' names we allow to use,\n defaults to\n :str:`wemake_python_styleguide.options.defaults.NESTED_CLASSES_WHITELIST`\n- ``max-noqa-comments`` - maximum number of `noqa` allowed in a module,\n defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_NOQA_COMMENTS`\n- ``allowed-domain-names`` - list of allowed domain names, defaults to\n :str:`wemake_python_styleguide.options.defaults.ALLOWED_DOMAIN_NAMES`\n- ``forbidden-domain-names`` - list of forbidden domain names, defaults to\n :str:`wemake_python_styleguide.options.defaults.FORBIDDEN_DOMAIN_NAMES`\n\n.. 
rubric:: Complexity options\n\n- ``max-returns`` - maximum allowed number of ``return``\n statements in one function, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_RETURNS`\n- ``max-local-variables`` - maximum allowed number of local\n variables in one function, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_LOCAL_VARIABLES`\n- ``max-expressions`` - maximum allowed number of expressions\n in one function, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_EXPRESSIONS`\n- ``max-arguments`` - maximum allowed number of arguments in one function,\n defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_ARGUMENTS`\n- ``max-module-members`` - maximum number of classes and functions\n in a single module, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_MODULE_MEMBERS`\n- ``max-methods`` - maximum number of methods in a single class,\n defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_METHODS`\n- ``max-line-complexity`` - maximum line complexity measured in number of\n ``ast`` nodes per line, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_LINE_COMPLEXITY`\n- ``max-jones-score`` - maximum Jones score for a module, which is equal\n to the median of all lines complexity sum, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_JONES_SCORE`\n- ``max-imports`` - maximum number of imports in a single module,\n defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_IMPORTS`\n- ``max-imported-names`` - maximum number of imported names\n in a single module, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_IMPORTED_NAMES`\n- ``max-base-classes`` - maximum number of parent classes inside a class\n definition, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_BASE_CLASSES`\n- ``max-decorators`` - maximum number of decorators for single function\n or class definition, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_DECORATORS`\n- ``max-string-usages`` - maximum number of repeated string constants\n in your modules, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_DECORATORS`\n- ``max-awaits`` - maximum allowed number of ``await``\n expressions in one function, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_AWAITS`\n- ``max-try-body-length`` - maximum amount of ``try`` node body length,\n defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_TRY_BODY_LENGTH`\n- ``max-module-expressions`` - maximum number of expression\n usages in a module, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_MODULE_EXPRESSIONS`\n- ``max-function-expressions`` - maximum number of expression\n usages in a function or method, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_FUNCTION_EXPRESSIONS`\n- ``max-asserts`` - maximum number of ``assert`` statements in a function,\n default to\n :str:`wemake_python_styleguide.options.defaults.MAX_ASSERTS`\n- ``max-access-level`` - maximum number of access level in an expression,\n defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_ACCESS_LEVEL`\n- ``max-attributes`` - maximum number of public instance attributes,\n defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_ATTRIBUTES`\n- ``max-cognitive-score`` - maximum amount of cognitive complexity\n per function, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_COGNITIVE_SCORE`\n- ``max-cognitive-average`` - maximum amount of 
cognitive complexity\n per module, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_COGNITIVE_AVERAGE`\n :str:`wemake_python_styleguide.options.defaults.NESTED_CLASSES_WHITELIST`\n- ``max-call-level`` - maximum number of call chains, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_CALL_LEVEL`\n- ``max-annotation-complexity`` - maximum number of nested annotations,\n defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_ANN_COMPLEXITY`\n- ``max-import-from-members`` - maximum number of names that can be imported\n from module, defaults to\n :str:`wemake_python_styleguide.options.defaults.MAX_IMPORT_FROM_MEMBERS`\n\n\"\"\"\n\nfrom typing import ClassVar, Mapping, Optional, Sequence, Union\n\nimport attr\nfrom flake8.options.manager import OptionManager\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.options import defaults\n\nConfigValuesTypes = Union[str, int, bool, Sequence[str]]\n\n\n@final\[email protected](frozen=True, slots=True)\nclass _Option(object):\n \"\"\"Represents ``flake8`` option object.\"\"\"\n\n long_option_name: str\n default: ConfigValuesTypes\n help: str # noqa: A003\n type: Optional[str] = 'int' # noqa: A003\n parse_from_config: bool = True\n action: str = 'store'\n comma_separated_list: bool = False\n dest: Optional[str] = None\n\n def __attrs_post_init__(self):\n \"\"\"Is called after regular init is done.\"\"\"\n object.__setattr__( # noqa: WPS609\n self, 'help', ' '.join(\n (self.help, 'Defaults to: %default'), # noqa: WPS323\n ),\n )\n\n def asdict_no_none(self) -> Mapping[str, ConfigValuesTypes]:\n \"\"\"We need this method to return options, but filter out ``None``.\"\"\"\n return {\n key: opt\n for key, opt in attr.asdict(self).items()\n if opt is not None\n }\n\n\n@final\nclass Configuration(object):\n \"\"\"Simple configuration store with all options.\"\"\"\n\n _options: ClassVar[Sequence[_Option]] = [\n # General:\n\n _Option(\n '--min-name-length',\n defaults.MIN_NAME_LENGTH,\n 'Minimum required length of variable and module names.',\n ),\n\n _Option(\n '--max-name-length',\n defaults.MAX_NAME_LENGTH,\n 'Maximum possible length of the variable and module names.',\n ),\n\n _Option(\n '--i-control-code',\n defaults.I_CONTROL_CODE,\n 'Whether you control ones who use your code.',\n action='store_true',\n type=None,\n dest='i_control_code',\n ),\n\n _Option(\n '--i-dont-control-code',\n defaults.I_CONTROL_CODE,\n 'Whether you control ones who use your code.',\n action='store_false',\n type=None,\n dest='i_control_code',\n parse_from_config=False,\n ),\n\n _Option(\n '--max-noqa-comments',\n defaults.MAX_NOQA_COMMENTS,\n 'Maximum amount of `noqa` comments per module.',\n ),\n\n _Option(\n '--nested-classes-whitelist',\n defaults.NESTED_CLASSES_WHITELIST,\n 'List of nested classes names we allow to use.',\n type='string',\n comma_separated_list=True,\n ),\n _Option(\n '--allowed-domain-names',\n defaults.ALLOWED_DOMAIN_NAMES,\n \"Domain names that are removed from variable names' blacklist.\",\n type='string',\n comma_separated_list=True,\n ),\n _Option(\n '--forbidden-domain-names',\n defaults.FORBIDDEN_DOMAIN_NAMES,\n \"Domain names that extends variable names' blacklist.\",\n type='string',\n comma_separated_list=True,\n ),\n\n # Complexity:\n\n _Option(\n '--max-returns',\n defaults.MAX_RETURNS,\n 'Maximum allowed number of return statements in one function.',\n ),\n\n _Option(\n '--max-local-variables',\n defaults.MAX_LOCAL_VARIABLES,\n 'Maximum allowed number of local variables in one 
function.',\n ),\n\n _Option(\n '--max-expressions',\n defaults.MAX_EXPRESSIONS,\n 'Maximum allowed number of expressions in one function.',\n ),\n\n _Option(\n '--max-arguments',\n defaults.MAX_ARGUMENTS,\n 'Maximum allowed number of arguments in one function.',\n ),\n\n _Option(\n '--max-module-members',\n defaults.MAX_MODULE_MEMBERS,\n 'Maximum number of classes and functions in a single module.',\n ),\n\n _Option(\n '--max-methods',\n defaults.MAX_METHODS,\n 'Maximum number of methods in a single class.',\n ),\n\n _Option(\n '--max-line-complexity',\n defaults.MAX_LINE_COMPLEXITY,\n 'Maximum line complexity, measured in `ast` nodes.',\n ),\n\n _Option(\n '--max-jones-score',\n defaults.MAX_JONES_SCORE,\n 'Maximum median module complexity, based on sum of lines.',\n ),\n\n _Option(\n '--max-imports',\n defaults.MAX_IMPORTS,\n 'Maximum number of imports in a single module.',\n ),\n\n _Option(\n '--max-imported-names',\n defaults.MAX_IMPORTED_NAMES,\n 'Maximum number of imported names in a single module.',\n ),\n\n _Option(\n '--max-base-classes',\n defaults.MAX_BASE_CLASSES,\n 'Maximum number of base classes.',\n ),\n\n _Option(\n '--max-decorators',\n defaults.MAX_DECORATORS,\n 'Maximum number of decorators.',\n ),\n\n _Option(\n '--max-string-usages',\n defaults.MAX_STRING_USAGES,\n 'Maximum number of string constant usages.',\n ),\n\n _Option(\n '--max-awaits',\n defaults.MAX_AWAITS,\n 'Maximum allowed number of await expressions in one function.',\n ),\n\n _Option(\n '--max-try-body-length',\n defaults.MAX_TRY_BODY_LENGTH,\n 'Maximum amount of try block node body length.',\n ),\n\n _Option(\n '--max-module-expressions',\n defaults.MAX_MODULE_EXPRESSIONS,\n 'Maximum amount of expression usages in a module.',\n ),\n\n _Option(\n '--max-function-expressions',\n defaults.MAX_FUNCTION_EXPRESSIONS,\n 'Maximum amount of expression usages in a function or method.',\n ),\n\n _Option(\n '--max-asserts',\n defaults.MAX_ASSERTS,\n 'Maximum allowed number of assert statements in one function.',\n ),\n\n _Option(\n '--max-access-level',\n defaults.MAX_ACCESS_LEVEL,\n 'Maximum number of access level in an expression.',\n ),\n\n _Option(\n '--max-attributes',\n defaults.MAX_ATTRIBUTES,\n 'Maximum number of public instance attributes.',\n ),\n\n _Option(\n '--max-cognitive-score',\n defaults.MAX_COGNITIVE_SCORE,\n 'Maximum amount of cognitive complexity per function.',\n ),\n\n _Option(\n '--max-cognitive-average',\n defaults.MAX_COGNITIVE_AVERAGE,\n 'Maximum amount of average cognitive complexity per module.',\n ),\n\n _Option(\n '--max-call-level',\n defaults.MAX_CALL_LEVEL,\n 'Maximum number of call chains.',\n ),\n _Option(\n '--max-annotation-complexity',\n defaults.MAX_ANN_COMPLEXITY,\n 'Maximum number of nested annotations.',\n ),\n _Option(\n '--max-import-from-members',\n defaults.MAX_IMPORT_FROM_MEMBERS,\n 'Maximum number of names that can be imported from module.',\n ),\n ]\n\n def register_options(self, parser: OptionManager) -> None:\n \"\"\"Registers options for our plugin.\"\"\"\n for option in self._options:\n parser.add_option(**option.asdict_no_none())\n", "path": "wemake_python_styleguide/options/config.py" } ]
diff --git a/.github/workflows/wps.yml b/.github/workflows/wps.yml index a876ea97d..e0850dbdd 100644 --- a/.github/workflows/wps.yml +++ b/.github/workflows/wps.yml @@ -17,3 +17,4 @@ jobs: reporter: 'github-pr-review' env: GITHUB_TOKEN: ${{ secrets.github_token }} + continue-on-error: true # this step is optional diff --git a/poetry.lock b/poetry.lock index cc6355a5e..978540b6e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -350,7 +350,7 @@ description = "Check for python builtins being used as variables or parameters." name = "flake8-builtins" optional = false python-versions = "*" -version = "1.4.2" +version = "1.5.2" [package.dependencies] flake8 = "*" @@ -998,7 +998,7 @@ wcwidth = "*" [[package]] category = "dev" description = "Run a subprocess in a pseudo terminal" -marker = "python_version >= \"3.4\" and sys_platform != \"win32\" or sys_platform != \"win32\"" +marker = "python_version >= \"3.4\" and sys_platform != \"win32\" or sys_platform != \"win32\" or python_version >= \"3.4\" and sys_platform != \"win32\" and (python_version >= \"3.4\" and sys_platform != \"win32\" or sys_platform != \"win32\")" name = "ptyprocess" optional = false python-versions = "*" @@ -1543,7 +1543,7 @@ docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"] testing = ["jaraco.itertools", "func-timeout"] [metadata] -content-hash = "e8deb40b077abedec266929ad9972460ab948af5e3b99a8a56e31a0a59436770" +content-hash = "57b3f310e944a44b9489424860624bccd7c8e184d287b5f90c0eccb6ac2ad3f1" python-versions = "^3.6" [metadata.files] @@ -1701,8 +1701,8 @@ flake8-bugbear = [ {file = "flake8_bugbear-19.8.0-py35.py36.py37-none-any.whl", hash = "sha256:ded4d282778969b5ab5530ceba7aa1a9f1b86fa7618fc96a19a1d512331640f8"}, ] flake8-builtins = [ - {file = "flake8-builtins-1.4.2.tar.gz", hash = "sha256:c44415fb19162ef3737056e700d5b99d48c3612a533943b4e16419a5d3de3a64"}, - {file = "flake8_builtins-1.4.2-py2.py3-none-any.whl", hash = "sha256:29bc0f7e68af481d088f5c96f8aeb02520abdfc900500484e3af969f42a38a5f"}, + {file = "flake8-builtins-1.5.2.tar.gz", hash = "sha256:fe7be13fe51bfb06bdae6096c6488e328c822c3aa080e24b91b77116a4fbb8b0"}, + {file = "flake8_builtins-1.5.2-py2.py3-none-any.whl", hash = "sha256:a0296d23da92a6f2494243b9f2039bfdb73f34aba20054c1b70b2a60c84745bb"}, ] flake8-commas = [ {file = "flake8-commas-2.0.0.tar.gz", hash = "sha256:d3005899466f51380387df7151fb59afec666a0f4f4a2c6a8995b975de0f44b7"}, diff --git a/pyproject.toml b/pyproject.toml index 5a5a5f82d..a57829844 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -60,7 +60,7 @@ astor = "^0.8" pygments = "^2.4" importlib-metadata = {version = "*", python = "<3.8"} -flake8-builtins = "^1.4.2" +flake8-builtins = "^1.5.2" flake8-commas = "^2.0" flake8-quotes = "^2.0.1" flake8-comprehensions = "^3.1.0" diff --git a/wemake_python_styleguide/options/config.py b/wemake_python_styleguide/options/config.py index 1fd2ffa86..415def7b3 100644 --- a/wemake_python_styleguide/options/config.py +++ b/wemake_python_styleguide/options/config.py @@ -155,7 +155,7 @@ class _Option(object): long_option_name: str default: ConfigValuesTypes - help: str + help: str # noqa: A003 type: Optional[str] = 'int' # noqa: A003 parse_from_config: bool = True action: str = 'store'
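Note on the diff above: the flake8-builtins upgrade from 1.4.2 to 1.5.2 evidently started flagging the `help` attribute of `_Option`, which is why a `# noqa: A003` suppression was added alongside the one `type` already carried. A003 is the flake8-builtins check for class attributes that shadow Python builtins. A minimal standalone sketch of that situation (the class and attribute values below are illustrative, not taken from the plugin):

```python
# flake8-builtins reports A003 for class attributes whose names shadow
# builtins, as sketched below (assumed check semantics; names are made up).
class Option(object):
    help = 'help text for the option'  # would be flagged: shadows help()
    type = 'int'                       # would be flagged: shadows type()


opt = Option()
# The builtins themselves are untouched outside the class body, which is
# why suppressing the warning with `# noqa: A003` is harmless here.
print(opt.help)   # 'help text for the option'
print(type(opt))  # the builtin type(), unaffected by the class attribute
```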
microsoft__playwright-python-13
[BUG]: page.getAttribute returns None

Actual:

```py
import asyncio
from playwright_web import chromium

async def run():
    browser = await chromium.launch(headless=False)
    context = await browser.newContext(viewport=0)  # 0 stands for no viewport
    page = await context.newPage()
    await page.setContent("""
    <input id="kekstar"/>
    """)
    await page.fill("#kekstar", "Foobar")
    print(await page.getAttribute("#kekstar", 'value'))
    await browser.close()

asyncio.get_event_loop().run_until_complete(run())
```

Expected: Returns Foobar

On Try Playwright, it works: https://try.playwright.tech/?s=dzmwi
[ { "content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nfrom playwright_web.connection import Channel, ChannelOwner, ConnectionScope, from_channel, from_nullable_channel\nfrom playwright_web.element_handle import ElementHandle, convertSelectOptionValues, ValuesToSelect\nfrom playwright_web.helper import ConsoleMessageLocation, FilePayload, SelectOption, is_function_body, locals_to_params\nfrom playwright_web.js_handle import JSHandle, parse_result, serialize_argument\nfrom playwright_web.network import Request, Response, Route\nfrom typing import Any, Awaitable, Dict, List, Optional, Union\n\nclass Frame(ChannelOwner):\n\n def __init__(self, scope: ConnectionScope, guid: str, initializer: Dict) -> None:\n super().__init__(scope, guid, initializer)\n self._parent_frame = from_nullable_channel(initializer['parentFrame'])\n if self._parent_frame:\n self._parent_frame._child_frames.append(self)\n self._name = initializer['name']\n self._url = initializer['url']\n self._detached = False\n self._child_frames: List[Frame] = list()\n self._page: Optional['Page']\n\n async def goto(self,\n url: str,\n timeout: int = None,\n waitUntil: str = None, # Literal['load', 'domcontentloaded', 'networkidle'] = None,\n referer: str = None) -> Optional[Response]:\n return from_nullable_channel(await self._channel.send('goto', locals_to_params(locals())))\n\n async def waitForNavigation(self,\n timeout: int = None,\n waitUntil: str = None, # Literal['load', 'domcontentloaded', 'networkidle'] = None,\n url: str = None # TODO: add url, callback\n ) -> Optional[Response]:\n return from_nullable_channel(await self._channel.send('waitForNavigation', locals_to_params(locals())))\n\n async def waitForLoadState(self,\n state: str = 'load',\n timeout: int = None) -> None:\n await self._channel.send('waitForLoadState', locals_to_params(locals()))\n\n async def frameElement(self) -> ElementHandle:\n return from_channel(await self._channel.send('frameElement'))\n\n async def evaluate(self, expression: str, arg: Any = None, force_expr: bool = False) -> Any:\n if not is_function_body(expression):\n force_expr = True\n return parse_result(await self._channel.send('evaluateExpression', dict(expression=expression, isFunction=not(force_expr), arg=serialize_argument(arg))))\n\n async def evaluateHandle(self, expression: str, arg: Any = None, force_expr: bool = False) -> JSHandle:\n if not is_function_body(expression):\n force_expr = True\n return from_channel(await self._channel.send('evaluateExpressionHandle', dict(expression=expression, isFunction=not(force_expr), arg=serialize_argument(arg))))\n\n async def querySelector(self, selector: str) -> Optional[ElementHandle]:\n return from_nullable_channel(await self._channel.send('querySelector', dict(selector=selector)))\n\n async def waitForSelector(self,\n selector: str,\n timeout: int = None,\n state: str = None, # Literal['attached', 'detached', 'visible', 'hidden'] = None\n ) -> Optional[ElementHandle]:\n return 
from_nullable_channel(await self._channel.send('waitForSelector', locals_to_params(locals())))\n\n async def dispatchEvent(self,\n selector: str,\n type: str,\n eventInit: Dict = None,\n timeout: int = None) -> None:\n await self._channel.send('dispatchEvent', dict(selector=selector, type=type, eventInit=eventInit))\n\n async def evalOnSelector(self, selector: str, expression: str, arg: Any = None, force_expr: bool = False) -> Any:\n return parse_result(await self._channel.send('evalOnSelector', dict(selector=selector, expression=expression, isFunction=not(force_expr), arg=serialize_argument(arg))))\n\n async def evalOnSelectorAll(self, selector: str, expression: str, arg: Any = None, force_expr: bool = False) -> Any:\n return parse_result(await self._channel.send('evalOnSelectorAll', dict(selector=selector, expression=expression, isFunction=not(force_expr), arg=serialize_argument(arg))))\n\n async def content(self) -> str:\n return await self._channel.send('content')\n\n async def setContent(self,\n html: str, timeout: int = None,\n waitUntil: str = None, # Literal['load', 'domcontentloaded', 'networkidle'] = None\n ) -> None:\n await self._channel.send('setContent', locals_to_params(locals()))\n\n @property\n def name(self) -> str:\n return self._name or ''\n\n @property\n def url(self) -> str:\n return self._url or ''\n\n @property\n def parentFrame(self) -> Optional['Frame']:\n return self._parent_frame\n\n @property\n def childFrames(self) -> List['Frame']:\n return self._child_frames.copy()\n\n def isDetached(self) -> bool:\n return self._detached\n\n async def addScriptTag(self,\n url: str = None,\n path: str = None,\n content: str = None) -> ElementHandle:\n return from_channel(await self._channel.send('addScriptTag', locals_to_params(locals())))\n\n async def addStyleTag(self,\n url: str = None,\n path: str = None,\n content: str = None) -> ElementHandle:\n return from_channel(await self._channel.send('addStyleTag', locals_to_params(locals())))\n\n async def click(self,\n selector: str,\n modifiers: List[str] = None, # Literal['Alt', 'Control', 'Meta', 'Shift']] = None,\n position: Dict = None,\n delay: int = None,\n button: str = None, # Literal['left', 'right', 'middle'] = None,\n clickCount: int = None,\n timeout: int = None,\n force: bool = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('click', locals_to_params(locals()))\n\n async def dblclick(self,\n selector: str,\n modifiers: List[str] = None, # Literal['Alt', 'Control', 'Meta', 'Shift']] = None,\n position: Dict = None,\n delay: int = None,\n button: str = None, # Literal['left', 'right', 'middle'] = None,\n timeout: int = None,\n force: bool = None) -> None:\n await self._channel.send('dblclick', locals_to_params(locals()))\n\n async def fill(self,\n selector: str,\n value: str,\n timeout: int = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('fill', locals_to_params(locals()))\n\n async def focus(self,\n selector: str,\n timeout: int = None) -> None:\n await self._channel.send('focus', locals_to_params(locals()))\n\n async def textContent(self,\n selector: str,\n timeout: int = None) -> str:\n return await self._channel.send('textContent', locals_to_params(locals()))\n\n async def innerText(self,\n selector: str,\n timeout: int = None) -> str:\n return await self._channel.send('innerText', locals_to_params(locals()))\n\n async def innerHTML(self,\n selector: str,\n timeout: int = None) -> str:\n return await self._channel.send('innerHTML', locals_to_params(locals()))\n\n 
async def getAttribute(self,\n selector: str,\n name: str,\n timeout: int = None) -> str:\n await self._channel.send('getAttribute', locals_to_params(locals()))\n\n async def hover(self,\n selector: str,\n modifiers: List[str] = None, # Literal['Alt', 'Control', 'Meta', 'Shift']] = None,\n position: Dict = None,\n timeout: int = None,\n force: bool = None) -> None:\n await self._channel.send('hover', locals_to_params(locals()))\n\n async def selectOption(self,\n selector: str,\n values: ValuesToSelect,\n timeout: int = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('selectOption', dict(selector=selector, values=convertSelectOptionValues(values), timeout=timeout, noWaitAfter=noWaitAfter))\n\n async def setInputFiles(self,\n selector: str,\n files: Union[str, FilePayload, List[str], List[FilePayload]],\n timeout: int = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('setInputFiles', locals_to_params(locals()))\n\n async def type(self,\n selector: str,\n text: str,\n delay: int = None,\n timeout: int = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('type', locals_to_params(locals()))\n\n async def press(self,\n selector: str,\n key: str,\n delay: int = None,\n timeout: int = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('press', locals_to_params(locals()))\n\n async def check(self,\n selector: str,\n timeout: int = None,\n force: bool = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('check', locals_to_params(locals()))\n\n async def uncheck(self,\n selector: str,\n timeout: int = None,\n force: bool = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('uncheck', locals_to_params(locals()))\n\n async def waitForTimeout(self, timeout: int) -> Awaitable[None]:\n return self._scope._loop.create_task(asyncio.sleep(timeout / 1000))\n\n async def waitForFunction(self,\n expression: str,\n arg: Any = None,\n force_expr: bool = False,\n timeout: int = None,\n polling: Union[int, str] = None # Union[int, Literal[\"raf\"]]\n ) -> JSHandle:\n if not is_function_body(expression):\n force_expr = True\n params = locals_to_params(locals())\n params['isFunction'] = not(force_expr)\n params['arg'] = serialize_argument(arg)\n return from_channel(await self._channel.send('waitForFunction', params))\n\n async def title(self) -> str:\n return await self._channel.send('title')\n", "path": "playwright_web/frame.py" } ]
[ { "content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nfrom playwright_web.connection import Channel, ChannelOwner, ConnectionScope, from_channel, from_nullable_channel\nfrom playwright_web.element_handle import ElementHandle, convertSelectOptionValues, ValuesToSelect\nfrom playwright_web.helper import ConsoleMessageLocation, FilePayload, SelectOption, is_function_body, locals_to_params\nfrom playwright_web.js_handle import JSHandle, parse_result, serialize_argument\nfrom playwright_web.network import Request, Response, Route\nfrom typing import Any, Awaitable, Dict, List, Optional, Union\n\nclass Frame(ChannelOwner):\n\n def __init__(self, scope: ConnectionScope, guid: str, initializer: Dict) -> None:\n super().__init__(scope, guid, initializer)\n self._parent_frame = from_nullable_channel(initializer['parentFrame'])\n if self._parent_frame:\n self._parent_frame._child_frames.append(self)\n self._name = initializer['name']\n self._url = initializer['url']\n self._detached = False\n self._child_frames: List[Frame] = list()\n self._page: Optional['Page']\n\n async def goto(self,\n url: str,\n timeout: int = None,\n waitUntil: str = None, # Literal['load', 'domcontentloaded', 'networkidle'] = None,\n referer: str = None) -> Optional[Response]:\n return from_nullable_channel(await self._channel.send('goto', locals_to_params(locals())))\n\n async def waitForNavigation(self,\n timeout: int = None,\n waitUntil: str = None, # Literal['load', 'domcontentloaded', 'networkidle'] = None,\n url: str = None # TODO: add url, callback\n ) -> Optional[Response]:\n return from_nullable_channel(await self._channel.send('waitForNavigation', locals_to_params(locals())))\n\n async def waitForLoadState(self,\n state: str = 'load',\n timeout: int = None) -> None:\n await self._channel.send('waitForLoadState', locals_to_params(locals()))\n\n async def frameElement(self) -> ElementHandle:\n return from_channel(await self._channel.send('frameElement'))\n\n async def evaluate(self, expression: str, arg: Any = None, force_expr: bool = False) -> Any:\n if not is_function_body(expression):\n force_expr = True\n return parse_result(await self._channel.send('evaluateExpression', dict(expression=expression, isFunction=not(force_expr), arg=serialize_argument(arg))))\n\n async def evaluateHandle(self, expression: str, arg: Any = None, force_expr: bool = False) -> JSHandle:\n if not is_function_body(expression):\n force_expr = True\n return from_channel(await self._channel.send('evaluateExpressionHandle', dict(expression=expression, isFunction=not(force_expr), arg=serialize_argument(arg))))\n\n async def querySelector(self, selector: str) -> Optional[ElementHandle]:\n return from_nullable_channel(await self._channel.send('querySelector', dict(selector=selector)))\n\n async def waitForSelector(self,\n selector: str,\n timeout: int = None,\n state: str = None, # Literal['attached', 'detached', 'visible', 'hidden'] = None\n ) -> Optional[ElementHandle]:\n return 
from_nullable_channel(await self._channel.send('waitForSelector', locals_to_params(locals())))\n\n async def dispatchEvent(self,\n selector: str,\n type: str,\n eventInit: Dict = None,\n timeout: int = None) -> None:\n await self._channel.send('dispatchEvent', dict(selector=selector, type=type, eventInit=eventInit))\n\n async def evalOnSelector(self, selector: str, expression: str, arg: Any = None, force_expr: bool = False) -> Any:\n return parse_result(await self._channel.send('evalOnSelector', dict(selector=selector, expression=expression, isFunction=not(force_expr), arg=serialize_argument(arg))))\n\n async def evalOnSelectorAll(self, selector: str, expression: str, arg: Any = None, force_expr: bool = False) -> Any:\n return parse_result(await self._channel.send('evalOnSelectorAll', dict(selector=selector, expression=expression, isFunction=not(force_expr), arg=serialize_argument(arg))))\n\n async def content(self) -> str:\n return await self._channel.send('content')\n\n async def setContent(self,\n html: str, timeout: int = None,\n waitUntil: str = None, # Literal['load', 'domcontentloaded', 'networkidle'] = None\n ) -> None:\n await self._channel.send('setContent', locals_to_params(locals()))\n\n @property\n def name(self) -> str:\n return self._name or ''\n\n @property\n def url(self) -> str:\n return self._url or ''\n\n @property\n def parentFrame(self) -> Optional['Frame']:\n return self._parent_frame\n\n @property\n def childFrames(self) -> List['Frame']:\n return self._child_frames.copy()\n\n def isDetached(self) -> bool:\n return self._detached\n\n async def addScriptTag(self,\n url: str = None,\n path: str = None,\n content: str = None) -> ElementHandle:\n return from_channel(await self._channel.send('addScriptTag', locals_to_params(locals())))\n\n async def addStyleTag(self,\n url: str = None,\n path: str = None,\n content: str = None) -> ElementHandle:\n return from_channel(await self._channel.send('addStyleTag', locals_to_params(locals())))\n\n async def click(self,\n selector: str,\n modifiers: List[str] = None, # Literal['Alt', 'Control', 'Meta', 'Shift']] = None,\n position: Dict = None,\n delay: int = None,\n button: str = None, # Literal['left', 'right', 'middle'] = None,\n clickCount: int = None,\n timeout: int = None,\n force: bool = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('click', locals_to_params(locals()))\n\n async def dblclick(self,\n selector: str,\n modifiers: List[str] = None, # Literal['Alt', 'Control', 'Meta', 'Shift']] = None,\n position: Dict = None,\n delay: int = None,\n button: str = None, # Literal['left', 'right', 'middle'] = None,\n timeout: int = None,\n force: bool = None) -> None:\n await self._channel.send('dblclick', locals_to_params(locals()))\n\n async def fill(self,\n selector: str,\n value: str,\n timeout: int = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('fill', locals_to_params(locals()))\n\n async def focus(self,\n selector: str,\n timeout: int = None) -> None:\n await self._channel.send('focus', locals_to_params(locals()))\n\n async def textContent(self,\n selector: str,\n timeout: int = None) -> str:\n return await self._channel.send('textContent', locals_to_params(locals()))\n\n async def innerText(self,\n selector: str,\n timeout: int = None) -> str:\n return await self._channel.send('innerText', locals_to_params(locals()))\n\n async def innerHTML(self,\n selector: str,\n timeout: int = None) -> str:\n return await self._channel.send('innerHTML', locals_to_params(locals()))\n\n 
async def getAttribute(self,\n selector: str,\n name: str,\n timeout: int = None) -> str:\n return await self._channel.send('getAttribute', locals_to_params(locals()))\n\n async def hover(self,\n selector: str,\n modifiers: List[str] = None, # Literal['Alt', 'Control', 'Meta', 'Shift']] = None,\n position: Dict = None,\n timeout: int = None,\n force: bool = None) -> None:\n await self._channel.send('hover', locals_to_params(locals()))\n\n async def selectOption(self,\n selector: str,\n values: ValuesToSelect,\n timeout: int = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('selectOption', dict(selector=selector, values=convertSelectOptionValues(values), timeout=timeout, noWaitAfter=noWaitAfter))\n\n async def setInputFiles(self,\n selector: str,\n files: Union[str, FilePayload, List[str], List[FilePayload]],\n timeout: int = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('setInputFiles', locals_to_params(locals()))\n\n async def type(self,\n selector: str,\n text: str,\n delay: int = None,\n timeout: int = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('type', locals_to_params(locals()))\n\n async def press(self,\n selector: str,\n key: str,\n delay: int = None,\n timeout: int = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('press', locals_to_params(locals()))\n\n async def check(self,\n selector: str,\n timeout: int = None,\n force: bool = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('check', locals_to_params(locals()))\n\n async def uncheck(self,\n selector: str,\n timeout: int = None,\n force: bool = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('uncheck', locals_to_params(locals()))\n\n async def waitForTimeout(self, timeout: int) -> Awaitable[None]:\n return self._scope._loop.create_task(asyncio.sleep(timeout / 1000))\n\n async def waitForFunction(self,\n expression: str,\n arg: Any = None,\n force_expr: bool = False,\n timeout: int = None,\n polling: Union[int, str] = None # Union[int, Literal[\"raf\"]]\n ) -> JSHandle:\n if not is_function_body(expression):\n force_expr = True\n params = locals_to_params(locals())\n params['isFunction'] = not(force_expr)\n params['arg'] = serialize_argument(arg)\n return from_channel(await self._channel.send('waitForFunction', params))\n\n async def title(self) -> str:\n return await self._channel.send('title')\n", "path": "playwright_web/frame.py" } ]
diff --git a/playwright_web/frame.py b/playwright_web/frame.py index bd0a75df7..0f7ec60e2 100644 --- a/playwright_web/frame.py +++ b/playwright_web/frame.py @@ -181,7 +181,7 @@ async def getAttribute(self, selector: str, name: str, timeout: int = None) -> str: - await self._channel.send('getAttribute', locals_to_params(locals())) + return await self._channel.send('getAttribute', locals_to_params(locals())) async def hover(self, selector: str,
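The one-line diff above shows a common coroutine pitfall: `Frame.getAttribute` awaited the channel call but never returned its result, so every caller saw `None`. A minimal self-contained sketch of the same bug class (the helper names below are made up for illustration):

```python
import asyncio


async def fetch_value():
    return "Foobar"


async def get_attribute_buggy():
    await fetch_value()  # result is awaited but discarded -> caller gets None


async def get_attribute_fixed():
    return await fetch_value()  # result is propagated to the caller


async def main():
    print(await get_attribute_buggy())  # None
    print(await get_attribute_fixed())  # Foobar

asyncio.get_event_loop().run_until_complete(main())
```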
ansible__molecule-649
ansible-lint called from Molecule fails when no Ansible-installed-with-pip is present

# Issue Type

- Bug report

# Molecule and Ansible details

```
# ansible --version
ansible 2.2.1.0 (stable-2.2 acad2ba246) last updated 2016/12/11 20:27:02 (GMT +900)
  lib/ansible/modules/core: (detached HEAD 8139278530) last updated 2016/12/11 20:30:10 (GMT +900)
  lib/ansible/modules/extras: (detached HEAD f5f1fc934a) last updated 2016/12/11 20:30:10 (GMT +900)
  config file =
  configured module search path = Default w/o overrides
# molecule --version
molecule, version 1.16.1
```

- Molecule installation method: pip
- Ansible installation method: source

# Desired Behaviour

```
# molecule verify
--> Executing ansible-lint...
[ANSIBLE0002] Trailing whitespace
playbook.yml:7
    - ansible-unix-python-environment
```

# Actual Behaviour (Bug report only)

```
# pip uninstall ansible
((( cut )))
  Successfully uninstalled ansible-2.2.0.0
# . /usr/local/src/ansible/hacking/env-setup
((( cut )))
PYTHONPATH=/usr/local/src/ansible/lib:
((( cut )))
# ansible --version
ansible 2.2.1.0 (stable-2.2 acad2ba246) last updated 2016/12/11 20:27:02 (GMT +900)
  lib/ansible/modules/core: (detached HEAD 8139278530) last updated 2016/12/11 20:30:10 (GMT +900)
  lib/ansible/modules/extras: (detached HEAD f5f1fc934a) last updated 2016/12/11 20:30:10 (GMT +900)
  config file =
  configured module search path = Default w/o overrides
# molecule --debug verify
--> Executing ansible-lint...
DEBUG: COMMAND
/usr/local/bin/ansible-lint playbook.yml --exclude .git --exclude .vagrant --exclude .molecule

Traceback (most recent call last):
  File "/usr/local/bin/ansible-lint", line 30, in <module>
    import ansiblelint
  File "/usr/local/lib/python2.7/site-packages/ansiblelint/__init__.py", line 26, in <module>
    import ansiblelint.utils
  File "/usr/local/lib/python2.7/site-packages/ansiblelint/utils.py", line 25, in <module>
    import ansible.constants as C
ImportError: No module named ansible.constants

# /usr/local/bin/ansible-lint playbook.yml --exclude .git --exclude .vagrant --exclude .molecule
[ANSIBLE0002] Trailing whitespace
playbook.yml:7
    - ansible-unix-python-environment
```

# Further tests

With Ansible 2.2.0 installed with `pip` (regardless if the one from source configured or not; configured in the example below):

```
# pip install ansible
((( cut )))
Successfully installed ansible-2.2.0.0
# . /usr/local/src/ansible/hacking/env-setup
((( cut )))
# ansible --version
ansible 2.2.1.0 (stable-2.2 acad2ba246) last updated 2016/12/11 20:27:02 (GMT +900)
  lib/ansible/modules/core: (detached HEAD 8139278530) last updated 2016/12/11 20:30:10 (GMT +900)
  lib/ansible/modules/extras: (detached HEAD f5f1fc934a) last updated 2016/12/11 20:30:10 (GMT +900)
  config file =
  configured module search path = Default w/o overrides
# molecule verify
--> Executing ansible-lint...
[ANSIBLE0002] Trailing whitespace
playbook.yml:7
    - ansible-unix-python-environment
```
[ { "content": "# Copyright (c) 2015-2016 Cisco Systems, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport os\n\nimport sh\n\nfrom molecule import util\nfrom molecule.verifier import base\n\n\nclass AnsibleLint(base.Base):\n \"\"\"\n This is likely to be the source of issues. The class was implemented to\n bring standardization to roles managed by molecule. How we further refine\n this class, and its usage is up for discussion.\n \"\"\"\n\n def __init__(self, molecule):\n super(AnsibleLint, self).__init__(molecule)\n self._playbook = molecule.config.config['ansible']['playbook']\n self._ignore_paths = molecule.config.config['molecule']['ignore_paths']\n self._debug = molecule.args.get('debug')\n\n def execute(self):\n \"\"\"\n Executes ansible-lint against the configured playbook and returns\n None.\n\n :return: None\n \"\"\"\n env = {\n 'ANSIBLE_CONFIG':\n self._molecule.config.config['ansible']['config_file'],\n 'HOME': os.environ.get('HOME')\n }\n\n if 'ansible_lint' not in self._molecule.disabled:\n msg = 'Executing ansible-lint...'\n util.print_info(msg)\n args = [self._playbook]\n [args.extend([\"--exclude\", path]) for path in self._ignore_paths]\n cmd = sh.ansible_lint.bake(\n *args,\n _env=env,\n _out=util.callback_info,\n _err=util.callback_error)\n util.run_command(cmd, debug=self._debug)\n", "path": "molecule/verifier/ansible_lint.py" } ]
[ { "content": "# Copyright (c) 2015-2016 Cisco Systems, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport os\n\nimport sh\n\nfrom molecule import util\nfrom molecule.verifier import base\n\n\nclass AnsibleLint(base.Base):\n \"\"\"\n This is likely to be the source of issues. The class was implemented to\n bring standardization to roles managed by molecule. How we further refine\n this class, and its usage is up for discussion.\n \"\"\"\n\n def __init__(self, molecule):\n super(AnsibleLint, self).__init__(molecule)\n self._playbook = molecule.config.config['ansible']['playbook']\n self._ignore_paths = molecule.config.config['molecule']['ignore_paths']\n self._debug = molecule.args.get('debug')\n\n def execute(self):\n \"\"\"\n Executes ansible-lint against the configured playbook and returns\n None.\n\n :return: None\n \"\"\"\n env = {\n 'ANSIBLE_CONFIG':\n self._molecule.config.config['ansible']['config_file'],\n 'PYTHONPATH': os.environ.get('PYTHONPATH'),\n 'HOME': os.environ.get('HOME')\n }\n\n if 'ansible_lint' not in self._molecule.disabled:\n msg = 'Executing ansible-lint...'\n util.print_info(msg)\n args = [self._playbook]\n [args.extend([\"--exclude\", path]) for path in self._ignore_paths]\n cmd = sh.ansible_lint.bake(\n *args,\n _env=env,\n _out=util.callback_info,\n _err=util.callback_error)\n util.run_command(cmd, debug=self._debug)\n", "path": "molecule/verifier/ansible_lint.py" } ]
diff --git a/molecule/verifier/ansible_lint.py b/molecule/verifier/ansible_lint.py index fc117028c6..078a03e101 100644 --- a/molecule/verifier/ansible_lint.py +++ b/molecule/verifier/ansible_lint.py @@ -49,6 +49,7 @@ def execute(self): env = { 'ANSIBLE_CONFIG': self._molecule.config.config['ansible']['config_file'], + 'PYTHONPATH': os.environ.get('PYTHONPATH'), 'HOME': os.environ.get('HOME') }
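The fix above forwards `PYTHONPATH` into the environment that `sh.ansible_lint` runs under. Ansible's `hacking/env-setup` works by exporting `PYTHONPATH`, so when molecule builds a fresh env dict containing only `ANSIBLE_CONFIG` and `HOME`, a source-installed Ansible becomes invisible to the `ansible-lint` child process. A minimal sketch of the failure mode, with hypothetical paths:

```python
import os
import subprocess

# Hand-built child environment, as molecule does; without the PYTHONPATH
# line, `import ansible` fails in the child for source installs with
# "ImportError: No module named ansible.constants".
env = {
    'HOME': os.environ.get('HOME', ''),
    # Forward PYTHONPATH so e.g. /usr/local/src/ansible/lib stays importable;
    # default to '' because env values must be strings, not None.
    'PYTHONPATH': os.environ.get('PYTHONPATH', ''),
}

subprocess.call(['python', '-c', 'import ansible.constants'], env=env)
```

(The merged patch passes `os.environ.get('PYTHONPATH')` through unchanged, which can be `None` when the variable is unset; the sketch defaults to an empty string to stay runnable in that case.)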
freedomofpress__securedrop-2998
securedrop-admin sdconfig erases additional values in site-specific

# Bug

## Description

securedrop-admin sdconfig erases values in site-specific when they are not prompted for. `securedrop-admin sdconfig` should not erase entries in site-specific, which would help testing (e.g.: releases that are in development or perhaps alpha/beta features).

## Steps to Reproduce

* edit `/install_files/ansible-base/group_vars/all/site-specific` and add another value
* run `./securedrop-admin sdconfig`
* open `/install_files/ansible-base/group_vars/all/site-specific` and observe your value has disappeared

## Expected Behavior

`securedrop-admin sdconfig` should not erase entries in `/install_files/ansible-base/group_vars/all/site-specific`.

## Actual Behavior

`securedrop-admin sdconfig` erases entries in `/install_files/ansible-base/group_vars/all/site-specific`.
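The root cause, visible in the `update_config` method in the files below, is that the prompted answers replace the loaded config wholesale (`self.config = self.user_prompt_config()`), discarding any keys the admin added by hand; the fix merges them instead with `dict.update`. A minimal sketch of the difference, using made-up keys:

```python
loaded = {'app_ip': '10.20.2.2', 'my_custom_var': True}  # hand-edited file
prompted = {'app_ip': '10.20.2.3'}                       # keys sdconfig asks for

replaced = dict(prompted)  # buggy behaviour: 'my_custom_var' is lost
merged = dict(loaded)
merged.update(prompted)    # fixed behaviour: the extra key survives

print(replaced)  # {'app_ip': '10.20.2.3'}
print(merged)    # {'app_ip': '10.20.2.3', 'my_custom_var': True}
```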
[ { "content": "# -*- mode: python; coding: utf-8 -*-\n#\n# Copyright (C) 2013-2018 Freedom of the Press Foundation & al\n# Copyright (C) 2018 Loic Dachary <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\"\"\"\nSecureDrop Admin Toolkit.\n\nFor use by administrators to install, maintain, and manage their SD\ninstances.\n\"\"\"\n\nimport argparse\nimport logging\nimport os\nimport re\nimport string\nimport subprocess\nimport sys\nimport types\nimport prompt_toolkit\nfrom prompt_toolkit.validation import Validator, ValidationError\nimport yaml\n\nsdlog = logging.getLogger(__name__)\n\n\nclass FingerprintException(Exception):\n pass\n\n\nclass SiteConfig(object):\n\n class ValidateNotEmpty(Validator):\n def validate(self, document):\n if document.text != '':\n return True\n raise ValidationError(\n message=\"Must not be an empty string\")\n\n class ValidateUser(Validator):\n def validate(self, document):\n text = document.text\n if text != '' and text != 'root' and text != 'amnesia':\n return True\n raise ValidationError(\n message=\"Must not be root, amnesia or an empty string\")\n\n class ValidateIP(Validator):\n def validate(self, document):\n if re.match('((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\\.|$)){4}$',\n document.text):\n return True\n raise ValidationError(\n message=\"An IP address must be something like 10.240.20.83\")\n\n class ValidateDNS(Validator):\n def validate(self):\n raise Exception() # pragma: no cover\n\n def is_tails(self):\n try:\n id = subprocess.check_output('lsb_release --id --short',\n shell=True).strip()\n except subprocess.CalledProcessError:\n id = None\n return id == 'Tails'\n\n def lookup_fqdn(self, fqdn, dns=None):\n cmd = 'host -W=10 -T -4 ' + fqdn\n if self.is_tails():\n cmd = 'torify ' + cmd\n cmd += ' ' + (dns and dns or '8.8.8.8')\n try:\n result = subprocess.check_output(cmd, shell=True,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n result = e.output\n sdlog.debug(cmd + ' => ' + result)\n return 'has address' in result\n\n class ValidateDNSServer(ValidateDNS):\n def validate(self, document):\n if self.lookup_fqdn('gnu.org', document.text):\n return True\n raise ValidationError(\n message='Unable to resolve gnu.org using this DNS')\n\n class ValidateFQDN(ValidateDNS):\n def validate(self, document):\n if self.lookup_fqdn(document.text):\n return True\n raise ValidationError(\n message='Unable to resolve ' + document.text)\n\n class ValidatePath(Validator):\n def __init__(self, basedir):\n self.basedir = basedir\n super(SiteConfig.ValidatePath, self).__init__()\n\n def validate(self, document):\n if document.text == '':\n raise ValidationError(\n message='an existing file name is required')\n path = os.path.join(self.basedir, document.text)\n if os.path.exists(path):\n return True\n raise ValidationError(\n message=path + ' file does not exist')\n\n class ValidateYesNo(Validator):\n def validate(self, document):\n 
text = document.text.lower()\n if text == 'yes' or text == 'no':\n return True\n raise ValidationError(message=\"Must be either yes or no\")\n\n class ValidateFingerprint(Validator):\n def validate(self, document):\n text = document.text.replace(' ', '')\n if text == '65A1B5FF195B56353CC63DFFCC40EF1228271441':\n raise ValidationError(\n message='This is the TEST journalist fingerprint')\n if text == '600BC6D5142C68F35DDBCEA87B597104EDDDC102':\n raise ValidationError(\n message='This is the TEST admin fingerprint')\n if not re.match('[a-fA-F0-9]{40}$', text):\n raise ValidationError(\n message='fingerprints must be 40 hexadecimal characters')\n return True\n\n class ValidateInt(Validator):\n def validate(self, document):\n if re.match('\\d+$', document.text):\n return True\n raise ValidationError(message=\"Must be an integer\")\n\n class Locales(object):\n def __init__(self, appdir):\n self.translation_dir = os.path.realpath(\n os.path.join(appdir, 'translations'))\n\n def get_translations(self):\n translations = set(['en', 'en_US'])\n for dirname in os.listdir(self.translation_dir):\n if dirname != 'messages.pot':\n translations.add(dirname)\n return translations\n\n class ValidateLocales(Validator):\n def __init__(self, basedir):\n self.basedir = basedir\n super(SiteConfig.ValidateLocales, self).__init__()\n\n def validate(self, document):\n desired = document.text.split()\n existing = SiteConfig.Locales(self.basedir).get_translations()\n missing = set(desired) - set(existing)\n if not missing:\n return True\n raise ValidationError(\n message=\"The following locales do not exist \" + \" \".join(\n missing))\n\n class ValidateOSSECUsername(Validator):\n def validate(self, document):\n text = document.text\n if text and '@' not in text and 'test' != text:\n return True\n raise ValidationError(\n message=\"The SASL username should not include the domain name\")\n\n class ValidateOSSECPassword(Validator):\n def validate(self, document):\n text = document.text\n if len(text) >= 8 and 'password123' != text:\n return True\n raise ValidationError(\n message=\"Password for OSSEC email account must be strong\")\n\n class ValidateOSSECEmail(Validator):\n def validate(self, document):\n text = document.text\n if text and '@' in text and '[email protected]' != text:\n return True\n raise ValidationError(\n message=(\"Must contain a @ and be set to \"\n \"something other than [email protected]\"))\n\n def __init__(self, args):\n self.args = args\n translations = SiteConfig.Locales(\n self.args.app_path).get_translations()\n translations = \" \".join(translations)\n self.desc = [\n ['ssh_users', 'sd', str,\n u'Username for SSH access to the servers',\n SiteConfig.ValidateUser(),\n None],\n ['app_ip', '10.20.2.2', str,\n u'Local IPv4 address for the Application Server',\n SiteConfig.ValidateIP(),\n None],\n ['monitor_ip', '10.20.3.2', str,\n u'Local IPv4 address for the Monitor Server',\n SiteConfig.ValidateIP(),\n None],\n ['app_hostname', 'app', str,\n u'Hostname for Application Server',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['monitor_hostname', 'mon', str,\n u'Hostname for Monitor Server',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['dns_server', '8.8.8.8', str,\n u'DNS server specified during installation',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['securedrop_app_https_on_source_interface', False, bool,\n u'Whether HTTPS should be enabled on '\n 'Source Interface (requires EV cert)',\n SiteConfig.ValidateYesNo(),\n lambda x: x.lower() == 'yes'],\n ['securedrop_app_gpg_public_key', 
'SecureDrop.asc', str,\n u'Local filepath to public key for '\n 'SecureDrop Application GPG public key',\n SiteConfig.ValidatePath(self.args.ansible_path),\n None],\n ['securedrop_app_gpg_fingerprint', '', str,\n u'Full fingerprint for the SecureDrop Application GPG Key',\n SiteConfig.ValidateFingerprint(),\n self.sanitize_fingerprint],\n ['ossec_alert_gpg_public_key', 'ossec.pub', str,\n u'Local filepath to OSSEC alerts GPG public key',\n SiteConfig.ValidatePath(self.args.ansible_path),\n None],\n ['ossec_gpg_fpr', '', str,\n u'Full fingerprint for the OSSEC alerts GPG public key',\n SiteConfig.ValidateFingerprint(),\n self.sanitize_fingerprint],\n ['ossec_alert_email', '', str,\n u'Admin email address for receiving OSSEC alerts',\n SiteConfig.ValidateOSSECEmail(),\n None],\n ['smtp_relay', \"smtp.gmail.com\", str,\n u'SMTP relay for sending OSSEC alerts',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['smtp_relay_port', 587, int,\n u'SMTP port for sending OSSEC alerts',\n SiteConfig.ValidateInt(),\n int],\n ['sasl_domain', \"gmail.com\", str,\n u'SASL domain for sending OSSEC alerts',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['sasl_username', '', str,\n u'SASL username for sending OSSEC alerts',\n SiteConfig.ValidateOSSECUsername(),\n None],\n ['sasl_password', '', str,\n u'SASL password for sending OSSEC alerts',\n SiteConfig.ValidateOSSECPassword(),\n None],\n ['securedrop_supported_locales', [], types.ListType,\n u'Space separated list of additional locales to support '\n '(' + translations + ')',\n SiteConfig.ValidateLocales(self.args.app_path),\n string.split],\n ]\n\n def load_and_update_config(self):\n if self.exists():\n self.config = self.load()\n else:\n self.config = None\n return self.update_config()\n\n def update_config(self):\n self.config = self.user_prompt_config()\n self.save()\n self.validate_gpg_keys()\n return True\n\n def user_prompt_config(self):\n config = {}\n self_config = self.config or {}\n for desc in self.desc:\n (var, default, type, prompt, validator, transform) = desc\n config[var] = self.user_prompt_config_one(desc,\n self_config.get(var))\n return config\n\n def user_prompt_config_one(self, desc, from_config):\n (var, default, type, prompt, validator, transform) = desc\n if from_config is not None:\n default = from_config\n prompt += ': '\n return self.validated_input(prompt, default, validator, transform)\n\n def validated_input(self, prompt, default, validator, transform):\n if type(default) is bool:\n default = default and 'yes' or 'no'\n if type(default) is int:\n default = str(default)\n if isinstance(default, types.ListType):\n default = \" \".join(default)\n if type(default) is not str:\n default = str(default)\n kwargs = {}\n if validator:\n kwargs['validator'] = validator\n value = prompt_toolkit.prompt(prompt,\n default=unicode(default, 'utf-8'),\n **kwargs)\n if transform:\n return transform(value)\n else:\n return value\n\n def sanitize_fingerprint(self, value):\n return value.upper().replace(' ', '')\n\n def validate_gpg_keys(self):\n keys = (('securedrop_app_gpg_public_key',\n 'securedrop_app_gpg_fingerprint'),\n\n ('ossec_alert_gpg_public_key',\n 'ossec_gpg_fpr'))\n for (public_key, fingerprint) in keys:\n validate = os.path.join(\n os.path.dirname(__file__), '..', 'bin',\n 'validate-gpg-key.sh')\n public_key = os.path.join(self.args.ansible_path,\n self.config[public_key])\n fingerprint = self.config[fingerprint]\n try:\n sdlog.debug(subprocess.check_output(\n [validate, public_key, fingerprint],\n stderr=subprocess.STDOUT))\n except 
subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n raise FingerprintException(\n \"fingerprint {} \".format(fingerprint) +\n \"does not match \" +\n \"the public key {}\".format(public_key))\n return True\n\n def exists(self):\n return os.path.exists(self.args.site_config)\n\n def save(self):\n with open(self.args.site_config, 'w') as site_config_file:\n yaml.safe_dump(self.config,\n site_config_file,\n default_flow_style=False)\n\n def load(self):\n try:\n with open(self.args.site_config) as site_config_file:\n return yaml.safe_load(site_config_file)\n except IOError:\n sdlog.error(\"Config file missing, re-run with sdconfig\")\n raise\n except yaml.YAMLError:\n sdlog.error(\"There was an issue processing {}\".format(\n self.args.site_config))\n raise\n\n\ndef setup_logger(verbose=False):\n \"\"\" Configure logging handler \"\"\"\n # Set default level on parent\n sdlog.setLevel(logging.DEBUG)\n level = logging.DEBUG if verbose else logging.INFO\n\n stdout = logging.StreamHandler(sys.stdout)\n stdout.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))\n stdout.setLevel(level)\n sdlog.addHandler(stdout)\n\n\ndef sdconfig(args):\n \"\"\"Configure SD site settings\"\"\"\n SiteConfig(args).load_and_update_config()\n\n\ndef install_securedrop(args):\n \"\"\"Install/Update SecureDrop\"\"\"\n SiteConfig(args).load()\n\n sdlog.info(\"Now installing SecureDrop on remote servers.\")\n sdlog.info(\"You will be prompted for the sudo password on the \"\n \"servers.\")\n sdlog.info(\"The sudo password is only necessary during initial \"\n \"installation.\")\n subprocess.check_call([os.path.join(args.ansible_path,\n 'securedrop-prod.yml'),\n '--ask-become-pass'], cwd=args.ansible_path)\n\n\ndef backup_securedrop(args):\n \"\"\"Perform backup of the SecureDrop Application Server.\n Creates a tarball of submissions and server config, and fetches\n back to the Admin Workstation. Future `restore` actions can be performed\n with the backup tarball.\"\"\"\n sdlog.info(\"Backing up the SecureDrop Application Server\")\n ansible_cmd = [\n 'ansible-playbook',\n os.path.join(args.ansible_path, 'securedrop-backup.yml'),\n ]\n subprocess.check_call(ansible_cmd, cwd=args.ansible_path)\n\n\ndef restore_securedrop(args):\n \"\"\"Perform restore of the SecureDrop Application Server.\n Requires a tarball of submissions and server config, created via\n the `backup` action.\"\"\"\n sdlog.info(\"Restoring the SecureDrop Application Server from backup\")\n # Canonicalize filepath to backup tarball, so Ansible sees only the\n # basename. 
The files must live in args.ansible_path,\n # but the securedrop-admin\n # script will be invoked from the repo root, so preceding dirs are likely.\n restore_file_basename = os.path.basename(args.restore_file)\n ansible_cmd = [\n 'ansible-playbook',\n os.path.join(args.ansible_path, 'securedrop-restore.yml'),\n '-e',\n \"restore_file='{}'\".format(restore_file_basename),\n ]\n subprocess.check_call(ansible_cmd, cwd=args.ansible_path)\n\n\ndef run_tails_config(args):\n \"\"\"Configure Tails environment post SD install\"\"\"\n sdlog.info(\"Configuring Tails workstation environment\")\n sdlog.info((\"You'll be prompted for the temporary Tails admin password,\"\n \" which was set on Tails login screen\"))\n ansible_cmd = [\n os.path.join(args.ansible_path, 'securedrop-tails.yml'),\n \"--ask-become-pass\",\n # Passing an empty inventory file to override the automatic dynamic\n # inventory script, which fails if no site vars are configured.\n '-i', '/dev/null',\n ]\n subprocess.check_call(ansible_cmd,\n cwd=args.ansible_path)\n\n\ndef get_logs(args):\n \"\"\"Get logs for forensics and debugging purposes\"\"\"\n sdlog.info(\"Gathering logs for forensics and debugging\")\n ansible_cmd = [\n 'ansible-playbook',\n os.path.join(args.ansible_path, 'securedrop-logs.yml'),\n ]\n subprocess.check_call(ansible_cmd, cwd=args.ansible_path)\n sdlog.info(\"Encrypt logs and send to [email protected] or upload \"\n \"to the SecureDrop support portal.\")\n\n\ndef set_default_paths(args):\n if not args.ansible_path:\n args.ansible_path = args.root + \"/install_files/ansible-base\"\n args.ansible_path = os.path.realpath(args.ansible_path)\n if not args.site_config:\n args.site_config = args.ansible_path + \"/group_vars/all/site-specific\"\n args.site_config = os.path.realpath(args.site_config)\n if not args.app_path:\n args.app_path = args.root + \"/securedrop\"\n args.app_path = os.path.realpath(args.app_path)\n return args\n\n\ndef parse_argv(argv):\n class ArgParseFormatterCombo(argparse.ArgumentDefaultsHelpFormatter,\n argparse.RawTextHelpFormatter):\n \"\"\"Needed to combine formatting classes for help output\"\"\"\n pass\n\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=ArgParseFormatterCombo)\n parser.add_argument('-v', action='store_true', default=False,\n help=\"Increase verbosity on output\")\n parser.add_argument('-d', action='store_true', default=False,\n help=\"Developer mode. 
Not to be used in production.\")\n parser.add_argument('--root', required=True,\n help=\"path to the root of the SecureDrop repository\")\n parser.add_argument('--site-config',\n help=\"path to the YAML site configuration file\")\n parser.add_argument('--ansible-path',\n help=\"path to the Ansible root\")\n parser.add_argument('--app-path',\n help=\"path to the SecureDrop application root\")\n subparsers = parser.add_subparsers()\n\n parse_sdconfig = subparsers.add_parser('sdconfig', help=sdconfig.__doc__)\n parse_sdconfig.set_defaults(func=sdconfig)\n\n parse_install = subparsers.add_parser('install',\n help=install_securedrop.__doc__)\n parse_install.set_defaults(func=install_securedrop)\n\n parse_tailsconfig = subparsers.add_parser('tailsconfig',\n help=run_tails_config.__doc__)\n parse_tailsconfig.set_defaults(func=run_tails_config)\n\n parse_backup = subparsers.add_parser('backup',\n help=backup_securedrop.__doc__)\n parse_backup.set_defaults(func=backup_securedrop)\n\n parse_restore = subparsers.add_parser('restore',\n help=restore_securedrop.__doc__)\n parse_restore.set_defaults(func=restore_securedrop)\n parse_restore.add_argument(\"restore_file\")\n\n parse_logs = subparsers.add_parser('logs',\n help=get_logs.__doc__)\n parse_logs.set_defaults(func=get_logs)\n\n return set_default_paths(parser.parse_args(argv))\n\n\ndef main(argv):\n args = parse_argv(argv)\n setup_logger(args.v)\n if args.v:\n args.func(args)\n else:\n try:\n args.func(args)\n except KeyboardInterrupt:\n sys.exit(0)\n except Exception as e:\n raise SystemExit(\n 'ERROR (run with -v for more): {msg}'.format(msg=e))\n else:\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n", "path": "admin/securedrop_admin/__init__.py" } ]
[ { "content": "# -*- mode: python; coding: utf-8 -*-\n#\n# Copyright (C) 2013-2018 Freedom of the Press Foundation & al\n# Copyright (C) 2018 Loic Dachary <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\"\"\"\nSecureDrop Admin Toolkit.\n\nFor use by administrators to install, maintain, and manage their SD\ninstances.\n\"\"\"\n\nimport argparse\nimport logging\nimport os\nimport re\nimport string\nimport subprocess\nimport sys\nimport types\nimport prompt_toolkit\nfrom prompt_toolkit.validation import Validator, ValidationError\nimport yaml\n\nsdlog = logging.getLogger(__name__)\n\n\nclass FingerprintException(Exception):\n pass\n\n\nclass SiteConfig(object):\n\n class ValidateNotEmpty(Validator):\n def validate(self, document):\n if document.text != '':\n return True\n raise ValidationError(\n message=\"Must not be an empty string\")\n\n class ValidateUser(Validator):\n def validate(self, document):\n text = document.text\n if text != '' and text != 'root' and text != 'amnesia':\n return True\n raise ValidationError(\n message=\"Must not be root, amnesia or an empty string\")\n\n class ValidateIP(Validator):\n def validate(self, document):\n if re.match('((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\\.|$)){4}$',\n document.text):\n return True\n raise ValidationError(\n message=\"An IP address must be something like 10.240.20.83\")\n\n class ValidateDNS(Validator):\n def validate(self):\n raise Exception() # pragma: no cover\n\n def is_tails(self):\n try:\n id = subprocess.check_output('lsb_release --id --short',\n shell=True).strip()\n except subprocess.CalledProcessError:\n id = None\n return id == 'Tails'\n\n def lookup_fqdn(self, fqdn, dns=None):\n cmd = 'host -W=10 -T -4 ' + fqdn\n if self.is_tails():\n cmd = 'torify ' + cmd\n cmd += ' ' + (dns and dns or '8.8.8.8')\n try:\n result = subprocess.check_output(cmd, shell=True,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n result = e.output\n sdlog.debug(cmd + ' => ' + result)\n return 'has address' in result\n\n class ValidateDNSServer(ValidateDNS):\n def validate(self, document):\n if self.lookup_fqdn('gnu.org', document.text):\n return True\n raise ValidationError(\n message='Unable to resolve gnu.org using this DNS')\n\n class ValidateFQDN(ValidateDNS):\n def validate(self, document):\n if self.lookup_fqdn(document.text):\n return True\n raise ValidationError(\n message='Unable to resolve ' + document.text)\n\n class ValidatePath(Validator):\n def __init__(self, basedir):\n self.basedir = basedir\n super(SiteConfig.ValidatePath, self).__init__()\n\n def validate(self, document):\n if document.text == '':\n raise ValidationError(\n message='an existing file name is required')\n path = os.path.join(self.basedir, document.text)\n if os.path.exists(path):\n return True\n raise ValidationError(\n message=path + ' file does not exist')\n\n class ValidateYesNo(Validator):\n def validate(self, document):\n 
text = document.text.lower()\n if text == 'yes' or text == 'no':\n return True\n raise ValidationError(message=\"Must be either yes or no\")\n\n class ValidateFingerprint(Validator):\n def validate(self, document):\n text = document.text.replace(' ', '')\n if text == '65A1B5FF195B56353CC63DFFCC40EF1228271441':\n raise ValidationError(\n message='This is the TEST journalist fingerprint')\n if text == '600BC6D5142C68F35DDBCEA87B597104EDDDC102':\n raise ValidationError(\n message='This is the TEST admin fingerprint')\n if not re.match('[a-fA-F0-9]{40}$', text):\n raise ValidationError(\n message='fingerprints must be 40 hexadecimal characters')\n return True\n\n class ValidateInt(Validator):\n def validate(self, document):\n if re.match('\\d+$', document.text):\n return True\n raise ValidationError(message=\"Must be an integer\")\n\n class Locales(object):\n def __init__(self, appdir):\n self.translation_dir = os.path.realpath(\n os.path.join(appdir, 'translations'))\n\n def get_translations(self):\n translations = set(['en', 'en_US'])\n for dirname in os.listdir(self.translation_dir):\n if dirname != 'messages.pot':\n translations.add(dirname)\n return translations\n\n class ValidateLocales(Validator):\n def __init__(self, basedir):\n self.basedir = basedir\n super(SiteConfig.ValidateLocales, self).__init__()\n\n def validate(self, document):\n desired = document.text.split()\n existing = SiteConfig.Locales(self.basedir).get_translations()\n missing = set(desired) - set(existing)\n if not missing:\n return True\n raise ValidationError(\n message=\"The following locales do not exist \" + \" \".join(\n missing))\n\n class ValidateOSSECUsername(Validator):\n def validate(self, document):\n text = document.text\n if text and '@' not in text and 'test' != text:\n return True\n raise ValidationError(\n message=\"The SASL username should not include the domain name\")\n\n class ValidateOSSECPassword(Validator):\n def validate(self, document):\n text = document.text\n if len(text) >= 8 and 'password123' != text:\n return True\n raise ValidationError(\n message=\"Password for OSSEC email account must be strong\")\n\n class ValidateOSSECEmail(Validator):\n def validate(self, document):\n text = document.text\n if text and '@' in text and '[email protected]' != text:\n return True\n raise ValidationError(\n message=(\"Must contain a @ and be set to \"\n \"something other than [email protected]\"))\n\n def __init__(self, args):\n self.args = args\n translations = SiteConfig.Locales(\n self.args.app_path).get_translations()\n translations = \" \".join(translations)\n self.desc = [\n ['ssh_users', 'sd', str,\n u'Username for SSH access to the servers',\n SiteConfig.ValidateUser(),\n None],\n ['app_ip', '10.20.2.2', str,\n u'Local IPv4 address for the Application Server',\n SiteConfig.ValidateIP(),\n None],\n ['monitor_ip', '10.20.3.2', str,\n u'Local IPv4 address for the Monitor Server',\n SiteConfig.ValidateIP(),\n None],\n ['app_hostname', 'app', str,\n u'Hostname for Application Server',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['monitor_hostname', 'mon', str,\n u'Hostname for Monitor Server',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['dns_server', '8.8.8.8', str,\n u'DNS server specified during installation',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['securedrop_app_https_on_source_interface', False, bool,\n u'Whether HTTPS should be enabled on '\n 'Source Interface (requires EV cert)',\n SiteConfig.ValidateYesNo(),\n lambda x: x.lower() == 'yes'],\n ['securedrop_app_gpg_public_key', 
'SecureDrop.asc', str,\n u'Local filepath to public key for '\n 'SecureDrop Application GPG public key',\n SiteConfig.ValidatePath(self.args.ansible_path),\n None],\n ['securedrop_app_gpg_fingerprint', '', str,\n u'Full fingerprint for the SecureDrop Application GPG Key',\n SiteConfig.ValidateFingerprint(),\n self.sanitize_fingerprint],\n ['ossec_alert_gpg_public_key', 'ossec.pub', str,\n u'Local filepath to OSSEC alerts GPG public key',\n SiteConfig.ValidatePath(self.args.ansible_path),\n None],\n ['ossec_gpg_fpr', '', str,\n u'Full fingerprint for the OSSEC alerts GPG public key',\n SiteConfig.ValidateFingerprint(),\n self.sanitize_fingerprint],\n ['ossec_alert_email', '', str,\n u'Admin email address for receiving OSSEC alerts',\n SiteConfig.ValidateOSSECEmail(),\n None],\n ['smtp_relay', \"smtp.gmail.com\", str,\n u'SMTP relay for sending OSSEC alerts',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['smtp_relay_port', 587, int,\n u'SMTP port for sending OSSEC alerts',\n SiteConfig.ValidateInt(),\n int],\n ['sasl_domain', \"gmail.com\", str,\n u'SASL domain for sending OSSEC alerts',\n SiteConfig.ValidateNotEmpty(),\n None],\n ['sasl_username', '', str,\n u'SASL username for sending OSSEC alerts',\n SiteConfig.ValidateOSSECUsername(),\n None],\n ['sasl_password', '', str,\n u'SASL password for sending OSSEC alerts',\n SiteConfig.ValidateOSSECPassword(),\n None],\n ['securedrop_supported_locales', [], types.ListType,\n u'Space separated list of additional locales to support '\n '(' + translations + ')',\n SiteConfig.ValidateLocales(self.args.app_path),\n string.split],\n ]\n\n def load_and_update_config(self):\n if self.exists():\n self.config = self.load()\n else:\n self.config = None\n return self.update_config()\n\n def update_config(self):\n self.config.update(self.user_prompt_config())\n self.save()\n self.validate_gpg_keys()\n return True\n\n def user_prompt_config(self):\n config = {}\n self_config = self.config or {}\n for desc in self.desc:\n (var, default, type, prompt, validator, transform) = desc\n config[var] = self.user_prompt_config_one(desc,\n self_config.get(var))\n return config\n\n def user_prompt_config_one(self, desc, from_config):\n (var, default, type, prompt, validator, transform) = desc\n if from_config is not None:\n default = from_config\n prompt += ': '\n return self.validated_input(prompt, default, validator, transform)\n\n def validated_input(self, prompt, default, validator, transform):\n if type(default) is bool:\n default = default and 'yes' or 'no'\n if type(default) is int:\n default = str(default)\n if isinstance(default, types.ListType):\n default = \" \".join(default)\n if type(default) is not str:\n default = str(default)\n kwargs = {}\n if validator:\n kwargs['validator'] = validator\n value = prompt_toolkit.prompt(prompt,\n default=unicode(default, 'utf-8'),\n **kwargs)\n if transform:\n return transform(value)\n else:\n return value\n\n def sanitize_fingerprint(self, value):\n return value.upper().replace(' ', '')\n\n def validate_gpg_keys(self):\n keys = (('securedrop_app_gpg_public_key',\n 'securedrop_app_gpg_fingerprint'),\n\n ('ossec_alert_gpg_public_key',\n 'ossec_gpg_fpr'))\n for (public_key, fingerprint) in keys:\n validate = os.path.join(\n os.path.dirname(__file__), '..', 'bin',\n 'validate-gpg-key.sh')\n public_key = os.path.join(self.args.ansible_path,\n self.config[public_key])\n fingerprint = self.config[fingerprint]\n try:\n sdlog.debug(subprocess.check_output(\n [validate, public_key, fingerprint],\n stderr=subprocess.STDOUT))\n 
except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n raise FingerprintException(\n \"fingerprint {} \".format(fingerprint) +\n \"does not match \" +\n \"the public key {}\".format(public_key))\n return True\n\n def exists(self):\n return os.path.exists(self.args.site_config)\n\n def save(self):\n with open(self.args.site_config, 'w') as site_config_file:\n yaml.safe_dump(self.config,\n site_config_file,\n default_flow_style=False)\n\n def load(self):\n try:\n with open(self.args.site_config) as site_config_file:\n return yaml.safe_load(site_config_file)\n except IOError:\n sdlog.error(\"Config file missing, re-run with sdconfig\")\n raise\n except yaml.YAMLError:\n sdlog.error(\"There was an issue processing {}\".format(\n self.args.site_config))\n raise\n\n\ndef setup_logger(verbose=False):\n \"\"\" Configure logging handler \"\"\"\n # Set default level on parent\n sdlog.setLevel(logging.DEBUG)\n level = logging.DEBUG if verbose else logging.INFO\n\n stdout = logging.StreamHandler(sys.stdout)\n stdout.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))\n stdout.setLevel(level)\n sdlog.addHandler(stdout)\n\n\ndef sdconfig(args):\n \"\"\"Configure SD site settings\"\"\"\n SiteConfig(args).load_and_update_config()\n\n\ndef install_securedrop(args):\n \"\"\"Install/Update SecureDrop\"\"\"\n SiteConfig(args).load()\n\n sdlog.info(\"Now installing SecureDrop on remote servers.\")\n sdlog.info(\"You will be prompted for the sudo password on the \"\n \"servers.\")\n sdlog.info(\"The sudo password is only necessary during initial \"\n \"installation.\")\n subprocess.check_call([os.path.join(args.ansible_path,\n 'securedrop-prod.yml'),\n '--ask-become-pass'], cwd=args.ansible_path)\n\n\ndef backup_securedrop(args):\n \"\"\"Perform backup of the SecureDrop Application Server.\n Creates a tarball of submissions and server config, and fetches\n back to the Admin Workstation. Future `restore` actions can be performed\n with the backup tarball.\"\"\"\n sdlog.info(\"Backing up the SecureDrop Application Server\")\n ansible_cmd = [\n 'ansible-playbook',\n os.path.join(args.ansible_path, 'securedrop-backup.yml'),\n ]\n subprocess.check_call(ansible_cmd, cwd=args.ansible_path)\n\n\ndef restore_securedrop(args):\n \"\"\"Perform restore of the SecureDrop Application Server.\n Requires a tarball of submissions and server config, created via\n the `backup` action.\"\"\"\n sdlog.info(\"Restoring the SecureDrop Application Server from backup\")\n # Canonicalize filepath to backup tarball, so Ansible sees only the\n # basename. 
The files must live in args.ansible_path,\n # but the securedrop-admin\n # script will be invoked from the repo root, so preceding dirs are likely.\n restore_file_basename = os.path.basename(args.restore_file)\n ansible_cmd = [\n 'ansible-playbook',\n os.path.join(args.ansible_path, 'securedrop-restore.yml'),\n '-e',\n \"restore_file='{}'\".format(restore_file_basename),\n ]\n subprocess.check_call(ansible_cmd, cwd=args.ansible_path)\n\n\ndef run_tails_config(args):\n \"\"\"Configure Tails environment post SD install\"\"\"\n sdlog.info(\"Configuring Tails workstation environment\")\n sdlog.info((\"You'll be prompted for the temporary Tails admin password,\"\n \" which was set on Tails login screen\"))\n ansible_cmd = [\n os.path.join(args.ansible_path, 'securedrop-tails.yml'),\n \"--ask-become-pass\",\n # Passing an empty inventory file to override the automatic dynamic\n # inventory script, which fails if no site vars are configured.\n '-i', '/dev/null',\n ]\n subprocess.check_call(ansible_cmd,\n cwd=args.ansible_path)\n\n\ndef get_logs(args):\n \"\"\"Get logs for forensics and debugging purposes\"\"\"\n sdlog.info(\"Gathering logs for forensics and debugging\")\n ansible_cmd = [\n 'ansible-playbook',\n os.path.join(args.ansible_path, 'securedrop-logs.yml'),\n ]\n subprocess.check_call(ansible_cmd, cwd=args.ansible_path)\n sdlog.info(\"Encrypt logs and send to [email protected] or upload \"\n \"to the SecureDrop support portal.\")\n\n\ndef set_default_paths(args):\n if not args.ansible_path:\n args.ansible_path = args.root + \"/install_files/ansible-base\"\n args.ansible_path = os.path.realpath(args.ansible_path)\n if not args.site_config:\n args.site_config = args.ansible_path + \"/group_vars/all/site-specific\"\n args.site_config = os.path.realpath(args.site_config)\n if not args.app_path:\n args.app_path = args.root + \"/securedrop\"\n args.app_path = os.path.realpath(args.app_path)\n return args\n\n\ndef parse_argv(argv):\n class ArgParseFormatterCombo(argparse.ArgumentDefaultsHelpFormatter,\n argparse.RawTextHelpFormatter):\n \"\"\"Needed to combine formatting classes for help output\"\"\"\n pass\n\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=ArgParseFormatterCombo)\n parser.add_argument('-v', action='store_true', default=False,\n help=\"Increase verbosity on output\")\n parser.add_argument('-d', action='store_true', default=False,\n help=\"Developer mode. 
Not to be used in production.\")\n parser.add_argument('--root', required=True,\n help=\"path to the root of the SecureDrop repository\")\n parser.add_argument('--site-config',\n help=\"path to the YAML site configuration file\")\n parser.add_argument('--ansible-path',\n help=\"path to the Ansible root\")\n parser.add_argument('--app-path',\n help=\"path to the SecureDrop application root\")\n subparsers = parser.add_subparsers()\n\n parse_sdconfig = subparsers.add_parser('sdconfig', help=sdconfig.__doc__)\n parse_sdconfig.set_defaults(func=sdconfig)\n\n parse_install = subparsers.add_parser('install',\n help=install_securedrop.__doc__)\n parse_install.set_defaults(func=install_securedrop)\n\n parse_tailsconfig = subparsers.add_parser('tailsconfig',\n help=run_tails_config.__doc__)\n parse_tailsconfig.set_defaults(func=run_tails_config)\n\n parse_backup = subparsers.add_parser('backup',\n help=backup_securedrop.__doc__)\n parse_backup.set_defaults(func=backup_securedrop)\n\n parse_restore = subparsers.add_parser('restore',\n help=restore_securedrop.__doc__)\n parse_restore.set_defaults(func=restore_securedrop)\n parse_restore.add_argument(\"restore_file\")\n\n parse_logs = subparsers.add_parser('logs',\n help=get_logs.__doc__)\n parse_logs.set_defaults(func=get_logs)\n\n return set_default_paths(parser.parse_args(argv))\n\n\ndef main(argv):\n args = parse_argv(argv)\n setup_logger(args.v)\n if args.v:\n args.func(args)\n else:\n try:\n args.func(args)\n except KeyboardInterrupt:\n sys.exit(0)\n except Exception as e:\n raise SystemExit(\n 'ERROR (run with -v for more): {msg}'.format(msg=e))\n else:\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n", "path": "admin/securedrop_admin/__init__.py" } ]
diff --git a/admin/securedrop_admin/__init__.py b/admin/securedrop_admin/__init__.py index dc6290e6ce..4f88ccca58 100755 --- a/admin/securedrop_admin/__init__.py +++ b/admin/securedrop_admin/__init__.py @@ -291,7 +291,7 @@ def load_and_update_config(self): return self.update_config() def update_config(self): - self.config = self.user_prompt_config() + self.config.update(self.user_prompt_config()) self.save() self.validate_gpg_keys() return True diff --git a/admin/tests/files/site-specific b/admin/tests/files/site-specific index 9ec6831e83..c335c14b3c 100644 --- a/admin/tests/files/site-specific +++ b/admin/tests/files/site-specific @@ -17,3 +17,4 @@ securedrop_supported_locales: smtp_relay: smtp.gmail.com smtp_relay_port: 587 ssh_users: sd +user_defined_variable: "must not be discarded" diff --git a/admin/tests/test_securedrop-admin.py b/admin/tests/test_securedrop-admin.py index 6b76cb2142..4147de11ea 100644 --- a/admin/tests/test_securedrop-admin.py +++ b/admin/tests/test_securedrop-admin.py @@ -297,6 +297,7 @@ def test_update_config(self, mock_save, mock_validate_input): site_config = securedrop_admin.SiteConfig(args) assert site_config.load_and_update_config() + assert 'user_defined_variable' in site_config.config mock_save.assert_called_once() mock_validate_input.assert_called()
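The one-line fix above swaps reassignment for an in-place `dict.update()`, so keys already loaded from the `site-specific` file but unknown to the prompts (such as the `user_defined_variable` added to the test fixture) survive a reconfiguration. A minimal sketch of the difference, using hypothetical keys rather than the full list of SecureDrop settings:

```python
# Minimal sketch of the fix above (hypothetical keys, not the real set of
# SecureDrop settings): plain reassignment discards any key the prompts do
# not know about, while dict.update() merges prompted answers into the
# previously loaded config.
loaded = {"ssh_users": "sd", "user_defined_variable": "must not be discarded"}
prompted = {"ssh_users": "sd2"}

config = dict(loaded)
config = prompted                       # old behavior: extra key is lost
assert "user_defined_variable" not in config

config = dict(loaded)
config.update(prompted)                 # fixed behavior: extra key survives
assert config["user_defined_variable"] == "must not be discarded"
assert config["ssh_users"] == "sd2"     # prompted values still take precedence
```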
carpentries__amy-770
Names show up multiple times in assignment pulldown
1. Go to an event.
2. Try to assign the event to someone other than yourself.
3. A selection dialog with a pulldown appears so that you can choose a person.
4. Some names (currently Greg Wilson and Tracy Teal, possibly others) show up multiple times in that list.
[ { "content": "from functools import reduce\nimport operator\nimport re\n\nfrom django.contrib.auth.models import Group\nfrom django.db.models import Q\n\nfrom selectable.base import ModelLookup\nfrom selectable.registry import registry\nfrom selectable.decorators import login_required\n\nfrom workshops import models\n\n\n@login_required\nclass EventLookup(ModelLookup):\n model = models.Event\n search_fields = ('slug__icontains', )\n\n\n@login_required\nclass HostLookup(ModelLookup):\n model = models.Host\n search_fields = (\n 'domain__icontains',\n 'fullname__icontains'\n )\n\n\n@login_required\nclass PersonLookup(ModelLookup):\n model = models.Person\n search_fields = (\n 'personal__icontains',\n 'family__icontains',\n 'email__icontains',\n 'username__icontains'\n )\n\n def get_query(self, request, term):\n \"\"\"Override this method to allow for additional lookup method: \"\"\"\n # original code from selectable.base.ModelLookup.get_query:\n qs = self.get_queryset()\n if term:\n search_filters = []\n if self.search_fields:\n for field in self.search_fields:\n search_filters.append(Q(**{field: term}))\n\n # tokenizing part\n tokens = re.split('\\s+', term)\n if len(tokens) == 2:\n name1, name2 = tokens\n complex_q = (\n Q(personal__icontains=name1) & Q(family__icontains=name2)\n ) | (\n Q(personal__icontains=name2) & Q(family__icontains=name1)\n )\n search_filters.append(complex_q)\n\n # this is brilliant: it applies OR to all search filters\n qs = qs.filter(reduce(operator.or_, search_filters))\n\n return qs\n\n\n@login_required\nclass AdminLookup(ModelLookup):\n \"\"\"The same as PersonLookup, but allows only to select administrators.\n\n Administrator is anyone with superuser power or in \"administrators\" group.\n \"\"\"\n model = models.Person\n search_fields = (\n 'personal__icontains',\n 'family__icontains',\n 'email__icontains',\n 'username__icontains'\n )\n\n def get_query(self, request, term):\n results = super().get_query(request, term)\n admin_group = Group.objects.get(name='administrators')\n results = results.filter(\n Q(is_superuser=True) | Q(groups__in=[admin_group])\n )\n return results\n\n\n@login_required\nclass AirportLookup(ModelLookup):\n model = models.Airport\n search_fields = (\n 'iata__icontains',\n 'fullname__icontains'\n )\n\n\nregistry.register(EventLookup)\nregistry.register(HostLookup)\nregistry.register(PersonLookup)\nregistry.register(AdminLookup)\nregistry.register(AirportLookup)\n", "path": "workshops/lookups.py" } ]
[ { "content": "from functools import reduce\nimport operator\nimport re\n\nfrom django.contrib.auth.models import Group\nfrom django.db.models import Q\n\nfrom selectable.base import ModelLookup\nfrom selectable.registry import registry\nfrom selectable.decorators import login_required\n\nfrom workshops import models\n\n\n@login_required\nclass EventLookup(ModelLookup):\n model = models.Event\n search_fields = ('slug__icontains', )\n\n\n@login_required\nclass HostLookup(ModelLookup):\n model = models.Host\n search_fields = (\n 'domain__icontains',\n 'fullname__icontains'\n )\n\n\n@login_required\nclass PersonLookup(ModelLookup):\n model = models.Person\n search_fields = (\n 'personal__icontains',\n 'family__icontains',\n 'email__icontains',\n 'username__icontains'\n )\n\n def get_query(self, request, term):\n \"\"\"Override this method to allow for additional lookup method: \"\"\"\n # original code from selectable.base.ModelLookup.get_query:\n qs = self.get_queryset()\n if term:\n search_filters = []\n if self.search_fields:\n for field in self.search_fields:\n search_filters.append(Q(**{field: term}))\n\n # tokenizing part\n tokens = re.split('\\s+', term)\n if len(tokens) == 2:\n name1, name2 = tokens\n complex_q = (\n Q(personal__icontains=name1) & Q(family__icontains=name2)\n ) | (\n Q(personal__icontains=name2) & Q(family__icontains=name1)\n )\n search_filters.append(complex_q)\n\n # this is brilliant: it applies OR to all search filters\n qs = qs.filter(reduce(operator.or_, search_filters))\n\n return qs\n\n\n@login_required\nclass AdminLookup(ModelLookup):\n \"\"\"The same as PersonLookup, but allows only to select administrators.\n\n Administrator is anyone with superuser power or in \"administrators\" group.\n \"\"\"\n model = models.Person\n search_fields = (\n 'personal__icontains',\n 'family__icontains',\n 'email__icontains',\n 'username__icontains'\n )\n\n def get_query(self, request, term):\n results = super().get_query(request, term)\n admin_group = Group.objects.get(name='administrators')\n results = results.filter(\n Q(is_superuser=True) | Q(groups__in=[admin_group])\n ).distinct()\n return results\n\n\n@login_required\nclass AirportLookup(ModelLookup):\n model = models.Airport\n search_fields = (\n 'iata__icontains',\n 'fullname__icontains'\n )\n\n\nregistry.register(EventLookup)\nregistry.register(HostLookup)\nregistry.register(PersonLookup)\nregistry.register(AdminLookup)\nregistry.register(AirportLookup)\n", "path": "workshops/lookups.py" } ]
diff --git a/workshops/lookups.py b/workshops/lookups.py index 2fef53b01..8f1108d38 100644 --- a/workshops/lookups.py +++ b/workshops/lookups.py @@ -83,7 +83,7 @@ def get_query(self, request, term): admin_group = Group.objects.get(name='administrators') results = results.filter( Q(is_superuser=True) | Q(groups__in=[admin_group]) - ) + ).distinct() return results
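The `Q(is_superuser=True) | Q(groups__in=[admin_group])` filter joins `Person` against its group-membership table, so a superuser who belongs to several groups can match once per joined membership row, which is why the same name appeared repeatedly; `.distinct()` collapses those repeats. A runnable sqlite3 sketch of the same mechanism (hypothetical two-table schema standing in for Django's join, not AMY's actual tables):

```python
# Hypothetical schema standing in for the Django person/group join.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
    CREATE TABLE person (id INTEGER PRIMARY KEY, name TEXT, is_superuser INTEGER);
    CREATE TABLE membership (person_id INTEGER, group_name TEXT);
    INSERT INTO person VALUES (1, 'Greg Wilson', 1);
    INSERT INTO membership VALUES (1, 'administrators'), (1, 'instructors');
""")

query = """
    SELECT {cols} FROM person
    LEFT JOIN membership ON membership.person_id = person.id
    WHERE person.is_superuser = 1 OR membership.group_name = 'administrators'
"""
print(conn.execute(query.format(cols="person.name")).fetchall())
# [('Greg Wilson',), ('Greg Wilson',)]  -- one row per joined membership

print(conn.execute(query.format(cols="DISTINCT person.name")).fetchall())
# [('Greg Wilson',)]                    -- what .distinct() adds in the fix
```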
bokeh__bokeh-1617
BokehJS unrecoverable errors in notebook
It seems easy to get the notebook into an unrecoverable state when using `push_notebook` and IPython interactors. You must close the session entirely and start again to regain plots. Executing all the cells in the notebook stored in this gist: https://gist.github.com/bryevdv/b4e9eb68a6234a67a570 should reproduce the problem. To be specific, by "unrecoverable" I mean that clearing all cells and restarting the kernel from the menu does not fix the problem. It is still impossible to generate plots. I have had to close the tab, shut down the notebook from the IPython home notebook list, and then re-open a new tab to be able to plot again. @damianavila this is a pretty serious bug. We should try to fix it for 0.7.1 for sure. I will take a look and add any findings, but I may need your input/assistance.
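Comparing the before and after files below, the fix removes the `if name: plot._id = name` override, keeping `name` as renderer metadata while leaving `_id` auto-generated. A plausible reading (hedged: the source here does not spell out the mechanism) is that user-chosen names are not unique, so aliasing two plots onto one model id corrupts the id-keyed client state that BokehJS maintains across pushes. A hypothetical sketch of why an id-keyed registry breaks under that aliasing (not BokehJS's actual code):

```python
# Hypothetical sketch: client-side models live in a registry keyed by a
# unique _id. Overriding _id with a user-chosen name lets two distinct
# plots collide on the same key, so a later push silently clobbers the
# earlier model -- roughly the wedged notebook state described above.
import uuid

registry = {}

class Model:
    def __init__(self, name=None):
        self._id = str(uuid.uuid4())   # unique by construction (the fix)
        self.name = name               # purely descriptive metadata
        registry[self._id] = self

p1 = Model(name="myplot")
p2 = Model(name="myplot")              # same name, different identity
assert p1._id != p2._id and len(registry) == 2

# Simulate the old behavior (_id = name): the second registration
# overwrites the first, losing p1 entirely.
registry.clear()
for m in (p1, p2):
    registry[m.name] = m
assert len(registry) == 1              # p1 lost -- state corruption
```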
[ { "content": "from __future__ import absolute_import\n\nfrom six import iteritems\nfrom collections import OrderedDict\n\nfrom .models import glyphs, markers\nfrom .mixins import FillProps, LineProps\n\ndef _glyph_function(glyphclass, dsnames, argnames, docstring, xfields=[\"x\"], yfields=[\"y\"]):\n\n def func(document_or_plot, *args, **kwargs):\n # Note: We want to reuse the glyph functions by attaching them the Plot\n # class. Imports are here to prevent circular imports.\n from .plotting_helpers import (\n _match_data_params, _update_plot_data_ranges,\n _materialize_colors_and_alpha, _get_legend,\n _make_legend, _get_select_tool\n )\n from .models import ColumnDataSource, GlyphRenderer, Plot, ServerDataSource\n source = kwargs.pop('source', None)\n if isinstance(source, ServerDataSource):\n datasource = ColumnDataSource()\n serversource = source\n elif source is None:\n datasource = ColumnDataSource()\n serversource = None\n else:\n datasource = source\n serversource = None\n\n legend_name = kwargs.pop(\"legend\", None)\n\n from .document import Document\n document = None\n plot = None\n if isinstance(document_or_plot, Plot):\n plot = document_or_plot\n # TODO (bev) this seems like it should be here but invalid kwargs\n # currently get through (see also below)\n # plot.update(**kwargs)\n elif isinstance(document_or_plot, Document):\n document = document_or_plot\n if document.curplot() is not None and document._hold:\n plot = document.curplot()\n # plot.update(**kwargs)\n else:\n plot = document.figure(**kwargs)\n else:\n raise ValueError(\"expected document or plot object for first argument\")\n\n name = kwargs.pop('name', None)\n if name:\n plot._id = name\n\n select_tool = _get_select_tool(plot)\n\n # Process the glyph dataspec parameters\n glyph_params = _match_data_params(dsnames, glyphclass,\n datasource, serversource,\n args, _materialize_colors_and_alpha(kwargs))\n\n x_data_fields = []\n for xx in xfields:\n if not isinstance(glyph_params[xx], dict): continue\n if glyph_params[xx]['units'] == 'data': x_data_fields.append(glyph_params[xx]['field'])\n y_data_fields = []\n for yy in yfields:\n if not isinstance(glyph_params[yy], dict): continue\n if glyph_params[yy]['units'] == 'data': y_data_fields.append(glyph_params[yy]['field'])\n\n _update_plot_data_ranges(plot, datasource, x_data_fields, y_data_fields)\n kwargs.update(glyph_params)\n\n glyph_props = glyphclass.properties() | set(argnames)\n glyph_kwargs = dict((key, value) for (key, value) in iteritems(kwargs) if key in glyph_props)\n glyph = glyphclass(**glyph_kwargs)\n\n nonselection_glyph_params = _materialize_colors_and_alpha(kwargs, prefix='nonselection_', default_alpha=0.1)\n nonselection_glyph = glyph.clone()\n\n if isinstance(nonselection_glyph, FillProps):\n nonselection_glyph.fill_color = nonselection_glyph_params['fill_color']\n nonselection_glyph.fill_alpha = nonselection_glyph_params['fill_alpha']\n\n if isinstance(nonselection_glyph, LineProps):\n nonselection_glyph.line_color = nonselection_glyph_params['line_color']\n nonselection_glyph.line_alpha = nonselection_glyph_params['line_alpha']\n\n glyph_renderer = GlyphRenderer(\n data_source=datasource,\n server_data_source=serversource,\n glyph=glyph,\n nonselection_glyph=nonselection_glyph,\n name=name)\n\n # TODO (bev) hacky, fix up when glyphspecs are simplified/removed\n if 'x_range_name' in kwargs:\n glyph_renderer.x_range_name = kwargs['x_range_name']\n if 'y_range_name' in kwargs:\n glyph_renderer.y_range_name = kwargs['y_range_name']\n\n if 
legend_name:\n legend = _get_legend(plot)\n if not legend:\n legend = _make_legend(plot)\n legends = OrderedDict(legend.legends)\n legends.setdefault(legend_name, []).append(glyph_renderer)\n legend.legends = list(legends.items())\n\n if select_tool :\n select_tool.renderers.append(glyph_renderer)\n select_tool._dirty = True\n\n plot.renderers.append(glyph_renderer)\n plot._dirty = True\n if document and document.autoadd:\n document.add(plot)\n return plot\n func.__name__ = glyphclass.__view_model__\n func.__doc__ = docstring\n return func\n\nannular_wedge = _glyph_function(glyphs.AnnularWedge, (\"x\", \"y\", \"inner_radius\", \"outer_radius\", \"start_angle\", \"end_angle\"), (\"direction\",),\n\"\"\" The `annular_wedge` glyph renders annular wedges centered at `x`, `y`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n inner_radius (str or list[float]) : values or field names of inner radii\n outer_radius (str or list[float]) : values or field names of outer radii\n start_angle (str or list[float]) : values or field names of starting angles\n end_angle (str or list[float]) : values or field names of ending angles\n direction (\"clock\" or \"anticlock\", optional): direction to turn between starting and ending angles, defaults to \"anticlock\"\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\nannulus = _glyph_function(glyphs.Annulus, (\"x\", \"y\" ,\"inner_radius\", \"outer_radius\"), (),\n\"\"\" The `annulus` glyph renders annuli centered at `x`, `y`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n inner_radius (str or list[float]) : values or field names of inner radii\n outer_radius (str or list[float]) : values or field names of outer radii\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\narc = _glyph_function(glyphs.Arc, (\"x\", \"y\", \"radius\" ,\"start_angle\", \"end_angle\"), (\"direction\",),\n\"\"\" The `arc` glyph renders circular arcs centered at `x`, `y`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n radius (str or list[float]) : values or field names of arc radii\n start_angle (str or list[float]) : values or field names of starting angles\n end_angle (str or list[float]) : values or field names of ending angles\n direction (\"clock\" or \"anticlock\", optional): direction to turn between starting and ending angles, defaults to \"anticlock\"\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\nasterisk = _glyph_function(markers.Asterisk, (\"x\", \"y\"), (),\n\"\"\" The `asterisk` glyph is a marker that renders asterisks at `x`, `y` with size `size`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of 
center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\nbezier = _glyph_function(glyphs.Bezier, (\"x0\", \"y0\", \"x1\", \"y1\", \"cx0\", \"cy0\", \"cx1\", \"cy1\"), (),\n\"\"\" The bezier glyph displays Bezier curves with the given starting, ending, and control points.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x0 (str or list[float]) : values or field names of starting `x` coordinates\n y0 (str or list[float]) : values or field names of starting `y` coordinates\n x1 (str or list[float]) : values or field names of ending `x` coordinates\n y1 (str or list[float]) : values or field names of ending `y` coordinates\n cx0 (str or list[float]) : values or field names of first control point `x` coordinates\n cy0 (str or list[float]) : values or field names of first control point `y` coordinates\n cx1 (str or list[float]) : values or field names of second control point `x` coordinates\n cy1 (str or list[float]) : values or field names of second control point `y` coordinates\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\",\n xfields=['x0', 'x1'], yfields=['y0', 'y1'])\n\ncircle = _glyph_function(markers.Circle, (\"x\", \"y\"), (),\n\"\"\" The `circle` glyph is a marker that renders circles at `x`, `y` with size `size`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float], optional) : values or field names of sizes in screen units\n radius (str or list[float], optional): values or field names of radii\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\nNotes:\n Only one of `size` or `radius` should be provided. 
Note that `radius` defaults to data units.\n\"\"\"\n)\n\ncircle_cross = _glyph_function(markers.CircleCross, (\"x\", \"y\"), (),\n\"\"\" The `circle_cross` glyph is a marker that renders circles together with a crossbar (+) at `x`, `y` with size `size` or `radius`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\ncircle_x = _glyph_function(markers.CircleX, (\"x\", \"y\"), (),\n\"\"\" The `circle_x` glyph is a marker that renders circles together with a \"X\" glyph at `x`, `y` with size `size`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\ncross = _glyph_function(markers.Cross, (\"x\", \"y\"), (),\n\"\"\" The `cross` glyph is a marker that renders crossbars (+) at `x`, `y` with size `size`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\ndiamond = _glyph_function(markers.Diamond, (\"x\", \"y\"), (),\n\"\"\" The `diamond` glyph is a marker that renders diamonds at `x`, `y` with size `size` or `radius`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\ndiamond_cross = _glyph_function(markers.DiamondCross, (\"x\", \"y\"), (),\n\"\"\" The `diamond_cross` glyph is a marker that renders diamonds together with a crossbar (+) at `x`, `y` with size `size` or `radius`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\nimage = _glyph_function(glyphs.Image, (\"image\", \"x\", \"y\", \"dw\", \"dh\"), ('palette', 'reserve_color', 
'reserve_val', 'color_mapper', 'dilate'),\n\"\"\" The image glyph takes each image as a two-dimensional array of scalar data.\n\nA palette (string name of a built-in palette, currently) must also be supplied to use for color-mapping the scalar image.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n image (str or 2D array_like of float) : value or field names of scalar image data\n x (str or list[float]) : values or field names of lower left `x` coordinates\n y (str or list[float]) : values or field names of lower left `y` coordinates\n dw (str or list[float]) : values or field names of image width distances\n dh (str or list[float]) : values or field names of image height distances\n palette (str or list[str]) : values or field names of palettes to use for color-mapping (see :ref:`bokeh_dot_palettes` for more details)\n colorMapper (LinearColorMapper) : a LinearColorMapper instance\n dilate (bool, optional) : whether to dilate pixel distance computations when drawing, defaults to False\n\nReturns:\n plot\n\nNotes:\n setting `dilate` to True will cause pixel distances (e.g., for `dw` and `dh`) to\n be rounded up, always.\n\"\"\"\n)\n\nimage_rgba = _glyph_function(glyphs.ImageRGBA, (\"image\", \"x\", \"y\", \"dw\", \"dh\"), (\"dilate\",),\n\"\"\" The image_rgba glyph takes each ``image`` as a two-dimensional array of RGBA values (encoded\nas 32-bit integers).\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n image (str or 2D array_like of uint32) : value or field names of RGBA image data\n x (str or list[float]) : values or field names of lower left `x` coordinates\n y (str or list[float]) : values or field names of lower left `y` coordinates\n dw (str or list[float]) : values or field names of image width distances\n dh (str or list[float]) : values or field names of image height distances\n dilate (bool, optional) : whether to dilate pixel distance computations when drawing, defaults to False\n\nReturns:\n plot\n\nNotes:\n setting `dilate` to True will cause pixel distances (e.g., for `dw` and `dh`) to\n be rounded up, always.\n\"\"\"\n)\n\nimage_url = _glyph_function(glyphs.ImageURL, (\"url\", \"x\", \"y\"), (),\n\"\"\"The image_url glyph takes a urls for images to display.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n url (str) : value of RGBA image data\n x (str or list[float]) : values or field names of upper left `x` coordinates\n y (str or list[float]) : values or field names of upper left `y` coordinates\n angle (float) : angle to rotate image by\n\nReturns:\n plot\n\"\"\"\n)\n\ninverted_triangle = _glyph_function(markers.InvertedTriangle, (\"x\", \"y\"), (),\n\"\"\" The `inverted_triangle` glyph is a marker that renders upside-down triangles at `x`, `y` with size `size` or `radius`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\nline = _glyph_function(glyphs.Line, (\"x\", \"y\"), (),\n\"\"\" The line glyph displays a single line that connects several points given by the arrays of coordinates `x` and `y`.\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties`\nare also 
accepted as keyword parameters.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of line `x` coordinates\n y (str or list[float]) : values or field names of line `y` coordinates\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\nmulti_line = _glyph_function(glyphs.MultiLine, (\"xs\", \"ys\"), (),\n\"\"\" The multi_line glyph displays lines, each with points given by the arrays of coordinates that are the elements of xs and ys.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n xs (str or list[list[float]]): values or field names of lines `x` coordinates\n ys (str or list[list[float]]): values or field names of lines `y` coordinates\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties`\nare also accepted as keyword parameters.\n\n.. note:: For this glyph, the data is not simply an array of scalars, it is really an \"array of arrays\".\n\nReturns:\n plot\n\n\"\"\",\n xfields=[\"xs\"], yfields=[\"ys\"],\n)\n\noval = _glyph_function(glyphs.Oval, (\"x\", \"y\", \"width\", \"height\"), (),\n\"\"\" The oval glyph displays ovals centered on the given coordinates with the given dimensions and angle.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n width (str or list[float]) : values or field names of widths\n height (str or list[float]) : values or field names of heights\n angle (str or list[float], optional) : values or field names of rotation angles, defaults to 0\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\npatch = _glyph_function(glyphs.Patch, (\"x\", \"y\"), (),\n\"\"\" The patch glyph displays a single polygonal patch that connects several points given by the arrays of coordinates `x` and `y`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of patch `x` coordinates\n y (str or list[float]) : values or field names of patch `y` coordinates\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\npatches = _glyph_function(glyphs.Patches, (\"xs\", \"ys\"), (),\n\"\"\" The patches glyph displays several patches, each with points given by the arrays of coordinates that are the elements of xs and ys.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n xs (str or list[list[float]]): values or field names of patches `x` coordinates\n ys (str or list[list[float]]): values or field names of patches `y` coordinates\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\n.. 
note:: For this glyph, the data is not simply an array of scalars, it is really an \"array of arrays\".\n\nReturns:\n plot\n\n\"\"\",\n xfields=[\"xs\"], yfields=[\"ys\"],\n)\n\nquad = _glyph_function(glyphs.Quad, (\"left\", \"right\", \"top\", \"bottom\"), (),\n\"\"\" The quad glyph displays axis-aligned rectangles with the given dimensions.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n left (str or list[float]) : values or field names of left edges\n right (str or list[float]) : values or field names of right edges\n top (str or list[float]) : values or field names of top edges\n bottom (str or list[float]) : values or field names of bottom edges\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\",\n xfields=[\"left\", \"right\"], yfields=[\"top\", \"bottom\"])\n\nquadratic = _glyph_function(glyphs.Quadratic, (\"x0\", \"y0\", \"x1\", \"y1\", \"cx\", \"cy\"), (),\n\"\"\" The quadratic glyph displays quadratic curves with the given starting, ending, and control points.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x0 (str or list[float]) : values or field names of starting `x` coordinates\n y0 (str or list[float]) : values or field names of starting `y` coordinates\n x1 (str or list[float]) : values or field names of ending `x` coordinates\n y1 (str or list[float]) : values or field names of ending `y` coordinates\n cx (str or list[float]) : values or field names of control point `x` coordinates\n cy (str or list[float]) : values or field names of control point `y` coordinates\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot (Plot) : a plot to add this glyph to\n plot\n\"\"\",\n xfields=[\"x0\", \"x1\"], yfields=[\"y0\", \"y1\"])\n\nray = _glyph_function(glyphs.Ray, (\"x\", \"y\", \"length\", \"angle\"), (),\n\"\"\" The ray glyph displays line segments starting at the given coordinate and extending the given length at the given angle.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n length (str or list[float]) : values or field names of ray lengths in screen units\n angle (str or list[float]) : values or field names of ray angles\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\nrect = _glyph_function(glyphs.Rect, (\"x\", \"y\", \"width\", \"height\"), (\"dilate\",),\n\"\"\" The rect glyph displays rectangles centered on the given coordinates with the given dimensions and angle.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n width (str or list[float]) : values or field names of widths\n height (str or list[float]) : values or field names of heights\n angle (str or list[float], optional) : values or field names of rotation angles, defaults to 0\n dilate (bool, optional) : whether to dilate pixel distance computations when drawing, defaults to False\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and 
:ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\nNotes:\n setting `dilate` to True will cause pixel distances (e.g., for `width` and `height`) to\n be rounded up, always.\n\n\"\"\"\n)\n\nsegment = _glyph_function(glyphs.Segment, (\"x0\", \"y0\", \"x1\", \"y1\"), (),\n\"\"\" The segment glyph displays line segments with the given starting and ending coordinates.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x0 (str or list[float]) : values or field names of starting `x` coordinates\n y0 (str or list[float]) : values or field names of starting `y` coordinates\n x1 (str or list[float]) : values or field names of ending `x` coordinates\n y1 (str or list[float]) : values or field names of ending `y` coordinates\n\nIn addition the the parameters specific to this glyph, :ref:`userguide_objects_line_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\",\n xfields=[\"x0\", \"x1\"], yfields=[\"y0\", \"y1\"])\n\nsquare = _glyph_function(markers.Square, (\"x\", \"y\"), (),\n\"\"\" The `square` glyph is a marker that renders squares at `x`, `y` with size `size`.\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nReturns:\n plot\n\"\"\"\n)\n\nsquare_cross = _glyph_function(markers.SquareCross, (\"x\", \"y\"), (),\n\"\"\" The `square_cross` glyph is a marker that renders squares together with a crossbar (+) at `x`, `y` with size `size`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nIn addition the the parameters specific to this glyph, :ref:`userguide_objects_line_properties` and\n:ref:`userguide_objects_fill_properties` are also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\nsquare_x = _glyph_function(markers.SquareX, (\"x\", \"y\"), (),\n\"\"\" The `square_x` glyph is a marker that renders squares together with \"X\" glyphs at `x`, `y` with size `size`.\n\nIn addition the the parameters specific to this glyph, :ref:`userguide_objects_line_properties` and\n:ref:`userguide_objects_fill_properties` are also accepted as keyword parameters.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nReturns:\n plot\n\"\"\"\n)\n\ntext = _glyph_function(glyphs.Text, (\"x\", \"y\", \"text\"), (),\n\"\"\" The text glyph displays text at the given coordinates rotated by the given angle.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of text `x` coordinates\n y (str or list[float]) : values or field names of text `y` coordinates\n text (str or list[text]): values or field names of texts\n angle (str or list[float], optional) : values or field names of text angles, defaults to 0\n\nIn 
addition the the parameters specific to this glyph, :ref:`userguide_objects_text_properties`\nare also accepted as keyword parameters.\n\n.. note:: The location and angle of the text relative to the `x`, `y` coordinates is indicated by the alignment and baseline text properties.\n\nReturns:\n plot\n\"\"\"\n)\n\ntriangle = _glyph_function(markers.Triangle, (\"x\", \"y\"), (),\n\"\"\" The `triangle` glyph is a marker that renders triangles at `x`, `y` with size `size`.\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nReturns:\n plot\n\"\"\"\n)\n\nwedge = _glyph_function(glyphs.Wedge, (\"x\", \"y\", \"radius\", \"start_angle\", \"end_angle\"), (\"direction\",),\n\"\"\" The `wedge` glyph renders circular wedges centered at `x`, `y`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n radius (str or list[float]) : values or field names of wedge radii\n start_angle (str or list[float]) : values or field names of starting angles\n end_angle (str or list[float]) : values or field names of ending angles\n direction (\"clock\" or \"anticlock\", optional): direction to turn between starting and ending angles, defaults to \"anticlock\"\n\nIn addition the the parameters specific to this glyph, :ref:`userguide_objects_line_properties` and\n:ref:`userguide_objects_fill_properties` are also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\nx = _glyph_function(markers.X, (\"x\", \"y\"), (),\n\"\"\" The `x` glyph is a marker that renders \"x\" glyphs at `x`, `y` with size `size`.\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties`\nare also accepted as keyword parameters.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nReturns:\n plot\n\"\"\"\n)\n", "path": "bokeh/_glyph_functions.py" } ]
[ { "content": "from __future__ import absolute_import\n\nfrom six import iteritems\nfrom collections import OrderedDict\n\nfrom .models import glyphs, markers\nfrom .mixins import FillProps, LineProps\n\ndef _glyph_function(glyphclass, dsnames, argnames, docstring, xfields=[\"x\"], yfields=[\"y\"]):\n\n def func(document_or_plot, *args, **kwargs):\n # Note: We want to reuse the glyph functions by attaching them the Plot\n # class. Imports are here to prevent circular imports.\n from .plotting_helpers import (\n _match_data_params, _update_plot_data_ranges,\n _materialize_colors_and_alpha, _get_legend,\n _make_legend, _get_select_tool\n )\n from .models import ColumnDataSource, GlyphRenderer, Plot, ServerDataSource\n source = kwargs.pop('source', None)\n if isinstance(source, ServerDataSource):\n datasource = ColumnDataSource()\n serversource = source\n elif source is None:\n datasource = ColumnDataSource()\n serversource = None\n else:\n datasource = source\n serversource = None\n\n legend_name = kwargs.pop(\"legend\", None)\n\n from .document import Document\n document = None\n plot = None\n if isinstance(document_or_plot, Plot):\n plot = document_or_plot\n # TODO (bev) this seems like it should be here but invalid kwargs\n # currently get through (see also below)\n # plot.update(**kwargs)\n elif isinstance(document_or_plot, Document):\n document = document_or_plot\n if document.curplot() is not None and document._hold:\n plot = document.curplot()\n # plot.update(**kwargs)\n else:\n plot = document.figure(**kwargs)\n else:\n raise ValueError(\"expected document or plot object for first argument\")\n\n name = kwargs.pop('name', None)\n\n select_tool = _get_select_tool(plot)\n\n # Process the glyph dataspec parameters\n glyph_params = _match_data_params(dsnames, glyphclass,\n datasource, serversource,\n args, _materialize_colors_and_alpha(kwargs))\n\n x_data_fields = []\n for xx in xfields:\n if not isinstance(glyph_params[xx], dict): continue\n if glyph_params[xx]['units'] == 'data': x_data_fields.append(glyph_params[xx]['field'])\n y_data_fields = []\n for yy in yfields:\n if not isinstance(glyph_params[yy], dict): continue\n if glyph_params[yy]['units'] == 'data': y_data_fields.append(glyph_params[yy]['field'])\n\n _update_plot_data_ranges(plot, datasource, x_data_fields, y_data_fields)\n kwargs.update(glyph_params)\n\n glyph_props = glyphclass.properties() | set(argnames)\n glyph_kwargs = dict((key, value) for (key, value) in iteritems(kwargs) if key in glyph_props)\n glyph = glyphclass(**glyph_kwargs)\n\n nonselection_glyph_params = _materialize_colors_and_alpha(kwargs, prefix='nonselection_', default_alpha=0.1)\n nonselection_glyph = glyph.clone()\n\n if isinstance(nonselection_glyph, FillProps):\n nonselection_glyph.fill_color = nonselection_glyph_params['fill_color']\n nonselection_glyph.fill_alpha = nonselection_glyph_params['fill_alpha']\n\n if isinstance(nonselection_glyph, LineProps):\n nonselection_glyph.line_color = nonselection_glyph_params['line_color']\n nonselection_glyph.line_alpha = nonselection_glyph_params['line_alpha']\n\n glyph_renderer = GlyphRenderer(\n data_source=datasource,\n server_data_source=serversource,\n glyph=glyph,\n nonselection_glyph=nonselection_glyph,\n name=name)\n\n # TODO (bev) hacky, fix up when glyphspecs are simplified/removed\n if 'x_range_name' in kwargs:\n glyph_renderer.x_range_name = kwargs['x_range_name']\n if 'y_range_name' in kwargs:\n glyph_renderer.y_range_name = kwargs['y_range_name']\n\n if legend_name:\n legend = 
_get_legend(plot)\n if not legend:\n legend = _make_legend(plot)\n legends = OrderedDict(legend.legends)\n legends.setdefault(legend_name, []).append(glyph_renderer)\n legend.legends = list(legends.items())\n\n if select_tool :\n select_tool.renderers.append(glyph_renderer)\n select_tool._dirty = True\n\n plot.renderers.append(glyph_renderer)\n plot._dirty = True\n if document and document.autoadd:\n document.add(plot)\n return plot\n func.__name__ = glyphclass.__view_model__\n func.__doc__ = docstring\n return func\n\nannular_wedge = _glyph_function(glyphs.AnnularWedge, (\"x\", \"y\", \"inner_radius\", \"outer_radius\", \"start_angle\", \"end_angle\"), (\"direction\",),\n\"\"\" The `annular_wedge` glyph renders annular wedges centered at `x`, `y`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n inner_radius (str or list[float]) : values or field names of inner radii\n outer_radius (str or list[float]) : values or field names of outer radii\n start_angle (str or list[float]) : values or field names of starting angles\n end_angle (str or list[float]) : values or field names of ending angles\n direction (\"clock\" or \"anticlock\", optional): direction to turn between starting and ending angles, defaults to \"anticlock\"\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\nannulus = _glyph_function(glyphs.Annulus, (\"x\", \"y\" ,\"inner_radius\", \"outer_radius\"), (),\n\"\"\" The `annulus` glyph renders annuli centered at `x`, `y`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n inner_radius (str or list[float]) : values or field names of inner radii\n outer_radius (str or list[float]) : values or field names of outer radii\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\narc = _glyph_function(glyphs.Arc, (\"x\", \"y\", \"radius\" ,\"start_angle\", \"end_angle\"), (\"direction\",),\n\"\"\" The `arc` glyph renders circular arcs centered at `x`, `y`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n radius (str or list[float]) : values or field names of arc radii\n start_angle (str or list[float]) : values or field names of starting angles\n end_angle (str or list[float]) : values or field names of ending angles\n direction (\"clock\" or \"anticlock\", optional): direction to turn between starting and ending angles, defaults to \"anticlock\"\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\nasterisk = _glyph_function(markers.Asterisk, (\"x\", \"y\"), (),\n\"\"\" The `asterisk` glyph is a marker that renders asterisks at `x`, `y` with size `size`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y 
(str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\nbezier = _glyph_function(glyphs.Bezier, (\"x0\", \"y0\", \"x1\", \"y1\", \"cx0\", \"cy0\", \"cx1\", \"cy1\"), (),\n\"\"\" The bezier glyph displays Bezier curves with the given starting, ending, and control points.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x0 (str or list[float]) : values or field names of starting `x` coordinates\n y0 (str or list[float]) : values or field names of starting `y` coordinates\n x1 (str or list[float]) : values or field names of ending `x` coordinates\n y1 (str or list[float]) : values or field names of ending `y` coordinates\n cx0 (str or list[float]) : values or field names of first control point `x` coordinates\n cy0 (str or list[float]) : values or field names of first control point `y` coordinates\n cx1 (str or list[float]) : values or field names of second control point `x` coordinates\n cy1 (str or list[float]) : values or field names of second control point `y` coordinates\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\",\n xfields=['x0', 'x1'], yfields=['y0', 'y1'])\n\ncircle = _glyph_function(markers.Circle, (\"x\", \"y\"), (),\n\"\"\" The `circle` glyph is a marker that renders circles at `x`, `y` with size `size`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float], optional) : values or field names of sizes in screen units\n radius (str or list[float], optional): values or field names of radii\n\nIn addition the the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\nNotes:\n Only one of `size` or `radius` should be provided. 
Note that `radius` defaults to data units.\n\"\"\"\n)\n\ncircle_cross = _glyph_function(markers.CircleCross, (\"x\", \"y\"), (),\n\"\"\" The `circle_cross` glyph is a marker that renders circles together with a crossbar (+) at `x`, `y` with size `size`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nIn addition to the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\ncircle_x = _glyph_function(markers.CircleX, (\"x\", \"y\"), (),\n\"\"\" The `circle_x` glyph is a marker that renders circles together with an \"X\" glyph at `x`, `y` with size `size`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nIn addition to the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\ncross = _glyph_function(markers.Cross, (\"x\", \"y\"), (),\n\"\"\" The `cross` glyph is a marker that renders crossbars (+) at `x`, `y` with size `size`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nIn addition to the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\ndiamond = _glyph_function(markers.Diamond, (\"x\", \"y\"), (),\n\"\"\" The `diamond` glyph is a marker that renders diamonds at `x`, `y` with size `size`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nIn addition to the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\ndiamond_cross = _glyph_function(markers.DiamondCross, (\"x\", \"y\"), (),\n\"\"\" The `diamond_cross` glyph is a marker that renders diamonds together with a crossbar (+) at `x`, `y` with size `size`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nIn addition to the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\nimage = _glyph_function(glyphs.Image, (\"image\", \"x\", \"y\", \"dw\", \"dh\"), ('palette', 'reserve_color', 
'reserve_val', 'color_mapper', 'dilate'),\n\"\"\" The image glyph takes each image as a two-dimensional array of scalar data.\n\nA palette (string name of a built-in palette, currently) must also be supplied to use for color-mapping the scalar image.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n image (str or 2D array_like of float) : value or field names of scalar image data\n x (str or list[float]) : values or field names of lower left `x` coordinates\n y (str or list[float]) : values or field names of lower left `y` coordinates\n dw (str or list[float]) : values or field names of image width distances\n dh (str or list[float]) : values or field names of image height distances\n palette (str or list[str]) : values or field names of palettes to use for color-mapping (see :ref:`bokeh_dot_palettes` for more details)\n color_mapper (LinearColorMapper) : a LinearColorMapper instance\n dilate (bool, optional) : whether to dilate pixel distance computations when drawing, defaults to False\n\nReturns:\n plot\n\nNotes:\n setting `dilate` to True will cause pixel distances (e.g., for `dw` and `dh`) to\n be rounded up, always.\n\"\"\"\n)\n\nimage_rgba = _glyph_function(glyphs.ImageRGBA, (\"image\", \"x\", \"y\", \"dw\", \"dh\"), (\"dilate\",),\n\"\"\" The image_rgba glyph takes each ``image`` as a two-dimensional array of RGBA values (encoded\nas 32-bit integers).\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n image (str or 2D array_like of uint32) : value or field names of RGBA image data\n x (str or list[float]) : values or field names of lower left `x` coordinates\n y (str or list[float]) : values or field names of lower left `y` coordinates\n dw (str or list[float]) : values or field names of image width distances\n dh (str or list[float]) : values or field names of image height distances\n dilate (bool, optional) : whether to dilate pixel distance computations when drawing, defaults to False\n\nReturns:\n plot\n\nNotes:\n setting `dilate` to True will cause pixel distances (e.g., for `dw` and `dh`) to\n be rounded up, always.\n\"\"\"\n)\n\nimage_url = _glyph_function(glyphs.ImageURL, (\"url\", \"x\", \"y\"), (),\n\"\"\"The image_url glyph takes URLs of images to display.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n url (str) : values or field names of image URLs\n x (str or list[float]) : values or field names of upper left `x` coordinates\n y (str or list[float]) : values or field names of upper left `y` coordinates\n angle (float) : angle to rotate image by\n\nReturns:\n plot\n\"\"\"\n)\n\ninverted_triangle = _glyph_function(markers.InvertedTriangle, (\"x\", \"y\"), (),\n\"\"\" The `inverted_triangle` glyph is a marker that renders upside-down triangles at `x`, `y` with size `size`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nIn addition to the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\nline = _glyph_function(glyphs.Line, (\"x\", \"y\"), (),\n\"\"\" The line glyph displays a single line that connects several points given by the arrays of coordinates `x` and `y`.\n\nIn addition to the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties`\nare also 
accepted as keyword parameters.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of line `x` coordinates\n y (str or list[float]) : values or field names of line `y` coordinates\n\nReturns:\n plot\n\"\"\"\n)\n\nmulti_line = _glyph_function(glyphs.MultiLine, (\"xs\", \"ys\"), (),\n\"\"\" The multi_line glyph displays lines, each with points given by the arrays of coordinates that are the elements of xs and ys.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n xs (str or list[list[float]]): values or field names of lines `x` coordinates\n ys (str or list[list[float]]): values or field names of lines `y` coordinates\n\nIn addition to the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties`\nare also accepted as keyword parameters.\n\n.. note:: For this glyph, the data is not simply an array of scalars, it is really an \"array of arrays\".\n\nReturns:\n plot\n\n\"\"\",\n xfields=[\"xs\"], yfields=[\"ys\"],\n)\n\noval = _glyph_function(glyphs.Oval, (\"x\", \"y\", \"width\", \"height\"), (),\n\"\"\" The oval glyph displays ovals centered on the given coordinates with the given dimensions and angle.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n width (str or list[float]) : values or field names of widths\n height (str or list[float]) : values or field names of heights\n angle (str or list[float], optional) : values or field names of rotation angles, defaults to 0\n\nIn addition to the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\npatch = _glyph_function(glyphs.Patch, (\"x\", \"y\"), (),\n\"\"\" The patch glyph displays a single polygonal patch that connects several points given by the arrays of coordinates `x` and `y`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of patch `x` coordinates\n y (str or list[float]) : values or field names of patch `y` coordinates\n\nIn addition to the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\npatches = _glyph_function(glyphs.Patches, (\"xs\", \"ys\"), (),\n\"\"\" The patches glyph displays several patches, each with points given by the arrays of coordinates that are the elements of xs and ys.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n xs (str or list[list[float]]): values or field names of patches `x` coordinates\n ys (str or list[list[float]]): values or field names of patches `y` coordinates\n\nIn addition to the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\n.. 
note:: For this glyph, the data is not simply an array of scalars, it is really an \"array of arrays\".\n\nReturns:\n plot\n\n\"\"\",\n xfields=[\"xs\"], yfields=[\"ys\"],\n)\n\nquad = _glyph_function(glyphs.Quad, (\"left\", \"right\", \"top\", \"bottom\"), (),\n\"\"\" The quad glyph displays axis-aligned rectangles with the given dimensions.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n left (str or list[float]) : values or field names of left edges\n right (str or list[float]) : values or field names of right edges\n top (str or list[float]) : values or field names of top edges\n bottom (str or list[float]) : values or field names of bottom edges\n\nIn addition to the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\",\n xfields=[\"left\", \"right\"], yfields=[\"top\", \"bottom\"])\n\nquadratic = _glyph_function(glyphs.Quadratic, (\"x0\", \"y0\", \"x1\", \"y1\", \"cx\", \"cy\"), (),\n\"\"\" The quadratic glyph displays quadratic curves with the given starting, ending, and control points.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x0 (str or list[float]) : values or field names of starting `x` coordinates\n y0 (str or list[float]) : values or field names of starting `y` coordinates\n x1 (str or list[float]) : values or field names of ending `x` coordinates\n y1 (str or list[float]) : values or field names of ending `y` coordinates\n cx (str or list[float]) : values or field names of control point `x` coordinates\n cy (str or list[float]) : values or field names of control point `y` coordinates\n\nIn addition to the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\",\n xfields=[\"x0\", \"x1\"], yfields=[\"y0\", \"y1\"])\n\nray = _glyph_function(glyphs.Ray, (\"x\", \"y\", \"length\", \"angle\"), (),\n\"\"\" The ray glyph displays line segments starting at the given coordinate and extending the given length at the given angle.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n length (str or list[float]) : values or field names of ray lengths in screen units\n angle (str or list[float]) : values or field names of ray angles\n\nIn addition to the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\nrect = _glyph_function(glyphs.Rect, (\"x\", \"y\", \"width\", \"height\"), (\"dilate\",),\n\"\"\" The rect glyph displays rectangles centered on the given coordinates with the given dimensions and angle.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n width (str or list[float]) : values or field names of widths\n height (str or list[float]) : values or field names of heights\n angle (str or list[float], optional) : values or field names of rotation angles, defaults to 0\n dilate (bool, optional) : whether to dilate pixel distance computations when drawing, defaults to False\n\nIn addition to the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and 
:ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\nNotes:\n setting `dilate` to True will cause pixel distances (e.g., for `width` and `height`) to\n be rounded up, always.\n\n\"\"\"\n)\n\nsegment = _glyph_function(glyphs.Segment, (\"x0\", \"y0\", \"x1\", \"y1\"), (),\n\"\"\" The segment glyph displays line segments with the given starting and ending coordinates.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x0 (str or list[float]) : values or field names of starting `x` coordinates\n y0 (str or list[float]) : values or field names of starting `y` coordinates\n x1 (str or list[float]) : values or field names of ending `x` coordinates\n y1 (str or list[float]) : values or field names of ending `y` coordinates\n\nIn addition to the parameters specific to this glyph, :ref:`userguide_objects_line_properties`\nare also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\",\n xfields=[\"x0\", \"x1\"], yfields=[\"y0\", \"y1\"])\n\nsquare = _glyph_function(markers.Square, (\"x\", \"y\"), (),\n\"\"\" The `square` glyph is a marker that renders squares at `x`, `y` with size `size`.\n\nIn addition to the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nReturns:\n plot\n\"\"\"\n)\n\nsquare_cross = _glyph_function(markers.SquareCross, (\"x\", \"y\"), (),\n\"\"\" The `square_cross` glyph is a marker that renders squares together with a crossbar (+) at `x`, `y` with size `size`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nIn addition to the parameters specific to this glyph, :ref:`userguide_objects_line_properties` and\n:ref:`userguide_objects_fill_properties` are also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\nsquare_x = _glyph_function(markers.SquareX, (\"x\", \"y\"), (),\n\"\"\" The `square_x` glyph is a marker that renders squares together with \"X\" glyphs at `x`, `y` with size `size`.\n\nIn addition to the parameters specific to this glyph, :ref:`userguide_objects_line_properties` and\n:ref:`userguide_objects_fill_properties` are also accepted as keyword parameters.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nReturns:\n plot\n\"\"\"\n)\n\ntext = _glyph_function(glyphs.Text, (\"x\", \"y\", \"text\"), (),\n\"\"\" The text glyph displays text at the given coordinates rotated by the given angle.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of text `x` coordinates\n y (str or list[float]) : values or field names of text `y` coordinates\n text (str or list[text]): values or field names of texts\n angle (str or list[float], optional) : values or field names of text angles, defaults to 0\n\nIn 
addition to the parameters specific to this glyph, :ref:`userguide_objects_text_properties`\nare also accepted as keyword parameters.\n\n.. note:: The location and angle of the text relative to the `x`, `y` coordinates is indicated by the alignment and baseline text properties.\n\nReturns:\n plot\n\"\"\"\n)\n\ntriangle = _glyph_function(markers.Triangle, (\"x\", \"y\"), (),\n\"\"\" The `triangle` glyph is a marker that renders triangles at `x`, `y` with size `size`.\n\nIn addition to the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties`\nare also accepted as keyword parameters.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nReturns:\n plot\n\"\"\"\n)\n\nwedge = _glyph_function(glyphs.Wedge, (\"x\", \"y\", \"radius\", \"start_angle\", \"end_angle\"), (\"direction\",),\n\"\"\" The `wedge` glyph renders circular wedges centered at `x`, `y`.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n radius (str or list[float]) : values or field names of wedge radii\n start_angle (str or list[float]) : values or field names of starting angles\n end_angle (str or list[float]) : values or field names of ending angles\n direction (\"clock\" or \"anticlock\", optional): direction to turn between starting and ending angles, defaults to \"anticlock\"\n\nIn addition to the parameters specific to this glyph, :ref:`userguide_objects_line_properties` and\n:ref:`userguide_objects_fill_properties` are also accepted as keyword parameters.\n\nReturns:\n plot\n\"\"\"\n)\n\nx = _glyph_function(markers.X, (\"x\", \"y\"), (),\n\"\"\" The `x` glyph is a marker that renders \"x\" glyphs at `x`, `y` with size `size`.\n\nIn addition to the parameters specific to this glyph,\n:ref:`userguide_objects_line_properties`\nare also accepted as keyword parameters.\n\nArgs:\n plot (Plot) : a plot to add this glyph to\n x (str or list[float]) : values or field names of center `x` coordinates\n y (str or list[float]) : values or field names of center `y` coordinates\n size (str or list[float]) : values or field names of sizes in screen units\n\nReturns:\n plot\n\"\"\"\n)\n", "path": "bokeh/_glyph_functions.py" } ]
diff --git a/bokeh/_glyph_functions.py b/bokeh/_glyph_functions.py index 757e0d784c6..ce6c3e21fcd 100644 --- a/bokeh/_glyph_functions.py +++ b/bokeh/_glyph_functions.py @@ -49,8 +49,6 @@ def func(document_or_plot, *args, **kwargs): raise ValueError("expected document or plot object for first argument") name = kwargs.pop('name', None) - if name: - plot._id = name select_tool = _get_select_tool(plot)
voxel51__fiftyone-1392
[BUG] App label filter not working in Colab I ran through the [quickstart](https://colab.research.google.com/github/voxel51/fiftyone-examples/blob/master/examples/quickstart.ipynb) in Colab (using `fiftyone==0.14.0`) and hit an error when trying to use a `label` filter in the App. The error also occurred when using the same label filter from the expanded modal. However, other filters such as confidence and the alternate low-choices mode of the label filter were working as expected, so I believe this is strictly related to the autocomplete-style filter UI. I cannot reproduce this error using `fiftyone==0.14.0` outside of Colab. <img width="1156" alt="Screen Shot 2021-11-03 at 10 54 54 AM" src="https://user-images.githubusercontent.com/25985824/140086403-9f2ab519-c595-47a9-a3b3-a7b772ace06e.png">
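For reference, a minimal sketch of the Colab workflow that surfaces the behavior; the snippet is illustrative and assumes the zoo quickstart dataset from the linked notebook:

```python
# Illustrative repro sketch; assumes a Colab runtime with fiftyone==0.14.0
import fiftyone as fo
import fiftyone.zoo as foz

# Load the small quickstart dataset used by the linked notebook
dataset = foz.load_zoo_dataset("quickstart")

# In Colab, this renders the App inline in the cell output
session = fo.launch_app(dataset)

# The error appears after expanding the `predictions` field in the sidebar
# and selecting a value in the autocomplete-style label filter; numeric
# filters such as confidence behave as expected
```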
[ { "content": "\"\"\"\nFiftyOne Tornado server.\n\n| Copyright 2017-2021, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport asyncio\nimport argparse\nfrom collections import defaultdict\nfrom datetime import date, datetime, timedelta\nimport math\nimport os\nimport traceback\n\nimport tornado.escape\nimport tornado.ioloop\nimport tornado.iostream\nimport tornado.options\nimport tornado.web\nfrom tornado.web import HTTPError\nimport tornado.websocket\n\nimport eta.core.serial as etas\n\nif os.environ.get(\"FIFTYONE_DISABLE_SERVICES\", False):\n del os.environ[\"FIFTYONE_DISABLE_SERVICES\"]\n\nos.environ[\"FIFTYONE_SERVER\"] = \"1\"\n\nimport fiftyone as fo\nimport fiftyone.core.aggregations as foa\nimport fiftyone.constants as foc\nimport fiftyone.core.clips as focl\nfrom fiftyone.core.expressions import ViewField as F, _escape_regex_chars\nimport fiftyone.core.dataset as fod\nimport fiftyone.core.fields as fof\nimport fiftyone.core.labels as fol\nimport fiftyone.core.media as fom\nimport fiftyone.core.odm as foo\nfrom fiftyone.core.stages import _STAGES\nimport fiftyone.core.stages as fosg\nimport fiftyone.core.state as fos\nimport fiftyone.core.uid as fou\nimport fiftyone.core.utils as fout\nimport fiftyone.core.view as fov\n\nfrom fiftyone.server.colorscales import ColorscalesHandler\nfrom fiftyone.server.extended_view import get_extended_view, get_view_field\nfrom fiftyone.server.json_util import convert, FiftyOneJSONEncoder\nimport fiftyone.server.utils as fosu\n\n\ndb = foo.get_async_db_conn()\n_notebook_clients = {}\n_deactivated_clients = set()\n_DISCONNECT_TIMEOUT = 1 # seconds\n_DEFAULT_NUM_HISTOGRAM_BINS = 25\n_LIST_LIMIT = 200\n\n\nclass RequestHandler(tornado.web.RequestHandler):\n \"\"\"\"Base class for HTTP request handlers\"\"\"\n\n def set_default_headers(self, *args, **kwargs):\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n self.set_header(\"Access-Control-Allow-Methods\", \"POST, GET, OPTIONS\")\n self.set_header(\"x-colab-notebook-cache-control\", \"no-cache\")\n\n async def get(self):\n self.write(self.get_response())\n\n def get_response(self):\n \"\"\"Returns the serializable response\n\n Returns:\n dict\n \"\"\"\n raise NotImplementedError(\"subclass must implement get_response()\")\n\n\nclass FiftyOneHandler(RequestHandler):\n \"\"\"Returns the version info of the fiftyone being used\"\"\"\n\n @staticmethod\n def get_response():\n \"\"\"Returns the serializable response\n\n Returns:\n dict\n \"\"\"\n uid, _ = fou.get_user_id()\n isfile = os.path.isfile(foc.TEAMS_PATH)\n if isfile:\n submitted = etas.load_json(foc.TEAMS_PATH)[\"submitted\"]\n else:\n submitted = False\n\n return {\n \"version\": foc.VERSION,\n \"user_id\": uid,\n \"do_not_track\": fo.config.do_not_track,\n \"teams\": {\"submitted\": submitted, \"minimized\": isfile},\n \"dev_install\": foc.DEV_INSTALL or foc.RC_INSTALL,\n }\n\n\nclass NotebookHandler(RequestHandler):\n \"\"\"Check that the requested handle exists on the server\"\"\"\n\n async def get(self):\n # pylint: disable=no-value-for-parameter\n handle_id = self.get_argument(\"handleId\")\n\n response = self.get_response(handle_id)\n if response is None:\n raise tornado.web.HTTPError(status_code=404)\n\n self.write(response)\n\n @staticmethod\n def get_response(handle):\n \"\"\"Returns if the notebook handle exists on the server.\n\n Returns:\n the handle ID\n \"\"\"\n global _notebook_clients\n if handle in 
set(_notebook_clients.values()):\n return {\"exists\": True}\n\n\nclass ReactivateHandler(RequestHandler):\n \"\"\"Reactivates an IPython display handle\"\"\"\n\n async def get(self):\n # pylint: disable=no-value-for-parameter\n handle_id = self.get_argument(\"handleId\")\n self.write(self.get_response(handle_id))\n\n @staticmethod\n def get_response(handle_id):\n \"\"\"Returns an empty response on success\n\n Args:\n handle_id: a handle uuid\n \"\"\"\n StateHandler.state[\"active_handle\"] = handle_id\n global _deactivated_clients\n _deactivated_clients.discard(handle_id)\n for client in StateHandler.clients:\n client.write_message({\"type\": \"reactivate\", \"handle\": handle_id})\n\n return {}\n\n\nclass StagesHandler(RequestHandler):\n \"\"\"Returns the definitions of stages available to the App\"\"\"\n\n @staticmethod\n def get_response():\n \"\"\"Returns the serializable response\n\n Returns:\n dict\n \"\"\"\n return {\n \"stages\": [\n {\"name\": stage.__name__, \"params\": stage._params()}\n for stage in _STAGES\n ]\n }\n\n\nclass FramesHandler(tornado.web.RequestHandler):\n \"\"\"Frames stream requests\"\"\"\n\n def set_default_headers(self, *args, **kwargs):\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n self.set_header(\"Access-Control-Allow-Methods\", \"POST, GET, OPTIONS\")\n self.set_header(\"x-colab-notebook-cache-control\", \"no-cache\")\n\n async def get(self):\n # pylint: disable=no-value-for-parameter\n sample_id = self.get_argument(\"sampleId\", None)\n # pylint: disable=no-value-for-parameter\n start_frame = int(self.get_argument(\"frameNumber\"))\n # pylint: disable=no-value-for-parameter\n frame_count = int(self.get_argument(\"frameCount\"))\n\n if sample_id is None or start_frame is None:\n raise ValueError(\"error\")\n\n end_frame = min(\n # pylint: disable=no-value-for-parameter\n int(self.get_argument(\"numFrames\")) + start_frame,\n frame_count,\n )\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n elif state.dataset is not None:\n view = state.dataset\n\n view = fov.make_optimized_select_view(view, sample_id)\n view = view.set_field(\n \"frames\",\n F(\"frames\").filter(\n (F(\"frame_number\") >= start_frame)\n & (F(\"frame_number\") <= end_frame)\n ),\n )\n\n frames = await foo.aggregate(\n StateHandler.sample_collection(), view._pipeline(frames_only=True)\n ).to_list(end_frame - start_frame + 1)\n convert(frames)\n self.write({\"frames\": frames, \"range\": [start_frame, end_frame]})\n\n\nclass PageHandler(tornado.web.RequestHandler):\n \"\"\"Page requests\n\n Args:\n page: the page number\n page_length (20): the number of items to return\n \"\"\"\n\n def set_default_headers(self, *args, **kwargs):\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n self.set_header(\"Access-Control-Allow-Methods\", \"POST, GET, OPTIONS\")\n self.set_header(\"x-colab-notebook-cache-control\", \"no-cache\")\n\n async def get(self):\n # pylint: disable=no-value-for-parameter\n page = int(self.get_argument(\"page\", 1))\n page_length = int(self.get_argument(\"page_length\", 20))\n\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n elif state.dataset is not None:\n view = state.dataset\n else:\n self.write({\"results\": [], \"more\": False})\n return\n\n view = get_extended_view(view, state.filters, 
count_labels_tags=True)\n if view.media_type == fom.VIDEO:\n if isinstance(view, focl.ClipsView):\n expr = F(\"frame_number\") == F(\"$support\")[0]\n else:\n expr = F(\"frame_number\") == 1\n\n view = view.set_field(\"frames\", F(\"frames\").filter(expr))\n\n view = view.skip((page - 1) * page_length)\n\n samples = await foo.aggregate(\n StateHandler.sample_collection(),\n view._pipeline(attach_frames=True, detach_frames=False),\n ).to_list(page_length + 1)\n convert(samples)\n\n more = False\n if len(samples) > page_length:\n samples = samples[:page_length]\n more = page + 1\n\n results = [{\"sample\": s} for s in samples]\n metadata = {}\n\n for r in results:\n filepath = r[\"sample\"][\"filepath\"]\n if filepath not in metadata:\n metadata[filepath] = fosu.read_metadata(\n filepath, r[\"sample\"].get(\"metadata\", None)\n )\n\n r.update(metadata[filepath])\n\n self.write({\"results\": results, \"more\": more})\n\n\nclass TeamsHandler(RequestHandler):\n \"\"\"Returns whether the teams button should be minimized\"\"\"\n\n def post(self):\n submitted = self.get_argument(\"submitted\", \"\") == \"true\"\n etas.write_json({\"submitted\": submitted}, foc.TEAMS_PATH)\n\n\ndef _catch_errors(func):\n async def wrapper(self, *args, **kwargs):\n try:\n StateHandler.prev_state = StateHandler.state\n result = await func(self, *args, **kwargs)\n return result\n except Exception:\n StateHandler.state = StateHandler.prev_state\n clients = list(StateHandler.clients)\n if isinstance(self, PollingHandler):\n clients.append(self)\n\n for client in clients:\n client.write_message(\n {\n \"type\": \"notification\",\n \"kind\": \"Server Error\",\n \"message\": (\n \"An exception has been raised by the server. Your session \"\n \"has been reverted to its previous state.\"\n ),\n \"session_items\": [traceback.format_exc()],\n \"app_items\": [\n \"A traceback has been printed to your Python shell.\"\n ],\n }\n )\n\n return wrapper\n\n\nclass PollingHandler(tornado.web.RequestHandler):\n\n clients = defaultdict(set)\n screenshots = {}\n\n def set_default_headers(self, *args, **kwargs):\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n self.set_header(\"Access-Control-Allow-Methods\", \"POST, GET, OPTIONS\")\n\n @staticmethod\n def gather_messages(client):\n messages = [\n {\"type\": message} for message in PollingHandler.clients[client]\n ]\n PollingHandler.clients[client].clear()\n return messages\n\n @_catch_errors\n async def get(self):\n # pylint: disable=no-value-for-parameter\n client = self.get_argument(\"sessionId\")\n if client not in PollingHandler.clients:\n PollingHandler.clients[client].add(\"update\")\n PollingHandler.clients[client].add(\"statistics\")\n PollingHandler.clients[client].add(\"extended_statistics\")\n\n messages = self.gather_messages(client)\n self.write_message({\"messages\": messages})\n\n @_catch_errors\n async def post(self):\n # pylint: disable=no-value-for-parameter\n client = self.get_argument(\"sessionId\")\n # pylint: disable=no-value-for-parameter\n mode = self.get_argument(\"mode\")\n message = StateHandler.loads(self.request.body)\n event = message.pop(\"type\")\n force_update = False\n if mode == \"push\":\n if event == \"as_app\":\n if message[\"notebook\"]:\n message[\"ignore\"] = client\n global _notebook_clients\n global _deactivated_clients\n StateHandler.state[\"active_handle\"] = message[\"handle\"]\n _deactivated_clients.discard(message[\"handle\"])\n _notebook_clients[client] = 
message[\"handle\"]\n event = \"update\"\n force_update = True\n message = {\"state\": StateHandler.state}\n\n if event in {\n \"distinct\",\n \"distributions\",\n \"get_video_data\",\n \"all_tags\",\n \"selected_statistics\",\n \"tag_modal\",\n \"modal_statistics\",\n \"tag_statistics\",\n }:\n caller = self\n elif event in {\"capture\", \"update\"}:\n caller = client\n else:\n caller = StateHandler\n\n if event == \"refresh\":\n message[\"polling_client\"] = client\n\n if event == \"update\" and not force_update:\n message[\"ignore_polling_client\"] = client\n\n handle = getattr(StateHandler, \"on_%s\" % event)\n await handle(caller, **message)\n\n if caller == self:\n return\n\n messages = self.gather_messages(client)\n self.write_message({\"messages\": messages})\n return\n\n if event == \"update\":\n self.write_message({\"type\": \"update\", \"state\": StateHandler.state})\n\n elif event == \"deactivate\":\n self.write_message({\"type\": \"deactivate\"})\n\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n if event == \"statistics\":\n await StateHandler.send_statistics(\n view, extended=False, filters=state.filters, only=self\n )\n\n elif event == \"extended_statistics\":\n await StateHandler.send_statistics(\n view, extended=True, filters=state.filters, only=self\n )\n\n def write_message(self, message):\n message = StateHandler.dumps(message)\n self.write(message)\n\n\nclass StateHandler(tornado.websocket.WebSocketHandler):\n \"\"\"WebSocket handler for bi-directional state communication.\n\n Attributes:\n app_clients: active App clients\n clients: active clients\n state: the current a serialized\n :class:`fiftyone.core.state.StateDescription`, serialized\n prev_state: the previous a serialized\n :class:`fiftyone.core.state.StateDescription`, serialized\n \"\"\"\n\n app_clients = set()\n clients = set()\n state = fos.StateDescription().serialize()\n prev_state = fos.StateDescription().serialize()\n\n @staticmethod\n def dumps(data):\n \"\"\"Serializes data to a JSON formatted :class:`str`.\n\n Args:\n data: serializable object\n\n Returns:\n :class:`str`\n \"\"\"\n return FiftyOneJSONEncoder.dumps(data)\n\n @staticmethod\n def loads(data):\n \"\"\"Deserialized data to an object.\n\n Args:\n data: :class:`str`, :class:`bytes`, or :class:`bytearray`\n\n Returns:\n an object\n \"\"\"\n return FiftyOneJSONEncoder.loads(data)\n\n @staticmethod\n def sample_collection():\n \"\"\"Getter for the current sample collection.\"\"\"\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n dataset = state.view._dataset\n else:\n dataset = state.dataset\n\n return db[dataset._sample_collection_name]\n\n def write_message(self, message):\n \"\"\"Writes a message to the client.\n\n Args:\n message: a serializable object\n \"\"\"\n if message is None:\n return\n message = self.dumps(message)\n return super().write_message(message)\n\n def check_origin(self, origin):\n \"\"\"Accepts all origins.\n\n Returns:\n True\n \"\"\"\n return True\n\n def open(self):\n \"\"\"On open, add the client to the active clients set, and write the\n current state to the new client.\n \"\"\"\n StateHandler.clients.add(self)\n _write_message(\n {\"type\": \"update\", \"state\": StateHandler.state}, only=self\n )\n\n def on_close(self):\n \"\"\"On close, remove the client from the active clients set, and\n active App clients set (if applicable).\n \"\"\"\n StateHandler.clients.remove(self)\n 
StateHandler.app_clients.discard(self)\n\n async def close_wait():\n await asyncio.sleep(_DISCONNECT_TIMEOUT)\n if not StateHandler.app_clients:\n _write_message({\"type\": \"close\"}, session=True)\n\n tornado.ioloop.IOLoop.current().add_callback(close_wait)\n\n @_catch_errors\n async def on_message(self, message):\n \"\"\"On message, call the associated event awaitable, with respect to\n the provided message type.\n\n Args:\n message: a serialized message\n \"\"\"\n message = self.loads(message)\n event = getattr(self, \"on_%s\" % message.pop(\"type\"))\n await event(self, **message)\n\n @staticmethod\n async def on_capture(self, src, width):\n global _notebook_clients\n _write_message(\n {\n \"type\": \"capture\",\n \"handle\": _notebook_clients[self],\n \"src\": src,\n \"width\": width,\n }\n )\n\n @staticmethod\n async def on_as_app(self, notebook=False, handle=None, ignore=None):\n \"\"\"Event for registering a client as an App.\"\"\"\n if isinstance(self, StateHandler):\n StateHandler.app_clients.add(self)\n\n global _notebook_clients\n if isinstance(self, StateHandler) and notebook:\n _notebook_clients[self] = handle\n\n if not isinstance(self, StateHandler):\n return\n\n awaitables = self.get_statistics_awaitables(only=self)\n asyncio.gather(*awaitables)\n\n @staticmethod\n async def on_refresh(self, polling_client=None):\n \"\"\"Event for refreshing an App client.\"\"\"\n state = fos.StateDescription.from_dict(StateHandler.state)\n state.refresh = not state.refresh\n StateHandler.state = state.serialize()\n\n if polling_client:\n PollingHandler.clients[polling_client].update(\n {\"update\", \"statistics\", \"extended_statistics\"}\n )\n else:\n awaitables = [self.send_updates(only=self)]\n awaitables += self.get_statistics_awaitables(only=self)\n asyncio.gather(*awaitables)\n\n @staticmethod\n async def on_filters_update(self, filters):\n \"\"\"Event for updating state filters. Sends an extended dataset\n statistics message to active App clients.\n\n Args:\n filters: a :class:`dict` mapping field path to a serialized\n :class:`fiftyone.core.stages.Stage`\n \"\"\"\n state = fos.StateDescription.from_dict(StateHandler.state)\n state.filters = filters\n state.selected_labels = []\n state.selected = []\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n StateHandler.state = state.serialize()\n for clients in PollingHandler.clients.values():\n clients.update({\"extended_statistics\"})\n\n await self.send_statistics(view, filters=filters, extended=True)\n\n @staticmethod\n async def on_update(caller, state, ignore_polling_client=None):\n \"\"\"Event for state updates. 
Sends an update message to all active\n clients, and statistics messages to active App clients.\n\n Args:\n state: a serialized :class:`fiftyone.core.state.StateDescription`\n \"\"\"\n StateHandler.state = fos.StateDescription.from_dict(state).serialize()\n active_handle = state[\"active_handle\"]\n global _notebook_clients\n global _deactivated_clients\n _deactivated_clients.discard(active_handle)\n\n # ignore deactivated notebook cells\n if (\n active_handle\n and caller in _notebook_clients\n and _notebook_clients[caller] != active_handle\n ):\n return\n\n for client, events in PollingHandler.clients.items():\n if client in _notebook_clients:\n uuid = _notebook_clients[client]\n\n # deactivate the last active colab cell\n if uuid != active_handle:\n events.clear()\n _deactivated_clients.add(uuid)\n events.add(\"deactivate\")\n continue\n\n if client == ignore_polling_client:\n events.update({\"statistics\", \"extended_statistics\"})\n\n events.update({\"update\", \"statistics\", \"extended_statistics\"})\n\n awaitables = [\n StateHandler.send_updates(),\n ]\n awaitables += StateHandler.get_statistics_awaitables()\n asyncio.gather(*awaitables)\n\n @staticmethod\n async def on_set_selection(self, _ids):\n \"\"\"Event for setting the selected\n :class:`fiftyone.core.samples.Sample` _ids.\n\n Args:\n _ids: a list of sample _ids\n \"\"\"\n StateHandler.state[\"selected\"] = _ids\n await self.send_updates(ignore=self)\n\n @staticmethod\n async def on_clear_selection(self):\n \"\"\"Event for clearing the currently selected sample _ids.\n\n Sends state updates to all active clients.\n \"\"\"\n StateHandler.state[\"selected\"] = []\n await self.send_updates(ignore=self)\n\n @staticmethod\n async def on_set_selected_labels(self, selected_labels):\n \"\"\"Event for setting the entire selected objects list.\n\n Args:\n selected_labels: a list of selected labels\n \"\"\"\n if not isinstance(selected_labels, list):\n raise TypeError(\"selected_labels must be a list\")\n\n StateHandler.state[\"selected_labels\"] = selected_labels\n await self.send_updates(ignore=self)\n\n @staticmethod\n async def on_set_dataset(self, dataset_name):\n \"\"\"Event for setting the current dataset by name.\n\n Args:\n dataset_name: the dataset name\n \"\"\"\n dataset = fod.load_dataset(dataset_name)\n config = fos.StateDescription.from_dict(StateHandler.state).config\n active_handle = StateHandler.state[\"active_handle\"]\n StateHandler.state = fos.StateDescription(\n dataset=dataset, config=config, active_handle=active_handle\n ).serialize()\n await self.on_update(self, StateHandler.state)\n\n @staticmethod\n async def on_tag(\n caller, changes, target_labels=False, active_labels=None,\n ):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n view = get_extended_view(view, state.filters)\n if state.selected:\n view = view.select(state.selected)\n\n if target_labels:\n fosu.change_label_tags(view, changes, label_fields=active_labels)\n else:\n fosu.change_sample_tags(view, changes)\n\n StateHandler.state[\"refresh\"] = not state.refresh\n for clients in PollingHandler.clients.values():\n clients.update({\"update\"})\n\n await StateHandler.on_update(caller, StateHandler.state)\n\n @staticmethod\n async def on_all_tags(caller, sample_id=None):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view._dataset\n else:\n view = state.dataset\n\n if view is None:\n label = []\n sample = 
[]\n else:\n (_, tag_aggs,) = fos.DatasetStatistics.get_label_aggregations(view)\n results = await view._async_aggregate(\n [foa.Distinct(\"tags\")] + tag_aggs,\n )\n sample = results[0]\n\n label = set()\n for result in results[1:]:\n label |= set(result.keys())\n\n _write_message(\n {\"type\": \"all_tags\", \"sample\": sample, \"label\": label}, only=caller\n )\n\n @staticmethod\n async def on_modal_statistics(caller, sample_id, uuid, filters=None):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n if filters is not None:\n view = get_extended_view(\n view, filters, count_labels_tags=False, only_matches=False\n )\n\n view = view.select(sample_id)\n\n aggregations = fos.DatasetStatistics(view, filters).aggregations\n\n results = await view._async_aggregate(aggregations)\n convert(results)\n\n data = []\n for agg, result in zip(aggregations, results):\n data.append(\n {\n \"_CLS\": agg.__class__.__name__,\n \"name\": agg.field_name,\n \"result\": result,\n }\n )\n\n message = {\"type\": \"modal_statistics\", \"stats\": data, \"uuid\": uuid}\n\n _write_message(message, app=True, only=caller)\n\n @staticmethod\n async def on_save_filters(caller, add_stages=[], with_selected=False):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n view = get_extended_view(view, state.filters)\n\n if with_selected:\n if state.selected:\n view = view.select(state.selected)\n elif state.selected_labels:\n view = view.select_labels(state.selected_labels)\n\n for d in add_stages:\n stage = fosg.ViewStage._from_dict(d)\n view = view.add_stage(stage)\n\n state.selected = []\n state.selected_labels = []\n state.view = view\n state.filters = {}\n\n await StateHandler.on_update(caller, state.serialize())\n\n @staticmethod\n async def on_tag_modal(\n caller,\n changes,\n sample_id=None,\n labels=False,\n filters={},\n active_labels=[],\n frame_number=None,\n ):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n sample_ids = [sample_id]\n view = get_extended_view(view, filters)\n\n if labels:\n if state.selected_labels:\n labels = state.selected_labels\n sample_ids = list({label[\"sample_id\"] for label in labels})\n tag_view = view.select_labels(labels=labels)\n else:\n tag_view = view.select(sample_id)\n\n fosu.change_label_tags(\n tag_view, changes, label_fields=active_labels\n )\n else:\n tag_view = view.select(sample_id)\n fosu.change_sample_tags(tag_view, changes)\n\n for clients in PollingHandler.clients.values():\n clients.update({\"extended_statistics\", \"statistics\"})\n\n if isinstance(caller, PollingHandler):\n await StateHandler.send_samples(\n sample_id, sample_ids, current_frame=frame_number, only=caller\n )\n\n awaitables = [\n StateHandler.send_samples(\n sample_id, sample_ids, current_frame=frame_number\n )\n ]\n awaitables += StateHandler.get_statistics_awaitables()\n\n asyncio.gather(*awaitables)\n\n @staticmethod\n async def on_tag_statistics(\n caller,\n active_labels=[],\n filters={},\n sample_id=None,\n uuid=None,\n labels=False,\n ):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n view = get_extended_view(view, filters)\n\n if state.selected_labels and labels:\n view = view.select_labels(state.selected_labels)\n elif 
sample_id:\n view = view.select(sample_id)\n elif state.selected:\n view = view.select(state.selected)\n\n if labels:\n view = view.select_fields(active_labels)\n (\n count_aggs,\n tag_aggs,\n ) = fos.DatasetStatistics.get_label_aggregations(view)\n results = await view._async_aggregate(count_aggs + tag_aggs)\n\n count = sum(results[: len(count_aggs)])\n tags = defaultdict(int)\n for result in results[len(count_aggs) :]:\n for tag, num in result.items():\n tags[tag] += num\n else:\n tags = view.count_values(\"tags\")\n count = sum(tags.values())\n\n _write_message(\n {\n \"type\": \"tag_statistics\",\n \"count\": count,\n \"tags\": tags,\n \"uuid\": uuid,\n },\n only=caller,\n )\n\n @classmethod\n async def send_samples(\n cls, sample_id, sample_ids, current_frame=None, only=None\n ):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n view = get_extended_view(view, state.filters, count_labels_tags=True)\n view = fov.make_optimized_select_view(view, sample_ids)\n\n if view.media_type == fom.VIDEO and current_frame is not None:\n default_filter = F(\"frame_number\") == 1\n current_filter = F(\"frame_number\").is_in([current_frame, 1])\n filter_frames = lambda f: F(\"frames\").filter(f)\n expr = F.if_else(\n F(view._get_db_fields_map()[\"id\"]).to_string() == sample_id,\n filter_frames(current_filter),\n filter_frames(default_filter),\n )\n view = view.set_field(\"frames\", expr)\n\n samples = await foo.aggregate(\n StateHandler.sample_collection(),\n view._pipeline(attach_frames=True, detach_frames=False),\n ).to_list(len(sample_ids))\n convert(samples)\n\n _write_message(\n {\"type\": \"samples_update\", \"samples\": samples}, app=True, only=only\n )\n\n @classmethod\n def get_statistics_awaitables(cls, only=None):\n \"\"\"Gets statistics awaitables that will send statistics to the relevant\n client(s) when executed.\n\n Args:\n only (None): a client to restrict the messages to\n\n Returns:\n a list of coroutines\n \"\"\"\n if StateHandler.state[\"dataset\"] is None:\n return []\n\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n return [\n cls.send_statistics(\n view, extended=False, filters=state.filters, only=only,\n ),\n cls.send_statistics(\n view, extended=True, filters=state.filters, only=only\n ),\n ]\n\n @classmethod\n async def send_updates(cls, ignore=None, only=None):\n \"\"\"Sends an update event to all clients, excluding the ignore\n client, if it is not None.\n\n Args:\n ignore (None): a client to not send the update to\n only (None): a client to restrict the updates to\n \"\"\"\n _write_message(\n {\"type\": \"update\", \"state\": StateHandler.state},\n ignore=ignore,\n only=only,\n )\n\n @classmethod\n async def send_statistics(\n cls, view, extended=False, filters=None, only=None\n ):\n \"\"\"Sends a statistics event using the provided view to all App\n clients, unless an only client is provided, in which case it is only\n sent to that client.\n\n Args:\n view: a view\n extended (False): whether to apply the extended view filters\n filters (None): filter stages to append to the view\n only (None): a client to restrict the message to\n \"\"\"\n base_view = view\n data = []\n if view is not None and (not extended or filters):\n if extended:\n view = get_extended_view(view, filters)\n\n aggregations = fos.DatasetStatistics(view, filters).aggregations\n results = await 
view._async_aggregate(aggregations)\n convert(results)\n\n for agg, result in zip(aggregations, results):\n data.append(\n {\n \"_CLS\": agg.__class__.__name__,\n \"name\": agg.field_name,\n \"result\": result,\n }\n )\n\n view = (\n base_view._serialize()\n if isinstance(base_view, fov.DatasetView)\n else []\n )\n\n message = {\n \"type\": \"statistics\",\n \"stats\": data,\n \"view\": view,\n \"filters\": filters,\n \"extended\": extended,\n }\n\n _write_message(message, app=True, only=only)\n\n @classmethod\n async def on_count_values(\n cls,\n self,\n path,\n uuid=None,\n selected=[],\n search=\"\",\n asc=False,\n count=True,\n limit=_LIST_LIMIT,\n sample_id=None,\n ):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n elif state.dataset is not None:\n view = state.dataset\n\n view = _get_search_view(view, path, search, selected)\n\n if sample_id is not None:\n view = view.select(sample_id)\n\n sort_by = \"count\" if count else \"_id\"\n\n count, first = await view._async_aggregate(\n foa.CountValues(path, _first=limit, _asc=asc, _sort_by=sort_by)\n )\n\n message = {\n \"type\": \"count_values\",\n \"count\": count,\n \"results\": first,\n \"uuid\": uuid,\n }\n _write_message(message, app=True, only=self)\n\n @classmethod\n async def on_distributions(cls, self, group):\n \"\"\"Sends distribution data with respect to a group to the requesting\n client.\n\n Args:\n group: the distribution group. Valid groups are 'labels', 'scalars',\n and 'tags'.\n \"\"\"\n state = fos.StateDescription.from_dict(StateHandler.state)\n results = None\n if state.view is not None:\n view = state.view\n elif state.dataset is not None:\n view = state.dataset\n else:\n results = []\n\n view = get_extended_view(view, state.filters)\n\n if group == \"label tags\" and results is None:\n\n def filter(field):\n path = _label_filter(field)\n\n if path is not None:\n path = \"%s.tags\" % path\n\n return path\n\n aggs, fields = _count_values(filter, view)\n results = await _gather_results(aggs, fields, view)\n\n elif group == \"labels\" and results is None:\n\n def filter(field):\n path = _label_filter(field)\n\n if path is not None:\n path = \"%s.label\" % path\n\n return path\n\n aggs, fields = _count_values(filter, view)\n results = await _gather_results(aggs, fields, view)\n\n elif group == \"sample tags\" and results is None:\n aggs = [foa.CountValues(\"tags\", _first=_LIST_LIMIT)]\n try:\n fields = [view.get_field_schema()[\"tags\"]]\n results = await _gather_results(aggs, fields, view)\n except:\n results = []\n\n elif results is None:\n\n def filter(field):\n if field.name in {\"tags\", \"filepath\"} or field.name.startswith(\n \"_\"\n ):\n return None\n\n if fos._meets_type(field, (fof.BooleanField, fof.StringField)):\n return field.name\n\n return None\n\n aggs, fields = _count_values(filter, view)\n\n hist_aggs, hist_fields, ticks = await _numeric_histograms(\n view, view.get_field_schema()\n )\n aggs.extend(hist_aggs)\n fields.extend(hist_fields)\n results = await _gather_results(aggs, fields, view, ticks)\n\n results = sorted(results, key=lambda i: i[\"name\"])\n _write_message(\n {\"type\": \"distributions\", \"results\": results}, only=self\n )\n\n\ndef _label_filter(field):\n path = None\n if isinstance(field, fof.EmbeddedDocumentField) and issubclass(\n field.document_type, fol.Label\n ):\n path = field.name\n if issubclass(field.document_type, fol._HasLabelList):\n path = \"%s.%s\" % (path, field.document_type._LABEL_LIST_FIELD,)\n\n return 
path\n\n\ndef _get_search_view(view, path, search, selected):\n search = _escape_regex_chars(search)\n\n fields_map = view._get_db_fields_map()\n if search == \"\" and not selected:\n return view\n\n if \".\" in path:\n fields = path.split(\".\")\n if view.media_type == fom.VIDEO and fields[0] == \"frames\":\n field = \".\".join(fields[:2])\n else:\n field = fields[0]\n\n vf = F(\"label\")\n meth = lambda expr: view.filter_labels(field, expr)\n else:\n vf = get_view_field(fields_map, path)\n meth = view.match\n\n if search != \"\" and selected:\n expr = vf.re_match(search) & ~vf.is_in(selected)\n elif search != \"\":\n expr = vf.re_match(search)\n elif selected:\n expr = ~vf.is_in(selected)\n\n return meth(expr)\n\n\ndef _write_message(message, app=False, session=False, ignore=None, only=None):\n clients = StateHandler.app_clients if app else StateHandler.clients\n clients = _filter_deactivated_clients(clients)\n\n if only:\n only.write_message(message)\n return\n\n for client in clients:\n if session and client in StateHandler.app_clients:\n continue\n\n if client == ignore:\n continue\n\n client.write_message(message)\n\n\ndef _filter_deactivated_clients(clients):\n global _notebook_clients\n global _deactivated_clients\n active_handle = StateHandler.state[\"active_handle\"]\n\n filtered = []\n\n for client in clients:\n if client in _notebook_clients:\n uuid = _notebook_clients[client]\n if uuid != active_handle and uuid not in _deactivated_clients:\n _deactivated_clients.add(uuid)\n client.write_message({\"type\": \"deactivate\"})\n\n if uuid != active_handle:\n continue\n\n filtered.append(client)\n\n return filtered\n\n\ndef _create_histogram_key(field, start, end):\n if isinstance(field, (fof.DateField, fof.DateTimeField)):\n return fout.datetime_to_timestamp(start + ((end - start) / 2))\n\n return round((start + end) / 2, 4)\n\n\ndef _parse_histogram_values(result, field):\n counts, edges, other = result\n data = sorted(\n [\n {\n \"key\": _create_histogram_key(field, k, edges[idx + 1]),\n \"count\": v,\n \"edges\": (k, edges[idx + 1]),\n }\n for idx, (k, v) in enumerate(zip(edges, counts))\n ],\n key=lambda i: i[\"key\"],\n )\n if (\n fos._meets_type(field, fof.IntField)\n and len(data) == _DEFAULT_NUM_HISTOGRAM_BINS\n ):\n for bin_ in data:\n bin_[\"edges\"] = [math.ceil(e) for e in bin_[\"edges\"]]\n bin_[\"key\"] = math.ceil(bin_[\"key\"])\n elif fos._meets_type(field, fof.IntField):\n for bin_ in data:\n del bin_[\"edges\"]\n\n if other > 0:\n data.append({\"key\": \"None\", \"count\": other})\n\n return data\n\n\ndef _parse_count_values(result, field):\n return sorted(\n [{\"key\": k, \"count\": v} for k, v in result[1]],\n key=lambda i: i[\"count\"],\n reverse=True,\n )\n\n\nasync def _gather_results(aggs, fields, view, ticks=None):\n response = await view._async_aggregate(aggs)\n\n sorters = {\n foa.HistogramValues: _parse_histogram_values,\n foa.CountValues: _parse_count_values,\n }\n\n results = []\n for idx, (result, agg) in enumerate(zip(response, aggs)):\n field = fields[idx]\n try:\n type_ = field.document_type.__name__\n cls = field.document_type\n except:\n type_ = field.__class__.__name__\n cls = None\n\n name = agg.field_name\n if cls and issubclass(cls, fol.Label):\n if view.media_type == fom.VIDEO and name.startswith(\n view._FRAMES_PREFIX\n ):\n name = \"\".join(name.split(\".\")[:2])\n else:\n name = name.split(\".\")[0]\n\n data = sorters[type(agg)](result, field)\n result_ticks = 0\n if type(agg) == foa.HistogramValues:\n result_ticks = ticks.pop(0)\n if 
result_ticks is None:\n result_ticks = []\n step = max(len(data) // 4, 1)\n for i in range(0, len(data), step):\n result_ticks.append(data[i][\"key\"])\n\n if result[2] > 0 and len(data) and data[-1][\"key\"] != \"None\":\n result_ticks.append(\"None\")\n\n if data:\n results.append(\n {\n \"data\": data,\n \"name\": name,\n \"ticks\": result_ticks,\n \"type\": type_,\n }\n )\n\n return results\n\n\ndef _count_values(f, view):\n aggregations = []\n fields = []\n schemas = [(view.get_field_schema(), \"\")]\n if view.media_type == fom.VIDEO:\n schemas.append((view.get_frame_field_schema(), view._FRAMES_PREFIX))\n\n for schema, prefix in schemas:\n for field in schema.values():\n path = f(field)\n if path is None:\n continue\n\n fields.append(field)\n aggregations.append(\n foa.CountValues(\n \"%s%s\" % (prefix, path), _first=_LIST_LIMIT, _asc=False\n )\n )\n\n return aggregations, fields\n\n\ndef _numeric_bounds(paths):\n return [foa.Bounds(path) for path in paths]\n\n\nasync def _numeric_histograms(view, schema, prefix=\"\"):\n paths = []\n fields = []\n numerics = (fof.IntField, fof.FloatField, fof.DateField, fof.DateTimeField)\n for name, field in schema.items():\n if prefix != \"\" and name == \"frame_number\":\n continue\n\n if fos._meets_type(field, numerics):\n paths.append(\"%s%s\" % (prefix, name))\n fields.append(field)\n\n aggs = _numeric_bounds(paths)\n bounds = await view._async_aggregate(aggs)\n aggregations = []\n ticks = []\n for range_, field, path in zip(bounds, fields, paths):\n bins = _DEFAULT_NUM_HISTOGRAM_BINS\n num_ticks = None\n if range_[0] == range_[1]:\n bins = 1\n if range_[0] is None:\n range_ = [0, 1]\n\n if isinstance(range_[1], datetime):\n range_ = (range_[0], range_[1] + timedelta(milliseconds=1))\n elif isinstance(range_[1], date):\n range_ = (range_[0], range_[1] + timedelta(days=1))\n else:\n range_ = (range_[0], range_[1] + 1e-6)\n\n if fos._meets_type(field, fof.IntField):\n delta = range_[1] - range_[0]\n range_ = (range_[0] - 0.5, range_[1] + 0.5)\n if delta < _DEFAULT_NUM_HISTOGRAM_BINS:\n bins = delta + 1\n num_ticks = 0\n\n ticks.append(num_ticks)\n aggregations.append(foa.HistogramValues(path, bins=bins, range=range_))\n\n return aggregations, fields, ticks\n\n\nclass FileHandler(tornado.web.StaticFileHandler):\n def set_headers(self):\n super().set_headers()\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n self.set_header(\"Access-Control-Allow-Methods\", \"GET, HEAD, OPTIONS\")\n self.set_header(\"content-length\", self.get_content_size())\n self.set_header(\"x-colab-notebook-cache-control\", \"no-cache\")\n\n def get_content_type(self):\n if self.absolute_path.endswith(\".js\"):\n return \"text/javascript\"\n\n return super().get_content_type()\n\n\nclass MediaHandler(FileHandler):\n @classmethod\n def get_absolute_path(cls, root, path):\n if os.name != \"nt\":\n path = os.path.join(\"/\", path)\n\n return path\n\n def validate_absolute_path(self, root, absolute_path):\n if os.path.isdir(absolute_path) and self.default_filename is not None:\n if not self.request.path.endswith(\"/\"):\n self.redirect(self.request.path + \"/\", permanent=True)\n return None\n\n absolute_path = os.path.join(absolute_path, self.default_filename)\n if not os.path.exists(absolute_path):\n raise HTTPError(404)\n\n if not os.path.isfile(absolute_path):\n raise HTTPError(403, \"%s is not a file\", self.path)\n\n return absolute_path\n\n\nclass Application(tornado.web.Application):\n 
\"\"\"FiftyOne Tornado Application\"\"\"\n\n def __init__(self, **settings):\n server_path = os.path.dirname(os.path.abspath(__file__))\n rel_web_path = \"static\"\n web_path = os.path.join(server_path, rel_web_path)\n handlers = [\n (r\"/colorscales\", ColorscalesHandler),\n (r\"/fiftyone\", FiftyOneHandler),\n (r\"/frames\", FramesHandler),\n (r\"/filepath/(.*)\", MediaHandler, {\"path\": \"\"},),\n (r\"/notebook\", NotebookHandler),\n (r\"/page\", PageHandler),\n (r\"/polling\", PollingHandler),\n (r\"/reactivate\", ReactivateHandler),\n (r\"/stages\", StagesHandler),\n (r\"/state\", StateHandler),\n (r\"/teams\", TeamsHandler),\n (\n r\"/(.*)\",\n FileHandler,\n {\"path\": web_path, \"default_filename\": \"index.html\"},\n ),\n ]\n super().__init__(handlers, **settings)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--port\", type=int, default=fo.config.default_app_port)\n parser.add_argument(\n \"--address\", type=str, default=fo.config.default_app_address\n )\n args = parser.parse_args()\n app = Application(debug=foc.DEV_INSTALL)\n app.listen(args.port, address=args.address)\n tornado.ioloop.IOLoop.current().start()\n", "path": "fiftyone/server/main.py" } ]
[ { "content": "\"\"\"\nFiftyOne Tornado server.\n\n| Copyright 2017-2021, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport asyncio\nimport argparse\nfrom collections import defaultdict\nfrom datetime import date, datetime, timedelta\nimport math\nimport os\nimport traceback\n\nimport tornado.escape\nimport tornado.ioloop\nimport tornado.iostream\nimport tornado.options\nimport tornado.web\nfrom tornado.web import HTTPError\nimport tornado.websocket\n\nimport eta.core.serial as etas\n\nif os.environ.get(\"FIFTYONE_DISABLE_SERVICES\", False):\n del os.environ[\"FIFTYONE_DISABLE_SERVICES\"]\n\nos.environ[\"FIFTYONE_SERVER\"] = \"1\"\n\nimport fiftyone as fo\nimport fiftyone.core.aggregations as foa\nimport fiftyone.constants as foc\nimport fiftyone.core.clips as focl\nfrom fiftyone.core.expressions import ViewField as F, _escape_regex_chars\nimport fiftyone.core.dataset as fod\nimport fiftyone.core.fields as fof\nimport fiftyone.core.labels as fol\nimport fiftyone.core.media as fom\nimport fiftyone.core.odm as foo\nfrom fiftyone.core.stages import _STAGES\nimport fiftyone.core.stages as fosg\nimport fiftyone.core.state as fos\nimport fiftyone.core.uid as fou\nimport fiftyone.core.utils as fout\nimport fiftyone.core.view as fov\n\nfrom fiftyone.server.colorscales import ColorscalesHandler\nfrom fiftyone.server.extended_view import get_extended_view, get_view_field\nfrom fiftyone.server.json_util import convert, FiftyOneJSONEncoder\nimport fiftyone.server.utils as fosu\n\n\ndb = foo.get_async_db_conn()\n_notebook_clients = {}\n_deactivated_clients = set()\n_DISCONNECT_TIMEOUT = 1 # seconds\n_DEFAULT_NUM_HISTOGRAM_BINS = 25\n_LIST_LIMIT = 200\n\n\nclass RequestHandler(tornado.web.RequestHandler):\n \"\"\"\"Base class for HTTP request handlers\"\"\"\n\n def set_default_headers(self, *args, **kwargs):\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n self.set_header(\"Access-Control-Allow-Methods\", \"POST, GET, OPTIONS\")\n self.set_header(\"x-colab-notebook-cache-control\", \"no-cache\")\n\n async def get(self):\n self.write(self.get_response())\n\n def get_response(self):\n \"\"\"Returns the serializable response\n\n Returns:\n dict\n \"\"\"\n raise NotImplementedError(\"subclass must implement get_response()\")\n\n\nclass FiftyOneHandler(RequestHandler):\n \"\"\"Returns the version info of the fiftyone being used\"\"\"\n\n @staticmethod\n def get_response():\n \"\"\"Returns the serializable response\n\n Returns:\n dict\n \"\"\"\n uid, _ = fou.get_user_id()\n isfile = os.path.isfile(foc.TEAMS_PATH)\n if isfile:\n submitted = etas.load_json(foc.TEAMS_PATH)[\"submitted\"]\n else:\n submitted = False\n\n return {\n \"version\": foc.VERSION,\n \"user_id\": uid,\n \"do_not_track\": fo.config.do_not_track,\n \"teams\": {\"submitted\": submitted, \"minimized\": isfile},\n \"dev_install\": foc.DEV_INSTALL or foc.RC_INSTALL,\n }\n\n\nclass NotebookHandler(RequestHandler):\n \"\"\"Check that the requested handle exists on the server\"\"\"\n\n async def get(self):\n # pylint: disable=no-value-for-parameter\n handle_id = self.get_argument(\"handleId\")\n\n response = self.get_response(handle_id)\n if response is None:\n raise tornado.web.HTTPError(status_code=404)\n\n self.write(response)\n\n @staticmethod\n def get_response(handle):\n \"\"\"Returns if the notebook handle exists on the server.\n\n Returns:\n the handle ID\n \"\"\"\n global _notebook_clients\n if handle in 
set(_notebook_clients.values()):\n return {\"exists\": True}\n\n\nclass ReactivateHandler(RequestHandler):\n \"\"\"Reactivates an IPython display handle\"\"\"\n\n async def get(self):\n # pylint: disable=no-value-for-parameter\n handle_id = self.get_argument(\"handleId\")\n self.write(self.get_response(handle_id))\n\n @staticmethod\n def get_response(handle_id):\n \"\"\"Returns on success\n\n Args:\n handle_id: a handle uuid\n \"\"\"\n StateHandler.state[\"active_handle\"] = handle_id\n global _deactivated_clients\n _deactivated_clients.discard(handle_id)\n for client in StateHandler.clients:\n client.write_message({\"type\": \"reactivate\", \"handle\": handle_id})\n\n return {}\n\n\nclass StagesHandler(RequestHandler):\n \"\"\"Returns the definitions of stages available to the App\"\"\"\n\n @staticmethod\n def get_response():\n \"\"\"Returns the serializable response\n\n Returns:\n dict\n \"\"\"\n return {\n \"stages\": [\n {\"name\": stage.__name__, \"params\": stage._params()}\n for stage in _STAGES\n ]\n }\n\n\nclass FramesHandler(tornado.web.RequestHandler):\n \"\"\"Frames stream requests\"\"\"\n\n def set_default_headers(self, *args, **kwargs):\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n self.set_header(\"Access-Control-Allow-Methods\", \"POST, GET, OPTIONS\")\n self.set_header(\"x-colab-notebook-cache-control\", \"no-cache\")\n\n async def get(self):\n # pylint: disable=no-value-for-parameter\n sample_id = self.get_argument(\"sampleId\", None)\n # pylint: disable=no-value-for-parameter\n start_frame = int(self.get_argument(\"frameNumber\"))\n # pylint: disable=no-value-for-parameter\n frame_count = int(self.get_argument(\"frameCount\"))\n\n if sample_id is None or start_frame is None:\n raise ValueError(\"error\")\n\n end_frame = min(\n # pylint: disable=no-value-for-parameter\n int(self.get_argument(\"numFrames\")) + start_frame,\n frame_count,\n )\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n elif state.dataset is not None:\n view = state.dataset\n\n view = fov.make_optimized_select_view(view, sample_id)\n view = view.set_field(\n \"frames\",\n F(\"frames\").filter(\n (F(\"frame_number\") >= start_frame)\n & (F(\"frame_number\") <= end_frame)\n ),\n )\n\n frames = await foo.aggregate(\n StateHandler.sample_collection(), view._pipeline(frames_only=True)\n ).to_list(end_frame - start_frame + 1)\n convert(frames)\n self.write({\"frames\": frames, \"range\": [start_frame, end_frame]})\n\n\nclass PageHandler(tornado.web.RequestHandler):\n \"\"\"Page requests\n\n Args:\n page: the page number\n page_length (20): the number of items to return\n \"\"\"\n\n def set_default_headers(self, *args, **kwargs):\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n self.set_header(\"Access-Control-Allow-Methods\", \"POST, GET, OPTIONS\")\n self.set_header(\"x-colab-notebook-cache-control\", \"no-cache\")\n\n async def get(self):\n # pylint: disable=no-value-for-parameter\n page = int(self.get_argument(\"page\", 1))\n page_length = int(self.get_argument(\"page_length\", 20))\n\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n elif state.dataset is not None:\n view = state.dataset\n else:\n self.write({\"results\": [], \"more\": False})\n return\n\n view = get_extended_view(view, state.filters, 
count_labels_tags=True)\n if view.media_type == fom.VIDEO:\n if isinstance(view, focl.ClipsView):\n expr = F(\"frame_number\") == F(\"$support\")[0]\n else:\n expr = F(\"frame_number\") == 1\n\n view = view.set_field(\"frames\", F(\"frames\").filter(expr))\n\n view = view.skip((page - 1) * page_length)\n\n samples = await foo.aggregate(\n StateHandler.sample_collection(),\n view._pipeline(attach_frames=True, detach_frames=False),\n ).to_list(page_length + 1)\n convert(samples)\n\n more = False\n if len(samples) > page_length:\n samples = samples[:page_length]\n more = page + 1\n\n results = [{\"sample\": s} for s in samples]\n metadata = {}\n\n for r in results:\n filepath = r[\"sample\"][\"filepath\"]\n if filepath not in metadata:\n metadata[filepath] = fosu.read_metadata(\n filepath, r[\"sample\"].get(\"metadata\", None)\n )\n\n r.update(metadata[filepath])\n\n self.write({\"results\": results, \"more\": more})\n\n\nclass TeamsHandler(RequestHandler):\n \"\"\"Returns whether the teams button should be minimized\"\"\"\n\n def post(self):\n submitted = self.get_argument(\"submitted\", \"\") == \"true\"\n etas.write_json({\"submitted\": submitted}, foc.TEAMS_PATH)\n\n\ndef _catch_errors(func):\n async def wrapper(self, *args, **kwargs):\n try:\n StateHandler.prev_state = StateHandler.state\n result = await func(self, *args, **kwargs)\n return result\n except Exception:\n StateHandler.state = StateHandler.prev_state\n clients = list(StateHandler.clients)\n if isinstance(self, PollingHandler):\n clients.append(self)\n\n for client in clients:\n client.write_message(\n {\n \"type\": \"notification\",\n \"kind\": \"Server Error\",\n \"message\": (\n \"An exception has been raised by the server. Your session \"\n \"has been reverted to its previous state.\"\n ),\n \"session_items\": [traceback.format_exc()],\n \"app_items\": [\n \"A traceback has been printed to your Python shell.\"\n ],\n }\n )\n\n return wrapper\n\n\nclass PollingHandler(tornado.web.RequestHandler):\n\n clients = defaultdict(set)\n screenshots = {}\n\n def set_default_headers(self, *args, **kwargs):\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n self.set_header(\"Access-Control-Allow-Methods\", \"POST, GET, OPTIONS\")\n\n @staticmethod\n def gather_messages(client):\n messages = [\n {\"type\": message} for message in PollingHandler.clients[client]\n ]\n PollingHandler.clients[client].clear()\n return messages\n\n @_catch_errors\n async def get(self):\n # pylint: disable=no-value-for-parameter\n client = self.get_argument(\"sessionId\")\n if client not in PollingHandler.clients:\n PollingHandler.clients[client].add(\"update\")\n PollingHandler.clients[client].add(\"statistics\")\n PollingHandler.clients[client].add(\"extended_statistics\")\n\n messages = self.gather_messages(client)\n self.write_message({\"messages\": messages})\n\n @_catch_errors\n async def post(self):\n # pylint: disable=no-value-for-parameter\n client = self.get_argument(\"sessionId\")\n # pylint: disable=no-value-for-parameter\n mode = self.get_argument(\"mode\")\n message = StateHandler.loads(self.request.body)\n event = message.pop(\"type\")\n force_update = False\n if mode == \"push\":\n if event == \"as_app\":\n if message[\"notebook\"]:\n message[\"ignore\"] = client\n global _notebook_clients\n global _deactivated_clients\n StateHandler.state[\"active_handle\"] = message[\"handle\"]\n _deactivated_clients.discard(message[\"handle\"])\n _notebook_clients[client] = 
message[\"handle\"]\n event = \"update\"\n force_update = True\n message = {\"state\": StateHandler.state}\n\n if event in {\n \"count_values\",\n \"distinct\",\n \"distributions\",\n \"get_video_data\",\n \"all_tags\",\n \"selected_statistics\",\n \"tag_modal\",\n \"modal_statistics\",\n \"tag_statistics\",\n }:\n caller = self\n elif event in {\"capture\", \"update\"}:\n caller = client\n else:\n caller = StateHandler\n\n if event == \"refresh\":\n message[\"polling_client\"] = client\n\n if event == \"update\" and not force_update:\n message[\"ignore_polling_client\"] = client\n\n handle = getattr(StateHandler, \"on_%s\" % event)\n await handle(caller, **message)\n\n if caller == self:\n return\n\n messages = self.gather_messages(client)\n self.write_message({\"messages\": messages})\n return\n\n if event == \"update\":\n self.write_message({\"type\": \"update\", \"state\": StateHandler.state})\n\n elif event == \"deactivate\":\n self.write_message({\"type\": \"deactivate\"})\n\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n if event == \"statistics\":\n await StateHandler.send_statistics(\n view, extended=False, filters=state.filters, only=self\n )\n\n elif event == \"extended_statistics\":\n await StateHandler.send_statistics(\n view, extended=True, filters=state.filters, only=self\n )\n\n def write_message(self, message):\n message = StateHandler.dumps(message)\n self.write(message)\n\n\nclass StateHandler(tornado.websocket.WebSocketHandler):\n \"\"\"WebSocket handler for bi-directional state communication.\n\n Attributes:\n app_clients: active App clients\n clients: active clients\n state: the current a serialized\n :class:`fiftyone.core.state.StateDescription`, serialized\n prev_state: the previous a serialized\n :class:`fiftyone.core.state.StateDescription`, serialized\n \"\"\"\n\n app_clients = set()\n clients = set()\n state = fos.StateDescription().serialize()\n prev_state = fos.StateDescription().serialize()\n\n @staticmethod\n def dumps(data):\n \"\"\"Serializes data to a JSON formatted :class:`str`.\n\n Args:\n data: serializable object\n\n Returns:\n :class:`str`\n \"\"\"\n return FiftyOneJSONEncoder.dumps(data)\n\n @staticmethod\n def loads(data):\n \"\"\"Deserialized data to an object.\n\n Args:\n data: :class:`str`, :class:`bytes`, or :class:`bytearray`\n\n Returns:\n an object\n \"\"\"\n return FiftyOneJSONEncoder.loads(data)\n\n @staticmethod\n def sample_collection():\n \"\"\"Getter for the current sample collection.\"\"\"\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n dataset = state.view._dataset\n else:\n dataset = state.dataset\n\n return db[dataset._sample_collection_name]\n\n def write_message(self, message):\n \"\"\"Writes a message to the client.\n\n Args:\n message: a serializable object\n \"\"\"\n if message is None:\n return\n message = self.dumps(message)\n return super().write_message(message)\n\n def check_origin(self, origin):\n \"\"\"Accepts all origins.\n\n Returns:\n True\n \"\"\"\n return True\n\n def open(self):\n \"\"\"On open, add the client to the active clients set, and write the\n current state to the new client.\n \"\"\"\n StateHandler.clients.add(self)\n _write_message(\n {\"type\": \"update\", \"state\": StateHandler.state}, only=self\n )\n\n def on_close(self):\n \"\"\"On close, remove the client from the active clients set, and\n active App clients set (if applicable).\n \"\"\"\n 
StateHandler.clients.remove(self)\n StateHandler.app_clients.discard(self)\n\n async def close_wait():\n await asyncio.sleep(_DISCONNECT_TIMEOUT)\n if not StateHandler.app_clients:\n _write_message({\"type\": \"close\"}, session=True)\n\n tornado.ioloop.IOLoop.current().add_callback(close_wait)\n\n @_catch_errors\n async def on_message(self, message):\n \"\"\"On message, call the associated event awaitable, with respect to\n the provided message type.\n\n Args:\n message: a serialized message\n \"\"\"\n message = self.loads(message)\n event = getattr(self, \"on_%s\" % message.pop(\"type\"))\n await event(self, **message)\n\n @staticmethod\n async def on_capture(self, src, width):\n global _notebook_clients\n _write_message(\n {\n \"type\": \"capture\",\n \"handle\": _notebook_clients[self],\n \"src\": src,\n \"width\": width,\n }\n )\n\n @staticmethod\n async def on_as_app(self, notebook=False, handle=None, ignore=None):\n \"\"\"Event for registering a client as an App.\"\"\"\n if isinstance(self, StateHandler):\n StateHandler.app_clients.add(self)\n\n global _notebook_clients\n if isinstance(self, StateHandler) and notebook:\n _notebook_clients[self] = handle\n\n if not isinstance(self, StateHandler):\n return\n\n awaitables = self.get_statistics_awaitables(only=self)\n asyncio.gather(*awaitables)\n\n @staticmethod\n async def on_refresh(self, polling_client=None):\n \"\"\"Event for refreshing an App client.\"\"\"\n state = fos.StateDescription.from_dict(StateHandler.state)\n state.refresh = not state.refresh\n StateHandler.state = state.serialize()\n\n if polling_client:\n PollingHandler.clients[polling_client].update(\n {\"update\", \"statistics\", \"extended_statistics\"}\n )\n else:\n awaitables = [self.send_updates(only=self)]\n awaitables += self.get_statistics_awaitables(only=self)\n asyncio.gather(*awaitables)\n\n @staticmethod\n async def on_filters_update(self, filters):\n \"\"\"Event for updating state filters. Sends an extended dataset\n statistics message to active App clients.\n\n Args:\n filters: a :class:`dict` mapping field path to a serialized\n :class:fiftyone.core.stages.Stage`\n \"\"\"\n state = fos.StateDescription.from_dict(StateHandler.state)\n state.filters = filters\n state.selected_labels = []\n state.selected = []\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n StateHandler.state = state.serialize()\n for clients in PollingHandler.clients.values():\n clients.update({\"extended_statistics\"})\n\n await self.send_statistics(view, filters=filters, extended=True)\n\n @staticmethod\n async def on_update(caller, state, ignore_polling_client=None):\n \"\"\"Event for state updates. 
Sends an update message to all active\n clients, and statistics messages to active App clients.\n\n Args:\n state: a serialized :class:`fiftyone.core.state.StateDescription`\n \"\"\"\n StateHandler.state = fos.StateDescription.from_dict(state).serialize()\n active_handle = state[\"active_handle\"]\n global _notebook_clients\n global _deactivated_clients\n _deactivated_clients.discard(active_handle)\n\n # ignore deactivated notebook cells\n if (\n active_handle\n and caller in _notebook_clients\n and _notebook_clients[caller] != active_handle\n ):\n return\n\n for client, events in PollingHandler.clients.items():\n if client in _notebook_clients:\n uuid = _notebook_clients[client]\n\n # deactivate the last active colab cell\n if uuid != active_handle:\n events.clear()\n _deactivated_clients.add(uuid)\n events.add(\"deactivate\")\n continue\n\n if client == ignore_polling_client:\n events.update({\"statistics\", \"extended_statistics\"})\n\n events.update({\"update\", \"statistics\", \"extended_statistics\"})\n\n awaitables = [\n StateHandler.send_updates(),\n ]\n awaitables += StateHandler.get_statistics_awaitables()\n asyncio.gather(*awaitables)\n\n @staticmethod\n async def on_set_selection(self, _ids):\n \"\"\"Event for setting the selected\n :class:`fiftyone.core.samples.Sample` _ids\n\n Args:\n _ids: a list of sample _id\n \"\"\"\n StateHandler.state[\"selected\"] = _ids\n await self.send_updates(ignore=self)\n\n @staticmethod\n async def on_clear_selection(self):\n \"\"\"Event for clearing the currently selected sample _ids.\n\n Sends state updates to all active clients.\n \"\"\"\n StateHandler.state[\"selected\"] = []\n await self.send_updates(ignore=self)\n\n @staticmethod\n async def on_set_selected_labels(self, selected_labels):\n \"\"\"Event for setting the entire selected objects list.\n\n Args:\n selected_labels: a list of selected labels\n \"\"\"\n if not isinstance(selected_labels, list):\n raise TypeError(\"selected_labels must be a list\")\n\n StateHandler.state[\"selected_labels\"] = selected_labels\n await self.send_updates(ignore=self)\n\n @staticmethod\n async def on_set_dataset(self, dataset_name):\n \"\"\"Event for setting the current dataset by name.\n\n Args:\n dataset_name: the dataset name\n \"\"\"\n dataset = fod.load_dataset(dataset_name)\n config = fos.StateDescription.from_dict(StateHandler.state).config\n active_handle = StateHandler.state[\"active_handle\"]\n StateHandler.state = fos.StateDescription(\n dataset=dataset, config=config, active_handle=active_handle\n ).serialize()\n await self.on_update(self, StateHandler.state)\n\n @staticmethod\n async def on_tag(\n caller, changes, target_labels=False, active_labels=None,\n ):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n view = get_extended_view(view, state.filters)\n if state.selected:\n view = view.select(state.selected)\n\n if target_labels:\n fosu.change_label_tags(view, changes, label_fields=active_labels)\n else:\n fosu.change_sample_tags(view, changes)\n\n StateHandler.state[\"refresh\"] = not state.refresh\n for clients in PollingHandler.clients.values():\n clients.update({\"update\"})\n\n await StateHandler.on_update(caller, StateHandler.state)\n\n @staticmethod\n async def on_all_tags(caller, sample_id=None):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view._dataset\n else:\n view = state.dataset\n\n if view is None:\n label = []\n sample = 
[]\n else:\n (_, tag_aggs,) = fos.DatasetStatistics.get_label_aggregations(view)\n results = await view._async_aggregate(\n [foa.Distinct(\"tags\")] + tag_aggs,\n )\n sample = results[0]\n\n label = set()\n for result in results[1:]:\n label |= set(result.keys())\n\n _write_message(\n {\"type\": \"all_tags\", \"sample\": sample, \"label\": label}, only=caller\n )\n\n @staticmethod\n async def on_modal_statistics(caller, sample_id, uuid, filters=None):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n if filters is not None:\n view = get_extended_view(\n view, filters, count_labels_tags=False, only_matches=False\n )\n\n view = view.select(sample_id)\n\n aggregations = fos.DatasetStatistics(view, filters).aggregations\n\n results = await view._async_aggregate(aggregations)\n convert(results)\n\n data = []\n for agg, result in zip(aggregations, results):\n data.append(\n {\n \"_CLS\": agg.__class__.__name__,\n \"name\": agg.field_name,\n \"result\": result,\n }\n )\n\n message = {\"type\": \"modal_statistics\", \"stats\": data, \"uuid\": uuid}\n\n _write_message(message, app=True, only=caller)\n\n @staticmethod\n async def on_save_filters(caller, add_stages=[], with_selected=False):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n view = get_extended_view(view, state.filters)\n\n if with_selected:\n if state.selected:\n view = view.select(state.selected)\n elif state.selected_labels:\n view = view.select_labels(state.selected_labels)\n\n for d in add_stages:\n stage = fosg.ViewStage._from_dict(d)\n view = view.add_stage(stage)\n\n state.selected = []\n state.selected_labels = []\n state.view = view\n state.filters = {}\n\n await StateHandler.on_update(caller, state.serialize())\n\n @staticmethod\n async def on_tag_modal(\n caller,\n changes,\n sample_id=None,\n labels=False,\n filters={},\n active_labels=[],\n frame_number=None,\n ):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n sample_ids = [sample_id]\n view = get_extended_view(view, filters)\n\n if labels:\n if state.selected_labels:\n labels = state.selected_labels\n sample_ids = list({label[\"sample_id\"] for label in labels})\n tag_view = view.select_labels(labels=labels)\n else:\n tag_view = view.select(sample_id)\n\n fosu.change_label_tags(\n tag_view, changes, label_fields=active_labels\n )\n else:\n tag_view = view.select(sample_id)\n fosu.change_sample_tags(tag_view, changes)\n\n for clients in PollingHandler.clients.values():\n clients.update({\"extended_statistics\", \"statistics\"})\n\n if isinstance(caller, PollingHandler):\n await StateHandler.send_samples(\n sample_id, sample_ids, current_frame=frame_number, only=caller\n )\n\n awaitables = [\n StateHandler.send_samples(\n sample_id, sample_ids, current_frame=frame_number\n )\n ]\n awaitables += StateHandler.get_statistics_awaitables()\n\n asyncio.gather(*awaitables)\n\n @staticmethod\n async def on_tag_statistics(\n caller,\n active_labels=[],\n filters={},\n sample_id=None,\n uuid=None,\n labels=False,\n ):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n view = get_extended_view(view, filters)\n\n if state.selected_labels and labels:\n view = view.select_labels(state.selected_labels)\n elif 
sample_id:\n view = view.select(sample_id)\n elif state.selected:\n view = view.select(state.selected)\n\n if labels:\n view = view.select_fields(active_labels)\n (\n count_aggs,\n tag_aggs,\n ) = fos.DatasetStatistics.get_label_aggregations(view)\n results = await view._async_aggregate(count_aggs + tag_aggs)\n\n count = sum(results[: len(count_aggs)])\n tags = defaultdict(int)\n for result in results[len(count_aggs) :]:\n for tag, num in result.items():\n tags[tag] += num\n else:\n tags = view.count_values(\"tags\")\n count = sum(tags.values())\n\n _write_message(\n {\n \"type\": \"tag_statistics\",\n \"count\": count,\n \"tags\": tags,\n \"uuid\": uuid,\n },\n only=caller,\n )\n\n @classmethod\n async def send_samples(\n cls, sample_id, sample_ids, current_frame=None, only=None\n ):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n view = get_extended_view(view, state.filters, count_labels_tags=True)\n view = fov.make_optimized_select_view(view, sample_ids)\n\n if view.media_type == fom.VIDEO and current_frame is not None:\n default_filter = F(\"frame_number\") == 1\n current_filter = F(\"frame_number\").is_in([current_frame, 1])\n filter_frames = lambda f: F(\"frames\").filter(f)\n expr = F.if_else(\n F(view._get_db_fields_map()[\"id\"]).to_string() == sample_id,\n filter_frames(current_filter),\n filter_frames(default_filter),\n )\n view = view.set_field(\"frames\", expr)\n\n samples = await foo.aggregate(\n StateHandler.sample_collection(),\n view._pipeline(attach_frames=True, detach_frames=False),\n ).to_list(len(sample_ids))\n convert(samples)\n\n _write_message(\n {\"type\": \"samples_update\", \"samples\": samples}, app=True, only=only\n )\n\n @classmethod\n def get_statistics_awaitables(cls, only=None):\n \"\"\"Gets statistic awaitables that will send statistics to the relevant\n client(s) when executed\n\n Args:\n only (None): a client to restrict the messages to\n\n Returns:\n a list of coroutines\n \"\"\"\n if StateHandler.state[\"dataset\"] is None:\n return []\n\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n else:\n view = state.dataset\n\n return [\n cls.send_statistics(\n view, extended=False, filters=state.filters, only=only,\n ),\n cls.send_statistics(\n view, extended=True, filters=state.filters, only=only\n ),\n ]\n\n @classmethod\n async def send_updates(cls, ignore=None, only=None):\n \"\"\"Sends an update event to the all clients, exluding the ignore\n client, if it is not None.\n\n Args:\n ignore (None): a client to not send the update to\n only (None): a client to restrict the updates to\n \"\"\"\n _write_message(\n {\"type\": \"update\", \"state\": StateHandler.state},\n ignore=ignore,\n only=only,\n )\n\n @classmethod\n async def send_statistics(\n cls, view, extended=False, filters=None, only=None\n ):\n \"\"\"Sends a statistics event given using the provided view to all App\n clients, unless an only client is provided in which case it is only\n sent to the that client.\n\n Args:\n view: a view\n extended (False): whether to apply the extended view filters\n filters (None): filter stages to append to the view\n only (None): a client to restrict the message to\n \"\"\"\n base_view = view\n data = []\n if view is not None and (not extended or filters):\n if extended:\n view = get_extended_view(view, filters)\n\n aggregations = fos.DatasetStatistics(view, filters).aggregations\n results = await 
view._async_aggregate(aggregations)\n convert(results)\n\n for agg, result in zip(aggregations, results):\n data.append(\n {\n \"_CLS\": agg.__class__.__name__,\n \"name\": agg.field_name,\n \"result\": result,\n }\n )\n\n view = (\n base_view._serialize()\n if isinstance(base_view, fov.DatasetView)\n else []\n )\n\n message = {\n \"type\": \"statistics\",\n \"stats\": data,\n \"view\": view,\n \"filters\": filters,\n \"extended\": extended,\n }\n\n _write_message(message, app=True, only=only)\n\n @classmethod\n async def on_count_values(\n cls,\n self,\n path,\n uuid=None,\n selected=[],\n search=\"\",\n asc=False,\n count=True,\n limit=_LIST_LIMIT,\n sample_id=None,\n ):\n state = fos.StateDescription.from_dict(StateHandler.state)\n if state.view is not None:\n view = state.view\n elif state.dataset is not None:\n view = state.dataset\n\n view = _get_search_view(view, path, search, selected)\n\n if sample_id is not None:\n view = view.select(sample_id)\n\n sort_by = \"count\" if count else \"_id\"\n\n count, first = await view._async_aggregate(\n foa.CountValues(path, _first=limit, _asc=asc, _sort_by=sort_by)\n )\n\n message = {\n \"type\": \"count_values\",\n \"count\": count,\n \"results\": first,\n \"uuid\": uuid,\n }\n _write_message(message, app=True, only=self)\n\n @classmethod\n async def on_distributions(cls, self, group):\n \"\"\"Sends distribution data with respect to a group to the requesting\n client.\n\n Args:\n group: the distribution group. Valid groups are 'labels', 'scalars',\n and 'tags'.\n \"\"\"\n state = fos.StateDescription.from_dict(StateHandler.state)\n results = None\n if state.view is not None:\n view = state.view\n elif state.dataset is not None:\n view = state.dataset\n else:\n results = []\n\n view = get_extended_view(view, state.filters)\n\n if group == \"label tags\" and results is None:\n\n def filter(field):\n path = _label_filter(field)\n\n if path is not None:\n path = \"%s.tags\" % path\n\n return path\n\n aggs, fields = _count_values(filter, view)\n results = await _gather_results(aggs, fields, view)\n\n elif group == \"labels\" and results is None:\n\n def filter(field):\n path = _label_filter(field)\n\n if path is not None:\n path = \"%s.label\" % path\n\n return path\n\n aggs, fields = _count_values(filter, view)\n results = await _gather_results(aggs, fields, view)\n\n elif group == \"sample tags\" and results is None:\n aggs = [foa.CountValues(\"tags\", _first=_LIST_LIMIT)]\n try:\n fields = [view.get_field_schema()[\"tags\"]]\n results = await _gather_results(aggs, fields, view)\n except:\n results = []\n\n elif results is None:\n\n def filter(field):\n if field.name in {\"tags\", \"filepath\"} or field.name.startswith(\n \"_\"\n ):\n return None\n\n if fos._meets_type(field, (fof.BooleanField, fof.StringField)):\n return field.name\n\n return None\n\n aggs, fields = _count_values(filter, view)\n\n hist_aggs, hist_fields, ticks = await _numeric_histograms(\n view, view.get_field_schema()\n )\n aggs.extend(hist_aggs)\n fields.extend(hist_fields)\n results = await _gather_results(aggs, fields, view, ticks)\n\n results = sorted(results, key=lambda i: i[\"name\"])\n _write_message(\n {\"type\": \"distributions\", \"results\": results}, only=self\n )\n\n\ndef _label_filter(field):\n path = None\n if isinstance(field, fof.EmbeddedDocumentField) and issubclass(\n field.document_type, fol.Label\n ):\n path = field.name\n if issubclass(field.document_type, fol._HasLabelList):\n path = \"%s.%s\" % (path, field.document_type._LABEL_LIST_FIELD,)\n\n return 
path\n\n\ndef _get_search_view(view, path, search, selected):\n search = _escape_regex_chars(search)\n\n fields_map = view._get_db_fields_map()\n if search == \"\" and not selected:\n return view\n\n if \".\" in path:\n fields = path.split(\".\")\n if view.media_type == fom.VIDEO and fields[0] == \"frames\":\n field = \".\".join(fields[:2])\n else:\n field = fields[0]\n\n vf = F(\"label\")\n meth = lambda expr: view.filter_labels(field, expr)\n else:\n vf = get_view_field(fields_map, path)\n meth = view.match\n\n if search != \"\" and selected:\n expr = vf.re_match(search) & ~vf.is_in(selected)\n elif search != \"\":\n expr = vf.re_match(search)\n elif selected:\n expr = ~vf.is_in(selected)\n\n return meth(expr)\n\n\ndef _write_message(message, app=False, session=False, ignore=None, only=None):\n clients = StateHandler.app_clients if app else StateHandler.clients\n clients = _filter_deactivated_clients(clients)\n\n if only:\n only.write_message(message)\n return\n\n for client in clients:\n if session and client in StateHandler.app_clients:\n continue\n\n if client == ignore:\n continue\n\n client.write_message(message)\n\n\ndef _filter_deactivated_clients(clients):\n global _notebook_clients\n global _deactivated_clients\n active_handle = StateHandler.state[\"active_handle\"]\n\n filtered = []\n\n for client in clients:\n if client in _notebook_clients:\n uuid = _notebook_clients[client]\n if uuid != active_handle and uuid not in _deactivated_clients:\n _deactivated_clients.add(uuid)\n client.write_message({\"type\": \"deactivate\"})\n\n if uuid != active_handle:\n continue\n\n filtered.append(client)\n\n return filtered\n\n\ndef _create_histogram_key(field, start, end):\n if isinstance(field, (fof.DateField, fof.DateTimeField)):\n return fout.datetime_to_timestamp(start + ((end - start) / 2))\n\n return round((start + end) / 2, 4)\n\n\ndef _parse_histogram_values(result, field):\n counts, edges, other = result\n data = sorted(\n [\n {\n \"key\": _create_histogram_key(field, k, edges[idx + 1]),\n \"count\": v,\n \"edges\": (k, edges[idx + 1]),\n }\n for idx, (k, v) in enumerate(zip(edges, counts))\n ],\n key=lambda i: i[\"key\"],\n )\n if (\n fos._meets_type(field, fof.IntField)\n and len(data) == _DEFAULT_NUM_HISTOGRAM_BINS\n ):\n for bin_ in data:\n bin_[\"edges\"] = [math.ceil(e) for e in bin_[\"edges\"]]\n bin_[\"key\"] = math.ceil(bin_[\"key\"])\n elif fos._meets_type(field, fof.IntField):\n for bin_ in data:\n del bin_[\"edges\"]\n\n if other > 0:\n data.append({\"key\": \"None\", \"count\": other})\n\n return data\n\n\ndef _parse_count_values(result, field):\n return sorted(\n [{\"key\": k, \"count\": v} for k, v in result[1]],\n key=lambda i: i[\"count\"],\n reverse=True,\n )\n\n\nasync def _gather_results(aggs, fields, view, ticks=None):\n response = await view._async_aggregate(aggs)\n\n sorters = {\n foa.HistogramValues: _parse_histogram_values,\n foa.CountValues: _parse_count_values,\n }\n\n results = []\n for idx, (result, agg) in enumerate(zip(response, aggs)):\n field = fields[idx]\n try:\n type_ = field.document_type.__name__\n cls = field.document_type\n except:\n type_ = field.__class__.__name__\n cls = None\n\n name = agg.field_name\n if cls and issubclass(cls, fol.Label):\n if view.media_type == fom.VIDEO and name.startswith(\n view._FRAMES_PREFIX\n ):\n name = \"\".join(name.split(\".\")[:2])\n else:\n name = name.split(\".\")[0]\n\n data = sorters[type(agg)](result, field)\n result_ticks = 0\n if type(agg) == foa.HistogramValues:\n result_ticks = ticks.pop(0)\n if 
result_ticks is None:\n result_ticks = []\n step = max(len(data) // 4, 1)\n for i in range(0, len(data), step):\n result_ticks.append(data[i][\"key\"])\n\n if result[2] > 0 and len(data) and data[-1][\"key\"] != \"None\":\n result_ticks.append(\"None\")\n\n if data:\n results.append(\n {\n \"data\": data,\n \"name\": name,\n \"ticks\": result_ticks,\n \"type\": type_,\n }\n )\n\n return results\n\n\ndef _count_values(f, view):\n aggregations = []\n fields = []\n schemas = [(view.get_field_schema(), \"\")]\n if view.media_type == fom.VIDEO:\n schemas.append((view.get_frame_field_schema(), view._FRAMES_PREFIX))\n\n for schema, prefix in schemas:\n for field in schema.values():\n path = f(field)\n if path is None:\n continue\n\n fields.append(field)\n aggregations.append(\n foa.CountValues(\n \"%s%s\" % (prefix, path), _first=_LIST_LIMIT, _asc=False\n )\n )\n\n return aggregations, fields\n\n\ndef _numeric_bounds(paths):\n return [foa.Bounds(path) for path in paths]\n\n\nasync def _numeric_histograms(view, schema, prefix=\"\"):\n paths = []\n fields = []\n numerics = (fof.IntField, fof.FloatField, fof.DateField, fof.DateTimeField)\n for name, field in schema.items():\n if prefix != \"\" and name == \"frame_number\":\n continue\n\n if fos._meets_type(field, numerics):\n paths.append(\"%s%s\" % (prefix, name))\n fields.append(field)\n\n aggs = _numeric_bounds(paths)\n bounds = await view._async_aggregate(aggs)\n aggregations = []\n ticks = []\n for range_, field, path in zip(bounds, fields, paths):\n bins = _DEFAULT_NUM_HISTOGRAM_BINS\n num_ticks = None\n if range_[0] == range_[1]:\n bins = 1\n if range_[0] is None:\n range_ = [0, 1]\n\n if isinstance(range_[1], datetime):\n range_ = (range_[0], range_[1] + timedelta(milliseconds=1))\n elif isinstance(range_[1], date):\n range_ = (range_[0], range_[1] + timedelta(days=1))\n else:\n range_ = (range_[0], range_[1] + 1e-6)\n\n if fos._meets_type(field, fof.IntField):\n delta = range_[1] - range_[0]\n range_ = (range_[0] - 0.5, range_[1] + 0.5)\n if delta < _DEFAULT_NUM_HISTOGRAM_BINS:\n bins = delta + 1\n num_ticks = 0\n\n ticks.append(num_ticks)\n aggregations.append(foa.HistogramValues(path, bins=bins, range=range_))\n\n return aggregations, fields, ticks\n\n\nclass FileHandler(tornado.web.StaticFileHandler):\n def set_headers(self):\n super().set_headers()\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n self.set_header(\"Access-Control-Allow-Methods\", \"GET, HEAD, OPTIONS\")\n self.set_header(\"content-length\", self.get_content_size())\n self.set_header(\"x-colab-notebook-cache-control\", \"no-cache\")\n\n def get_content_type(self):\n if self.absolute_path.endswith(\".js\"):\n return \"text/javascript\"\n\n return super().get_content_type()\n\n\nclass MediaHandler(FileHandler):\n @classmethod\n def get_absolute_path(cls, root, path):\n if os.name != \"nt\":\n path = os.path.join(\"/\", path)\n\n return path\n\n def validate_absolute_path(self, root, absolute_path):\n if os.path.isdir(absolute_path) and self.default_filename is not None:\n if not self.request.path.endswith(\"/\"):\n self.redirect(self.request.path + \"/\", permanent=True)\n return None\n\n absolute_path = os.path.join(absolute_path, self.default_filename)\n if not os.path.exists(absolute_path):\n raise HTTPError(404)\n\n if not os.path.isfile(absolute_path):\n raise HTTPError(403, \"%s is not a file\", self.path)\n\n return absolute_path\n\n\nclass Application(tornado.web.Application):\n 
\"\"\"FiftyOne Tornado Application\"\"\"\n\n def __init__(self, **settings):\n server_path = os.path.dirname(os.path.abspath(__file__))\n rel_web_path = \"static\"\n web_path = os.path.join(server_path, rel_web_path)\n handlers = [\n (r\"/colorscales\", ColorscalesHandler),\n (r\"/fiftyone\", FiftyOneHandler),\n (r\"/frames\", FramesHandler),\n (r\"/filepath/(.*)\", MediaHandler, {\"path\": \"\"},),\n (r\"/notebook\", NotebookHandler),\n (r\"/page\", PageHandler),\n (r\"/polling\", PollingHandler),\n (r\"/reactivate\", ReactivateHandler),\n (r\"/stages\", StagesHandler),\n (r\"/state\", StateHandler),\n (r\"/teams\", TeamsHandler),\n (\n r\"/(.*)\",\n FileHandler,\n {\"path\": web_path, \"default_filename\": \"index.html\"},\n ),\n ]\n super().__init__(handlers, **settings)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--port\", type=int, default=fo.config.default_app_port)\n parser.add_argument(\n \"--address\", type=str, default=fo.config.default_app_address\n )\n args = parser.parse_args()\n app = Application(debug=foc.DEV_INSTALL)\n app.listen(args.port, address=args.address)\n tornado.ioloop.IOLoop.current().start()\n", "path": "fiftyone/server/main.py" } ]
diff --git a/fiftyone/server/main.py b/fiftyone/server/main.py index 0fd89313b25..439f83b4369 100644 --- a/fiftyone/server/main.py +++ b/fiftyone/server/main.py @@ -377,6 +377,7 @@ async def post(self): message = {"state": StateHandler.state} if event in { + "count_values", "distinct", "distributions", "get_video_data",
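For context on this one-line fix: events listed in that set are answered directly on the polling HTTP request instead of being broadcast through the shared websocket state handler, so until `"count_values"` was added to the set, its reply never went back to the polling client. Below is a minimal, self-contained sketch of the routing — names are illustrative, condensed from `PollingHandler.post()` above, not the module's actual API:

```python
# Illustrative condensation of the event routing in PollingHandler.post().
DIRECT_REPLY_EVENTS = {"count_values", "distinct", "distributions",
                       "all_tags", "modal_statistics", "tag_statistics"}

def pick_caller(event, request_handler, session_client, state_handler):
    """Decide which object receives the response for a polled event."""
    if event in DIRECT_REPLY_EVENTS:
        return request_handler   # reply inline on this HTTP request
    if event in {"capture", "update"}:
        return session_client    # reply keyed to the session id
    return state_handler         # broadcast to all connected clients

# The fix adds "count_values" to the direct-reply set:
assert pick_caller("count_values", "req", "sess", "state") == "req"
```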
ansible__ansible-modules-core-3295
size parameter's `required` should be set to 'no' when state is absent in os_volume
##### Issue Type:
- Documentation Report

##### Plugin Name:
os_volume

##### Ansible Version:
```
2.4
```

##### Environment:
centos 6

##### Summary:
```python
def _absent_volume(module, cloud):
    try:
        cloud.delete_volume(
            name_or_id=module.params['display_name'],
            wait=module.params['wait'],
            timeout=module.params['timeout'])
```
There is no need to pass the size parameter when calling os_volume with state absent, because the delete_volume function does not take a size argument. The documentation at http://docs.ansible.com/ansible/os_volume_module.html needs to be updated so that size's required flag reads 'no' when state is absent.

##### Steps To Reproduce:
```yaml
- name: "Delete Volumes attached"
  os_volume:
    state: "absent"
    display_name: "{{ item.id }}"
    timeout: "360"
    auth:
      auth_url: "{{ openstack_auth_url }}"
      username: "{{ openstack_username }}"
      password: "{{ openstack_password }}"
      project_name: "{{ openstack_tenant }}"
  environment:
    OS_VOLUME_API_VERSION: "1"
    OS_IMAGE_API_VERSION: "1"
  security_groups: default
```
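To make the requested documentation change concrete, here is a minimal, self-contained sketch of the check the module performs, condensed from `cloud/openstack/os_volume.py` below (the function name is hypothetical; the real module calls `module.fail_json()` rather than raising): `size` is validated only on the create path, and the delete path never reads it.

```python
# Hypothetical condensation of the os_volume parameter validation.
def validate_params(state, size=None):
    # 'size' is only meaningful when creating a volume ...
    if state == 'present' and not size:
        raise ValueError("Size is required when state is 'present'")
    # ... while state == 'absent' deletes by name/id and ignores 'size'.

validate_params('present', size=40)  # create: size is required
validate_params('absent')            # delete: no size needed
```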
[ { "content": "#!/usr/bin/python\n\n# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.\n#\n# This module is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this software. If not, see <http://www.gnu.org/licenses/>.\n\n\ntry:\n import shade\n HAS_SHADE = True\nexcept ImportError:\n HAS_SHADE = False\n\n\nDOCUMENTATION = '''\n---\nmodule: os_volume\nshort_description: Create/Delete Cinder Volumes\nextends_documentation_fragment: openstack\nversion_added: \"2.0\"\nauthor: \"Monty Taylor (@emonty)\"\ndescription:\n - Create or Remove cinder block storage volumes\noptions:\n size:\n description:\n - Size of volume in GB\n required: only when state is 'present'\n default: None\n display_name:\n description:\n - Name of volume\n required: true\n display_description:\n description:\n - String describing the volume\n required: false\n default: None\n volume_type:\n description:\n - Volume type for volume\n required: false\n default: None\n image:\n description:\n - Image name or id for boot from volume\n required: false\n default: None\n snapshot_id:\n description:\n - Volume snapshot id to create from\n required: false\n default: None\n state:\n description:\n - Should the resource be present or absent.\n choices: [present, absent]\n default: present\nrequirements:\n - \"python >= 2.6\"\n - \"shade\"\n'''\n\nEXAMPLES = '''\n# Creates a new volume\n- name: create a volume\n hosts: localhost\n tasks:\n - name: create 40g test volume\n os_volume:\n state: present\n cloud: mordred\n availability_zone: az2\n size: 40\n display_name: test_volume\n'''\n\n\ndef _present_volume(module, cloud):\n if cloud.volume_exists(module.params['display_name']):\n v = cloud.get_volume(module.params['display_name'])\n module.exit_json(changed=False, id=v['id'], volume=v)\n\n volume_args = dict(\n size=module.params['size'],\n volume_type=module.params['volume_type'],\n display_name=module.params['display_name'],\n display_description=module.params['display_description'],\n snapshot_id=module.params['snapshot_id'],\n availability_zone=module.params['availability_zone'],\n )\n if module.params['image']:\n image_id = cloud.get_image_id(module.params['image'])\n volume_args['imageRef'] = image_id\n\n volume = cloud.create_volume(\n wait=module.params['wait'], timeout=module.params['timeout'],\n **volume_args)\n module.exit_json(changed=True, id=volume['id'], volume=volume)\n\n\ndef _absent_volume(module, cloud):\n try:\n cloud.delete_volume(\n name_or_id=module.params['display_name'],\n wait=module.params['wait'],\n timeout=module.params['timeout'])\n except shade.OpenStackCloudTimeout:\n module.exit_json(changed=False)\n module.exit_json(changed=True)\n\n\ndef main():\n argument_spec = openstack_full_argument_spec(\n size=dict(default=None),\n volume_type=dict(default=None),\n display_name=dict(required=True, aliases=['name']),\n display_description=dict(default=None, aliases=['description']),\n image=dict(default=None),\n snapshot_id=dict(default=None),\n state=dict(default='present', 
choices=['absent', 'present']),\n )\n module_kwargs = openstack_module_kwargs(\n mutually_exclusive=[\n ['image', 'snapshot_id'],\n ],\n )\n module = AnsibleModule(argument_spec=argument_spec, **module_kwargs)\n\n if not HAS_SHADE:\n module.fail_json(msg='shade is required for this module')\n\n state = module.params['state']\n\n if state == 'present' and not module.params['size']:\n module.fail_json(msg=\"Size is required when state is 'present'\")\n\n try:\n cloud = shade.openstack_cloud(**module.params)\n if state == 'present':\n _present_volume(module, cloud)\n if state == 'absent':\n _absent_volume(module, cloud)\n except shade.OpenStackCloudException as e:\n module.fail_json(msg=str(e))\n\n# this is magic, see lib/ansible/module_common.py\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.openstack import *\nif __name__ == '__main__':\n main()\n", "path": "cloud/openstack/os_volume.py" } ]
[ { "content": "#!/usr/bin/python\n\n# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.\n#\n# This module is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this software. If not, see <http://www.gnu.org/licenses/>.\n\n\ntry:\n import shade\n HAS_SHADE = True\nexcept ImportError:\n HAS_SHADE = False\n\n\nDOCUMENTATION = '''\n---\nmodule: os_volume\nshort_description: Create/Delete Cinder Volumes\nextends_documentation_fragment: openstack\nversion_added: \"2.0\"\nauthor: \"Monty Taylor (@emonty)\"\ndescription:\n - Create or Remove cinder block storage volumes\noptions:\n size:\n description:\n - Size of volume in GB. This parameter is required when the\n I(state) parameter is 'present'.\n required: false\n default: None\n display_name:\n description:\n - Name of volume\n required: true\n display_description:\n description:\n - String describing the volume\n required: false\n default: None\n volume_type:\n description:\n - Volume type for volume\n required: false\n default: None\n image:\n description:\n - Image name or id for boot from volume\n required: false\n default: None\n snapshot_id:\n description:\n - Volume snapshot id to create from\n required: false\n default: None\n state:\n description:\n - Should the resource be present or absent.\n choices: [present, absent]\n default: present\nrequirements:\n - \"python >= 2.6\"\n - \"shade\"\n'''\n\nEXAMPLES = '''\n# Creates a new volume\n- name: create a volume\n hosts: localhost\n tasks:\n - name: create 40g test volume\n os_volume:\n state: present\n cloud: mordred\n availability_zone: az2\n size: 40\n display_name: test_volume\n'''\n\n\ndef _present_volume(module, cloud):\n if cloud.volume_exists(module.params['display_name']):\n v = cloud.get_volume(module.params['display_name'])\n module.exit_json(changed=False, id=v['id'], volume=v)\n\n volume_args = dict(\n size=module.params['size'],\n volume_type=module.params['volume_type'],\n display_name=module.params['display_name'],\n display_description=module.params['display_description'],\n snapshot_id=module.params['snapshot_id'],\n availability_zone=module.params['availability_zone'],\n )\n if module.params['image']:\n image_id = cloud.get_image_id(module.params['image'])\n volume_args['imageRef'] = image_id\n\n volume = cloud.create_volume(\n wait=module.params['wait'], timeout=module.params['timeout'],\n **volume_args)\n module.exit_json(changed=True, id=volume['id'], volume=volume)\n\n\ndef _absent_volume(module, cloud):\n try:\n cloud.delete_volume(\n name_or_id=module.params['display_name'],\n wait=module.params['wait'],\n timeout=module.params['timeout'])\n except shade.OpenStackCloudTimeout:\n module.exit_json(changed=False)\n module.exit_json(changed=True)\n\n\ndef main():\n argument_spec = openstack_full_argument_spec(\n size=dict(default=None),\n volume_type=dict(default=None),\n display_name=dict(required=True, aliases=['name']),\n display_description=dict(default=None, aliases=['description']),\n image=dict(default=None),\n 
snapshot_id=dict(default=None),\n state=dict(default='present', choices=['absent', 'present']),\n )\n module_kwargs = openstack_module_kwargs(\n mutually_exclusive=[\n ['image', 'snapshot_id'],\n ],\n )\n module = AnsibleModule(argument_spec=argument_spec, **module_kwargs)\n\n if not HAS_SHADE:\n module.fail_json(msg='shade is required for this module')\n\n state = module.params['state']\n\n if state == 'present' and not module.params['size']:\n module.fail_json(msg=\"Size is required when state is 'present'\")\n\n try:\n cloud = shade.openstack_cloud(**module.params)\n if state == 'present':\n _present_volume(module, cloud)\n if state == 'absent':\n _absent_volume(module, cloud)\n except shade.OpenStackCloudException as e:\n module.fail_json(msg=str(e))\n\n# this is magic, see lib/ansible/module_common.py\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.openstack import *\nif __name__ == '__main__':\n main()\n", "path": "cloud/openstack/os_volume.py" } ]
diff --git a/cloud/openstack/os_volume.py b/cloud/openstack/os_volume.py index 5a3d0aacab0..9e7436e1a5f 100644 --- a/cloud/openstack/os_volume.py +++ b/cloud/openstack/os_volume.py @@ -35,8 +35,9 @@ options: size: description: - - Size of volume in GB - required: only when state is 'present' + - Size of volume in GB. This parameter is required when the + I(state) parameter is 'present'. + required: false default: None display_name: description:
wagtail__wagtail-6263
Missing SVG icons for optional rich text features
### Issue Summary

Wagtail doesn't provide SVG icons for optional rich text features such as strikethrough, superscript and subscript. It would also be good to provide an SVG icon for underline, even though that rich text feature is not implemented at the moment.

### Steps to Reproduce

1. Enable the "strikethrough" rich text feature:
```python
class RichTextBlock(blocks.RichTextBlock):
    def __init__(self, **kwargs):
        super().__init__(
            features=[
                'bold',
                'italic',
                'strikethrough',
            ],
            **kwargs)
```
2. Check the rich text editor menu and note the empty space where the icon is supposed to be.

![Screenshot 2020-07-28 at 13 30 58](https://user-images.githubusercontent.com/3457859/88666048-70f6f400-d0d7-11ea-8499-36f0f7989803.png)

* I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: Yes

### Technical details

* Python version: 3.8.3
* Django version: 3.0.8
* Wagtail version: 2.10rc1
* Browser version: Chrome 84
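For context, optional features like strikethrough are wired up through the `register_rich_text_features` hook shown in `wagtail_hooks.py` below. The sketch that follows is a minimal illustration of such a registration, not Wagtail's exact core code — the config keys and the `'strikethrough'` icon name are assumptions, and the missing SVG behind that icon name is precisely what this issue reports:

```python
import wagtail.admin.rich_text.editors.draftail.features as draftail_features
from wagtail.admin.rich_text.converters.html_to_contentstate import InlineStyleElementHandler
from wagtail.core import hooks


@hooks.register('register_rich_text_features')
def register_strikethrough_feature(features):
    # Register the Draftail toolbar button; 'icon' must name an icon the
    # admin actually ships, which is the gap this issue describes.
    features.register_editor_plugin(
        'draftail', 'strikethrough',
        draftail_features.InlineStyleFeature({
            'type': 'STRIKETHROUGH',
            'icon': 'strikethrough',
        })
    )
    # Map the Draftail style to/from the <s> element in the database format.
    features.register_converter_rule('contentstate', 'strikethrough', {
        'from_database_format': {'s': InlineStyleElementHandler('STRIKETHROUGH')},
        'to_database_format': {'style_map': {'STRIKETHROUGH': 's'}},
    })
```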
[ { "content": "from django.contrib.auth.models import Permission\nfrom django.urls import reverse\nfrom django.utils.http import urlencode\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils.translation import gettext\nfrom draftjs_exporter.dom import DOM\n\nimport wagtail.admin.rich_text.editors.draftail.features as draftail_features\nfrom wagtail.admin.auth import user_has_any_page_permission\nfrom wagtail.admin.localization import get_available_admin_languages, get_available_admin_time_zones\nfrom wagtail.admin.menu import MenuItem, SubmenuMenuItem, reports_menu, settings_menu\nfrom wagtail.admin.navigation import get_explorable_root_page\nfrom wagtail.admin.rich_text import (\n HalloFormatPlugin, HalloHeadingPlugin, HalloListPlugin, HalloPlugin)\nfrom wagtail.admin.rich_text.converters.contentstate import link_entity\nfrom wagtail.admin.rich_text.converters.editor_html import (\n LinkTypeRule, PageLinkHandler, WhitelistRule)\nfrom wagtail.admin.rich_text.converters.html_to_contentstate import (\n BlockElementHandler, ExternalLinkElementHandler, HorizontalRuleHandler,\n InlineStyleElementHandler, ListElementHandler, ListItemElementHandler, PageLinkElementHandler)\nfrom wagtail.admin.search import SearchArea\nfrom wagtail.admin.site_summary import PagesSummaryItem\nfrom wagtail.admin.views.account import email_management_enabled, password_management_enabled\nfrom wagtail.admin.viewsets import viewsets\nfrom wagtail.admin.widgets import Button, ButtonWithDropdownFromHook, PageListingButton\nfrom wagtail.core import hooks\nfrom wagtail.core.models import UserPagePermissionsProxy\nfrom wagtail.core.permissions import (\n collection_permission_policy, task_permission_policy, workflow_permission_policy)\nfrom wagtail.core.whitelist import allow_without_attributes, attribute_rule, check_url\n\n\nclass ExplorerMenuItem(MenuItem):\n template = 'wagtailadmin/shared/explorer_menu_item.html'\n\n def is_shown(self, request):\n return user_has_any_page_permission(request.user)\n\n def get_context(self, request):\n context = super().get_context(request)\n start_page = get_explorable_root_page(request.user)\n\n if start_page:\n context['start_page_id'] = start_page.id\n\n return context\n\n\[email protected]('register_admin_menu_item')\ndef register_explorer_menu_item():\n return ExplorerMenuItem(\n _('Pages'), reverse('wagtailadmin_explore_root'),\n name='explorer',\n icon_name='folder-open-inverse',\n order=100)\n\n\nclass SettingsMenuItem(SubmenuMenuItem):\n template = 'wagtailadmin/shared/menu_settings_menu_item.html'\n\n\[email protected]('register_admin_menu_item')\ndef register_settings_menu():\n return SettingsMenuItem(\n _('Settings'),\n settings_menu,\n icon_name='cogs',\n order=10000)\n\n\[email protected]('register_permissions')\ndef register_permissions():\n return Permission.objects.filter(content_type__app_label='wagtailadmin', codename='access_admin')\n\n\nclass PageSearchArea(SearchArea):\n def __init__(self):\n super().__init__(\n _('Pages'), reverse('wagtailadmin_pages:search'),\n name='pages',\n classnames='icon icon-folder-open-inverse',\n order=100)\n\n def is_shown(self, request):\n return user_has_any_page_permission(request.user)\n\n\[email protected]('register_admin_search_area')\ndef register_pages_search_area():\n return PageSearchArea()\n\n\nclass CollectionsMenuItem(MenuItem):\n def is_shown(self, request):\n return collection_permission_policy.user_has_any_permission(\n request.user, ['add', 'change', 'delete']\n )\n\n\[email 
protected]('register_settings_menu_item')\ndef register_collections_menu_item():\n return CollectionsMenuItem(_('Collections'), reverse('wagtailadmin_collections:index'), icon_name='folder-open-1', order=700)\n\n\nclass WorkflowsMenuItem(MenuItem):\n def is_shown(self, request):\n return workflow_permission_policy.user_has_any_permission(\n request.user, ['add', 'change', 'delete']\n )\n\n\nclass WorkflowTasksMenuItem(MenuItem):\n def is_shown(self, request):\n return task_permission_policy.user_has_any_permission(\n request.user, ['add', 'change', 'delete']\n )\n\n\[email protected]('register_settings_menu_item')\ndef register_workflows_menu_item():\n return WorkflowsMenuItem(_('Workflows'), reverse('wagtailadmin_workflows:index'), icon_name='tasks', order=100)\n\n\[email protected]('register_settings_menu_item')\ndef register_workflow_tasks_menu_item():\n return WorkflowTasksMenuItem(_('Workflow tasks'), reverse('wagtailadmin_workflows:task_index'), icon_name='thumbtack', order=150)\n\n\[email protected]('register_page_listing_buttons')\ndef page_listing_buttons(page, page_perms, is_parent=False, next_url=None):\n if page_perms.can_edit():\n yield PageListingButton(\n _('Edit'),\n reverse('wagtailadmin_pages:edit', args=[page.id]),\n attrs={'aria-label': _(\"Edit '%(title)s'\") % {'title': page.get_admin_display_title()}},\n priority=10\n )\n if page.has_unpublished_changes and page.is_previewable():\n yield PageListingButton(\n _('View draft'),\n reverse('wagtailadmin_pages:view_draft', args=[page.id]),\n attrs={\n 'aria-label': _(\"Preview draft version of '%(title)s'\") % {'title': page.get_admin_display_title()},\n 'target': '_blank', 'rel': 'noopener noreferrer'\n },\n priority=20\n )\n if page.live and page.url:\n yield PageListingButton(\n _('View live'),\n page.url,\n attrs={\n 'target': \"_blank\", 'rel': 'noopener noreferrer',\n 'aria-label': _(\"View live version of '%(title)s'\") % {'title': page.get_admin_display_title()},\n },\n priority=30\n )\n if page_perms.can_add_subpage():\n if is_parent:\n yield Button(\n _('Add child page'),\n reverse('wagtailadmin_pages:add_subpage', args=[page.id]),\n attrs={\n 'aria-label': _(\"Add a child page to '%(title)s' \") % {'title': page.get_admin_display_title()},\n },\n classes={'button', 'button-small', 'bicolor', 'icon', 'white', 'icon-plus'},\n priority=40\n )\n else:\n yield PageListingButton(\n _('Add child page'),\n reverse('wagtailadmin_pages:add_subpage', args=[page.id]),\n attrs={'aria-label': _(\"Add a child page to '%(title)s' \") % {'title': page.get_admin_display_title()}},\n priority=40\n )\n\n yield ButtonWithDropdownFromHook(\n _('More'),\n hook_name='register_page_listing_more_buttons',\n page=page,\n page_perms=page_perms,\n is_parent=is_parent,\n next_url=next_url,\n attrs={\n 'target': '_blank', 'rel': 'noopener noreferrer',\n 'title': _(\"View more options for '%(title)s'\") % {'title': page.get_admin_display_title()}\n },\n priority=50\n )\n\n\[email protected]('register_page_listing_more_buttons')\ndef page_listing_more_buttons(page, page_perms, is_parent=False, next_url=None):\n if page_perms.can_move():\n yield Button(\n _('Move'),\n reverse('wagtailadmin_pages:move', args=[page.id]),\n attrs={\"title\": _(\"Move page '%(title)s'\") % {'title': page.get_admin_display_title()}},\n priority=10\n )\n if page_perms.can_copy():\n url = reverse('wagtailadmin_pages:copy', args=[page.id])\n if next_url:\n url += '?' 
+ urlencode({'next': next_url})\n\n urlencode\n yield Button(\n _('Copy'),\n url,\n attrs={'title': _(\"Copy page '%(title)s'\") % {'title': page.get_admin_display_title()}},\n priority=20\n )\n if page_perms.can_delete():\n url = reverse('wagtailadmin_pages:delete', args=[page.id])\n if next_url:\n url += '?' + urlencode({'next': next_url})\n\n yield Button(\n _('Delete'),\n url,\n attrs={'title': _(\"Delete page '%(title)s'\") % {'title': page.get_admin_display_title()}},\n priority=30\n )\n if page_perms.can_unpublish():\n url = reverse('wagtailadmin_pages:unpublish', args=[page.id])\n if next_url:\n url += '?' + urlencode({'next': next_url})\n\n yield Button(\n _('Unpublish'),\n url,\n attrs={'title': _(\"Unpublish page '%(title)s'\") % {'title': page.get_admin_display_title()}},\n priority=40\n )\n\n if page_perms.can_view_revisions():\n yield Button(\n _('History'),\n reverse('wagtailadmin_pages:history', args=[page.id]),\n attrs={'title': _(\"View page history for '%(title)s'\") % {'title': page.get_admin_display_title()}},\n priority=50\n )\n\n\[email protected]('register_admin_urls')\ndef register_viewsets_urls():\n viewsets.populate()\n return viewsets.get_urlpatterns()\n\n\[email protected]('register_account_menu_item')\ndef register_account_set_profile_picture(request):\n return {\n 'url': reverse('wagtailadmin_account_change_avatar'),\n 'label': _('Set profile picture'),\n 'help_text': _(\"Change your profile picture.\")\n }\n\n\[email protected]('register_account_menu_item')\ndef register_account_change_email(request):\n if email_management_enabled():\n return {\n 'url': reverse('wagtailadmin_account_change_email'),\n 'label': _('Change email'),\n 'help_text': _('Change the email address linked to your account.'),\n }\n\n\[email protected]('register_account_menu_item')\ndef register_account_change_password(request):\n if password_management_enabled() and request.user.has_usable_password():\n return {\n 'url': reverse('wagtailadmin_account_change_password'),\n 'label': _('Change password'),\n 'help_text': _('Change the password you use to log in.'),\n }\n\n\[email protected]('register_account_menu_item')\ndef register_account_notification_preferences(request):\n user_perms = UserPagePermissionsProxy(request.user)\n if user_perms.can_edit_pages() or user_perms.can_publish_pages():\n return {\n 'url': reverse('wagtailadmin_account_notification_preferences'),\n 'label': _('Notification preferences'),\n 'help_text': _('Choose which email notifications to receive.'),\n }\n\n\[email protected]('register_account_menu_item')\ndef register_account_preferred_language_preferences(request):\n if len(get_available_admin_languages()) > 1:\n return {\n 'url': reverse('wagtailadmin_account_language_preferences'),\n 'label': _('Language preferences'),\n 'help_text': _('Choose the language you want to use here.'),\n }\n\n\[email protected]('register_account_menu_item')\ndef register_account_current_time_zone(request):\n if len(get_available_admin_time_zones()) > 1:\n return {\n 'url': reverse('wagtailadmin_account_current_time_zone'),\n 'label': _('Current Time Zone'),\n 'help_text': _('Choose your current time zone.'),\n }\n\n\[email protected]('register_account_menu_item')\ndef register_account_change_name(request):\n return {\n 'url': reverse('wagtailadmin_account_change_name'),\n 'label': _('Change name'),\n 'help_text': _('Change your first and last name on your account.'),\n }\n\n\[email protected]('register_rich_text_features')\ndef register_core_features(features):\n # Hallo.js\n 
features.register_editor_plugin(\n 'hallo', 'hr',\n HalloPlugin(\n name='hallohr',\n js=['wagtailadmin/js/hallo-plugins/hallo-hr.js'],\n order=45,\n )\n )\n features.register_converter_rule('editorhtml', 'hr', [\n WhitelistRule('hr', allow_without_attributes)\n ])\n\n features.register_editor_plugin(\n 'hallo', 'link',\n HalloPlugin(\n name='hallowagtaillink',\n js=[\n 'wagtailadmin/js/page-chooser-modal.js',\n 'wagtailadmin/js/hallo-plugins/hallo-wagtaillink.js',\n ],\n )\n )\n features.register_converter_rule('editorhtml', 'link', [\n WhitelistRule('a', attribute_rule({'href': check_url})),\n LinkTypeRule('page', PageLinkHandler),\n ])\n\n features.register_editor_plugin(\n 'hallo', 'bold', HalloFormatPlugin(format_name='bold')\n )\n features.register_converter_rule('editorhtml', 'bold', [\n WhitelistRule('b', allow_without_attributes),\n WhitelistRule('strong', allow_without_attributes),\n ])\n\n features.register_editor_plugin(\n 'hallo', 'italic', HalloFormatPlugin(format_name='italic')\n )\n features.register_converter_rule('editorhtml', 'italic', [\n WhitelistRule('i', allow_without_attributes),\n WhitelistRule('em', allow_without_attributes),\n ])\n\n headings_elements = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']\n headings_order_start = HalloHeadingPlugin.default_order + 1\n for order, element in enumerate(headings_elements, start=headings_order_start):\n features.register_editor_plugin(\n 'hallo', element, HalloHeadingPlugin(element=element, order=order)\n )\n features.register_converter_rule('editorhtml', element, [\n WhitelistRule(element, allow_without_attributes)\n ])\n\n features.register_editor_plugin(\n 'hallo', 'ol', HalloListPlugin(list_type='ordered')\n )\n features.register_converter_rule('editorhtml', 'ol', [\n WhitelistRule('ol', allow_without_attributes),\n WhitelistRule('li', allow_without_attributes),\n ])\n\n features.register_editor_plugin(\n 'hallo', 'ul', HalloListPlugin(list_type='unordered')\n )\n features.register_converter_rule('editorhtml', 'ul', [\n WhitelistRule('ul', allow_without_attributes),\n WhitelistRule('li', allow_without_attributes),\n ])\n\n # Draftail\n features.register_editor_plugin(\n 'draftail', 'hr', draftail_features.BooleanFeature('enableHorizontalRule')\n )\n features.register_converter_rule('contentstate', 'hr', {\n 'from_database_format': {\n 'hr': HorizontalRuleHandler(),\n },\n 'to_database_format': {\n 'entity_decorators': {'HORIZONTAL_RULE': lambda props: DOM.create_element('hr')}\n }\n })\n\n features.register_editor_plugin(\n 'draftail', 'h1', draftail_features.BlockFeature({\n 'label': 'H1',\n 'type': 'header-one',\n 'description': gettext('Heading %(level)d') % {'level': 1},\n })\n )\n features.register_converter_rule('contentstate', 'h1', {\n 'from_database_format': {\n 'h1': BlockElementHandler('header-one'),\n },\n 'to_database_format': {\n 'block_map': {'header-one': 'h1'}\n }\n })\n features.register_editor_plugin(\n 'draftail', 'h2', draftail_features.BlockFeature({\n 'label': 'H2',\n 'type': 'header-two',\n 'description': gettext('Heading %(level)d') % {'level': 2},\n })\n )\n features.register_converter_rule('contentstate', 'h2', {\n 'from_database_format': {\n 'h2': BlockElementHandler('header-two'),\n },\n 'to_database_format': {\n 'block_map': {'header-two': 'h2'}\n }\n })\n features.register_editor_plugin(\n 'draftail', 'h3', draftail_features.BlockFeature({\n 'label': 'H3',\n 'type': 'header-three',\n 'description': gettext('Heading %(level)d') % {'level': 3},\n })\n )\n 
features.register_converter_rule('contentstate', 'h3', {\n 'from_database_format': {\n 'h3': BlockElementHandler('header-three'),\n },\n 'to_database_format': {\n 'block_map': {'header-three': 'h3'}\n }\n })\n features.register_editor_plugin(\n 'draftail', 'h4', draftail_features.BlockFeature({\n 'label': 'H4',\n 'type': 'header-four',\n 'description': gettext('Heading %(level)d') % {'level': 4},\n })\n )\n features.register_converter_rule('contentstate', 'h4', {\n 'from_database_format': {\n 'h4': BlockElementHandler('header-four'),\n },\n 'to_database_format': {\n 'block_map': {'header-four': 'h4'}\n }\n })\n features.register_editor_plugin(\n 'draftail', 'h5', draftail_features.BlockFeature({\n 'label': 'H5',\n 'type': 'header-five',\n 'description': gettext('Heading %(level)d') % {'level': 5},\n })\n )\n features.register_converter_rule('contentstate', 'h5', {\n 'from_database_format': {\n 'h5': BlockElementHandler('header-five'),\n },\n 'to_database_format': {\n 'block_map': {'header-five': 'h5'}\n }\n })\n features.register_editor_plugin(\n 'draftail', 'h6', draftail_features.BlockFeature({\n 'label': 'H6',\n 'type': 'header-six',\n 'description': gettext('Heading %(level)d') % {'level': 6},\n })\n )\n features.register_converter_rule('contentstate', 'h6', {\n 'from_database_format': {\n 'h6': BlockElementHandler('header-six'),\n },\n 'to_database_format': {\n 'block_map': {'header-six': 'h6'}\n }\n })\n features.register_editor_plugin(\n 'draftail', 'ul', draftail_features.BlockFeature({\n 'type': 'unordered-list-item',\n 'icon': 'list-ul',\n 'description': gettext('Bulleted list'),\n })\n )\n features.register_converter_rule('contentstate', 'ul', {\n 'from_database_format': {\n 'ul': ListElementHandler('unordered-list-item'),\n 'li': ListItemElementHandler(),\n },\n 'to_database_format': {\n 'block_map': {'unordered-list-item': {'element': 'li', 'wrapper': 'ul'}}\n }\n })\n features.register_editor_plugin(\n 'draftail', 'ol', draftail_features.BlockFeature({\n 'type': 'ordered-list-item',\n 'icon': 'list-ol',\n 'description': gettext('Numbered list'),\n })\n )\n features.register_converter_rule('contentstate', 'ol', {\n 'from_database_format': {\n 'ol': ListElementHandler('ordered-list-item'),\n 'li': ListItemElementHandler(),\n },\n 'to_database_format': {\n 'block_map': {'ordered-list-item': {'element': 'li', 'wrapper': 'ol'}}\n }\n })\n features.register_editor_plugin(\n 'draftail', 'blockquote', draftail_features.BlockFeature({\n 'type': 'blockquote',\n 'icon': 'openquote',\n 'description': gettext('Blockquote'),\n })\n )\n features.register_converter_rule('contentstate', 'blockquote', {\n 'from_database_format': {\n 'blockquote': BlockElementHandler('blockquote'),\n },\n 'to_database_format': {\n 'block_map': {'blockquote': 'blockquote'}\n }\n })\n\n features.register_editor_plugin(\n 'draftail', 'bold', draftail_features.InlineStyleFeature({\n 'type': 'BOLD',\n 'icon': 'bold',\n 'description': gettext('Bold'),\n })\n )\n features.register_converter_rule('contentstate', 'bold', {\n 'from_database_format': {\n 'b': InlineStyleElementHandler('BOLD'),\n 'strong': InlineStyleElementHandler('BOLD'),\n },\n 'to_database_format': {\n 'style_map': {'BOLD': 'b'}\n }\n })\n features.register_editor_plugin(\n 'draftail', 'italic', draftail_features.InlineStyleFeature({\n 'type': 'ITALIC',\n 'icon': 'italic',\n 'description': gettext('Italic'),\n })\n )\n features.register_converter_rule('contentstate', 'italic', {\n 'from_database_format': {\n 'i': InlineStyleElementHandler('ITALIC'),\n 
'em': InlineStyleElementHandler('ITALIC'),\n        },\n        'to_database_format': {\n            'style_map': {'ITALIC': 'i'}\n        }\n    })\n\n    features.register_editor_plugin(\n        'draftail', 'link', draftail_features.EntityFeature({\n            'type': 'LINK',\n            'icon': 'link',\n            'description': gettext('Link'),\n            # We want to enforce constraints on which links can be pasted into rich text.\n            # Keep only the attributes Wagtail needs.\n            'attributes': ['url', 'id', 'parentId'],\n            'whitelist': {\n                # Keep pasted links with http/https protocol, and not-pasted links (href = undefined).\n                'href': \"^(http:|https:|undefined$)\",\n            }\n        }, js=[\n            'wagtailadmin/js/page-chooser-modal.js',\n        ])\n    )\n    features.register_converter_rule('contentstate', 'link', {\n        'from_database_format': {\n            'a[href]': ExternalLinkElementHandler('LINK'),\n            'a[linktype=\"page\"]': PageLinkElementHandler('LINK'),\n        },\n        'to_database_format': {\n            'entity_decorators': {'LINK': link_entity}\n        }\n    })\n
    features.register_editor_plugin(\n        'draftail', 'superscript', draftail_features.InlineStyleFeature({\n            'type': 'SUPERSCRIPT',\n            'icon': 'superscript',\n            'description': gettext('Superscript'),\n        })\n    )\n    features.register_converter_rule('contentstate', 'superscript', {\n        'from_database_format': {\n            'sup': InlineStyleElementHandler('SUPERSCRIPT'),\n        },\n        'to_database_format': {\n            'style_map': {'SUPERSCRIPT': 'sup'}\n        }\n    })\n
    features.register_editor_plugin(\n        'draftail', 'subscript', draftail_features.InlineStyleFeature({\n            'type': 'SUBSCRIPT',\n            'icon': 'subscript',\n            'description': gettext('Subscript'),\n        })\n    )\n    features.register_converter_rule('contentstate', 'subscript', {\n        'from_database_format': {\n            'sub': InlineStyleElementHandler('SUBSCRIPT'),\n        },\n        'to_database_format': {\n            'style_map': {'SUBSCRIPT': 'sub'}\n        }\n    })\n
    features.register_editor_plugin(\n        'draftail', 'strikethrough', draftail_features.InlineStyleFeature({\n            'type': 'STRIKETHROUGH',\n            'icon': 'strikethrough',\n            'description': gettext('Strikethrough'),\n        })\n    )\n    features.register_converter_rule('contentstate', 'strikethrough', {\n        'from_database_format': {\n            's': InlineStyleElementHandler('STRIKETHROUGH'),\n        },\n        'to_database_format': {\n            'style_map': {'STRIKETHROUGH': 's'}\n        }\n    })\n
    features.register_editor_plugin(\n        'draftail', 'code', draftail_features.InlineStyleFeature({\n            'type': 'CODE',\n            'icon': 'code',\n            'description': gettext('Code'),\n        })\n    )\n    features.register_converter_rule('contentstate', 'code', {\n        'from_database_format': {\n            'code': InlineStyleElementHandler('CODE'),\n        },\n        'to_database_format': {\n            'style_map': {'CODE': 'code'}\n        }\n    })\n\n\n
class ReportsMenuItem(SubmenuMenuItem):\n    template = 'wagtailadmin/shared/menu_submenu_item.html'\n\n\n
class LockedPagesMenuItem(MenuItem):\n    def is_shown(self, request):\n        return UserPagePermissionsProxy(request.user).can_remove_locks()\n\n\n
class WorkflowReportMenuItem(MenuItem):\n    def is_shown(self, request):\n        return True\n\n\n
class SiteHistoryReportMenuItem(MenuItem):\n    def is_shown(self, request):\n        return UserPagePermissionsProxy(request.user).explorable_pages().exists()\n\n\n
@hooks.register('register_reports_menu_item')\ndef register_locked_pages_menu_item():\n    return LockedPagesMenuItem(_('Locked Pages'), reverse('wagtailadmin_reports:locked_pages'), icon_name='lock', order=700)\n\n\n
@hooks.register('register_reports_menu_item')\ndef register_workflow_report_menu_item():\n    return WorkflowReportMenuItem(_('Workflows'), reverse('wagtailadmin_reports:workflow'), icon_name='tasks', order=800)\n\n\n
@hooks.register('register_reports_menu_item')\ndef register_workflow_tasks_report_menu_item():\n    return WorkflowReportMenuItem(_('Workflow tasks'), reverse('wagtailadmin_reports:workflow_tasks'), icon_name='thumbtack', order=900)\n\n\n
@hooks.register('register_reports_menu_item')\ndef register_site_history_report_menu_item():\n    return SiteHistoryReportMenuItem(_('Site history'), reverse('wagtailadmin_reports:site_history'), icon_name='history', order=1000)\n\n\n
@hooks.register('register_admin_menu_item')\ndef register_reports_menu():\n    return ReportsMenuItem(\n        _('Reports'), reports_menu, classnames='icon icon-site', order=9000)\n\n\n
@hooks.register('register_icons')\ndef register_icons(icons):\n    for icon in [\n        'arrow-down-big.svg',\n        'arrow-down.svg',\n        'arrow-left.svg',\n        'arrow-right.svg',\n        'arrow-up-big.svg',\n        'arrow-up.svg',\n        'arrows-up-down.svg',\n        'bin.svg',\n        'bold.svg',\n        'chain-broken.svg',\n        'clipboard-list.svg',\n        'code.svg',\n        'cog.svg',\n        'cogs.svg',\n        'collapse-down.svg',\n        'collapse-up.svg',\n        'cross.svg',\n        'date.svg',\n        'doc-empty-inverse.svg',\n        'doc-empty.svg',\n        'doc-full-inverse.svg',\n        'doc-full.svg', # aka file-text-alt\n        'download-alt.svg',\n        'download.svg',\n        'draft.svg',\n        'duplicate.svg',\n        'edit.svg',\n        'error.svg',\n        'folder-inverse.svg',\n        'folder-open-1.svg',\n        'folder-open-inverse.svg',\n        'folder.svg',\n        'form.svg',\n        'grip.svg',\n        'group.svg',\n        'help.svg',\n        'history.svg',\n        'home.svg',\n        'horizontalrule.svg',\n        'image.svg', # aka picture\n        'italic.svg',\n        'link.svg',\n        'link-external.svg',\n        'list-ol.svg',\n        'list-ul.svg',\n        'lock-open.svg',\n        'lock.svg',\n        'login.svg',\n        'logout.svg',\n        'mail.svg',\n        'media.svg',\n        'no-view.svg',\n        'openquote.svg',\n        'order-down.svg',\n        'order-up.svg',\n        'order.svg',\n        'password.svg',\n        'pick.svg',\n        'pilcrow.svg',\n        'placeholder.svg', # aka marquee\n        'plus-inverse.svg',\n        'plus.svg',\n        'radio-empty.svg',\n        'radio-full.svg',\n        'redirect.svg',\n        'repeat.svg',\n        'reset.svg',\n        'resubmit.svg',\n        'search.svg',\n        'site.svg',\n        'snippet.svg',\n        'spinner.svg',\n        'success.svg',\n        'table.svg',\n        'tag.svg',\n        'tasks.svg',\n        'thumbtack.svg',\n        'tick-inverse.svg',\n        'tick.svg',\n        'time.svg',\n        'title.svg',\n        'undo.svg',\n        'uni52.svg', # Is this a redundant icon?\n        'upload.svg',\n        'user.svg',\n        'view.svg',\n        'wagtail-inverse.svg',\n        'wagtail.svg',\n        'warning.svg',\n    ]:\n        icons.append('wagtailadmin/icons/{}'.format(icon))\n    return icons\n\n\n
@hooks.register('construct_homepage_summary_items')\ndef add_pages_summary_item(request, items):\n    items.insert(0, PagesSummaryItem(request))\n\n\n
@hooks.register('register_log_actions')\ndef register_core_log_actions(actions):\n    actions.register_action('wagtail.create', _('Create'), _('Created'))\n    actions.register_action('wagtail.edit', _('Save draft'), _('Draft saved'))\n    actions.register_action('wagtail.delete', _('Delete'), _('Deleted'))\n    actions.register_action('wagtail.publish', _('Publish'), _('Published'))\n    actions.register_action('wagtail.publish.scheduled', _(\"Publish scheduled draft\"), _('Published scheduled draft'))\n    actions.register_action('wagtail.unpublish', _('Unpublish'), _('Unpublished'))\n    actions.register_action('wagtail.unpublish.scheduled', _('Unpublish scheduled draft'), _('Unpublished scheduled draft'))\n    actions.register_action('wagtail.lock', _('Lock'), _('Locked'))\n    actions.register_action('wagtail.unlock', _('Unlock'), _('Unlocked'))\n    actions.register_action('wagtail.moderation.approve', _('Approve'), _('Approved'))\n    actions.register_action('wagtail.moderation.reject', _('Reject'), _('Rejected'))\n\n
    def revert_message(data):\n        try:\n            return _('Reverted to previous revision with id %(revision_id)s from %(created_at)s') % {\n                'revision_id': data['revision']['id'],\n                'created_at': data['revision']['created'],\n            }\n        except KeyError:\n            return _('Reverted to previous revision')\n\n
    def copy_message(data):\n        try:\n            return _('Copied from %(title)s') % {\n                'title': data['source']['title'],\n            }\n        except KeyError:\n            return _(\"Copied\")\n\n
    def move_message(data):\n        try:\n            return _(\"Moved from '%(old_parent)s' to '%(new_parent)s'\") % {\n                'old_parent': data['source']['title'],\n                'new_parent': data['destination']['title'],\n            }\n        except KeyError:\n            return _('Moved')\n\n
    def schedule_publish_message(data):\n        try:\n            if data['revision']['has_live_version']:\n                return _('Revision %(revision_id)s from %(created_at)s scheduled for publishing at %(go_live_at)s.') % {\n                    'revision_id': data['revision']['id'],\n                    'created_at': data['revision']['created'],\n                    'go_live_at': data['revision']['go_live_at'],\n                }\n            else:\n                return _('Page scheduled for publishing at %(go_live_at)s') % {\n                    'go_live_at': data['revision']['go_live_at'],\n                }\n        except KeyError:\n            return _('Page scheduled for publishing')\n\n
    def unschedule_publish_message(data):\n        try:\n            if data['revision']['has_live_version']:\n                return _('Revision %(revision_id)s from %(created_at)s unscheduled from publishing at %(go_live_at)s.') % {\n                    'revision_id': data['revision']['id'],\n                    'created_at': data['revision']['created'],\n                    'go_live_at': data['revision']['go_live_at'],\n                }\n            else:\n                return _('Page unscheduled for publishing at %(go_live_at)s') % {\n                    'go_live_at': data['revision']['go_live_at'],\n                }\n        except KeyError:\n            return _('Page unscheduled from publishing')\n\n
    def add_view_restriction(data):\n        try:\n            return _(\"Added the '%(restriction)s' view restriction\") % {\n                'restriction': data['restriction']['title'],\n            }\n        except KeyError:\n            return _('Added view restriction')\n\n
    def edit_view_restriction(data):\n        try:\n            return _(\"Updated the view restriction to '%(restriction)s'\") % {\n                'restriction': data['restriction']['title'],\n            }\n        except KeyError:\n            return _('Updated view restriction')\n\n
    def delete_view_restriction(data):\n        try:\n            return _(\"Removed the '%(restriction)s' view restriction\") % {\n                'restriction': data['restriction']['title'],\n            }\n        except KeyError:\n            return _('Removed view restriction')\n\n
    def rename_message(data):\n        try:\n            return _(\"Renamed from '%(old)s' to '%(new)s'\") % {\n                'old': data['title']['old'],\n                'new': data['title']['new'],\n            }\n        except KeyError:\n            return _('Renamed')\n\n
    actions.register_action('wagtail.rename', _('Rename'), rename_message)\n    actions.register_action('wagtail.revert', _('Revert'), revert_message)\n    actions.register_action('wagtail.copy', _('Copy'), copy_message)\n    actions.register_action('wagtail.move', _('Move'), move_message)\n    actions.register_action('wagtail.publish.schedule', _(\"Schedule publication\"), schedule_publish_message)\n    actions.register_action('wagtail.schedule.cancel', _(\"Unschedule publication\"), unschedule_publish_message)\n    actions.register_action('wagtail.view_restriction.create', _(\"Add view restrictions\"), add_view_restriction)\n    actions.register_action('wagtail.view_restriction.edit', _(\"Update view restrictions\"), edit_view_restriction)\n    actions.register_action('wagtail.view_restriction.delete', _(\"Remove view restrictions\"), delete_view_restriction)\n\n\n
@hooks.register('register_log_actions')\ndef register_workflow_log_actions(actions):\n    def workflow_start_message(data):\n        try:\n            return _(\"'%(workflow)s' started. Next step '%(task)s'\") % {\n                'workflow': data['workflow']['title'],\n                'task': data['workflow']['next']['title'],\n            }\n        except (KeyError, TypeError):\n            return _('Workflow started')\n\n
    def workflow_approve_message(data):\n        try:\n            if data['workflow']['next']:\n                return _(\"Approved at '%(task)s'. Next step '%(next_task)s'\") % {\n                    'task': data['workflow']['task']['title'],\n                    'next_task': data['workflow']['next']['title'],\n                }\n            else:\n                return _(\"Approved at '%(task)s'. '%(workflow)s' complete\") % {\n                    'task': data['workflow']['task']['title'],\n                    'workflow': data['workflow']['title'],\n                }\n        except (KeyError, TypeError):\n            return _('Workflow task approved')\n\n
    def workflow_reject_message(data):\n        try:\n            return _(\"Rejected at '%(task)s'. Changes requested\") % {\n                'task': data['workflow']['task']['title'],\n            }\n        except (KeyError, TypeError):\n            return _('Workflow task rejected. Workflow complete')\n\n
    def workflow_resume_message(data):\n        try:\n            return _(\"Resubmitted '%(task)s'. Workflow resumed'\") % {\n                'task': data['workflow']['task']['title'],\n            }\n        except (KeyError, TypeError):\n            return _('Workflow task resubmitted. Workflow resumed')\n\n
    def workflow_cancel_message(data):\n        try:\n            return _(\"Cancelled '%(workflow)s' at '%(task)s'\") % {\n                'workflow': data['workflow']['title'],\n                'task': data['workflow']['task']['title'],\n            }\n        except (KeyError, TypeError):\n            return _('Workflow cancelled')\n\n
    actions.register_action('wagtail.workflow.start', _('Workflow: start'), workflow_start_message)\n    actions.register_action('wagtail.workflow.approve', _('Workflow: approve task'), workflow_approve_message)\n    actions.register_action('wagtail.workflow.reject', _('Workflow: reject task'), workflow_reject_message)\n    actions.register_action('wagtail.workflow.resume', _('Workflow: resume task'), workflow_resume_message)\n    actions.register_action('wagtail.workflow.cancel', _('Workflow: cancel'), workflow_cancel_message)\n", "path": "wagtail/admin/wagtail_hooks.py" } ]
[ { "content": "from django.contrib.auth.models import Permission\nfrom django.urls import reverse\nfrom django.utils.http import urlencode\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils.translation import gettext\nfrom draftjs_exporter.dom import DOM\n\nimport wagtail.admin.rich_text.editors.draftail.features as draftail_features\nfrom wagtail.admin.auth import user_has_any_page_permission\nfrom wagtail.admin.localization import get_available_admin_languages, get_available_admin_time_zones\nfrom wagtail.admin.menu import MenuItem, SubmenuMenuItem, reports_menu, settings_menu\nfrom wagtail.admin.navigation import get_explorable_root_page\nfrom wagtail.admin.rich_text import (\n HalloFormatPlugin, HalloHeadingPlugin, HalloListPlugin, HalloPlugin)\nfrom wagtail.admin.rich_text.converters.contentstate import link_entity\nfrom wagtail.admin.rich_text.converters.editor_html import (\n LinkTypeRule, PageLinkHandler, WhitelistRule)\nfrom wagtail.admin.rich_text.converters.html_to_contentstate import (\n BlockElementHandler, ExternalLinkElementHandler, HorizontalRuleHandler,\n InlineStyleElementHandler, ListElementHandler, ListItemElementHandler, PageLinkElementHandler)\nfrom wagtail.admin.search import SearchArea\nfrom wagtail.admin.site_summary import PagesSummaryItem\nfrom wagtail.admin.views.account import email_management_enabled, password_management_enabled\nfrom wagtail.admin.viewsets import viewsets\nfrom wagtail.admin.widgets import Button, ButtonWithDropdownFromHook, PageListingButton\nfrom wagtail.core import hooks\nfrom wagtail.core.models import UserPagePermissionsProxy\nfrom wagtail.core.permissions import (\n collection_permission_policy, task_permission_policy, workflow_permission_policy)\nfrom wagtail.core.whitelist import allow_without_attributes, attribute_rule, check_url\n\n\nclass ExplorerMenuItem(MenuItem):\n template = 'wagtailadmin/shared/explorer_menu_item.html'\n\n def is_shown(self, request):\n return user_has_any_page_permission(request.user)\n\n def get_context(self, request):\n context = super().get_context(request)\n start_page = get_explorable_root_page(request.user)\n\n if start_page:\n context['start_page_id'] = start_page.id\n\n return context\n\n\[email protected]('register_admin_menu_item')\ndef register_explorer_menu_item():\n return ExplorerMenuItem(\n _('Pages'), reverse('wagtailadmin_explore_root'),\n name='explorer',\n icon_name='folder-open-inverse',\n order=100)\n\n\nclass SettingsMenuItem(SubmenuMenuItem):\n template = 'wagtailadmin/shared/menu_settings_menu_item.html'\n\n\[email protected]('register_admin_menu_item')\ndef register_settings_menu():\n return SettingsMenuItem(\n _('Settings'),\n settings_menu,\n icon_name='cogs',\n order=10000)\n\n\[email protected]('register_permissions')\ndef register_permissions():\n return Permission.objects.filter(content_type__app_label='wagtailadmin', codename='access_admin')\n\n\nclass PageSearchArea(SearchArea):\n def __init__(self):\n super().__init__(\n _('Pages'), reverse('wagtailadmin_pages:search'),\n name='pages',\n classnames='icon icon-folder-open-inverse',\n order=100)\n\n def is_shown(self, request):\n return user_has_any_page_permission(request.user)\n\n\[email protected]('register_admin_search_area')\ndef register_pages_search_area():\n return PageSearchArea()\n\n\nclass CollectionsMenuItem(MenuItem):\n def is_shown(self, request):\n return collection_permission_policy.user_has_any_permission(\n request.user, ['add', 'change', 'delete']\n )\n\n\[email 
protected]('register_settings_menu_item')\ndef register_collections_menu_item():\n return CollectionsMenuItem(_('Collections'), reverse('wagtailadmin_collections:index'), icon_name='folder-open-1', order=700)\n\n\nclass WorkflowsMenuItem(MenuItem):\n def is_shown(self, request):\n return workflow_permission_policy.user_has_any_permission(\n request.user, ['add', 'change', 'delete']\n )\n\n\nclass WorkflowTasksMenuItem(MenuItem):\n def is_shown(self, request):\n return task_permission_policy.user_has_any_permission(\n request.user, ['add', 'change', 'delete']\n )\n\n\[email protected]('register_settings_menu_item')\ndef register_workflows_menu_item():\n return WorkflowsMenuItem(_('Workflows'), reverse('wagtailadmin_workflows:index'), icon_name='tasks', order=100)\n\n\[email protected]('register_settings_menu_item')\ndef register_workflow_tasks_menu_item():\n return WorkflowTasksMenuItem(_('Workflow tasks'), reverse('wagtailadmin_workflows:task_index'), icon_name='thumbtack', order=150)\n\n\[email protected]('register_page_listing_buttons')\ndef page_listing_buttons(page, page_perms, is_parent=False, next_url=None):\n if page_perms.can_edit():\n yield PageListingButton(\n _('Edit'),\n reverse('wagtailadmin_pages:edit', args=[page.id]),\n attrs={'aria-label': _(\"Edit '%(title)s'\") % {'title': page.get_admin_display_title()}},\n priority=10\n )\n if page.has_unpublished_changes and page.is_previewable():\n yield PageListingButton(\n _('View draft'),\n reverse('wagtailadmin_pages:view_draft', args=[page.id]),\n attrs={\n 'aria-label': _(\"Preview draft version of '%(title)s'\") % {'title': page.get_admin_display_title()},\n 'target': '_blank', 'rel': 'noopener noreferrer'\n },\n priority=20\n )\n if page.live and page.url:\n yield PageListingButton(\n _('View live'),\n page.url,\n attrs={\n 'target': \"_blank\", 'rel': 'noopener noreferrer',\n 'aria-label': _(\"View live version of '%(title)s'\") % {'title': page.get_admin_display_title()},\n },\n priority=30\n )\n if page_perms.can_add_subpage():\n if is_parent:\n yield Button(\n _('Add child page'),\n reverse('wagtailadmin_pages:add_subpage', args=[page.id]),\n attrs={\n 'aria-label': _(\"Add a child page to '%(title)s' \") % {'title': page.get_admin_display_title()},\n },\n classes={'button', 'button-small', 'bicolor', 'icon', 'white', 'icon-plus'},\n priority=40\n )\n else:\n yield PageListingButton(\n _('Add child page'),\n reverse('wagtailadmin_pages:add_subpage', args=[page.id]),\n attrs={'aria-label': _(\"Add a child page to '%(title)s' \") % {'title': page.get_admin_display_title()}},\n priority=40\n )\n\n yield ButtonWithDropdownFromHook(\n _('More'),\n hook_name='register_page_listing_more_buttons',\n page=page,\n page_perms=page_perms,\n is_parent=is_parent,\n next_url=next_url,\n attrs={\n 'target': '_blank', 'rel': 'noopener noreferrer',\n 'title': _(\"View more options for '%(title)s'\") % {'title': page.get_admin_display_title()}\n },\n priority=50\n )\n\n\[email protected]('register_page_listing_more_buttons')\ndef page_listing_more_buttons(page, page_perms, is_parent=False, next_url=None):\n if page_perms.can_move():\n yield Button(\n _('Move'),\n reverse('wagtailadmin_pages:move', args=[page.id]),\n attrs={\"title\": _(\"Move page '%(title)s'\") % {'title': page.get_admin_display_title()}},\n priority=10\n )\n if page_perms.can_copy():\n url = reverse('wagtailadmin_pages:copy', args=[page.id])\n if next_url:\n url += '?' 
+ urlencode({'next': next_url})\n\n urlencode\n yield Button(\n _('Copy'),\n url,\n attrs={'title': _(\"Copy page '%(title)s'\") % {'title': page.get_admin_display_title()}},\n priority=20\n )\n if page_perms.can_delete():\n url = reverse('wagtailadmin_pages:delete', args=[page.id])\n if next_url:\n url += '?' + urlencode({'next': next_url})\n\n yield Button(\n _('Delete'),\n url,\n attrs={'title': _(\"Delete page '%(title)s'\") % {'title': page.get_admin_display_title()}},\n priority=30\n )\n if page_perms.can_unpublish():\n url = reverse('wagtailadmin_pages:unpublish', args=[page.id])\n if next_url:\n url += '?' + urlencode({'next': next_url})\n\n yield Button(\n _('Unpublish'),\n url,\n attrs={'title': _(\"Unpublish page '%(title)s'\") % {'title': page.get_admin_display_title()}},\n priority=40\n )\n\n if page_perms.can_view_revisions():\n yield Button(\n _('History'),\n reverse('wagtailadmin_pages:history', args=[page.id]),\n attrs={'title': _(\"View page history for '%(title)s'\") % {'title': page.get_admin_display_title()}},\n priority=50\n )\n\n\[email protected]('register_admin_urls')\ndef register_viewsets_urls():\n viewsets.populate()\n return viewsets.get_urlpatterns()\n\n\[email protected]('register_account_menu_item')\ndef register_account_set_profile_picture(request):\n return {\n 'url': reverse('wagtailadmin_account_change_avatar'),\n 'label': _('Set profile picture'),\n 'help_text': _(\"Change your profile picture.\")\n }\n\n\[email protected]('register_account_menu_item')\ndef register_account_change_email(request):\n if email_management_enabled():\n return {\n 'url': reverse('wagtailadmin_account_change_email'),\n 'label': _('Change email'),\n 'help_text': _('Change the email address linked to your account.'),\n }\n\n\[email protected]('register_account_menu_item')\ndef register_account_change_password(request):\n if password_management_enabled() and request.user.has_usable_password():\n return {\n 'url': reverse('wagtailadmin_account_change_password'),\n 'label': _('Change password'),\n 'help_text': _('Change the password you use to log in.'),\n }\n\n\[email protected]('register_account_menu_item')\ndef register_account_notification_preferences(request):\n user_perms = UserPagePermissionsProxy(request.user)\n if user_perms.can_edit_pages() or user_perms.can_publish_pages():\n return {\n 'url': reverse('wagtailadmin_account_notification_preferences'),\n 'label': _('Notification preferences'),\n 'help_text': _('Choose which email notifications to receive.'),\n }\n\n\[email protected]('register_account_menu_item')\ndef register_account_preferred_language_preferences(request):\n if len(get_available_admin_languages()) > 1:\n return {\n 'url': reverse('wagtailadmin_account_language_preferences'),\n 'label': _('Language preferences'),\n 'help_text': _('Choose the language you want to use here.'),\n }\n\n\[email protected]('register_account_menu_item')\ndef register_account_current_time_zone(request):\n if len(get_available_admin_time_zones()) > 1:\n return {\n 'url': reverse('wagtailadmin_account_current_time_zone'),\n 'label': _('Current Time Zone'),\n 'help_text': _('Choose your current time zone.'),\n }\n\n\[email protected]('register_account_menu_item')\ndef register_account_change_name(request):\n return {\n 'url': reverse('wagtailadmin_account_change_name'),\n 'label': _('Change name'),\n 'help_text': _('Change your first and last name on your account.'),\n }\n\n\[email protected]('register_rich_text_features')\ndef register_core_features(features):\n # Hallo.js\n 
features.register_editor_plugin(\n 'hallo', 'hr',\n HalloPlugin(\n name='hallohr',\n js=['wagtailadmin/js/hallo-plugins/hallo-hr.js'],\n order=45,\n )\n )\n features.register_converter_rule('editorhtml', 'hr', [\n WhitelistRule('hr', allow_without_attributes)\n ])\n\n features.register_editor_plugin(\n 'hallo', 'link',\n HalloPlugin(\n name='hallowagtaillink',\n js=[\n 'wagtailadmin/js/page-chooser-modal.js',\n 'wagtailadmin/js/hallo-plugins/hallo-wagtaillink.js',\n ],\n )\n )\n features.register_converter_rule('editorhtml', 'link', [\n WhitelistRule('a', attribute_rule({'href': check_url})),\n LinkTypeRule('page', PageLinkHandler),\n ])\n\n features.register_editor_plugin(\n 'hallo', 'bold', HalloFormatPlugin(format_name='bold')\n )\n features.register_converter_rule('editorhtml', 'bold', [\n WhitelistRule('b', allow_without_attributes),\n WhitelistRule('strong', allow_without_attributes),\n ])\n\n features.register_editor_plugin(\n 'hallo', 'italic', HalloFormatPlugin(format_name='italic')\n )\n features.register_converter_rule('editorhtml', 'italic', [\n WhitelistRule('i', allow_without_attributes),\n WhitelistRule('em', allow_without_attributes),\n ])\n\n headings_elements = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']\n headings_order_start = HalloHeadingPlugin.default_order + 1\n for order, element in enumerate(headings_elements, start=headings_order_start):\n features.register_editor_plugin(\n 'hallo', element, HalloHeadingPlugin(element=element, order=order)\n )\n features.register_converter_rule('editorhtml', element, [\n WhitelistRule(element, allow_without_attributes)\n ])\n\n features.register_editor_plugin(\n 'hallo', 'ol', HalloListPlugin(list_type='ordered')\n )\n features.register_converter_rule('editorhtml', 'ol', [\n WhitelistRule('ol', allow_without_attributes),\n WhitelistRule('li', allow_without_attributes),\n ])\n\n features.register_editor_plugin(\n 'hallo', 'ul', HalloListPlugin(list_type='unordered')\n )\n features.register_converter_rule('editorhtml', 'ul', [\n WhitelistRule('ul', allow_without_attributes),\n WhitelistRule('li', allow_without_attributes),\n ])\n\n # Draftail\n features.register_editor_plugin(\n 'draftail', 'hr', draftail_features.BooleanFeature('enableHorizontalRule')\n )\n features.register_converter_rule('contentstate', 'hr', {\n 'from_database_format': {\n 'hr': HorizontalRuleHandler(),\n },\n 'to_database_format': {\n 'entity_decorators': {'HORIZONTAL_RULE': lambda props: DOM.create_element('hr')}\n }\n })\n\n features.register_editor_plugin(\n 'draftail', 'h1', draftail_features.BlockFeature({\n 'label': 'H1',\n 'type': 'header-one',\n 'description': gettext('Heading %(level)d') % {'level': 1},\n })\n )\n features.register_converter_rule('contentstate', 'h1', {\n 'from_database_format': {\n 'h1': BlockElementHandler('header-one'),\n },\n 'to_database_format': {\n 'block_map': {'header-one': 'h1'}\n }\n })\n features.register_editor_plugin(\n 'draftail', 'h2', draftail_features.BlockFeature({\n 'label': 'H2',\n 'type': 'header-two',\n 'description': gettext('Heading %(level)d') % {'level': 2},\n })\n )\n features.register_converter_rule('contentstate', 'h2', {\n 'from_database_format': {\n 'h2': BlockElementHandler('header-two'),\n },\n 'to_database_format': {\n 'block_map': {'header-two': 'h2'}\n }\n })\n features.register_editor_plugin(\n 'draftail', 'h3', draftail_features.BlockFeature({\n 'label': 'H3',\n 'type': 'header-three',\n 'description': gettext('Heading %(level)d') % {'level': 3},\n })\n )\n 
features.register_converter_rule('contentstate', 'h3', {\n 'from_database_format': {\n 'h3': BlockElementHandler('header-three'),\n },\n 'to_database_format': {\n 'block_map': {'header-three': 'h3'}\n }\n })\n features.register_editor_plugin(\n 'draftail', 'h4', draftail_features.BlockFeature({\n 'label': 'H4',\n 'type': 'header-four',\n 'description': gettext('Heading %(level)d') % {'level': 4},\n })\n )\n features.register_converter_rule('contentstate', 'h4', {\n 'from_database_format': {\n 'h4': BlockElementHandler('header-four'),\n },\n 'to_database_format': {\n 'block_map': {'header-four': 'h4'}\n }\n })\n features.register_editor_plugin(\n 'draftail', 'h5', draftail_features.BlockFeature({\n 'label': 'H5',\n 'type': 'header-five',\n 'description': gettext('Heading %(level)d') % {'level': 5},\n })\n )\n features.register_converter_rule('contentstate', 'h5', {\n 'from_database_format': {\n 'h5': BlockElementHandler('header-five'),\n },\n 'to_database_format': {\n 'block_map': {'header-five': 'h5'}\n }\n })\n features.register_editor_plugin(\n 'draftail', 'h6', draftail_features.BlockFeature({\n 'label': 'H6',\n 'type': 'header-six',\n 'description': gettext('Heading %(level)d') % {'level': 6},\n })\n )\n features.register_converter_rule('contentstate', 'h6', {\n 'from_database_format': {\n 'h6': BlockElementHandler('header-six'),\n },\n 'to_database_format': {\n 'block_map': {'header-six': 'h6'}\n }\n })\n features.register_editor_plugin(\n 'draftail', 'ul', draftail_features.BlockFeature({\n 'type': 'unordered-list-item',\n 'icon': 'list-ul',\n 'description': gettext('Bulleted list'),\n })\n )\n features.register_converter_rule('contentstate', 'ul', {\n 'from_database_format': {\n 'ul': ListElementHandler('unordered-list-item'),\n 'li': ListItemElementHandler(),\n },\n 'to_database_format': {\n 'block_map': {'unordered-list-item': {'element': 'li', 'wrapper': 'ul'}}\n }\n })\n features.register_editor_plugin(\n 'draftail', 'ol', draftail_features.BlockFeature({\n 'type': 'ordered-list-item',\n 'icon': 'list-ol',\n 'description': gettext('Numbered list'),\n })\n )\n features.register_converter_rule('contentstate', 'ol', {\n 'from_database_format': {\n 'ol': ListElementHandler('ordered-list-item'),\n 'li': ListItemElementHandler(),\n },\n 'to_database_format': {\n 'block_map': {'ordered-list-item': {'element': 'li', 'wrapper': 'ol'}}\n }\n })\n features.register_editor_plugin(\n 'draftail', 'blockquote', draftail_features.BlockFeature({\n 'type': 'blockquote',\n 'icon': 'openquote',\n 'description': gettext('Blockquote'),\n })\n )\n features.register_converter_rule('contentstate', 'blockquote', {\n 'from_database_format': {\n 'blockquote': BlockElementHandler('blockquote'),\n },\n 'to_database_format': {\n 'block_map': {'blockquote': 'blockquote'}\n }\n })\n\n features.register_editor_plugin(\n 'draftail', 'bold', draftail_features.InlineStyleFeature({\n 'type': 'BOLD',\n 'icon': 'bold',\n 'description': gettext('Bold'),\n })\n )\n features.register_converter_rule('contentstate', 'bold', {\n 'from_database_format': {\n 'b': InlineStyleElementHandler('BOLD'),\n 'strong': InlineStyleElementHandler('BOLD'),\n },\n 'to_database_format': {\n 'style_map': {'BOLD': 'b'}\n }\n })\n features.register_editor_plugin(\n 'draftail', 'italic', draftail_features.InlineStyleFeature({\n 'type': 'ITALIC',\n 'icon': 'italic',\n 'description': gettext('Italic'),\n })\n )\n features.register_converter_rule('contentstate', 'italic', {\n 'from_database_format': {\n 'i': InlineStyleElementHandler('ITALIC'),\n 
'em': InlineStyleElementHandler('ITALIC'),\n        },\n        'to_database_format': {\n            'style_map': {'ITALIC': 'i'}\n        }\n    })\n\n    features.register_editor_plugin(\n        'draftail', 'link', draftail_features.EntityFeature({\n            'type': 'LINK',\n            'icon': 'link',\n            'description': gettext('Link'),\n            # We want to enforce constraints on which links can be pasted into rich text.\n            # Keep only the attributes Wagtail needs.\n            'attributes': ['url', 'id', 'parentId'],\n            'whitelist': {\n                # Keep pasted links with http/https protocol, and not-pasted links (href = undefined).\n                'href': \"^(http:|https:|undefined$)\",\n            }\n        }, js=[\n            'wagtailadmin/js/page-chooser-modal.js',\n        ])\n    )\n    features.register_converter_rule('contentstate', 'link', {\n        'from_database_format': {\n            'a[href]': ExternalLinkElementHandler('LINK'),\n            'a[linktype=\"page\"]': PageLinkElementHandler('LINK'),\n        },\n        'to_database_format': {\n            'entity_decorators': {'LINK': link_entity}\n        }\n    })\n
    features.register_editor_plugin(\n        'draftail', 'superscript', draftail_features.InlineStyleFeature({\n            'type': 'SUPERSCRIPT',\n            'icon': 'superscript',\n            'description': gettext('Superscript'),\n        })\n    )\n    features.register_converter_rule('contentstate', 'superscript', {\n        'from_database_format': {\n            'sup': InlineStyleElementHandler('SUPERSCRIPT'),\n        },\n        'to_database_format': {\n            'style_map': {'SUPERSCRIPT': 'sup'}\n        }\n    })\n
    features.register_editor_plugin(\n        'draftail', 'subscript', draftail_features.InlineStyleFeature({\n            'type': 'SUBSCRIPT',\n            'icon': 'subscript',\n            'description': gettext('Subscript'),\n        })\n    )\n    features.register_converter_rule('contentstate', 'subscript', {\n        'from_database_format': {\n            'sub': InlineStyleElementHandler('SUBSCRIPT'),\n        },\n        'to_database_format': {\n            'style_map': {'SUBSCRIPT': 'sub'}\n        }\n    })\n
    features.register_editor_plugin(\n        'draftail', 'strikethrough', draftail_features.InlineStyleFeature({\n            'type': 'STRIKETHROUGH',\n            'icon': 'strikethrough',\n            'description': gettext('Strikethrough'),\n        })\n    )\n    features.register_converter_rule('contentstate', 'strikethrough', {\n        'from_database_format': {\n            's': InlineStyleElementHandler('STRIKETHROUGH'),\n        },\n        'to_database_format': {\n            'style_map': {'STRIKETHROUGH': 's'}\n        }\n    })\n
    features.register_editor_plugin(\n        'draftail', 'code', draftail_features.InlineStyleFeature({\n            'type': 'CODE',\n            'icon': 'code',\n            'description': gettext('Code'),\n        })\n    )\n    features.register_converter_rule('contentstate', 'code', {\n        'from_database_format': {\n            'code': InlineStyleElementHandler('CODE'),\n        },\n        'to_database_format': {\n            'style_map': {'CODE': 'code'}\n        }\n    })\n\n\n
class ReportsMenuItem(SubmenuMenuItem):\n    template = 'wagtailadmin/shared/menu_submenu_item.html'\n\n\n
class LockedPagesMenuItem(MenuItem):\n    def is_shown(self, request):\n        return UserPagePermissionsProxy(request.user).can_remove_locks()\n\n\n
class WorkflowReportMenuItem(MenuItem):\n    def is_shown(self, request):\n        return True\n\n\n
class SiteHistoryReportMenuItem(MenuItem):\n    def is_shown(self, request):\n        return UserPagePermissionsProxy(request.user).explorable_pages().exists()\n\n\n
@hooks.register('register_reports_menu_item')\ndef register_locked_pages_menu_item():\n    return LockedPagesMenuItem(_('Locked Pages'), reverse('wagtailadmin_reports:locked_pages'), icon_name='lock', order=700)\n\n\n
@hooks.register('register_reports_menu_item')\ndef register_workflow_report_menu_item():\n    return WorkflowReportMenuItem(_('Workflows'), reverse('wagtailadmin_reports:workflow'), icon_name='tasks', order=800)\n\n\n
@hooks.register('register_reports_menu_item')\ndef register_workflow_tasks_report_menu_item():\n    return WorkflowReportMenuItem(_('Workflow tasks'), reverse('wagtailadmin_reports:workflow_tasks'), icon_name='thumbtack', order=900)\n\n\n
@hooks.register('register_reports_menu_item')\ndef register_site_history_report_menu_item():\n    return SiteHistoryReportMenuItem(_('Site history'), reverse('wagtailadmin_reports:site_history'), icon_name='history', order=1000)\n\n\n
@hooks.register('register_admin_menu_item')\ndef register_reports_menu():\n    return ReportsMenuItem(\n        _('Reports'), reports_menu, classnames='icon icon-site', order=9000)\n\n\n
@hooks.register('register_icons')\ndef register_icons(icons):\n    for icon in [\n        'arrow-down-big.svg',\n        'arrow-down.svg',\n        'arrow-left.svg',\n        'arrow-right.svg',\n        'arrow-up-big.svg',\n        'arrow-up.svg',\n        'arrows-up-down.svg',\n        'bin.svg',\n        'bold.svg',\n        'chain-broken.svg',\n        'clipboard-list.svg',\n        'code.svg',\n        'cog.svg',\n        'cogs.svg',\n        'collapse-down.svg',\n        'collapse-up.svg',\n        'cross.svg',\n        'date.svg',\n        'doc-empty-inverse.svg',\n        'doc-empty.svg',\n        'doc-full-inverse.svg',\n        'doc-full.svg', # aka file-text-alt\n        'download-alt.svg',\n        'download.svg',\n        'draft.svg',\n        'duplicate.svg',\n        'edit.svg',\n        'error.svg',\n        'folder-inverse.svg',\n        'folder-open-1.svg',\n        'folder-open-inverse.svg',\n        'folder.svg',\n        'form.svg',\n        'grip.svg',\n        'group.svg',\n        'help.svg',\n        'history.svg',\n        'home.svg',\n        'horizontalrule.svg',\n        'image.svg', # aka picture\n        'italic.svg',\n        'link.svg',\n        'link-external.svg',\n        'list-ol.svg',\n        'list-ul.svg',\n        'lock-open.svg',\n        'lock.svg',\n        'login.svg',\n        'logout.svg',\n        'mail.svg',\n        'media.svg',\n        'no-view.svg',\n        'openquote.svg',\n        'order-down.svg',\n        'order-up.svg',\n        'order.svg',\n        'password.svg',\n        'pick.svg',\n        'pilcrow.svg',\n        'placeholder.svg', # aka marquee\n        'plus-inverse.svg',\n        'plus.svg',\n        'radio-empty.svg',\n        'radio-full.svg',\n        'redirect.svg',\n        'repeat.svg',\n        'reset.svg',\n        'resubmit.svg',\n        'search.svg',\n        'site.svg',\n        'snippet.svg',\n        'spinner.svg',\n        'strikethrough.svg',\n        'success.svg',\n        'subscript.svg',\n        'superscript.svg',\n        'table.svg',\n        'tag.svg',\n        'tasks.svg',\n        'thumbtack.svg',\n        'tick-inverse.svg',\n        'tick.svg',\n        'time.svg',\n        'title.svg',\n        'undo.svg',\n        'uni52.svg', # Is this a redundant icon?\n        'upload.svg',\n        'user.svg',\n        'view.svg',\n        'wagtail-inverse.svg',\n        'wagtail.svg',\n        'warning.svg',\n    ]:\n        icons.append('wagtailadmin/icons/{}'.format(icon))\n    return icons\n\n\n
@hooks.register('construct_homepage_summary_items')\ndef add_pages_summary_item(request, items):\n    items.insert(0, PagesSummaryItem(request))\n\n\n
@hooks.register('register_log_actions')\ndef register_core_log_actions(actions):\n    actions.register_action('wagtail.create', _('Create'), _('Created'))\n    actions.register_action('wagtail.edit', _('Save draft'), _('Draft saved'))\n    actions.register_action('wagtail.delete', _('Delete'), _('Deleted'))\n    actions.register_action('wagtail.publish', _('Publish'), _('Published'))\n    actions.register_action('wagtail.publish.scheduled', _(\"Publish scheduled draft\"), _('Published scheduled draft'))\n    actions.register_action('wagtail.unpublish', _('Unpublish'), _('Unpublished'))\n    actions.register_action('wagtail.unpublish.scheduled', _('Unpublish scheduled draft'), _('Unpublished scheduled draft'))\n    actions.register_action('wagtail.lock', _('Lock'), _('Locked'))\n    actions.register_action('wagtail.unlock', _('Unlock'), _('Unlocked'))\n    actions.register_action('wagtail.moderation.approve',
_('Approve'), _('Approved'))\n actions.register_action('wagtail.moderation.reject', _('Reject'), _('Rejected'))\n\n def revert_message(data):\n try:\n return _('Reverted to previous revision with id %(revision_id)s from %(created_at)s') % {\n 'revision_id': data['revision']['id'],\n 'created_at': data['revision']['created'],\n }\n except KeyError:\n return _('Reverted to previous revision')\n\n def copy_message(data):\n try:\n return _('Copied from %(title)s') % {\n 'title': data['source']['title'],\n }\n except KeyError:\n return _(\"Copied\")\n\n def move_message(data):\n try:\n return _(\"Moved from '%(old_parent)s' to '%(new_parent)s'\") % {\n 'old_parent': data['source']['title'],\n 'new_parent': data['destination']['title'],\n }\n except KeyError:\n return _('Moved')\n\n def schedule_publish_message(data):\n try:\n if data['revision']['has_live_version']:\n return _('Revision %(revision_id)s from %(created_at)s scheduled for publishing at %(go_live_at)s.') % {\n 'revision_id': data['revision']['id'],\n 'created_at': data['revision']['created'],\n 'go_live_at': data['revision']['go_live_at'],\n }\n else:\n return _('Page scheduled for publishing at %(go_live_at)s') % {\n 'go_live_at': data['revision']['go_live_at'],\n }\n except KeyError:\n return _('Page scheduled for publishing')\n\n def unschedule_publish_message(data):\n try:\n if data['revision']['has_live_version']:\n return _('Revision %(revision_id)s from %(created_at)s unscheduled from publishing at %(go_live_at)s.') % {\n 'revision_id': data['revision']['id'],\n 'created_at': data['revision']['created'],\n 'go_live_at': data['revision']['go_live_at'],\n }\n else:\n return _('Page unscheduled for publishing at %(go_live_at)s') % {\n 'go_live_at': data['revision']['go_live_at'],\n }\n except KeyError:\n return _('Page unscheduled from publishing')\n\n def add_view_restriction(data):\n try:\n return _(\"Added the '%(restriction)s' view restriction\") % {\n 'restriction': data['restriction']['title'],\n }\n except KeyError:\n return _('Added view restriction')\n\n def edit_view_restriction(data):\n try:\n return _(\"Updated the view restriction to '%(restriction)s'\") % {\n 'restriction': data['restriction']['title'],\n }\n except KeyError:\n return _('Updated view restriction')\n\n def delete_view_restriction(data):\n try:\n return _(\"Removed the '%(restriction)s' view restriction\") % {\n 'restriction': data['restriction']['title'],\n }\n except KeyError:\n return _('Removed view restriction')\n\n def rename_message(data):\n try:\n return _(\"Renamed from '%(old)s' to '%(new)s'\") % {\n 'old': data['title']['old'],\n 'new': data['title']['new'],\n }\n except KeyError:\n return _('Renamed')\n\n actions.register_action('wagtail.rename', _('Rename'), rename_message)\n actions.register_action('wagtail.revert', _('Revert'), revert_message)\n actions.register_action('wagtail.copy', _('Copy'), copy_message)\n actions.register_action('wagtail.move', _('Move'), move_message)\n actions.register_action('wagtail.publish.schedule', _(\"Schedule publication\"), schedule_publish_message)\n actions.register_action('wagtail.schedule.cancel', _(\"Unschedule publication\"), unschedule_publish_message)\n actions.register_action('wagtail.view_restriction.create', _(\"Add view restrictions\"), add_view_restriction)\n actions.register_action('wagtail.view_restriction.edit', _(\"Update view restrictions\"), edit_view_restriction)\n actions.register_action('wagtail.view_restriction.delete', _(\"Remove view restrictions\"), 
delete_view_restriction)\n\n\n@hooks.register('register_log_actions')\ndef register_workflow_log_actions(actions):\n    def workflow_start_message(data):\n        try:\n            return _(\"'%(workflow)s' started. Next step '%(task)s'\") % {\n                'workflow': data['workflow']['title'],\n                'task': data['workflow']['next']['title'],\n            }\n        except (KeyError, TypeError):\n            return _('Workflow started')\n\n
    def workflow_approve_message(data):\n        try:\n            if data['workflow']['next']:\n                return _(\"Approved at '%(task)s'. Next step '%(next_task)s'\") % {\n                    'task': data['workflow']['task']['title'],\n                    'next_task': data['workflow']['next']['title'],\n                }\n            else:\n                return _(\"Approved at '%(task)s'. '%(workflow)s' complete\") % {\n                    'task': data['workflow']['task']['title'],\n                    'workflow': data['workflow']['title'],\n                }\n        except (KeyError, TypeError):\n            return _('Workflow task approved')\n\n
    def workflow_reject_message(data):\n        try:\n            return _(\"Rejected at '%(task)s'. Changes requested\") % {\n                'task': data['workflow']['task']['title'],\n            }\n        except (KeyError, TypeError):\n            return _('Workflow task rejected. Workflow complete')\n\n
    def workflow_resume_message(data):\n        try:\n            return _(\"Resubmitted '%(task)s'. Workflow resumed'\") % {\n                'task': data['workflow']['task']['title'],\n            }\n        except (KeyError, TypeError):\n            return _('Workflow task resubmitted. Workflow resumed')\n\n
    def workflow_cancel_message(data):\n        try:\n            return _(\"Cancelled '%(workflow)s' at '%(task)s'\") % {\n                'workflow': data['workflow']['title'],\n                'task': data['workflow']['task']['title'],\n            }\n        except (KeyError, TypeError):\n            return _('Workflow cancelled')\n\n
    actions.register_action('wagtail.workflow.start', _('Workflow: start'), workflow_start_message)\n    actions.register_action('wagtail.workflow.approve', _('Workflow: approve task'), workflow_approve_message)\n    actions.register_action('wagtail.workflow.reject', _('Workflow: reject task'), workflow_reject_message)\n    actions.register_action('wagtail.workflow.resume', _('Workflow: resume task'), workflow_resume_message)\n    actions.register_action('wagtail.workflow.cancel', _('Workflow: cancel'), workflow_cancel_message)\n", "path": "wagtail/admin/wagtail_hooks.py" } ]
diff --git a/client/src/components/Draftail/Draftail.scss b/client/src/components/Draftail/Draftail.scss index 0ce61e81f453..d8c3d6e6650c 100644 --- a/client/src/components/Draftail/Draftail.scss +++ b/client/src/components/Draftail/Draftail.scss @@ -110,6 +110,13 @@ $draftail-editor-font-family: $font-serif; border: 1px solid $color-grey-3; } +.Draftail-ToolbarButton { + &:hover, + &:active { + border: 1px solid $color-grey-3; + } +} + .title .Draftail-Editor .public-DraftEditor-content, .title .Draftail-Editor .public-DraftEditorPlaceholder-root { font-size: 2em; diff --git a/wagtail/admin/templates/wagtailadmin/icons/strikethrough.svg b/wagtail/admin/templates/wagtailadmin/icons/strikethrough.svg new file mode 100755 index 000000000000..515ac248460f --- /dev/null +++ b/wagtail/admin/templates/wagtailadmin/icons/strikethrough.svg @@ -0,0 +1,3 @@ +<symbol id="icon-strikethrough" viewBox="0 0 512 512"> + <path d="M496 224H293.9l-87.17-26.83A43.55 43.55 0 0 1 219.55 112h66.79A49.89 49.89 0 0 1 331 139.58a16 16 0 0 0 21.46 7.15l42.94-21.47a16 16 0 0 0 7.16-21.46l-.53-1A128 128 0 0 0 287.51 32h-68a123.68 123.68 0 0 0-123 135.64c2 20.89 10.1 39.83 21.78 56.36H16a16 16 0 0 0-16 16v32a16 16 0 0 0 16 16h480a16 16 0 0 0 16-16v-32a16 16 0 0 0-16-16zm-180.24 96A43 43 0 0 1 336 356.45 43.59 43.59 0 0 1 292.45 400h-66.79A49.89 49.89 0 0 1 181 372.42a16 16 0 0 0-21.46-7.15l-42.94 21.47a16 16 0 0 0-7.16 21.46l.53 1A128 128 0 0 0 224.49 480h68a123.68 123.68 0 0 0 123-135.64 114.25 114.25 0 0 0-5.34-24.36z"></path> +</symbol> diff --git a/wagtail/admin/templates/wagtailadmin/icons/subscript.svg b/wagtail/admin/templates/wagtailadmin/icons/subscript.svg new file mode 100755 index 000000000000..1b42cd01a2e7 --- /dev/null +++ b/wagtail/admin/templates/wagtailadmin/icons/subscript.svg @@ -0,0 +1,3 @@ +<symbol id="icon-subscript" viewBox="0 0 512 512"> + <path d="M496 448h-16V304a16 16 0 0 0-16-16h-48a16 16 0 0 0-14.29 8.83l-16 32A16 16 0 0 0 400 352h16v96h-16a16 16 0 0 0-16 16v32a16 16 0 0 0 16 16h96a16 16 0 0 0 16-16v-32a16 16 0 0 0-16-16zM336 64h-67a16 16 0 0 0-13.14 6.87l-79.9 115-79.9-115A16 16 0 0 0 83 64H16A16 16 0 0 0 0 80v48a16 16 0 0 0 16 16h33.48l77.81 112-77.81 112H16a16 16 0 0 0-16 16v48a16 16 0 0 0 16 16h67a16 16 0 0 0 13.14-6.87l79.9-115 79.9 115A16 16 0 0 0 269 448h67a16 16 0 0 0 16-16v-48a16 16 0 0 0-16-16h-33.48l-77.81-112 77.81-112H336a16 16 0 0 0 16-16V80a16 16 0 0 0-16-16z"></path> +</symbol> diff --git a/wagtail/admin/templates/wagtailadmin/icons/superscript.svg b/wagtail/admin/templates/wagtailadmin/icons/superscript.svg new file mode 100755 index 000000000000..61b949d8742f --- /dev/null +++ b/wagtail/admin/templates/wagtailadmin/icons/superscript.svg @@ -0,0 +1,3 @@ +<symbol id="icon-superscript" viewBox="0 0 512 512"> + <path d="M496 160h-16V16a16 16 0 0 0-16-16h-48a16 16 0 0 0-14.29 8.83l-16 32A16 16 0 0 0 400 64h16v96h-16a16 16 0 0 0-16 16v32a16 16 0 0 0 16 16h96a16 16 0 0 0 16-16v-32a16 16 0 0 0-16-16zM336 64h-67a16 16 0 0 0-13.14 6.87l-79.9 115-79.9-115A16 16 0 0 0 83 64H16A16 16 0 0 0 0 80v48a16 16 0 0 0 16 16h33.48l77.81 112-77.81 112H16a16 16 0 0 0-16 16v48a16 16 0 0 0 16 16h67a16 16 0 0 0 13.14-6.87l79.9-115 79.9 115A16 16 0 0 0 269 448h67a16 16 0 0 0 16-16v-48a16 16 0 0 0-16-16h-33.48l-77.81-112 77.81-112H336a16 16 0 0 0 16-16V80a16 16 0 0 0-16-16z"></path> +</symbol> diff --git a/wagtail/admin/wagtail_hooks.py b/wagtail/admin/wagtail_hooks.py index 43fb7ffdce49..76de591fad75 100644 --- a/wagtail/admin/wagtail_hooks.py +++ b/wagtail/admin/wagtail_hooks.py @@ -780,7 +780,10 @@ 
def register_icons(icons): 'site.svg', 'snippet.svg', 'spinner.svg', + 'strikethrough.svg', 'success.svg', + 'subscript.svg', + 'superscript.svg', 'table.svg', 'tag.svg', 'tasks.svg', diff --git a/wagtail/contrib/styleguide/templates/wagtailstyleguide/base.html b/wagtail/contrib/styleguide/templates/wagtailstyleguide/base.html index ae8a7f0f342a..0a2151d88f7e 100644 --- a/wagtail/contrib/styleguide/templates/wagtailstyleguide/base.html +++ b/wagtail/contrib/styleguide/templates/wagtailstyleguide/base.html @@ -845,6 +845,9 @@ <h2>SVG Icons</h2> <li>{% icon 'list-ul' %} list-ul</li> <li>{% icon 'link' %} link</li> <li>{% icon 'link-external' %} link-external</li> + <li>{% icon 'superscript' %} superscript</li> + <li>{% icon 'subscript' %} subscript</li> + <li>{% icon 'strikethrough' %} strikethrough</li> <li>{% icon 'radio-full' %} radio-full</li> <li>{% icon 'radio-empty' %} radio-empty</li> <li>{% icon 'arrow-up-big' %} arrow-up-big</li>
comic__grand-challenge.org-1062
The schema is empty for unauthorised users.
Another problem with this: the schema is empty for unauthorised users. You need to add `public=True` to `get_schema_view`.

_Originally posted by @jmsmkn in https://github.com/comic/grand-challenge.org/issues/1017#issuecomment-567254400_
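For illustration, a minimal sketch of the fix described above, assuming a configured Django project with drf-yasg installed (the `openapi.Info` arguments here are placeholders): without `public=True`, drf-yasg renders only the endpoints the requesting user may access, so anonymous visitors get an empty schema, while `permission_classes` only governs who may fetch the schema document, not what it contains.

```python
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from rest_framework import permissions

schema_view = get_schema_view(
    openapi.Info(title="Example API", default_version="v1"),  # placeholder info
    # Render the full schema regardless of the requesting user's permissions;
    # without this, unauthorised users receive an empty schema.
    public=True,
    # AllowAny controls who can fetch the document, not what it contains.
    permission_classes=(permissions.AllowAny,),
)
```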
[ { "content": "from django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.urls import path\nfrom drf_yasg import openapi\nfrom drf_yasg.views import get_schema_view\nfrom rest_framework import permissions, routers\n\nfrom grandchallenge.algorithms.views import (\n AlgorithmImageViewSet,\n AlgorithmViewSet,\n JobViewSet,\n ResultViewSet,\n)\nfrom grandchallenge.cases.views import (\n ImageViewSet,\n RawImageUploadSessionViewSet,\n)\nfrom grandchallenge.jqfileupload.views import StagedFileViewSet\nfrom grandchallenge.reader_studies.views import (\n AnswerViewSet,\n QuestionViewSet,\n ReaderStudyViewSet,\n)\nfrom grandchallenge.retina_api.views import LandmarkAnnotationSetViewSet\nfrom grandchallenge.subdomains.utils import reverse_lazy\nfrom grandchallenge.workstation_configs.views import WorkstationConfigViewSet\nfrom grandchallenge.workstations.views import SessionViewSet\n\napp_name = \"api\"\n\nrouter = routers.DefaultRouter()\nrouter.register(\n r\"cases/upload-sessions\",\n RawImageUploadSessionViewSet,\n basename=\"upload-session\",\n)\nrouter.register(r\"cases/images\", ImageViewSet, basename=\"image\")\nrouter.register(r\"workstations/sessions\", SessionViewSet)\nrouter.register(\n r\"workstations/configs\",\n WorkstationConfigViewSet,\n basename=\"workstations-config\",\n)\nrouter.register(r\"algorithms/jobs\", JobViewSet, basename=\"algorithms-job\")\nrouter.register(\n r\"algorithms/results\", ResultViewSet, basename=\"algorithms-result\"\n)\nrouter.register(\n r\"algorithms/images\", AlgorithmImageViewSet, basename=\"algorithms-image\"\n)\nrouter.register(r\"algorithms\", AlgorithmViewSet, basename=\"algorithm\")\n\nrouter.register(\n r\"reader-studies/answers\", AnswerViewSet, basename=\"reader-studies-answer\"\n)\nrouter.register(\n r\"reader-studies/questions\",\n QuestionViewSet,\n basename=\"reader-studies-question\",\n)\nrouter.register(r\"reader-studies\", ReaderStudyViewSet, basename=\"reader-study\")\nrouter.register(r\"chunked-uploads\", StagedFileViewSet, basename=\"staged-file\")\n\nrouter.register(\n r\"retina/landmark-annotation\",\n LandmarkAnnotationSetViewSet,\n basename=\"landmark-annotation\",\n)\n\n# TODO: add terms_of_service and contact\nschema_view = get_schema_view(\n openapi.Info(\n title=f\"{settings.SESSION_COOKIE_DOMAIN.lstrip('.')} API\",\n default_version=\"v1\",\n description=f\"The API for {settings.SESSION_COOKIE_DOMAIN.lstrip('.')}.\",\n license=openapi.License(name=\"Apache License 2.0\"),\n terms_of_service=reverse_lazy(\n \"policies:detail\", kwargs={\"slug\": \"terms-of-service\"}\n ),\n ),\n permission_classes=(permissions.AllowAny,),\n patterns=[path(\"api/v1/\", include(router.urls))],\n)\n\nurlpatterns = [\n url(\n r\"^swagger(?P<format>\\.json|\\.yaml)$\",\n schema_view.without_ui(),\n name=\"schema-json\",\n ),\n # Do not namespace the router.urls without updating the view names in\n # the serializers\n path(\"v1/\", include(router.urls)),\n path(\"auth/\", include(\"rest_framework.urls\", namespace=\"rest_framework\")),\n path(\"\", schema_view.with_ui(\"swagger\"), name=\"schema-docs\"),\n]\n", "path": "app/grandchallenge/api/urls.py" } ]
[ { "content": "from django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.urls import path\nfrom drf_yasg import openapi\nfrom drf_yasg.views import get_schema_view\nfrom rest_framework import permissions, routers\n\nfrom grandchallenge.algorithms.views import (\n AlgorithmImageViewSet,\n AlgorithmViewSet,\n JobViewSet,\n ResultViewSet,\n)\nfrom grandchallenge.cases.views import (\n ImageViewSet,\n RawImageUploadSessionViewSet,\n)\nfrom grandchallenge.jqfileupload.views import StagedFileViewSet\nfrom grandchallenge.reader_studies.views import (\n AnswerViewSet,\n QuestionViewSet,\n ReaderStudyViewSet,\n)\nfrom grandchallenge.retina_api.views import LandmarkAnnotationSetViewSet\nfrom grandchallenge.subdomains.utils import reverse_lazy\nfrom grandchallenge.workstation_configs.views import WorkstationConfigViewSet\nfrom grandchallenge.workstations.views import SessionViewSet\n\napp_name = \"api\"\n\nrouter = routers.DefaultRouter()\nrouter.register(\n r\"cases/upload-sessions\",\n RawImageUploadSessionViewSet,\n basename=\"upload-session\",\n)\nrouter.register(r\"cases/images\", ImageViewSet, basename=\"image\")\nrouter.register(r\"workstations/sessions\", SessionViewSet)\nrouter.register(\n r\"workstations/configs\",\n WorkstationConfigViewSet,\n basename=\"workstations-config\",\n)\nrouter.register(r\"algorithms/jobs\", JobViewSet, basename=\"algorithms-job\")\nrouter.register(\n r\"algorithms/results\", ResultViewSet, basename=\"algorithms-result\"\n)\nrouter.register(\n r\"algorithms/images\", AlgorithmImageViewSet, basename=\"algorithms-image\"\n)\nrouter.register(r\"algorithms\", AlgorithmViewSet, basename=\"algorithm\")\n\nrouter.register(\n r\"reader-studies/answers\", AnswerViewSet, basename=\"reader-studies-answer\"\n)\nrouter.register(\n r\"reader-studies/questions\",\n QuestionViewSet,\n basename=\"reader-studies-question\",\n)\nrouter.register(r\"reader-studies\", ReaderStudyViewSet, basename=\"reader-study\")\nrouter.register(r\"chunked-uploads\", StagedFileViewSet, basename=\"staged-file\")\n\nrouter.register(\n r\"retina/landmark-annotation\",\n LandmarkAnnotationSetViewSet,\n basename=\"landmark-annotation\",\n)\n\n# TODO: add terms_of_service and contact\nschema_view = get_schema_view(\n openapi.Info(\n title=f\"{settings.SESSION_COOKIE_DOMAIN.lstrip('.')} API\",\n default_version=\"v1\",\n description=f\"The API for {settings.SESSION_COOKIE_DOMAIN.lstrip('.')}.\",\n license=openapi.License(name=\"Apache License 2.0\"),\n terms_of_service=reverse_lazy(\n \"policies:detail\", kwargs={\"slug\": \"terms-of-service\"}\n ),\n ),\n public=True,\n permission_classes=(permissions.AllowAny,),\n patterns=[path(\"api/v1/\", include(router.urls))],\n)\n\nurlpatterns = [\n url(\n r\"^swagger(?P<format>\\.json|\\.yaml)$\",\n schema_view.without_ui(),\n name=\"schema-json\",\n ),\n # Do not namespace the router.urls without updating the view names in\n # the serializers\n path(\"v1/\", include(router.urls)),\n path(\"auth/\", include(\"rest_framework.urls\", namespace=\"rest_framework\")),\n path(\"\", schema_view.with_ui(\"swagger\"), name=\"schema-docs\"),\n]\n", "path": "app/grandchallenge/api/urls.py" } ]
diff --git a/app/grandchallenge/api/urls.py b/app/grandchallenge/api/urls.py index df1c4687dd..83ed74ac34 100644 --- a/app/grandchallenge/api/urls.py +++ b/app/grandchallenge/api/urls.py @@ -78,6 +78,7 @@ "policies:detail", kwargs={"slug": "terms-of-service"} ), ), + public=True, permission_classes=(permissions.AllowAny,), patterns=[path("api/v1/", include(router.urls))], ) diff --git a/app/tests/api_tests/test_urls.py b/app/tests/api_tests/test_urls.py index 1e3f5c45d5..6e5b91da23 100644 --- a/app/tests/api_tests/test_urls.py +++ b/app/tests/api_tests/test_urls.py @@ -17,6 +17,10 @@ def test_api_docs_generation( client, schema, schema_format, ): kwargs = dict(format=schema_format) if schema == "schema-json" else None - assert_viewname_status( - code=200, url=reverse(f"api:{schema}", kwargs=kwargs), client=client, + response = assert_viewname_status( + code=200, url=reverse(f"api:{schema}", kwargs=kwargs), client=client ) + if schema_format is not None: + assert len(response.data["paths"]) > 0 + else: + assert len(response.content) > 0
Cog-Creators__Red-DiscordBot-4453
Stop backing up lavalink logs
Lavalink logs are host-specific; stop backing them up.
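A minimal sketch of the exclusion this calls for, mirroring the `os.path.join("Audio", "logs")` entry in the diff further down; the `exclusions` list and the substring test match the `create_backup` code below, while `should_backup` is a hypothetical helper name used here only for illustration.

```python
import os
from pathlib import Path

# Paths containing any of these fragments are skipped when building the
# backup tarball; "Audio/logs" keeps host-specific Lavalink logs out.
exclusions = [
    "__pycache__",
    "Lavalink.jar",
    os.path.join("Audio", "logs"),
]

def should_backup(path: Path) -> bool:
    # Mirrors the check in create_backup(): only regular files whose path
    # contains no excluded fragment are added to the archive.
    return path.is_file() and not any(ex in str(path) for ex in exclusions)
```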
[ { "content": "from __future__ import annotations\n\nimport asyncio\nimport collections.abc\nimport json\nimport logging\nimport os\nimport re\nimport shutil\nimport tarfile\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import (\n AsyncIterator,\n Awaitable,\n Callable,\n Iterator,\n List,\n Optional,\n Union,\n TYPE_CHECKING,\n Tuple,\n)\n\nimport aiohttp\nimport discord\nimport pkg_resources\nfrom fuzzywuzzy import fuzz, process\nfrom redbot import VersionInfo\n\nfrom redbot.core import data_manager\nfrom redbot.core.utils.chat_formatting import box\n\nif TYPE_CHECKING:\n from redbot.core.bot import Red\n from redbot.core.commands import Command, Context\n\nmain_log = logging.getLogger(\"red\")\n\n__all__ = (\n \"safe_delete\",\n \"fuzzy_command_search\",\n \"format_fuzzy_results\",\n \"create_backup\",\n \"send_to_owners_with_preprocessor\",\n \"send_to_owners_with_prefix_replaced\",\n \"expected_version\",\n \"fetch_latest_red_version_info\",\n)\n\n\ndef safe_delete(pth: Path):\n if pth.exists():\n for root, dirs, files in os.walk(str(pth)):\n os.chmod(root, 0o700)\n\n for d in dirs:\n os.chmod(os.path.join(root, d), 0o700)\n\n for f in files:\n os.chmod(os.path.join(root, f), 0o700)\n\n shutil.rmtree(str(pth), ignore_errors=True)\n\n\ndef _fuzzy_log_filter(record):\n return record.funcName != \"extractWithoutOrder\"\n\n\nlogging.getLogger().addFilter(_fuzzy_log_filter)\n\n\nasync def fuzzy_command_search(\n ctx: Context,\n term: Optional[str] = None,\n *,\n commands: Optional[Union[AsyncIterator[Command], Iterator[Command]]] = None,\n min_score: int = 80,\n) -> Optional[List[Command]]:\n \"\"\"Search for commands which are similar in name to the one invoked.\n\n Returns a maximum of 5 commands which must all be at least matched\n greater than ``min_score``.\n\n Parameters\n ----------\n ctx : `commands.Context <redbot.core.commands.Context>`\n The command invocation context.\n term : Optional[str]\n The name of the invoked command. If ``None``,\n `Context.invoked_with` will be used instead.\n commands : Optional[Union[AsyncIterator[commands.Command], Iterator[commands.Command]]]\n The commands available to choose from when doing a fuzzy match.\n When omitted, `Bot.walk_commands` will be used instead.\n min_score : int\n The minimum score for matched commands to reach. Defaults to 80.\n\n Returns\n -------\n Optional[List[`commands.Command <redbot.core.commands.Command>`]]\n A list of commands which were fuzzily matched with the invoked\n command.\n\n \"\"\"\n if ctx.guild is not None:\n enabled = await ctx.bot._config.guild(ctx.guild).fuzzy()\n else:\n enabled = await ctx.bot._config.fuzzy()\n\n if not enabled:\n return None\n\n if term is None:\n term = ctx.invoked_with\n\n # If the term is an alias or CC, we don't want to send a supplementary fuzzy search.\n alias_cog = ctx.bot.get_cog(\"Alias\")\n if alias_cog is not None:\n alias = await alias_cog._aliases.get_alias(ctx.guild, term)\n\n if alias:\n return None\n customcom_cog = ctx.bot.get_cog(\"CustomCommands\")\n if customcom_cog is not None:\n cmd_obj = customcom_cog.commandobj\n\n try:\n await cmd_obj.get(ctx.message, term)\n except:\n pass\n else:\n return None\n\n if commands is None:\n choices = set(ctx.bot.walk_commands())\n elif isinstance(commands, collections.abc.AsyncIterator):\n choices = {c async for c in commands}\n else:\n choices = set(commands)\n\n # Do the scoring. 
`extracted` is a list of tuples in the form `(command, score)`\n extracted = process.extract(term, choices, limit=5, scorer=fuzz.QRatio)\n if not extracted:\n return None\n\n # Filter through the fuzzy-matched commands.\n matched_commands = []\n for command, score in extracted:\n if score < min_score:\n # Since the list is in decreasing order of score, we can exit early.\n break\n if await command.can_see(ctx):\n matched_commands.append(command)\n\n return matched_commands\n\n\nasync def format_fuzzy_results(\n ctx: Context, matched_commands: List[Command], *, embed: Optional[bool] = None\n) -> Union[str, discord.Embed]:\n \"\"\"Format the result of a fuzzy command search.\n\n Parameters\n ----------\n ctx : `commands.Context <redbot.core.commands.Context>`\n The context in which this result is being displayed.\n matched_commands : List[`commands.Command <redbot.core.commands.Command>`]\n A list of commands which have been matched by the fuzzy search, sorted\n in order of decreasing similarity.\n embed : bool\n Whether or not the result should be an embed. If set to ``None``, this\n will default to the result of `ctx.embed_requested`.\n\n Returns\n -------\n Union[str, discord.Embed]\n The formatted results.\n\n \"\"\"\n if embed is not False and (embed is True or await ctx.embed_requested()):\n lines = []\n for cmd in matched_commands:\n short_doc = cmd.format_shortdoc_for_context(ctx)\n lines.append(f\"**{ctx.clean_prefix}{cmd.qualified_name}** {short_doc}\")\n return discord.Embed(\n title=\"Perhaps you wanted one of these?\",\n colour=await ctx.embed_colour(),\n description=\"\\n\".join(lines),\n )\n else:\n lines = []\n for cmd in matched_commands:\n short_doc = cmd.format_shortdoc_for_context(ctx)\n lines.append(f\"{ctx.clean_prefix}{cmd.qualified_name} -- {short_doc}\")\n return \"Perhaps you wanted one of these? 
\" + box(\"\\n\".join(lines), lang=\"vhdl\")\n\n\nasync def create_backup(dest: Path = Path.home()) -> Optional[Path]:\n data_path = Path(data_manager.core_data_path().parent)\n if not data_path.exists():\n return None\n\n dest.mkdir(parents=True, exist_ok=True)\n timestr = datetime.utcnow().strftime(\"%Y-%m-%dT%H-%M-%S\")\n backup_fpath = dest / f\"redv3_{data_manager.instance_name}_{timestr}.tar.gz\"\n\n to_backup = []\n exclusions = [\n \"__pycache__\",\n \"Lavalink.jar\",\n os.path.join(\"Downloader\", \"lib\"),\n os.path.join(\"CogManager\", \"cogs\"),\n os.path.join(\"RepoManager\", \"repos\"),\n ]\n\n # Avoiding circular imports\n from ...cogs.downloader.repo_manager import RepoManager\n\n repo_mgr = RepoManager()\n await repo_mgr.initialize()\n repo_output = []\n for repo in repo_mgr.repos:\n repo_output.append({\"url\": repo.url, \"name\": repo.name, \"branch\": repo.branch})\n repos_file = data_path / \"cogs\" / \"RepoManager\" / \"repos.json\"\n with repos_file.open(\"w\") as fs:\n json.dump(repo_output, fs, indent=4)\n instance_file = data_path / \"instance.json\"\n with instance_file.open(\"w\") as fs:\n json.dump({data_manager.instance_name: data_manager.basic_config}, fs, indent=4)\n for f in data_path.glob(\"**/*\"):\n if not any(ex in str(f) for ex in exclusions) and f.is_file():\n to_backup.append(f)\n\n with tarfile.open(str(backup_fpath), \"w:gz\") as tar:\n for f in to_backup:\n tar.add(str(f), arcname=str(f.relative_to(data_path)), recursive=False)\n return backup_fpath\n\n\n# this might be worth moving to `bot.send_to_owners` at later date\n\n\nasync def send_to_owners_with_preprocessor(\n bot: Red,\n content: str,\n *,\n content_preprocessor: Optional[\n Callable[[Red, discord.abc.Messageable, str], Awaitable[str]]\n ] = None,\n **kwargs,\n):\n \"\"\"\n This sends something to all owners and their configured extra destinations.\n\n This acts the same as `Red.send_to_owners`, with\n one added keyword argument as detailed below in *Other Parameters*.\n\n Other Parameters\n ----------------\n content_preprocessor: Optional[Callable[[Red, discord.abc.Messageable, str], Awaitable[str]]]\n Optional async function that takes\n bot object, owner notification destination and message content\n and returns the content that should be sent to given location.\n \"\"\"\n destinations = await bot.get_owner_notification_destinations()\n\n async def wrapped_send(bot, location, content=None, preprocessor=None, **kwargs):\n try:\n if preprocessor is not None:\n content = await preprocessor(bot, location, content)\n await location.send(content, **kwargs)\n except Exception as _exc:\n main_log.error(\n \"I could not send an owner notification to %s (%s)\",\n location,\n location.id,\n exc_info=_exc,\n )\n\n sends = [wrapped_send(bot, d, content, content_preprocessor, **kwargs) for d in destinations]\n await asyncio.gather(*sends)\n\n\nasync def send_to_owners_with_prefix_replaced(bot: Red, content: str, **kwargs):\n \"\"\"\n This sends something to all owners and their configured extra destinations.\n\n This acts the same as `Red.send_to_owners`, with one addition - `[p]` in ``content`` argument\n is replaced with a clean prefix for each specific destination.\n \"\"\"\n\n async def preprocessor(bot: Red, destination: discord.abc.Messageable, content: str) -> str:\n prefixes = await bot.get_valid_prefixes(getattr(destination, \"guild\", None))\n prefix = re.sub(\n rf\"<@!?{bot.user.id}>\", f\"@{bot.user.name}\".replace(\"\\\\\", r\"\\\\\"), prefixes[0]\n )\n return 
content.replace(\"[p]\", prefix)\n\n await send_to_owners_with_preprocessor(bot, content, content_preprocessor=preprocessor)\n\n\ndef expected_version(current: str, expected: str) -> bool:\n # `pkg_resources` needs a regular requirement string, so \"x\" serves as requirement's name here\n return current in pkg_resources.Requirement.parse(f\"x{expected}\")\n\n\nasync def fetch_latest_red_version_info() -> Tuple[Optional[VersionInfo], Optional[str]]:\n try:\n async with aiohttp.ClientSession() as session:\n async with session.get(\"https://pypi.org/pypi/Red-DiscordBot/json\") as r:\n data = await r.json()\n except (aiohttp.ClientError, asyncio.TimeoutError):\n return None, None\n else:\n release = VersionInfo.from_str(data[\"info\"][\"version\"])\n required_python = data[\"info\"][\"requires_python\"]\n\n return release, required_python\n", "path": "redbot/core/utils/_internal_utils.py" } ]
[ { "content": "from __future__ import annotations\n\nimport asyncio\nimport collections.abc\nimport json\nimport logging\nimport os\nimport re\nimport shutil\nimport tarfile\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import (\n AsyncIterator,\n Awaitable,\n Callable,\n Iterator,\n List,\n Optional,\n Union,\n TYPE_CHECKING,\n Tuple,\n)\n\nimport aiohttp\nimport discord\nimport pkg_resources\nfrom fuzzywuzzy import fuzz, process\nfrom redbot import VersionInfo\n\nfrom redbot.core import data_manager\nfrom redbot.core.utils.chat_formatting import box\n\nif TYPE_CHECKING:\n from redbot.core.bot import Red\n from redbot.core.commands import Command, Context\n\nmain_log = logging.getLogger(\"red\")\n\n__all__ = (\n \"safe_delete\",\n \"fuzzy_command_search\",\n \"format_fuzzy_results\",\n \"create_backup\",\n \"send_to_owners_with_preprocessor\",\n \"send_to_owners_with_prefix_replaced\",\n \"expected_version\",\n \"fetch_latest_red_version_info\",\n)\n\n\ndef safe_delete(pth: Path):\n if pth.exists():\n for root, dirs, files in os.walk(str(pth)):\n os.chmod(root, 0o700)\n\n for d in dirs:\n os.chmod(os.path.join(root, d), 0o700)\n\n for f in files:\n os.chmod(os.path.join(root, f), 0o700)\n\n shutil.rmtree(str(pth), ignore_errors=True)\n\n\ndef _fuzzy_log_filter(record):\n return record.funcName != \"extractWithoutOrder\"\n\n\nlogging.getLogger().addFilter(_fuzzy_log_filter)\n\n\nasync def fuzzy_command_search(\n ctx: Context,\n term: Optional[str] = None,\n *,\n commands: Optional[Union[AsyncIterator[Command], Iterator[Command]]] = None,\n min_score: int = 80,\n) -> Optional[List[Command]]:\n \"\"\"Search for commands which are similar in name to the one invoked.\n\n Returns a maximum of 5 commands which must all be at least matched\n greater than ``min_score``.\n\n Parameters\n ----------\n ctx : `commands.Context <redbot.core.commands.Context>`\n The command invocation context.\n term : Optional[str]\n The name of the invoked command. If ``None``,\n `Context.invoked_with` will be used instead.\n commands : Optional[Union[AsyncIterator[commands.Command], Iterator[commands.Command]]]\n The commands available to choose from when doing a fuzzy match.\n When omitted, `Bot.walk_commands` will be used instead.\n min_score : int\n The minimum score for matched commands to reach. Defaults to 80.\n\n Returns\n -------\n Optional[List[`commands.Command <redbot.core.commands.Command>`]]\n A list of commands which were fuzzily matched with the invoked\n command.\n\n \"\"\"\n if ctx.guild is not None:\n enabled = await ctx.bot._config.guild(ctx.guild).fuzzy()\n else:\n enabled = await ctx.bot._config.fuzzy()\n\n if not enabled:\n return None\n\n if term is None:\n term = ctx.invoked_with\n\n # If the term is an alias or CC, we don't want to send a supplementary fuzzy search.\n alias_cog = ctx.bot.get_cog(\"Alias\")\n if alias_cog is not None:\n alias = await alias_cog._aliases.get_alias(ctx.guild, term)\n\n if alias:\n return None\n customcom_cog = ctx.bot.get_cog(\"CustomCommands\")\n if customcom_cog is not None:\n cmd_obj = customcom_cog.commandobj\n\n try:\n await cmd_obj.get(ctx.message, term)\n except:\n pass\n else:\n return None\n\n if commands is None:\n choices = set(ctx.bot.walk_commands())\n elif isinstance(commands, collections.abc.AsyncIterator):\n choices = {c async for c in commands}\n else:\n choices = set(commands)\n\n # Do the scoring. 
`extracted` is a list of tuples in the form `(command, score)`\n extracted = process.extract(term, choices, limit=5, scorer=fuzz.QRatio)\n if not extracted:\n return None\n\n # Filter through the fuzzy-matched commands.\n matched_commands = []\n for command, score in extracted:\n if score < min_score:\n # Since the list is in decreasing order of score, we can exit early.\n break\n if await command.can_see(ctx):\n matched_commands.append(command)\n\n return matched_commands\n\n\nasync def format_fuzzy_results(\n ctx: Context, matched_commands: List[Command], *, embed: Optional[bool] = None\n) -> Union[str, discord.Embed]:\n \"\"\"Format the result of a fuzzy command search.\n\n Parameters\n ----------\n ctx : `commands.Context <redbot.core.commands.Context>`\n The context in which this result is being displayed.\n matched_commands : List[`commands.Command <redbot.core.commands.Command>`]\n A list of commands which have been matched by the fuzzy search, sorted\n in order of decreasing similarity.\n embed : bool\n Whether or not the result should be an embed. If set to ``None``, this\n will default to the result of `ctx.embed_requested`.\n\n Returns\n -------\n Union[str, discord.Embed]\n The formatted results.\n\n \"\"\"\n if embed is not False and (embed is True or await ctx.embed_requested()):\n lines = []\n for cmd in matched_commands:\n short_doc = cmd.format_shortdoc_for_context(ctx)\n lines.append(f\"**{ctx.clean_prefix}{cmd.qualified_name}** {short_doc}\")\n return discord.Embed(\n title=\"Perhaps you wanted one of these?\",\n colour=await ctx.embed_colour(),\n description=\"\\n\".join(lines),\n )\n else:\n lines = []\n for cmd in matched_commands:\n short_doc = cmd.format_shortdoc_for_context(ctx)\n lines.append(f\"{ctx.clean_prefix}{cmd.qualified_name} -- {short_doc}\")\n return \"Perhaps you wanted one of these? 
\" + box(\"\\n\".join(lines), lang=\"vhdl\")\n\n\nasync def create_backup(dest: Path = Path.home()) -> Optional[Path]:\n data_path = Path(data_manager.core_data_path().parent)\n if not data_path.exists():\n return None\n\n dest.mkdir(parents=True, exist_ok=True)\n timestr = datetime.utcnow().strftime(\"%Y-%m-%dT%H-%M-%S\")\n backup_fpath = dest / f\"redv3_{data_manager.instance_name}_{timestr}.tar.gz\"\n\n to_backup = []\n exclusions = [\n \"__pycache__\",\n \"Lavalink.jar\",\n os.path.join(\"Downloader\", \"lib\"),\n os.path.join(\"CogManager\", \"cogs\"),\n os.path.join(\"RepoManager\", \"repos\"),\n os.path.join(\"Audio\", \"logs\"),\n ]\n\n # Avoiding circular imports\n from ...cogs.downloader.repo_manager import RepoManager\n\n repo_mgr = RepoManager()\n await repo_mgr.initialize()\n repo_output = []\n for repo in repo_mgr.repos:\n repo_output.append({\"url\": repo.url, \"name\": repo.name, \"branch\": repo.branch})\n repos_file = data_path / \"cogs\" / \"RepoManager\" / \"repos.json\"\n with repos_file.open(\"w\") as fs:\n json.dump(repo_output, fs, indent=4)\n instance_file = data_path / \"instance.json\"\n with instance_file.open(\"w\") as fs:\n json.dump({data_manager.instance_name: data_manager.basic_config}, fs, indent=4)\n for f in data_path.glob(\"**/*\"):\n if not any(ex in str(f) for ex in exclusions) and f.is_file():\n to_backup.append(f)\n\n with tarfile.open(str(backup_fpath), \"w:gz\") as tar:\n for f in to_backup:\n tar.add(str(f), arcname=str(f.relative_to(data_path)), recursive=False)\n return backup_fpath\n\n\n# this might be worth moving to `bot.send_to_owners` at later date\n\n\nasync def send_to_owners_with_preprocessor(\n bot: Red,\n content: str,\n *,\n content_preprocessor: Optional[\n Callable[[Red, discord.abc.Messageable, str], Awaitable[str]]\n ] = None,\n **kwargs,\n):\n \"\"\"\n This sends something to all owners and their configured extra destinations.\n\n This acts the same as `Red.send_to_owners`, with\n one added keyword argument as detailed below in *Other Parameters*.\n\n Other Parameters\n ----------------\n content_preprocessor: Optional[Callable[[Red, discord.abc.Messageable, str], Awaitable[str]]]\n Optional async function that takes\n bot object, owner notification destination and message content\n and returns the content that should be sent to given location.\n \"\"\"\n destinations = await bot.get_owner_notification_destinations()\n\n async def wrapped_send(bot, location, content=None, preprocessor=None, **kwargs):\n try:\n if preprocessor is not None:\n content = await preprocessor(bot, location, content)\n await location.send(content, **kwargs)\n except Exception as _exc:\n main_log.error(\n \"I could not send an owner notification to %s (%s)\",\n location,\n location.id,\n exc_info=_exc,\n )\n\n sends = [wrapped_send(bot, d, content, content_preprocessor, **kwargs) for d in destinations]\n await asyncio.gather(*sends)\n\n\nasync def send_to_owners_with_prefix_replaced(bot: Red, content: str, **kwargs):\n \"\"\"\n This sends something to all owners and their configured extra destinations.\n\n This acts the same as `Red.send_to_owners`, with one addition - `[p]` in ``content`` argument\n is replaced with a clean prefix for each specific destination.\n \"\"\"\n\n async def preprocessor(bot: Red, destination: discord.abc.Messageable, content: str) -> str:\n prefixes = await bot.get_valid_prefixes(getattr(destination, \"guild\", None))\n prefix = re.sub(\n rf\"<@!?{bot.user.id}>\", f\"@{bot.user.name}\".replace(\"\\\\\", r\"\\\\\"), 
prefixes[0]\n )\n return content.replace(\"[p]\", prefix)\n\n await send_to_owners_with_preprocessor(bot, content, content_preprocessor=preprocessor)\n\n\ndef expected_version(current: str, expected: str) -> bool:\n # `pkg_resources` needs a regular requirement string, so \"x\" serves as requirement's name here\n return current in pkg_resources.Requirement.parse(f\"x{expected}\")\n\n\nasync def fetch_latest_red_version_info() -> Tuple[Optional[VersionInfo], Optional[str]]:\n try:\n async with aiohttp.ClientSession() as session:\n async with session.get(\"https://pypi.org/pypi/Red-DiscordBot/json\") as r:\n data = await r.json()\n except (aiohttp.ClientError, asyncio.TimeoutError):\n return None, None\n else:\n release = VersionInfo.from_str(data[\"info\"][\"version\"])\n required_python = data[\"info\"][\"requires_python\"]\n\n return release, required_python\n", "path": "redbot/core/utils/_internal_utils.py" } ]
diff --git a/redbot/core/utils/_internal_utils.py b/redbot/core/utils/_internal_utils.py index e5ffb6ebb52..c91149f0f12 100644 --- a/redbot/core/utils/_internal_utils.py +++ b/redbot/core/utils/_internal_utils.py @@ -211,6 +211,7 @@ async def create_backup(dest: Path = Path.home()) -> Optional[Path]: os.path.join("Downloader", "lib"), os.path.join("CogManager", "cogs"), os.path.join("RepoManager", "repos"), + os.path.join("Audio", "logs"), ] # Avoiding circular imports
plotly__dash-565
New version of dash_renderer is not automatically installed with Dash 0.36.0
Deploying apps on Dash Deployment Server results in `dash-renderer` not being updated if it is already installed (even if the installed version is `0.16.x` and the Dash version is specified as `0.36.0`). This causes an `Error loading dependencies`, as `dash-renderer` attempts to attach event handlers to Dash events, which no longer exist.
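A sketch of the dependency pinning that resolves this, with versions taken from the diff below: locking `dash_renderer` (and the other core packages) in `install_requires` forces pip to upgrade the renderer together with Dash, instead of keeping a stale `0.16.x` install that still expects the removed event system.

```python
# Excerpt of the setup() keyword arguments with the core libraries pinned;
# versions mirror the accompanying diff.
install_requires = [
    'Flask>=0.12',
    'flask-compress',
    'plotly',
    'dash_renderer==0.17.0',        # previously the unpinned 'dash_renderer'
    'dash-core-components==0.43.0',
    'dash-html-components==0.13.5',
    'dash-table==3.3.0',
]
```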
[ { "content": "import io\nfrom setuptools import setup, find_packages\n\nmain_ns = {}\nexec(open('dash/version.py').read(), main_ns) # pylint: disable=exec-used\n\nsetup(\n name='dash',\n version=main_ns['__version__'],\n author='chris p',\n author_email='[email protected]',\n packages=find_packages(exclude=['tests*']),\n include_package_data=True,\n license='MIT',\n description=('A Python framework for building reactive web-apps. '\n 'Developed by Plotly.'),\n long_description=io.open('README.md', encoding='utf-8').read(),\n long_description_content_type='text/markdown',\n install_requires=[\n 'Flask>=0.12',\n 'flask-compress',\n 'plotly',\n 'dash_renderer',\n ],\n entry_points={\n 'console_scripts': [\n 'dash-generate-components ='\n ' dash.development.component_generator:cli'\n ]\n },\n url='https://plot.ly/dash',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Flask',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Financial and Insurance Industry',\n 'Intended Audience :: Healthcare Industry',\n 'Intended Audience :: Manufacturing',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Database :: Front-Ends',\n 'Topic :: Office/Business :: Financial :: Spreadsheet',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Widget Sets'\n ]\n)\n", "path": "setup.py" } ]
[ { "content": "import io\nfrom setuptools import setup, find_packages\n\nmain_ns = {}\nexec(open('dash/version.py').read(), main_ns) # pylint: disable=exec-used\n\nsetup(\n name='dash',\n version=main_ns['__version__'],\n author='chris p',\n author_email='[email protected]',\n packages=find_packages(exclude=['tests*']),\n include_package_data=True,\n license='MIT',\n description=('A Python framework for building reactive web-apps. '\n 'Developed by Plotly.'),\n long_description=io.open('README.md', encoding='utf-8').read(),\n long_description_content_type='text/markdown',\n install_requires=[\n 'Flask>=0.12',\n 'flask-compress',\n 'plotly',\n 'dash_renderer==0.17.0',\n 'dash-core-components==0.43.0',\n 'dash-html-components==0.13.5',\n 'dash-table==3.3.0'\n ],\n entry_points={\n 'console_scripts': [\n 'dash-generate-components ='\n ' dash.development.component_generator:cli'\n ]\n },\n url='https://plot.ly/dash',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Flask',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Financial and Insurance Industry',\n 'Intended Audience :: Healthcare Industry',\n 'Intended Audience :: Manufacturing',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Database :: Front-Ends',\n 'Topic :: Office/Business :: Financial :: Spreadsheet',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Widget Sets'\n ]\n)\n", "path": "setup.py" } ]
diff --git a/.circleci/requirements/dev-requirements-py37.txt b/.circleci/requirements/dev-requirements-py37.txt index f23f28d4a7..45d9a67f42 100644 --- a/.circleci/requirements/dev-requirements-py37.txt +++ b/.circleci/requirements/dev-requirements-py37.txt @@ -1,8 +1,8 @@ -dash_core_components>=0.40.2 -dash_html_components==0.12.0rc3 +dash_core_components>=0.43.0 +dash_html_components==0.13.4 dash-flow-example==0.0.3 dash-dangerously-set-inner-html -git+git://github.com/plotly/dash-renderer@master#egg=dash_renderer +dash_renderer==0.17.0 percy selenium mock diff --git a/.circleci/requirements/dev-requirements.txt b/.circleci/requirements/dev-requirements.txt index dce645d828..01fef6eca0 100644 --- a/.circleci/requirements/dev-requirements.txt +++ b/.circleci/requirements/dev-requirements.txt @@ -1,8 +1,8 @@ -dash_core_components>=0.40.2 -dash_html_components>=0.12.0rc3 +dash_core_components>=0.43.0 +dash_html_components==0.13.4 dash_flow_example==0.0.3 dash-dangerously-set-inner-html -git+git://github.com/plotly/dash-renderer@master#egg=dash_renderer +dash_renderer==0.17.0 percy selenium mock diff --git a/CHANGELOG.md b/CHANGELOG.md index 015999416a..f9e7c9a630 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,9 @@ ## Fixed - Fixed collections.abc deprecation warning for python 3.8 [#563](https://github.com/plotly/dash/pull/563) +## Changed +- Added core libraries as version locked dependencies [#565](https://github.com/plotly/dash/pull/565) + ## [0.36.0] - 2019-01-25 ## Removed - Removed support for `Event` system. Use event properties instead, for example the `n_clicks` property instead of the `click` event, see [#531](https://github.com/plotly/dash/issues/531) for details. `dash_renderer` MUST be upgraded to >=0.17.0 together with this, and it is recommended to update `dash_core_components` to >=0.43.0 and `dash_html_components` to >=0.14.0. [#550](https://github.com/plotly/dash/pull/550) diff --git a/setup.py b/setup.py index 5c27cc94ee..daf3906671 100644 --- a/setup.py +++ b/setup.py @@ -20,7 +20,10 @@ 'Flask>=0.12', 'flask-compress', 'plotly', - 'dash_renderer', + 'dash_renderer==0.17.0', + 'dash-core-components==0.43.0', + 'dash-html-components==0.13.5', + 'dash-table==3.3.0' ], entry_points={ 'console_scripts': [
lutris__lutris-3306
lutris lutris:rungameid/ does not work
**Describe the bug**
Using a desktop shortcut does not work; Lutris does not start the game.

**Steps to reproduce**
- Install Kubuntu 20.10 + Lutris 0.5.8.
- Install a game and create a desktop shortcut from Lutris.
- Try to launch the game using the desktop shortcut.

**Lutris debugging output (Optional)**
Complete log:
```
~ ❯ lutris -d lutris:rungameid/1219
INFO 2020-11-15 15:53:55,474 [application.do_command_line:319]:Lutris 0.5.8
INFO 2020-11-15 15:53:55,474 [startup.check_driver:69]:Running X.Org Mesa driver 20.2.2 on Radeon RX 560 Series (POLARIS11, DRM 3.39.0, 5.9.8-xanmod1, LLVM 11.0.0) (0x67ff)
INFO 2020-11-15 15:53:55,475 [startup.check_driver:81]:GPU: 1002:67FF 1043:04BC (amdgpu drivers)
```

**System information (Optional)**
[lutris-issue-report.zip](https://github.com/lutris/lutris/files/5542468/lutris-issue-report.zip)
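For context, a hypothetical re-implementation of how a `lutris:rungameid/<id>` URI decomposes into the pieces the client acts on; the real parsing lives in `lutris.api.parse_installer_url`, and the `application.py` source below then looks the numeric id up with `games_db.get_game_by_field(game_slug, "id")`. The function name here is invented for illustration; the returned keys mirror what that code reads.

```python
# Illustrative only; not the actual Lutris implementation.
def parse_lutris_uri(uri: str):
    prefix = "lutris:"
    if not uri.startswith(prefix):
        return None
    action, _, game_slug = uri[len(prefix):].lstrip("/").partition("/")
    # For "rungameid", game_slug carries the numeric database id.
    return {"action": action, "game_slug": game_slug, "revision": None}

print(parse_lutris_uri("lutris:rungameid/1219"))
# -> {'action': 'rungameid', 'game_slug': '1219', 'revision': None}
```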
[ { "content": "# pylint: disable=no-member,wrong-import-position\n#\n# Copyright (C) 2020 Mathieu Comandon <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\nimport json\nimport logging\nimport os\nimport signal\nimport sys\nimport tempfile\nfrom gettext import gettext as _\n\nimport gi\ngi.require_version(\"Gdk\", \"3.0\")\ngi.require_version(\"Gtk\", \"3.0\")\ngi.require_version(\"GnomeDesktop\", \"3.0\")\n\nfrom gi.repository import Gio, GLib, Gtk, GObject\n\nfrom lutris import settings\nfrom lutris.api import parse_installer_url\nfrom lutris.command import exec_command\nfrom lutris.database import games as games_db\nfrom lutris.game import Game\nfrom lutris.installer import get_installers\nfrom lutris.gui.dialogs import ErrorDialog, InstallOrPlayDialog\nfrom lutris.gui.dialogs.issue import IssueReportWindow\nfrom lutris.gui.installerwindow import InstallerWindow\nfrom lutris.gui.widgets.status_icon import LutrisStatusIcon\nfrom lutris.migrations import migrate\nfrom lutris.startup import init_lutris, run_all_checks\nfrom lutris.util import datapath, log\nfrom lutris.util.http import HTTPError, Request\nfrom lutris.util.jobs import AsyncCall\nfrom lutris.util.log import logger\nfrom lutris.util.steam.appmanifest import AppManifest, get_appmanifests\nfrom lutris.util.steam.config import get_steamapps_paths\nfrom lutris.util.wine.dxvk import init_dxvk_versions\nfrom lutris.services import get_services\nfrom lutris.database.services import ServiceGameCollection\n\nfrom .lutriswindow import LutrisWindow\n\n\nclass Application(Gtk.Application):\n\n def __init__(self):\n super().__init__(\n application_id=\"net.lutris.Lutris\",\n flags=Gio.ApplicationFlags.HANDLES_COMMAND_LINE,\n )\n\n GObject.add_emission_hook(Game, \"game-launch\", self.on_game_launch)\n GObject.add_emission_hook(Game, \"game-start\", self.on_game_start)\n GObject.add_emission_hook(Game, \"game-stop\", self.on_game_stop)\n GObject.add_emission_hook(Game, \"game-install\", self.on_game_install)\n\n GLib.set_application_name(_(\"Lutris\"))\n self.window = None\n\n try:\n init_lutris()\n except RuntimeError as ex:\n ErrorDialog(str(ex))\n return\n\n self.running_games = Gio.ListStore.new(Game)\n self.app_windows = {}\n self.tray = None\n self.css_provider = Gtk.CssProvider.new()\n self.run_in_background = False\n\n if os.geteuid() == 0:\n ErrorDialog(_(\"Running Lutris as root is not recommended and may cause unexpected issues\"))\n\n try:\n self.css_provider.load_from_path(os.path.join(datapath.get(), \"ui\", \"lutris.css\"))\n except GLib.Error as e:\n logger.exception(e)\n\n if hasattr(self, \"add_main_option\"):\n self.add_arguments()\n else:\n ErrorDialog(_(\"Your Linux distribution is too old. 
Lutris won't function properly.\"))\n\n def add_arguments(self):\n if hasattr(self, \"set_option_context_summary\"):\n self.set_option_context_summary(_(\n \"Run a game directly by adding the parameter lutris:rungame/game-identifier.\\n\"\n \"If several games share the same identifier you can use the numerical ID \"\n \"(displayed when running lutris --list-games) and add \"\n \"lutris:rungameid/numerical-id.\\n\"\n \"To install a game, add lutris:install/game-identifier.\"\n ))\n else:\n logger.warning(\"GLib.set_option_context_summary missing, \" \"was added in GLib 2.56 (Released 2018-03-12)\")\n self.add_main_option(\n \"version\",\n ord(\"v\"),\n GLib.OptionFlags.NONE,\n GLib.OptionArg.NONE,\n _(\"Print the version of Lutris and exit\"),\n None,\n )\n self.add_main_option(\n \"debug\",\n ord(\"d\"),\n GLib.OptionFlags.NONE,\n GLib.OptionArg.NONE,\n _(\"Show debug messages\"),\n None,\n )\n self.add_main_option(\n \"install\",\n ord(\"i\"),\n GLib.OptionFlags.NONE,\n GLib.OptionArg.STRING,\n _(\"Install a game from a yml file\"),\n None,\n )\n self.add_main_option(\n \"output-script\",\n ord(\"b\"),\n GLib.OptionFlags.NONE,\n GLib.OptionArg.STRING,\n _(\"Generate a bash script to run a game without the client\"),\n None,\n )\n self.add_main_option(\n \"exec\",\n ord(\"e\"),\n GLib.OptionFlags.NONE,\n GLib.OptionArg.STRING,\n _(\"Execute a program with the lutris runtime\"),\n None,\n )\n self.add_main_option(\n \"list-games\",\n ord(\"l\"),\n GLib.OptionFlags.NONE,\n GLib.OptionArg.NONE,\n _(\"List all games in database\"),\n None,\n )\n self.add_main_option(\n \"installed\",\n ord(\"o\"),\n GLib.OptionFlags.NONE,\n GLib.OptionArg.NONE,\n _(\"Only list installed games\"),\n None,\n )\n self.add_main_option(\n \"list-steam-games\",\n ord(\"s\"),\n GLib.OptionFlags.NONE,\n GLib.OptionArg.NONE,\n _(\"List available Steam games\"),\n None,\n )\n self.add_main_option(\n \"list-steam-folders\",\n 0,\n GLib.OptionFlags.NONE,\n GLib.OptionArg.NONE,\n _(\"List all known Steam library folders\"),\n None,\n )\n self.add_main_option(\n \"json\",\n ord(\"j\"),\n GLib.OptionFlags.NONE,\n GLib.OptionArg.NONE,\n _(\"Display the list of games in JSON format\"),\n None,\n )\n self.add_main_option(\n \"reinstall\",\n 0,\n GLib.OptionFlags.NONE,\n GLib.OptionArg.NONE,\n _(\"Reinstall game\"),\n None,\n )\n self.add_main_option(\"submit-issue\", 0, GLib.OptionFlags.NONE, GLib.OptionArg.NONE, _(\"Submit an issue\"), None)\n self.add_main_option(\n GLib.OPTION_REMAINING,\n 0,\n GLib.OptionFlags.NONE,\n GLib.OptionArg.STRING_ARRAY,\n _(\"uri to open\"),\n \"URI\",\n )\n\n def do_startup(self): # pylint: disable=arguments-differ\n Gtk.Application.do_startup(self)\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n action = Gio.SimpleAction.new(\"quit\")\n action.connect(\"activate\", lambda *x: self.quit())\n self.add_action(action)\n self.add_accelerator(\"<Primary>q\", \"app.quit\")\n\n def do_activate(self): # pylint: disable=arguments-differ\n if not self.window:\n self.window = LutrisWindow(application=self)\n screen = self.window.props.screen\n Gtk.StyleContext.add_provider_for_screen(screen, self.css_provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)\n if not self.run_in_background:\n self.window.present()\n else:\n # Reset run in background to False. 
Future calls will set it\n # accordingly\n self.run_in_background = False\n\n def show_window(self, window_class, **kwargs):\n \"\"\"Instanciate a window keeping 1 instance max\n\n Params:\n window_class (Gtk.Window): class to create the instance from\n kwargs (dict): Additional arguments to pass to the instanciated window\n\n Returns:\n Gtk.Window: the existing window instance or a newly created one\n \"\"\"\n window_key = str(window_class) + str(kwargs)\n if self.app_windows.get(window_key):\n self.app_windows[window_key].present()\n return self.app_windows[window_key]\n if issubclass(window_class, Gtk.Dialog):\n window_inst = window_class(parent=self.window, **kwargs)\n else:\n window_inst = window_class(application=self, **kwargs)\n window_inst.connect(\"destroy\", self.on_app_window_destroyed, str(kwargs))\n self.app_windows[window_key] = window_inst\n return window_inst\n\n def show_installer_window(self, installers, service=None, appid=None):\n self.show_window(\n InstallerWindow,\n installers=installers,\n service=service,\n appid=appid\n )\n\n def on_app_window_destroyed(self, app_window, kwargs_str):\n \"\"\"Remove the reference to the window when it has been destroyed\"\"\"\n window_key = str(app_window.__class__) + kwargs_str\n try:\n del self.app_windows[window_key]\n except KeyError:\n pass\n return True\n\n @staticmethod\n def _print(command_line, string):\n # Workaround broken pygobject bindings\n command_line.do_print_literal(command_line, string + \"\\n\")\n\n def generate_script(self, db_game, script_path):\n \"\"\"Output a script to a file.\n The script is capable of launching a game without the client\n \"\"\"\n game = Game(db_game[\"id\"])\n game.load_config()\n game.write_script(script_path)\n\n def do_command_line(self, command_line): # noqa: C901 # pylint: disable=arguments-differ\n # pylint: disable=too-many-locals,too-many-return-statements,too-many-branches\n # pylint: disable=too-many-statements\n # TODO: split into multiple methods to reduce complexity (35)\n options = command_line.get_options_dict()\n\n # Use stdout to output logs, only if no command line argument is\n # provided\n argc = len(sys.argv) - 1\n if \"-d\" in sys.argv or \"--debug\" in sys.argv:\n argc -= 1\n if not argc:\n # Switch back the log output to stderr (the default in Python)\n # to avoid messing with any output from command line options.\n\n # Use when targetting Python 3.7 minimum\n # console_handler.setStream(sys.stderr)\n\n # Until then...\n logger.removeHandler(log.console_handler)\n log.console_handler = logging.StreamHandler(stream=sys.stdout)\n log.console_handler.setFormatter(log.SIMPLE_FORMATTER)\n logger.addHandler(log.console_handler)\n\n # Set up logger\n if options.contains(\"debug\"):\n log.console_handler.setFormatter(log.DEBUG_FORMATTER)\n logger.setLevel(logging.DEBUG)\n\n # Text only commands\n\n # Print Lutris version and exit\n if options.contains(\"version\"):\n executable_name = os.path.basename(sys.argv[0])\n print(executable_name + \"-\" + settings.VERSION)\n logger.setLevel(logging.NOTSET)\n return 0\n\n logger.info(\"Lutris %s\", settings.VERSION)\n migrate()\n run_all_checks()\n AsyncCall(init_dxvk_versions, None)\n\n # List game\n if options.contains(\"list-games\"):\n game_list = games_db.get_games()\n if options.contains(\"installed\"):\n game_list = [game for game in game_list if game[\"installed\"]]\n if options.contains(\"json\"):\n self.print_game_json(command_line, game_list)\n else:\n self.print_game_list(command_line, game_list)\n return 0\n # List 
Steam games\n if options.contains(\"list-steam-games\"):\n self.print_steam_list(command_line)\n return 0\n # List Steam folders\n if options.contains(\"list-steam-folders\"):\n self.print_steam_folders(command_line)\n return 0\n\n # Execute command in Lutris context\n if options.contains(\"exec\"):\n command = options.lookup_value(\"exec\").get_string()\n self.execute_command(command)\n return 0\n\n if options.contains(\"submit-issue\"):\n IssueReportWindow(application=self)\n return 0\n\n try:\n url = options.lookup_value(GLib.OPTION_REMAINING)\n installer_info = self.get_lutris_action(url)\n except ValueError:\n self._print(command_line, _(\"%s is not a valid URI\") % url.get_strv())\n return 1\n\n game_slug = installer_info[\"game_slug\"]\n action = installer_info[\"action\"]\n\n if options.contains(\"output-script\"):\n action = \"write-script\"\n\n revision = installer_info[\"revision\"]\n\n installer_file = None\n if options.contains(\"install\"):\n installer_file = options.lookup_value(\"install\").get_string()\n if installer_file.startswith((\"http:\", \"https:\")):\n try:\n request = Request(installer_file).get()\n except HTTPError:\n self._print(command_line, _(\"Failed to download %s\") % installer_file)\n return 1\n try:\n headers = dict(request.response_headers)\n file_name = headers[\"Content-Disposition\"].split(\"=\", 1)[-1]\n except (KeyError, IndexError):\n file_name = os.path.basename(installer_file)\n file_path = os.path.join(tempfile.gettempdir(), file_name)\n self._print(command_line, _(\"download {url} to {file} started\").format(\n url=installer_file, file=file_path))\n with open(file_path, 'wb') as dest_file:\n dest_file.write(request.content)\n installer_file = file_path\n action = \"install\"\n else:\n installer_file = os.path.abspath(installer_file)\n action = \"install\"\n\n if not os.path.isfile(installer_file):\n self._print(command_line, _(\"No such file: %s\") % installer_file)\n return 1\n\n db_game = None\n if game_slug:\n if action == \"rungameid\":\n # Force db_game to use game id\n self.run_in_background = True\n db_game = games_db.get_game_by_field(game_slug, \"id\")\n elif action == \"rungame\":\n # Force db_game to use game slug\n self.run_in_background = True\n db_game = games_db.get_game_by_field(game_slug, \"slug\")\n elif action == \"install\":\n # Installers can use game or installer slugs\n self.run_in_background = True\n db_game = games_db.get_game_by_field(game_slug, \"slug\") \\\n or games_db.get_game_by_field(game_slug, \"installer_slug\")\n else:\n # Dazed and confused, try anything that might works\n db_game = (\n games_db.get_game_by_field(game_slug, \"id\")\n or games_db.get_game_by_field(game_slug, \"slug\")\n or games_db.get_game_by_field(game_slug, \"installer_slug\")\n )\n\n # If reinstall flag is passed, force the action to install\n if options.contains(\"reinstall\"):\n action = \"install\"\n\n if action == \"write-script\":\n if not db_game or not db_game[\"id\"]:\n logger.warning(\"No game provided to generate the script\")\n return 1\n self.generate_script(db_game, options.lookup_value(\"output-script\").get_string())\n return 0\n\n # Graphical commands\n self.activate()\n self.set_tray_icon()\n\n if not action:\n if db_game and db_game[\"installed\"]:\n # Game found but no action provided, ask what to do\n dlg = InstallOrPlayDialog(db_game[\"name\"])\n if not dlg.action_confirmed:\n action = None\n elif dlg.action == \"play\":\n action = \"rungame\"\n elif dlg.action == \"install\":\n action = \"install\"\n elif game_slug or 
installer_file:\n # No game found, default to install if a game_slug or\n # installer_file is provided\n action = \"install\"\n if action == \"install\":\n installers = get_installers(\n game_slug=game_slug,\n installer_file=installer_file,\n revision=revision,\n )\n if installers:\n self.show_installer_window(installers)\n\n elif action in (\"rungame\", \"rungameid\"):\n if not db_game or not db_game[\"id\"]:\n logger.warning(\"No game found in library\")\n if not self.window.is_visible():\n self.do_shutdown()\n return 0\n game = Game(db_game[\"id\"])\n self.on_game_start(game)\n return 0\n\n def on_game_launch(self, game):\n game.launch()\n return True # Return True to continue handling the emission hook\n\n def on_game_start(self, game):\n self.running_games.append(game)\n if settings.read_setting(\"hide_client_on_game_start\") == \"True\":\n self.window.hide() # Hide launcher window\n return True\n\n def on_game_install(self, game):\n \"\"\"Request installation of a game\"\"\"\n if game.service:\n service = get_services()[game.service]()\n db_game = ServiceGameCollection.get_game(service.id, game.appid)\n service.install(db_game)\n return True\n\n installers = get_installers(game_slug=game.slug)\n if installers:\n self.show_installer_window(installers)\n else:\n logger.debug(\"Should generate automagical installer here but....\")\n logger.debug(\"Wait? how did you get here?\")\n return True\n\n def get_running_game_ids(self):\n ids = []\n for i in range(self.running_games.get_n_items()):\n game = self.running_games.get_item(i)\n ids.append(str(game.id))\n return ids\n\n def get_game_by_id(self, game_id):\n for i in range(self.running_games.get_n_items()):\n game = self.running_games.get_item(i)\n if str(game.id) == str(game_id):\n return game\n return None\n\n def on_game_stop(self, game):\n \"\"\"Callback to remove the game from the running games\"\"\"\n ids = self.get_running_game_ids()\n if str(game.id) in ids:\n try:\n self.running_games.remove(ids.index(str(game.id)))\n except ValueError:\n pass\n else:\n logger.warning(\"%s not in %s\", game.id, ids)\n\n game.emit(\"game-stopped\")\n if settings.read_setting(\"hide_client_on_game_start\") == \"True\":\n self.window.show() # Show launcher window\n elif not self.window.is_visible():\n if self.running_games.get_n_items() == 0:\n self.quit()\n return True\n\n @staticmethod\n def get_lutris_action(url):\n installer_info = {\"game_slug\": None, \"revision\": None, \"action\": None}\n\n if url:\n url = url.get_strv()\n\n if url:\n url = url[0]\n installer_info = parse_installer_url(url)\n if installer_info is False:\n raise ValueError\n return installer_info\n\n def print_game_list(self, command_line, game_list):\n for game in game_list:\n self._print(\n command_line,\n \"{:4} | {:<40} | {:<40} | {:<15} | {:<64}\".format(\n game[\"id\"],\n game[\"name\"][:40],\n game[\"slug\"][:40],\n game[\"runner\"] or \"-\",\n game[\"directory\"] or \"-\",\n ),\n )\n\n def print_game_json(self, command_line, game_list):\n games = [\n {\n \"id\": game[\"id\"],\n \"slug\": game[\"slug\"],\n \"name\": game[\"name\"],\n \"runner\": game[\"runner\"],\n \"platform\": game[\"platform\"],\n \"directory\": game[\"directory\"],\n } for game in game_list\n ]\n self._print(command_line, json.dumps(games, indent=2))\n\n def print_steam_list(self, command_line):\n steamapps_paths = get_steamapps_paths()\n for platform in (\"linux\", \"windows\"):\n for path in steamapps_paths[platform]:\n appmanifest_files = get_appmanifests(path)\n for appmanifest_file in 
appmanifest_files:\n appmanifest = AppManifest(os.path.join(path, appmanifest_file))\n self._print(\n command_line,\n \" {:8} | {:<60} | {:10} | {}\".format(\n appmanifest.steamid,\n appmanifest.name or \"-\",\n platform,\n \", \".join(appmanifest.states),\n ),\n )\n\n @staticmethod\n def execute_command(command):\n \"\"\"Execute an arbitrary command in a Lutris context\n with the runtime enabled and monitored by a MonitoredCommand\n \"\"\"\n logger.info(\"Running command '%s'\", command)\n monitored_command = exec_command(command)\n try:\n GLib.MainLoop().run()\n except KeyboardInterrupt:\n monitored_command.stop()\n\n def print_steam_folders(self, command_line):\n steamapps_paths = get_steamapps_paths()\n for platform in (\"linux\", \"windows\"):\n for path in steamapps_paths[platform]:\n self._print(command_line, path)\n\n def do_shutdown(self): # pylint: disable=arguments-differ\n logger.info(\"Shutting down Lutris\")\n if self.window:\n settings.write_setting(\"selected_category\", self.window.selected_category)\n self.window.destroy()\n Gtk.Application.do_shutdown(self)\n\n def set_tray_icon(self):\n \"\"\"Creates or destroys a tray icon for the application\"\"\"\n active = settings.read_setting(\"show_tray_icon\", default=\"false\").lower() == \"true\"\n if active and not self.tray:\n self.tray = LutrisStatusIcon(application=self)\n if self.tray:\n self.tray.set_visible(active)\n", "path": "lutris/gui/application.py" } ]
[ { "content": "# pylint: disable=no-member,wrong-import-position\n#\n# Copyright (C) 2020 Mathieu Comandon <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\nimport json\nimport logging\nimport os\nimport signal\nimport sys\nimport tempfile\nfrom gettext import gettext as _\n\nimport gi\ngi.require_version(\"Gdk\", \"3.0\")\ngi.require_version(\"Gtk\", \"3.0\")\ngi.require_version(\"GnomeDesktop\", \"3.0\")\n\nfrom gi.repository import Gio, GLib, Gtk, GObject\n\nfrom lutris import settings\nfrom lutris.api import parse_installer_url\nfrom lutris.command import exec_command\nfrom lutris.database import games as games_db\nfrom lutris.game import Game\nfrom lutris.installer import get_installers\nfrom lutris.gui.dialogs import ErrorDialog, InstallOrPlayDialog\nfrom lutris.gui.dialogs.issue import IssueReportWindow\nfrom lutris.gui.installerwindow import InstallerWindow\nfrom lutris.gui.widgets.status_icon import LutrisStatusIcon\nfrom lutris.migrations import migrate\nfrom lutris.startup import init_lutris, run_all_checks\nfrom lutris.util import datapath, log\nfrom lutris.util.http import HTTPError, Request\nfrom lutris.util.jobs import AsyncCall\nfrom lutris.util.log import logger\nfrom lutris.util.steam.appmanifest import AppManifest, get_appmanifests\nfrom lutris.util.steam.config import get_steamapps_paths\nfrom lutris.util.wine.dxvk import init_dxvk_versions\nfrom lutris.services import get_services\nfrom lutris.database.services import ServiceGameCollection\n\nfrom .lutriswindow import LutrisWindow\n\n\nclass Application(Gtk.Application):\n\n def __init__(self):\n super().__init__(\n application_id=\"net.lutris.Lutris\",\n flags=Gio.ApplicationFlags.HANDLES_COMMAND_LINE,\n )\n\n GObject.add_emission_hook(Game, \"game-launch\", self.on_game_launch)\n GObject.add_emission_hook(Game, \"game-start\", self.on_game_start)\n GObject.add_emission_hook(Game, \"game-stop\", self.on_game_stop)\n GObject.add_emission_hook(Game, \"game-install\", self.on_game_install)\n\n GLib.set_application_name(_(\"Lutris\"))\n self.window = None\n\n try:\n init_lutris()\n except RuntimeError as ex:\n ErrorDialog(str(ex))\n return\n\n self.running_games = Gio.ListStore.new(Game)\n self.app_windows = {}\n self.tray = None\n self.css_provider = Gtk.CssProvider.new()\n self.run_in_background = False\n\n if os.geteuid() == 0:\n ErrorDialog(_(\"Running Lutris as root is not recommended and may cause unexpected issues\"))\n\n try:\n self.css_provider.load_from_path(os.path.join(datapath.get(), \"ui\", \"lutris.css\"))\n except GLib.Error as e:\n logger.exception(e)\n\n if hasattr(self, \"add_main_option\"):\n self.add_arguments()\n else:\n ErrorDialog(_(\"Your Linux distribution is too old. 
Lutris won't function properly.\"))\n\n def add_arguments(self):\n if hasattr(self, \"set_option_context_summary\"):\n self.set_option_context_summary(_(\n \"Run a game directly by adding the parameter lutris:rungame/game-identifier.\\n\"\n \"If several games share the same identifier you can use the numerical ID \"\n \"(displayed when running lutris --list-games) and add \"\n \"lutris:rungameid/numerical-id.\\n\"\n \"To install a game, add lutris:install/game-identifier.\"\n ))\n else:\n logger.warning(\"GLib.set_option_context_summary missing, \" \"was added in GLib 2.56 (Released 2018-03-12)\")\n self.add_main_option(\n \"version\",\n ord(\"v\"),\n GLib.OptionFlags.NONE,\n GLib.OptionArg.NONE,\n _(\"Print the version of Lutris and exit\"),\n None,\n )\n self.add_main_option(\n \"debug\",\n ord(\"d\"),\n GLib.OptionFlags.NONE,\n GLib.OptionArg.NONE,\n _(\"Show debug messages\"),\n None,\n )\n self.add_main_option(\n \"install\",\n ord(\"i\"),\n GLib.OptionFlags.NONE,\n GLib.OptionArg.STRING,\n _(\"Install a game from a yml file\"),\n None,\n )\n self.add_main_option(\n \"output-script\",\n ord(\"b\"),\n GLib.OptionFlags.NONE,\n GLib.OptionArg.STRING,\n _(\"Generate a bash script to run a game without the client\"),\n None,\n )\n self.add_main_option(\n \"exec\",\n ord(\"e\"),\n GLib.OptionFlags.NONE,\n GLib.OptionArg.STRING,\n _(\"Execute a program with the lutris runtime\"),\n None,\n )\n self.add_main_option(\n \"list-games\",\n ord(\"l\"),\n GLib.OptionFlags.NONE,\n GLib.OptionArg.NONE,\n _(\"List all games in database\"),\n None,\n )\n self.add_main_option(\n \"installed\",\n ord(\"o\"),\n GLib.OptionFlags.NONE,\n GLib.OptionArg.NONE,\n _(\"Only list installed games\"),\n None,\n )\n self.add_main_option(\n \"list-steam-games\",\n ord(\"s\"),\n GLib.OptionFlags.NONE,\n GLib.OptionArg.NONE,\n _(\"List available Steam games\"),\n None,\n )\n self.add_main_option(\n \"list-steam-folders\",\n 0,\n GLib.OptionFlags.NONE,\n GLib.OptionArg.NONE,\n _(\"List all known Steam library folders\"),\n None,\n )\n self.add_main_option(\n \"json\",\n ord(\"j\"),\n GLib.OptionFlags.NONE,\n GLib.OptionArg.NONE,\n _(\"Display the list of games in JSON format\"),\n None,\n )\n self.add_main_option(\n \"reinstall\",\n 0,\n GLib.OptionFlags.NONE,\n GLib.OptionArg.NONE,\n _(\"Reinstall game\"),\n None,\n )\n self.add_main_option(\"submit-issue\", 0, GLib.OptionFlags.NONE, GLib.OptionArg.NONE, _(\"Submit an issue\"), None)\n self.add_main_option(\n GLib.OPTION_REMAINING,\n 0,\n GLib.OptionFlags.NONE,\n GLib.OptionArg.STRING_ARRAY,\n _(\"uri to open\"),\n \"URI\",\n )\n\n def do_startup(self): # pylint: disable=arguments-differ\n Gtk.Application.do_startup(self)\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n action = Gio.SimpleAction.new(\"quit\")\n action.connect(\"activate\", lambda *x: self.quit())\n self.add_action(action)\n self.add_accelerator(\"<Primary>q\", \"app.quit\")\n\n def do_activate(self): # pylint: disable=arguments-differ\n if not self.window:\n self.window = LutrisWindow(application=self)\n screen = self.window.props.screen\n Gtk.StyleContext.add_provider_for_screen(screen, self.css_provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)\n if not self.run_in_background:\n self.window.present()\n else:\n # Reset run in background to False. 
Future calls will set it\n # accordingly\n self.run_in_background = False\n\n def show_window(self, window_class, **kwargs):\n \"\"\"Instanciate a window keeping 1 instance max\n\n Params:\n window_class (Gtk.Window): class to create the instance from\n kwargs (dict): Additional arguments to pass to the instanciated window\n\n Returns:\n Gtk.Window: the existing window instance or a newly created one\n \"\"\"\n window_key = str(window_class) + str(kwargs)\n if self.app_windows.get(window_key):\n self.app_windows[window_key].present()\n return self.app_windows[window_key]\n if issubclass(window_class, Gtk.Dialog):\n window_inst = window_class(parent=self.window, **kwargs)\n else:\n window_inst = window_class(application=self, **kwargs)\n window_inst.connect(\"destroy\", self.on_app_window_destroyed, str(kwargs))\n self.app_windows[window_key] = window_inst\n return window_inst\n\n def show_installer_window(self, installers, service=None, appid=None):\n self.show_window(\n InstallerWindow,\n installers=installers,\n service=service,\n appid=appid\n )\n\n def on_app_window_destroyed(self, app_window, kwargs_str):\n \"\"\"Remove the reference to the window when it has been destroyed\"\"\"\n window_key = str(app_window.__class__) + kwargs_str\n try:\n del self.app_windows[window_key]\n except KeyError:\n pass\n return True\n\n @staticmethod\n def _print(command_line, string):\n # Workaround broken pygobject bindings\n command_line.do_print_literal(command_line, string + \"\\n\")\n\n def generate_script(self, db_game, script_path):\n \"\"\"Output a script to a file.\n The script is capable of launching a game without the client\n \"\"\"\n game = Game(db_game[\"id\"])\n game.load_config()\n game.write_script(script_path)\n\n def do_command_line(self, command_line): # noqa: C901 # pylint: disable=arguments-differ\n # pylint: disable=too-many-locals,too-many-return-statements,too-many-branches\n # pylint: disable=too-many-statements\n # TODO: split into multiple methods to reduce complexity (35)\n options = command_line.get_options_dict()\n\n # Use stdout to output logs, only if no command line argument is\n # provided\n argc = len(sys.argv) - 1\n if \"-d\" in sys.argv or \"--debug\" in sys.argv:\n argc -= 1\n if not argc:\n # Switch back the log output to stderr (the default in Python)\n # to avoid messing with any output from command line options.\n\n # Use when targetting Python 3.7 minimum\n # console_handler.setStream(sys.stderr)\n\n # Until then...\n logger.removeHandler(log.console_handler)\n log.console_handler = logging.StreamHandler(stream=sys.stdout)\n log.console_handler.setFormatter(log.SIMPLE_FORMATTER)\n logger.addHandler(log.console_handler)\n\n # Set up logger\n if options.contains(\"debug\"):\n log.console_handler.setFormatter(log.DEBUG_FORMATTER)\n logger.setLevel(logging.DEBUG)\n\n # Text only commands\n\n # Print Lutris version and exit\n if options.contains(\"version\"):\n executable_name = os.path.basename(sys.argv[0])\n print(executable_name + \"-\" + settings.VERSION)\n logger.setLevel(logging.NOTSET)\n return 0\n\n logger.info(\"Lutris %s\", settings.VERSION)\n migrate()\n run_all_checks()\n AsyncCall(init_dxvk_versions, None)\n\n # List game\n if options.contains(\"list-games\"):\n game_list = games_db.get_games()\n if options.contains(\"installed\"):\n game_list = [game for game in game_list if game[\"installed\"]]\n if options.contains(\"json\"):\n self.print_game_json(command_line, game_list)\n else:\n self.print_game_list(command_line, game_list)\n return 0\n # List 
Steam games\n if options.contains(\"list-steam-games\"):\n self.print_steam_list(command_line)\n return 0\n # List Steam folders\n if options.contains(\"list-steam-folders\"):\n self.print_steam_folders(command_line)\n return 0\n\n # Execute command in Lutris context\n if options.contains(\"exec\"):\n command = options.lookup_value(\"exec\").get_string()\n self.execute_command(command)\n return 0\n\n if options.contains(\"submit-issue\"):\n IssueReportWindow(application=self)\n return 0\n\n try:\n url = options.lookup_value(GLib.OPTION_REMAINING)\n installer_info = self.get_lutris_action(url)\n except ValueError:\n self._print(command_line, _(\"%s is not a valid URI\") % url.get_strv())\n return 1\n\n game_slug = installer_info[\"game_slug\"]\n action = installer_info[\"action\"]\n\n if options.contains(\"output-script\"):\n action = \"write-script\"\n\n revision = installer_info[\"revision\"]\n\n installer_file = None\n if options.contains(\"install\"):\n installer_file = options.lookup_value(\"install\").get_string()\n if installer_file.startswith((\"http:\", \"https:\")):\n try:\n request = Request(installer_file).get()\n except HTTPError:\n self._print(command_line, _(\"Failed to download %s\") % installer_file)\n return 1\n try:\n headers = dict(request.response_headers)\n file_name = headers[\"Content-Disposition\"].split(\"=\", 1)[-1]\n except (KeyError, IndexError):\n file_name = os.path.basename(installer_file)\n file_path = os.path.join(tempfile.gettempdir(), file_name)\n self._print(command_line, _(\"download {url} to {file} started\").format(\n url=installer_file, file=file_path))\n with open(file_path, 'wb') as dest_file:\n dest_file.write(request.content)\n installer_file = file_path\n action = \"install\"\n else:\n installer_file = os.path.abspath(installer_file)\n action = \"install\"\n\n if not os.path.isfile(installer_file):\n self._print(command_line, _(\"No such file: %s\") % installer_file)\n return 1\n\n db_game = None\n if game_slug:\n if action == \"rungameid\":\n # Force db_game to use game id\n self.run_in_background = True\n db_game = games_db.get_game_by_field(game_slug, \"id\")\n elif action == \"rungame\":\n # Force db_game to use game slug\n self.run_in_background = True\n db_game = games_db.get_game_by_field(game_slug, \"slug\")\n elif action == \"install\":\n # Installers can use game or installer slugs\n self.run_in_background = True\n db_game = games_db.get_game_by_field(game_slug, \"slug\") \\\n or games_db.get_game_by_field(game_slug, \"installer_slug\")\n else:\n # Dazed and confused, try anything that might works\n db_game = (\n games_db.get_game_by_field(game_slug, \"id\")\n or games_db.get_game_by_field(game_slug, \"slug\")\n or games_db.get_game_by_field(game_slug, \"installer_slug\")\n )\n\n # If reinstall flag is passed, force the action to install\n if options.contains(\"reinstall\"):\n action = \"install\"\n\n if action == \"write-script\":\n if not db_game or not db_game[\"id\"]:\n logger.warning(\"No game provided to generate the script\")\n return 1\n self.generate_script(db_game, options.lookup_value(\"output-script\").get_string())\n return 0\n\n # Graphical commands\n self.activate()\n self.set_tray_icon()\n\n if not action:\n if db_game and db_game[\"installed\"]:\n # Game found but no action provided, ask what to do\n dlg = InstallOrPlayDialog(db_game[\"name\"])\n if not dlg.action_confirmed:\n action = None\n elif dlg.action == \"play\":\n action = \"rungame\"\n elif dlg.action == \"install\":\n action = \"install\"\n elif game_slug or 
installer_file:\n # No game found, default to install if a game_slug or\n # installer_file is provided\n action = \"install\"\n if action == \"install\":\n installers = get_installers(\n game_slug=game_slug,\n installer_file=installer_file,\n revision=revision,\n )\n if installers:\n self.show_installer_window(installers)\n\n elif action in (\"rungame\", \"rungameid\"):\n if not db_game or not db_game[\"id\"]:\n logger.warning(\"No game found in library\")\n if not self.window.is_visible():\n self.do_shutdown()\n return 0\n game = Game(db_game[\"id\"])\n self.on_game_launch(game)\n return 0\n\n def on_game_launch(self, game):\n game.launch()\n return True # Return True to continue handling the emission hook\n\n def on_game_start(self, game):\n self.running_games.append(game)\n if settings.read_setting(\"hide_client_on_game_start\") == \"True\":\n self.window.hide() # Hide launcher window\n return True\n\n def on_game_install(self, game):\n \"\"\"Request installation of a game\"\"\"\n if game.service:\n service = get_services()[game.service]()\n db_game = ServiceGameCollection.get_game(service.id, game.appid)\n service.install(db_game)\n return True\n\n installers = get_installers(game_slug=game.slug)\n if installers:\n self.show_installer_window(installers)\n else:\n logger.debug(\"Should generate automagical installer here but....\")\n logger.debug(\"Wait? how did you get here?\")\n return True\n\n def get_running_game_ids(self):\n ids = []\n for i in range(self.running_games.get_n_items()):\n game = self.running_games.get_item(i)\n ids.append(str(game.id))\n return ids\n\n def get_game_by_id(self, game_id):\n for i in range(self.running_games.get_n_items()):\n game = self.running_games.get_item(i)\n if str(game.id) == str(game_id):\n return game\n return None\n\n def on_game_stop(self, game):\n \"\"\"Callback to remove the game from the running games\"\"\"\n ids = self.get_running_game_ids()\n if str(game.id) in ids:\n try:\n self.running_games.remove(ids.index(str(game.id)))\n except ValueError:\n pass\n else:\n logger.warning(\"%s not in %s\", game.id, ids)\n\n game.emit(\"game-stopped\")\n if settings.read_setting(\"hide_client_on_game_start\") == \"True\":\n self.window.show() # Show launcher window\n elif not self.window.is_visible():\n if self.running_games.get_n_items() == 0:\n self.quit()\n return True\n\n @staticmethod\n def get_lutris_action(url):\n installer_info = {\"game_slug\": None, \"revision\": None, \"action\": None}\n\n if url:\n url = url.get_strv()\n\n if url:\n url = url[0]\n installer_info = parse_installer_url(url)\n if installer_info is False:\n raise ValueError\n return installer_info\n\n def print_game_list(self, command_line, game_list):\n for game in game_list:\n self._print(\n command_line,\n \"{:4} | {:<40} | {:<40} | {:<15} | {:<64}\".format(\n game[\"id\"],\n game[\"name\"][:40],\n game[\"slug\"][:40],\n game[\"runner\"] or \"-\",\n game[\"directory\"] or \"-\",\n ),\n )\n\n def print_game_json(self, command_line, game_list):\n games = [\n {\n \"id\": game[\"id\"],\n \"slug\": game[\"slug\"],\n \"name\": game[\"name\"],\n \"runner\": game[\"runner\"],\n \"platform\": game[\"platform\"],\n \"directory\": game[\"directory\"],\n } for game in game_list\n ]\n self._print(command_line, json.dumps(games, indent=2))\n\n def print_steam_list(self, command_line):\n steamapps_paths = get_steamapps_paths()\n for platform in (\"linux\", \"windows\"):\n for path in steamapps_paths[platform]:\n appmanifest_files = get_appmanifests(path)\n for appmanifest_file in 
appmanifest_files:\n appmanifest = AppManifest(os.path.join(path, appmanifest_file))\n self._print(\n command_line,\n \" {:8} | {:<60} | {:10} | {}\".format(\n appmanifest.steamid,\n appmanifest.name or \"-\",\n platform,\n \", \".join(appmanifest.states),\n ),\n )\n\n @staticmethod\n def execute_command(command):\n \"\"\"Execute an arbitrary command in a Lutris context\n with the runtime enabled and monitored by a MonitoredCommand\n \"\"\"\n logger.info(\"Running command '%s'\", command)\n monitored_command = exec_command(command)\n try:\n GLib.MainLoop().run()\n except KeyboardInterrupt:\n monitored_command.stop()\n\n def print_steam_folders(self, command_line):\n steamapps_paths = get_steamapps_paths()\n for platform in (\"linux\", \"windows\"):\n for path in steamapps_paths[platform]:\n self._print(command_line, path)\n\n def do_shutdown(self): # pylint: disable=arguments-differ\n logger.info(\"Shutting down Lutris\")\n if self.window:\n settings.write_setting(\"selected_category\", self.window.selected_category)\n self.window.destroy()\n Gtk.Application.do_shutdown(self)\n\n def set_tray_icon(self):\n \"\"\"Creates or destroys a tray icon for the application\"\"\"\n active = settings.read_setting(\"show_tray_icon\", default=\"false\").lower() == \"true\"\n if active and not self.tray:\n self.tray = LutrisStatusIcon(application=self)\n if self.tray:\n self.tray.set_visible(active)\n", "path": "lutris/gui/application.py" } ]
diff --git a/lutris/gui/application.py b/lutris/gui/application.py index 46e955a2ff..71e15a8d65 100644 --- a/lutris/gui/application.py +++ b/lutris/gui/application.py @@ -462,7 +462,7 @@ def do_command_line(self, command_line): # noqa: C901 # pylint: disable=argume self.do_shutdown() return 0 game = Game(db_game["id"]) - self.on_game_start(game) + self.on_game_launch(game) return 0 def on_game_launch(self, game):
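The one-line change in this diff is easy to misread. From the handlers visible in the files above, `on_game_launch` is what actually starts the process (it calls `game.launch()`), while `on_game_start` only does bookkeeping for a game that is already running. A stripped-down sketch of that split, with illustrative stand-in classes rather than the real Lutris implementation:

```python
# Stripped-down sketch of the two handlers. Game and Application here are
# hypothetical stand-ins for the Lutris classes, kept only to show the split
# in responsibilities that the diff above relies on.
class Game:
    def launch(self):
        print("spawning the game process...")


class Application:
    def __init__(self):
        self.running_games = []

    def on_game_launch(self, game):
        game.launch()  # actually starts the game
        return True

    def on_game_start(self, game):
        # bookkeeping only: assumes the game is already running
        self.running_games.append(game)
        return True


app = Application()
app.on_game_launch(Game())  # prints: spawning the game process...
```

So routing the `rungame`/`rungameid` command-line actions through `on_game_start` meant nothing was ever spawned; pointing them at `on_game_launch`, as the patch does, restores the launch.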
django-cms__django-cms-1372
Labels missing in admin for cms.plugins.text
I'm using a model that is a subclass of cms.plugins.text.models.AbstractText, but in the admin the labels for all of its fields have disappeared. I think there should be a

```
{{ field.label_tag }}
```

at the appropriate place in /cms/plugins/text/templates/cms/plugins/text_plugin_fieldset.html (compare with /django/contrib/admin/templates/admin/includes/fieldset.html). The labels do indeed appear when I add that piece of code.
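For context, here is a minimal sketch of the kind of setup the reporter describes: a model subclassing `AbstractText`, registered as a text plugin. The `AnnotatedText`, `footnote`, and `AnnotatedTextPlugin` names are hypothetical; only the django-cms imports come from the library itself. With the stock `text_plugin_fieldset.html`, the change form for such a plugin renders the extra field without its label.

```python
# Hypothetical reproduction of the reported setup. AnnotatedText, footnote
# and AnnotatedTextPlugin are made-up names; the imports are django-cms API
# of this era.
from django.db import models

from cms.plugin_pool import plugin_pool
from cms.plugins.text.cms_plugins import TextPlugin
from cms.plugins.text.models import AbstractText


class AnnotatedText(AbstractText):
    # Extra field: its label is what goes missing in the plugin change form.
    footnote = models.CharField(max_length=200)


class AnnotatedTextPlugin(TextPlugin):
    model = AnnotatedText
    name = "Annotated text"


plugin_pool.register_plugin(AnnotatedTextPlugin)
```

Consistent with the report, the change form template visible in the diff below relies on Django's standard `admin/includes/fieldset.html` (which does emit `label_tag`) and hides only the body field's label via `div.field-body label {display:none}`.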
[ { "content": "# -*- coding: utf-8 -*-\nfrom django.conf import settings\nfrom django.http import HttpResponse\nfrom django.shortcuts import render_to_response\nfrom django.template.context import RequestContext\n\nfrom django.contrib.sites.models import Site\n\nfrom cms.models import Page\nfrom cms.utils import permissions, moderator, get_language_from_request\nfrom cms.utils.permissions import has_global_page_permission\n\nNOT_FOUND_RESPONSE = \"NotFound\"\n\n\ndef get_admin_menu_item_context(request, page, filtered=False):\n \"\"\"\n Used for rendering the page tree, inserts into context everything what\n we need for single item\n \"\"\"\n has_add_page_permission = page.has_add_permission(request)\n has_move_page_permission = page.has_move_page_permission(request)\n \n site = Site.objects.get_current()\n lang = get_language_from_request(request)\n #slug = page.get_slug(language=lang, fallback=True) # why was this here ??\n metadata = \"\"\n if settings.CMS_PERMISSION:\n # jstree metadata generator \n md = []\n \n #if not has_add_page_permission:\n if not has_move_page_permission:\n md.append(('valid_children', False))\n md.append(('draggable', False))\n if md:\n # just turn it into simple javasript object\n metadata = \"{\" + \", \".join(map(lambda e: \"%s: %s\" %(e[0], \n isinstance(e[1], bool) and str(e[1]) or e[1].lower() ), md)) + \"}\"\n \n moderator_state = moderator.page_moderator_state(request, page)\n has_add_on_same_level_permission = False\n opts = Page._meta\n if settings.CMS_PERMISSION:\n perms = has_global_page_permission(request, page.site_id, can_add=True)\n if (request.user.has_perm(opts.app_label + '.' + opts.get_add_permission()) and perms):\n has_add_on_same_level_permission = True\n \n if not has_add_on_same_level_permission and page.parent_id:\n has_add_on_same_level_permission = permissions.has_generic_permission(page.parent_id, request.user, \"add\", page.site)\n #has_add_on_same_level_permission = has_add_page_on_same_level_permission(request, page)\n context = {\n 'page': page,\n 'site': site,\n 'lang': lang,\n 'filtered': filtered,\n 'metadata': metadata,\n \n 'has_change_permission': page.has_change_permission(request),\n 'has_publish_permission': page.has_publish_permission(request),\n 'has_delete_permission': page.has_delete_permission(request),\n 'has_move_page_permission': has_move_page_permission,\n 'has_add_page_permission': has_add_page_permission,\n 'has_moderate_permission': page.has_moderate_permission(request),\n 'page_moderator_state': moderator_state,\n 'moderator_should_approve': moderator_state['state'] >= moderator.I_APPROVE,\n 'has_add_on_same_level_permission': has_add_on_same_level_permission,\n 'CMS_PERMISSION': settings.CMS_PERMISSION,\n 'CMS_MODERATOR': settings.CMS_MODERATOR,\n }\n return context\n\n\ndef render_admin_menu_item(request, page, template=None):\n \"\"\"\n Renders requested page item for the tree. 
This is used in case when item\n must be reloaded over ajax.\n \"\"\"\n if not template:\n template = \"admin/cms/page/menu_item.html\"\n\n if not page.pk:\n return HttpResponse(NOT_FOUND_RESPONSE) # Not found - tree will remove item\n \n # languages\n languages = []\n if page.site_id in settings.CMS_SITE_LANGUAGES:\n languages = settings.CMS_SITE_LANGUAGES[page.site_id]\n else:\n languages = [x[0] for x in settings.CMS_LANGUAGES]\n \n context = RequestContext(request, {\n 'has_add_permission': permissions.has_page_add_permission(request),\n 'site_languages': languages,\n })\n \n filtered = 'filtered' in request.REQUEST\n context.update(get_admin_menu_item_context(request, page, filtered))\n return render_to_response(template, context)\n", "path": "cms/utils/admin.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nfrom django.conf import settings\nfrom django.http import HttpResponse\nfrom django.shortcuts import render_to_response\nfrom django.template.context import RequestContext\n\nfrom django.contrib.sites.models import Site\n\nfrom cms.models import Page\nfrom cms.utils import permissions, moderator, get_language_from_request\nfrom cms.utils.permissions import has_global_page_permission\n\nNOT_FOUND_RESPONSE = \"NotFound\"\n\n\ndef get_admin_menu_item_context(request, page, filtered=False):\n \"\"\"\n Used for rendering the page tree, inserts into context everything what\n we need for single item\n \"\"\"\n has_add_page_permission = page.has_add_permission(request)\n has_move_page_permission = page.has_move_page_permission(request)\n \n site = Site.objects.get_current()\n lang = get_language_from_request(request)\n #slug = page.get_slug(language=lang, fallback=True) # why was this here ??\n metadata = \"\"\n if settings.CMS_PERMISSION:\n # jstree metadata generator \n md = []\n \n #if not has_add_page_permission:\n if not has_move_page_permission:\n md.append(('valid_children', False))\n md.append(('draggable', False))\n if md:\n # just turn it into simple javasript object\n metadata = \"{\" + \", \".join(map(lambda e: \"%s: %s\" %(e[0], \n isinstance(e[1], bool) and str(e[1]) or e[1].lower() ), md)) + \"}\"\n \n moderator_state = moderator.page_moderator_state(request, page)\n has_add_on_same_level_permission = False\n opts = Page._meta\n if settings.CMS_PERMISSION:\n perms = has_global_page_permission(request, page.site_id, can_add=True)\n if (request.user.has_perm(opts.app_label + '.' + opts.get_add_permission()) and perms):\n has_add_on_same_level_permission = True\n \n if not has_add_on_same_level_permission and page.parent_id:\n has_add_on_same_level_permission = permissions.has_generic_permission(page.parent_id, request.user, \"add\", page.site)\n #has_add_on_same_level_permission = has_add_page_on_same_level_permission(request, page)\n context = {\n 'page': page,\n 'site': site,\n 'lang': lang,\n 'filtered': filtered,\n 'metadata': metadata,\n \n 'has_change_permission': page.has_change_permission(request),\n 'has_publish_permission': page.has_publish_permission(request),\n 'has_delete_permission': page.has_delete_permission(request),\n 'has_move_page_permission': has_move_page_permission,\n 'has_add_page_permission': has_add_page_permission,\n 'has_moderate_permission': page.has_moderate_permission(request),\n 'page_moderator_state': moderator_state,\n 'moderator_should_approve': moderator_state['state'] >= moderator.I_APPROVE,\n 'has_add_on_same_level_permission': has_add_on_same_level_permission,\n 'CMS_PERMISSION': settings.CMS_PERMISSION,\n 'CMS_MODERATOR': settings.CMS_MODERATOR,\n }\n return context\n\n\ndef render_admin_menu_item(request, page, template=None):\n \"\"\"\n Renders requested page item for the tree. 
This is used in case when item\n must be reloaded over ajax.\n \"\"\"\n if not template:\n template = \"admin/cms/page/menu_item.html\"\n\n if not page.pk:\n return HttpResponse(NOT_FOUND_RESPONSE) # Not found - tree will remove item\n \n # languages\n languages = []\n if page.site_id in settings.CMS_SITE_LANGUAGES:\n languages = settings.CMS_SITE_LANGUAGES[page.site_id]\n else:\n languages = [x[0] for x in settings.CMS_LANGUAGES]\n \n context = RequestContext(request, {\n 'has_add_permission': permissions.has_page_add_permission(request),\n 'site_languages': languages,\n })\n \n filtered = 'filtered' in request.REQUEST\n context.update(get_admin_menu_item_context(request, page, filtered))\n # add mimetype to help out IE\n return render_to_response(template, context, mimetype=\"text/html; charset=utf-8\")\n", "path": "cms/utils/admin.py" } ]
diff --git a/cms/plugins/text/templates/cms/plugins/text_plugin_change_form.html b/cms/plugins/text/templates/cms/plugins/text_plugin_change_form.html index 82b4a7036e6..162fe95fec0 100644 --- a/cms/plugins/text/templates/cms/plugins/text_plugin_change_form.html +++ b/cms/plugins/text/templates/cms/plugins/text_plugin_change_form.html @@ -5,14 +5,13 @@ {% for fieldset in adminform %} {% include "admin/includes/fieldset.html" %} {% endfor %} -<script type="text/javascript" src="{{ STATIC_URL }}cms/js/libs/classy.min.js"></script> -<script type="text/javascript" src="{{ STATIC_URL }}cms/js/plugins/cms.setup.js"></script> -<script type="text/javascript" src="{{ STATIC_URL }}cms/js/plugins/cms.base.js"></script> <script type="text/javascript"> -jQuery(document).ready(function ($) { - // initialize security patch - CMS.API.Security.csrf(); -}); +(function namespacing(jQuery) { + jQuery(document).ready(function ($) { + // initialize security patch + window.CMS.API.Security.csrf(); + }); +})(window.CMS.$) </script> <style> div.field-body label {display:none} diff --git a/cms/plugins/text/templates/cms/plugins/widgets/wymeditor.html b/cms/plugins/text/templates/cms/plugins/widgets/wymeditor.html index 113e2485894..1c88d30b3be 100644 --- a/cms/plugins/text/templates/cms/plugins/widgets/wymeditor.html +++ b/cms/plugins/text/templates/cms/plugins/widgets/wymeditor.html @@ -7,110 +7,113 @@ {% include "cms/plugins/widgets/widget_lib.js" %} -jQuery(document).ready(function ($) { - // scroll to top - scrollTo(0, 0); - - // init wysiwyg - $('#id_{{ name }}').wymeditor({ - lang: '{{ language }}', - skin: 'django', - skinPath: "{{ STATIC_URL }}cms/js/wymeditor/skins/django/", - updateSelector: 'input[type=submit],', - updateEvent: 'click', - logoHtml: '', - toolsItems: [ - {{ WYM_TOOLS }} - ], - containersItems: [ - {{ WYM_CONTAINERS }} - ], - classesItems: [ - {{ WYM_CLASSES }} - ], - editorStyles: [ - {{ WYM_STYLES }} - ], - {% if WYM_STYLESHEET %} - stylesheet: - {{ WYM_STYLESHEET }} - , - {% endif %} - postInit: function(wym) { - //wym.resizable({handles: "s", maxHeight: 600}); - //construct the insertLinkButton html - html = get_plugin_html() - //add the button to the tools box - jQuery(wym._box) - .find(wym._options.toolsSelector + wym._options.toolsListSelector) - .append(html); - // Enable the placeholderbridge plugin, to allow - // the placeholder controls to talk to editor - wym.placeholderbridge({'name': '{{ name }}'}); - init_buttons("{{ name }}"); - }, - //handle click event on dialog's submit button - postInitDialog: function( wym, wdw ) { - - } - }); - - /* onclick for 'Insert object' */ - function init_buttons(placeholder){ - $('span.insert-object').click(function(){ - var select = $(this).parent().children("select"); - var pluginvalue = select.attr('value'); - var splits = window.location.href.split("?")[0].split("/"); - var parent_id = Number(splits[splits.length - 2]); - var language = $('#id_language').attr('value'); - - if (pluginvalue == "") { - alert("{% filter escapejs %}{% trans "Please select a plugin type." %}{% endfilter %}"); - return; - } - - var texteditor = get_editor(placeholder); - if (texteditor == null || texteditor.insertText == null) { - alert("{% filter escapejs %}{% trans "Text editor does not support inserting objects." 
%}{% endfilter %}"); - return; - } - // First create db instance using AJAX post back - add_plugin(pluginvalue, parent_id, language) - - }).css("cursor", "pointer").css("margin", "5px"); +(function namespacing(CMS) { + CMS.$(document).ready(function () { + // scroll to top + scrollTo(0, 0); - /* onclick for 'Edit selected object' */ - $('span.edit-object').click(function(){ - var texteditor = get_editor(placeholder); - if (texteditor == null || texteditor.selectedObject == null) { - alert("{% filter escapejs %}{% trans "Text editor does not support editing objects." %}{% endfilter %}"); - return; + // init wysiwyg + $('#id_{{ name }}').wymeditor({ + lang: '{{ language }}', + skin: 'django', + skinPath: "{{ STATIC_URL }}cms/js/wymeditor/skins/django/", + updateSelector: 'input[type=submit],', + updateEvent: 'click', + logoHtml: '', + toolsItems: [ + {{ WYM_TOOLS }} + ], + containersItems: [ + {{ WYM_CONTAINERS }} + ], + classesItems: [ + {{ WYM_CLASSES }} + ], + editorStyles: [ + {{ WYM_STYLES }} + ], + {% if WYM_STYLESHEET %} + stylesheet: + {{ WYM_STYLESHEET }} + , + {% endif %} + postInit: function(wym) { + //wym.resizable({handles: "s", maxHeight: 600}); + //construct the insertLinkButton html + html = get_plugin_html() + //add the button to the tools box + $(wym._box) + .find(wym._options.toolsSelector + wym._options.toolsListSelector) + .append(html); + // Enable the placeholderbridge plugin, to allow + // the placeholder controls to talk to editor + wym.placeholderbridge({'name': '{{ name }}'}); + init_buttons("{{ name }}"); + }, + //handle click event on dialog's submit button + postInitDialog: function( wym, wdw ) { + } - var imgobj = texteditor.selectedObject(); - if (imgobj == null) { - alert("{% filter escapejs %}{% trans "No object selected." %}{% endfilter %}"); - return; - } - if (imgobj.id == null || imgobj.id.indexOf("plugin_obj_") != 0) { - alert("{% filter escapejs %}{% trans "Not a plugin object" %}{% endfilter %}"); - return; - } - var plugin_id = imgobj.id.substr("plugin_obj_".length); - edit_plugin(plugin_id); - }).css("cursor", "pointer").css("margin","5px"); + }); + + /* onclick for 'Insert object' */ + function init_buttons(placeholder){ + $('span.insert-object').click(function(){ + var select = $(this).parent().children("select"); + var pluginvalue = select.attr('value'); + var splits = window.location.href.split("?")[0].split("/"); + var parent_id = Number(splits[splits.length - 2]); + var language = $('#id_language').attr('value'); + + if (pluginvalue == "") { + alert("{% filter escapejs %}{% trans "Please select a plugin type." %}{% endfilter %}"); + return; + } + + var texteditor = get_editor(placeholder); + if (texteditor == null || texteditor.insertText == null) { + alert("{% filter escapejs %}{% trans "Text editor does not support inserting objects." %}{% endfilter %}"); + return; + } + // First create db instance using AJAX post back + add_plugin(pluginvalue, parent_id, language) + + }).css("cursor", "pointer").css("margin", "5px"); + + /* onclick for 'Edit selected object' */ + $('span.edit-object').click(function(){ + var texteditor = get_editor(placeholder); + if (texteditor == null || texteditor.selectedObject == null) { + alert("{% filter escapejs %}{% trans "Text editor does not support editing objects." %}{% endfilter %}"); + return; + } + var imgobj = texteditor.selectedObject(); + if (imgobj == null) { + alert("{% filter escapejs %}{% trans "No object selected." 
%}{% endfilter %}"); + return; + } + if (imgobj.id == null || imgobj.id.indexOf("plugin_obj_") != 0) { + alert("{% filter escapejs %}{% trans "Not a plugin object" %}{% endfilter %}"); + return; + } + var plugin_id = imgobj.id.substr("plugin_obj_".length); + edit_plugin(plugin_id); + }).css("cursor", "pointer").css("margin","5px"); + } + }); + + function get_plugin_html(){ + html = '<li class="wym_tools_plugins">' + + '<select name="plugins">' + + '<option value="" selected="selected">{% filter escapejs %}{% trans "Available Plugins" %}{% endfilter %}</option>'{% for p in installed_plugins %} + + '<option value="{{ p.value }}">{{ p.name }}</option>'{% endfor %} + + '</select>' + + '<span class="insert-object addlink">{% filter escapejs %}{% trans "Insert plugin" %}{% endfilter %}</span>' + + '<span class="edit-object changelink">{% filter escapejs %}{% trans "Edit selected plugin" %}{% endfilter %}</span>' + + '</li>'; + return html; } -}); +})(window.CMS); -function get_plugin_html(){ - html = '<li class="wym_tools_plugins">' - + '<select name="plugins">' - + '<option value="" selected="selected">{% filter escapejs %}{% trans "Available Plugins" %}{% endfilter %}</option>'{% for p in installed_plugins %} - + '<option value="{{ p.value }}">{{ p.name }}</option>'{% endfor %} - + '</select>' - + '<span class="insert-object addlink">{% filter escapejs %}{% trans "Insert plugin" %}{% endfilter %}</span>' - + '<span class="edit-object changelink">{% filter escapejs %}{% trans "Edit selected plugin" %}{% endfilter %}</span>' - + '</li>'; -return html; -} //]]> </script> diff --git a/cms/plugins/twitter/templates/cms/plugins/twitter_recent_entries.html b/cms/plugins/twitter/templates/cms/plugins/twitter_recent_entries.html index 71382856373..4c0e6fac742 100644 --- a/cms/plugins/twitter/templates/cms/plugins/twitter_recent_entries.html +++ b/cms/plugins/twitter/templates/cms/plugins/twitter_recent_entries.html @@ -4,22 +4,24 @@ {% addtoblock "js" %} <script type="text/javascript"> //<![CDATA[ -jQuery(document).ready(function ($) { - $('#twitter-container-{{ object.pk }}').tweet({ - username: '{{ object.twitter_user }}', - avatar_size: {% if request.is_secure %}null{% else %}32{% endif %}, - count: {{ object.count }}, - join_text: 'auto', - auto_join_text_default: '{% trans "we said," %}', - auto_join_text_ed: '{% trans "we" %}', - auto_join_text_ing: '{% trans "we were" %}', - auto_join_text_reply: '{% trans "we replied to" %}', - auto_join_text_url: '{% trans "we were checking out" %}', - loading_text: '{% trans "loading tweets..." %}' - // this replaces twitter_search.html - {% if object.query %},query: '{{ object.query }}'{% endif %} +(function namespacing(CMS) { + CMS.$(document).ready(function () { + $('#twitter-container-{{ object.pk }}').tweet({ + username: '{{ object.twitter_user }}', + avatar_size: {% if request.is_secure %}null{% else %}32{% endif %}, + count: {{ object.count }}, + join_text: 'auto', + auto_join_text_default: '{% trans "we said," %}', + auto_join_text_ed: '{% trans "we" %}', + auto_join_text_ing: '{% trans "we were" %}', + auto_join_text_reply: '{% trans "we replied to" %}', + auto_join_text_url: '{% trans "we were checking out" %}', + loading_text: '{% trans "loading tweets..." 
%}' + // this replaces twitter_search.html + {% if object.query %},query: '{{ object.query }}'{% endif %} + }); }); -}); +})(window.CMS); //]]> </script> {% endaddtoblock %} diff --git a/cms/static/cms/js/change_list.js b/cms/static/cms/js/change_list.js index 1747d532489..73029df8817 100644 --- a/cms/static/cms/js/change_list.js +++ b/cms/static/cms/js/change_list.js @@ -1,5 +1,5 @@ // some very small jquery extensions -(function($) { +(function namespacing($) { // very simple yellow fade plugin.. $.fn.yft = function(){ this.effect("highlight", {}, 1000); }; @@ -264,28 +264,28 @@ // of the tree = current node + descendants reloadItem(jtarget, admin_base_url + "cms/page/" + pageId + "/approve/?node=1", {}, refreshIfChildren(pageId)); e.stopPropagation(); - return false; - } - - // lazy load descendants on tree open - if(jtarget.hasClass("closed")) { - // only load them once - if(jtarget.find('ul > li').length == 0 && !jtarget.hasClass("loading")) { - // keeps this event from firing multiple times before - // the dom as changed. it still needs to propagate for - // the other click event on this element to fire - jtarget.addClass("loading"); - var pageId = $(jtarget).attr("id").split("page_")[1]; + return false; + } - $.get(admin_base_url + "cms/page/" + pageId + "/descendants/", {}, function(r, status) { - jtarget.children('ul').append(r); - // show move targets if needed - if($('span.move-target-container:visible').length > 0) { - jtarget.children('ul').find('a.move-target, span.move-target-container, span.line').show(); - } - }); - } - } + // lazy load descendants on tree open + if(jtarget.hasClass("closed")) { + // only load them once + if(jtarget.find('ul > li').length == 0 && !jtarget.hasClass("loading")) { + // keeps this event from firing multiple times before + // the dom as changed. it still needs to propagate for + // the other click event on this element to fire + jtarget.addClass("loading"); + var pageId = $(jtarget).attr("id").split("page_")[1]; + + $.get(admin_base_url + "cms/page/" + pageId + "/descendants/", {}, function(r, status) { + jtarget.children('ul').append(r); + // show move targets if needed + if($('span.move-target-container:visible').length > 0) { + jtarget.children('ul').find('a.move-target, span.move-target-container, span.line').show(); + }; + }); + } + } if(jtarget.hasClass("move-target")) { if(jtarget.hasClass("left")){ @@ -321,8 +321,8 @@ var val= $(this).width(); if(val > max){max = val;} }); - $(this).each(function() { - $(this).css("width",max + 'px'); + $(this).each(function() { + $(this).css("width",max + 'px'); }); return this; }; @@ -500,4 +500,4 @@ function addUndo(node, target, position){ undos.push({node:node, target:target, position:position}); } -})(jQuery); +})(window.CMS.$); diff --git a/cms/static/cms/js/plugins/cms.base.js b/cms/static/cms/js/plugins/cms.base.js index 98748ad6123..e520ebabf75 100644 --- a/cms/static/cms/js/plugins/cms.base.js +++ b/cms/static/cms/js/plugins/cms.base.js @@ -1,125 +1,141 @@ /*##################################################|*/ /* #CMS.BASE# */ -CMS.$(document).ready(function ($) { - // assign correct jquery to $ namespace - $ = CMS.$ || $; - - /*! 
- * Adds security methods to api namespace - * @public_methods: - * - CMS.API.Security.csrf(); - * @compatibility: IE >= 7, FF >= 3, Safari >= 4, Chrome > =4, Opera >= 10 - */ - CMS.API.Security = { - - csrf: function () { - $.ajaxSetup({ - beforeSend: function (xhr, settings) { - if (typeof(settings.csrfTokenSet) != undefined && settings.csrfTokenSet) { - // CSRF token has already been set elsewhere so we won't touch it. - return true; - } - // get cookies without jquery.cookie.js - function getCookie(name) { - var cookieValue = null; - if(document.cookie && (document.cookie != '')) { - var cookies = document.cookie.split(';'); - for (var i = 0; i < cookies.length; i++) { - var cookie = $.trim(cookies[i]); - // Does this cookie string begin with the name we want? - if (cookie.substring(0, name.length + 1) == (name + '=')) { - cookieValue = decodeURIComponent(cookie.substring(name.length + 1)); - break; +(function namespacing(CMS) { + CMS.$(document).ready(function ($) { + // assign correct jquery to $ namespace + $ = CMS.$ || $; + + // the following is added because IE is stupid + // $.ajax requests in IE8 fail without this hack + // ref: http://stackoverflow.com/questions/4557532/jquery-ajax-requests-failing-in-ie8-with-message-error-this-method-cannot-be-c + $.ajaxSetup({ + xhr: function() { + try{ + if(window.ActiveXObject) + return new window.ActiveXObject("Microsoft.XMLHTTP"); + } catch(e) { } + + return new window.XMLHttpRequest(); + } + }); + + /*! + * Adds security methods to api namespace + * @public_methods: + * - CMS.API.Security.csrf(); + * @compatibility: IE >= 7, FF >= 3, Safari >= 4, Chrome > =4, Opera >= 10 + */ + CMS.API.Security = { + + csrf: function () { + $.ajaxSetup({ + beforeSend: function (xhr, settings) { + if (typeof(settings.csrfTokenSet) != undefined && settings.csrfTokenSet) { + // CSRF token has already been set elsewhere so we won't touch it. + return true; + } + // get cookies without jquery.cookie.js + function getCookie(name) { + var cookieValue = null; + if(document.cookie && (document.cookie != '')) { + var cookies = document.cookie.split(';'); + for (var i = 0; i < cookies.length; i++) { + var cookie = $.trim(cookies[i]); + // Does this cookie string begin with the name we want? + if (cookie.substring(0, name.length + 1) == (name + '=')) { + cookieValue = decodeURIComponent(cookie.substring(name.length + 1)); + break; + } } } + return cookieValue; + } + // do some url checks + var base_doc_url = document.URL.match(/^http[s]{0,1}:\/\/[^\/]+\//)[0]; + var base_settings_url = settings.url.match(/^http[s]{0,1}:\/\/[^\/]+\//); + if(base_settings_url != null) { + base_settings_url = base_settings_url[0]; + } + if(!(/^http:.*/.test(settings.url) || /^https:.*/.test(settings.url)) || base_doc_url == base_settings_url) { + // Only send the token to relative URLs i.e. locally. + xhr.setRequestHeader("X-CSRFToken", getCookie('csrftoken')); + settings.csrfTokenSet = true; } - return cookieValue; - } - // do some url checks - var base_doc_url = document.URL.match(/^http[s]{0,1}:\/\/[^\/]+\//)[0]; - var base_settings_url = settings.url.match(/^http[s]{0,1}:\/\/[^\/]+\//); - if(base_settings_url != null) { - base_settings_url = base_settings_url[0]; } - if(!(/^http:.*/.test(settings.url) || /^https:.*/.test(settings.url)) || base_doc_url == base_settings_url) { - // Only send the token to relative URLs i.e. locally. - xhr.setRequestHeader("X-CSRFToken", getCookie('csrftoken')); - settings.csrfTokenSet = true; + }); + return 'ready'; + } + + }; + + /*! 
+ * Adds helper methods to api namespace + * @public_methods: + * - CMS.API.Helpers.reloadBrowser(); + * - CMS.API.Helpers.getUrl(urlString); + * - CMS.API.Helpers.setUrl(urlString, options); + */ + CMS.API.Helpers = { + + reloadBrowser: function () { + window.location.reload(); + }, + + getUrl: function(str) { + var o = { + 'strictMode': false, + 'key': ["source","protocol","authority","userInfo","user","password","host","port","relative","path","directory","file","query","anchor"], + 'q': { 'name': 'queryKey', 'parser': /(?:^|&)([^&=]*)=?([^&]*)/g }, + 'parser': { + 'strict': /^(?:([^:\/?#]+):)?(?:\/\/((?:(([^:@]*)(?::([^:@]*))?)?@)?([^:\/?#]*)(?::(\d*))?))?((((?:[^?#\/]*\/)*)([^?#]*))(?:\?([^#]*))?(?:#(.*))?)/, + 'loose': /^(?:(?![^:@]+:[^:@\/]*@)([^:\/?#.]+):)?(?:\/\/)?((?:(([^:@]*)(?::([^:@]*))?)?@)?([^:\/?#]*)(?::(\d*))?)(((\/(?:[^?#](?![^?#\/]*\.[^?#\/.]+(?:[?#]|$)))*\/?)?([^?#\/]*))(?:\?([^#]*))?(?:#(.*))?)/ } - } - }); - return 'ready'; - } - - }; - - /*! - * Adds helper methods to api namespace - * @public_methods: - * - CMS.API.Helpers.reloadBrowser(); - * - CMS.API.Helpers.getUrl(urlString); - * - CMS.API.Helpers.setUrl(urlString, options); - */ - CMS.API.Helpers = { - - reloadBrowser: function () { - window.location.reload(); - }, - - getUrl: function(str) { - var o = { - 'strictMode': false, - 'key': ["source","protocol","authority","userInfo","user","password","host","port","relative","path","directory","file","query","anchor"], - 'q': { 'name': 'queryKey', 'parser': /(?:^|&)([^&=]*)=?([^&]*)/g }, - 'parser': { - 'strict': /^(?:([^:\/?#]+):)?(?:\/\/((?:(([^:@]*)(?::([^:@]*))?)?@)?([^:\/?#]*)(?::(\d*))?))?((((?:[^?#\/]*\/)*)([^?#]*))(?:\?([^#]*))?(?:#(.*))?)/, - 'loose': /^(?:(?![^:@]+:[^:@\/]*@)([^:\/?#.]+):)?(?:\/\/)?((?:(([^:@]*)(?::([^:@]*))?)?@)?([^:\/?#]*)(?::(\d*))?)(((\/(?:[^?#](?![^?#\/]*\.[^?#\/.]+(?:[?#]|$)))*\/?)?([^?#\/]*))(?:\?([^#]*))?(?:#(.*))?)/ - } - }; - - var m = o.parser[o.strictMode ? 'strict' : 'loose'].exec(str), uri = {}, i = 14; - - while(i--) uri[o.key[i]] = m[i] || ''; - - uri[o.q.name] = {}; - uri[o.key[12]].replace(o.q.parser, function ($0, $1, $2) { - if($1) { uri[o.q.name][$1] = $2; } - }); - - return uri; - }, - - setUrl: function (str, options) { - var uri = str; - - // now we neet to get the partials of the element - var getUrlObj = this.getUrl(uri); - var query = getUrlObj.queryKey; - var serialized = ''; - var index = 0; - - // we could loop the query and replace the param at the right place - // but instead of replacing it just append it to the end of the query so its more visible - if(options && options.removeParam) delete query[options.removeParam]; - if(options && options.addParam) query[options.addParam.split('=')[0]] = options.addParam.split('=')[1]; - - $.each(query, function (key, value) { - // add & - if(index != 0) serialized += '&'; - // if a value is given attach it - serialized += (value) ? (key + '=' + value) : (key); - index++; - }); - - // check if we should add the questionmark - var addition = (serialized === '') ? '' : '?'; - var anchor = (getUrlObj.anchor) ? '#' + getUrlObj.anchor : ''; - - uri = getUrlObj.protocol + '://' + getUrlObj.authority + getUrlObj.directory + getUrlObj.file + addition + serialized + anchor; - - return uri; - } - - }; - -}); + }; + + var m = o.parser[o.strictMode ? 
'strict' : 'loose'].exec(str), uri = {}, i = 14; + + while(i--) uri[o.key[i]] = m[i] || ''; + + uri[o.q.name] = {}; + uri[o.key[12]].replace(o.q.parser, function ($0, $1, $2) { + if($1) { uri[o.q.name][$1] = $2; } + }); + + return uri; + }, + + setUrl: function (str, options) { + var uri = str; + + // now we neet to get the partials of the element + var getUrlObj = this.getUrl(uri); + var query = getUrlObj.queryKey; + var serialized = ''; + var index = 0; + + // we could loop the query and replace the param at the right place + // but instead of replacing it just append it to the end of the query so its more visible + if(options && options.removeParam) delete query[options.removeParam]; + if(options && options.addParam) query[options.addParam.split('=')[0]] = options.addParam.split('=')[1]; + + $.each(query, function (key, value) { + // add & + if(index != 0) serialized += '&'; + // if a value is given attach it + serialized += (value) ? (key + '=' + value) : (key); + index++; + }); + + // check if we should add the questionmark + var addition = (serialized === '') ? '' : '?'; + var anchor = (getUrlObj.anchor) ? '#' + getUrlObj.anchor : ''; + + uri = getUrlObj.protocol + '://' + getUrlObj.authority + getUrlObj.directory + getUrlObj.file + addition + serialized + anchor; + + return uri; + } + + }; + + }); +})(window.CMS); diff --git a/cms/static/cms/js/plugins/cms.setup.js b/cms/static/cms/js/plugins/cms.setup.js index 9fe6a6dc925..28dccf99d16 100644 --- a/cms/static/cms/js/plugins/cms.setup.js +++ b/cms/static/cms/js/plugins/cms.setup.js @@ -1,15 +1,16 @@ /*##################################################|*/ /* #CMS.SETUP# */ +(function namespacing() { + // insuring django namespace is available when using on admin + django = window.django || undefined; -// insuring django namespace is available when using on admin -var django = django || undefined; + // assigning correct jquery instance to jQuery variable + var jQuery = (django) ? django.jQuery : window.jQuery || undefined; -// assigning correct jquery instance to jQuery variable -var jQuery = (django) ? 
django.jQuery : window.jQuery || undefined; - -// assign global namespaces -var CMS = { - '$': jQuery.noConflict(), - 'Class': Class.$noConflict(), - 'API': {} -}; \ No newline at end of file + // assign global namespaces + window.CMS = { + '$': jQuery.noConflict(), + 'Class': Class.$noConflict(), + 'API': {} + }; +})(); \ No newline at end of file diff --git a/cms/templates/admin/cms/page/change_form.html b/cms/templates/admin/cms/page/change_form.html index 68d0c7986bd..9475f3e2d97 100644 --- a/cms/templates/admin/cms/page/change_form.html +++ b/cms/templates/admin/cms/page/change_form.html @@ -25,7 +25,7 @@ } }); }); -})(jQuery); +})(window.CMS.$); //]]> </script> {% endif %} @@ -169,6 +169,7 @@ <h2 class="load_remote">{% trans 'Page states' %}</h2> {% endif %} {% if moderation_delete_request %}<script type="text/javascript"> + (function namespacing($) { $(function(){ // disable all fields function lockControls(){ @@ -179,6 +180,7 @@ <h2 class="load_remote">{% trans 'Page states' %}</h2> lockControls(); setTimeout(lockControls,200); }); + })(window.CMS.$); </script>{% endif %} {% if CMS_MODERATOR and moderation_required %} @@ -261,7 +263,7 @@ <h2 class="load_remote">{% trans 'Page states' %}</h2> } }); }); - })(jQuery); + })(window.CMS.$); //]]> </script> {% endif %} diff --git a/cms/templates/admin/cms/page/change_list.html b/cms/templates/admin/cms/page/change_list.html index 5f944ee1839..72e0b3635b3 100644 --- a/cms/templates/admin/cms/page/change_list.html +++ b/cms/templates/admin/cms/page/change_list.html @@ -45,7 +45,7 @@ {% block content %} <script type="text/javascript"> //<![CDATA[ -(function($) { +(function namespacing($) { $(document).ready(function() { {% if not cl.is_filtered %} initTree(); @@ -71,7 +71,7 @@ cmsModerator: {{ CMS_MODERATOR|js }}, debug: {{ DEBUG|js }} }; -})(jQuery); +})(window.CMS.$); //]]> </script> diff --git a/cms/templates/admin/cms/page/plugin_change_form.html b/cms/templates/admin/cms/page/plugin_change_form.html index c222225b08c..ccd808f6518 100644 --- a/cms/templates/admin/cms/page/plugin_change_form.html +++ b/cms/templates/admin/cms/page/plugin_change_form.html @@ -5,8 +5,13 @@ <script type="text/javascript" src="{% admin_static_url %}js/jquery.min.js"></script> <script type="text/javascript" src="{{ STATIC_URL }}cms/js/csrf.js"></script> <script type="text/javascript" src="{% url 'admin:jsi18n' %}"></script> + {{ media }} +<script type="text/javascript" src="{{ STATIC_URL }}cms/js/libs/classy.min.js"></script> +<script type="text/javascript" src="{{ STATIC_URL }}cms/js/plugins/cms.setup.js"></script> +<script type="text/javascript" src="{{ STATIC_URL }}cms/js/plugins/cms.base.js"></script> + <script type="text/javascript"> //<![CDATA[ (function($) { @@ -50,7 +55,7 @@ } }); }); -})(jQuery); +})(window.CMS.$); //]]> </script> diff --git a/cms/utils/admin.py b/cms/utils/admin.py index 203e2d8cd29..a5204f9092e 100644 --- a/cms/utils/admin.py +++ b/cms/utils/admin.py @@ -96,4 +96,5 @@ def render_admin_menu_item(request, page, template=None): filtered = 'filtered' in request.REQUEST context.update(get_admin_menu_item_context(request, page, filtered)) - return render_to_response(template, context) + # add mimetype to help out IE + return render_to_response(template, context, mimetype="text/html; charset=utf-8")
ethereum__web3.py-3060
Default IPC path is incorrect on Windows with Anaconda 2023.07

* Version: 6.6.1
* Python: 3.11
* OS: win

I recently updated my Anaconda installation to the latest version (2023.07), which uses Python 3.11. web3.py is no longer able to resolve the default IPC path for `IPCProvider` on Windows. The problem and fix are as follows:

In [ipc.py](https://github.com/ethereum/web3.py/blob/4b509a7d5fce0b9a67dbe93151e8b8a01e83b3cc/web3/providers/ipc.py#L105), line 105 `ipc_path = os.path.join("\\\\", ".", "pipe", "geth.ipc")` makes the default IPC path `'\\\\\\.\\pipe\\geth.ipc'`, so the `os.path.exists(ipc_path)` check on the next line never finds it.

### How can it be fixed?

In ipc.py, replace line 105 `ipc_path = os.path.join("\\\\", ".", "pipe", "geth.ipc")` with the raw-string literal `ipc_path = r"\\.\pipe\geth.ipc"` (a raw string avoids the invalid backslash escapes), which is the named-pipe form described in the [documentation](https://web3py.readthedocs.io/en/latest/providers.html#web3.providers.ipc.IPCProvider).
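Until a patched release lands, the same fix works from user code: pass the pipe path to `IPCProvider` explicitly instead of relying on default-path detection. The sketch below assumes a local geth node on Windows exposing the default named pipe; the raw-string literal is the same one the patched `get_default_ipc_path()` uses, so no `os.path.join` call can mangle it.

```python
# Minimal workaround sketch (assumes a local geth node on Windows exposing
# the default named pipe). A raw string keeps the \\. named-pipe prefix
# intact, sidestepping both os.path.join normalization and backslash-escape
# pitfalls in the literal.
from web3 import Web3

GETH_PIPE_PATH = r"\\.\pipe\geth.ipc"  # literal characters: \\.\pipe\geth.ipc

w3 = Web3(Web3.IPCProvider(GETH_PIPE_PATH))
print(w3.is_connected())  # True once the pipe exists and geth responds
```

This is exactly the form the merged patch hard-codes in `get_default_ipc_path()` (see the after_files below), where the Windows branch becomes a raw-string assignment rather than a join.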
[ { "content": "from json import (\n JSONDecodeError,\n)\nimport logging\nimport os\nfrom pathlib import (\n Path,\n)\nimport socket\nimport sys\nimport threading\nfrom types import (\n TracebackType,\n)\nfrom typing import (\n Any,\n Optional,\n Type,\n Union,\n)\n\nfrom web3._utils.threads import (\n Timeout,\n)\nfrom web3.types import (\n RPCEndpoint,\n RPCResponse,\n)\n\nfrom .base import (\n JSONBaseProvider,\n)\n\n\ndef get_ipc_socket(ipc_path: str, timeout: float = 2.0) -> socket.socket:\n if sys.platform == \"win32\":\n # On Windows named pipe is used. Simulate socket with it.\n from web3._utils.windows import (\n NamedPipe,\n )\n\n return NamedPipe(ipc_path)\n else:\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.connect(ipc_path)\n sock.settimeout(timeout)\n return sock\n\n\nclass PersistantSocket:\n sock = None\n\n def __init__(self, ipc_path: str) -> None:\n self.ipc_path = ipc_path\n\n def __enter__(self) -> socket.socket:\n if not self.ipc_path:\n raise FileNotFoundError(\n f\"cannot connect to IPC socket at path: {self.ipc_path!r}\"\n )\n\n if not self.sock:\n self.sock = self._open()\n return self.sock\n\n def __exit__(\n self,\n exc_type: Type[BaseException],\n exc_value: BaseException,\n traceback: TracebackType,\n ) -> None:\n # only close the socket if there was an error\n if exc_value is not None:\n try:\n self.sock.close()\n except Exception:\n pass\n self.sock = None\n\n def _open(self) -> socket.socket:\n return get_ipc_socket(self.ipc_path)\n\n def reset(self) -> socket.socket:\n self.sock.close()\n self.sock = self._open()\n return self.sock\n\n\ndef get_default_ipc_path() -> Optional[str]:\n if sys.platform == \"darwin\":\n ipc_path = os.path.expanduser(\n os.path.join(\"~\", \"Library\", \"Ethereum\", \"geth.ipc\")\n )\n if os.path.exists(ipc_path):\n return ipc_path\n return None\n\n elif sys.platform.startswith(\"linux\") or sys.platform.startswith(\"freebsd\"):\n ipc_path = os.path.expanduser(os.path.join(\"~\", \".ethereum\", \"geth.ipc\"))\n if os.path.exists(ipc_path):\n return ipc_path\n return None\n\n elif sys.platform == \"win32\":\n ipc_path = os.path.join(\"\\\\\\\\\", \".\", \"pipe\", \"geth.ipc\")\n if os.path.exists(ipc_path):\n return ipc_path\n return None\n\n else:\n raise ValueError(\n f\"Unsupported platform '{sys.platform}'. Only darwin/linux/win32/\"\n \"freebsd are supported. You must specify the ipc_path\"\n )\n\n\ndef get_dev_ipc_path() -> Optional[str]:\n if os.environ.get(\"WEB3_PROVIDER_URI\", \"\"):\n ipc_path = os.environ.get(\"WEB3_PROVIDER_URI\")\n if os.path.exists(ipc_path):\n return ipc_path\n return None\n\n elif sys.platform == \"darwin\":\n tmpdir = os.environ.get(\"TMPDIR\", \"\")\n ipc_path = os.path.expanduser(os.path.join(tmpdir, \"geth.ipc\"))\n if os.path.exists(ipc_path):\n return ipc_path\n return None\n\n elif sys.platform.startswith(\"linux\") or sys.platform.startswith(\"freebsd\"):\n ipc_path = os.path.expanduser(os.path.join(\"/tmp\", \"geth.ipc\"))\n if os.path.exists(ipc_path):\n return ipc_path\n return None\n\n elif sys.platform == \"win32\":\n ipc_path = os.path.join(\"\\\\\\\\\", \".\", \"pipe\", \"geth.ipc\")\n if os.path.exists(ipc_path):\n return ipc_path\n\n else:\n raise ValueError(\n f\"Unsupported platform '{sys.platform}'. Only darwin/linux/win32/\"\n \"freebsd are supported. 
You must specify the ipc_path\"\n )\n\n\nclass IPCProvider(JSONBaseProvider):\n logger = logging.getLogger(\"web3.providers.IPCProvider\")\n _socket = None\n\n def __init__(\n self,\n ipc_path: Union[str, Path] = None,\n timeout: int = 10,\n *args: Any,\n **kwargs: Any,\n ) -> None:\n if ipc_path is None:\n self.ipc_path = get_default_ipc_path()\n elif isinstance(ipc_path, str) or isinstance(ipc_path, Path):\n self.ipc_path = str(Path(ipc_path).expanduser().resolve())\n else:\n raise TypeError(\"ipc_path must be of type string or pathlib.Path\")\n\n self.timeout = timeout\n self._lock = threading.Lock()\n self._socket = PersistantSocket(self.ipc_path)\n super().__init__()\n\n def __str__(self) -> str:\n return f\"<{self.__class__.__name__} {self.ipc_path}>\"\n\n def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:\n self.logger.debug(\n f\"Making request IPC. Path: {self.ipc_path}, Method: {method}\"\n )\n request = self.encode_rpc_request(method, params)\n\n with self._lock, self._socket as sock:\n try:\n sock.sendall(request)\n except BrokenPipeError:\n # one extra attempt, then give up\n sock = self._socket.reset()\n sock.sendall(request)\n\n raw_response = b\"\"\n with Timeout(self.timeout) as timeout:\n while True:\n try:\n raw_response += sock.recv(4096)\n except socket.timeout:\n timeout.sleep(0)\n continue\n if raw_response == b\"\":\n timeout.sleep(0)\n elif has_valid_json_rpc_ending(raw_response):\n try:\n response = self.decode_rpc_response(raw_response)\n except JSONDecodeError:\n timeout.sleep(0)\n continue\n else:\n return response\n else:\n timeout.sleep(0)\n continue\n\n\n# A valid JSON RPC response can only end in } or ] http://www.jsonrpc.org/specification\ndef has_valid_json_rpc_ending(raw_response: bytes) -> bool:\n stripped_raw_response = raw_response.rstrip()\n for valid_ending in [b\"}\", b\"]\"]:\n if stripped_raw_response.endswith(valid_ending):\n return True\n else:\n return False\n", "path": "web3/providers/ipc.py" } ]
[ { "content": "from json import (\n JSONDecodeError,\n)\nimport logging\nimport os\nfrom pathlib import (\n Path,\n)\nimport socket\nimport sys\nimport threading\nfrom types import (\n TracebackType,\n)\nfrom typing import (\n Any,\n Optional,\n Type,\n Union,\n)\n\nfrom web3._utils.threads import (\n Timeout,\n)\nfrom web3.types import (\n RPCEndpoint,\n RPCResponse,\n)\n\nfrom .base import (\n JSONBaseProvider,\n)\n\n\ndef get_ipc_socket(ipc_path: str, timeout: float = 2.0) -> socket.socket:\n if sys.platform == \"win32\":\n # On Windows named pipe is used. Simulate socket with it.\n from web3._utils.windows import (\n NamedPipe,\n )\n\n return NamedPipe(ipc_path)\n else:\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.connect(ipc_path)\n sock.settimeout(timeout)\n return sock\n\n\nclass PersistantSocket:\n sock = None\n\n def __init__(self, ipc_path: str) -> None:\n self.ipc_path = ipc_path\n\n def __enter__(self) -> socket.socket:\n if not self.ipc_path:\n raise FileNotFoundError(\n f\"cannot connect to IPC socket at path: {self.ipc_path!r}\"\n )\n\n if not self.sock:\n self.sock = self._open()\n return self.sock\n\n def __exit__(\n self,\n exc_type: Type[BaseException],\n exc_value: BaseException,\n traceback: TracebackType,\n ) -> None:\n # only close the socket if there was an error\n if exc_value is not None:\n try:\n self.sock.close()\n except Exception:\n pass\n self.sock = None\n\n def _open(self) -> socket.socket:\n return get_ipc_socket(self.ipc_path)\n\n def reset(self) -> socket.socket:\n self.sock.close()\n self.sock = self._open()\n return self.sock\n\n\ndef get_default_ipc_path() -> Optional[str]:\n if sys.platform == \"darwin\":\n ipc_path = os.path.expanduser(\n os.path.join(\"~\", \"Library\", \"Ethereum\", \"geth.ipc\")\n )\n if os.path.exists(ipc_path):\n return ipc_path\n return None\n\n elif sys.platform.startswith(\"linux\") or sys.platform.startswith(\"freebsd\"):\n ipc_path = os.path.expanduser(os.path.join(\"~\", \".ethereum\", \"geth.ipc\"))\n if os.path.exists(ipc_path):\n return ipc_path\n return None\n\n elif sys.platform == \"win32\":\n ipc_path = r\"\\\\.\\pipe\\geth.ipc\"\n if os.path.exists(ipc_path):\n return ipc_path\n return None\n\n else:\n raise ValueError(\n f\"Unsupported platform '{sys.platform}'. Only darwin/linux/win32/\"\n \"freebsd are supported. You must specify the ipc_path\"\n )\n\n\ndef get_dev_ipc_path() -> Optional[str]:\n if os.environ.get(\"WEB3_PROVIDER_URI\", \"\"):\n ipc_path = os.environ.get(\"WEB3_PROVIDER_URI\")\n if os.path.exists(ipc_path):\n return ipc_path\n return None\n\n elif sys.platform == \"darwin\":\n tmpdir = os.environ.get(\"TMPDIR\", \"\")\n ipc_path = os.path.expanduser(os.path.join(tmpdir, \"geth.ipc\"))\n if os.path.exists(ipc_path):\n return ipc_path\n return None\n\n elif sys.platform.startswith(\"linux\") or sys.platform.startswith(\"freebsd\"):\n ipc_path = os.path.expanduser(os.path.join(\"/tmp\", \"geth.ipc\"))\n if os.path.exists(ipc_path):\n return ipc_path\n return None\n\n elif sys.platform == \"win32\":\n ipc_path = os.path.join(\"\\\\\\\\\", \".\", \"pipe\", \"geth.ipc\")\n if os.path.exists(ipc_path):\n return ipc_path\n\n else:\n raise ValueError(\n f\"Unsupported platform '{sys.platform}'. Only darwin/linux/win32/\"\n \"freebsd are supported. 
You must specify the ipc_path\"\n )\n\n\nclass IPCProvider(JSONBaseProvider):\n logger = logging.getLogger(\"web3.providers.IPCProvider\")\n _socket = None\n\n def __init__(\n self,\n ipc_path: Union[str, Path] = None,\n timeout: int = 10,\n *args: Any,\n **kwargs: Any,\n ) -> None:\n if ipc_path is None:\n self.ipc_path = get_default_ipc_path()\n elif isinstance(ipc_path, str) or isinstance(ipc_path, Path):\n self.ipc_path = str(Path(ipc_path).expanduser().resolve())\n else:\n raise TypeError(\"ipc_path must be of type string or pathlib.Path\")\n\n self.timeout = timeout\n self._lock = threading.Lock()\n self._socket = PersistantSocket(self.ipc_path)\n super().__init__()\n\n def __str__(self) -> str:\n return f\"<{self.__class__.__name__} {self.ipc_path}>\"\n\n def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:\n self.logger.debug(\n f\"Making request IPC. Path: {self.ipc_path}, Method: {method}\"\n )\n request = self.encode_rpc_request(method, params)\n\n with self._lock, self._socket as sock:\n try:\n sock.sendall(request)\n except BrokenPipeError:\n # one extra attempt, then give up\n sock = self._socket.reset()\n sock.sendall(request)\n\n raw_response = b\"\"\n with Timeout(self.timeout) as timeout:\n while True:\n try:\n raw_response += sock.recv(4096)\n except socket.timeout:\n timeout.sleep(0)\n continue\n if raw_response == b\"\":\n timeout.sleep(0)\n elif has_valid_json_rpc_ending(raw_response):\n try:\n response = self.decode_rpc_response(raw_response)\n except JSONDecodeError:\n timeout.sleep(0)\n continue\n else:\n return response\n else:\n timeout.sleep(0)\n continue\n\n\n# A valid JSON RPC response can only end in } or ] http://www.jsonrpc.org/specification\ndef has_valid_json_rpc_ending(raw_response: bytes) -> bool:\n stripped_raw_response = raw_response.rstrip()\n for valid_ending in [b\"}\", b\"]\"]:\n if stripped_raw_response.endswith(valid_ending):\n return True\n else:\n return False\n", "path": "web3/providers/ipc.py" } ]
diff --git a/docs/providers.rst b/docs/providers.rst index b560aa51d1..d45e046eae 100644 --- a/docs/providers.rst +++ b/docs/providers.rst @@ -173,7 +173,7 @@ IPCProvider - On Linux and FreeBSD: ``~/.ethereum/geth.ipc`` - On Mac OS: ``~/Library/Ethereum/geth.ipc`` - - On Windows: ``\\\.\pipe\geth.ipc`` + - On Windows: ``\\.\pipe\geth.ipc`` WebsocketProvider diff --git a/newsfragments/3058.bugfix.rst b/newsfragments/3058.bugfix.rst new file mode 100644 index 0000000000..6dfadef108 --- /dev/null +++ b/newsfragments/3058.bugfix.rst @@ -0,0 +1 @@ +Fixed default windows IPC provider path to work with python 3.11 diff --git a/web3/providers/ipc.py b/web3/providers/ipc.py index 170499a8bc..e2731b8fe3 100644 --- a/web3/providers/ipc.py +++ b/web3/providers/ipc.py @@ -102,7 +102,7 @@ def get_default_ipc_path() -> Optional[str]: return None elif sys.platform == "win32": - ipc_path = os.path.join("\\\\", ".", "pipe", "geth.ipc") + ipc_path = r"\\.\pipe\geth.ipc" if os.path.exists(ipc_path): return ipc_path return None
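As a quick, purely illustrative sanity check of the escaping in the diff above (nothing here is web3-specific):

```python
# The raw-string literal adopted in the fix and its escaped equivalent
# denote the same Windows named-pipe namespace path:
assert r"\\.\pipe\geth.ipc" == "\\\\.\\pipe\\geth.ipc"
print(r"\\.\pipe\geth.ipc")  # \\.\pipe\geth.ipc
```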
pyro-ppl__numpyro-1760
random_flax_module broken

First and foremost, **thanks** for the great work on `numpyro`!

**The utility function `random_flax_module()` from `numpyro.contrib.module` seems to be broken.**

As a minimal reproducible example of the error, I take the example given in the docstring of the function ([https://github.com/pyro-ppl/numpyro/blob/master/numpyro/contrib/module.py#L285](https://github.com/pyro-ppl/numpyro/blob/master/numpyro/contrib/module.py#L285)) itself. The example with imports is given below:

```
import flax
import numpyro.distributions as dist
from numpyro.contrib.module import random_flax_module

random_flax_module(
    "net",
    flax.linen.Dense(features=1),
    prior={"bias": dist.Cauchy(), "kernel": dist.Normal()},
    input_shape=(4,)
)
```

This leads directly to:

```
ValueError: First argument passed to an init function should be a `jax.PRNGKey` or a dictionary mapping strings to `jax.PRNGKey`.
```

Hope I just missed something obvious, as the function would be very handy!
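For context, `random_flax_module` draws its initialization key via `numpyro.prng_key()`, which only yields a key under a `seed` handler; called at top level it returns `None`, which flax then rejects with the `ValueError` above. A minimal sketch of the working pattern (assuming flax and numpyro are installed; this mirrors the intended usage inside a model):

```python
import flax.linen as nn
import numpyro.distributions as dist
from numpyro import handlers
from numpyro.contrib.module import random_flax_module

def model():
    # The flax module is initialized with a key from numpyro.prng_key(),
    # so this must run under a seed handler (or inside MCMC/SVI):
    net = random_flax_module(
        "net",
        nn.Dense(features=1),
        prior={"bias": dist.Cauchy(), "kernel": dist.Normal()},
        input_shape=(4,),
    )

with handlers.seed(rng_seed=0):
    model()  # no ValueError: prng_key() now returns a real key
```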
[ { "content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom collections import namedtuple\nfrom contextlib import ExitStack, contextmanager\nimport functools\nimport warnings\n\nimport jax\nfrom jax import lax, random\nimport jax.numpy as jnp\n\nimport numpyro\nfrom numpyro.util import find_stack_level, identity\n\n_PYRO_STACK = []\n\nCondIndepStackFrame = namedtuple(\"CondIndepStackFrame\", [\"name\", \"dim\", \"size\"])\n\n\ndef default_process_message(msg):\n if msg[\"value\"] is None:\n if msg[\"type\"] == \"sample\":\n msg[\"value\"], msg[\"intermediates\"] = msg[\"fn\"](\n *msg[\"args\"], sample_intermediates=True, **msg[\"kwargs\"]\n )\n else:\n msg[\"value\"] = msg[\"fn\"](*msg[\"args\"], **msg[\"kwargs\"])\n\n\ndef apply_stack(msg):\n \"\"\"\n Execute the effect stack at a single site according to the following scheme:\n\n 1. For each ``Messenger`` in the stack from bottom to top,\n execute ``Messenger.process_message`` with the message;\n if the message field \"stop\" is True, stop;\n otherwise, continue\n 2. Apply default behavior (``default_process_message``) to finish remaining\n site execution\n 3. For each ``Messenger`` in the stack from top to bottom,\n execute ``Messenger.postprocess_message`` to update the message\n and internal messenger state with the site results\n \"\"\"\n pointer = 0\n for pointer, handler in enumerate(reversed(_PYRO_STACK)):\n handler.process_message(msg)\n # When a Messenger sets the \"stop\" field of a message,\n # it prevents any Messengers above it on the stack from being applied.\n if msg.get(\"stop\"):\n break\n\n default_process_message(msg)\n\n # A Messenger that sets msg[\"stop\"] == True also prevents application\n # of postprocess_message by Messengers above it on the stack\n # via the pointer variable from the process_message loop\n for handler in _PYRO_STACK[-pointer - 1 :]:\n handler.postprocess_message(msg)\n return msg\n\n\nclass Messenger(object):\n def __init__(self, fn=None):\n if fn is not None and not callable(fn):\n raise ValueError(\n \"Expected `fn` to be a Python callable object; \"\n \"instead found type(fn) = {}.\".format(type(fn))\n )\n self.fn = fn\n functools.update_wrapper(self, fn, updated=[])\n\n def __enter__(self):\n _PYRO_STACK.append(self)\n\n def __exit__(self, exc_type, exc_value, traceback):\n if exc_type is None:\n assert _PYRO_STACK[-1] is self\n _PYRO_STACK.pop()\n else:\n # NB: this mimics Pyro exception handling\n # the wrapped function or block raised an exception\n # handler exception handling:\n # when the callee or enclosed block raises an exception,\n # find this handler's position in the stack,\n # then remove it and everything below it in the stack.\n if self in _PYRO_STACK:\n loc = _PYRO_STACK.index(self)\n for i in range(loc, len(_PYRO_STACK)):\n _PYRO_STACK.pop()\n\n def process_message(self, msg):\n pass\n\n def postprocess_message(self, msg):\n pass\n\n def __call__(self, *args, **kwargs):\n if self.fn is None:\n # Assume self is being used as a decorator.\n assert len(args) == 1 and not kwargs\n self.fn = args[0]\n return self\n with self:\n return self.fn(*args, **kwargs)\n\n\ndef _masked_observe(name, fn, obs, obs_mask, **kwargs):\n # Split into two auxiliary sample sites.\n with numpyro.handlers.mask(mask=obs_mask):\n observed = sample(f\"{name}_observed\", fn, **kwargs, obs=obs)\n with numpyro.handlers.mask(mask=(obs_mask ^ True)):\n unobserved = sample(f\"{name}_unobserved\", fn, **kwargs)\n\n # Interleave observed and unobserved events.\n 
shape = jnp.shape(obs_mask) + (1,) * fn.event_dim\n batch_mask = jnp.reshape(obs_mask, shape)\n value = jnp.where(batch_mask, observed, unobserved)\n return deterministic(name, value)\n\n\ndef sample(\n name, fn, obs=None, rng_key=None, sample_shape=(), infer=None, obs_mask=None\n):\n \"\"\"\n Returns a random sample from the stochastic function `fn`. This can have\n additional side effects when wrapped inside effect handlers like\n :class:`~numpyro.handlers.substitute`.\n\n .. note::\n By design, `sample` primitive is meant to be used inside a NumPyro model.\n Then :class:`~numpyro.handlers.seed` handler is used to inject a random\n state to `fn`. In those situations, `rng_key` keyword will take no\n effect.\n\n :param str name: name of the sample site.\n :param fn: a stochastic function that returns a sample.\n :param jnp.ndarray obs: observed value\n :param jax.random.PRNGKey rng_key: an optional random key for `fn`.\n :param sample_shape: Shape of samples to be drawn.\n :param dict infer: an optional dictionary containing additional information\n for inference algorithms. For example, if `fn` is a discrete distribution,\n setting `infer={'enumerate': 'parallel'}` to tell MCMC marginalize\n this discrete latent site.\n :param jnp.ndarray obs_mask: Optional boolean array mask of shape\n broadcastable with ``fn.batch_shape``. If provided, events with\n mask=True will be conditioned on ``obs`` and remaining events will be\n imputed by sampling. This introduces a latent sample site named ``name\n + \"_unobserved\"`` which should be used by guides in SVI. Note that this\n argument is not intended to be used with MCMC.\n :return: sample from the stochastic `fn`.\n \"\"\"\n assert isinstance(\n sample_shape, tuple\n ), \"sample_shape needs to be a tuple of integers\"\n if not isinstance(fn, numpyro.distributions.Distribution):\n type_error = TypeError(\n \"It looks like you tried to use a fn that isn't an instance of \"\n \"numpyro.distributions.Distribution, funsor.Funsor or \"\n \"tensorflow_probability.distributions.Distribution. 
If you're using \"\n \"funsor or tensorflow_probability, make sure they are correctly installed.\"\n )\n\n # fn can be a funsor.Funsor, but this won't be installed for all users\n try:\n from funsor import Funsor\n except ImportError:\n Funsor = None\n\n # if Funsor import failed, or fn is not a Funsor it's also possible fn could be\n # a tensorflow_probability distribution\n if Funsor is None or not isinstance(fn, Funsor):\n try:\n from tensorflow_probability.substrates.jax import distributions as tfd\n\n from numpyro.contrib.tfp.distributions import TFPDistribution\n except ImportError:\n # if tensorflow_probability fails to import here, then fn is not a\n # numpyro Distribution or a Funsor, and it can't have been a tfp\n # distribution either, so raising TypeError is ok\n raise type_error\n\n if isinstance(fn, tfd.Distribution):\n with warnings.catch_warnings():\n # ignore FutureWarnings when instantiating TFPDistribution\n warnings.simplefilter(\"ignore\", category=FutureWarning)\n # if fn is a tfp distribution we need to wrap it\n fn = TFPDistribution[fn.__class__](**fn.parameters)\n else:\n # if tensorflow_probability imported, but fn is not tfd.Distribution we\n # still need to raise a type error\n raise type_error\n\n # if no active Messengers, draw a sample or return obs as expected:\n if not _PYRO_STACK:\n if obs is None:\n return fn(rng_key=rng_key, sample_shape=sample_shape)\n else:\n return obs\n\n if obs_mask is not None:\n return _masked_observe(\n name, fn, obs, obs_mask, rng_key=rng_key, sample_shape=(), infer=infer\n )\n\n # Otherwise, we initialize a message...\n initial_msg = {\n \"type\": \"sample\",\n \"name\": name,\n \"fn\": fn,\n \"args\": (),\n \"kwargs\": {\"rng_key\": rng_key, \"sample_shape\": sample_shape},\n \"value\": obs,\n \"scale\": None,\n \"is_observed\": obs is not None,\n \"intermediates\": [],\n \"cond_indep_stack\": [],\n \"infer\": {} if infer is None else infer,\n }\n\n # ...and use apply_stack to send it to the Messengers\n msg = apply_stack(initial_msg)\n return msg[\"value\"]\n\n\ndef param(name, init_value=None, **kwargs):\n \"\"\"\n Annotate the given site as an optimizable parameter for use with\n :mod:`jax.example_libraries.optimizers`. For an example of how `param` statements\n can be used in inference algorithms, refer to :class:`~numpyro.infer.SVI`.\n\n :param str name: name of site.\n :param init_value: initial value specified by the user or a lazy callable\n that accepts a JAX random PRNGKey and returns an array.\n Note that the onus of using this to initialize the optimizer is\n on the user inference algorithm, since there is no global parameter\n store in NumPyro.\n :type init_value: jnp.ndarray or callable\n :param constraint: NumPyro constraint, defaults to ``constraints.real``.\n :type constraint: numpyro.distributions.constraints.Constraint\n :param int event_dim: (optional) number of rightmost dimensions unrelated\n to batching. Dimension to the left of this will be considered batch\n dimensions; if the param statement is inside a subsampled plate, then\n corresponding batch dimensions of the parameter will be correspondingly\n subsampled. If unspecified, all dimensions will be considered event\n dims and no subsampling will be performed.\n :return: value for the parameter. 
Unless wrapped inside a\n handler like :class:`~numpyro.handlers.substitute`, this will simply\n return the initial value.\n \"\"\"\n # if there are no active Messengers, we just draw a sample and return it as expected:\n if not _PYRO_STACK:\n assert not callable(\n init_value\n ), \"A callable init_value needs to be put inside a numpyro.handlers.seed handler.\"\n return init_value\n\n if callable(init_value):\n\n def fn(init_fn, *args, **kwargs):\n return init_fn(prng_key())\n\n else:\n fn = identity\n\n # Otherwise, we initialize a message...\n initial_msg = {\n \"type\": \"param\",\n \"name\": name,\n \"fn\": fn,\n \"args\": (init_value,),\n \"kwargs\": kwargs,\n \"value\": None,\n \"scale\": None,\n \"cond_indep_stack\": [],\n }\n\n # ...and use apply_stack to send it to the Messengers\n msg = apply_stack(initial_msg)\n return msg[\"value\"]\n\n\ndef deterministic(name, value):\n \"\"\"\n Used to designate deterministic sites in the model. Note that most effect\n handlers will not operate on deterministic sites (except\n :func:`~numpyro.handlers.trace`), so deterministic sites should be\n side-effect free. The use case for deterministic nodes is to record any\n values in the model execution trace.\n\n :param str name: name of the deterministic site.\n :param jnp.ndarray value: deterministic value to record in the trace.\n \"\"\"\n if not _PYRO_STACK:\n return value\n\n initial_msg = {\n \"type\": \"deterministic\",\n \"name\": name,\n \"value\": value,\n \"cond_indep_stack\": [],\n }\n\n # ...and use apply_stack to send it to the Messengers\n msg = apply_stack(initial_msg)\n return msg[\"value\"]\n\n\ndef mutable(name, init_value=None):\n \"\"\"\n This primitive is used to store a mutable value that can be changed\n during model execution::\n\n a = numpyro.mutable(\"a\", {\"value\": 1.})\n a[\"value\"] = 2.\n assert numpyro.mutable(\"a\")[\"value\"] == 2.\n\n For example, this can be used to store and update information like\n running mean/variance in a neural network batch normalization layer.\n\n :param str name: name of the mutable site.\n :param init_value: mutable value to record in the trace.\n \"\"\"\n if not _PYRO_STACK:\n return init_value\n\n initial_msg = {\n \"type\": \"mutable\",\n \"name\": name,\n \"fn\": identity,\n \"args\": (init_value,),\n \"kwargs\": {},\n \"value\": init_value,\n }\n\n # ...and use apply_stack to send it to the Messengers\n msg = apply_stack(initial_msg)\n return msg[\"value\"]\n\n\ndef _inspect():\n \"\"\"\n EXPERIMENTAL Inspect the Pyro stack.\n\n .. 
warning:: The format of the returned message may change at any time and\n does not guarantee backwards compatibility.\n\n :returns: A message with mask effects applied.\n :rtype: dict\n \"\"\"\n # NB: this is different from Pyro that in Pyro, all effects applied.\n # Here, we only apply mask effect handler.\n msg = {\n \"type\": \"inspect\",\n \"fn\": lambda: True,\n \"args\": (),\n \"kwargs\": {},\n \"value\": None,\n \"mask\": None,\n }\n apply_stack(msg)\n return msg\n\n\ndef get_mask():\n \"\"\"\n Records the effects of enclosing ``handlers.mask`` handlers.\n This is useful for avoiding expensive ``numpyro.factor()`` computations during\n prediction, when the log density need not be computed, e.g.::\n\n def model():\n # ...\n if numpyro.get_mask() is not False:\n log_density = my_expensive_computation()\n numpyro.factor(\"foo\", log_density)\n # ...\n\n :returns: The mask.\n :rtype: None, bool, or jnp.ndarray\n \"\"\"\n return _inspect()[\"mask\"]\n\n\ndef module(name, nn, input_shape=None):\n \"\"\"\n Declare a :mod:`~jax.example_libraries.stax` style neural network inside a\n model so that its parameters are registered for optimization via\n :func:`~numpyro.primitives.param` statements.\n\n :param str name: name of the module to be registered.\n :param tuple nn: a tuple of `(init_fn, apply_fn)` obtained by a :mod:`~jax.example_libraries.stax`\n constructor function.\n :param tuple input_shape: shape of the input taken by the\n neural network.\n :return: a `apply_fn` with bound parameters that takes an array\n as an input and returns the neural network transformed output\n array.\n \"\"\"\n module_key = name + \"$params\"\n nn_init, nn_apply = nn\n nn_params = param(module_key)\n if nn_params is None:\n if input_shape is None:\n raise ValueError(\"Valid value for `input_shape` needed to initialize.\")\n rng_key = prng_key()\n _, nn_params = nn_init(rng_key, input_shape)\n param(module_key, nn_params)\n return functools.partial(nn_apply, nn_params)\n\n\ndef _subsample_fn(size, subsample_size, rng_key=None):\n if rng_key is None:\n raise ValueError(\n \"Missing random key to generate subsample indices.\"\n \" Algorithms like HMC/NUTS do not support subsampling.\"\n \" You might want to use SVI or HMCECS instead.\"\n )\n if jax.default_backend() == \"cpu\":\n # ref: https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle#The_modern_algorithm\n rng_keys = random.split(rng_key, subsample_size)\n\n def body_fn(val, idx):\n i_p1 = size - idx\n i = i_p1 - 1\n j = random.randint(rng_keys[idx], (), 0, i_p1)\n val = val.at[jnp.array([i, j])].set(val[jnp.array([j, i])])\n return val, None\n\n val, _ = lax.scan(body_fn, jnp.arange(size), jnp.arange(subsample_size))\n return val[-subsample_size:]\n else:\n return random.choice(rng_key, size, (subsample_size,), replace=False)\n\n\nclass plate(Messenger):\n \"\"\"\n Construct for annotating conditionally independent variables. Within a\n `plate` context manager, `sample` sites will be automatically broadcasted to\n the size of the plate. Additionally, a scale factor might be applied by\n certain inference algorithms if `subsample_size` is specified.\n\n .. note:: This can be used to subsample minibatches of data:\n\n .. 
code-block:: python\n\n with plate(\"data\", len(data), subsample_size=100) as ind:\n batch = data[ind]\n assert len(batch) == 100\n\n :param str name: Name of the plate.\n :param int size: Size of the plate.\n :param int subsample_size: Optional argument denoting the size of the mini-batch.\n This can be used to apply a scaling factor by inference algorithms. e.g.\n when computing ELBO using a mini-batch.\n :param int dim: Optional argument to specify which dimension in the tensor\n is used as the plate dim. If `None` (default), the rightmost available dim\n is allocated.\n \"\"\"\n\n def __init__(self, name, size, subsample_size=None, dim=None):\n self.name = name\n assert size > 0, \"size of plate should be positive\"\n self.size = size\n if dim is not None and dim >= 0:\n raise ValueError(\"dim arg must be negative.\")\n self.dim, self._indices = self._subsample(\n self.name, self.size, subsample_size, dim\n )\n self.subsample_size = self._indices.shape[0]\n super(plate, self).__init__()\n\n # XXX: different from Pyro, this method returns dim and indices\n @staticmethod\n def _subsample(name, size, subsample_size, dim):\n msg = {\n \"type\": \"plate\",\n \"fn\": _subsample_fn,\n \"name\": name,\n \"args\": (size, subsample_size),\n \"kwargs\": {\"rng_key\": None},\n \"value\": (\n None\n if (subsample_size is not None and size != subsample_size)\n else jnp.arange(size)\n ),\n \"scale\": 1.0,\n \"cond_indep_stack\": [],\n }\n apply_stack(msg)\n subsample = msg[\"value\"]\n subsample_size = msg[\"args\"][1]\n if subsample_size is not None and subsample_size != subsample.shape[0]:\n warnings.warn(\n \"subsample_size does not match len(subsample), {} vs {}.\".format(\n subsample_size, len(subsample)\n )\n + \" Did you accidentally use different subsample_size in the model and guide?\",\n stacklevel=find_stack_level(),\n )\n cond_indep_stack = msg[\"cond_indep_stack\"]\n occupied_dims = {f.dim for f in cond_indep_stack}\n if dim is None:\n new_dim = -1\n while new_dim in occupied_dims:\n new_dim -= 1\n dim = new_dim\n else:\n assert dim not in occupied_dims\n return dim, subsample\n\n def __enter__(self):\n super().__enter__()\n return self._indices\n\n @staticmethod\n def _get_batch_shape(cond_indep_stack):\n n_dims = max(-f.dim for f in cond_indep_stack)\n batch_shape = [1] * n_dims\n for f in cond_indep_stack:\n batch_shape[f.dim] = f.size\n return tuple(batch_shape)\n\n def process_message(self, msg):\n if msg[\"type\"] not in (\"param\", \"sample\", \"plate\", \"deterministic\"):\n if msg[\"type\"] == \"control_flow\":\n raise NotImplementedError(\n \"Cannot use control flow primitive under a `plate` primitive.\"\n \" Please move those `plate` statements into the control flow\"\n \" body function. 
See `scan` documentation for more information.\"\n )\n return\n\n cond_indep_stack = msg[\"cond_indep_stack\"]\n frame = CondIndepStackFrame(self.name, self.dim, self.subsample_size)\n cond_indep_stack.append(frame)\n if msg[\"type\"] == \"deterministic\":\n return\n if msg[\"type\"] == \"sample\":\n expected_shape = self._get_batch_shape(cond_indep_stack)\n dist_batch_shape = msg[\"fn\"].batch_shape\n if \"sample_shape\" in msg[\"kwargs\"]:\n dist_batch_shape = msg[\"kwargs\"][\"sample_shape\"] + dist_batch_shape\n msg[\"kwargs\"][\"sample_shape\"] = ()\n overlap_idx = max(len(expected_shape) - len(dist_batch_shape), 0)\n trailing_shape = expected_shape[overlap_idx:]\n broadcast_shape = lax.broadcast_shapes(\n trailing_shape, tuple(dist_batch_shape)\n )\n batch_shape = expected_shape[:overlap_idx] + broadcast_shape\n msg[\"fn\"] = msg[\"fn\"].expand(batch_shape)\n if self.size != self.subsample_size:\n scale = 1.0 if msg[\"scale\"] is None else msg[\"scale\"]\n msg[\"scale\"] = scale * (\n self.size / self.subsample_size if self.subsample_size else 1\n )\n\n def postprocess_message(self, msg):\n if msg[\"type\"] in (\"subsample\", \"param\") and self.dim is not None:\n event_dim = msg[\"kwargs\"].get(\"event_dim\")\n if event_dim is not None:\n assert event_dim >= 0\n dim = self.dim - event_dim\n shape = jnp.shape(msg[\"value\"])\n if len(shape) >= -dim and shape[dim] != 1:\n if shape[dim] != self.size:\n if msg[\"type\"] == \"param\":\n statement = \"numpyro.param({}, ..., event_dim={})\".format(\n msg[\"name\"], event_dim\n )\n else:\n statement = \"numpyro.subsample(..., event_dim={})\".format(\n event_dim\n )\n raise ValueError(\n \"Inside numpyro.plate({}, {}, dim={}) invalid shape of {}: {}\".format(\n self.name, self.size, self.dim, statement, shape\n )\n )\n if self.subsample_size < self.size:\n value = msg[\"value\"]\n new_value = jnp.take(value, self._indices, dim)\n msg[\"value\"] = new_value\n\n\n@contextmanager\ndef plate_stack(prefix, sizes, rightmost_dim=-1):\n \"\"\"\n Create a contiguous stack of :class:`plate` s with dimensions::\n\n rightmost_dim - len(sizes), ..., rightmost_dim\n\n :param str prefix: Name prefix for plates.\n :param iterable sizes: An iterable of plate sizes.\n :param int rightmost_dim: The rightmost dim, counting from the right.\n \"\"\"\n assert rightmost_dim < 0\n with ExitStack() as stack:\n for i, size in enumerate(reversed(sizes)):\n plate_i = plate(\"{}_{}\".format(prefix, i), size, dim=rightmost_dim - i)\n stack.enter_context(plate_i)\n yield\n\n\ndef factor(name, log_factor):\n \"\"\"\n Factor statement to add arbitrary log probability factor to a\n probabilistic model.\n\n :param str name: Name of the trivial sample.\n :param jnp.ndarray log_factor: A possibly batched log probability factor.\n \"\"\"\n unit_dist = numpyro.distributions.distribution.Unit(log_factor)\n unit_value = unit_dist.sample(None)\n sample(name, unit_dist, obs=unit_value, infer={\"is_auxiliary\": True})\n\n\ndef prng_key():\n \"\"\"\n A statement to draw a pseudo-random number generator key\n :func:`~jax.random.PRNGKey` under :class:`~numpyro.handlers.seed` handler.\n\n :return: a PRNG key of shape (2,) and dtype unit32.\n \"\"\"\n if not _PYRO_STACK:\n return\n\n initial_msg = {\n \"type\": \"prng_key\",\n \"fn\": lambda rng_key: rng_key,\n \"args\": (),\n \"kwargs\": {\"rng_key\": None},\n \"value\": None,\n }\n\n msg = apply_stack(initial_msg)\n return msg[\"value\"]\n\n\ndef subsample(data, event_dim):\n \"\"\"\n EXPERIMENTAL Subsampling statement to subsample data 
based on enclosing\n :class:`~numpyro.primitives.plate` s.\n\n This is typically called on arguments to ``model()`` when subsampling is\n performed automatically by :class:`~numpyro.primitives.plate` s by passing\n ``subsample_size`` kwarg. For example the following are equivalent::\n\n # Version 1. using indexing\n def model(data):\n with numpyro.plate(\"data\", len(data), subsample_size=10, dim=-data.dim()) as ind:\n data = data[ind]\n # ...\n\n # Version 2. using numpyro.subsample()\n def model(data):\n with numpyro.plate(\"data\", len(data), subsample_size=10, dim=-data.dim()):\n data = numpyro.subsample(data, event_dim=0)\n # ...\n\n :param jnp.ndarray data: A tensor of batched data.\n :param int event_dim: The event dimension of the data tensor. Dimensions to\n the left are considered batch dimensions.\n :returns: A subsampled version of ``data``\n :rtype: ~jnp.ndarray\n \"\"\"\n if not _PYRO_STACK:\n return data\n\n assert isinstance(event_dim, int) and event_dim >= 0\n initial_msg = {\n \"type\": \"subsample\",\n \"value\": data,\n \"kwargs\": {\"event_dim\": event_dim},\n }\n\n msg = apply_stack(initial_msg)\n return msg[\"value\"]\n", "path": "numpyro/primitives.py" } ]
[ { "content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom collections import namedtuple\nfrom contextlib import ExitStack, contextmanager\nimport functools\nimport warnings\n\nimport jax\nfrom jax import lax, random\nimport jax.numpy as jnp\n\nimport numpyro\nfrom numpyro.util import find_stack_level, identity\n\n_PYRO_STACK = []\n\nCondIndepStackFrame = namedtuple(\"CondIndepStackFrame\", [\"name\", \"dim\", \"size\"])\n\n\ndef default_process_message(msg):\n if msg[\"value\"] is None:\n if msg[\"type\"] == \"sample\":\n msg[\"value\"], msg[\"intermediates\"] = msg[\"fn\"](\n *msg[\"args\"], sample_intermediates=True, **msg[\"kwargs\"]\n )\n else:\n msg[\"value\"] = msg[\"fn\"](*msg[\"args\"], **msg[\"kwargs\"])\n\n\ndef apply_stack(msg):\n \"\"\"\n Execute the effect stack at a single site according to the following scheme:\n\n 1. For each ``Messenger`` in the stack from bottom to top,\n execute ``Messenger.process_message`` with the message;\n if the message field \"stop\" is True, stop;\n otherwise, continue\n 2. Apply default behavior (``default_process_message``) to finish remaining\n site execution\n 3. For each ``Messenger`` in the stack from top to bottom,\n execute ``Messenger.postprocess_message`` to update the message\n and internal messenger state with the site results\n \"\"\"\n pointer = 0\n for pointer, handler in enumerate(reversed(_PYRO_STACK)):\n handler.process_message(msg)\n # When a Messenger sets the \"stop\" field of a message,\n # it prevents any Messengers above it on the stack from being applied.\n if msg.get(\"stop\"):\n break\n\n default_process_message(msg)\n\n # A Messenger that sets msg[\"stop\"] == True also prevents application\n # of postprocess_message by Messengers above it on the stack\n # via the pointer variable from the process_message loop\n for handler in _PYRO_STACK[-pointer - 1 :]:\n handler.postprocess_message(msg)\n return msg\n\n\nclass Messenger(object):\n def __init__(self, fn=None):\n if fn is not None and not callable(fn):\n raise ValueError(\n \"Expected `fn` to be a Python callable object; \"\n \"instead found type(fn) = {}.\".format(type(fn))\n )\n self.fn = fn\n functools.update_wrapper(self, fn, updated=[])\n\n def __enter__(self):\n _PYRO_STACK.append(self)\n\n def __exit__(self, exc_type, exc_value, traceback):\n if exc_type is None:\n assert _PYRO_STACK[-1] is self\n _PYRO_STACK.pop()\n else:\n # NB: this mimics Pyro exception handling\n # the wrapped function or block raised an exception\n # handler exception handling:\n # when the callee or enclosed block raises an exception,\n # find this handler's position in the stack,\n # then remove it and everything below it in the stack.\n if self in _PYRO_STACK:\n loc = _PYRO_STACK.index(self)\n for i in range(loc, len(_PYRO_STACK)):\n _PYRO_STACK.pop()\n\n def process_message(self, msg):\n pass\n\n def postprocess_message(self, msg):\n pass\n\n def __call__(self, *args, **kwargs):\n if self.fn is None:\n # Assume self is being used as a decorator.\n assert len(args) == 1 and not kwargs\n self.fn = args[0]\n return self\n with self:\n return self.fn(*args, **kwargs)\n\n\ndef _masked_observe(name, fn, obs, obs_mask, **kwargs):\n # Split into two auxiliary sample sites.\n with numpyro.handlers.mask(mask=obs_mask):\n observed = sample(f\"{name}_observed\", fn, **kwargs, obs=obs)\n with numpyro.handlers.mask(mask=(obs_mask ^ True)):\n unobserved = sample(f\"{name}_unobserved\", fn, **kwargs)\n\n # Interleave observed and unobserved events.\n 
shape = jnp.shape(obs_mask) + (1,) * fn.event_dim\n batch_mask = jnp.reshape(obs_mask, shape)\n value = jnp.where(batch_mask, observed, unobserved)\n return deterministic(name, value)\n\n\ndef sample(\n name, fn, obs=None, rng_key=None, sample_shape=(), infer=None, obs_mask=None\n):\n \"\"\"\n Returns a random sample from the stochastic function `fn`. This can have\n additional side effects when wrapped inside effect handlers like\n :class:`~numpyro.handlers.substitute`.\n\n .. note::\n By design, `sample` primitive is meant to be used inside a NumPyro model.\n Then :class:`~numpyro.handlers.seed` handler is used to inject a random\n state to `fn`. In those situations, `rng_key` keyword will take no\n effect.\n\n :param str name: name of the sample site.\n :param fn: a stochastic function that returns a sample.\n :param jnp.ndarray obs: observed value\n :param jax.random.PRNGKey rng_key: an optional random key for `fn`.\n :param sample_shape: Shape of samples to be drawn.\n :param dict infer: an optional dictionary containing additional information\n for inference algorithms. For example, if `fn` is a discrete distribution,\n setting `infer={'enumerate': 'parallel'}` to tell MCMC marginalize\n this discrete latent site.\n :param jnp.ndarray obs_mask: Optional boolean array mask of shape\n broadcastable with ``fn.batch_shape``. If provided, events with\n mask=True will be conditioned on ``obs`` and remaining events will be\n imputed by sampling. This introduces a latent sample site named ``name\n + \"_unobserved\"`` which should be used by guides in SVI. Note that this\n argument is not intended to be used with MCMC.\n :return: sample from the stochastic `fn`.\n \"\"\"\n assert isinstance(\n sample_shape, tuple\n ), \"sample_shape needs to be a tuple of integers\"\n if not isinstance(fn, numpyro.distributions.Distribution):\n type_error = TypeError(\n \"It looks like you tried to use a fn that isn't an instance of \"\n \"numpyro.distributions.Distribution, funsor.Funsor or \"\n \"tensorflow_probability.distributions.Distribution. 
If you're using \"\n \"funsor or tensorflow_probability, make sure they are correctly installed.\"\n )\n\n # fn can be a funsor.Funsor, but this won't be installed for all users\n try:\n from funsor import Funsor\n except ImportError:\n Funsor = None\n\n # if Funsor import failed, or fn is not a Funsor it's also possible fn could be\n # a tensorflow_probability distribution\n if Funsor is None or not isinstance(fn, Funsor):\n try:\n from tensorflow_probability.substrates.jax import distributions as tfd\n\n from numpyro.contrib.tfp.distributions import TFPDistribution\n except ImportError:\n # if tensorflow_probability fails to import here, then fn is not a\n # numpyro Distribution or a Funsor, and it can't have been a tfp\n # distribution either, so raising TypeError is ok\n raise type_error\n\n if isinstance(fn, tfd.Distribution):\n with warnings.catch_warnings():\n # ignore FutureWarnings when instantiating TFPDistribution\n warnings.simplefilter(\"ignore\", category=FutureWarning)\n # if fn is a tfp distribution we need to wrap it\n fn = TFPDistribution[fn.__class__](**fn.parameters)\n else:\n # if tensorflow_probability imported, but fn is not tfd.Distribution we\n # still need to raise a type error\n raise type_error\n\n # if no active Messengers, draw a sample or return obs as expected:\n if not _PYRO_STACK:\n if obs is None:\n return fn(rng_key=rng_key, sample_shape=sample_shape)\n else:\n return obs\n\n if obs_mask is not None:\n return _masked_observe(\n name, fn, obs, obs_mask, rng_key=rng_key, sample_shape=(), infer=infer\n )\n\n # Otherwise, we initialize a message...\n initial_msg = {\n \"type\": \"sample\",\n \"name\": name,\n \"fn\": fn,\n \"args\": (),\n \"kwargs\": {\"rng_key\": rng_key, \"sample_shape\": sample_shape},\n \"value\": obs,\n \"scale\": None,\n \"is_observed\": obs is not None,\n \"intermediates\": [],\n \"cond_indep_stack\": [],\n \"infer\": {} if infer is None else infer,\n }\n\n # ...and use apply_stack to send it to the Messengers\n msg = apply_stack(initial_msg)\n return msg[\"value\"]\n\n\ndef param(name, init_value=None, **kwargs):\n \"\"\"\n Annotate the given site as an optimizable parameter for use with\n :mod:`jax.example_libraries.optimizers`. For an example of how `param` statements\n can be used in inference algorithms, refer to :class:`~numpyro.infer.SVI`.\n\n :param str name: name of site.\n :param init_value: initial value specified by the user or a lazy callable\n that accepts a JAX random PRNGKey and returns an array.\n Note that the onus of using this to initialize the optimizer is\n on the user inference algorithm, since there is no global parameter\n store in NumPyro.\n :type init_value: jnp.ndarray or callable\n :param constraint: NumPyro constraint, defaults to ``constraints.real``.\n :type constraint: numpyro.distributions.constraints.Constraint\n :param int event_dim: (optional) number of rightmost dimensions unrelated\n to batching. Dimension to the left of this will be considered batch\n dimensions; if the param statement is inside a subsampled plate, then\n corresponding batch dimensions of the parameter will be correspondingly\n subsampled. If unspecified, all dimensions will be considered event\n dims and no subsampling will be performed.\n :return: value for the parameter. 
Unless wrapped inside a\n handler like :class:`~numpyro.handlers.substitute`, this will simply\n return the initial value.\n \"\"\"\n # if there are no active Messengers, we just draw a sample and return it as expected:\n if not _PYRO_STACK:\n assert not callable(\n init_value\n ), \"A callable init_value needs to be put inside a numpyro.handlers.seed handler.\"\n return init_value\n\n if callable(init_value):\n\n def fn(init_fn, *args, **kwargs):\n return init_fn(prng_key())\n\n else:\n fn = identity\n\n # Otherwise, we initialize a message...\n initial_msg = {\n \"type\": \"param\",\n \"name\": name,\n \"fn\": fn,\n \"args\": (init_value,),\n \"kwargs\": kwargs,\n \"value\": None,\n \"scale\": None,\n \"cond_indep_stack\": [],\n }\n\n # ...and use apply_stack to send it to the Messengers\n msg = apply_stack(initial_msg)\n return msg[\"value\"]\n\n\ndef deterministic(name, value):\n \"\"\"\n Used to designate deterministic sites in the model. Note that most effect\n handlers will not operate on deterministic sites (except\n :func:`~numpyro.handlers.trace`), so deterministic sites should be\n side-effect free. The use case for deterministic nodes is to record any\n values in the model execution trace.\n\n :param str name: name of the deterministic site.\n :param jnp.ndarray value: deterministic value to record in the trace.\n \"\"\"\n if not _PYRO_STACK:\n return value\n\n initial_msg = {\n \"type\": \"deterministic\",\n \"name\": name,\n \"value\": value,\n \"cond_indep_stack\": [],\n }\n\n # ...and use apply_stack to send it to the Messengers\n msg = apply_stack(initial_msg)\n return msg[\"value\"]\n\n\ndef mutable(name, init_value=None):\n \"\"\"\n This primitive is used to store a mutable value that can be changed\n during model execution::\n\n a = numpyro.mutable(\"a\", {\"value\": 1.})\n a[\"value\"] = 2.\n assert numpyro.mutable(\"a\")[\"value\"] == 2.\n\n For example, this can be used to store and update information like\n running mean/variance in a neural network batch normalization layer.\n\n :param str name: name of the mutable site.\n :param init_value: mutable value to record in the trace.\n \"\"\"\n if not _PYRO_STACK:\n return init_value\n\n initial_msg = {\n \"type\": \"mutable\",\n \"name\": name,\n \"fn\": identity,\n \"args\": (init_value,),\n \"kwargs\": {},\n \"value\": init_value,\n }\n\n # ...and use apply_stack to send it to the Messengers\n msg = apply_stack(initial_msg)\n return msg[\"value\"]\n\n\ndef _inspect():\n \"\"\"\n EXPERIMENTAL Inspect the Pyro stack.\n\n .. 
warning:: The format of the returned message may change at any time and\n does not guarantee backwards compatibility.\n\n :returns: A message with mask effects applied.\n :rtype: dict\n \"\"\"\n # NB: this is different from Pyro that in Pyro, all effects applied.\n # Here, we only apply mask effect handler.\n msg = {\n \"type\": \"inspect\",\n \"fn\": lambda: True,\n \"args\": (),\n \"kwargs\": {},\n \"value\": None,\n \"mask\": None,\n }\n apply_stack(msg)\n return msg\n\n\ndef get_mask():\n \"\"\"\n Records the effects of enclosing ``handlers.mask`` handlers.\n This is useful for avoiding expensive ``numpyro.factor()`` computations during\n prediction, when the log density need not be computed, e.g.::\n\n def model():\n # ...\n if numpyro.get_mask() is not False:\n log_density = my_expensive_computation()\n numpyro.factor(\"foo\", log_density)\n # ...\n\n :returns: The mask.\n :rtype: None, bool, or jnp.ndarray\n \"\"\"\n return _inspect()[\"mask\"]\n\n\ndef module(name, nn, input_shape=None):\n \"\"\"\n Declare a :mod:`~jax.example_libraries.stax` style neural network inside a\n model so that its parameters are registered for optimization via\n :func:`~numpyro.primitives.param` statements.\n\n :param str name: name of the module to be registered.\n :param tuple nn: a tuple of `(init_fn, apply_fn)` obtained by a :mod:`~jax.example_libraries.stax`\n constructor function.\n :param tuple input_shape: shape of the input taken by the\n neural network.\n :return: a `apply_fn` with bound parameters that takes an array\n as an input and returns the neural network transformed output\n array.\n \"\"\"\n module_key = name + \"$params\"\n nn_init, nn_apply = nn\n nn_params = param(module_key)\n if nn_params is None:\n if input_shape is None:\n raise ValueError(\"Valid value for `input_shape` needed to initialize.\")\n rng_key = prng_key()\n _, nn_params = nn_init(rng_key, input_shape)\n param(module_key, nn_params)\n return functools.partial(nn_apply, nn_params)\n\n\ndef _subsample_fn(size, subsample_size, rng_key=None):\n if rng_key is None:\n raise ValueError(\n \"Missing random key to generate subsample indices.\"\n \" Algorithms like HMC/NUTS do not support subsampling.\"\n \" You might want to use SVI or HMCECS instead.\"\n )\n if jax.default_backend() == \"cpu\":\n # ref: https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle#The_modern_algorithm\n rng_keys = random.split(rng_key, subsample_size)\n\n def body_fn(val, idx):\n i_p1 = size - idx\n i = i_p1 - 1\n j = random.randint(rng_keys[idx], (), 0, i_p1)\n val = val.at[jnp.array([i, j])].set(val[jnp.array([j, i])])\n return val, None\n\n val, _ = lax.scan(body_fn, jnp.arange(size), jnp.arange(subsample_size))\n return val[-subsample_size:]\n else:\n return random.choice(rng_key, size, (subsample_size,), replace=False)\n\n\nclass plate(Messenger):\n \"\"\"\n Construct for annotating conditionally independent variables. Within a\n `plate` context manager, `sample` sites will be automatically broadcasted to\n the size of the plate. Additionally, a scale factor might be applied by\n certain inference algorithms if `subsample_size` is specified.\n\n .. note:: This can be used to subsample minibatches of data:\n\n .. 
code-block:: python\n\n with plate(\"data\", len(data), subsample_size=100) as ind:\n batch = data[ind]\n assert len(batch) == 100\n\n :param str name: Name of the plate.\n :param int size: Size of the plate.\n :param int subsample_size: Optional argument denoting the size of the mini-batch.\n This can be used to apply a scaling factor by inference algorithms. e.g.\n when computing ELBO using a mini-batch.\n :param int dim: Optional argument to specify which dimension in the tensor\n is used as the plate dim. If `None` (default), the rightmost available dim\n is allocated.\n \"\"\"\n\n def __init__(self, name, size, subsample_size=None, dim=None):\n self.name = name\n assert size > 0, \"size of plate should be positive\"\n self.size = size\n if dim is not None and dim >= 0:\n raise ValueError(\"dim arg must be negative.\")\n self.dim, self._indices = self._subsample(\n self.name, self.size, subsample_size, dim\n )\n self.subsample_size = self._indices.shape[0]\n super(plate, self).__init__()\n\n # XXX: different from Pyro, this method returns dim and indices\n @staticmethod\n def _subsample(name, size, subsample_size, dim):\n msg = {\n \"type\": \"plate\",\n \"fn\": _subsample_fn,\n \"name\": name,\n \"args\": (size, subsample_size),\n \"kwargs\": {\"rng_key\": None},\n \"value\": (\n None\n if (subsample_size is not None and size != subsample_size)\n else jnp.arange(size)\n ),\n \"scale\": 1.0,\n \"cond_indep_stack\": [],\n }\n apply_stack(msg)\n subsample = msg[\"value\"]\n subsample_size = msg[\"args\"][1]\n if subsample_size is not None and subsample_size != subsample.shape[0]:\n warnings.warn(\n \"subsample_size does not match len(subsample), {} vs {}.\".format(\n subsample_size, len(subsample)\n )\n + \" Did you accidentally use different subsample_size in the model and guide?\",\n stacklevel=find_stack_level(),\n )\n cond_indep_stack = msg[\"cond_indep_stack\"]\n occupied_dims = {f.dim for f in cond_indep_stack}\n if dim is None:\n new_dim = -1\n while new_dim in occupied_dims:\n new_dim -= 1\n dim = new_dim\n else:\n assert dim not in occupied_dims\n return dim, subsample\n\n def __enter__(self):\n super().__enter__()\n return self._indices\n\n @staticmethod\n def _get_batch_shape(cond_indep_stack):\n n_dims = max(-f.dim for f in cond_indep_stack)\n batch_shape = [1] * n_dims\n for f in cond_indep_stack:\n batch_shape[f.dim] = f.size\n return tuple(batch_shape)\n\n def process_message(self, msg):\n if msg[\"type\"] not in (\"param\", \"sample\", \"plate\", \"deterministic\"):\n if msg[\"type\"] == \"control_flow\":\n raise NotImplementedError(\n \"Cannot use control flow primitive under a `plate` primitive.\"\n \" Please move those `plate` statements into the control flow\"\n \" body function. 
See `scan` documentation for more information.\"\n )\n return\n\n cond_indep_stack = msg[\"cond_indep_stack\"]\n frame = CondIndepStackFrame(self.name, self.dim, self.subsample_size)\n cond_indep_stack.append(frame)\n if msg[\"type\"] == \"deterministic\":\n return\n if msg[\"type\"] == \"sample\":\n expected_shape = self._get_batch_shape(cond_indep_stack)\n dist_batch_shape = msg[\"fn\"].batch_shape\n if \"sample_shape\" in msg[\"kwargs\"]:\n dist_batch_shape = msg[\"kwargs\"][\"sample_shape\"] + dist_batch_shape\n msg[\"kwargs\"][\"sample_shape\"] = ()\n overlap_idx = max(len(expected_shape) - len(dist_batch_shape), 0)\n trailing_shape = expected_shape[overlap_idx:]\n broadcast_shape = lax.broadcast_shapes(\n trailing_shape, tuple(dist_batch_shape)\n )\n batch_shape = expected_shape[:overlap_idx] + broadcast_shape\n msg[\"fn\"] = msg[\"fn\"].expand(batch_shape)\n if self.size != self.subsample_size:\n scale = 1.0 if msg[\"scale\"] is None else msg[\"scale\"]\n msg[\"scale\"] = scale * (\n self.size / self.subsample_size if self.subsample_size else 1\n )\n\n def postprocess_message(self, msg):\n if msg[\"type\"] in (\"subsample\", \"param\") and self.dim is not None:\n event_dim = msg[\"kwargs\"].get(\"event_dim\")\n if event_dim is not None:\n assert event_dim >= 0\n dim = self.dim - event_dim\n shape = jnp.shape(msg[\"value\"])\n if len(shape) >= -dim and shape[dim] != 1:\n if shape[dim] != self.size:\n if msg[\"type\"] == \"param\":\n statement = \"numpyro.param({}, ..., event_dim={})\".format(\n msg[\"name\"], event_dim\n )\n else:\n statement = \"numpyro.subsample(..., event_dim={})\".format(\n event_dim\n )\n raise ValueError(\n \"Inside numpyro.plate({}, {}, dim={}) invalid shape of {}: {}\".format(\n self.name, self.size, self.dim, statement, shape\n )\n )\n if self.subsample_size < self.size:\n value = msg[\"value\"]\n new_value = jnp.take(value, self._indices, dim)\n msg[\"value\"] = new_value\n\n\n@contextmanager\ndef plate_stack(prefix, sizes, rightmost_dim=-1):\n \"\"\"\n Create a contiguous stack of :class:`plate` s with dimensions::\n\n rightmost_dim - len(sizes), ..., rightmost_dim\n\n :param str prefix: Name prefix for plates.\n :param iterable sizes: An iterable of plate sizes.\n :param int rightmost_dim: The rightmost dim, counting from the right.\n \"\"\"\n assert rightmost_dim < 0\n with ExitStack() as stack:\n for i, size in enumerate(reversed(sizes)):\n plate_i = plate(\"{}_{}\".format(prefix, i), size, dim=rightmost_dim - i)\n stack.enter_context(plate_i)\n yield\n\n\ndef factor(name, log_factor):\n \"\"\"\n Factor statement to add arbitrary log probability factor to a\n probabilistic model.\n\n :param str name: Name of the trivial sample.\n :param jnp.ndarray log_factor: A possibly batched log probability factor.\n \"\"\"\n unit_dist = numpyro.distributions.distribution.Unit(log_factor)\n unit_value = unit_dist.sample(None)\n sample(name, unit_dist, obs=unit_value, infer={\"is_auxiliary\": True})\n\n\ndef prng_key():\n \"\"\"\n A statement to draw a pseudo-random number generator key\n :func:`~jax.random.PRNGKey` under :class:`~numpyro.handlers.seed` handler.\n\n :return: a PRNG key of shape (2,) and dtype unit32.\n \"\"\"\n if not _PYRO_STACK:\n warnings.warn(\n \"Cannot generate JAX PRNG key outside of `seed` handler.\",\n stacklevel=find_stack_level(),\n )\n return\n\n initial_msg = {\n \"type\": \"prng_key\",\n \"fn\": lambda rng_key: rng_key,\n \"args\": (),\n \"kwargs\": {\"rng_key\": None},\n \"value\": None,\n }\n\n msg = apply_stack(initial_msg)\n return 
msg[\"value\"]\n\n\ndef subsample(data, event_dim):\n \"\"\"\n EXPERIMENTAL Subsampling statement to subsample data based on enclosing\n :class:`~numpyro.primitives.plate` s.\n\n This is typically called on arguments to ``model()`` when subsampling is\n performed automatically by :class:`~numpyro.primitives.plate` s by passing\n ``subsample_size`` kwarg. For example the following are equivalent::\n\n # Version 1. using indexing\n def model(data):\n with numpyro.plate(\"data\", len(data), subsample_size=10, dim=-data.dim()) as ind:\n data = data[ind]\n # ...\n\n # Version 2. using numpyro.subsample()\n def model(data):\n with numpyro.plate(\"data\", len(data), subsample_size=10, dim=-data.dim()):\n data = numpyro.subsample(data, event_dim=0)\n # ...\n\n :param jnp.ndarray data: A tensor of batched data.\n :param int event_dim: The event dimension of the data tensor. Dimensions to\n the left are considered batch dimensions.\n :returns: A subsampled version of ``data``\n :rtype: ~jnp.ndarray\n \"\"\"\n if not _PYRO_STACK:\n return data\n\n assert isinstance(event_dim, int) and event_dim >= 0\n initial_msg = {\n \"type\": \"subsample\",\n \"value\": data,\n \"kwargs\": {\"event_dim\": event_dim},\n }\n\n msg = apply_stack(initial_msg)\n return msg[\"value\"]\n", "path": "numpyro/primitives.py" } ]
diff --git a/numpyro/primitives.py b/numpyro/primitives.py index 99cf35902..ac02a8856 100644 --- a/numpyro/primitives.py +++ b/numpyro/primitives.py @@ -622,6 +622,10 @@ def prng_key(): :return: a PRNG key of shape (2,) and dtype unit32. """ if not _PYRO_STACK: + warnings.warn( + "Cannot generate JAX PRNG key outside of `seed` handler.", + stacklevel=find_stack_level(), + ) return initial_msg = { diff --git a/test/test_handlers.py b/test/test_handlers.py index e24e22890..518f856dc 100644 --- a/test/test_handlers.py +++ b/test/test_handlers.py @@ -778,7 +778,8 @@ def guide(): def test_prng_key(): - assert numpyro.prng_key() is None + with pytest.warns(Warning, match="outside of `seed`"): + assert numpyro.prng_key() is None with handlers.seed(rng_seed=0): rng_key = numpyro.prng_key()
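As a sketch of the behavior the diff introduces (the warning text matches the updated test above; treat the snippet as illustrative):

```python
import warnings
import numpyro
from numpyro import handlers

# Outside any seed handler, prng_key() still returns None, but now warns
# instead of failing silently:
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    assert numpyro.prng_key() is None
assert any("outside of `seed`" in str(w.message) for w in caught)

# Under a seed handler it behaves as before:
with handlers.seed(rng_seed=0):
    assert numpyro.prng_key() is not None
```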
chainer__chainer-1022
ChainList failed to copy its children.

```
#!/usr/bin/env python
from chainer import Chain, ChainList
import chainer.links as L

model0 = ChainList(L.Linear(10, 10))
model1 = model0.copy()
model0.to_gpu(0)
model1.to_gpu(1)
print(model1[0].W.data.device)  # => <CUDA Device 0>

model0 = Chain(c=L.Linear(10, 10))
model1 = model0.copy()
model0.to_gpu(0)
model1.to_gpu(1)
print(model1.c.W.data.device)  # => <CUDA Device 1>

model0 = L.Linear(10, 10)
model1 = model0.copy()
model0.to_gpu(0)
model1.to_gpu(1)
print(model1.W.data.device)  # => <CUDA Device 1>
```

This issue is reported by @snuke. Thank you!
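The first print should read `<CUDA Device 1>` as well: `Link.copy` (shown in the file below) shallowly copies parameters but knows nothing about child links, and `ChainList` apparently inherits it unchanged, so the copy still aliases the original's children. A minimal sketch of an override that also copies each child; the `_children` backing list and the `str(i)` child names are assumptions, since the `ChainList` code is not shown here:

```python
# Hypothetical ChainList.copy override:
def copy(self):
    ret = super(ChainList, self).copy()
    ret._children = list(ret._children)    # stop aliasing the original list
    for i, child in enumerate(ret._children):
        child = child.copy()               # shallow-copy each child link
        child.name = str(i)
        ret._children[i] = child
    return ret
```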
[ { "content": "import copy\n\nimport numpy\nimport six\n\nfrom chainer import cuda\nfrom chainer import variable\n\n\nclass Link(object):\n\n \"\"\"Building block of model definitions.\n\n Link is a building block of neural network models that support various\n features like handling parameters, defining network fragments,\n serialization, etc.\n\n Link is the primitive structure for the model definitions. It supports\n management of parameter variables and *persistent values* that should be\n incorporated to serialization. Parameters are variables registered via\n the :meth:`add_param` method, or given to the initializer method.\n Persistent values are arrays, scalars, or any other serializable values\n registered via the :meth:`add_persistent` method.\n\n .. note::\n Whereas arbitrary serializable objects can be registered as persistent\n values, it is strongly recommended to just register values that should\n be treated as results of learning. A typical example of persistent\n values is ones computed during training and required for testing, e.g.\n running statistics for batch normalization.\n\n Parameters and persistent values are referred by their names. They can be\n accessed as attributes of the names. Link class itself manages the lists\n of names of parameters and persistent values to distinguish parameters and\n persistent values from other attributes.\n\n Link can be composed into more complex models. This composition feature is\n supported by child classes like :class:`Chain` and :class:`ChainList`. One\n can create a chain by combining one or more links. See the documents for\n these classes for details.\n\n As noted above, Link supports the serialization protocol of the\n :class:`~chainer.Serializer` class. **Note that only parameters and\n persistent values are saved and loaded.** Other attributes are considered\n as a part of user program (i.e. a part of network definition). In order to\n construct a link from saved file, other attributes must be identically\n reconstructed by user codes.\n\n .. admonition:: Example\n\n This is a simple example of custom link definition. Chainer itself also\n provides many links defined under the :mod:`~chainer.links` module. They\n might serve as examples, too.\n\n Consider we want to define a simple primitive link that implements a\n fully-connected layer based on the :func:`~functions.linear` function.\n Note that this function takes input units, a weight variable, and a bias\n variable as arguments. Then, the fully-connected layer can be defined as\n follows::\n\n import chainer\n import chainer.functions as F\n import numpy as np\n\n class LinearLayer(chainer.Link):\n\n def __init__(self, n_in, n_out):\n # Parameters are initialized as a numpy array of given shape.\n super(LinearLayer, self).__init__(\n W=(n_out, n_in),\n b=(n_out,),\n )\n self.W.data[...] = np.random.randn(n_out, n_in)\n self.b.data.fill(0)\n\n def __call__(self, x):\n return F.linear(x, self.W, self.b)\n\n This example shows that a user can define arbitrary parameters and use\n them in any methods. Links typically implement the ``__call__``\n operator.\n\n Args:\n params: Shapes of initial parameters. The keywords are used as their\n names. 
The names are also set to the parameter variables.\n\n Attributes:\n name (str): Name of this link, given by the parent chain (if exists).\n\n \"\"\"\n def __init__(self, **params):\n self._params = []\n self._persistent = []\n self._cpu = True\n self.name = None\n\n for name, shape in six.iteritems(params):\n self.add_param(name, shape)\n\n @property\n def xp(self):\n \"\"\"Array module for this link.\n\n Depending on which of CPU/GPU this link is on, this property returns\n :mod:`numpy` or :mod:`cupy`.\n\n \"\"\"\n return numpy if self._cpu else cuda.cupy\n\n def add_param(self, name, shape, dtype=numpy.float32):\n \"\"\"Registers a parameter to the link.\n\n The registered parameter is saved and loaded on serialization and\n deserialization, and involved in the optimization. The data and\n gradient of the variable are initialized by NaN arrays.\n\n The parameter is set to an attribute of the link with the given name.\n\n Args:\n name (str): Name of the parameter. This name is also used as the\n attribute name.\n shape (int or tuple of ints): Shape of the parameter array.\n dtype: Data type of the parameter array.\n\n \"\"\"\n d = self.__dict__\n if name in d:\n raise AttributeError(\n 'cannot register a new parameter %s: attribute exists'\n % name)\n data = self.xp.full(shape, numpy.nan, dtype=dtype)\n grad = data.copy()\n var = variable.Variable(data, volatile='auto', name=name)\n var.grad = grad\n self._params.append(name)\n d[name] = var\n\n def add_persistent(self, name, value):\n \"\"\"Registers a persistent value to the link.\n\n The resitered value is saved and loaded on serialization and\n deserialization. The value is set to an attribute of the link.\n\n Args:\n name (str): Name of the persistent value. This name is also used\n for the attribute name.\n value: Value to be registered.\n\n \"\"\"\n d = self.__dict__\n if name in d:\n raise AttributeError(\n 'cannot register a new persistent value %s: attribute exists'\n % name)\n self._persistent.append(name)\n d[name] = value\n\n def copy(self):\n \"\"\"Copies the link hierearchy to new one.\n\n The whole hierarchy rooted by this link is copied. The copy is\n basically shallow, except that the parameter variables are also\n shallowly copied. It means that the parameter variables of copied one\n are different from ones of original link, while they share the data and\n gradient arrays.\n\n The name of the link is reset on the copy, since the copied instance\n does not belong to the original parent chain (even if exists).\n\n Returns:\n Link: Copied link object.\n\n \"\"\"\n ret = copy.copy(self)\n ret._params = list(self._params)\n ret._persistent = list(self._persistent)\n ret.name = None\n d = ret.__dict__\n for name in ret._params:\n d[name] = copy.copy(d[name])\n return ret\n\n def to_cpu(self):\n \"\"\"Copies parameter variables and persistent values to CPU.\n\n This method does not handle non-registered attributes. If some of such\n attributes must be copied to CPU, the link implementation must\n override this method to do so.\n\n Returns: self\n\n \"\"\"\n if self._cpu:\n return self\n d = self.__dict__\n for name in self._params:\n d[name].to_cpu()\n for name in self._persistent:\n value = d[name]\n if isinstance(value, cuda.ndarray):\n d[name] = value.get()\n self._cpu = True\n return self\n\n def to_gpu(self, device=None):\n \"\"\"Copies parameter variables and persistent values to GPU.\n\n This method does not handle non-registered attributes. 
If some of such\n attributes must be copoied to GPU, the link implementation must\n override this method to do so.\n\n Args:\n device: Target device specifier. If omitted, the current device is\n used.\n\n Returns: self\n\n \"\"\"\n cuda.check_cuda_available()\n if not self._cpu:\n return self\n d = self.__dict__\n with cuda.get_device(device):\n for name in self._params:\n d[name].to_gpu()\n for name in self._persistent:\n value = d[name]\n if isinstance(value, numpy.ndarray):\n d[name] = cuda.to_gpu(value)\n self._cpu = False\n return self\n\n def params(self):\n \"\"\"Returns a generator of all parameters under the link hierarchy.\n\n Returns:\n A generator object that generates all parameters.\n\n \"\"\"\n d = self.__dict__\n for name in self._params:\n yield d[name]\n\n def namedparams(self):\n \"\"\"Returns a generator of all (path, param) pairs under the hierarchy.\n\n Returns:\n A generator object that generates all (path, parameter) pairs. The\n paths are relative from this link.\n\n \"\"\"\n d = self.__dict__\n for name in self._params:\n yield '/' + name, d[name]\n\n def links(self, skipself=False):\n \"\"\"Returns a generator of all links under the hierarchy.\n\n Args:\n skipself (bool): If True, then the generator skips this link and\n starts with the first child link.\n\n Returns:\n A generator object that generates all links.\n\n \"\"\"\n if not skipself:\n yield self\n\n def namedlinks(self, skipself=False):\n \"\"\"Returns a generator of all (path, link) pairs under the hierarchy.\n\n Args:\n skipself (bool): If True, then the generator skips this link and\n starts with the first child link.\n\n Returns:\n A generator object that generates all (path, link) pairs.\n\n \"\"\"\n if not skipself:\n yield '/', self\n\n def children(self):\n \"\"\"Returns a generator of all child links.\n\n Returns:\n A generator object that generates all child links.\n\n \"\"\"\n if 0:\n yield\n\n def copyparams(self, link):\n \"\"\"Copies all parameters from given link.\n\n This method copies data arrays of all parameters in the hierarchy. The\n copy is even done across the host and devices. Note that this method\n does not copy the gradient arrays.\n\n Args:\n link (Link): Source link object.\n\n \"\"\"\n src = link.__dict__\n dst = self.__dict__\n for name in self._params:\n dst[name].copydata(src[name])\n\n def zerograds(self):\n \"\"\"Initializes all gradient arrays by zero.\n\n This method should be called before the backward computation at every\n iteration of the optimizations.\n\n \"\"\"\n for param in self.params():\n param.zerograd()\n\n def addgrads(self, link):\n \"\"\"Accumulates gradient values from given link.\n\n This method adds each gradient array of the given link to corresponding\n gradient array of this link. The accumulation is even done across\n host and different devices.\n\n Args:\n link (Link): Source link object.\n\n \"\"\"\n src = link.__dict__\n dst = self.__dict__\n for name in self._params:\n dst[name].addgrad(src[name])\n\n def serialize(self, serializer):\n \"\"\"Serializes the link object.\n\n Args:\n serializer (~chainer.AbstractSerializer): Serializer object.\n\n \"\"\"\n d = self.__dict__\n for name in self._params:\n serializer(name, d[name].data)\n for name in self._persistent:\n d[name] = serializer(name, d[name])\n\n\nclass Chain(Link):\n\n \"\"\"Composable link with object-like interface.\n\n Composability is one of the most important features of neural nets. 
Neural\n net models consist of many reusable fragments, and each model itself might\n be embedded into a larger learnable system. Chain enables us to write a\n neural net based on composition, without bothering about routine works like\n collecting parameters, serialization, copying the structure with parameters\n shared, etc.\n\n This class actually provides a way to compose one or more links into one\n structure. A chain can contain one or more *child links*. Child link is a\n link registered to the chain with its own name. The child link is stored to\n an attribute of the chain with the name. User can write a whole model or a\n fragment of neural nets as a child class of Chain.\n\n Each chain itself is also a link. Therefore, one can combine chains into\n higher-level chains. In this way, links and chains construct a *link\n hierarchy*. Link hierarchy forms a tree structure, where each node is\n identified by the path from the root. The path is represented by a string\n like a file path in UNIX, consisting of names of nodes on the path, joined\n by slashes ``/``.\n\n .. admonition:: Example\n\n This is a simple example of custom chain definition. Chainer itself also\n provides some chains defined under the :mod:`~chainer.links` module.\n They might serve as examples, too.\n\n Consider we want to define a multi-layer perceptron consisting of two\n hidden layers with rectifiers as activation functions. We can use the\n :class:`~chainer.links.Linear` link as a building block::\n\n import chainer\n import chainer.functions as F\n import chainer.links as L\n\n class MultiLayerPerceptron(chainer.Chain):\n\n def __init__(self, n_in, n_hidden, n_out):\n # Create and register three layers for this MLP\n super(MultiLayerPerceptron, self).__init__(\n layer1=L.Linear(n_in, n_hidden),\n layer2=L.Linear(n_hidden, n_hidden),\n layer3=L.Linear(n_hidden, n_out),\n )\n\n def __call__(self, x):\n # Forward propagation\n h1 = F.relu(self.layer1(x))\n h2 = F.relu(self.layer2(h1))\n return self.layer3(h2)\n\n Child links are registered via the initializer method. They also can be\n registered by the :meth:`add_link` method. The forward propagation is\n often implemented as The ``__call__`` operator as the above example,\n though it is not mandatory.\n\n Args:\n links: Child links. The keywords are used as their names. The names are\n also set to the links.\n\n \"\"\"\n def __init__(self, **links):\n super(Chain, self).__init__()\n self._children = []\n\n for name, link in six.iteritems(links):\n self.add_link(name, link)\n\n def __getitem__(self, name):\n \"\"\"Equivalent to getattr.\"\"\"\n return getattr(self, name)\n\n def add_link(self, name, link):\n \"\"\"Regsiters a child link to this chain.\n\n The registered link is saved and loaded on serialization and\n deserialization, and involved in the optimization. The registered link\n is called a child. The child link is set to an attribute of the chain\n with the given name.\n\n This method also sets the :attr:`~Link.name` attribute of the\n registered link. If the given link already has the name attribute set,\n then it raises an error.\n\n Args:\n name (str): Name of the child link. 
This name is also used as the\n attribute name.\n link (Link): The link object to be registered.\n\n \"\"\"\n if link.name is not None:\n raise ValueError(\n 'given link is already registered to another chain by name %s'\n % link.name)\n d = self.__dict__\n if name in d:\n raise AttributeError(\n 'cannot register a new link %s: attribute exists' % name)\n self._children.append(name)\n link.name = name\n d[name] = link\n\n def copy(self):\n ret = super(Chain, self).copy()\n ret._children = list(ret._children)\n d = ret.__dict__\n for name in ret._children:\n # copy child links recursively\n copied = d[name].copy()\n copied.name = name\n d[name] = copied\n return ret\n\n def to_cpu(self):\n super(Chain, self).to_cpu()\n d = self.__dict__\n for name in self._children:\n d[name].to_cpu()\n return self\n\n def to_gpu(self, device=None):\n with cuda.get_device(device):\n super(Chain, self).to_gpu()\n d = self.__dict__\n for name in self._children:\n d[name].to_gpu()\n return self\n\n def params(self):\n for param in super(Chain, self).params():\n yield param\n d = self.__dict__\n for name in self._children:\n for param in d[name].params():\n yield param\n\n def namedparams(self):\n for ret in super(Chain, self).namedparams():\n yield ret\n d = self.__dict__\n for name in self._children:\n prefix = '/' + name\n for path, param in d[name].namedparams():\n yield prefix + path, param\n\n def links(self, skipself=False):\n if not skipself:\n yield self\n d = self.__dict__\n for name in self._children:\n for link in d[name].links():\n yield link\n\n def namedlinks(self, skipself=False):\n if not skipself:\n yield '/', self\n d = self.__dict__\n for name in self._children:\n child = d[name]\n prefix = '/' + name\n yield prefix, child\n for path, link in d[name].namedlinks(True):\n yield prefix + path, link\n\n def children(self):\n d = self.__dict__\n for name in self._children:\n yield d[name]\n\n def copyparams(self, link):\n super(Chain, self).copyparams(link)\n src = link.__dict__\n dst = self.__dict__\n for name in self._children:\n dst[name].copyparams(src[name])\n\n def zerograds(self):\n super(Chain, self).zerograds()\n d = self.__dict__\n for name in self._children:\n d[name].zerograds()\n\n def addgrads(self, link):\n super(Chain, self).addgrads(link)\n src = link.__dict__\n dst = self.__dict__\n for name in self._children:\n dst[name].addgrads(src[name])\n\n def serialize(self, serializer):\n super(Chain, self).serialize(serializer)\n d = self.__dict__\n for name in self._children:\n d[name].serialize(serializer[name])\n\n\nclass ChainList(Link):\n\n \"\"\"Composable link with list-like interface.\n\n This is another example of compositional link. Unlike :class:`Chain`, this\n class can be used like a list of child links. Each child link is indexed by\n a non-negative integer, and it maintains the current number of registered\n child links. The :meth:`add_link` method inserts a new link at the end of\n the list. It is useful to write a chain with arbitrary number of child\n links, e.g. 
an arbitrarily deep multi-layer perceptron.\n\n Note that this class does not implement all methods of :class:`list`.\n\n Args:\n links: Initial child links.\n\n \"\"\"\n def __init__(self, *links):\n super(ChainList, self).__init__()\n self._children = []\n\n for link in links:\n self.add_link(link)\n\n def __getitem__(self, index):\n \"\"\"Returns the child at given index.\n\n Args:\n index (int): Index of the child in the list.\n\n Returns:\n Link: The ``index``-th child link.\n\n \"\"\"\n return self._children[index]\n\n def __iter__(self):\n return iter(self._children)\n\n def __len__(self):\n \"\"\"Returns a number of children.\"\"\"\n return len(self._children)\n\n def add_link(self, link):\n \"\"\"Registers a child link to this chain.\n\n The registered link is saved and loaded on serialization and\n deserialization, and involved in the optimization. The registered link\n is called a child. The child link is accessible via :meth:`children`\n generator, which returns a generator running through the children in\n registered order.\n\n This method also sets the :attr:`~Link.name` attribute of the\n registered link. If the given link already has the name attribute set,\n then it raises an error.\n\n Args:\n link (Link): The link object to be registered.\n\n \"\"\"\n if link.name is not None:\n raise ValueError(\n 'given link is already registered to another chain by name %s'\n % link.name)\n link.name = str(len(self._children))\n self._children.append(link)\n\n def copy(self):\n ret = super(ChainList, self).copy()\n children = ret._children\n for i, child in enumerate(children):\n child = child.copy()\n child.name = str(i)\n children[i] = child\n return ret\n\n def to_cpu(self):\n super(ChainList, self).to_cpu()\n for link in self._children:\n link.to_cpu()\n return self\n\n def to_gpu(self, device=None):\n with cuda.get_device(device):\n super(ChainList, self).to_gpu()\n for link in self._children:\n link.to_gpu()\n return self\n\n def params(self):\n for param in super(ChainList, self).params():\n yield param\n for link in self._children:\n for param in link.params():\n yield param\n\n def namedparams(self):\n for ret in super(ChainList, self).namedparams():\n yield ret\n for idx, link in enumerate(self._children):\n prefix = '/%d' % idx\n for path, param in link.namedparams():\n yield prefix + path, param\n\n def links(self, skipself=False):\n if not skipself:\n yield self\n for child in self._children:\n for link in child.links():\n yield link\n\n def namedlinks(self, skipself=False):\n if not skipself:\n yield '/', self\n for idx, child in enumerate(self._children):\n prefix = '/%d' % idx\n yield prefix, child\n for path, link in child.namedlinks(True):\n yield prefix + path, link\n\n def children(self):\n for child in self._children:\n yield child\n\n def copyparams(self, link):\n super(ChainList, self).copyparams(link)\n for idx, child in enumerate(self._children):\n child.copyparams(link[idx])\n\n def zerograds(self):\n super(ChainList, self).zerograds()\n for child in self._children:\n child.zerograds()\n\n def addgrads(self, link):\n super(ChainList, self).addgrads(link)\n for idx, child in enumerate(self._children):\n child.addgrads(link[idx])\n\n def serialize(self, serializer):\n super(ChainList, self).serialize(serializer)\n for idx, child in enumerate(self._children):\n child.serialize(serializer['%d' % idx])\n", "path": "chainer/link.py" } ]
[ { "content": "import copy\n\nimport numpy\nimport six\n\nfrom chainer import cuda\nfrom chainer import variable\n\n\nclass Link(object):\n\n \"\"\"Building block of model definitions.\n\n Link is a building block of neural network models that support various\n features like handling parameters, defining network fragments,\n serialization, etc.\n\n Link is the primitive structure for the model definitions. It supports\n management of parameter variables and *persistent values* that should be\n incorporated to serialization. Parameters are variables registered via\n the :meth:`add_param` method, or given to the initializer method.\n Persistent values are arrays, scalars, or any other serializable values\n registered via the :meth:`add_persistent` method.\n\n .. note::\n Whereas arbitrary serializable objects can be registered as persistent\n values, it is strongly recommended to just register values that should\n be treated as results of learning. A typical example of persistent\n values is ones computed during training and required for testing, e.g.\n running statistics for batch normalization.\n\n Parameters and persistent values are referred by their names. They can be\n accessed as attributes of the names. Link class itself manages the lists\n of names of parameters and persistent values to distinguish parameters and\n persistent values from other attributes.\n\n Link can be composed into more complex models. This composition feature is\n supported by child classes like :class:`Chain` and :class:`ChainList`. One\n can create a chain by combining one or more links. See the documents for\n these classes for details.\n\n As noted above, Link supports the serialization protocol of the\n :class:`~chainer.Serializer` class. **Note that only parameters and\n persistent values are saved and loaded.** Other attributes are considered\n as a part of user program (i.e. a part of network definition). In order to\n construct a link from saved file, other attributes must be identically\n reconstructed by user codes.\n\n .. admonition:: Example\n\n This is a simple example of custom link definition. Chainer itself also\n provides many links defined under the :mod:`~chainer.links` module. They\n might serve as examples, too.\n\n Consider we want to define a simple primitive link that implements a\n fully-connected layer based on the :func:`~functions.linear` function.\n Note that this function takes input units, a weight variable, and a bias\n variable as arguments. Then, the fully-connected layer can be defined as\n follows::\n\n import chainer\n import chainer.functions as F\n import numpy as np\n\n class LinearLayer(chainer.Link):\n\n def __init__(self, n_in, n_out):\n # Parameters are initialized as a numpy array of given shape.\n super(LinearLayer, self).__init__(\n W=(n_out, n_in),\n b=(n_out,),\n )\n self.W.data[...] = np.random.randn(n_out, n_in)\n self.b.data.fill(0)\n\n def __call__(self, x):\n return F.linear(x, self.W, self.b)\n\n This example shows that a user can define arbitrary parameters and use\n them in any methods. Links typically implement the ``__call__``\n operator.\n\n Args:\n params: Shapes of initial parameters. The keywords are used as their\n names. 
The names are also set to the parameter variables.\n\n Attributes:\n name (str): Name of this link, given by the parent chain (if exists).\n\n \"\"\"\n def __init__(self, **params):\n self._params = []\n self._persistent = []\n self._cpu = True\n self.name = None\n\n for name, shape in six.iteritems(params):\n self.add_param(name, shape)\n\n @property\n def xp(self):\n \"\"\"Array module for this link.\n\n Depending on which of CPU/GPU this link is on, this property returns\n :mod:`numpy` or :mod:`cupy`.\n\n \"\"\"\n return numpy if self._cpu else cuda.cupy\n\n def add_param(self, name, shape, dtype=numpy.float32):\n \"\"\"Registers a parameter to the link.\n\n The registered parameter is saved and loaded on serialization and\n deserialization, and involved in the optimization. The data and\n gradient of the variable are initialized by NaN arrays.\n\n The parameter is set to an attribute of the link with the given name.\n\n Args:\n name (str): Name of the parameter. This name is also used as the\n attribute name.\n shape (int or tuple of ints): Shape of the parameter array.\n dtype: Data type of the parameter array.\n\n \"\"\"\n d = self.__dict__\n if name in d:\n raise AttributeError(\n 'cannot register a new parameter %s: attribute exists'\n % name)\n data = self.xp.full(shape, numpy.nan, dtype=dtype)\n grad = data.copy()\n var = variable.Variable(data, volatile='auto', name=name)\n var.grad = grad\n self._params.append(name)\n d[name] = var\n\n def add_persistent(self, name, value):\n \"\"\"Registers a persistent value to the link.\n\n The resitered value is saved and loaded on serialization and\n deserialization. The value is set to an attribute of the link.\n\n Args:\n name (str): Name of the persistent value. This name is also used\n for the attribute name.\n value: Value to be registered.\n\n \"\"\"\n d = self.__dict__\n if name in d:\n raise AttributeError(\n 'cannot register a new persistent value %s: attribute exists'\n % name)\n self._persistent.append(name)\n d[name] = value\n\n def copy(self):\n \"\"\"Copies the link hierearchy to new one.\n\n The whole hierarchy rooted by this link is copied. The copy is\n basically shallow, except that the parameter variables are also\n shallowly copied. It means that the parameter variables of copied one\n are different from ones of original link, while they share the data and\n gradient arrays.\n\n The name of the link is reset on the copy, since the copied instance\n does not belong to the original parent chain (even if exists).\n\n Returns:\n Link: Copied link object.\n\n \"\"\"\n ret = copy.copy(self)\n ret._params = list(self._params)\n ret._persistent = list(self._persistent)\n ret.name = None\n d = ret.__dict__\n for name in ret._params:\n d[name] = copy.copy(d[name])\n return ret\n\n def to_cpu(self):\n \"\"\"Copies parameter variables and persistent values to CPU.\n\n This method does not handle non-registered attributes. If some of such\n attributes must be copied to CPU, the link implementation must\n override this method to do so.\n\n Returns: self\n\n \"\"\"\n if self._cpu:\n return self\n d = self.__dict__\n for name in self._params:\n d[name].to_cpu()\n for name in self._persistent:\n value = d[name]\n if isinstance(value, cuda.ndarray):\n d[name] = value.get()\n self._cpu = True\n return self\n\n def to_gpu(self, device=None):\n \"\"\"Copies parameter variables and persistent values to GPU.\n\n This method does not handle non-registered attributes. 
If some of such\n attributes must be copoied to GPU, the link implementation must\n override this method to do so.\n\n Args:\n device: Target device specifier. If omitted, the current device is\n used.\n\n Returns: self\n\n \"\"\"\n cuda.check_cuda_available()\n if not self._cpu:\n return self\n d = self.__dict__\n with cuda.get_device(device):\n for name in self._params:\n d[name].to_gpu()\n for name in self._persistent:\n value = d[name]\n if isinstance(value, numpy.ndarray):\n d[name] = cuda.to_gpu(value)\n self._cpu = False\n return self\n\n def params(self):\n \"\"\"Returns a generator of all parameters under the link hierarchy.\n\n Returns:\n A generator object that generates all parameters.\n\n \"\"\"\n d = self.__dict__\n for name in self._params:\n yield d[name]\n\n def namedparams(self):\n \"\"\"Returns a generator of all (path, param) pairs under the hierarchy.\n\n Returns:\n A generator object that generates all (path, parameter) pairs. The\n paths are relative from this link.\n\n \"\"\"\n d = self.__dict__\n for name in self._params:\n yield '/' + name, d[name]\n\n def links(self, skipself=False):\n \"\"\"Returns a generator of all links under the hierarchy.\n\n Args:\n skipself (bool): If True, then the generator skips this link and\n starts with the first child link.\n\n Returns:\n A generator object that generates all links.\n\n \"\"\"\n if not skipself:\n yield self\n\n def namedlinks(self, skipself=False):\n \"\"\"Returns a generator of all (path, link) pairs under the hierarchy.\n\n Args:\n skipself (bool): If True, then the generator skips this link and\n starts with the first child link.\n\n Returns:\n A generator object that generates all (path, link) pairs.\n\n \"\"\"\n if not skipself:\n yield '/', self\n\n def children(self):\n \"\"\"Returns a generator of all child links.\n\n Returns:\n A generator object that generates all child links.\n\n \"\"\"\n if 0:\n yield\n\n def copyparams(self, link):\n \"\"\"Copies all parameters from given link.\n\n This method copies data arrays of all parameters in the hierarchy. The\n copy is even done across the host and devices. Note that this method\n does not copy the gradient arrays.\n\n Args:\n link (Link): Source link object.\n\n \"\"\"\n src = link.__dict__\n dst = self.__dict__\n for name in self._params:\n dst[name].copydata(src[name])\n\n def zerograds(self):\n \"\"\"Initializes all gradient arrays by zero.\n\n This method should be called before the backward computation at every\n iteration of the optimizations.\n\n \"\"\"\n for param in self.params():\n param.zerograd()\n\n def addgrads(self, link):\n \"\"\"Accumulates gradient values from given link.\n\n This method adds each gradient array of the given link to corresponding\n gradient array of this link. The accumulation is even done across\n host and different devices.\n\n Args:\n link (Link): Source link object.\n\n \"\"\"\n src = link.__dict__\n dst = self.__dict__\n for name in self._params:\n dst[name].addgrad(src[name])\n\n def serialize(self, serializer):\n \"\"\"Serializes the link object.\n\n Args:\n serializer (~chainer.AbstractSerializer): Serializer object.\n\n \"\"\"\n d = self.__dict__\n for name in self._params:\n serializer(name, d[name].data)\n for name in self._persistent:\n d[name] = serializer(name, d[name])\n\n\nclass Chain(Link):\n\n \"\"\"Composable link with object-like interface.\n\n Composability is one of the most important features of neural nets. 
Neural\n net models consist of many reusable fragments, and each model itself might\n be embedded into a larger learnable system. Chain enables us to write a\n neural net based on composition, without bothering about routine works like\n collecting parameters, serialization, copying the structure with parameters\n shared, etc.\n\n This class actually provides a way to compose one or more links into one\n structure. A chain can contain one or more *child links*. Child link is a\n link registered to the chain with its own name. The child link is stored to\n an attribute of the chain with the name. User can write a whole model or a\n fragment of neural nets as a child class of Chain.\n\n Each chain itself is also a link. Therefore, one can combine chains into\n higher-level chains. In this way, links and chains construct a *link\n hierarchy*. Link hierarchy forms a tree structure, where each node is\n identified by the path from the root. The path is represented by a string\n like a file path in UNIX, consisting of names of nodes on the path, joined\n by slashes ``/``.\n\n .. admonition:: Example\n\n This is a simple example of custom chain definition. Chainer itself also\n provides some chains defined under the :mod:`~chainer.links` module.\n They might serve as examples, too.\n\n Consider we want to define a multi-layer perceptron consisting of two\n hidden layers with rectifiers as activation functions. We can use the\n :class:`~chainer.links.Linear` link as a building block::\n\n import chainer\n import chainer.functions as F\n import chainer.links as L\n\n class MultiLayerPerceptron(chainer.Chain):\n\n def __init__(self, n_in, n_hidden, n_out):\n # Create and register three layers for this MLP\n super(MultiLayerPerceptron, self).__init__(\n layer1=L.Linear(n_in, n_hidden),\n layer2=L.Linear(n_hidden, n_hidden),\n layer3=L.Linear(n_hidden, n_out),\n )\n\n def __call__(self, x):\n # Forward propagation\n h1 = F.relu(self.layer1(x))\n h2 = F.relu(self.layer2(h1))\n return self.layer3(h2)\n\n Child links are registered via the initializer method. They also can be\n registered by the :meth:`add_link` method. The forward propagation is\n often implemented as The ``__call__`` operator as the above example,\n though it is not mandatory.\n\n Args:\n links: Child links. The keywords are used as their names. The names are\n also set to the links.\n\n \"\"\"\n def __init__(self, **links):\n super(Chain, self).__init__()\n self._children = []\n\n for name, link in six.iteritems(links):\n self.add_link(name, link)\n\n def __getitem__(self, name):\n \"\"\"Equivalent to getattr.\"\"\"\n return getattr(self, name)\n\n def add_link(self, name, link):\n \"\"\"Regsiters a child link to this chain.\n\n The registered link is saved and loaded on serialization and\n deserialization, and involved in the optimization. The registered link\n is called a child. The child link is set to an attribute of the chain\n with the given name.\n\n This method also sets the :attr:`~Link.name` attribute of the\n registered link. If the given link already has the name attribute set,\n then it raises an error.\n\n Args:\n name (str): Name of the child link. 
This name is also used as the\n attribute name.\n link (Link): The link object to be registered.\n\n \"\"\"\n if link.name is not None:\n raise ValueError(\n 'given link is already registered to another chain by name %s'\n % link.name)\n d = self.__dict__\n if name in d:\n raise AttributeError(\n 'cannot register a new link %s: attribute exists' % name)\n self._children.append(name)\n link.name = name\n d[name] = link\n\n def copy(self):\n ret = super(Chain, self).copy()\n ret._children = list(ret._children)\n d = ret.__dict__\n for name in ret._children:\n # copy child links recursively\n copied = d[name].copy()\n copied.name = name\n d[name] = copied\n return ret\n\n def to_cpu(self):\n super(Chain, self).to_cpu()\n d = self.__dict__\n for name in self._children:\n d[name].to_cpu()\n return self\n\n def to_gpu(self, device=None):\n with cuda.get_device(device):\n super(Chain, self).to_gpu()\n d = self.__dict__\n for name in self._children:\n d[name].to_gpu()\n return self\n\n def params(self):\n for param in super(Chain, self).params():\n yield param\n d = self.__dict__\n for name in self._children:\n for param in d[name].params():\n yield param\n\n def namedparams(self):\n for ret in super(Chain, self).namedparams():\n yield ret\n d = self.__dict__\n for name in self._children:\n prefix = '/' + name\n for path, param in d[name].namedparams():\n yield prefix + path, param\n\n def links(self, skipself=False):\n if not skipself:\n yield self\n d = self.__dict__\n for name in self._children:\n for link in d[name].links():\n yield link\n\n def namedlinks(self, skipself=False):\n if not skipself:\n yield '/', self\n d = self.__dict__\n for name in self._children:\n child = d[name]\n prefix = '/' + name\n yield prefix, child\n for path, link in d[name].namedlinks(True):\n yield prefix + path, link\n\n def children(self):\n d = self.__dict__\n for name in self._children:\n yield d[name]\n\n def copyparams(self, link):\n super(Chain, self).copyparams(link)\n src = link.__dict__\n dst = self.__dict__\n for name in self._children:\n dst[name].copyparams(src[name])\n\n def zerograds(self):\n super(Chain, self).zerograds()\n d = self.__dict__\n for name in self._children:\n d[name].zerograds()\n\n def addgrads(self, link):\n super(Chain, self).addgrads(link)\n src = link.__dict__\n dst = self.__dict__\n for name in self._children:\n dst[name].addgrads(src[name])\n\n def serialize(self, serializer):\n super(Chain, self).serialize(serializer)\n d = self.__dict__\n for name in self._children:\n d[name].serialize(serializer[name])\n\n\nclass ChainList(Link):\n\n \"\"\"Composable link with list-like interface.\n\n This is another example of compositional link. Unlike :class:`Chain`, this\n class can be used like a list of child links. Each child link is indexed by\n a non-negative integer, and it maintains the current number of registered\n child links. The :meth:`add_link` method inserts a new link at the end of\n the list. It is useful to write a chain with arbitrary number of child\n links, e.g. 
an arbitrarily deep multi-layer perceptron.\n\n Note that this class does not implement all methods of :class:`list`.\n\n Args:\n links: Initial child links.\n\n \"\"\"\n def __init__(self, *links):\n super(ChainList, self).__init__()\n self._children = []\n\n for link in links:\n self.add_link(link)\n\n def __getitem__(self, index):\n \"\"\"Returns the child at given index.\n\n Args:\n index (int): Index of the child in the list.\n\n Returns:\n Link: The ``index``-th child link.\n\n \"\"\"\n return self._children[index]\n\n def __iter__(self):\n return iter(self._children)\n\n def __len__(self):\n \"\"\"Returns a number of children.\"\"\"\n return len(self._children)\n\n def add_link(self, link):\n \"\"\"Registers a child link to this chain.\n\n The registered link is saved and loaded on serialization and\n deserialization, and involved in the optimization. The registered link\n is called a child. The child link is accessible via :meth:`children`\n generator, which returns a generator running through the children in\n registered order.\n\n This method also sets the :attr:`~Link.name` attribute of the\n registered link. If the given link already has the name attribute set,\n then it raises an error.\n\n Args:\n link (Link): The link object to be registered.\n\n \"\"\"\n if link.name is not None:\n raise ValueError(\n 'given link is already registered to another chain by name %s'\n % link.name)\n link.name = str(len(self._children))\n self._children.append(link)\n\n def copy(self):\n ret = super(ChainList, self).copy()\n ret._children = list(ret._children) # copy\n children = ret._children\n for i, child in enumerate(children):\n child = child.copy()\n child.name = str(i)\n children[i] = child\n return ret\n\n def to_cpu(self):\n super(ChainList, self).to_cpu()\n for link in self._children:\n link.to_cpu()\n return self\n\n def to_gpu(self, device=None):\n with cuda.get_device(device):\n super(ChainList, self).to_gpu()\n for link in self._children:\n link.to_gpu()\n return self\n\n def params(self):\n for param in super(ChainList, self).params():\n yield param\n for link in self._children:\n for param in link.params():\n yield param\n\n def namedparams(self):\n for ret in super(ChainList, self).namedparams():\n yield ret\n for idx, link in enumerate(self._children):\n prefix = '/%d' % idx\n for path, param in link.namedparams():\n yield prefix + path, param\n\n def links(self, skipself=False):\n if not skipself:\n yield self\n for child in self._children:\n for link in child.links():\n yield link\n\n def namedlinks(self, skipself=False):\n if not skipself:\n yield '/', self\n for idx, child in enumerate(self._children):\n prefix = '/%d' % idx\n yield prefix, child\n for path, link in child.namedlinks(True):\n yield prefix + path, link\n\n def children(self):\n for child in self._children:\n yield child\n\n def copyparams(self, link):\n super(ChainList, self).copyparams(link)\n for idx, child in enumerate(self._children):\n child.copyparams(link[idx])\n\n def zerograds(self):\n super(ChainList, self).zerograds()\n for child in self._children:\n child.zerograds()\n\n def addgrads(self, link):\n super(ChainList, self).addgrads(link)\n for idx, child in enumerate(self._children):\n child.addgrads(link[idx])\n\n def serialize(self, serializer):\n super(ChainList, self).serialize(serializer)\n for idx, child in enumerate(self._children):\n child.serialize(serializer['%d' % idx])\n", "path": "chainer/link.py" } ]
diff --git a/chainer/link.py b/chainer/link.py index 0ca1df1dc929..907ddf2ee38b 100644 --- a/chainer/link.py +++ b/chainer/link.py @@ -615,6 +615,7 @@ def add_link(self, link): def copy(self): ret = super(ChainList, self).copy() + ret._children = list(ret._children) # copy children = ret._children for i, child in enumerate(children): child = child.copy() diff --git a/tests/chainer_tests/test_link.py b/tests/chainer_tests/test_link.py index 689aebdb092b..cc9c4c37e218 100644 --- a/tests/chainer_tests/test_link.py +++ b/tests/chainer_tests/test_link.py @@ -463,6 +463,34 @@ def test_copy(self): self.assertIs(c2[1].x.data, self.l3.x.data) self.assertIs(c2[1].x.grad, None) + @attr.gpu + def test_copy_and_send_to_gpu(self): + c2 = self.c2.copy() + self.c2.to_gpu() + self.assertIsInstance(self.c2[0][0].x.data, cuda.cupy.ndarray) + self.assertIsInstance(self.c2[0][1].x.data, cuda.cupy.ndarray) + self.assertIsInstance(c2[0][0].x.data, numpy.ndarray) + self.assertIsInstance(c2[0][1].x.data, numpy.ndarray) + + @attr.gpu + def test_copy_and_send_to_gpu_2(self): + c2 = self.c2.copy() + c2.to_gpu() + self.assertIsInstance(self.c2[0][0].x.data, numpy.ndarray) + self.assertIsInstance(self.c2[0][1].x.data, numpy.ndarray) + self.assertIsInstance(c2[0][0].x.data, cuda.cupy.ndarray) + self.assertIsInstance(c2[0][1].x.data, cuda.cupy.ndarray) + + @attr.multi_gpu(2) + def test_copy_and_send_to_gpu_multi(self): + c2 = self.c2.copy() + self.c2.to_gpu(0) + c2.to_gpu(1) + self.assertEqual(self.c2[0][0].x.data.device.id, 0) + self.assertEqual(self.c2[0][1].x.data.device.id, 0) + self.assertEqual(c2[0][0].x.data.device.id, 1) + self.assertEqual(c2[0][1].x.data.device.id, 1) + def test_to_cpu_on_cpu(self): x1 = self.l1.x.data gx1 = self.l1.x.grad
pennersr__django-allauth-3283
`OpenIDConnectProvider` generates its slug from `_server_id` and does not fall back to the inherited implementation
`KeycloakProvider` started using `openid_connect` as its base URL once `OpenIDConnectProvider` implemented `get_slug`: when `_server_id` is unset, the override returns the hard-coded string `"openid_connect"` instead of deferring to `super().get_slug()`.
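A self-contained sketch of the reported behaviour (the `Provider` base class here is a hypothetical stand-in; allauth's real base is assumed to derive the slug from the provider `id`):

```python
class Provider(object):
    id = 'base'

    @classmethod
    def get_slug(cls):
        return cls.id  # assumed inherited behaviour: slug comes from the id


class OpenIDConnectProvider(Provider):
    id = 'openid_connect'
    _server_id = None

    @classmethod
    def get_slug(cls):
        # the buggy override: never defers to the parent class
        return cls._server_id if cls._server_id else 'openid_connect'


class KeycloakProvider(OpenIDConnectProvider):
    id = 'keycloak'


print(KeycloakProvider.get_slug())  # 'openid_connect' -- 'keycloak' expected
```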
[ { "content": "# -*- coding: utf-8 -*-\nfrom allauth.account.models import EmailAddress\nfrom allauth.socialaccount import app_settings\nfrom allauth.socialaccount.providers.base import ProviderAccount\nfrom allauth.socialaccount.providers.oauth2.provider import OAuth2Provider\n\n\nclass OpenIDConnectProviderAccount(ProviderAccount):\n def to_str(self):\n dflt = super(OpenIDConnectProviderAccount, self).to_str()\n return self.account.extra_data.get(\"name\", dflt)\n\n\nclass OpenIDConnectProvider(OAuth2Provider):\n id = \"openid_connect\"\n name = \"OpenID Connect\"\n _server_id = None\n _server_url = None\n account_class = OpenIDConnectProviderAccount\n\n @property\n def server_url(self):\n well_known_uri = \"/.well-known/openid-configuration\"\n url = self._server_url\n if not url.endswith(well_known_uri):\n url += well_known_uri\n return url\n\n @property\n def token_auth_method(self):\n return app_settings.PROVIDERS.get(self.id, {}).get(\"token_auth_method\")\n\n @classmethod\n def get_slug(cls):\n return cls._server_id if cls._server_id else \"openid_connect\"\n\n def get_default_scope(self):\n return [\"openid\", \"profile\", \"email\"]\n\n def extract_uid(self, data):\n return str(data[\"sub\"])\n\n def extract_common_fields(self, data):\n return dict(\n email=data.get(\"email\"),\n username=data.get(\"preferred_username\"),\n name=data.get(\"name\"),\n user_id=data.get(\"user_id\"),\n picture=data.get(\"picture\"),\n )\n\n def extract_email_addresses(self, data):\n addresses = []\n email = data.get(\"email\")\n if email:\n addresses.append(\n EmailAddress(\n email=email,\n verified=data.get(\"email_verified\", False),\n primary=True,\n )\n )\n return addresses\n\n\ndef _provider_factory(server_settings):\n class OpenIDConnectProviderServer(OpenIDConnectProvider):\n name = server_settings.get(\"name\", OpenIDConnectProvider.name)\n id = server_settings[\"id\"]\n _server_id = server_settings[\"id\"]\n _server_url = server_settings[\"server_url\"]\n\n def get_app(self, request, config=None):\n return super().get_app(request, config=server_settings.get(\"APP\"))\n\n OpenIDConnectProviderServer.__name__ = (\n \"OpenIDConnectProviderServer_\" + server_settings[\"id\"]\n )\n app_settings.PROVIDERS.setdefault(OpenIDConnectProviderServer.id, {})\n app_settings.PROVIDERS[OpenIDConnectProviderServer.id].update(server_settings)\n return OpenIDConnectProviderServer\n\n\nprovider_classes = [\n _provider_factory(server_settings)\n for server_settings in app_settings.PROVIDERS.get(OpenIDConnectProvider.id, {}).get(\n \"SERVERS\", []\n )\n]\n", "path": "allauth/socialaccount/providers/openid_connect/provider.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nfrom allauth.account.models import EmailAddress\nfrom allauth.socialaccount import app_settings\nfrom allauth.socialaccount.providers.base import ProviderAccount\nfrom allauth.socialaccount.providers.oauth2.provider import OAuth2Provider\n\n\nclass OpenIDConnectProviderAccount(ProviderAccount):\n def to_str(self):\n dflt = super(OpenIDConnectProviderAccount, self).to_str()\n return self.account.extra_data.get(\"name\", dflt)\n\n\nclass OpenIDConnectProvider(OAuth2Provider):\n id = \"openid_connect\"\n name = \"OpenID Connect\"\n _server_id = None\n _server_url = None\n account_class = OpenIDConnectProviderAccount\n\n @property\n def server_url(self):\n well_known_uri = \"/.well-known/openid-configuration\"\n url = self._server_url\n if not url.endswith(well_known_uri):\n url += well_known_uri\n return url\n\n @property\n def token_auth_method(self):\n return app_settings.PROVIDERS.get(self.id, {}).get(\"token_auth_method\")\n\n @classmethod\n def get_slug(cls):\n return cls._server_id or super().get_slug()\n\n def get_default_scope(self):\n return [\"openid\", \"profile\", \"email\"]\n\n def extract_uid(self, data):\n return str(data[\"sub\"])\n\n def extract_common_fields(self, data):\n return dict(\n email=data.get(\"email\"),\n username=data.get(\"preferred_username\"),\n name=data.get(\"name\"),\n user_id=data.get(\"user_id\"),\n picture=data.get(\"picture\"),\n )\n\n def extract_email_addresses(self, data):\n addresses = []\n email = data.get(\"email\")\n if email:\n addresses.append(\n EmailAddress(\n email=email,\n verified=data.get(\"email_verified\", False),\n primary=True,\n )\n )\n return addresses\n\n\ndef _provider_factory(server_settings):\n class OpenIDConnectProviderServer(OpenIDConnectProvider):\n name = server_settings.get(\"name\", OpenIDConnectProvider.name)\n id = server_settings[\"id\"]\n _server_id = server_settings[\"id\"]\n _server_url = server_settings[\"server_url\"]\n\n def get_app(self, request, config=None):\n return super().get_app(request, config=server_settings.get(\"APP\"))\n\n OpenIDConnectProviderServer.__name__ = (\n \"OpenIDConnectProviderServer_\" + server_settings[\"id\"]\n )\n app_settings.PROVIDERS.setdefault(OpenIDConnectProviderServer.id, {})\n app_settings.PROVIDERS[OpenIDConnectProviderServer.id].update(server_settings)\n return OpenIDConnectProviderServer\n\n\nprovider_classes = [\n _provider_factory(server_settings)\n for server_settings in app_settings.PROVIDERS.get(OpenIDConnectProvider.id, {}).get(\n \"SERVERS\", []\n )\n]\n", "path": "allauth/socialaccount/providers/openid_connect/provider.py" } ]
diff --git a/allauth/socialaccount/providers/openid_connect/provider.py b/allauth/socialaccount/providers/openid_connect/provider.py index 455f2566e0..c94ad717be 100644 --- a/allauth/socialaccount/providers/openid_connect/provider.py +++ b/allauth/socialaccount/providers/openid_connect/provider.py @@ -32,7 +32,7 @@ def token_auth_method(self): @classmethod def get_slug(cls): - return cls._server_id if cls._server_id else "openid_connect" + return cls._server_id or super().get_slug() def get_default_scope(self): return ["openid", "profile", "email"]
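Applied to the stand-in classes from the sketch above, the fixed override would behave as expected:

```python
class FixedOpenIDConnectProvider(Provider):
    id = 'openid_connect'
    _server_id = None

    @classmethod
    def get_slug(cls):
        # falls back to the inherited slug when _server_id is unset
        return cls._server_id or super().get_slug()


class FixedKeycloakProvider(FixedOpenIDConnectProvider):
    id = 'keycloak'


print(FixedKeycloakProvider.get_slug())  # 'keycloak'
```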
scikit-image__scikit-image-3152
skimage.test does not execute the unit test ## Description `skimage.test` does not run the unit tests. ``` ~$ python -c "import skimage; print(skimage.__version__); skimage.test()" 0.14.0 ====================================================================== test session starts ====================================================================== platform linux -- Python 3.6.5, pytest-3.6.1, py-1.5.3, pluggy-0.6.0 rootdir: /home/jhelmus, inifile: ================================================================= no tests ran in 0.00 seconds ================================================================== ERROR: file not found: skimage ``` <details><summary>Environment Details</summary> ``` $ conda info active environment : sktest active env location : /home/jhelmus/anaconda3/envs/sktest shell level : 1 user config file : /home/jhelmus/.condarc populated config files : /home/jhelmus/.condarc conda version : 4.5.4 conda-build version : 3.9.1 python version : 3.6.4.final.0 base environment : /home/jhelmus/anaconda3 (writable) channel URLs : https://repo.anaconda.com/pkgs/main/linux-64 https://repo.anaconda.com/pkgs/main/noarch https://repo.anaconda.com/pkgs/free/linux-64 https://repo.anaconda.com/pkgs/free/noarch https://repo.anaconda.com/pkgs/r/linux-64 https://repo.anaconda.com/pkgs/r/noarch https://repo.anaconda.com/pkgs/pro/linux-64 https://repo.anaconda.com/pkgs/pro/noarch package cache : /home/jhelmus/anaconda3/pkgs /home/jhelmus/.conda/pkgs envs directories : /home/jhelmus/anaconda3/envs /home/jhelmus/.conda/envs platform : linux-64 user-agent : conda/4.5.4 requests/2.18.4 CPython/3.6.4 Linux/4.13.0-41-generic ubuntu/16.04 glibc/2.23 UID:GID : 1000:1000 netrc file : None offline mode : False $ conda create -n sktest python=3.6 pip Solving environment: done ## Package Plan ## environment location: /home/jhelmus/anaconda3/envs/sktest added / updated specs: - pip - python=3.6 The following NEW packages will be INSTALLED: ca-certificates: 2018.03.07-0 defaults certifi: 2018.4.16-py36_0 defaults libedit: 3.1.20170329-h6b74fdf_2 defaults libffi: 3.2.1-hd88cf55_4 defaults libgcc-ng: 7.2.0-hdf63c60_3 defaults libstdcxx-ng: 7.2.0-hdf63c60_3 defaults ncurses: 6.1-hf484d3e_0 defaults openssl: 1.0.2o-h20670df_0 defaults pip: 10.0.1-py36_0 defaults python: 3.6.5-hc3d631a_2 defaults readline: 7.0-ha6073c6_4 defaults setuptools: 39.2.0-py36_0 defaults sqlite: 3.23.1-he433501_0 defaults tk: 8.6.7-hc745277_3 defaults wheel: 0.31.1-py36_0 defaults xz: 5.2.4-h14c3975_4 defaults zlib: 1.2.11-ha838bed_2 defaults Proceed ([y]/n)? y ... $ pip install scikit-image Collecting scikit-image Using cached https://files.pythonhosted.org/packages/34/79/cefff573a53ca3fb4c390739d19541b95f371e24d2990aed4cd8837971f0/scikit_image-0.14.0-cp36-cp36m-manylinux1_x86_64.whl ... Successfully installed PyWavelets-0.5.2 cloudpickle-0.5.3 cycler-0.10.0 dask-0.17.5 decorator-4.3.0 kiwisolver-1.0.1 matplotlib-2.2.2 networkx-2.1 numpy-1.14.3 pillow-5.1.0 pyparsing-2.2.0 python-dateutil-2.7.3 pytz-2018.4 scikit-image-0.14.0 scipy-1.1.0 six-1.11.0 toolz-0.9.0 $ pip install pytest Collecting pytest Using cached https://files.pythonhosted.org/packages/d3/75/e79b66c9fe6166a90004bb8fb02bab06213c3348e93f3be41d7eaf625554/pytest-3.6.1-py2.py3-none-any.whl Collecting pluggy<0.7,>=0.5 (from pytest) ... 
Successfully installed atomicwrites-1.1.5 attrs-18.1.0 more-itertools-4.2.0 pluggy-0.6.0 py-1.5.3 pytest-3.6.1 ``` </details> ## Way to reproduce - [x] Code example - [ ] Relevant images (if any) - [x] Operating system and version - [x] Python version - [x] scikit-image version (run `skimage.__version__`) This has been observed on conda-forge, see conda-forge/scikit-image-feedstock#23
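The failure mode is that pytest interprets a bare `skimage` argument as a file-system path, so collection only works from inside the source tree; pytest's `--pyargs` flag makes the argument resolve through the import system instead. A minimal illustration of the two invocations (same idea as the fix below):

```python
import pytest

# path-based lookup: fails with "file not found: skimage" when run
# outside the source tree
pytest.main(['skimage'])

# package-based lookup: also finds the tests of an installed
# scikit-image
pytest.main(['--pyargs', 'skimage'])
```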
[ { "content": "\"\"\"Image Processing SciKit (Toolbox for SciPy)\n\n``scikit-image`` (a.k.a. ``skimage``) is a collection of algorithms for image\nprocessing and computer vision.\n\nThe main package of ``skimage`` only provides a few utilities for converting\nbetween image data types; for most features, you need to import one of the\nfollowing subpackages:\n\nSubpackages\n-----------\ncolor\n Color space conversion.\ndata\n Test images and example data.\ndraw\n Drawing primitives (lines, text, etc.) that operate on NumPy arrays.\nexposure\n Image intensity adjustment, e.g., histogram equalization, etc.\nfeature\n Feature detection and extraction, e.g., texture analysis corners, etc.\nfilters\n Sharpening, edge finding, rank filters, thresholding, etc.\ngraph\n Graph-theoretic operations, e.g., shortest paths.\nio\n Reading, saving, and displaying images and video.\nmeasure\n Measurement of image properties, e.g., similarity and contours.\nmorphology\n Morphological operations, e.g., opening or skeletonization.\nnovice\n Simplified interface for teaching purposes.\nrestoration\n Restoration algorithms, e.g., deconvolution algorithms, denoising, etc.\nsegmentation\n Partitioning an image into multiple regions.\ntransform\n Geometric and other transforms, e.g., rotation or the Radon transform.\nutil\n Generic utilities.\nviewer\n A simple graphical user interface for visualizing results and exploring\n parameters.\n\nUtility Functions\n-----------------\nimg_as_float\n Convert an image to floating point format, with values in [0, 1].\nimg_as_uint\n Convert an image to unsigned integer format, with values in [0, 65535].\nimg_as_int\n Convert an image to signed integer format, with values in [-32768, 32767].\nimg_as_ubyte\n Convert an image to unsigned byte format, with values in [0, 255].\n\n\"\"\"\n\nimport os.path as osp\nimport imp\nimport functools\nimport warnings\nimport sys\n\npkg_dir = osp.abspath(osp.dirname(__file__))\ndata_dir = osp.join(pkg_dir, 'data')\n\n__version__ = '0.15dev'\n\n\nif sys.version_info < (3,):\n raise ImportError(\"\"\"\n\nYou are running scikit-image on Python 2.\n\nUnfortunately, scikit-image 0.15 and above no longer work on this\nversion of Python. You therefore have two options: either upgrade to\nPython 3, or install an older version of scikit-image using\n\n $ pip install 'scikit-image<0.15'\n\nPlease also consider updating `pip` and `setuptools`:\n\n $ pip install pip setuptools --upgrade\n\nNewer versions of these tools avoid installing packages incompatible\nwith your version of Python.\n\"\"\")\n\n\ntry:\n imp.find_module('pytest')\nexcept ImportError:\n def _test(doctest=False, verbose=False):\n \"\"\"This would run all unit tests, but pytest couldn't be\n imported so the test suite can not run.\n \"\"\"\n raise ImportError(\"Could not load pytest. 
Unit tests not available.\")\n\nelse:\n def _test(doctest=False, verbose=False):\n \"\"\"Run all unit tests.\"\"\"\n import pytest\n import warnings\n args = ['skimage']\n if verbose:\n args.extend(['-v', '-s'])\n if doctest:\n args.extend(['--doctest-modules'])\n # Make sure warnings do not break the doc tests\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n success = pytest.main(args)\n else:\n success = pytest.main(args)\n # Return sys.exit code\n if success:\n return 0\n else:\n return 1\n\n\n# do not use `test` as function name as this leads to a recursion problem with\n# the nose test suite\ntest = _test\ntest_verbose = functools.partial(test, verbose=True)\ntest_verbose.__doc__ = test.__doc__\ndoctest = functools.partial(test, doctest=True)\ndoctest.__doc__ = doctest.__doc__\ndoctest_verbose = functools.partial(test, doctest=True, verbose=True)\ndoctest_verbose.__doc__ = doctest.__doc__\n\n\n# Logic for checking for improper install and importing while in the source\n# tree when package has not been installed inplace.\n# Code adapted from scikit-learn's __check_build module.\n_INPLACE_MSG = \"\"\"\nIt appears that you are importing a local scikit-image source tree. For\nthis, you need to have an inplace install. Maybe you are in the source\ndirectory and you need to try from another location.\"\"\"\n\n_STANDARD_MSG = \"\"\"\nYour install of scikit-image appears to be broken.\nTry re-installing the package following the instructions at:\nhttp://scikit-image.org/docs/stable/install.html \"\"\"\n\n\ndef _raise_build_error(e):\n # Raise a comprehensible error\n local_dir = osp.split(__file__)[0]\n msg = _STANDARD_MSG\n if local_dir == \"skimage\":\n # Picking up the local install: this will work only if the\n # install is an 'inplace build'\n msg = _INPLACE_MSG\n raise ImportError(\"\"\"%s\nIt seems that scikit-image has not been built correctly.\n%s\"\"\" % (e, msg))\n\ntry:\n # This variable is injected in the __builtins__ by the build\n # process. It used to enable importing subpackages of skimage when\n # the binaries are not built\n __SKIMAGE_SETUP__\nexcept NameError:\n __SKIMAGE_SETUP__ = False\n\nif __SKIMAGE_SETUP__:\n sys.stderr.write('Partial import of skimage during the build process.\\n')\n # We are not importing the rest of the scikit during the build\n # process, as it may not be compiled yet\nelse:\n try:\n from ._shared import geometry\n del geometry\n except ImportError as e:\n _raise_build_error(e)\n from .util.dtype import *\n\n\ndef lookfor(what):\n \"\"\"Do a keyword search on scikit-image docstrings.\n\n Parameters\n ----------\n what : str\n Words to look for.\n\n \"\"\"\n import numpy as np\n import sys\n return np.lookfor(what, sys.modules[__name__])\n\n\ndel warnings, functools, osp, imp, sys\n", "path": "skimage/__init__.py" } ]
[ { "content": "\"\"\"Image Processing SciKit (Toolbox for SciPy)\n\n``scikit-image`` (a.k.a. ``skimage``) is a collection of algorithms for image\nprocessing and computer vision.\n\nThe main package of ``skimage`` only provides a few utilities for converting\nbetween image data types; for most features, you need to import one of the\nfollowing subpackages:\n\nSubpackages\n-----------\ncolor\n Color space conversion.\ndata\n Test images and example data.\ndraw\n Drawing primitives (lines, text, etc.) that operate on NumPy arrays.\nexposure\n Image intensity adjustment, e.g., histogram equalization, etc.\nfeature\n Feature detection and extraction, e.g., texture analysis corners, etc.\nfilters\n Sharpening, edge finding, rank filters, thresholding, etc.\ngraph\n Graph-theoretic operations, e.g., shortest paths.\nio\n Reading, saving, and displaying images and video.\nmeasure\n Measurement of image properties, e.g., similarity and contours.\nmorphology\n Morphological operations, e.g., opening or skeletonization.\nnovice\n Simplified interface for teaching purposes.\nrestoration\n Restoration algorithms, e.g., deconvolution algorithms, denoising, etc.\nsegmentation\n Partitioning an image into multiple regions.\ntransform\n Geometric and other transforms, e.g., rotation or the Radon transform.\nutil\n Generic utilities.\nviewer\n A simple graphical user interface for visualizing results and exploring\n parameters.\n\nUtility Functions\n-----------------\nimg_as_float\n Convert an image to floating point format, with values in [0, 1].\nimg_as_uint\n Convert an image to unsigned integer format, with values in [0, 65535].\nimg_as_int\n Convert an image to signed integer format, with values in [-32768, 32767].\nimg_as_ubyte\n Convert an image to unsigned byte format, with values in [0, 255].\n\n\"\"\"\n\nimport os.path as osp\nimport imp\nimport functools\nimport warnings\nimport sys\n\npkg_dir = osp.abspath(osp.dirname(__file__))\ndata_dir = osp.join(pkg_dir, 'data')\n\n__version__ = '0.15dev'\n\n\nif sys.version_info < (3,):\n raise ImportError(\"\"\"\n\nYou are running scikit-image on Python 2.\n\nUnfortunately, scikit-image 0.15 and above no longer work on this\nversion of Python. You therefore have two options: either upgrade to\nPython 3, or install an older version of scikit-image using\n\n $ pip install 'scikit-image<0.15'\n\nPlease also consider updating `pip` and `setuptools`:\n\n $ pip install pip setuptools --upgrade\n\nNewer versions of these tools avoid installing packages incompatible\nwith your version of Python.\n\"\"\")\n\n\ntry:\n imp.find_module('pytest')\nexcept ImportError:\n def _test(doctest=False, verbose=False):\n \"\"\"This would run all unit tests, but pytest couldn't be\n imported so the test suite can not run.\n \"\"\"\n raise ImportError(\"Could not load pytest. 
Unit tests not available.\")\n\nelse:\n def _test(doctest=False, verbose=False):\n \"\"\"Run all unit tests.\"\"\"\n import pytest\n import warnings\n args = ['--pyargs', 'skimage']\n if verbose:\n args.extend(['-v', '-s'])\n if doctest:\n args.extend(['--doctest-modules'])\n # Make sure warnings do not break the doc tests\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n success = pytest.main(args)\n else:\n success = pytest.main(args)\n # Return sys.exit code\n if success:\n return 0\n else:\n return 1\n\n\n# do not use `test` as function name as this leads to a recursion problem with\n# the nose test suite\ntest = _test\ntest_verbose = functools.partial(test, verbose=True)\ntest_verbose.__doc__ = test.__doc__\ndoctest = functools.partial(test, doctest=True)\ndoctest.__doc__ = doctest.__doc__\ndoctest_verbose = functools.partial(test, doctest=True, verbose=True)\ndoctest_verbose.__doc__ = doctest.__doc__\n\n\n# Logic for checking for improper install and importing while in the source\n# tree when package has not been installed inplace.\n# Code adapted from scikit-learn's __check_build module.\n_INPLACE_MSG = \"\"\"\nIt appears that you are importing a local scikit-image source tree. For\nthis, you need to have an inplace install. Maybe you are in the source\ndirectory and you need to try from another location.\"\"\"\n\n_STANDARD_MSG = \"\"\"\nYour install of scikit-image appears to be broken.\nTry re-installing the package following the instructions at:\nhttp://scikit-image.org/docs/stable/install.html \"\"\"\n\n\ndef _raise_build_error(e):\n # Raise a comprehensible error\n local_dir = osp.split(__file__)[0]\n msg = _STANDARD_MSG\n if local_dir == \"skimage\":\n # Picking up the local install: this will work only if the\n # install is an 'inplace build'\n msg = _INPLACE_MSG\n raise ImportError(\"\"\"%s\nIt seems that scikit-image has not been built correctly.\n%s\"\"\" % (e, msg))\n\ntry:\n # This variable is injected in the __builtins__ by the build\n # process. It used to enable importing subpackages of skimage when\n # the binaries are not built\n __SKIMAGE_SETUP__\nexcept NameError:\n __SKIMAGE_SETUP__ = False\n\nif __SKIMAGE_SETUP__:\n sys.stderr.write('Partial import of skimage during the build process.\\n')\n # We are not importing the rest of the scikit during the build\n # process, as it may not be compiled yet\nelse:\n try:\n from ._shared import geometry\n del geometry\n except ImportError as e:\n _raise_build_error(e)\n from .util.dtype import *\n\n\ndef lookfor(what):\n \"\"\"Do a keyword search on scikit-image docstrings.\n\n Parameters\n ----------\n what : str\n Words to look for.\n\n \"\"\"\n import numpy as np\n import sys\n return np.lookfor(what, sys.modules[__name__])\n\n\ndel warnings, functools, osp, imp, sys\n", "path": "skimage/__init__.py" } ]
diff --git a/skimage/__init__.py b/skimage/__init__.py index ac7c24c32bc..6c88dfca090 100644 --- a/skimage/__init__.py +++ b/skimage/__init__.py @@ -102,7 +102,7 @@ def _test(doctest=False, verbose=False): """Run all unit tests.""" import pytest import warnings - args = ['skimage'] + args = ['--pyargs', 'skimage'] if verbose: args.extend(['-v', '-s']) if doctest:
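The one-line change in the diff above swaps a path argument for a package argument. pytest's `--pyargs` option makes it interpret `skimage` as an importable package name and locate the tests via `sys.path`, instead of treating it as a directory relative to wherever `skimage.test()` happens to be called. A minimal sketch of the difference (assumes pytest and an installed scikit-image; collected paths will vary by environment):

```python
import pytest

# Without --pyargs, "skimage" is resolved as a filesystem path, so test
# collection only works when a ./skimage directory exists in the CWD.
pytest.main(["skimage"])

# With --pyargs, "skimage" is imported as a package and tests are
# collected from its installed location, regardless of the CWD.
pytest.main(["--pyargs", "skimage"])
```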
elastic__apm-agent-python-1947
dbapi2 fails to extract table name when using square brackets
**Describe the bug**: Queries made to tables that require escaping end up with the wrong span name. The following spans are SELECTs from four different tables, but only two unique span names appear.

![Screenshot 2023-10-31 at 19 13 46](https://github.com/elastic/apm-agent-python/assets/435885/5384fd4d-df13-4828-a9f2-b7e1c309f07f)

**To Reproduce**

Import the package and call [extract_signature](https://github.com/elastic/apm-agent-python/blob/05332cd007560615b4421b1567659ff9f9634088/elasticapm/instrumentation/packages/dbapi2.py#L153):

```python
>>> from elasticapm.instrumentation.packages import dbapi2
>>> dbapi2.extract_signature("SELECT username FROM user")
'SELECT FROM user'
>>> dbapi2.extract_signature("SELECT username FROM [user]")
'SELECT FROM ['
```
[ { "content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Provides classes to instrument dbapi2 providers\n\nhttps://www.python.org/dev/peps/pep-0249/\n\"\"\"\n\nimport re\n\nimport wrapt\n\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import capture_span\nfrom elasticapm.utils.encoding import force_text, shorten\n\n\nclass Literal(object):\n def __init__(self, literal_type, content) -> None:\n self.literal_type = literal_type\n self.content = content\n\n def __eq__(self, other):\n return isinstance(other, Literal) and self.literal_type == other.literal_type and self.content == other.content\n\n def __repr__(self):\n return \"<Literal {}{}{}>\".format(self.literal_type, self.content, self.literal_type)\n\n\ndef look_for_table(sql, keyword):\n tokens = tokenize(sql)\n table_name = _scan_for_table_with_tokens(tokens, keyword)\n if isinstance(table_name, Literal):\n table_name = table_name.content.strip(table_name.literal_type)\n return table_name\n\n\ndef _scan_for_table_with_tokens(tokens, keyword):\n seen_keyword = False\n for idx, lexeme in scan(tokens):\n if seen_keyword:\n if lexeme == \"(\":\n return _scan_for_table_with_tokens(tokens[idx:], keyword)\n else:\n return lexeme\n\n if isinstance(lexeme, str) and lexeme.upper() == keyword:\n seen_keyword = True\n\n\ndef tokenize(sql):\n # split on anything that is not a word character, excluding dots\n return [t for t in re.split(r\"([^\\w.])\", sql) if t != \"\"]\n\n\ndef scan(tokens):\n literal_start_idx = None\n literal_started = None\n prev_was_escape = False\n lexeme = []\n\n i = 0\n while i < len(tokens):\n token = tokens[i]\n if literal_start_idx:\n if prev_was_escape:\n prev_was_escape = False\n lexeme.append(token)\n else:\n if token == literal_started:\n if literal_started == \"'\" and len(tokens) > i + 1 and tokens[i + 1] == \"'\": # double quotes\n i += 1\n lexeme.append(\"'\")\n else:\n yield i, Literal(literal_started, \"\".join(lexeme))\n 
literal_start_idx = None\n literal_started = None\n lexeme = []\n else:\n if token == \"\\\\\":\n prev_was_escape = token\n else:\n prev_was_escape = False\n lexeme.append(token)\n elif literal_start_idx is None:\n if token in [\"'\", '\"', \"`\"]:\n literal_start_idx = i\n literal_started = token\n elif token == \"$\":\n # Postgres can use arbitrary characters between two $'s as a\n # literal separation token, e.g.: $fish$ literal $fish$\n # This part will detect that and skip over the literal.\n try:\n # Closing dollar of the opening quote,\n # i.e. the second $ in the first $fish$\n closing_dollar_idx = tokens.index(\"$\", i + 1)\n except ValueError:\n pass\n else:\n quote = tokens[i : closing_dollar_idx + 1]\n length = len(quote)\n # Opening dollar of the closing quote,\n # i.e. the first $ in the second $fish$\n closing_quote_idx = closing_dollar_idx + 1\n while True:\n try:\n closing_quote_idx = tokens.index(\"$\", closing_quote_idx)\n except ValueError:\n break\n if tokens[closing_quote_idx : closing_quote_idx + length] == quote:\n yield i, Literal(\n \"\".join(quote), \"\".join(tokens[closing_dollar_idx + 1 : closing_quote_idx])\n )\n i = closing_quote_idx + length\n break\n closing_quote_idx += 1\n else:\n if token != \" \":\n yield i, token\n i += 1\n\n if lexeme:\n yield i, lexeme\n\n\ndef extract_signature(sql):\n \"\"\"\n Extracts a minimal signature from a given SQL query\n :param sql: the SQL statement\n :return: a string representing the signature\n \"\"\"\n sql = force_text(sql)\n sql = sql.strip()\n first_space = sql.find(\" \")\n if first_space < 0:\n return sql\n\n second_space = sql.find(\" \", first_space + 1)\n\n sql_type = sql[0:first_space].upper()\n\n if sql_type in [\"INSERT\", \"DELETE\"]:\n keyword = \"INTO\" if sql_type == \"INSERT\" else \"FROM\"\n sql_type = sql_type + \" \" + keyword\n\n object_name = look_for_table(sql, keyword)\n elif sql_type in [\"CREATE\", \"DROP\"]:\n # 2nd word is part of SQL type\n sql_type = sql_type + sql[first_space:second_space]\n object_name = \"\"\n elif sql_type == \"UPDATE\":\n object_name = look_for_table(sql, \"UPDATE\")\n elif sql_type == \"SELECT\":\n # Name is first table\n try:\n sql_type = \"SELECT FROM\"\n object_name = look_for_table(sql, \"FROM\")\n except Exception:\n object_name = \"\"\n elif sql_type in [\"EXEC\", \"EXECUTE\"]:\n sql_type = \"EXECUTE\"\n end = second_space if second_space > first_space else len(sql)\n object_name = sql[first_space + 1 : end]\n elif sql_type == \"CALL\":\n first_paren = sql.find(\"(\", first_space)\n end = first_paren if first_paren > first_space else len(sql)\n procedure_name = sql[first_space + 1 : end].rstrip(\";\")\n object_name = procedure_name + \"()\"\n else:\n # No name\n object_name = \"\"\n\n signature = \" \".join(filter(bool, [sql_type, object_name]))\n return signature\n\n\nQUERY_ACTION = \"query\"\nEXEC_ACTION = \"exec\"\nPROCEDURE_STATEMENTS = [\"EXEC\", \"EXECUTE\", \"CALL\"]\n\n\ndef extract_action_from_signature(signature, default):\n if signature.split(\" \")[0] in PROCEDURE_STATEMENTS:\n return EXEC_ACTION\n return default\n\n\nclass CursorProxy(wrapt.ObjectProxy):\n provider_name = None\n DML_QUERIES = (\"INSERT\", \"DELETE\", \"UPDATE\")\n\n def __init__(self, wrapped, destination_info=None) -> None:\n super(CursorProxy, self).__init__(wrapped)\n self._self_destination_info = destination_info or {}\n\n def callproc(self, procname, params=None):\n return self._trace_sql(self.__wrapped__.callproc, procname, params, action=EXEC_ACTION)\n\n def execute(self, 
sql, params=None):\n return self._trace_sql(self.__wrapped__.execute, sql, params)\n\n def executemany(self, sql, param_list):\n return self._trace_sql(self.__wrapped__.executemany, sql, param_list)\n\n def _bake_sql(self, sql):\n \"\"\"\n Method to turn the \"sql\" argument into a string. Most database backends simply return\n the given object, as it is already a string\n \"\"\"\n return sql\n\n def _trace_sql(self, method, sql, params, action=QUERY_ACTION):\n sql_string = self._bake_sql(sql)\n if action == EXEC_ACTION:\n signature = sql_string + \"()\"\n else:\n signature = self.extract_signature(sql_string)\n action = extract_action_from_signature(signature, action)\n\n # Truncate sql_string to 10000 characters to prevent large queries from\n # causing an error to APM server.\n sql_string = shorten(sql_string, string_length=10000)\n\n with capture_span(\n signature,\n span_type=\"db\",\n span_subtype=self.provider_name,\n span_action=action,\n extra={\n \"db\": {\"type\": \"sql\", \"statement\": sql_string, \"instance\": getattr(self, \"_self_database\", None)},\n \"destination\": self._self_destination_info,\n },\n skip_frames=1,\n leaf=True,\n ) as span:\n if params is None:\n result = method(sql)\n else:\n result = method(sql, params)\n # store \"rows affected\", but only for DML queries like insert/update/delete\n if span and self.rowcount not in (-1, None) and signature.startswith(self.DML_QUERIES):\n span.update_context(\"db\", {\"rows_affected\": self.rowcount})\n return result\n\n def extract_signature(self, sql):\n raise NotImplementedError()\n\n\nclass ConnectionProxy(wrapt.ObjectProxy):\n cursor_proxy = CursorProxy\n\n def __init__(self, wrapped, destination_info=None) -> None:\n super(ConnectionProxy, self).__init__(wrapped)\n self._self_destination_info = destination_info\n\n def cursor(self, *args, **kwargs):\n return self.cursor_proxy(self.__wrapped__.cursor(*args, **kwargs), self._self_destination_info)\n\n\nclass DbApi2Instrumentation(AbstractInstrumentedModule):\n connect_method = None\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n return ConnectionProxy(wrapped(*args, **kwargs))\n\n def call_if_sampling(self, module, method, wrapped, instance, args, kwargs):\n # Contrasting to the superclass implementation, we *always* want to\n # return a proxied connection, even if there is no ongoing elasticapm\n # transaction yet. This ensures that we instrument the cursor once\n # the transaction started.\n return self.call(module, method, wrapped, instance, args, kwargs)\n", "path": "elasticapm/instrumentation/packages/dbapi2.py" } ]
[ { "content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Provides classes to instrument dbapi2 providers\n\nhttps://www.python.org/dev/peps/pep-0249/\n\"\"\"\n\nimport re\n\nimport wrapt\n\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import capture_span\nfrom elasticapm.utils.encoding import force_text, shorten\n\n\nclass Literal(object):\n def __init__(self, literal_type, content) -> None:\n self.literal_type = literal_type\n self.content = content\n\n def __eq__(self, other):\n return isinstance(other, Literal) and self.literal_type == other.literal_type and self.content == other.content\n\n def __repr__(self):\n return \"<Literal {}{}{}>\".format(self.literal_type, self.content, self.literal_type)\n\n\ndef look_for_table(sql, keyword):\n tokens = tokenize(sql)\n table_name = _scan_for_table_with_tokens(tokens, keyword)\n if isinstance(table_name, Literal):\n table_name = table_name.content.strip(table_name.literal_type)\n return table_name\n\n\ndef _scan_for_table_with_tokens(tokens, keyword):\n seen_keyword = False\n for idx, lexeme in scan(tokens):\n if seen_keyword:\n if lexeme == \"(\":\n return _scan_for_table_with_tokens(tokens[idx:], keyword)\n else:\n return lexeme\n\n if isinstance(lexeme, str) and lexeme.upper() == keyword:\n seen_keyword = True\n\n\ndef tokenize(sql):\n # split on anything that is not a word character or a square bracket, excluding dots\n return [t for t in re.split(r\"([^\\w.\\[\\]])\", sql) if t != \"\"]\n\n\ndef scan(tokens):\n literal_start_idx = None\n literal_started = None\n prev_was_escape = False\n lexeme = []\n\n i = 0\n while i < len(tokens):\n token = tokens[i]\n if literal_start_idx:\n if prev_was_escape:\n prev_was_escape = False\n lexeme.append(token)\n else:\n if token == literal_started:\n if literal_started == \"'\" and len(tokens) > i + 1 and tokens[i + 1] == \"'\": # double quotes\n i += 1\n lexeme.append(\"'\")\n else:\n yield i, Literal(literal_started, 
\"\".join(lexeme))\n literal_start_idx = None\n literal_started = None\n lexeme = []\n else:\n if token == \"\\\\\":\n prev_was_escape = token\n else:\n prev_was_escape = False\n lexeme.append(token)\n elif literal_start_idx is None:\n if token in [\"'\", '\"', \"`\"]:\n literal_start_idx = i\n literal_started = token\n elif token == \"$\":\n # Postgres can use arbitrary characters between two $'s as a\n # literal separation token, e.g.: $fish$ literal $fish$\n # This part will detect that and skip over the literal.\n try:\n # Closing dollar of the opening quote,\n # i.e. the second $ in the first $fish$\n closing_dollar_idx = tokens.index(\"$\", i + 1)\n except ValueError:\n pass\n else:\n quote = tokens[i : closing_dollar_idx + 1]\n length = len(quote)\n # Opening dollar of the closing quote,\n # i.e. the first $ in the second $fish$\n closing_quote_idx = closing_dollar_idx + 1\n while True:\n try:\n closing_quote_idx = tokens.index(\"$\", closing_quote_idx)\n except ValueError:\n break\n if tokens[closing_quote_idx : closing_quote_idx + length] == quote:\n yield i, Literal(\n \"\".join(quote), \"\".join(tokens[closing_dollar_idx + 1 : closing_quote_idx])\n )\n i = closing_quote_idx + length\n break\n closing_quote_idx += 1\n else:\n if token != \" \":\n yield i, token\n i += 1\n\n if lexeme:\n yield i, lexeme\n\n\ndef extract_signature(sql):\n \"\"\"\n Extracts a minimal signature from a given SQL query\n :param sql: the SQL statement\n :return: a string representing the signature\n \"\"\"\n sql = force_text(sql)\n sql = sql.strip()\n first_space = sql.find(\" \")\n if first_space < 0:\n return sql\n\n second_space = sql.find(\" \", first_space + 1)\n\n sql_type = sql[0:first_space].upper()\n\n if sql_type in [\"INSERT\", \"DELETE\"]:\n keyword = \"INTO\" if sql_type == \"INSERT\" else \"FROM\"\n sql_type = sql_type + \" \" + keyword\n\n object_name = look_for_table(sql, keyword)\n elif sql_type in [\"CREATE\", \"DROP\"]:\n # 2nd word is part of SQL type\n sql_type = sql_type + sql[first_space:second_space]\n object_name = \"\"\n elif sql_type == \"UPDATE\":\n object_name = look_for_table(sql, \"UPDATE\")\n elif sql_type == \"SELECT\":\n # Name is first table\n try:\n sql_type = \"SELECT FROM\"\n object_name = look_for_table(sql, \"FROM\")\n except Exception:\n object_name = \"\"\n elif sql_type in [\"EXEC\", \"EXECUTE\"]:\n sql_type = \"EXECUTE\"\n end = second_space if second_space > first_space else len(sql)\n object_name = sql[first_space + 1 : end]\n elif sql_type == \"CALL\":\n first_paren = sql.find(\"(\", first_space)\n end = first_paren if first_paren > first_space else len(sql)\n procedure_name = sql[first_space + 1 : end].rstrip(\";\")\n object_name = procedure_name + \"()\"\n else:\n # No name\n object_name = \"\"\n\n signature = \" \".join(filter(bool, [sql_type, object_name]))\n return signature\n\n\nQUERY_ACTION = \"query\"\nEXEC_ACTION = \"exec\"\nPROCEDURE_STATEMENTS = [\"EXEC\", \"EXECUTE\", \"CALL\"]\n\n\ndef extract_action_from_signature(signature, default):\n if signature.split(\" \")[0] in PROCEDURE_STATEMENTS:\n return EXEC_ACTION\n return default\n\n\nclass CursorProxy(wrapt.ObjectProxy):\n provider_name = None\n DML_QUERIES = (\"INSERT\", \"DELETE\", \"UPDATE\")\n\n def __init__(self, wrapped, destination_info=None) -> None:\n super(CursorProxy, self).__init__(wrapped)\n self._self_destination_info = destination_info or {}\n\n def callproc(self, procname, params=None):\n return self._trace_sql(self.__wrapped__.callproc, procname, params, action=EXEC_ACTION)\n\n 
def execute(self, sql, params=None):\n return self._trace_sql(self.__wrapped__.execute, sql, params)\n\n def executemany(self, sql, param_list):\n return self._trace_sql(self.__wrapped__.executemany, sql, param_list)\n\n def _bake_sql(self, sql):\n \"\"\"\n Method to turn the \"sql\" argument into a string. Most database backends simply return\n the given object, as it is already a string\n \"\"\"\n return sql\n\n def _trace_sql(self, method, sql, params, action=QUERY_ACTION):\n sql_string = self._bake_sql(sql)\n if action == EXEC_ACTION:\n signature = sql_string + \"()\"\n else:\n signature = self.extract_signature(sql_string)\n action = extract_action_from_signature(signature, action)\n\n # Truncate sql_string to 10000 characters to prevent large queries from\n # causing an error to APM server.\n sql_string = shorten(sql_string, string_length=10000)\n\n with capture_span(\n signature,\n span_type=\"db\",\n span_subtype=self.provider_name,\n span_action=action,\n extra={\n \"db\": {\"type\": \"sql\", \"statement\": sql_string, \"instance\": getattr(self, \"_self_database\", None)},\n \"destination\": self._self_destination_info,\n },\n skip_frames=1,\n leaf=True,\n ) as span:\n if params is None:\n result = method(sql)\n else:\n result = method(sql, params)\n # store \"rows affected\", but only for DML queries like insert/update/delete\n if span and self.rowcount not in (-1, None) and signature.startswith(self.DML_QUERIES):\n span.update_context(\"db\", {\"rows_affected\": self.rowcount})\n return result\n\n def extract_signature(self, sql):\n raise NotImplementedError()\n\n\nclass ConnectionProxy(wrapt.ObjectProxy):\n cursor_proxy = CursorProxy\n\n def __init__(self, wrapped, destination_info=None) -> None:\n super(ConnectionProxy, self).__init__(wrapped)\n self._self_destination_info = destination_info\n\n def cursor(self, *args, **kwargs):\n return self.cursor_proxy(self.__wrapped__.cursor(*args, **kwargs), self._self_destination_info)\n\n\nclass DbApi2Instrumentation(AbstractInstrumentedModule):\n connect_method = None\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n return ConnectionProxy(wrapped(*args, **kwargs))\n\n def call_if_sampling(self, module, method, wrapped, instance, args, kwargs):\n # Contrasting to the superclass implementation, we *always* want to\n # return a proxied connection, even if there is no ongoing elasticapm\n # transaction yet. This ensures that we instrument the cursor once\n # the transaction started.\n return self.call(module, method, wrapped, instance, args, kwargs)\n", "path": "elasticapm/instrumentation/packages/dbapi2.py" } ]
diff --git a/elasticapm/instrumentation/packages/dbapi2.py b/elasticapm/instrumentation/packages/dbapi2.py index cbe34be59..fb49723c2 100644 --- a/elasticapm/instrumentation/packages/dbapi2.py +++ b/elasticapm/instrumentation/packages/dbapi2.py @@ -76,8 +76,8 @@ def _scan_for_table_with_tokens(tokens, keyword): def tokenize(sql): - # split on anything that is not a word character, excluding dots - return [t for t in re.split(r"([^\w.])", sql) if t != ""] + # split on anything that is not a word character or a square bracket, excluding dots + return [t for t in re.split(r"([^\w.\[\]])", sql) if t != ""] def scan(tokens): diff --git a/tests/instrumentation/dbapi2_tests.py b/tests/instrumentation/dbapi2_tests.py index d2fc84aab..3d72b6632 100644 --- a/tests/instrumentation/dbapi2_tests.py +++ b/tests/instrumentation/dbapi2_tests.py @@ -154,3 +154,18 @@ def test_extract_signature_for_procedure_call(sql, expected): def test_extract_action_from_signature(sql, expected): actual = extract_action_from_signature(sql, "query") assert actual == expected + + [email protected]( + ["sql", "expected"], + [ + ("SELECT username FROM user", "SELECT FROM user"), + ("SELECT username FROM [user]", "SELECT FROM [user]"), + ("SELECT username FROM [db].[user]", "SELECT FROM [db].[user]"), + ("SELECT username FROM db.[user]", "SELECT FROM db.[user]"), + ("SELECT username FROM [db].user", "SELECT FROM [db].user"), + ], +) +def test_extract_signature_when_using_square_brackets(sql, expected): + actual = extract_signature(sql) + assert actual == expected
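The fix above is confined to the `tokenize` regex: square brackets are removed from the set of split characters, so a bracket-quoted identifier comes out of `re.split` as a single token and `_scan_for_table_with_tokens` sees the whole table name. A standalone sketch of the before/after behavior (the two patterns are copied from the diff; everything else here is illustrative):

```python
import re

OLD = r"([^\w.])"       # brackets count as separators, cutting "[user]" apart
NEW = r"([^\w.\[\]])"   # brackets stay inside the token

sql = "SELECT username FROM [db].[user]"
print([t for t in re.split(OLD, sql) if t != ""])
# ['SELECT', ' ', 'username', ' ', 'FROM', ' ', '[', 'db', ']', '.', '[', 'user', ']']
print([t for t in re.split(NEW, sql) if t != ""])
# ['SELECT', ' ', 'username', ' ', 'FROM', ' ', '[db].[user]']
```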
sublimelsp__LSP-1417
Advertise window.showMessageRequest.messageActionItem.additionalPropertiesSupport See: https://github.com/microsoft/language-server-protocol/commit/4a29ca0725469624fc07425c3fa0fde386e7ee55
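The requested change boils down to one extra entry in the client capabilities that the session sends with its `initialize` request. A plausible sketch of the payload, modeled on the `window` block built by `get_initialize_params` in the session module below; the exact placement in the eventual fix is an assumption:

```python
# Hypothetical excerpt of the "window" client capabilities; keys follow
# the LSP 3.16 specification change linked in the issue.
window_capabilities = {
    "showMessageRequest": {
        "messageActionItem": {
            # Advertises that extra, non-spec properties on a
            # MessageActionItem will be preserved and returned to the
            # server in the window/showMessageRequest response.
            "additionalPropertiesSupport": True,
        }
    },
    "workDoneProgress": True,  # already advertised by the code below
}
```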
[ { "content": "from .edit import apply_workspace_edit\nfrom .edit import parse_workspace_edit\nfrom .logging import debug\nfrom .logging import exception_log\nfrom .promise import Promise\nfrom .protocol import CompletionItemTag\nfrom .protocol import Error\nfrom .protocol import ErrorCode\nfrom .protocol import Notification\nfrom .protocol import Request\nfrom .protocol import Response\nfrom .protocol import WorkspaceFolder\nfrom .settings import client_configs\nfrom .transports import Transport\nfrom .transports import TransportCallbacks\nfrom .types import Capabilities\nfrom .types import ClientConfig\nfrom .types import ClientStates\nfrom .types import debounced\nfrom .types import diff\nfrom .types import DocumentSelector\nfrom .types import method_to_capability\nfrom .typing import Callable, Dict, Any, Optional, List, Tuple, Generator, Type, Protocol, Mapping\nfrom .url import uri_to_filename\nfrom .version import __version__\nfrom .views import COMPLETION_KINDS\nfrom .views import did_change_configuration\nfrom .views import extract_variables\nfrom .views import get_storage_path\nfrom .views import SYMBOL_KINDS\nfrom .workspace import is_subpath_of\nfrom abc import ABCMeta\nfrom abc import abstractmethod\nfrom weakref import WeakSet\nimport functools\nimport os\nimport sublime\nimport weakref\n\n\nInitCallback = Callable[['Session', bool], None]\n\n\nclass Manager(metaclass=ABCMeta):\n \"\"\"\n A Manager is a container of Sessions.\n \"\"\"\n\n # Observers\n\n @abstractmethod\n def window(self) -> sublime.Window:\n \"\"\"\n Get the window associated with this manager.\n \"\"\"\n pass\n\n @abstractmethod\n def sessions(self, view: sublime.View, capability: Optional[str] = None) -> 'Generator[Session, None, None]':\n \"\"\"\n Iterate over the sessions stored in this manager, applicable to the given view, with the given capability.\n \"\"\"\n pass\n\n @abstractmethod\n def get_project_path(self, file_path: str) -> Optional[str]:\n \"\"\"\n Get the project path for the given file.\n \"\"\"\n pass\n\n # Mutators\n\n @abstractmethod\n def start_async(self, configuration: ClientConfig, initiating_view: sublime.View) -> None:\n \"\"\"\n Start a new Session with the given configuration. 
The initiating view is the view that caused this method to\n be called.\n\n A normal flow of calls would be start -> on_post_initialize -> do language server things -> on_post_exit.\n However, it is possible that the subprocess cannot start, in which case on_post_initialize will never be called.\n \"\"\"\n pass\n\n @abstractmethod\n def update_diagnostics_panel_async(self) -> None:\n pass\n\n @abstractmethod\n def show_diagnostics_panel_async(self) -> None:\n pass\n\n @abstractmethod\n def hide_diagnostics_panel_async(self) -> None:\n pass\n\n # Event callbacks\n\n @abstractmethod\n def on_post_exit_async(self, session: 'Session', exit_code: int, exception: Optional[Exception]) -> None:\n \"\"\"\n The given Session has stopped with the given exit code.\n \"\"\"\n pass\n\n\ndef get_initialize_params(variables: Dict[str, str], workspace_folders: List[WorkspaceFolder],\n config: ClientConfig) -> dict:\n completion_kinds = list(range(1, len(COMPLETION_KINDS) + 1))\n symbol_kinds = list(range(1, len(SYMBOL_KINDS) + 1))\n completion_tag_value_set = [v for k, v in CompletionItemTag.__dict__.items() if not k.startswith('_')]\n first_folder = workspace_folders[0] if workspace_folders else None\n capabilities = {\n \"textDocument\": {\n \"synchronization\": {\n \"dynamicRegistration\": True, # exceptional\n \"didSave\": True,\n \"willSave\": True,\n \"willSaveWaitUntil\": True\n },\n \"hover\": {\n \"dynamicRegistration\": True,\n \"contentFormat\": [\"markdown\", \"plaintext\"]\n },\n \"completion\": {\n \"dynamicRegistration\": True,\n \"completionItem\": {\n \"snippetSupport\": True,\n \"deprecatedSupport\": True,\n \"documentationFormat\": [\"markdown\", \"plaintext\"],\n \"tagSupport\": {\n \"valueSet\": completion_tag_value_set\n }\n },\n \"completionItemKind\": {\n \"valueSet\": completion_kinds\n }\n },\n \"signatureHelp\": {\n \"dynamicRegistration\": True,\n \"signatureInformation\": {\n \"documentationFormat\": [\"markdown\", \"plaintext\"],\n \"parameterInformation\": {\n \"labelOffsetSupport\": True\n }\n }\n },\n \"references\": {\n \"dynamicRegistration\": True\n },\n \"documentHighlight\": {\n \"dynamicRegistration\": True\n },\n \"documentSymbol\": {\n \"dynamicRegistration\": True,\n \"hierarchicalDocumentSymbolSupport\": True,\n \"symbolKind\": {\n \"valueSet\": symbol_kinds\n }\n },\n \"formatting\": {\n \"dynamicRegistration\": True # exceptional\n },\n \"rangeFormatting\": {\n \"dynamicRegistration\": True\n },\n \"declaration\": {\n \"dynamicRegistration\": True,\n \"linkSupport\": True\n },\n \"definition\": {\n \"dynamicRegistration\": True,\n \"linkSupport\": True\n },\n \"typeDefinition\": {\n \"dynamicRegistration\": True,\n \"linkSupport\": True\n },\n \"implementation\": {\n \"dynamicRegistration\": True,\n \"linkSupport\": True\n },\n \"codeAction\": {\n \"dynamicRegistration\": True,\n \"codeActionLiteralSupport\": {\n \"codeActionKind\": {\n \"valueSet\": [\n \"quickfix\",\n \"refactor\",\n \"refactor.extract\",\n \"refactor.inline\",\n \"refactor.rewrite\",\n \"source.organizeImports\"\n ]\n }\n }\n },\n \"rename\": {\n \"dynamicRegistration\": True,\n \"prepareSupport\": True\n },\n \"colorProvider\": {\n \"dynamicRegistration\": True # exceptional\n },\n \"publishDiagnostics\": {\n \"relatedInformation\": True\n },\n \"selectionRange\": {\n \"dynamicRegistration\": True\n }\n },\n \"workspace\": {\n \"applyEdit\": True,\n \"didChangeConfiguration\": {\n \"dynamicRegistration\": True\n },\n \"executeCommand\": {},\n \"workspaceEdit\": {\n \"documentChanges\": 
True,\n \"failureHandling\": \"abort\",\n },\n \"workspaceFolders\": True,\n \"symbol\": {\n \"dynamicRegistration\": True, # exceptional\n \"symbolKind\": {\n \"valueSet\": symbol_kinds\n }\n },\n \"configuration\": True\n },\n \"window\": {\n \"workDoneProgress\": True\n }\n }\n if config.experimental_capabilities is not None:\n capabilities['experimental'] = config.experimental_capabilities\n return {\n \"processId\": os.getpid(),\n \"clientInfo\": {\n \"name\": \"Sublime Text LSP\",\n \"version\": \".\".join(map(str, __version__))\n },\n \"rootUri\": first_folder.uri() if first_folder else None,\n \"rootPath\": first_folder.path if first_folder else None,\n \"workspaceFolders\": [folder.to_lsp() for folder in workspace_folders] if workspace_folders else None,\n \"capabilities\": capabilities,\n \"initializationOptions\": sublime.expand_variables(config.init_options.get(), variables)\n }\n\n\nclass SessionViewProtocol(Protocol):\n\n session = None # type: Session\n view = None # type: sublime.View\n listener = None # type: Any\n session_buffer = None # type: Any\n\n def on_capability_added_async(self, capability_path: str, options: Dict[str, Any]) -> None:\n ...\n\n def on_capability_removed_async(self, discarded_capabilities: Dict[str, Any]) -> None:\n ...\n\n def has_capability_async(self, capability_path: str) -> bool:\n ...\n\n def shutdown_async(self) -> None:\n ...\n\n def present_diagnostics_async(self, flags: int) -> None:\n ...\n\n def on_request_started_async(self, request_id: int, request: Request) -> None:\n ...\n\n def on_request_finished_async(self, request_id: int) -> None:\n ...\n\n\nclass SessionBufferProtocol(Protocol):\n\n session = None # type: Session\n session_views = None # type: WeakSet[SessionViewProtocol]\n file_name = None # type: str\n language_id = None # type: str\n\n def register_capability_async(\n self,\n registration_id: str,\n capability_path: str,\n registration_path: str,\n options: Dict[str, Any]\n ) -> None:\n ...\n\n def unregister_capability_async(\n self,\n registration_id: str,\n capability_path: str,\n registration_path: str\n ) -> None:\n ...\n\n def on_diagnostics_async(self, diagnostics: List[Dict[str, Any]], version: Optional[int]) -> None:\n ...\n\n\nclass AbstractPlugin(metaclass=ABCMeta):\n \"\"\"\n Inherit from this class to handle non-standard requests and notifications.\n Given a request/notification, replace the non-alphabetic characters with an underscore, and prepend it with \"m_\".\n This will be the name of your method.\n For instance, to implement the non-standard eslint/openDoc request, define the Python method\n\n def m_eslint_openDoc(self, params, request_id):\n session = self.weaksession()\n if session:\n webbrowser.open_tab(params['url'])\n session.send_response(Response(request_id, None))\n\n To handle the non-standard eslint/status notification, define the Python method\n\n def m_eslint_status(self, params):\n pass\n\n To understand how this works, see the __getattr__ method of the Session class.\n \"\"\"\n\n @classmethod\n @abstractmethod\n def name(cls) -> str:\n \"\"\"\n A human-friendly name. If your plugin is called \"LSP-foobar\", then this should return \"foobar\". 
If you also\n have your settings file called \"LSP-foobar.sublime-settings\", then you don't even need to re-implement the\n configuration method (see below).\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n def configuration(cls) -> Tuple[sublime.Settings, str]:\n \"\"\"\n Return the Settings object that defines the \"command\", \"languages\", and optionally the \"initializationOptions\",\n \"default_settings\", \"env\" and \"tcp_port\" as the first element in the tuple, and the path to the base settings\n filename as the second element in the tuple.\n\n The second element in the tuple is used to handle \"settings\" overrides from users properly. For example, if your\n plugin is called LSP-foobar, you would return \"Packages/LSP-foobar/LSP-foobar.sublime-settings\".\n\n The \"command\", \"initializationOptions\" and \"env\" are subject to template string substitution. The following\n template strings are recognized:\n\n $file\n $file_base_name\n $file_extension\n $file_name\n $file_path\n $platform\n $project\n $project_base_name\n $project_extension\n $project_name\n $project_path\n\n These are just the values from window.extract_variables(). Additionally,\n\n $storage_path The path to the package storage (see AbstractPlugin.storage_path)\n $cache_path sublime.cache_path()\n $temp_dir tempfile.gettempdir()\n $home os.path.expanduser('~')\n $port A random free TCP-port on localhost in case \"tcp_port\" is set to 0. This string template can only\n be used in the \"command\"\n\n The \"command\" and \"env\" are expanded upon starting the subprocess of the Session. The \"initializationOptions\"\n are expanded upon doing the initialize request. \"initializationOptions\" does not expand $port.\n\n When you're managing your own server binary, you would typically place it in sublime.cache_path(). So your\n \"command\" should look like this: \"command\": [\"$cache_path/LSP-foobar/server_binary\", \"--stdio\"]\n \"\"\"\n name = cls.name()\n basename = \"LSP-{}.sublime-settings\".format(name)\n filepath = \"Packages/LSP-{}/{}\".format(name, basename)\n return sublime.load_settings(basename), filepath\n\n @classmethod\n def additional_variables(cls) -> Optional[Dict[str, str]]:\n \"\"\"\n In addition to the above variables, add more variables here to be expanded.\n \"\"\"\n return None\n\n @classmethod\n def storage_path(cls) -> str:\n \"\"\"\n The storage path. Use this as your base directory to install server files. Its path is '$DATA/Package Storage'.\n You should have an additional subdirectory preferrably the same name as your plugin. For instance:\n\n ```python\n from LSP.plugin import AbstractPlugin\n import os\n\n\n class MyPlugin(AbstractPlugin):\n\n @classmethod\n def name(cls) -> str:\n return \"my-plugin\"\n\n @classmethod\n def basedir(cls) -> str:\n # Do everything relative to this directory\n return os.path.join(cls.storage_path(), cls.name())\n ```\n \"\"\"\n return get_storage_path()\n\n @classmethod\n def needs_update_or_installation(cls) -> bool:\n \"\"\"\n If this plugin manages its own server binary, then this is the place to check whether the binary needs\n an update, or whether it needs to be installed before starting the language server.\n \"\"\"\n return False\n\n @classmethod\n def install_or_update(cls) -> None:\n \"\"\"\n Do the actual update/installation of the server binary. 
This runs in a separate thread, so don't spawn threads\n yourself here.\n \"\"\"\n pass\n\n @classmethod\n def can_start(cls, window: sublime.Window, initiating_view: sublime.View,\n workspace_folders: List[WorkspaceFolder], configuration: ClientConfig) -> Optional[str]:\n \"\"\"\n Determines ability to start. This is called after needs_update_or_installation and after install_or_update.\n So you may assume that if you're managing your server binary, then it is already installed when this\n classmethod is called.\n\n :param window: The window\n :param initiating_view: The initiating view\n :param workspace_folders: The workspace folders\n :param configuration: The configuration\n\n :returns: A string describing the reason why we should not start a language server session, or None if we\n should go ahead and start a session.\n \"\"\"\n return None\n\n def __init__(self, weaksession: 'weakref.ref[Session]') -> None:\n \"\"\"\n Constructs a new instance.\n\n :param weaksession: A weak reference to the Session. You can grab a strong reference through\n self.weaksession(), but don't hold on to that reference.\n \"\"\"\n self.weaksession = weaksession\n\n def on_workspace_configuration(self, params: Dict, configuration: Any) -> None:\n \"\"\"\n Override to augment configuration returned for the workspace/configuration request.\n\n :param params: A ConfigurationItem for which configuration is requested.\n :param configuration: The resolved configuration for given params.\n \"\"\"\n pass\n\n def on_pre_server_command(self, command: Mapping[str, Any], done_callback: Callable[[], None]) -> bool:\n \"\"\"\n Intercept a command that is about to be sent to the language server.\n\n :param command: The payload containing a \"command\" and optionally \"arguments\".\n :param done_callback: The callback that you promise to invoke when you return true.\n\n :returns: True if *YOU* will handle this command plugin-side, false otherwise. You must invoke the\n passed `done_callback` when you're done.\n \"\"\"\n return False\n\n\n_plugins = {} # type: Dict[str, Type[AbstractPlugin]]\n\n\ndef _register_plugin_impl(plugin: Type[AbstractPlugin], notify_listener: bool) -> None:\n global _plugins\n name = plugin.name()\n try:\n settings, base_file = plugin.configuration()\n if client_configs.add_external_config(name, settings, base_file, notify_listener):\n _plugins[name] = plugin\n except Exception as ex:\n exception_log('Failed to register plugin \"{}\"'.format(name), ex)\n\n\ndef register_plugin(plugin: Type[AbstractPlugin], notify_listener: bool = True) -> None:\n \"\"\"\n Register an LSP plugin in LSP.\n\n You should put a call to this function in your `plugin_loaded` callback. This way, when your package is disabled\n by a user and then re-enabled again by a user, the changes in state are picked up by LSP, and your language server\n will start for the relevant views.\n\n While your helper package may still work without calling `register_plugin` in `plugin_loaded`, the user will have a\n better experience when you do call this function.\n\n Your implementation should look something like this:\n\n ```python\n from LSP.plugin import register_plugin\n from LSP.plugin import unregister_plugin\n from LSP.plugin import AbstractPlugin\n\n\n class MyPlugin(AbstractPlugin):\n ...\n\n\n def plugin_loaded():\n register_plugin(MyPlugin)\n\n def plugin_unloaded():\n unregister_plugin(MyPlugin)\n ```\n\n If you need to install supplementary files (e.g. 
javascript source code that implements the actual server), do so\n in `AbstractPlugin.install_or_update` in a blocking manner, without the use of Python's `threading` module.\n \"\"\"\n if notify_listener:\n # There is a bug in Sublime Text's `plugin_loaded` callback. When the package is in the list of\n # `\"ignored_packages\"` in Packages/User/Preferences.sublime-settings, and then removed from that list, the\n # sublime.Settings object has missing keys/values. To circumvent this, we run the actual registration one tick\n # later. At that point, the settings object is fully loaded. At least, it seems that way. For more context,\n # see https://github.com/sublimehq/sublime_text/issues/3379\n # and https://github.com/sublimehq/sublime_text/issues/2099\n sublime.set_timeout(lambda: _register_plugin_impl(plugin, notify_listener))\n else:\n _register_plugin_impl(plugin, notify_listener)\n\n\ndef unregister_plugin(plugin: Type[AbstractPlugin]) -> None:\n \"\"\"\n Unregister an LSP plugin in LSP.\n\n You should put a call to this function in your `plugin_unloaded` callback. this way, when your package is disabled\n by a user, your language server is shut down for the views that it is attached to. This results in a good user\n experience.\n \"\"\"\n global _plugins\n name = plugin.name()\n try:\n _plugins.pop(name, None)\n client_configs.remove_external_config(name)\n except Exception as ex:\n exception_log('Failed to unregister plugin \"{}\"'.format(name), ex)\n\n\ndef get_plugin(name: str) -> Optional[Type[AbstractPlugin]]:\n global _plugins\n return _plugins.get(name, None)\n\n\nclass Logger(metaclass=ABCMeta):\n\n @abstractmethod\n def stderr_message(self, message: str) -> None:\n pass\n\n @abstractmethod\n def outgoing_response(self, request_id: Any, params: Any) -> None:\n pass\n\n @abstractmethod\n def outgoing_error_response(self, request_id: Any, error: Error) -> None:\n pass\n\n @abstractmethod\n def outgoing_request(self, request_id: int, method: str, params: Any) -> None:\n pass\n\n @abstractmethod\n def outgoing_notification(self, method: str, params: Any) -> None:\n pass\n\n @abstractmethod\n def incoming_response(self, request_id: int, params: Any, is_error: bool) -> None:\n pass\n\n @abstractmethod\n def incoming_request(self, request_id: Any, method: str, params: Any) -> None:\n pass\n\n @abstractmethod\n def incoming_notification(self, method: str, params: Any, unhandled: bool) -> None:\n pass\n\n\ndef print_to_status_bar(error: Dict[str, Any]) -> None:\n sublime.status_message(error[\"message\"])\n\n\ndef method2attr(method: str) -> str:\n # window/messageRequest -> m_window_messageRequest\n # $/progress -> m___progress\n # client/registerCapability -> m_client_registerCapability\n return 'm_' + ''.join(map(lambda c: c if c.isalpha() else '_', method))\n\n\nclass _RegistrationData:\n\n __slots__ = (\"registration_id\", \"capability_path\", \"registration_path\", \"options\", \"session_buffers\", \"selector\")\n\n def __init__(\n self,\n registration_id: str,\n capability_path: str,\n registration_path: str,\n options: Dict[str, Any]\n ) -> None:\n self.registration_id = registration_id\n self.registration_path = registration_path\n self.capability_path = capability_path\n document_selector = options.pop(\"documentSelector\", None)\n if not isinstance(document_selector, list):\n document_selector = []\n self.selector = DocumentSelector(document_selector)\n self.options = options\n self.session_buffers = WeakSet() # type: WeakSet[SessionBufferProtocol]\n\n def __del__(self) -> 
None:\n for sb in self.session_buffers:\n sb.unregister_capability_async(self.registration_id, self.capability_path, self.registration_path)\n\n def check_applicable(self, sb: SessionBufferProtocol) -> None:\n for sv in sb.session_views:\n if self.selector.matches(sv.view):\n self.session_buffers.add(sb)\n sb.register_capability_async(\n self.registration_id, self.capability_path, self.registration_path, self.options)\n return\n\n\nclass Session(TransportCallbacks):\n\n def __init__(self, manager: Manager, logger: Logger, workspace_folders: List[WorkspaceFolder],\n config: ClientConfig, plugin_class: Optional[Type[AbstractPlugin]]) -> None:\n self.transport = None # type: Optional[Transport]\n self.request_id = 0 # Our request IDs are always integers.\n self._logger = logger\n self._response_handlers = {} # type: Dict[int, Tuple[Request, Callable, Optional[Callable[[Any], None]]]]\n self.config = config\n self.manager = weakref.ref(manager)\n self.window = manager.window()\n self.state = ClientStates.STARTING\n self.capabilities = Capabilities()\n self.exiting = False\n self._registrations = {} # type: Dict[str, _RegistrationData]\n self._init_callback = None # type: Optional[InitCallback]\n self._initialize_error = None # type: Optional[Tuple[int, Optional[Exception]]]\n self._views_opened = 0\n self._workspace_folders = workspace_folders\n self._session_views = WeakSet() # type: WeakSet[SessionViewProtocol]\n self._session_buffers = WeakSet() # type: WeakSet[SessionBufferProtocol]\n self._progress = {} # type: Dict[Any, Dict[str, str]]\n self._plugin_class = plugin_class\n self._plugin = None # type: Optional[AbstractPlugin]\n\n def __del__(self) -> None:\n debug(self.config.command, \"ended\")\n for token in self._progress.keys():\n key = self._progress_status_key(token)\n for sv in self.session_views_async():\n if sv.view.is_valid():\n sv.view.erase_status(key)\n\n def __getattr__(self, name: str) -> Any:\n \"\"\"\n If we don't have a request/notification handler, look up the request/notification handler in the plugin.\n \"\"\"\n if name.startswith('m_'):\n attr = getattr(self._plugin, name)\n if attr is not None:\n return attr\n raise AttributeError(name)\n\n # TODO: Create an assurance that the API doesn't change here as it can be used by plugins.\n def get_workspace_folders(self) -> List[WorkspaceFolder]:\n return self._workspace_folders\n\n # --- session view management --------------------------------------------------------------------------------------\n\n def register_session_view_async(self, sv: SessionViewProtocol) -> None:\n self._session_views.add(sv)\n self._views_opened += 1\n\n def unregister_session_view_async(self, sv: SessionViewProtocol) -> None:\n self._session_views.discard(sv)\n if not self._session_views:\n current_count = self._views_opened\n debounced(self.end_async, 3000, lambda: self._views_opened == current_count, async_thread=True)\n\n def session_views_async(self) -> Generator[SessionViewProtocol, None, None]:\n \"\"\"\n It is only safe to iterate over this in the async thread\n \"\"\"\n yield from self._session_views\n\n def session_view_for_view_async(self, view: sublime.View) -> Optional[SessionViewProtocol]:\n for sv in self.session_views_async():\n if sv.view == view:\n return sv\n return None\n\n # --- session buffer management ------------------------------------------------------------------------------------\n\n def register_session_buffer_async(self, sb: SessionBufferProtocol) -> None:\n self._session_buffers.add(sb)\n for data in 
self._registrations.values():\n data.check_applicable(sb)\n\n def unregister_session_buffer_async(self, sb: SessionBufferProtocol) -> None:\n self._session_buffers.discard(sb)\n\n def session_buffers_async(self) -> Generator[SessionBufferProtocol, None, None]:\n \"\"\"\n It is only safe to iterate over this in the async thread\n \"\"\"\n yield from self._session_buffers\n\n def get_session_buffer_for_uri_async(self, uri: str) -> Optional[SessionBufferProtocol]:\n file_name = uri_to_filename(uri)\n for sb in self.session_buffers_async():\n try:\n if sb.file_name == file_name or os.path.samefile(file_name, sb.file_name):\n return sb\n except FileNotFoundError:\n pass\n return None\n\n # --- capability observers -----------------------------------------------------------------------------------------\n\n def can_handle(self, view: sublime.View, capability: Optional[str], inside_workspace: bool) -> bool:\n file_name = view.file_name() or ''\n if (self.config.match_view(view)\n and self.state == ClientStates.READY\n and self.handles_path(file_name, inside_workspace)):\n # If there's no capability requirement then this session can handle the view\n if capability is None:\n return True\n sv = self.session_view_for_view_async(view)\n if sv:\n return sv.has_capability_async(capability)\n else:\n return self.has_capability(capability)\n return False\n\n def has_capability(self, capability: str) -> bool:\n value = self.get_capability(capability)\n return value is not False and value is not None\n\n def get_capability(self, capability: str) -> Optional[Any]:\n return self.capabilities.get(capability)\n\n def should_notify_did_open(self) -> bool:\n return self.capabilities.should_notify_did_open()\n\n def text_sync_kind(self) -> int:\n return self.capabilities.text_sync_kind()\n\n def should_notify_did_change(self) -> bool:\n return self.capabilities.should_notify_did_change()\n\n def should_notify_did_change_workspace_folders(self) -> bool:\n return self.capabilities.should_notify_did_change_workspace_folders()\n\n def should_notify_will_save(self) -> bool:\n return self.capabilities.should_notify_will_save()\n\n def should_notify_did_save(self) -> Tuple[bool, bool]:\n return self.capabilities.should_notify_did_save()\n\n def should_notify_did_close(self) -> bool:\n return self.capabilities.should_notify_did_close()\n\n # --- misc methods -------------------------------------------------------------------------------------------------\n\n def handles_path(self, file_path: Optional[str], inside_workspace: bool) -> bool:\n if self._supports_workspace_folders():\n # A workspace-aware language server handles any path, both inside and outside the workspaces.\n return True\n # If we end up here then the language server is workspace-unaware. This means there can be more than one\n # language server with the same config name. 
So we have to actually do the subpath checks.\n if not file_path:\n return False\n if not self._workspace_folders or not inside_workspace:\n return True\n for folder in self._workspace_folders:\n if is_subpath_of(file_path, folder.path):\n return True\n return False\n\n def update_folders(self, folders: List[WorkspaceFolder]) -> None:\n if self.should_notify_did_change_workspace_folders():\n added, removed = diff(self._workspace_folders, folders)\n if added or removed:\n params = {\n \"event\": {\n \"added\": [a.to_lsp() for a in added],\n \"removed\": [r.to_lsp() for r in removed]\n }\n }\n self.send_notification(Notification.didChangeWorkspaceFolders(params))\n if self._supports_workspace_folders():\n self._workspace_folders = folders\n else:\n self._workspace_folders = folders[:1]\n\n def initialize_async(self, variables: Dict[str, str], transport: Transport, init_callback: InitCallback) -> None:\n self.transport = transport\n params = get_initialize_params(variables, self._workspace_folders, self.config)\n self._init_callback = init_callback\n self.send_request_async(\n Request.initialize(params), self._handle_initialize_success, self._handle_initialize_error)\n\n def _handle_initialize_success(self, result: Any) -> None:\n self.capabilities.assign(result.get('capabilities', dict()))\n if self._workspace_folders and not self._supports_workspace_folders():\n self._workspace_folders = self._workspace_folders[:1]\n self.state = ClientStates.READY\n if self._plugin_class is not None:\n self._plugin = self._plugin_class(weakref.ref(self))\n self.send_notification(Notification.initialized())\n self._maybe_send_did_change_configuration()\n execute_commands = self.get_capability('executeCommandProvider.commands')\n if execute_commands:\n debug(\"{}: Supported execute commands: {}\".format(self.config.name, execute_commands))\n code_action_kinds = self.get_capability('codeActionProvider.codeActionKinds')\n if code_action_kinds:\n debug('{}: supported code action kinds: {}'.format(self.config.name, code_action_kinds))\n if self._init_callback:\n self._init_callback(self, False)\n self._init_callback = None\n\n def _handle_initialize_error(self, result: Any) -> None:\n self._initialize_error = (result.get('code', -1), Exception(result.get('message', 'Error initializing server')))\n # Init callback called after transport is closed to avoid pre-mature GC of Session.\n self.end_async()\n\n def call_manager(self, method: str, *args: Any) -> None:\n mgr = self.manager()\n if mgr:\n getattr(mgr, method)(*args)\n\n def clear_diagnostics_async(self) -> None:\n # XXX: Remove this functionality?\n for sb in self.session_buffers_async():\n sb.on_diagnostics_async([], None)\n\n def on_stderr_message(self, message: str) -> None:\n self.call_manager('handle_stderr_log', self, message)\n self._logger.stderr_message(message)\n\n def _supports_workspace_folders(self) -> bool:\n return self.has_capability(\"workspace.workspaceFolders.supported\")\n\n def _maybe_send_did_change_configuration(self) -> None:\n if self.config.settings:\n self.send_notification(did_change_configuration(self.config.settings, self._template_variables()))\n\n def _template_variables(self) -> Dict[str, str]:\n variables = extract_variables(self.window)\n if self._plugin_class is not None:\n extra_vars = self._plugin_class.additional_variables()\n if extra_vars:\n variables.update(extra_vars)\n return variables\n\n def run_command(self, command: Mapping[str, Any]) -> Promise:\n \"\"\"Run a command from any thread. 
Your .then() continuations will run in Sublime's worker thread.\"\"\"\n if self._plugin:\n promise, callback = Promise.packaged_task()\n if self._plugin.on_pre_server_command(command, callback):\n return promise\n # TODO: Our Promise class should be able to handle errors/exceptions\n return Promise(\n lambda resolve: self.send_request(\n Request.executeCommand(command),\n resolve,\n lambda err: resolve(Error(err[\"code\"], err[\"message\"], err.get(\"data\")))\n )\n )\n\n def run_code_action_async(self, code_action: Mapping[str, Any]) -> Promise:\n command = code_action.get(\"command\")\n if isinstance(command, str):\n # This is actually a command.\n return self.run_command(code_action)\n # At this point it cannot be a command anymore, it has to be a proper code action.\n # A code action can have an edit and/or command. Note that it can have *both*. In case both are present, we\n # must apply the edits before running the command.\n edit = code_action.get(\"edit\")\n promise = self._apply_workspace_edit_async(edit) if edit else Promise.resolve()\n return promise.then(lambda _: self.run_command(command) if isinstance(command, dict) else Promise.resolve())\n\n def _apply_workspace_edit_async(self, edit: Any) -> Promise:\n \"\"\"\n Apply workspace edits, and return a promise that resolves on the async thread again after the edits have been\n applied.\n \"\"\"\n changes = parse_workspace_edit(edit)\n return Promise.on_main_thread() \\\n .then(lambda _: apply_workspace_edit(self.window, changes)) \\\n .then(Promise.on_async_thread)\n\n # --- server request handlers --------------------------------------------------------------------------------------\n\n def m_window_showMessageRequest(self, params: Any, request_id: Any) -> None:\n \"\"\"handles the window/showMessageRequest request\"\"\"\n self.call_manager('handle_message_request', self, params, request_id)\n\n def m_window_showMessage(self, params: Any) -> None:\n \"\"\"handles the window/showMessage notification\"\"\"\n self.call_manager('handle_show_message', self, params)\n\n def m_window_logMessage(self, params: Any) -> None:\n \"\"\"handles the window/logMessage notification\"\"\"\n self.call_manager('handle_log_message', self, params)\n\n def m_workspace_workspaceFolders(self, _: Any, request_id: Any) -> None:\n \"\"\"handles the workspace/workspaceFolders request\"\"\"\n self.send_response(Response(request_id, [wf.to_lsp() for wf in self._workspace_folders]))\n\n def m_workspace_configuration(self, params: Dict[str, Any], request_id: Any) -> None:\n \"\"\"handles the workspace/configuration request\"\"\"\n items = [] # type: List[Any]\n requested_items = params.get(\"items\") or []\n for requested_item in requested_items:\n configuration = self.config.settings.copy(requested_item.get('section') or None)\n if self._plugin:\n self._plugin.on_workspace_configuration(requested_item, configuration)\n items.append(configuration)\n self.send_response(Response(request_id, sublime.expand_variables(items, self._template_variables())))\n\n def m_workspace_applyEdit(self, params: Any, request_id: Any) -> None:\n \"\"\"handles the workspace/applyEdit request\"\"\"\n self._apply_workspace_edit_async(params.get('edit', {})).then(\n lambda _: self.send_response(Response(request_id, {\"applied\": True})))\n\n def m_textDocument_publishDiagnostics(self, params: Any) -> None:\n \"\"\"handles the textDocument/publishDiagnostics notification\"\"\"\n uri = params[\"uri\"]\n sb = self.get_session_buffer_for_uri_async(uri)\n if sb:\n 
sb.on_diagnostics_async(params[\"diagnostics\"], params.get(\"version\"))\n\n def m_client_registerCapability(self, params: Any, request_id: Any) -> None:\n \"\"\"handles the client/registerCapability request\"\"\"\n registrations = params[\"registrations\"]\n for registration in registrations:\n registration_id = registration[\"id\"]\n capability_path, registration_path = method_to_capability(registration[\"method\"])\n debug(\"{}: registering capability:\".format(self.config.name), capability_path)\n options = registration.get(\"registerOptions\") # type: Optional[Dict[str, Any]]\n if not isinstance(options, dict):\n options = {}\n data = _RegistrationData(registration_id, capability_path, registration_path, options)\n self._registrations[registration_id] = data\n if data.selector:\n # The registration is applicable only to certain buffers, so let's check which buffers apply.\n for sb in self.session_buffers_async():\n data.check_applicable(sb)\n else:\n # The registration applies globally to all buffers.\n self.capabilities.register(registration_id, capability_path, registration_path, options)\n self.send_response(Response(request_id, None))\n\n def m_client_unregisterCapability(self, params: Any, request_id: Any) -> None:\n \"\"\"handles the client/unregisterCapability request\"\"\"\n unregistrations = params[\"unregisterations\"] # typo in the official specification\n for unregistration in unregistrations:\n registration_id = unregistration[\"id\"]\n capability_path, registration_path = method_to_capability(unregistration[\"method\"])\n debug(\"{}: unregistering capability:\".format(self.config.name), capability_path)\n data = self._registrations.pop(registration_id, None)\n if not data:\n message = \"no registration data found for registration ID {}\".format(registration_id)\n self.send_error_response(request_id, Error(ErrorCode.InvalidParams, message))\n return\n elif not data.selector:\n self.capabilities.unregister(registration_id, capability_path, registration_path)\n self.send_response(Response(request_id, None))\n\n def m_window_workDoneProgress_create(self, params: Any, request_id: Any) -> None:\n \"\"\"handles the window/workDoneProgress/create request\"\"\"\n self._progress[params['token']] = dict()\n self.send_response(Response(request_id, None))\n\n def _progress_status_key(self, token: str) -> str:\n return \"lspprogress{}{}\".format(self.config.name, token)\n\n def m___progress(self, params: Any) -> None:\n \"\"\"handles the $/progress notification\"\"\"\n token = params['token']\n data = self._progress.get(token)\n if not isinstance(data, dict):\n debug('unknown $/progress token: {}'.format(token))\n return\n value = params['value']\n kind = value['kind']\n key = self._progress_status_key(token)\n if kind == 'begin':\n data['title'] = value['title'] # mandatory\n data['message'] = value.get('message') # optional\n progress_string = self._progress_string(data, value)\n for sv in self.session_views_async():\n sv.view.set_status(key, progress_string)\n elif kind == 'report':\n progress_string = self._progress_string(data, value)\n for sv in self.session_views_async():\n sv.view.set_status(key, progress_string)\n elif kind == 'end':\n message = value.get('message')\n if message:\n self.window.status_message(data['title'] + ': ' + message)\n for sv in self.session_views_async():\n sv.view.erase_status(key)\n self._progress.pop(token, None)\n\n def _progress_string(self, data: Dict[str, Any], value: Dict[str, Any]) -> str:\n status_msg = data['title']\n progress_message = 
value.get('message') # optional\n progress_percentage = value.get('percentage') # optional\n if progress_message:\n data['message'] = progress_message\n status_msg += ': ' + progress_message\n elif data['message']: # reuse last known message if not present\n status_msg += ': ' + data['message']\n if progress_percentage:\n fmt = ' ({:.1f}%)' if isinstance(progress_percentage, float) else ' ({}%)'\n status_msg += fmt.format(progress_percentage)\n return status_msg\n\n # --- shutdown dance -----------------------------------------------------------------------------------------------\n\n def end_async(self) -> None:\n # TODO: Ensure this function is called only from the async thread\n if self.exiting:\n return\n self.exiting = True\n self._plugin = None\n for sv in self.session_views_async():\n sv.shutdown_async()\n self.capabilities.clear()\n self._registrations.clear()\n self.state = ClientStates.STOPPING\n self.send_request_async(Request.shutdown(), self._handle_shutdown_result, self._handle_shutdown_result)\n\n def _handle_shutdown_result(self, _: Any) -> None:\n self.exit()\n\n def on_transport_close(self, exit_code: int, exception: Optional[Exception]) -> None:\n self.exiting = True\n self.state = ClientStates.STOPPING\n self.transport = None\n self._response_handlers.clear()\n if self._initialize_error:\n # Override potential exit error with a saved one.\n exit_code, exception = self._initialize_error\n mgr = self.manager()\n if mgr:\n if self._init_callback:\n self._init_callback(self, True)\n self._init_callback = None\n mgr.on_post_exit_async(self, exit_code, exception)\n\n # --- RPC message handling ----------------------------------------------------------------------------------------\n\n def send_request_async(\n self,\n request: Request,\n on_result: Callable[[Any], None],\n on_error: Optional[Callable[[Any], None]] = None\n ) -> None:\n \"\"\"You must call this method from Sublime's worker thread. Callbacks will run in Sublime's worker thread.\"\"\"\n self.request_id += 1\n request_id = self.request_id\n self._response_handlers[request_id] = (request, on_result, on_error)\n if request.view:\n sv = self.session_view_for_view_async(request.view)\n if sv:\n sv.on_request_started_async(request_id, request)\n else:\n # This is a workspace or window request\n for sv in self.session_views_async():\n sv.on_request_started_async(request_id, request)\n self._logger.outgoing_request(request_id, request.method, request.params)\n self.send_payload(request.to_payload(request_id))\n\n def send_request(\n self,\n request: Request,\n on_result: Callable[[Any], None],\n on_error: Optional[Callable[[Any], None]] = None,\n ) -> None:\n \"\"\"You can call this method from any thread. 
Callbacks will run in Sublime's worker thread.\"\"\"\n sublime.set_timeout_async(functools.partial(self.send_request_async, request, on_result, on_error))\n\n def send_notification(self, notification: Notification) -> None:\n self._logger.outgoing_notification(notification.method, notification.params)\n self.send_payload(notification.to_payload())\n\n def send_response(self, response: Response) -> None:\n self._logger.outgoing_response(response.request_id, response.result)\n self.send_payload(response.to_payload())\n\n def send_error_response(self, request_id: Any, error: Error) -> None:\n self._logger.outgoing_error_response(request_id, error)\n self.send_payload({'jsonrpc': '2.0', 'id': request_id, 'error': error.to_lsp()})\n\n def exit(self) -> None:\n self.send_notification(Notification.exit())\n try:\n self.transport.close() # type: ignore\n except AttributeError:\n pass\n\n def send_payload(self, payload: Dict[str, Any]) -> None:\n try:\n self.transport.send(payload) # type: ignore\n except AttributeError:\n pass\n\n def deduce_payload(\n self,\n payload: Dict[str, Any]\n ) -> Tuple[Optional[Callable], Any, Optional[int], Optional[str], Optional[str]]:\n if \"method\" in payload:\n method = payload[\"method\"]\n handler = self._get_handler(method)\n result = payload.get(\"params\")\n if \"id\" in payload:\n req_id = payload[\"id\"]\n self._logger.incoming_request(req_id, method, result)\n if handler is None:\n self.send_error_response(req_id, Error(ErrorCode.MethodNotFound, method))\n else:\n tup = (handler, result, req_id, \"request\", method)\n return tup\n else:\n res = (handler, result, None, \"notification\", method)\n self._logger.incoming_notification(method, result, res[0] is None)\n return res\n elif \"id\" in payload:\n response_id = int(payload[\"id\"])\n handler, result, is_error = self.response_handler(response_id, payload)\n response_tuple = (handler, result, None, None, None)\n self._logger.incoming_response(response_id, result, is_error)\n return response_tuple\n else:\n debug(\"Unknown payload type: \", payload)\n return (None, None, None, None, None)\n\n def on_payload(self, payload: Dict[str, Any]) -> None:\n handler, result, req_id, typestr, method = self.deduce_payload(payload)\n if handler:\n try:\n if req_id is None:\n # notification or response\n handler(result)\n else:\n # request\n try:\n handler(result, req_id)\n except Error as err:\n self.send_error_response(req_id, err)\n except Exception as ex:\n self.send_error_response(req_id, Error.from_exception(ex))\n raise\n except Exception as err:\n exception_log(\"Error handling {}\".format(typestr), err)\n\n def response_handler(self, response_id: int, response: Dict[str, Any]) -> Tuple[Optional[Callable], Any, bool]:\n request, handler, error_handler = self._response_handlers.pop(response_id, (None, None, None))\n if not request:\n error = {\"code\": ErrorCode.InvalidParams, \"message\": \"unknown response ID {}\".format(response_id)}\n return (print_to_status_bar, error, True)\n if request.view:\n sv = self.session_view_for_view_async(request.view)\n if sv:\n sv.on_request_finished_async(response_id)\n else:\n for sv in self.session_views_async():\n sv.on_request_finished_async(response_id)\n if \"result\" in response and \"error\" not in response:\n return (handler, response[\"result\"], False)\n if not error_handler:\n error_handler = print_to_status_bar\n if \"result\" not in response and \"error\" in response:\n error = response[\"error\"]\n else:\n error = {\"code\": ErrorCode.InvalidParams, 
\"message\": \"invalid response payload\"}\n return (error_handler, error, True)\n\n def _get_handler(self, method: str) -> Optional[Callable]:\n return getattr(self, method2attr(method), None)\n", "path": "plugin/core/sessions.py" } ]
[ { "content": "from .edit import apply_workspace_edit\nfrom .edit import parse_workspace_edit\nfrom .logging import debug\nfrom .logging import exception_log\nfrom .promise import Promise\nfrom .protocol import CompletionItemTag\nfrom .protocol import Error\nfrom .protocol import ErrorCode\nfrom .protocol import Notification\nfrom .protocol import Request\nfrom .protocol import Response\nfrom .protocol import WorkspaceFolder\nfrom .settings import client_configs\nfrom .transports import Transport\nfrom .transports import TransportCallbacks\nfrom .types import Capabilities\nfrom .types import ClientConfig\nfrom .types import ClientStates\nfrom .types import debounced\nfrom .types import diff\nfrom .types import DocumentSelector\nfrom .types import method_to_capability\nfrom .typing import Callable, Dict, Any, Optional, List, Tuple, Generator, Type, Protocol, Mapping\nfrom .url import uri_to_filename\nfrom .version import __version__\nfrom .views import COMPLETION_KINDS\nfrom .views import did_change_configuration\nfrom .views import extract_variables\nfrom .views import get_storage_path\nfrom .views import SYMBOL_KINDS\nfrom .workspace import is_subpath_of\nfrom abc import ABCMeta\nfrom abc import abstractmethod\nfrom weakref import WeakSet\nimport functools\nimport os\nimport sublime\nimport weakref\n\n\nInitCallback = Callable[['Session', bool], None]\n\n\nclass Manager(metaclass=ABCMeta):\n \"\"\"\n A Manager is a container of Sessions.\n \"\"\"\n\n # Observers\n\n @abstractmethod\n def window(self) -> sublime.Window:\n \"\"\"\n Get the window associated with this manager.\n \"\"\"\n pass\n\n @abstractmethod\n def sessions(self, view: sublime.View, capability: Optional[str] = None) -> 'Generator[Session, None, None]':\n \"\"\"\n Iterate over the sessions stored in this manager, applicable to the given view, with the given capability.\n \"\"\"\n pass\n\n @abstractmethod\n def get_project_path(self, file_path: str) -> Optional[str]:\n \"\"\"\n Get the project path for the given file.\n \"\"\"\n pass\n\n # Mutators\n\n @abstractmethod\n def start_async(self, configuration: ClientConfig, initiating_view: sublime.View) -> None:\n \"\"\"\n Start a new Session with the given configuration. 
The initiating view is the view that caused this method to\n be called.\n\n A normal flow of calls would be start -> on_post_initialize -> do language server things -> on_post_exit.\n However, it is possible that the subprocess cannot start, in which case on_post_initialize will never be called.\n \"\"\"\n pass\n\n @abstractmethod\n def update_diagnostics_panel_async(self) -> None:\n pass\n\n @abstractmethod\n def show_diagnostics_panel_async(self) -> None:\n pass\n\n @abstractmethod\n def hide_diagnostics_panel_async(self) -> None:\n pass\n\n # Event callbacks\n\n @abstractmethod\n def on_post_exit_async(self, session: 'Session', exit_code: int, exception: Optional[Exception]) -> None:\n \"\"\"\n The given Session has stopped with the given exit code.\n \"\"\"\n pass\n\n\ndef get_initialize_params(variables: Dict[str, str], workspace_folders: List[WorkspaceFolder],\n config: ClientConfig) -> dict:\n completion_kinds = list(range(1, len(COMPLETION_KINDS) + 1))\n symbol_kinds = list(range(1, len(SYMBOL_KINDS) + 1))\n completion_tag_value_set = [v for k, v in CompletionItemTag.__dict__.items() if not k.startswith('_')]\n first_folder = workspace_folders[0] if workspace_folders else None\n capabilities = {\n \"textDocument\": {\n \"synchronization\": {\n \"dynamicRegistration\": True, # exceptional\n \"didSave\": True,\n \"willSave\": True,\n \"willSaveWaitUntil\": True\n },\n \"hover\": {\n \"dynamicRegistration\": True,\n \"contentFormat\": [\"markdown\", \"plaintext\"]\n },\n \"completion\": {\n \"dynamicRegistration\": True,\n \"completionItem\": {\n \"snippetSupport\": True,\n \"deprecatedSupport\": True,\n \"documentationFormat\": [\"markdown\", \"plaintext\"],\n \"tagSupport\": {\n \"valueSet\": completion_tag_value_set\n }\n },\n \"completionItemKind\": {\n \"valueSet\": completion_kinds\n }\n },\n \"signatureHelp\": {\n \"dynamicRegistration\": True,\n \"signatureInformation\": {\n \"documentationFormat\": [\"markdown\", \"plaintext\"],\n \"parameterInformation\": {\n \"labelOffsetSupport\": True\n }\n }\n },\n \"references\": {\n \"dynamicRegistration\": True\n },\n \"documentHighlight\": {\n \"dynamicRegistration\": True\n },\n \"documentSymbol\": {\n \"dynamicRegistration\": True,\n \"hierarchicalDocumentSymbolSupport\": True,\n \"symbolKind\": {\n \"valueSet\": symbol_kinds\n }\n },\n \"formatting\": {\n \"dynamicRegistration\": True # exceptional\n },\n \"rangeFormatting\": {\n \"dynamicRegistration\": True\n },\n \"declaration\": {\n \"dynamicRegistration\": True,\n \"linkSupport\": True\n },\n \"definition\": {\n \"dynamicRegistration\": True,\n \"linkSupport\": True\n },\n \"typeDefinition\": {\n \"dynamicRegistration\": True,\n \"linkSupport\": True\n },\n \"implementation\": {\n \"dynamicRegistration\": True,\n \"linkSupport\": True\n },\n \"codeAction\": {\n \"dynamicRegistration\": True,\n \"codeActionLiteralSupport\": {\n \"codeActionKind\": {\n \"valueSet\": [\n \"quickfix\",\n \"refactor\",\n \"refactor.extract\",\n \"refactor.inline\",\n \"refactor.rewrite\",\n \"source.organizeImports\"\n ]\n }\n }\n },\n \"rename\": {\n \"dynamicRegistration\": True,\n \"prepareSupport\": True\n },\n \"colorProvider\": {\n \"dynamicRegistration\": True # exceptional\n },\n \"publishDiagnostics\": {\n \"relatedInformation\": True\n },\n \"selectionRange\": {\n \"dynamicRegistration\": True\n }\n },\n \"workspace\": {\n \"applyEdit\": True,\n \"didChangeConfiguration\": {\n \"dynamicRegistration\": True\n },\n \"executeCommand\": {},\n \"workspaceEdit\": {\n \"documentChanges\": 
True,\n \"failureHandling\": \"abort\",\n },\n \"workspaceFolders\": True,\n \"symbol\": {\n \"dynamicRegistration\": True, # exceptional\n \"symbolKind\": {\n \"valueSet\": symbol_kinds\n }\n },\n \"configuration\": True\n },\n \"window\": {\n \"showMessage\": {\n \"messageActionItem\": {\n \"additionalPropertiesSupport\": True\n }\n },\n \"workDoneProgress\": True\n }\n }\n if config.experimental_capabilities is not None:\n capabilities['experimental'] = config.experimental_capabilities\n return {\n \"processId\": os.getpid(),\n \"clientInfo\": {\n \"name\": \"Sublime Text LSP\",\n \"version\": \".\".join(map(str, __version__))\n },\n \"rootUri\": first_folder.uri() if first_folder else None,\n \"rootPath\": first_folder.path if first_folder else None,\n \"workspaceFolders\": [folder.to_lsp() for folder in workspace_folders] if workspace_folders else None,\n \"capabilities\": capabilities,\n \"initializationOptions\": sublime.expand_variables(config.init_options.get(), variables)\n }\n\n\nclass SessionViewProtocol(Protocol):\n\n session = None # type: Session\n view = None # type: sublime.View\n listener = None # type: Any\n session_buffer = None # type: Any\n\n def on_capability_added_async(self, capability_path: str, options: Dict[str, Any]) -> None:\n ...\n\n def on_capability_removed_async(self, discarded_capabilities: Dict[str, Any]) -> None:\n ...\n\n def has_capability_async(self, capability_path: str) -> bool:\n ...\n\n def shutdown_async(self) -> None:\n ...\n\n def present_diagnostics_async(self, flags: int) -> None:\n ...\n\n def on_request_started_async(self, request_id: int, request: Request) -> None:\n ...\n\n def on_request_finished_async(self, request_id: int) -> None:\n ...\n\n\nclass SessionBufferProtocol(Protocol):\n\n session = None # type: Session\n session_views = None # type: WeakSet[SessionViewProtocol]\n file_name = None # type: str\n language_id = None # type: str\n\n def register_capability_async(\n self,\n registration_id: str,\n capability_path: str,\n registration_path: str,\n options: Dict[str, Any]\n ) -> None:\n ...\n\n def unregister_capability_async(\n self,\n registration_id: str,\n capability_path: str,\n registration_path: str\n ) -> None:\n ...\n\n def on_diagnostics_async(self, diagnostics: List[Dict[str, Any]], version: Optional[int]) -> None:\n ...\n\n\nclass AbstractPlugin(metaclass=ABCMeta):\n \"\"\"\n Inherit from this class to handle non-standard requests and notifications.\n Given a request/notification, replace the non-alphabetic characters with an underscore, and prepend it with \"m_\".\n This will be the name of your method.\n For instance, to implement the non-standard eslint/openDoc request, define the Python method\n\n def m_eslint_openDoc(self, params, request_id):\n session = self.weaksession()\n if session:\n webbrowser.open_tab(params['url'])\n session.send_response(Response(request_id, None))\n\n To handle the non-standard eslint/status notification, define the Python method\n\n def m_eslint_status(self, params):\n pass\n\n To understand how this works, see the __getattr__ method of the Session class.\n \"\"\"\n\n @classmethod\n @abstractmethod\n def name(cls) -> str:\n \"\"\"\n A human-friendly name. If your plugin is called \"LSP-foobar\", then this should return \"foobar\". 
If you also\n have your settings file called \"LSP-foobar.sublime-settings\", then you don't even need to re-implement the\n configuration method (see below).\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n def configuration(cls) -> Tuple[sublime.Settings, str]:\n \"\"\"\n Return the Settings object that defines the \"command\", \"languages\", and optionally the \"initializationOptions\",\n \"default_settings\", \"env\" and \"tcp_port\" as the first element in the tuple, and the path to the base settings\n filename as the second element in the tuple.\n\n The second element in the tuple is used to handle \"settings\" overrides from users properly. For example, if your\n plugin is called LSP-foobar, you would return \"Packages/LSP-foobar/LSP-foobar.sublime-settings\".\n\n The \"command\", \"initializationOptions\" and \"env\" are subject to template string substitution. The following\n template strings are recognized:\n\n $file\n $file_base_name\n $file_extension\n $file_name\n $file_path\n $platform\n $project\n $project_base_name\n $project_extension\n $project_name\n $project_path\n\n These are just the values from window.extract_variables(). Additionally,\n\n $storage_path The path to the package storage (see AbstractPlugin.storage_path)\n $cache_path sublime.cache_path()\n $temp_dir tempfile.gettempdir()\n $home os.path.expanduser('~')\n $port A random free TCP-port on localhost in case \"tcp_port\" is set to 0. This string template can only\n be used in the \"command\"\n\n The \"command\" and \"env\" are expanded upon starting the subprocess of the Session. The \"initializationOptions\"\n are expanded upon doing the initialize request. \"initializationOptions\" does not expand $port.\n\n When you're managing your own server binary, you would typically place it in sublime.cache_path(). So your\n \"command\" should look like this: \"command\": [\"$cache_path/LSP-foobar/server_binary\", \"--stdio\"]\n \"\"\"\n name = cls.name()\n basename = \"LSP-{}.sublime-settings\".format(name)\n filepath = \"Packages/LSP-{}/{}\".format(name, basename)\n return sublime.load_settings(basename), filepath\n\n @classmethod\n def additional_variables(cls) -> Optional[Dict[str, str]]:\n \"\"\"\n In addition to the above variables, add more variables here to be expanded.\n \"\"\"\n return None\n\n @classmethod\n def storage_path(cls) -> str:\n \"\"\"\n The storage path. Use this as your base directory to install server files. Its path is '$DATA/Package Storage'.\n You should have an additional subdirectory preferrably the same name as your plugin. For instance:\n\n ```python\n from LSP.plugin import AbstractPlugin\n import os\n\n\n class MyPlugin(AbstractPlugin):\n\n @classmethod\n def name(cls) -> str:\n return \"my-plugin\"\n\n @classmethod\n def basedir(cls) -> str:\n # Do everything relative to this directory\n return os.path.join(cls.storage_path(), cls.name())\n ```\n \"\"\"\n return get_storage_path()\n\n @classmethod\n def needs_update_or_installation(cls) -> bool:\n \"\"\"\n If this plugin manages its own server binary, then this is the place to check whether the binary needs\n an update, or whether it needs to be installed before starting the language server.\n \"\"\"\n return False\n\n @classmethod\n def install_or_update(cls) -> None:\n \"\"\"\n Do the actual update/installation of the server binary. 
This runs in a separate thread, so don't spawn threads\n yourself here.\n \"\"\"\n pass\n\n @classmethod\n def can_start(cls, window: sublime.Window, initiating_view: sublime.View,\n workspace_folders: List[WorkspaceFolder], configuration: ClientConfig) -> Optional[str]:\n \"\"\"\n Determines ability to start. This is called after needs_update_or_installation and after install_or_update.\n So you may assume that if you're managing your server binary, then it is already installed when this\n classmethod is called.\n\n :param window: The window\n :param initiating_view: The initiating view\n :param workspace_folders: The workspace folders\n :param configuration: The configuration\n\n :returns: A string describing the reason why we should not start a language server session, or None if we\n should go ahead and start a session.\n \"\"\"\n return None\n\n def __init__(self, weaksession: 'weakref.ref[Session]') -> None:\n \"\"\"\n Constructs a new instance.\n\n :param weaksession: A weak reference to the Session. You can grab a strong reference through\n self.weaksession(), but don't hold on to that reference.\n \"\"\"\n self.weaksession = weaksession\n\n def on_workspace_configuration(self, params: Dict, configuration: Any) -> None:\n \"\"\"\n Override to augment configuration returned for the workspace/configuration request.\n\n :param params: A ConfigurationItem for which configuration is requested.\n :param configuration: The resolved configuration for given params.\n \"\"\"\n pass\n\n def on_pre_server_command(self, command: Mapping[str, Any], done_callback: Callable[[], None]) -> bool:\n \"\"\"\n Intercept a command that is about to be sent to the language server.\n\n :param command: The payload containing a \"command\" and optionally \"arguments\".\n :param done_callback: The callback that you promise to invoke when you return true.\n\n :returns: True if *YOU* will handle this command plugin-side, false otherwise. You must invoke the\n passed `done_callback` when you're done.\n \"\"\"\n return False\n\n\n_plugins = {} # type: Dict[str, Type[AbstractPlugin]]\n\n\ndef _register_plugin_impl(plugin: Type[AbstractPlugin], notify_listener: bool) -> None:\n global _plugins\n name = plugin.name()\n try:\n settings, base_file = plugin.configuration()\n if client_configs.add_external_config(name, settings, base_file, notify_listener):\n _plugins[name] = plugin\n except Exception as ex:\n exception_log('Failed to register plugin \"{}\"'.format(name), ex)\n\n\ndef register_plugin(plugin: Type[AbstractPlugin], notify_listener: bool = True) -> None:\n \"\"\"\n Register an LSP plugin in LSP.\n\n You should put a call to this function in your `plugin_loaded` callback. This way, when your package is disabled\n by a user and then re-enabled again by a user, the changes in state are picked up by LSP, and your language server\n will start for the relevant views.\n\n While your helper package may still work without calling `register_plugin` in `plugin_loaded`, the user will have a\n better experience when you do call this function.\n\n Your implementation should look something like this:\n\n ```python\n from LSP.plugin import register_plugin\n from LSP.plugin import unregister_plugin\n from LSP.plugin import AbstractPlugin\n\n\n class MyPlugin(AbstractPlugin):\n ...\n\n\n def plugin_loaded():\n register_plugin(MyPlugin)\n\n def plugin_unloaded():\n unregister_plugin(MyPlugin)\n ```\n\n If you need to install supplementary files (e.g. 
javascript source code that implements the actual server), do so\n in `AbstractPlugin.install_or_update` in a blocking manner, without the use of Python's `threading` module.\n \"\"\"\n if notify_listener:\n # There is a bug in Sublime Text's `plugin_loaded` callback. When the package is in the list of\n # `\"ignored_packages\"` in Packages/User/Preferences.sublime-settings, and then removed from that list, the\n # sublime.Settings object has missing keys/values. To circumvent this, we run the actual registration one tick\n # later. At that point, the settings object is fully loaded. At least, it seems that way. For more context,\n # see https://github.com/sublimehq/sublime_text/issues/3379\n # and https://github.com/sublimehq/sublime_text/issues/2099\n sublime.set_timeout(lambda: _register_plugin_impl(plugin, notify_listener))\n else:\n _register_plugin_impl(plugin, notify_listener)\n\n\ndef unregister_plugin(plugin: Type[AbstractPlugin]) -> None:\n \"\"\"\n Unregister an LSP plugin in LSP.\n\n You should put a call to this function in your `plugin_unloaded` callback. this way, when your package is disabled\n by a user, your language server is shut down for the views that it is attached to. This results in a good user\n experience.\n \"\"\"\n global _plugins\n name = plugin.name()\n try:\n _plugins.pop(name, None)\n client_configs.remove_external_config(name)\n except Exception as ex:\n exception_log('Failed to unregister plugin \"{}\"'.format(name), ex)\n\n\ndef get_plugin(name: str) -> Optional[Type[AbstractPlugin]]:\n global _plugins\n return _plugins.get(name, None)\n\n\nclass Logger(metaclass=ABCMeta):\n\n @abstractmethod\n def stderr_message(self, message: str) -> None:\n pass\n\n @abstractmethod\n def outgoing_response(self, request_id: Any, params: Any) -> None:\n pass\n\n @abstractmethod\n def outgoing_error_response(self, request_id: Any, error: Error) -> None:\n pass\n\n @abstractmethod\n def outgoing_request(self, request_id: int, method: str, params: Any) -> None:\n pass\n\n @abstractmethod\n def outgoing_notification(self, method: str, params: Any) -> None:\n pass\n\n @abstractmethod\n def incoming_response(self, request_id: int, params: Any, is_error: bool) -> None:\n pass\n\n @abstractmethod\n def incoming_request(self, request_id: Any, method: str, params: Any) -> None:\n pass\n\n @abstractmethod\n def incoming_notification(self, method: str, params: Any, unhandled: bool) -> None:\n pass\n\n\ndef print_to_status_bar(error: Dict[str, Any]) -> None:\n sublime.status_message(error[\"message\"])\n\n\ndef method2attr(method: str) -> str:\n # window/messageRequest -> m_window_messageRequest\n # $/progress -> m___progress\n # client/registerCapability -> m_client_registerCapability\n return 'm_' + ''.join(map(lambda c: c if c.isalpha() else '_', method))\n\n\nclass _RegistrationData:\n\n __slots__ = (\"registration_id\", \"capability_path\", \"registration_path\", \"options\", \"session_buffers\", \"selector\")\n\n def __init__(\n self,\n registration_id: str,\n capability_path: str,\n registration_path: str,\n options: Dict[str, Any]\n ) -> None:\n self.registration_id = registration_id\n self.registration_path = registration_path\n self.capability_path = capability_path\n document_selector = options.pop(\"documentSelector\", None)\n if not isinstance(document_selector, list):\n document_selector = []\n self.selector = DocumentSelector(document_selector)\n self.options = options\n self.session_buffers = WeakSet() # type: WeakSet[SessionBufferProtocol]\n\n def __del__(self) -> 
None:\n for sb in self.session_buffers:\n sb.unregister_capability_async(self.registration_id, self.capability_path, self.registration_path)\n\n def check_applicable(self, sb: SessionBufferProtocol) -> None:\n for sv in sb.session_views:\n if self.selector.matches(sv.view):\n self.session_buffers.add(sb)\n sb.register_capability_async(\n self.registration_id, self.capability_path, self.registration_path, self.options)\n return\n\n\nclass Session(TransportCallbacks):\n\n def __init__(self, manager: Manager, logger: Logger, workspace_folders: List[WorkspaceFolder],\n config: ClientConfig, plugin_class: Optional[Type[AbstractPlugin]]) -> None:\n self.transport = None # type: Optional[Transport]\n self.request_id = 0 # Our request IDs are always integers.\n self._logger = logger\n self._response_handlers = {} # type: Dict[int, Tuple[Request, Callable, Optional[Callable[[Any], None]]]]\n self.config = config\n self.manager = weakref.ref(manager)\n self.window = manager.window()\n self.state = ClientStates.STARTING\n self.capabilities = Capabilities()\n self.exiting = False\n self._registrations = {} # type: Dict[str, _RegistrationData]\n self._init_callback = None # type: Optional[InitCallback]\n self._initialize_error = None # type: Optional[Tuple[int, Optional[Exception]]]\n self._views_opened = 0\n self._workspace_folders = workspace_folders\n self._session_views = WeakSet() # type: WeakSet[SessionViewProtocol]\n self._session_buffers = WeakSet() # type: WeakSet[SessionBufferProtocol]\n self._progress = {} # type: Dict[Any, Dict[str, str]]\n self._plugin_class = plugin_class\n self._plugin = None # type: Optional[AbstractPlugin]\n\n def __del__(self) -> None:\n debug(self.config.command, \"ended\")\n for token in self._progress.keys():\n key = self._progress_status_key(token)\n for sv in self.session_views_async():\n if sv.view.is_valid():\n sv.view.erase_status(key)\n\n def __getattr__(self, name: str) -> Any:\n \"\"\"\n If we don't have a request/notification handler, look up the request/notification handler in the plugin.\n \"\"\"\n if name.startswith('m_'):\n attr = getattr(self._plugin, name)\n if attr is not None:\n return attr\n raise AttributeError(name)\n\n # TODO: Create an assurance that the API doesn't change here as it can be used by plugins.\n def get_workspace_folders(self) -> List[WorkspaceFolder]:\n return self._workspace_folders\n\n # --- session view management --------------------------------------------------------------------------------------\n\n def register_session_view_async(self, sv: SessionViewProtocol) -> None:\n self._session_views.add(sv)\n self._views_opened += 1\n\n def unregister_session_view_async(self, sv: SessionViewProtocol) -> None:\n self._session_views.discard(sv)\n if not self._session_views:\n current_count = self._views_opened\n debounced(self.end_async, 3000, lambda: self._views_opened == current_count, async_thread=True)\n\n def session_views_async(self) -> Generator[SessionViewProtocol, None, None]:\n \"\"\"\n It is only safe to iterate over this in the async thread\n \"\"\"\n yield from self._session_views\n\n def session_view_for_view_async(self, view: sublime.View) -> Optional[SessionViewProtocol]:\n for sv in self.session_views_async():\n if sv.view == view:\n return sv\n return None\n\n # --- session buffer management ------------------------------------------------------------------------------------\n\n def register_session_buffer_async(self, sb: SessionBufferProtocol) -> None:\n self._session_buffers.add(sb)\n for data in 
self._registrations.values():\n data.check_applicable(sb)\n\n def unregister_session_buffer_async(self, sb: SessionBufferProtocol) -> None:\n self._session_buffers.discard(sb)\n\n def session_buffers_async(self) -> Generator[SessionBufferProtocol, None, None]:\n \"\"\"\n It is only safe to iterate over this in the async thread\n \"\"\"\n yield from self._session_buffers\n\n def get_session_buffer_for_uri_async(self, uri: str) -> Optional[SessionBufferProtocol]:\n file_name = uri_to_filename(uri)\n for sb in self.session_buffers_async():\n try:\n if sb.file_name == file_name or os.path.samefile(file_name, sb.file_name):\n return sb\n except FileNotFoundError:\n pass\n return None\n\n # --- capability observers -----------------------------------------------------------------------------------------\n\n def can_handle(self, view: sublime.View, capability: Optional[str], inside_workspace: bool) -> bool:\n file_name = view.file_name() or ''\n if (self.config.match_view(view)\n and self.state == ClientStates.READY\n and self.handles_path(file_name, inside_workspace)):\n # If there's no capability requirement then this session can handle the view\n if capability is None:\n return True\n sv = self.session_view_for_view_async(view)\n if sv:\n return sv.has_capability_async(capability)\n else:\n return self.has_capability(capability)\n return False\n\n def has_capability(self, capability: str) -> bool:\n value = self.get_capability(capability)\n return value is not False and value is not None\n\n def get_capability(self, capability: str) -> Optional[Any]:\n return self.capabilities.get(capability)\n\n def should_notify_did_open(self) -> bool:\n return self.capabilities.should_notify_did_open()\n\n def text_sync_kind(self) -> int:\n return self.capabilities.text_sync_kind()\n\n def should_notify_did_change(self) -> bool:\n return self.capabilities.should_notify_did_change()\n\n def should_notify_did_change_workspace_folders(self) -> bool:\n return self.capabilities.should_notify_did_change_workspace_folders()\n\n def should_notify_will_save(self) -> bool:\n return self.capabilities.should_notify_will_save()\n\n def should_notify_did_save(self) -> Tuple[bool, bool]:\n return self.capabilities.should_notify_did_save()\n\n def should_notify_did_close(self) -> bool:\n return self.capabilities.should_notify_did_close()\n\n # --- misc methods -------------------------------------------------------------------------------------------------\n\n def handles_path(self, file_path: Optional[str], inside_workspace: bool) -> bool:\n if self._supports_workspace_folders():\n # A workspace-aware language server handles any path, both inside and outside the workspaces.\n return True\n # If we end up here then the language server is workspace-unaware. This means there can be more than one\n # language server with the same config name. 
So we have to actually do the subpath checks.\n if not file_path:\n return False\n if not self._workspace_folders or not inside_workspace:\n return True\n for folder in self._workspace_folders:\n if is_subpath_of(file_path, folder.path):\n return True\n return False\n\n def update_folders(self, folders: List[WorkspaceFolder]) -> None:\n if self.should_notify_did_change_workspace_folders():\n added, removed = diff(self._workspace_folders, folders)\n if added or removed:\n params = {\n \"event\": {\n \"added\": [a.to_lsp() for a in added],\n \"removed\": [r.to_lsp() for r in removed]\n }\n }\n self.send_notification(Notification.didChangeWorkspaceFolders(params))\n if self._supports_workspace_folders():\n self._workspace_folders = folders\n else:\n self._workspace_folders = folders[:1]\n\n def initialize_async(self, variables: Dict[str, str], transport: Transport, init_callback: InitCallback) -> None:\n self.transport = transport\n params = get_initialize_params(variables, self._workspace_folders, self.config)\n self._init_callback = init_callback\n self.send_request_async(\n Request.initialize(params), self._handle_initialize_success, self._handle_initialize_error)\n\n def _handle_initialize_success(self, result: Any) -> None:\n self.capabilities.assign(result.get('capabilities', dict()))\n if self._workspace_folders and not self._supports_workspace_folders():\n self._workspace_folders = self._workspace_folders[:1]\n self.state = ClientStates.READY\n if self._plugin_class is not None:\n self._plugin = self._plugin_class(weakref.ref(self))\n self.send_notification(Notification.initialized())\n self._maybe_send_did_change_configuration()\n execute_commands = self.get_capability('executeCommandProvider.commands')\n if execute_commands:\n debug(\"{}: Supported execute commands: {}\".format(self.config.name, execute_commands))\n code_action_kinds = self.get_capability('codeActionProvider.codeActionKinds')\n if code_action_kinds:\n debug('{}: supported code action kinds: {}'.format(self.config.name, code_action_kinds))\n if self._init_callback:\n self._init_callback(self, False)\n self._init_callback = None\n\n def _handle_initialize_error(self, result: Any) -> None:\n self._initialize_error = (result.get('code', -1), Exception(result.get('message', 'Error initializing server')))\n # Init callback called after transport is closed to avoid pre-mature GC of Session.\n self.end_async()\n\n def call_manager(self, method: str, *args: Any) -> None:\n mgr = self.manager()\n if mgr:\n getattr(mgr, method)(*args)\n\n def clear_diagnostics_async(self) -> None:\n # XXX: Remove this functionality?\n for sb in self.session_buffers_async():\n sb.on_diagnostics_async([], None)\n\n def on_stderr_message(self, message: str) -> None:\n self.call_manager('handle_stderr_log', self, message)\n self._logger.stderr_message(message)\n\n def _supports_workspace_folders(self) -> bool:\n return self.has_capability(\"workspace.workspaceFolders.supported\")\n\n def _maybe_send_did_change_configuration(self) -> None:\n if self.config.settings:\n self.send_notification(did_change_configuration(self.config.settings, self._template_variables()))\n\n def _template_variables(self) -> Dict[str, str]:\n variables = extract_variables(self.window)\n if self._plugin_class is not None:\n extra_vars = self._plugin_class.additional_variables()\n if extra_vars:\n variables.update(extra_vars)\n return variables\n\n def run_command(self, command: Mapping[str, Any]) -> Promise:\n \"\"\"Run a command from any thread. 
Your .then() continuations will run in Sublime's worker thread.\"\"\"\n if self._plugin:\n promise, callback = Promise.packaged_task()\n if self._plugin.on_pre_server_command(command, callback):\n return promise\n # TODO: Our Promise class should be able to handle errors/exceptions\n return Promise(\n lambda resolve: self.send_request(\n Request.executeCommand(command),\n resolve,\n lambda err: resolve(Error(err[\"code\"], err[\"message\"], err.get(\"data\")))\n )\n )\n\n def run_code_action_async(self, code_action: Mapping[str, Any]) -> Promise:\n command = code_action.get(\"command\")\n if isinstance(command, str):\n # This is actually a command.\n return self.run_command(code_action)\n # At this point it cannot be a command anymore, it has to be a proper code action.\n # A code action can have an edit and/or command. Note that it can have *both*. In case both are present, we\n # must apply the edits before running the command.\n edit = code_action.get(\"edit\")\n promise = self._apply_workspace_edit_async(edit) if edit else Promise.resolve()\n return promise.then(lambda _: self.run_command(command) if isinstance(command, dict) else Promise.resolve())\n\n def _apply_workspace_edit_async(self, edit: Any) -> Promise:\n \"\"\"\n Apply workspace edits, and return a promise that resolves on the async thread again after the edits have been\n applied.\n \"\"\"\n changes = parse_workspace_edit(edit)\n return Promise.on_main_thread() \\\n .then(lambda _: apply_workspace_edit(self.window, changes)) \\\n .then(Promise.on_async_thread)\n\n # --- server request handlers --------------------------------------------------------------------------------------\n\n def m_window_showMessageRequest(self, params: Any, request_id: Any) -> None:\n \"\"\"handles the window/showMessageRequest request\"\"\"\n self.call_manager('handle_message_request', self, params, request_id)\n\n def m_window_showMessage(self, params: Any) -> None:\n \"\"\"handles the window/showMessage notification\"\"\"\n self.call_manager('handle_show_message', self, params)\n\n def m_window_logMessage(self, params: Any) -> None:\n \"\"\"handles the window/logMessage notification\"\"\"\n self.call_manager('handle_log_message', self, params)\n\n def m_workspace_workspaceFolders(self, _: Any, request_id: Any) -> None:\n \"\"\"handles the workspace/workspaceFolders request\"\"\"\n self.send_response(Response(request_id, [wf.to_lsp() for wf in self._workspace_folders]))\n\n def m_workspace_configuration(self, params: Dict[str, Any], request_id: Any) -> None:\n \"\"\"handles the workspace/configuration request\"\"\"\n items = [] # type: List[Any]\n requested_items = params.get(\"items\") or []\n for requested_item in requested_items:\n configuration = self.config.settings.copy(requested_item.get('section') or None)\n if self._plugin:\n self._plugin.on_workspace_configuration(requested_item, configuration)\n items.append(configuration)\n self.send_response(Response(request_id, sublime.expand_variables(items, self._template_variables())))\n\n def m_workspace_applyEdit(self, params: Any, request_id: Any) -> None:\n \"\"\"handles the workspace/applyEdit request\"\"\"\n self._apply_workspace_edit_async(params.get('edit', {})).then(\n lambda _: self.send_response(Response(request_id, {\"applied\": True})))\n\n def m_textDocument_publishDiagnostics(self, params: Any) -> None:\n \"\"\"handles the textDocument/publishDiagnostics notification\"\"\"\n uri = params[\"uri\"]\n sb = self.get_session_buffer_for_uri_async(uri)\n if sb:\n 
sb.on_diagnostics_async(params[\"diagnostics\"], params.get(\"version\"))\n\n def m_client_registerCapability(self, params: Any, request_id: Any) -> None:\n \"\"\"handles the client/registerCapability request\"\"\"\n registrations = params[\"registrations\"]\n for registration in registrations:\n registration_id = registration[\"id\"]\n capability_path, registration_path = method_to_capability(registration[\"method\"])\n debug(\"{}: registering capability:\".format(self.config.name), capability_path)\n options = registration.get(\"registerOptions\") # type: Optional[Dict[str, Any]]\n if not isinstance(options, dict):\n options = {}\n data = _RegistrationData(registration_id, capability_path, registration_path, options)\n self._registrations[registration_id] = data\n if data.selector:\n # The registration is applicable only to certain buffers, so let's check which buffers apply.\n for sb in self.session_buffers_async():\n data.check_applicable(sb)\n else:\n # The registration applies globally to all buffers.\n self.capabilities.register(registration_id, capability_path, registration_path, options)\n self.send_response(Response(request_id, None))\n\n def m_client_unregisterCapability(self, params: Any, request_id: Any) -> None:\n \"\"\"handles the client/unregisterCapability request\"\"\"\n unregistrations = params[\"unregisterations\"] # typo in the official specification\n for unregistration in unregistrations:\n registration_id = unregistration[\"id\"]\n capability_path, registration_path = method_to_capability(unregistration[\"method\"])\n debug(\"{}: unregistering capability:\".format(self.config.name), capability_path)\n data = self._registrations.pop(registration_id, None)\n if not data:\n message = \"no registration data found for registration ID {}\".format(registration_id)\n self.send_error_response(request_id, Error(ErrorCode.InvalidParams, message))\n return\n elif not data.selector:\n self.capabilities.unregister(registration_id, capability_path, registration_path)\n self.send_response(Response(request_id, None))\n\n def m_window_workDoneProgress_create(self, params: Any, request_id: Any) -> None:\n \"\"\"handles the window/workDoneProgress/create request\"\"\"\n self._progress[params['token']] = dict()\n self.send_response(Response(request_id, None))\n\n def _progress_status_key(self, token: str) -> str:\n return \"lspprogress{}{}\".format(self.config.name, token)\n\n def m___progress(self, params: Any) -> None:\n \"\"\"handles the $/progress notification\"\"\"\n token = params['token']\n data = self._progress.get(token)\n if not isinstance(data, dict):\n debug('unknown $/progress token: {}'.format(token))\n return\n value = params['value']\n kind = value['kind']\n key = self._progress_status_key(token)\n if kind == 'begin':\n data['title'] = value['title'] # mandatory\n data['message'] = value.get('message') # optional\n progress_string = self._progress_string(data, value)\n for sv in self.session_views_async():\n sv.view.set_status(key, progress_string)\n elif kind == 'report':\n progress_string = self._progress_string(data, value)\n for sv in self.session_views_async():\n sv.view.set_status(key, progress_string)\n elif kind == 'end':\n message = value.get('message')\n if message:\n self.window.status_message(data['title'] + ': ' + message)\n for sv in self.session_views_async():\n sv.view.erase_status(key)\n self._progress.pop(token, None)\n\n def _progress_string(self, data: Dict[str, Any], value: Dict[str, Any]) -> str:\n status_msg = data['title']\n progress_message = 
value.get('message') # optional\n progress_percentage = value.get('percentage') # optional\n if progress_message:\n data['message'] = progress_message\n status_msg += ': ' + progress_message\n elif data['message']: # reuse last known message if not present\n status_msg += ': ' + data['message']\n if progress_percentage:\n fmt = ' ({:.1f}%)' if isinstance(progress_percentage, float) else ' ({}%)'\n status_msg += fmt.format(progress_percentage)\n return status_msg\n\n # --- shutdown dance -----------------------------------------------------------------------------------------------\n\n def end_async(self) -> None:\n # TODO: Ensure this function is called only from the async thread\n if self.exiting:\n return\n self.exiting = True\n self._plugin = None\n for sv in self.session_views_async():\n sv.shutdown_async()\n self.capabilities.clear()\n self._registrations.clear()\n self.state = ClientStates.STOPPING\n self.send_request_async(Request.shutdown(), self._handle_shutdown_result, self._handle_shutdown_result)\n\n def _handle_shutdown_result(self, _: Any) -> None:\n self.exit()\n\n def on_transport_close(self, exit_code: int, exception: Optional[Exception]) -> None:\n self.exiting = True\n self.state = ClientStates.STOPPING\n self.transport = None\n self._response_handlers.clear()\n if self._initialize_error:\n # Override potential exit error with a saved one.\n exit_code, exception = self._initialize_error\n mgr = self.manager()\n if mgr:\n if self._init_callback:\n self._init_callback(self, True)\n self._init_callback = None\n mgr.on_post_exit_async(self, exit_code, exception)\n\n # --- RPC message handling ----------------------------------------------------------------------------------------\n\n def send_request_async(\n self,\n request: Request,\n on_result: Callable[[Any], None],\n on_error: Optional[Callable[[Any], None]] = None\n ) -> None:\n \"\"\"You must call this method from Sublime's worker thread. Callbacks will run in Sublime's worker thread.\"\"\"\n self.request_id += 1\n request_id = self.request_id\n self._response_handlers[request_id] = (request, on_result, on_error)\n if request.view:\n sv = self.session_view_for_view_async(request.view)\n if sv:\n sv.on_request_started_async(request_id, request)\n else:\n # This is a workspace or window request\n for sv in self.session_views_async():\n sv.on_request_started_async(request_id, request)\n self._logger.outgoing_request(request_id, request.method, request.params)\n self.send_payload(request.to_payload(request_id))\n\n def send_request(\n self,\n request: Request,\n on_result: Callable[[Any], None],\n on_error: Optional[Callable[[Any], None]] = None,\n ) -> None:\n \"\"\"You can call this method from any thread. 
Callbacks will run in Sublime's worker thread.\"\"\"\n sublime.set_timeout_async(functools.partial(self.send_request_async, request, on_result, on_error))\n\n def send_notification(self, notification: Notification) -> None:\n self._logger.outgoing_notification(notification.method, notification.params)\n self.send_payload(notification.to_payload())\n\n def send_response(self, response: Response) -> None:\n self._logger.outgoing_response(response.request_id, response.result)\n self.send_payload(response.to_payload())\n\n def send_error_response(self, request_id: Any, error: Error) -> None:\n self._logger.outgoing_error_response(request_id, error)\n self.send_payload({'jsonrpc': '2.0', 'id': request_id, 'error': error.to_lsp()})\n\n def exit(self) -> None:\n self.send_notification(Notification.exit())\n try:\n self.transport.close() # type: ignore\n except AttributeError:\n pass\n\n def send_payload(self, payload: Dict[str, Any]) -> None:\n try:\n self.transport.send(payload) # type: ignore\n except AttributeError:\n pass\n\n def deduce_payload(\n self,\n payload: Dict[str, Any]\n ) -> Tuple[Optional[Callable], Any, Optional[int], Optional[str], Optional[str]]:\n if \"method\" in payload:\n method = payload[\"method\"]\n handler = self._get_handler(method)\n result = payload.get(\"params\")\n if \"id\" in payload:\n req_id = payload[\"id\"]\n self._logger.incoming_request(req_id, method, result)\n if handler is None:\n self.send_error_response(req_id, Error(ErrorCode.MethodNotFound, method))\n else:\n tup = (handler, result, req_id, \"request\", method)\n return tup\n else:\n res = (handler, result, None, \"notification\", method)\n self._logger.incoming_notification(method, result, res[0] is None)\n return res\n elif \"id\" in payload:\n response_id = int(payload[\"id\"])\n handler, result, is_error = self.response_handler(response_id, payload)\n response_tuple = (handler, result, None, None, None)\n self._logger.incoming_response(response_id, result, is_error)\n return response_tuple\n else:\n debug(\"Unknown payload type: \", payload)\n return (None, None, None, None, None)\n\n def on_payload(self, payload: Dict[str, Any]) -> None:\n handler, result, req_id, typestr, method = self.deduce_payload(payload)\n if handler:\n try:\n if req_id is None:\n # notification or response\n handler(result)\n else:\n # request\n try:\n handler(result, req_id)\n except Error as err:\n self.send_error_response(req_id, err)\n except Exception as ex:\n self.send_error_response(req_id, Error.from_exception(ex))\n raise\n except Exception as err:\n exception_log(\"Error handling {}\".format(typestr), err)\n\n def response_handler(self, response_id: int, response: Dict[str, Any]) -> Tuple[Optional[Callable], Any, bool]:\n request, handler, error_handler = self._response_handlers.pop(response_id, (None, None, None))\n if not request:\n error = {\"code\": ErrorCode.InvalidParams, \"message\": \"unknown response ID {}\".format(response_id)}\n return (print_to_status_bar, error, True)\n if request.view:\n sv = self.session_view_for_view_async(request.view)\n if sv:\n sv.on_request_finished_async(response_id)\n else:\n for sv in self.session_views_async():\n sv.on_request_finished_async(response_id)\n if \"result\" in response and \"error\" not in response:\n return (handler, response[\"result\"], False)\n if not error_handler:\n error_handler = print_to_status_bar\n if \"result\" not in response and \"error\" in response:\n error = response[\"error\"]\n else:\n error = {\"code\": ErrorCode.InvalidParams, 
\"message\": \"invalid response payload\"}\n return (error_handler, error, True)\n\n def _get_handler(self, method: str) -> Optional[Callable]:\n return getattr(self, method2attr(method), None)\n", "path": "plugin/core/sessions.py" } ]
diff --git a/README.md b/README.md index f592f8a75..b5acb47a8 100644 --- a/README.md +++ b/README.md @@ -147,3 +147,4 @@ If you have any problems, see the [troubleshooting](https://lsp.readthedocs.io/e - ✅ workDoneProgress - ✅ create - ❌ cancel +- ✅ showMessage request additionalPropertiesSupport diff --git a/plugin/core/sessions.py b/plugin/core/sessions.py index 53784f2aa..cad73f893 100644 --- a/plugin/core/sessions.py +++ b/plugin/core/sessions.py @@ -229,6 +229,11 @@ def get_initialize_params(variables: Dict[str, str], workspace_folders: List[Wor "configuration": True }, "window": { + "showMessage": { + "messageActionItem": { + "additionalPropertiesSupport": True + } + }, "workDoneProgress": True } }
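For context, the new `additionalPropertiesSupport` flag tells the language server that the client will preserve any extra, non-spec properties the server attaches to a `MessageActionItem` and send them back in the `window/showMessageRequest` response. Below is a minimal sketch of the `"window"` slice of the initialize params after this change; the shape mirrors the hunk above rather than quoting the final file, and the variable name is illustrative only:

```python
# Excerpt of the client capabilities advertised under "window" once the
# diff above is applied (keys are from the LSP specification).
window_capabilities = {
    "showMessage": {
        "messageActionItem": {
            # The client echoes back additional properties the server put on
            # a MessageActionItem when answering window/showMessageRequest.
            "additionalPropertiesSupport": True,
        }
    },
    "workDoneProgress": True,
}
```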
getsentry__sentry-19997
Invalid List-Id header in Sentry email notifications.
## Important Details

How are you running Sentry?

SaaS (sentry.io)

## Description

Email notifications set a List-Id header that is invalid according to [RFC 2919](https://tools.ietf.org/html/rfc2919).

## Steps to Reproduce

1. Receive an email from sentry.io
2. Look at the raw headers for the email

The header looks like:

```
List-Id: project.team.getsentry.com
```

### What you expected to happen

The List-Id header should be valid, for example:

```
List-Id: <project.team.getsentry.com>
```

### Possible Solution

Add `<>` around your List-Id values. Ideally set a display name as well.
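For illustration, here is a minimal sketch of a compliant header built with only the Python standard library; this is not Sentry's actual code. The `project.team.getsentry.com` value comes from the report above, and the `My Project` display name is a made-up assumption:

```python
# Minimal sketch of an RFC 2919-compliant List-Id header using the stdlib.
from email.message import EmailMessage

msg = EmailMessage()

# Invalid form reported above: a bare dot-atom with no angle brackets.
# msg["List-Id"] = "project.team.getsentry.com"

# Valid form: the list identifier wrapped in <>, optionally preceded by a
# display name ("My Project" here is purely illustrative).
msg["List-Id"] = "My Project <project.team.getsentry.com>"

print(msg["List-Id"])  # -> My Project <project.team.getsentry.com>
```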
[ { "content": "from __future__ import absolute_import\n\nimport logging\nimport os\nimport six\nimport subprocess\nimport tempfile\nimport time\n\nfrom email.utils import parseaddr\nfrom functools import partial\nfrom operator import attrgetter\nfrom random import randrange\n\nimport lxml\nimport toronado\nfrom django.conf import settings\nfrom django.core import mail\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.core.mail.backends.base import BaseEmailBackend\nfrom django.core.signing import BadSignature, Signer\nfrom django.utils.crypto import constant_time_compare\nfrom django.utils.encoding import force_bytes, force_str, force_text\n\nfrom sentry import options\nfrom sentry.logging import LoggingFormat\nfrom sentry.models import Activity, Group, GroupEmailThread, Project, User, UserOption\nfrom sentry.utils import metrics\nfrom sentry.utils.safe import safe_execute\nfrom sentry.utils.strings import is_valid_dot_atom\nfrom sentry.web.helpers import render_to_string\nfrom sentry.utils.compat import map\n\n# The maximum amount of recipients to display in human format.\nMAX_RECIPIENTS = 5\n\n# The fake TLD used to construct email addresses when one is required,\n# for example by automatically generated SSO accounts.\nFAKE_EMAIL_TLD = \".sentry-fake\"\n\nlogger = logging.getLogger(\"sentry.mail\")\n\n\ndef inline_css(value):\n tree = lxml.html.document_fromstring(value)\n toronado.inline(tree)\n # CSS media query support is inconsistent when the DOCTYPE declaration is\n # missing, so we force it to HTML5 here.\n return lxml.html.tostring(tree, doctype=\"<!DOCTYPE html>\")\n\n\nclass _CaseInsensitiveSigner(Signer):\n \"\"\"\n Generate a signature that is comprised of only lowercase letters.\n\n WARNING: Do not use this for anything that needs to be cryptographically\n secure! This is losing entropy and has a much higher chance of collision\n due to dropping to lowercase letters. For our purposes, this lack of entropy\n is ok and doesn't pose a risk.\n\n NOTE: This is needed strictly for signatures used in email addresses. 
Some\n clients, coughAirmailcough, treat email addresses as being case-insensitive,\n and sends the value as all lowercase.\n \"\"\"\n\n def signature(self, value):\n sig = super(_CaseInsensitiveSigner, self).signature(value)\n return sig.lower()\n\n def unsign(self, signed_value):\n # This unsign is identical to subclass except for the lowercasing\n # See: https://github.com/django/django/blob/1.6.11/django/core/signing.py#L165-L172\n signed_value = force_str(signed_value)\n if self.sep not in signed_value:\n raise BadSignature('No \"%s\" found in value' % self.sep)\n value, sig = signed_value.rsplit(self.sep, 1)\n if constant_time_compare(sig.lower(), self.signature(value)):\n return force_text(value)\n raise BadSignature('Signature \"%s\" does not match' % sig)\n\n\nsigner = _CaseInsensitiveSigner()\n\n\ndef email_to_group_id(address):\n \"\"\"\n Email address should be in the form of:\n {group_id}+{signature}@example.com\n \"\"\"\n address = address.split(\"@\", 1)[0]\n signed_data = address.replace(\"+\", \":\")\n return int(force_bytes(signer.unsign(signed_data)))\n\n\ndef group_id_to_email(group_id):\n signed_data = signer.sign(six.text_type(group_id))\n return \"@\".join(\n (\n signed_data.replace(\":\", \"+\"),\n options.get(\"mail.reply-hostname\") or get_from_email_domain(),\n )\n )\n\n\ndef domain_from_email(email):\n email = parseaddr(email)[1]\n try:\n return email.split(\"@\", 1)[1]\n except IndexError:\n # The email address is likely malformed or something\n return email\n\n\n# Slightly modified version of Django's\n# `django.core.mail.message:make_msgid` because we need\n# to override the domain. If we ever upgrade to\n# django 1.8, we can/should replace this.\ndef make_msgid(domain):\n \"\"\"Returns a string suitable for RFC 2822 compliant Message-ID, e.g:\n <[email protected]>\n Optional idstring if given is a string used to strengthen the\n uniqueness of the message id. Optional domain if given provides the\n portion of the message id after the '@'. 
It defaults to the locally\n defined hostname.\n \"\"\"\n timeval = time.time()\n utcdate = time.strftime(\"%Y%m%d%H%M%S\", time.gmtime(timeval))\n pid = os.getpid()\n randint = randrange(100000)\n msgid = \"<%s.%s.%s@%s>\" % (utcdate, pid, randint, domain)\n return msgid\n\n\n# cache the domain_from_email calculation\n# This is just a tuple of (email, email-domain)\n_from_email_domain_cache = (None, None)\n\n\ndef get_from_email_domain():\n global _from_email_domain_cache\n from_ = options.get(\"mail.from\")\n if not _from_email_domain_cache[0] == from_:\n _from_email_domain_cache = (from_, domain_from_email(from_))\n return _from_email_domain_cache[1]\n\n\ndef create_fake_email(unique_id, namespace):\n \"\"\"\n Generate a fake email of the form: {unique_id}@{namespace}{FAKE_EMAIL_TLD}\n\n For example: [email protected]\n \"\"\"\n return u\"{}@{}{}\".format(unique_id, namespace, FAKE_EMAIL_TLD)\n\n\ndef is_fake_email(email):\n \"\"\"\n Returns True if the provided email matches the fake email pattern.\n \"\"\"\n return email.endswith(FAKE_EMAIL_TLD)\n\n\ndef get_email_addresses(user_ids, project=None):\n pending = set(user_ids)\n results = {}\n\n if project:\n queryset = UserOption.objects.filter(project=project, user__in=pending, key=\"mail:email\")\n for option in (o for o in queryset if o.value and not is_fake_email(o.value)):\n results[option.user_id] = option.value\n pending.discard(option.user_id)\n\n if pending:\n queryset = User.objects.filter(pk__in=pending, is_active=True)\n for (user_id, email) in queryset.values_list(\"id\", \"email\"):\n if email and not is_fake_email(email):\n results[user_id] = email\n pending.discard(user_id)\n\n if pending:\n logger.warning(\n \"Could not resolve email addresses for user IDs in %r, discarding...\", pending\n )\n\n return results\n\n\nclass ListResolver(object):\n \"\"\"\n Manages the generation of RFC 2919 compliant list-id strings from varying\n objects types.\n \"\"\"\n\n class UnregisteredTypeError(Exception):\n \"\"\"\n Error raised when attempting to build a list-id from an unregisted object type.\n \"\"\"\n\n def __init__(self, namespace, type_handlers):\n assert is_valid_dot_atom(namespace)\n\n # The list-id-namespace that will be used when generating the list-id\n # string. This should be a domain name under the control of the\n # generator (see RFC 2919.)\n self.__namespace = namespace\n\n # A mapping of classes to functions that accept an instance of that\n # class, returning a tuple of values that will be used to generate the\n # list label. Returned values must be valid RFC 2822 dot-atom-text\n # values.\n self.__type_handlers = type_handlers\n\n def __call__(self, instance):\n \"\"\"\n Build a list-id string from an instance.\n\n Raises ``UnregisteredTypeError`` if there is no registered handler for\n the instance type. 
Raises ``AssertionError`` if a valid list-id string\n cannot be generated from the values returned by the type handler.\n \"\"\"\n try:\n handler = self.__type_handlers[type(instance)]\n except KeyError:\n raise self.UnregisteredTypeError(\n u\"Cannot generate mailing list identifier for {!r}\".format(instance)\n )\n\n label = \".\".join(map(six.text_type, handler(instance)))\n assert is_valid_dot_atom(label)\n\n return u\"{}.{}\".format(label, self.__namespace)\n\n\ndefault_list_type_handlers = {\n Activity: attrgetter(\"project.slug\", \"project.organization.slug\"),\n Project: attrgetter(\"slug\", \"organization.slug\"),\n Group: attrgetter(\"project.slug\", \"organization.slug\"),\n}\n\nmake_listid_from_instance = ListResolver(\n options.get(\"mail.list-namespace\"), default_list_type_handlers\n)\n\n\nclass MessageBuilder(object):\n def __init__(\n self,\n subject,\n context=None,\n template=None,\n html_template=None,\n body=\"\",\n html_body=None,\n headers=None,\n reference=None,\n reply_reference=None,\n from_email=None,\n type=None,\n ):\n assert not (body and template)\n assert not (html_body and html_template)\n assert context or not (template or html_template)\n\n if headers is None:\n headers = {}\n\n self.subject = subject\n self.context = context or {}\n self.template = template\n self.html_template = html_template\n self._txt_body = body\n self._html_body = html_body\n self.headers = headers\n self.reference = reference # The object that generated this message\n self.reply_reference = reply_reference # The object this message is replying about\n self.from_email = from_email or options.get(\"mail.from\")\n self._send_to = set()\n self.type = type if type else \"generic\"\n\n if reference is not None and \"List-Id\" not in headers:\n try:\n headers[\"List-Id\"] = make_listid_from_instance(reference)\n except ListResolver.UnregisteredTypeError as error:\n logger.debug(six.text_type(error))\n except AssertionError as error:\n logger.warning(six.text_type(error))\n\n def __render_html_body(self):\n html_body = None\n if self.html_template:\n html_body = render_to_string(self.html_template, self.context)\n else:\n html_body = self._html_body\n\n if html_body is not None:\n return inline_css(html_body)\n\n def __render_text_body(self):\n if self.template:\n return render_to_string(self.template, self.context)\n return self._txt_body\n\n def add_users(self, user_ids, project=None):\n self._send_to.update(get_email_addresses(user_ids, project).values())\n\n def build(self, to, reply_to=None, cc=None, bcc=None):\n if self.headers is None:\n headers = {}\n else:\n headers = self.headers.copy()\n\n if options.get(\"mail.enable-replies\") and \"X-Sentry-Reply-To\" in headers:\n reply_to = headers[\"X-Sentry-Reply-To\"]\n else:\n reply_to = set(reply_to or ())\n reply_to.discard(to)\n reply_to = \", \".join(reply_to)\n\n if reply_to:\n headers.setdefault(\"Reply-To\", reply_to)\n\n # Every message sent needs a unique message id\n message_id = make_msgid(get_from_email_domain())\n headers.setdefault(\"Message-Id\", message_id)\n\n subject = self.subject\n\n if self.reply_reference is not None:\n reference = self.reply_reference\n subject = \"Re: %s\" % subject\n else:\n reference = self.reference\n\n if isinstance(reference, Group):\n thread, created = GroupEmailThread.objects.get_or_create(\n email=to,\n group=reference,\n defaults={\"project\": reference.project, \"msgid\": message_id},\n )\n if not created:\n headers.setdefault(\"In-Reply-To\", thread.msgid)\n 
headers.setdefault(\"References\", thread.msgid)\n\n msg = EmailMultiAlternatives(\n subject=subject.splitlines()[0],\n body=self.__render_text_body(),\n from_email=self.from_email,\n to=(to,),\n cc=cc or (),\n bcc=bcc or (),\n headers=headers,\n )\n\n html_body = self.__render_html_body()\n if html_body:\n msg.attach_alternative(html_body.decode(\"utf-8\"), \"text/html\")\n\n return msg\n\n def get_built_messages(self, to=None, cc=None, bcc=None):\n send_to = set(to or ())\n send_to.update(self._send_to)\n results = [\n self.build(to=email, reply_to=send_to, cc=cc, bcc=bcc) for email in send_to if email\n ]\n if not results:\n logger.debug(\"Did not build any messages, no users to send to.\")\n return results\n\n def format_to(self, to):\n if not to:\n return \"\"\n if len(to) > MAX_RECIPIENTS:\n to = to[:MAX_RECIPIENTS] + [u\"and {} more.\".format(len(to[MAX_RECIPIENTS:]))]\n return \", \".join(to)\n\n def send(self, to=None, cc=None, bcc=None, fail_silently=False):\n return send_messages(\n self.get_built_messages(to, cc=cc, bcc=bcc), fail_silently=fail_silently\n )\n\n def send_async(self, to=None, cc=None, bcc=None):\n from sentry.tasks.email import send_email\n\n fmt = options.get(\"system.logging-format\")\n messages = self.get_built_messages(to, cc=cc, bcc=bcc)\n extra = {\"message_type\": self.type}\n loggable = [v for k, v in six.iteritems(self.context) if hasattr(v, \"id\")]\n for context in loggable:\n extra[\"%s_id\" % type(context).__name__.lower()] = context.id\n\n log_mail_queued = partial(logger.info, \"mail.queued\", extra=extra)\n for message in messages:\n safe_execute(send_email.delay, message=message, _with_transaction=False)\n extra[\"message_id\"] = message.extra_headers[\"Message-Id\"]\n metrics.incr(\"email.queued\", instance=self.type, skip_internal=False)\n if fmt == LoggingFormat.HUMAN:\n extra[\"message_to\"] = (self.format_to(message.to),)\n log_mail_queued()\n elif fmt == LoggingFormat.MACHINE:\n for recipient in message.to:\n extra[\"message_to\"] = recipient\n log_mail_queued()\n\n\ndef send_messages(messages, fail_silently=False):\n connection = get_connection(fail_silently=fail_silently)\n sent = connection.send_messages(messages)\n metrics.incr(\"email.sent\", len(messages), skip_internal=False)\n for message in messages:\n extra = {\n \"message_id\": message.extra_headers[\"Message-Id\"],\n \"size\": len(message.message().as_bytes()),\n }\n logger.info(\"mail.sent\", extra=extra)\n return sent\n\n\ndef get_mail_backend():\n backend = options.get(\"mail.backend\")\n try:\n return settings.SENTRY_EMAIL_BACKEND_ALIASES[backend]\n except KeyError:\n return backend\n\n\ndef get_connection(fail_silently=False):\n \"\"\"\n Gets an SMTP connection using our OptionsStore\n \"\"\"\n return mail.get_connection(\n backend=get_mail_backend(),\n host=options.get(\"mail.host\"),\n port=options.get(\"mail.port\"),\n username=options.get(\"mail.username\"),\n password=options.get(\"mail.password\"),\n use_tls=options.get(\"mail.use-tls\"),\n timeout=options.get(\"mail.timeout\"),\n fail_silently=fail_silently,\n )\n\n\ndef send_mail(subject, message, from_email, recipient_list, fail_silently=False, **kwargs):\n \"\"\"\n Wrapper that forces sending mail through our connection.\n Uses EmailMessage class which has more options than the simple send_mail\n \"\"\"\n email = mail.EmailMessage(\n subject,\n message,\n from_email,\n recipient_list,\n connection=get_connection(fail_silently=fail_silently),\n **kwargs\n )\n return email.send(fail_silently=fail_silently)\n\n\ndef 
is_smtp_enabled(backend=None):\n \"\"\"\n Check if the current backend is SMTP based.\n \"\"\"\n if backend is None:\n backend = get_mail_backend()\n return backend not in settings.SENTRY_SMTP_DISABLED_BACKENDS\n\n\nclass PreviewBackend(BaseEmailBackend):\n \"\"\"\n Email backend that can be used in local development to open messages in the\n local mail client as they are sent.\n\n Probably only works on OS X.\n \"\"\"\n\n def send_messages(self, email_messages):\n for message in email_messages:\n content = six.binary_type(message.message())\n preview = tempfile.NamedTemporaryFile(\n delete=False, prefix=\"sentry-email-preview-\", suffix=\".eml\"\n )\n try:\n preview.write(content)\n preview.flush()\n finally:\n preview.close()\n\n subprocess.check_call((\"open\", preview.name))\n\n return len(email_messages)\n", "path": "src/sentry/utils/email.py" } ]
[ { "content": "from __future__ import absolute_import\n\nimport logging\nimport os\nimport six\nimport subprocess\nimport tempfile\nimport time\n\nfrom email.utils import parseaddr\nfrom functools import partial\nfrom operator import attrgetter\nfrom random import randrange\n\nimport lxml\nimport toronado\nfrom django.conf import settings\nfrom django.core import mail\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.core.mail.backends.base import BaseEmailBackend\nfrom django.core.signing import BadSignature, Signer\nfrom django.utils.crypto import constant_time_compare\nfrom django.utils.encoding import force_bytes, force_str, force_text\n\nfrom sentry import options\nfrom sentry.logging import LoggingFormat\nfrom sentry.models import Activity, Group, GroupEmailThread, Project, User, UserOption\nfrom sentry.utils import metrics\nfrom sentry.utils.safe import safe_execute\nfrom sentry.utils.strings import is_valid_dot_atom\nfrom sentry.web.helpers import render_to_string\nfrom sentry.utils.compat import map\n\n# The maximum amount of recipients to display in human format.\nMAX_RECIPIENTS = 5\n\n# The fake TLD used to construct email addresses when one is required,\n# for example by automatically generated SSO accounts.\nFAKE_EMAIL_TLD = \".sentry-fake\"\n\nlogger = logging.getLogger(\"sentry.mail\")\n\n\ndef inline_css(value):\n tree = lxml.html.document_fromstring(value)\n toronado.inline(tree)\n # CSS media query support is inconsistent when the DOCTYPE declaration is\n # missing, so we force it to HTML5 here.\n return lxml.html.tostring(tree, doctype=\"<!DOCTYPE html>\")\n\n\nclass _CaseInsensitiveSigner(Signer):\n \"\"\"\n Generate a signature that is comprised of only lowercase letters.\n\n WARNING: Do not use this for anything that needs to be cryptographically\n secure! This is losing entropy and has a much higher chance of collision\n due to dropping to lowercase letters. For our purposes, this lack of entropy\n is ok and doesn't pose a risk.\n\n NOTE: This is needed strictly for signatures used in email addresses. 
Some\n clients, coughAirmailcough, treat email addresses as being case-insensitive,\n and sends the value as all lowercase.\n \"\"\"\n\n def signature(self, value):\n sig = super(_CaseInsensitiveSigner, self).signature(value)\n return sig.lower()\n\n def unsign(self, signed_value):\n # This unsign is identical to subclass except for the lowercasing\n # See: https://github.com/django/django/blob/1.6.11/django/core/signing.py#L165-L172\n signed_value = force_str(signed_value)\n if self.sep not in signed_value:\n raise BadSignature('No \"%s\" found in value' % self.sep)\n value, sig = signed_value.rsplit(self.sep, 1)\n if constant_time_compare(sig.lower(), self.signature(value)):\n return force_text(value)\n raise BadSignature('Signature \"%s\" does not match' % sig)\n\n\nsigner = _CaseInsensitiveSigner()\n\n\ndef email_to_group_id(address):\n \"\"\"\n Email address should be in the form of:\n {group_id}+{signature}@example.com\n \"\"\"\n address = address.split(\"@\", 1)[0]\n signed_data = address.replace(\"+\", \":\")\n return int(force_bytes(signer.unsign(signed_data)))\n\n\ndef group_id_to_email(group_id):\n signed_data = signer.sign(six.text_type(group_id))\n return \"@\".join(\n (\n signed_data.replace(\":\", \"+\"),\n options.get(\"mail.reply-hostname\") or get_from_email_domain(),\n )\n )\n\n\ndef domain_from_email(email):\n email = parseaddr(email)[1]\n try:\n return email.split(\"@\", 1)[1]\n except IndexError:\n # The email address is likely malformed or something\n return email\n\n\n# Slightly modified version of Django's\n# `django.core.mail.message:make_msgid` because we need\n# to override the domain. If we ever upgrade to\n# django 1.8, we can/should replace this.\ndef make_msgid(domain):\n \"\"\"Returns a string suitable for RFC 2822 compliant Message-ID, e.g:\n <[email protected]>\n Optional idstring if given is a string used to strengthen the\n uniqueness of the message id. Optional domain if given provides the\n portion of the message id after the '@'. 
It defaults to the locally\n defined hostname.\n \"\"\"\n timeval = time.time()\n utcdate = time.strftime(\"%Y%m%d%H%M%S\", time.gmtime(timeval))\n pid = os.getpid()\n randint = randrange(100000)\n msgid = \"<%s.%s.%s@%s>\" % (utcdate, pid, randint, domain)\n return msgid\n\n\n# cache the domain_from_email calculation\n# This is just a tuple of (email, email-domain)\n_from_email_domain_cache = (None, None)\n\n\ndef get_from_email_domain():\n global _from_email_domain_cache\n from_ = options.get(\"mail.from\")\n if not _from_email_domain_cache[0] == from_:\n _from_email_domain_cache = (from_, domain_from_email(from_))\n return _from_email_domain_cache[1]\n\n\ndef create_fake_email(unique_id, namespace):\n \"\"\"\n Generate a fake email of the form: {unique_id}@{namespace}{FAKE_EMAIL_TLD}\n\n For example: [email protected]\n \"\"\"\n return u\"{}@{}{}\".format(unique_id, namespace, FAKE_EMAIL_TLD)\n\n\ndef is_fake_email(email):\n \"\"\"\n Returns True if the provided email matches the fake email pattern.\n \"\"\"\n return email.endswith(FAKE_EMAIL_TLD)\n\n\ndef get_email_addresses(user_ids, project=None):\n pending = set(user_ids)\n results = {}\n\n if project:\n queryset = UserOption.objects.filter(project=project, user__in=pending, key=\"mail:email\")\n for option in (o for o in queryset if o.value and not is_fake_email(o.value)):\n results[option.user_id] = option.value\n pending.discard(option.user_id)\n\n if pending:\n queryset = User.objects.filter(pk__in=pending, is_active=True)\n for (user_id, email) in queryset.values_list(\"id\", \"email\"):\n if email and not is_fake_email(email):\n results[user_id] = email\n pending.discard(user_id)\n\n if pending:\n logger.warning(\n \"Could not resolve email addresses for user IDs in %r, discarding...\", pending\n )\n\n return results\n\n\nclass ListResolver(object):\n \"\"\"\n Manages the generation of RFC 2919 compliant list-id strings from varying\n objects types.\n \"\"\"\n\n class UnregisteredTypeError(Exception):\n \"\"\"\n Error raised when attempting to build a list-id from an unregisted object type.\n \"\"\"\n\n def __init__(self, namespace, type_handlers):\n assert is_valid_dot_atom(namespace)\n\n # The list-id-namespace that will be used when generating the list-id\n # string. This should be a domain name under the control of the\n # generator (see RFC 2919.)\n self.__namespace = namespace\n\n # A mapping of classes to functions that accept an instance of that\n # class, returning a tuple of values that will be used to generate the\n # list label. Returned values must be valid RFC 2822 dot-atom-text\n # values.\n self.__type_handlers = type_handlers\n\n def __call__(self, instance):\n \"\"\"\n Build a list-id string from an instance.\n\n Raises ``UnregisteredTypeError`` if there is no registered handler for\n the instance type. 
Raises ``AssertionError`` if a valid list-id string\n cannot be generated from the values returned by the type handler.\n \"\"\"\n try:\n handler = self.__type_handlers[type(instance)]\n except KeyError:\n raise self.UnregisteredTypeError(\n u\"Cannot generate mailing list identifier for {!r}\".format(instance)\n )\n\n label = \".\".join(map(six.text_type, handler(instance)))\n assert is_valid_dot_atom(label)\n\n return u\"<{}.{}>\".format(label, self.__namespace)\n\n\ndefault_list_type_handlers = {\n Activity: attrgetter(\"project.slug\", \"project.organization.slug\"),\n Project: attrgetter(\"slug\", \"organization.slug\"),\n Group: attrgetter(\"project.slug\", \"organization.slug\"),\n}\n\nmake_listid_from_instance = ListResolver(\n options.get(\"mail.list-namespace\"), default_list_type_handlers\n)\n\n\nclass MessageBuilder(object):\n def __init__(\n self,\n subject,\n context=None,\n template=None,\n html_template=None,\n body=\"\",\n html_body=None,\n headers=None,\n reference=None,\n reply_reference=None,\n from_email=None,\n type=None,\n ):\n assert not (body and template)\n assert not (html_body and html_template)\n assert context or not (template or html_template)\n\n if headers is None:\n headers = {}\n\n self.subject = subject\n self.context = context or {}\n self.template = template\n self.html_template = html_template\n self._txt_body = body\n self._html_body = html_body\n self.headers = headers\n self.reference = reference # The object that generated this message\n self.reply_reference = reply_reference # The object this message is replying about\n self.from_email = from_email or options.get(\"mail.from\")\n self._send_to = set()\n self.type = type if type else \"generic\"\n\n if reference is not None and \"List-Id\" not in headers:\n try:\n headers[\"List-Id\"] = make_listid_from_instance(reference)\n except ListResolver.UnregisteredTypeError as error:\n logger.debug(six.text_type(error))\n except AssertionError as error:\n logger.warning(six.text_type(error))\n\n def __render_html_body(self):\n html_body = None\n if self.html_template:\n html_body = render_to_string(self.html_template, self.context)\n else:\n html_body = self._html_body\n\n if html_body is not None:\n return inline_css(html_body)\n\n def __render_text_body(self):\n if self.template:\n return render_to_string(self.template, self.context)\n return self._txt_body\n\n def add_users(self, user_ids, project=None):\n self._send_to.update(get_email_addresses(user_ids, project).values())\n\n def build(self, to, reply_to=None, cc=None, bcc=None):\n if self.headers is None:\n headers = {}\n else:\n headers = self.headers.copy()\n\n if options.get(\"mail.enable-replies\") and \"X-Sentry-Reply-To\" in headers:\n reply_to = headers[\"X-Sentry-Reply-To\"]\n else:\n reply_to = set(reply_to or ())\n reply_to.discard(to)\n reply_to = \", \".join(reply_to)\n\n if reply_to:\n headers.setdefault(\"Reply-To\", reply_to)\n\n # Every message sent needs a unique message id\n message_id = make_msgid(get_from_email_domain())\n headers.setdefault(\"Message-Id\", message_id)\n\n subject = self.subject\n\n if self.reply_reference is not None:\n reference = self.reply_reference\n subject = \"Re: %s\" % subject\n else:\n reference = self.reference\n\n if isinstance(reference, Group):\n thread, created = GroupEmailThread.objects.get_or_create(\n email=to,\n group=reference,\n defaults={\"project\": reference.project, \"msgid\": message_id},\n )\n if not created:\n headers.setdefault(\"In-Reply-To\", thread.msgid)\n 
headers.setdefault(\"References\", thread.msgid)\n\n msg = EmailMultiAlternatives(\n subject=subject.splitlines()[0],\n body=self.__render_text_body(),\n from_email=self.from_email,\n to=(to,),\n cc=cc or (),\n bcc=bcc or (),\n headers=headers,\n )\n\n html_body = self.__render_html_body()\n if html_body:\n msg.attach_alternative(html_body.decode(\"utf-8\"), \"text/html\")\n\n return msg\n\n def get_built_messages(self, to=None, cc=None, bcc=None):\n send_to = set(to or ())\n send_to.update(self._send_to)\n results = [\n self.build(to=email, reply_to=send_to, cc=cc, bcc=bcc) for email in send_to if email\n ]\n if not results:\n logger.debug(\"Did not build any messages, no users to send to.\")\n return results\n\n def format_to(self, to):\n if not to:\n return \"\"\n if len(to) > MAX_RECIPIENTS:\n to = to[:MAX_RECIPIENTS] + [u\"and {} more.\".format(len(to[MAX_RECIPIENTS:]))]\n return \", \".join(to)\n\n def send(self, to=None, cc=None, bcc=None, fail_silently=False):\n return send_messages(\n self.get_built_messages(to, cc=cc, bcc=bcc), fail_silently=fail_silently\n )\n\n def send_async(self, to=None, cc=None, bcc=None):\n from sentry.tasks.email import send_email\n\n fmt = options.get(\"system.logging-format\")\n messages = self.get_built_messages(to, cc=cc, bcc=bcc)\n extra = {\"message_type\": self.type}\n loggable = [v for k, v in six.iteritems(self.context) if hasattr(v, \"id\")]\n for context in loggable:\n extra[\"%s_id\" % type(context).__name__.lower()] = context.id\n\n log_mail_queued = partial(logger.info, \"mail.queued\", extra=extra)\n for message in messages:\n safe_execute(send_email.delay, message=message, _with_transaction=False)\n extra[\"message_id\"] = message.extra_headers[\"Message-Id\"]\n metrics.incr(\"email.queued\", instance=self.type, skip_internal=False)\n if fmt == LoggingFormat.HUMAN:\n extra[\"message_to\"] = (self.format_to(message.to),)\n log_mail_queued()\n elif fmt == LoggingFormat.MACHINE:\n for recipient in message.to:\n extra[\"message_to\"] = recipient\n log_mail_queued()\n\n\ndef send_messages(messages, fail_silently=False):\n connection = get_connection(fail_silently=fail_silently)\n sent = connection.send_messages(messages)\n metrics.incr(\"email.sent\", len(messages), skip_internal=False)\n for message in messages:\n extra = {\n \"message_id\": message.extra_headers[\"Message-Id\"],\n \"size\": len(message.message().as_bytes()),\n }\n logger.info(\"mail.sent\", extra=extra)\n return sent\n\n\ndef get_mail_backend():\n backend = options.get(\"mail.backend\")\n try:\n return settings.SENTRY_EMAIL_BACKEND_ALIASES[backend]\n except KeyError:\n return backend\n\n\ndef get_connection(fail_silently=False):\n \"\"\"\n Gets an SMTP connection using our OptionsStore\n \"\"\"\n return mail.get_connection(\n backend=get_mail_backend(),\n host=options.get(\"mail.host\"),\n port=options.get(\"mail.port\"),\n username=options.get(\"mail.username\"),\n password=options.get(\"mail.password\"),\n use_tls=options.get(\"mail.use-tls\"),\n timeout=options.get(\"mail.timeout\"),\n fail_silently=fail_silently,\n )\n\n\ndef send_mail(subject, message, from_email, recipient_list, fail_silently=False, **kwargs):\n \"\"\"\n Wrapper that forces sending mail through our connection.\n Uses EmailMessage class which has more options than the simple send_mail\n \"\"\"\n email = mail.EmailMessage(\n subject,\n message,\n from_email,\n recipient_list,\n connection=get_connection(fail_silently=fail_silently),\n **kwargs\n )\n return email.send(fail_silently=fail_silently)\n\n\ndef 
is_smtp_enabled(backend=None):\n \"\"\"\n Check if the current backend is SMTP based.\n \"\"\"\n if backend is None:\n backend = get_mail_backend()\n return backend not in settings.SENTRY_SMTP_DISABLED_BACKENDS\n\n\nclass PreviewBackend(BaseEmailBackend):\n \"\"\"\n Email backend that can be used in local development to open messages in the\n local mail client as they are sent.\n\n Probably only works on OS X.\n \"\"\"\n\n def send_messages(self, email_messages):\n for message in email_messages:\n content = six.binary_type(message.message())\n preview = tempfile.NamedTemporaryFile(\n delete=False, prefix=\"sentry-email-preview-\", suffix=\".eml\"\n )\n try:\n preview.write(content)\n preview.flush()\n finally:\n preview.close()\n\n subprocess.check_call((\"open\", preview.name))\n\n return len(email_messages)\n", "path": "src/sentry/utils/email.py" } ]
diff --git a/src/sentry/utils/email.py b/src/sentry/utils/email.py index ddd30fc7a89cdf..42342439a51685 100644 --- a/src/sentry/utils/email.py +++ b/src/sentry/utils/email.py @@ -228,7 +228,7 @@ def __call__(self, instance): label = ".".join(map(six.text_type, handler(instance))) assert is_valid_dot_atom(label) - return u"{}.{}".format(label, self.__namespace) + return u"<{}.{}>".format(label, self.__namespace) default_list_type_handlers = { diff --git a/tests/sentry/utils/email/tests.py b/tests/sentry/utils/email/tests.py index 332da9777f10b0..c25902da8f61ab 100644 --- a/tests/sentry/utils/email/tests.py +++ b/tests/sentry/utils/email/tests.py @@ -32,7 +32,7 @@ def test_rejects_invalid_types(self): self.resolver(object()) def test_generates_list_ids(self): - expected = u"{0.project.slug}.{0.organization.slug}.namespace".format(self.event) + expected = u"<{0.project.slug}.{0.organization.slug}.namespace>".format(self.event) assert self.resolver(self.event.group) == expected assert self.resolver(self.event.project) == expected @@ -297,7 +297,7 @@ def test_generates_list_ids_for_registered_types(self): MessageBuilder, subject="Test", body="hello world", html_body="<b>hello world</b>" ) - expected = u"{event.project.slug}.{event.organization.slug}.{namespace}".format( + expected = u"<{event.project.slug}.{event.organization.slug}.{namespace}>".format( event=self.event, namespace=options.get("mail.list-namespace") )
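For context on the diff above: RFC 2919 requires `List-Id` header values to be wrapped in angle brackets (`<list-label.list-id-namespace>`), which is what the change to `ListResolver.__call__` and the two test expectations add. A minimal sketch of the formatting change on plain strings (the label and namespace values here are invented examples, not sentry fixtures):

```python
# Sketch of the formatting change from the diff above; example values invented.
def make_list_id(label, namespace):
    # RFC 2919: a list-id value has the form "<list-label.list-id-namespace>".
    return u"<{}.{}>".format(label, namespace)

assert make_list_id("my-project.my-org", "lists.example.com") == (
    "<my-project.my-org.lists.example.com>"
)
```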
ibis-project__ibis-2368
BigQuery Covariance operator compilation includes string representation of table instead of table ID **Failing test** https://github.com/ibis-project/ibis/blob/a70d443c7931cb8bb47c52f97999589566e03cb2/ibis/tests/all/test_aggregation.py#L165-L169 **Test output** ``` $ pytest ibis/tests/all/test_aggregation.py::test_reduction_ops[BigQuery-no_cond-covar] \ ibis/tests/all/test_aggregation.py::test_reduction_ops[BigQuery-is_in-covar] ``` Output: <details> ``` ======================================================= test session starts ======================================================= platform darwin -- Python 3.7.8, pytest-5.4.3, py-1.9.0, pluggy-0.13.1 rootdir: /Users/swast/src/ibis, inifile: setup.cfg plugins: forked-1.2.0, mock-3.1.1, cov-2.10.0, xdist-1.34.0 collected 2 items ibis/tests/all/test_aggregation.py FF [100%] ============================================================ FAILURES ============================================================= ___________________________________________ test_reduction_ops[BigQuery-no_cond-covar] ____________________________________________ backend = <ibis.tests.backends.BigQuery object at 0x7fc25b7935d0> alltypes = BigQueryTable[table] name: swast-scratch.testing.functional_alltypes schema: index : int64 Unnamed_0 : int...4 date_string_col : string string_col : string timestamp_col : timestamp year : int64 month : int64 df = index Unnamed_0 id bool_col tinyint_col ... date_string_col string_col timestamp_col year m... True 6 ... 01/31/10 6 2010-01-31 05:06:13.650 2010 1 [7300 rows x 15 columns] result_fn = <function <lambda> at 0x7fc25b7c08c0>, expected_fn = <function <lambda> at 0x7fc25b7c0950> ibis_cond = <function <lambda> at 0x7fc25b7c0e60>, pandas_cond = <function <lambda> at 0x7fc25b7c0ef0> @pytest.mark.parametrize( ('result_fn', 'expected_fn'), [ param( lambda t, where: t.bool_col.count(where=where), lambda t, where: len(t.bool_col[where].dropna()), id='count', ), param( lambda t, where: t.bool_col.any(), lambda t, where: t.bool_col.any(), id='any', ), param( lambda t, where: t.bool_col.notany(), lambda t, where: ~t.bool_col.any(), id='notany', ), param( lambda t, where: -t.bool_col.any(), lambda t, where: ~t.bool_col.any(), id='any_negate', ), param( lambda t, where: t.bool_col.all(), lambda t, where: t.bool_col.all(), id='all', ), param( lambda t, where: t.bool_col.notall(), lambda t, where: ~t.bool_col.all(), id='notall', ), param( lambda t, where: -t.bool_col.all(), lambda t, where: ~t.bool_col.all(), id='all_negate', ), param( lambda t, where: t.double_col.sum(), lambda t, where: t.double_col.sum(), id='sum', ), param( lambda t, where: t.double_col.mean(), lambda t, where: t.double_col.mean(), id='mean', ), param( lambda t, where: t.double_col.min(), lambda t, where: t.double_col.min(), id='min', ), param( lambda t, where: t.double_col.max(), lambda t, where: t.double_col.max(), id='max', ), param( lambda t, where: t.double_col.approx_median(), lambda t, where: t.double_col.median(), id='approx_median', marks=pytest.mark.xpass_backends([Clickhouse]), ), param( lambda t, where: t.double_col.std(how='sample'), lambda t, where: t.double_col.std(ddof=1), id='std', ), param( lambda t, where: t.double_col.var(how='sample'), lambda t, where: t.double_col.var(ddof=1), id='var', ), param( lambda t, where: t.double_col.std(how='pop'), lambda t, where: t.double_col.std(ddof=0), id='std_pop', ), param( lambda t, where: t.double_col.var(how='pop'), lambda t, where: t.double_col.var(ddof=0), id='var_pop', ), param( lambda t, where: 
t.double_col.cov(t.float_col), lambda t, where: t.double_col.cov(t.float_col), id='covar', ), param( lambda t, where: t.double_col.corr(t.float_col), lambda t, where: t.double_col.corr(t.float_col), id='corr', ), param( lambda t, where: t.string_col.approx_nunique(), lambda t, where: t.string_col.nunique(), id='approx_nunique', marks=pytest.mark.xfail_backends([MySQL, SQLite]), ), param( lambda t, where: t.double_col.arbitrary(how='first'), lambda t, where: t.double_col.iloc[0], id='arbitrary_first', ), param( lambda t, where: t.double_col.arbitrary(how='last'), lambda t, where: t.double_col.iloc[-1], id='arbitrary_last', ), ], ) @pytest.mark.parametrize( ('ibis_cond', 'pandas_cond'), [ param(lambda t: None, lambda t: slice(None), id='no_cond'), param( lambda t: t.string_col.isin(['1', '7']), lambda t: t.string_col.isin(['1', '7']), id='is_in', ), ], ) @pytest.mark.xfail_unsupported def test_reduction_ops( backend, alltypes, df, result_fn, expected_fn, ibis_cond, pandas_cond ): expr = result_fn(alltypes, ibis_cond(alltypes)) > result = expr.execute() ibis/tests/all/test_aggregation.py:209: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ibis/expr/types.py:219: in execute self, limit=limit, timecontext=timecontext, params=params, **kwargs ibis/client.py:368: in execute return backend.execute(expr, limit=limit, params=params, **kwargs) ibis/client.py:221: in execute result = self._execute_query(query, **kwargs) ibis/client.py:228: in _execute_query return query.execute() ibis/bigquery/client.py:194: in execute query_parameters=self.query_parameters, ibis/bigquery/client.py:475: in _execute query.result() # blocks until finished ../../miniconda3/envs/ibis-dev/lib/python3.7/site-packages/google/cloud/bigquery/job.py:3207: in result super(QueryJob, self).result(retry=retry, timeout=timeout) ../../miniconda3/envs/ibis-dev/lib/python3.7/site-packages/google/cloud/bigquery/job.py:812: in result return super(_AsyncJob, self).result(timeout=timeout) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <google.cloud.bigquery.job.QueryJob object at 0x7fc25b890c10>, timeout = None def result(self, timeout=None): """Get the result of the operation, blocking if necessary. Args: timeout (int): How long (in seconds) to wait for the operation to complete. If None, wait indefinitely. Returns: google.protobuf.Message: The Operation's result. Raises: google.api_core.GoogleAPICallError: If the operation errors or if the timeout is reached before the operation completes. """ self._blocking_poll(timeout=timeout) if self._exception is not None: # pylint: disable=raising-bad-type # Pylint doesn't recognize that this is valid in this case. > raise self._exception E google.api_core.exceptions.BadRequest: 400 Syntax error: Expected ")" but got identifier "BigQueryTable" at [3:3] E E (job ID: fabc9d5c-9c79-482e-9320-acffbd787de1) E E -----Query Job SQL Follows----- E E | . | . | . | . | . 
| E 1:SELECT E 2: COVAR_SAMP(ref_0 E 3: BigQueryTable[table] E 4: name: swast-scratch.testing.functional_alltypes E 5: schema: E 6: index : int64 E 7: Unnamed_0 : int64 E 8: id : int64 E 9: bool_col : boolean E 10: tinyint_col : int64 E 11: smallint_col : int64 E 12: int_col : int64 E 13: bigint_col : int64 E 14: float_col : float64 E 15: double_col : float64 E 16: date_string_col : string E 17: string_col : string E 18: timestamp_col : timestamp E 19: year : int64 E 20: month : int64 E 21: E 22: double_col = Column[float64*] 'double_col' from table E 23: ref_0, ref_0 E 24: BigQueryTable[table] E 25: name: swast-scratch.testing.functional_alltypes E 26: schema: E 27: index : int64 E 28: Unnamed_0 : int64 E 29: id : int64 E 30: bool_col : boolean E 31: tinyint_col : int64 E 32: smallint_col : int64 E 33: int_col : int64 E 34: bigint_col : int64 E 35: float_col : float64 E 36: double_col : float64 E 37: date_string_col : string E 38: string_col : string E 39: timestamp_col : timestamp E 40: year : int64 E 41: month : int64 E 42: E 43: float_col = Column[float64*] 'float_col' from table E 44: ref_0) AS `tmp` E 45:FROM `swast-scratch.testing.functional_alltypes` E | . | . | . | . | . | ../../miniconda3/envs/ibis-dev/lib/python3.7/site-packages/google/api_core/future/polling.py:130: BadRequest ____________________________________________ test_reduction_ops[BigQuery-is_in-covar] _____________________________________________ backend = <ibis.tests.backends.BigQuery object at 0x7fc25b7935d0> alltypes = BigQueryTable[table] name: swast-scratch.testing.functional_alltypes schema: index : int64 Unnamed_0 : int...4 date_string_col : string string_col : string timestamp_col : timestamp year : int64 month : int64 df = index Unnamed_0 id bool_col tinyint_col ... date_string_col string_col timestamp_col year m... True 6 ... 
01/31/10 6 2010-01-31 05:06:13.650 2010 1 [7300 rows x 15 columns] result_fn = <function <lambda> at 0x7fc25b7c08c0>, expected_fn = <function <lambda> at 0x7fc25b7c0950> ibis_cond = <function <lambda> at 0x7fc25b7c0f80>, pandas_cond = <function <lambda> at 0x7fc25b7c3050> @pytest.mark.parametrize( ('result_fn', 'expected_fn'), [ param( lambda t, where: t.bool_col.count(where=where), lambda t, where: len(t.bool_col[where].dropna()), id='count', ), param( lambda t, where: t.bool_col.any(), lambda t, where: t.bool_col.any(), id='any', ), param( lambda t, where: t.bool_col.notany(), lambda t, where: ~t.bool_col.any(), id='notany', ), param( lambda t, where: -t.bool_col.any(), lambda t, where: ~t.bool_col.any(), id='any_negate', ), param( lambda t, where: t.bool_col.all(), lambda t, where: t.bool_col.all(), id='all', ), param( lambda t, where: t.bool_col.notall(), lambda t, where: ~t.bool_col.all(), id='notall', ), param( lambda t, where: -t.bool_col.all(), lambda t, where: ~t.bool_col.all(), id='all_negate', ), param( lambda t, where: t.double_col.sum(), lambda t, where: t.double_col.sum(), id='sum', ), param( lambda t, where: t.double_col.mean(), lambda t, where: t.double_col.mean(), id='mean', ), param( lambda t, where: t.double_col.min(), lambda t, where: t.double_col.min(), id='min', ), param( lambda t, where: t.double_col.max(), lambda t, where: t.double_col.max(), id='max', ), param( lambda t, where: t.double_col.approx_median(), lambda t, where: t.double_col.median(), id='approx_median', marks=pytest.mark.xpass_backends([Clickhouse]), ), param( lambda t, where: t.double_col.std(how='sample'), lambda t, where: t.double_col.std(ddof=1), id='std', ), param( lambda t, where: t.double_col.var(how='sample'), lambda t, where: t.double_col.var(ddof=1), id='var', ), param( lambda t, where: t.double_col.std(how='pop'), lambda t, where: t.double_col.std(ddof=0), id='std_pop', ), param( lambda t, where: t.double_col.var(how='pop'), lambda t, where: t.double_col.var(ddof=0), id='var_pop', ), param( lambda t, where: t.double_col.cov(t.float_col), lambda t, where: t.double_col.cov(t.float_col), id='covar', ), param( lambda t, where: t.double_col.corr(t.float_col), lambda t, where: t.double_col.corr(t.float_col), id='corr', ), param( lambda t, where: t.string_col.approx_nunique(), lambda t, where: t.string_col.nunique(), id='approx_nunique', marks=pytest.mark.xfail_backends([MySQL, SQLite]), ), param( lambda t, where: t.double_col.arbitrary(how='first'), lambda t, where: t.double_col.iloc[0], id='arbitrary_first', ), param( lambda t, where: t.double_col.arbitrary(how='last'), lambda t, where: t.double_col.iloc[-1], id='arbitrary_last', ), ], ) @pytest.mark.parametrize( ('ibis_cond', 'pandas_cond'), [ param(lambda t: None, lambda t: slice(None), id='no_cond'), param( lambda t: t.string_col.isin(['1', '7']), lambda t: t.string_col.isin(['1', '7']), id='is_in', ), ], ) @pytest.mark.xfail_unsupported def test_reduction_ops( backend, alltypes, df, result_fn, expected_fn, ibis_cond, pandas_cond ): expr = result_fn(alltypes, ibis_cond(alltypes)) > result = expr.execute() ibis/tests/all/test_aggregation.py:209: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ibis/expr/types.py:219: in execute self, limit=limit, timecontext=timecontext, params=params, **kwargs ibis/client.py:368: in execute return backend.execute(expr, limit=limit, params=params, **kwargs) ibis/client.py:221: in execute result = self._execute_query(query, 
**kwargs) ibis/client.py:228: in _execute_query return query.execute() ibis/bigquery/client.py:194: in execute query_parameters=self.query_parameters, ibis/bigquery/client.py:475: in _execute query.result() # blocks until finished ../../miniconda3/envs/ibis-dev/lib/python3.7/site-packages/google/cloud/bigquery/job.py:3207: in result super(QueryJob, self).result(retry=retry, timeout=timeout) ../../miniconda3/envs/ibis-dev/lib/python3.7/site-packages/google/cloud/bigquery/job.py:812: in result return super(_AsyncJob, self).result(timeout=timeout) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <google.cloud.bigquery.job.QueryJob object at 0x7fc25c1aa7d0>, timeout = None def result(self, timeout=None): """Get the result of the operation, blocking if necessary. Args: timeout (int): How long (in seconds) to wait for the operation to complete. If None, wait indefinitely. Returns: google.protobuf.Message: The Operation's result. Raises: google.api_core.GoogleAPICallError: If the operation errors or if the timeout is reached before the operation completes. """ self._blocking_poll(timeout=timeout) if self._exception is not None: # pylint: disable=raising-bad-type # Pylint doesn't recognize that this is valid in this case. > raise self._exception E google.api_core.exceptions.BadRequest: 400 Syntax error: Expected ")" but got identifier "BigQueryTable" at [3:3] E E (job ID: 23caa920-3cf1-4d62-9523-e81e1d58e9a9) E E -----Query Job SQL Follows----- E E | . | . | . | . | . | E 1:SELECT E 2: COVAR_SAMP(ref_0 E 3: BigQueryTable[table] E 4: name: swast-scratch.testing.functional_alltypes E 5: schema: E 6: index : int64 E 7: Unnamed_0 : int64 E 8: id : int64 E 9: bool_col : boolean E 10: tinyint_col : int64 E 11: smallint_col : int64 E 12: int_col : int64 E 13: bigint_col : int64 E 14: float_col : float64 E 15: double_col : float64 E 16: date_string_col : string E 17: string_col : string E 18: timestamp_col : timestamp E 19: year : int64 E 20: month : int64 E 21: E 22: double_col = Column[float64*] 'double_col' from table E 23: ref_0, ref_0 E 24: BigQueryTable[table] E 25: name: swast-scratch.testing.functional_alltypes E 26: schema: E 27: index : int64 E 28: Unnamed_0 : int64 E 29: id : int64 E 30: bool_col : boolean E 31: tinyint_col : int64 E 32: smallint_col : int64 E 33: int_col : int64 E 34: bigint_col : int64 E 35: float_col : float64 E 36: double_col : float64 E 37: date_string_col : string E 38: string_col : string E 39: timestamp_col : timestamp E 40: year : int64 E 41: month : int64 E 42: E 43: float_col = Column[float64*] 'float_col' from table E 44: ref_0) AS `tmp` E 45:FROM `swast-scratch.testing.functional_alltypes` E | . | . | . | . | . | ../../miniconda3/envs/ibis-dev/lib/python3.7/site-packages/google/api_core/future/polling.py:130: BadRequest ======================================================== warnings summary ========================================================= ibis/tests/all/test_aggregation.py::test_reduction_ops[BigQuery-no_cond-covar] /Users/swast/src/ibis/ibis/bigquery/client.py:545: PendingDeprecationWarning: Client.dataset is deprecated and will be removed in a future version. Use a string like 'my_project.my_dataset' or a cloud.google.bigquery.DatasetReference object, instead. 
table_ref = self.client.dataset(dataset, project=project).table(name) ibis/tests/all/test_aggregation.py::test_reduction_ops[BigQuery-no_cond-covar] /Users/swast/src/ibis/ibis/bigquery/client.py:432: PendingDeprecationWarning: Client.dataset is deprecated and will be removed in a future version. Use a string like 'my_project.my_dataset' or a cloud.google.bigquery.DatasetReference object, instead. dataset_ref = self.client.dataset(dataset, project=project) -- Docs: https://docs.pytest.org/en/latest/warnings.html ===================================================== short test summary info ===================================================== FAILED ibis/tests/all/test_aggregation.py::test_reduction_ops[BigQuery-no_cond-covar] - google.api_core.exceptions.BadRequest: 4... FAILED ibis/tests/all/test_aggregation.py::test_reduction_ops[BigQuery-is_in-covar] - google.api_core.exceptions.BadRequest: 400... ================================================== 2 failed, 2 warnings in 4.17s ================================================== ``` </details> **Thoughts on fix** I believe this is the relevant source: https://github.com/ibis-project/ibis/blob/4f110e1fcfb36e5763a1ed44b1e1ceaa5b5e30b2/ibis/bigquery/compiler.py#L556-L576 Perhaps it is just missing some calls to `translator.translate`?
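A hedged sketch of what that fix might look like (untested, and assuming the module-level names from the linked file: `ibis`, `ops`, and the `compiles` decorator). Interpolating the ibis expressions straight into `str.format` pulls in their multi-line reprs, which is exactly the `BigQueryTable[table]` text visible in the failing SQL above, so the operands need to pass through `translator.translate` first. The sketch also formats the original `how` value in the error branch, where the current code references the local `how` before it has ever been assigned:

```python
@compiles(ops.Covariance)
def compiles_covar(translator, expr):
    op = expr.op()
    left, right, where = op.left, op.right, op.where

    if op.how == 'sample':
        how = 'SAMP'
    elif op.how == 'pop':
        how = 'POP'
    else:
        raise ValueError(
            "Covariance with how={!r} is not supported.".format(op.how)
        )

    if where is not None:
        left = where.ifelse(left, ibis.NA)
        right = where.ifelse(right, ibis.NA)

    # Translate the operand expressions into SQL fragments rather than
    # interpolating their reprs into the query text.
    return "COVAR_{}({}, {})".format(
        how, translator.translate(left), translator.translate(right)
    )
```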
[ { "content": "import datetime\nfrom functools import partial\n\nimport numpy as np\nimport regex as re\nimport toolz\nfrom multipledispatch import Dispatcher\n\nimport ibis\nimport ibis.common.exceptions as com\nimport ibis.expr.datatypes as dt\nimport ibis.expr.lineage as lin\nimport ibis.expr.operations as ops\nimport ibis.expr.types as ir\nimport ibis.sql.compiler as comp\nfrom ibis.bigquery.datatypes import ibis_type_to_bigquery_type\nfrom ibis.impala import compiler as impala_compiler\nfrom ibis.impala.compiler import (\n ImpalaSelect,\n ImpalaTableSetFormatter,\n _reduction,\n fixed_arity,\n unary,\n)\n\n\nclass BigQueryUDFNode(ops.ValueOp):\n pass\n\n\nclass BigQuerySelectBuilder(comp.SelectBuilder):\n @property\n def _select_class(self):\n return BigQuerySelect\n\n\nclass BigQueryUDFDefinition(comp.DDL):\n def __init__(self, expr, context):\n self.expr = expr\n self.context = context\n\n def compile(self):\n return self.expr.op().js\n\n\nclass BigQueryUnion(comp.Union):\n @staticmethod\n def keyword(distinct):\n return 'UNION DISTINCT' if distinct else 'UNION ALL'\n\n\ndef find_bigquery_udf(expr):\n if isinstance(expr.op(), BigQueryUDFNode):\n result = expr\n else:\n result = None\n return lin.proceed, result\n\n\nclass BigQueryQueryBuilder(comp.QueryBuilder):\n\n select_builder = BigQuerySelectBuilder\n union_class = BigQueryUnion\n\n def generate_setup_queries(self):\n queries = map(\n partial(BigQueryUDFDefinition, context=self.context),\n lin.traverse(find_bigquery_udf, self.expr),\n )\n\n # UDFs are uniquely identified by the name of the Node subclass we\n # generate.\n return list(\n toolz.unique(queries, key=lambda x: type(x.expr.op()).__name__)\n )\n\n\ndef build_ast(expr, context):\n builder = BigQueryQueryBuilder(expr, context=context)\n return builder.get_result()\n\n\ndef to_sql(expr, context):\n query_ast = build_ast(expr, context)\n compiled = query_ast.compile()\n return compiled\n\n\nclass BigQueryContext(comp.QueryContext):\n def _to_sql(self, expr, ctx):\n return to_sql(expr, context=ctx)\n\n\ndef _extract_field(sql_attr):\n def extract_field_formatter(translator, expr):\n op = expr.op()\n arg = translator.translate(op.args[0])\n if sql_attr == 'epochseconds':\n return f'UNIX_SECONDS({arg})'\n else:\n return f'EXTRACT({sql_attr} from {arg})'\n\n return extract_field_formatter\n\n\nbigquery_cast = Dispatcher('bigquery_cast')\n\n\n@bigquery_cast.register(str, dt.Timestamp, dt.Integer)\ndef bigquery_cast_timestamp_to_integer(compiled_arg, from_, to):\n return 'UNIX_MICROS({})'.format(compiled_arg)\n\n\n@bigquery_cast.register(str, dt.DataType, dt.DataType)\ndef bigquery_cast_generate(compiled_arg, from_, to):\n sql_type = ibis_type_to_bigquery_type(to)\n return 'CAST({} AS {})'.format(compiled_arg, sql_type)\n\n\ndef _cast(translator, expr):\n op = expr.op()\n arg, target_type = op.args\n arg_formatted = translator.translate(arg)\n return bigquery_cast(arg_formatted, arg.type(), target_type)\n\n\ndef _struct_field(translator, expr):\n arg, field = expr.op().args\n arg_formatted = translator.translate(arg)\n return '{}.`{}`'.format(arg_formatted, field)\n\n\ndef _array_concat(translator, expr):\n return 'ARRAY_CONCAT({})'.format(\n ', '.join(map(translator.translate, expr.op().args))\n )\n\n\ndef _array_index(translator, expr):\n # SAFE_OFFSET returns NULL if out of bounds\n return '{}[SAFE_OFFSET({})]'.format(\n *map(translator.translate, expr.op().args)\n )\n\n\ndef _string_find(translator, expr):\n haystack, needle, start, end = expr.op().args\n\n if start is not 
None:\n raise NotImplementedError('start not implemented for string find')\n if end is not None:\n raise NotImplementedError('end not implemented for string find')\n\n return 'STRPOS({}, {}) - 1'.format(\n translator.translate(haystack), translator.translate(needle)\n )\n\n\ndef _translate_pattern(translator, pattern):\n # add 'r' to string literals to indicate to BigQuery this is a raw string\n return 'r' * isinstance(pattern.op(), ops.Literal) + translator.translate(\n pattern\n )\n\n\ndef _regex_search(translator, expr):\n arg, pattern = expr.op().args\n regex = _translate_pattern(translator, pattern)\n result = 'REGEXP_CONTAINS({}, {})'.format(translator.translate(arg), regex)\n return result\n\n\ndef _regex_extract(translator, expr):\n arg, pattern, index = expr.op().args\n regex = _translate_pattern(translator, pattern)\n result = 'REGEXP_EXTRACT_ALL({}, {})[SAFE_OFFSET({})]'.format(\n translator.translate(arg), regex, translator.translate(index)\n )\n return result\n\n\ndef _regex_replace(translator, expr):\n arg, pattern, replacement = expr.op().args\n regex = _translate_pattern(translator, pattern)\n result = 'REGEXP_REPLACE({}, {}, {})'.format(\n translator.translate(arg), regex, translator.translate(replacement)\n )\n return result\n\n\ndef _string_concat(translator, expr):\n return 'CONCAT({})'.format(\n ', '.join(map(translator.translate, expr.op().arg))\n )\n\n\ndef _string_join(translator, expr):\n sep, args = expr.op().args\n return 'ARRAY_TO_STRING([{}], {})'.format(\n ', '.join(map(translator.translate, args)), translator.translate(sep)\n )\n\n\ndef _string_ascii(translator, expr):\n (arg,) = expr.op().args\n return 'TO_CODE_POINTS({})[SAFE_OFFSET(0)]'.format(\n translator.translate(arg)\n )\n\n\ndef _string_right(translator, expr):\n arg, nchars = map(translator.translate, expr.op().args)\n return 'SUBSTR({arg}, -LEAST(LENGTH({arg}), {nchars}))'.format(\n arg=arg, nchars=nchars\n )\n\n\ndef _array_literal_format(expr):\n return str(list(expr.op().value))\n\n\ndef _log(translator, expr):\n op = expr.op()\n arg, base = op.args\n arg_formatted = translator.translate(arg)\n\n if base is None:\n return 'ln({})'.format(arg_formatted)\n\n base_formatted = translator.translate(base)\n return 'log({}, {})'.format(arg_formatted, base_formatted)\n\n\ndef _literal(translator, expr):\n\n if isinstance(expr, ir.NumericValue):\n value = expr.op().value\n if not np.isfinite(value):\n return 'CAST({!r} AS FLOAT64)'.format(str(value))\n\n # special case literal timestamp, date, and time scalars\n if isinstance(expr.op(), ops.Literal):\n value = expr.op().value\n if isinstance(expr, ir.DateScalar):\n if isinstance(value, datetime.datetime):\n raw_value = value.date()\n else:\n raw_value = value\n return \"DATE '{}'\".format(raw_value)\n elif isinstance(expr, ir.TimestampScalar):\n return \"TIMESTAMP '{}'\".format(value)\n elif isinstance(expr, ir.TimeScalar):\n # TODO: define extractors on TimeValue expressions\n return \"TIME '{}'\".format(value)\n\n try:\n return impala_compiler._literal(translator, expr)\n except NotImplementedError:\n if isinstance(expr, ir.ArrayValue):\n return _array_literal_format(expr)\n raise NotImplementedError(type(expr).__name__)\n\n\ndef _arbitrary(translator, expr):\n arg, how, where = expr.op().args\n\n if where is not None:\n arg = where.ifelse(arg, ibis.NA)\n\n if how not in (None, 'first'):\n raise com.UnsupportedOperationError(\n '{!r} value not supported for arbitrary in BigQuery'.format(how)\n )\n\n return 
'ANY_VALUE({})'.format(translator.translate(arg))\n\n\n_date_units = {\n 'Y': 'YEAR',\n 'Q': 'QUARTER',\n 'W': 'WEEK',\n 'M': 'MONTH',\n 'D': 'DAY',\n}\n\n\n_timestamp_units = {\n 'us': 'MICROSECOND',\n 'ms': 'MILLISECOND',\n 's': 'SECOND',\n 'm': 'MINUTE',\n 'h': 'HOUR',\n}\n_time_units = _timestamp_units.copy()\n_timestamp_units.update(_date_units)\n\n\ndef _truncate(kind, units):\n def truncator(translator, expr):\n arg, unit = expr.op().args\n trans_arg = translator.translate(arg)\n valid_unit = units.get(unit)\n if valid_unit is None:\n raise com.UnsupportedOperationError(\n 'BigQuery does not support truncating {} values to unit '\n '{!r}'.format(arg.type(), unit)\n )\n return '{}_TRUNC({}, {})'.format(kind, trans_arg, valid_unit)\n\n return truncator\n\n\ndef _timestamp_op(func, units):\n def _formatter(translator, expr):\n op = expr.op()\n arg, offset = op.args\n\n unit = offset.type().unit\n if unit not in units:\n raise com.UnsupportedOperationError(\n 'BigQuery does not allow binary operation '\n '{} with INTERVAL offset {}'.format(func, unit)\n )\n formatted_arg = translator.translate(arg)\n formatted_offset = translator.translate(offset)\n result = '{}({}, {})'.format(func, formatted_arg, formatted_offset)\n return result\n\n return _formatter\n\n\nSTRFTIME_FORMAT_FUNCTIONS = {\n dt.Date: 'DATE',\n dt.Time: 'TIME',\n dt.Timestamp: 'TIMESTAMP',\n}\n\n\n_operation_registry = impala_compiler._operation_registry.copy()\n_operation_registry.update(\n {\n ops.ExtractYear: _extract_field('year'),\n ops.ExtractQuarter: _extract_field('quarter'),\n ops.ExtractMonth: _extract_field('month'),\n ops.ExtractDay: _extract_field('day'),\n ops.ExtractHour: _extract_field('hour'),\n ops.ExtractMinute: _extract_field('minute'),\n ops.ExtractSecond: _extract_field('second'),\n ops.ExtractMillisecond: _extract_field('millisecond'),\n ops.ExtractEpochSeconds: _extract_field('epochseconds'),\n ops.StringReplace: fixed_arity('REPLACE', 3),\n ops.StringSplit: fixed_arity('SPLIT', 2),\n ops.StringConcat: _string_concat,\n ops.StringJoin: _string_join,\n ops.StringAscii: _string_ascii,\n ops.StringFind: _string_find,\n ops.StrRight: _string_right,\n ops.Repeat: fixed_arity('REPEAT', 2),\n ops.RegexSearch: _regex_search,\n ops.RegexExtract: _regex_extract,\n ops.RegexReplace: _regex_replace,\n ops.GroupConcat: _reduction('STRING_AGG'),\n ops.IfNull: fixed_arity('IFNULL', 2),\n ops.Cast: _cast,\n ops.StructField: _struct_field,\n ops.ArrayCollect: unary('ARRAY_AGG'),\n ops.ArrayConcat: _array_concat,\n ops.ArrayIndex: _array_index,\n ops.ArrayLength: unary('ARRAY_LENGTH'),\n ops.HLLCardinality: _reduction('APPROX_COUNT_DISTINCT'),\n ops.Log: _log,\n ops.Sign: unary('SIGN'),\n ops.Modulus: fixed_arity('MOD', 2),\n ops.Date: unary('DATE'),\n # BigQuery doesn't have these operations built in.\n # ops.ArrayRepeat: _array_repeat,\n # ops.ArraySlice: _array_slice,\n ops.Literal: _literal,\n ops.Arbitrary: _arbitrary,\n ops.TimestampTruncate: _truncate('TIMESTAMP', _timestamp_units),\n ops.DateTruncate: _truncate('DATE', _date_units),\n ops.TimeTruncate: _truncate('TIME', _timestamp_units),\n ops.Time: unary('TIME'),\n ops.TimestampAdd: _timestamp_op(\n 'TIMESTAMP_ADD', {'h', 'm', 's', 'ms', 'us'}\n ),\n ops.TimestampSub: _timestamp_op(\n 'TIMESTAMP_DIFF', {'h', 'm', 's', 'ms', 'us'}\n ),\n ops.DateAdd: _timestamp_op('DATE_ADD', {'D', 'W', 'M', 'Q', 'Y'}),\n ops.DateSub: _timestamp_op('DATE_SUB', {'D', 'W', 'M', 'Q', 'Y'}),\n ops.TimestampNow: fixed_arity('CURRENT_TIMESTAMP', 0),\n }\n)\n\n_invalid_operations = 
{\n ops.Translate,\n ops.FindInSet,\n ops.Capitalize,\n ops.DateDiff,\n ops.TimestampDiff,\n}\n\n_operation_registry = {\n k: v\n for k, v in _operation_registry.items()\n if k not in _invalid_operations\n}\n\n\nclass BigQueryExprTranslator(impala_compiler.ImpalaExprTranslator):\n _registry = _operation_registry\n _rewrites = impala_compiler.ImpalaExprTranslator._rewrites.copy()\n\n context_class = BigQueryContext\n\n def _trans_param(self, expr):\n op = expr.op()\n if op not in self.context.params:\n raise KeyError(op)\n return '@{}'.format(expr.get_name())\n\n\ncompiles = BigQueryExprTranslator.compiles\nrewrites = BigQueryExprTranslator.rewrites\n\n\n@compiles(ops.DayOfWeekIndex)\ndef bigquery_day_of_week_index(t, e):\n arg = e.op().args[0]\n arg_formatted = t.translate(arg)\n return 'MOD(EXTRACT(DAYOFWEEK FROM {}) + 5, 7)'.format(arg_formatted)\n\n\n@rewrites(ops.DayOfWeekName)\ndef bigquery_day_of_week_name(e):\n arg = e.op().args[0]\n return arg.strftime('%A')\n\n\n@compiles(ops.Divide)\ndef bigquery_compiles_divide(t, e):\n return 'IEEE_DIVIDE({}, {})'.format(*map(t.translate, e.op().args))\n\n\n@compiles(ops.Strftime)\ndef compiles_strftime(translator, expr):\n arg, format_string = expr.op().args\n arg_type = arg.type()\n strftime_format_func_name = STRFTIME_FORMAT_FUNCTIONS[type(arg_type)]\n fmt_string = translator.translate(format_string)\n arg_formatted = translator.translate(arg)\n if isinstance(arg_type, dt.Timestamp):\n return 'FORMAT_{}({}, {}, {!r})'.format(\n strftime_format_func_name,\n fmt_string,\n arg_formatted,\n arg_type.timezone if arg_type.timezone is not None else 'UTC',\n )\n return 'FORMAT_{}({}, {})'.format(\n strftime_format_func_name, fmt_string, arg_formatted\n )\n\n\n@compiles(ops.StringToTimestamp)\ndef compiles_string_to_timestamp(translator, expr):\n arg, format_string, timezone_arg = expr.op().args\n fmt_string = translator.translate(format_string)\n arg_formatted = translator.translate(arg)\n if timezone_arg is not None:\n timezone_str = translator.translate(timezone_arg)\n return 'PARSE_TIMESTAMP({}, {}, {})'.format(\n fmt_string, arg_formatted, timezone_str\n )\n return 'PARSE_TIMESTAMP({}, {})'.format(fmt_string, arg_formatted)\n\n\nclass BigQueryTableSetFormatter(ImpalaTableSetFormatter):\n def _quote_identifier(self, name):\n if re.match(r'^[A-Za-z][A-Za-z_0-9]*$', name):\n return name\n return '`{}`'.format(name)\n\n\nclass BigQuerySelect(ImpalaSelect):\n\n translator = BigQueryExprTranslator\n\n @property\n def table_set_formatter(self):\n return BigQueryTableSetFormatter\n\n\n@rewrites(ops.IdenticalTo)\ndef identical_to(expr):\n left, right = expr.op().args\n return (left.isnull() & right.isnull()) | (left == right)\n\n\n@rewrites(ops.Log2)\ndef log2(expr):\n (arg,) = expr.op().args\n return arg.log(2)\n\n\n@rewrites(ops.Sum)\ndef bq_sum(expr):\n arg = expr.op().args[0]\n where = expr.op().args[1]\n if isinstance(arg, ir.BooleanColumn):\n return arg.cast('int64').sum(where=where)\n else:\n return expr\n\n\n@rewrites(ops.Mean)\ndef bq_mean(expr):\n arg = expr.op().args[0]\n where = expr.op().args[1]\n if isinstance(arg, ir.BooleanColumn):\n return arg.cast('int64').mean(where=where)\n else:\n return expr\n\n\nUNIT_FUNCS = {'s': 'SECONDS', 'ms': 'MILLIS', 'us': 'MICROS'}\n\n\n@compiles(ops.TimestampFromUNIX)\ndef compiles_timestamp_from_unix(t, e):\n value, unit = e.op().args\n return 'TIMESTAMP_{}({})'.format(UNIT_FUNCS[unit], t.translate(value))\n\n\n@compiles(ops.Floor)\ndef compiles_floor(t, e):\n bigquery_type = 
ibis_type_to_bigquery_type(e.type())\n arg = e.op().arg\n return 'CAST(FLOOR({}) AS {})'.format(t.translate(arg), bigquery_type)\n\n\n@compiles(ops.CMSMedian)\ndef compiles_approx(translator, expr):\n expr = expr.op()\n arg = expr.arg\n where = expr.where\n\n if where is not None:\n arg = where.ifelse(arg, ibis.NA)\n\n return 'APPROX_QUANTILES({}, 2)[OFFSET(1)]'.format(\n translator.translate(arg)\n )\n\n\n@compiles(ops.Covariance)\ndef compiles_covar(translator, expr):\n expr = expr.op()\n left = expr.left\n right = expr.right\n where = expr.where\n\n if expr.how == 'sample':\n how = 'SAMP'\n elif expr.how == 'pop':\n how = 'POP'\n else:\n raise ValueError(\n \"Covariance with how={!r} is not supported.\".format(how)\n )\n\n if where is not None:\n left = where.ifelse(left, ibis.NA)\n right = where.ifelse(right, ibis.NA)\n\n return \"COVAR_{}({}, {})\".format(how, left, right)\n\n\n@rewrites(ops.Any)\n@rewrites(ops.All)\n@rewrites(ops.NotAny)\n@rewrites(ops.NotAll)\ndef bigquery_any_all_no_op(expr):\n return expr\n\n\n@compiles(ops.Any)\ndef bigquery_compile_any(translator, expr):\n return \"LOGICAL_OR({})\".format(*map(translator.translate, expr.op().args))\n\n\n@compiles(ops.NotAny)\ndef bigquery_compile_notany(translator, expr):\n return \"LOGICAL_AND(NOT ({}))\".format(\n *map(translator.translate, expr.op().args)\n )\n\n\n@compiles(ops.All)\ndef bigquery_compile_all(translator, expr):\n return \"LOGICAL_AND({})\".format(*map(translator.translate, expr.op().args))\n\n\n@compiles(ops.NotAll)\ndef bigquery_compile_notall(translator, expr):\n return \"LOGICAL_OR(NOT ({}))\".format(\n *map(translator.translate, expr.op().args)\n )\n\n\nclass BigQueryDialect(impala_compiler.ImpalaDialect):\n translator = BigQueryExprTranslator\n\n\ndialect = BigQueryDialect\n", "path": "ibis/bigquery/compiler.py" } ]
[ { "content": "import datetime\nfrom functools import partial\n\nimport numpy as np\nimport regex as re\nimport toolz\nfrom multipledispatch import Dispatcher\n\nimport ibis\nimport ibis.common.exceptions as com\nimport ibis.expr.datatypes as dt\nimport ibis.expr.lineage as lin\nimport ibis.expr.operations as ops\nimport ibis.expr.types as ir\nimport ibis.sql.compiler as comp\nfrom ibis.bigquery.datatypes import ibis_type_to_bigquery_type\nfrom ibis.impala import compiler as impala_compiler\nfrom ibis.impala.compiler import (\n ImpalaSelect,\n ImpalaTableSetFormatter,\n _reduction,\n fixed_arity,\n unary,\n)\n\n\nclass BigQueryUDFNode(ops.ValueOp):\n pass\n\n\nclass BigQuerySelectBuilder(comp.SelectBuilder):\n @property\n def _select_class(self):\n return BigQuerySelect\n\n\nclass BigQueryUDFDefinition(comp.DDL):\n def __init__(self, expr, context):\n self.expr = expr\n self.context = context\n\n def compile(self):\n return self.expr.op().js\n\n\nclass BigQueryUnion(comp.Union):\n @staticmethod\n def keyword(distinct):\n return 'UNION DISTINCT' if distinct else 'UNION ALL'\n\n\ndef find_bigquery_udf(expr):\n if isinstance(expr.op(), BigQueryUDFNode):\n result = expr\n else:\n result = None\n return lin.proceed, result\n\n\nclass BigQueryQueryBuilder(comp.QueryBuilder):\n\n select_builder = BigQuerySelectBuilder\n union_class = BigQueryUnion\n\n def generate_setup_queries(self):\n queries = map(\n partial(BigQueryUDFDefinition, context=self.context),\n lin.traverse(find_bigquery_udf, self.expr),\n )\n\n # UDFs are uniquely identified by the name of the Node subclass we\n # generate.\n return list(\n toolz.unique(queries, key=lambda x: type(x.expr.op()).__name__)\n )\n\n\ndef build_ast(expr, context):\n builder = BigQueryQueryBuilder(expr, context=context)\n return builder.get_result()\n\n\ndef to_sql(expr, context):\n query_ast = build_ast(expr, context)\n compiled = query_ast.compile()\n return compiled\n\n\nclass BigQueryContext(comp.QueryContext):\n def _to_sql(self, expr, ctx):\n return to_sql(expr, context=ctx)\n\n\ndef _extract_field(sql_attr):\n def extract_field_formatter(translator, expr):\n op = expr.op()\n arg = translator.translate(op.args[0])\n if sql_attr == 'epochseconds':\n return f'UNIX_SECONDS({arg})'\n else:\n return f'EXTRACT({sql_attr} from {arg})'\n\n return extract_field_formatter\n\n\nbigquery_cast = Dispatcher('bigquery_cast')\n\n\n@bigquery_cast.register(str, dt.Timestamp, dt.Integer)\ndef bigquery_cast_timestamp_to_integer(compiled_arg, from_, to):\n return 'UNIX_MICROS({})'.format(compiled_arg)\n\n\n@bigquery_cast.register(str, dt.DataType, dt.DataType)\ndef bigquery_cast_generate(compiled_arg, from_, to):\n sql_type = ibis_type_to_bigquery_type(to)\n return 'CAST({} AS {})'.format(compiled_arg, sql_type)\n\n\ndef _cast(translator, expr):\n op = expr.op()\n arg, target_type = op.args\n arg_formatted = translator.translate(arg)\n return bigquery_cast(arg_formatted, arg.type(), target_type)\n\n\ndef _struct_field(translator, expr):\n arg, field = expr.op().args\n arg_formatted = translator.translate(arg)\n return '{}.`{}`'.format(arg_formatted, field)\n\n\ndef _array_concat(translator, expr):\n return 'ARRAY_CONCAT({})'.format(\n ', '.join(map(translator.translate, expr.op().args))\n )\n\n\ndef _array_index(translator, expr):\n # SAFE_OFFSET returns NULL if out of bounds\n return '{}[SAFE_OFFSET({})]'.format(\n *map(translator.translate, expr.op().args)\n )\n\n\ndef _string_find(translator, expr):\n haystack, needle, start, end = expr.op().args\n\n if start is not 
None:\n raise NotImplementedError('start not implemented for string find')\n if end is not None:\n raise NotImplementedError('end not implemented for string find')\n\n return 'STRPOS({}, {}) - 1'.format(\n translator.translate(haystack), translator.translate(needle)\n )\n\n\ndef _translate_pattern(translator, pattern):\n # add 'r' to string literals to indicate to BigQuery this is a raw string\n return 'r' * isinstance(pattern.op(), ops.Literal) + translator.translate(\n pattern\n )\n\n\ndef _regex_search(translator, expr):\n arg, pattern = expr.op().args\n regex = _translate_pattern(translator, pattern)\n result = 'REGEXP_CONTAINS({}, {})'.format(translator.translate(arg), regex)\n return result\n\n\ndef _regex_extract(translator, expr):\n arg, pattern, index = expr.op().args\n regex = _translate_pattern(translator, pattern)\n result = 'REGEXP_EXTRACT_ALL({}, {})[SAFE_OFFSET({})]'.format(\n translator.translate(arg), regex, translator.translate(index)\n )\n return result\n\n\ndef _regex_replace(translator, expr):\n arg, pattern, replacement = expr.op().args\n regex = _translate_pattern(translator, pattern)\n result = 'REGEXP_REPLACE({}, {}, {})'.format(\n translator.translate(arg), regex, translator.translate(replacement)\n )\n return result\n\n\ndef _string_concat(translator, expr):\n return 'CONCAT({})'.format(\n ', '.join(map(translator.translate, expr.op().arg))\n )\n\n\ndef _string_join(translator, expr):\n sep, args = expr.op().args\n return 'ARRAY_TO_STRING([{}], {})'.format(\n ', '.join(map(translator.translate, args)), translator.translate(sep)\n )\n\n\ndef _string_ascii(translator, expr):\n (arg,) = expr.op().args\n return 'TO_CODE_POINTS({})[SAFE_OFFSET(0)]'.format(\n translator.translate(arg)\n )\n\n\ndef _string_right(translator, expr):\n arg, nchars = map(translator.translate, expr.op().args)\n return 'SUBSTR({arg}, -LEAST(LENGTH({arg}), {nchars}))'.format(\n arg=arg, nchars=nchars\n )\n\n\ndef _array_literal_format(expr):\n return str(list(expr.op().value))\n\n\ndef _log(translator, expr):\n op = expr.op()\n arg, base = op.args\n arg_formatted = translator.translate(arg)\n\n if base is None:\n return 'ln({})'.format(arg_formatted)\n\n base_formatted = translator.translate(base)\n return 'log({}, {})'.format(arg_formatted, base_formatted)\n\n\ndef _literal(translator, expr):\n\n if isinstance(expr, ir.NumericValue):\n value = expr.op().value\n if not np.isfinite(value):\n return 'CAST({!r} AS FLOAT64)'.format(str(value))\n\n # special case literal timestamp, date, and time scalars\n if isinstance(expr.op(), ops.Literal):\n value = expr.op().value\n if isinstance(expr, ir.DateScalar):\n if isinstance(value, datetime.datetime):\n raw_value = value.date()\n else:\n raw_value = value\n return \"DATE '{}'\".format(raw_value)\n elif isinstance(expr, ir.TimestampScalar):\n return \"TIMESTAMP '{}'\".format(value)\n elif isinstance(expr, ir.TimeScalar):\n # TODO: define extractors on TimeValue expressions\n return \"TIME '{}'\".format(value)\n\n try:\n return impala_compiler._literal(translator, expr)\n except NotImplementedError:\n if isinstance(expr, ir.ArrayValue):\n return _array_literal_format(expr)\n raise NotImplementedError(type(expr).__name__)\n\n\ndef _arbitrary(translator, expr):\n arg, how, where = expr.op().args\n\n if where is not None:\n arg = where.ifelse(arg, ibis.NA)\n\n if how not in (None, 'first'):\n raise com.UnsupportedOperationError(\n '{!r} value not supported for arbitrary in BigQuery'.format(how)\n )\n\n return 
'ANY_VALUE({})'.format(translator.translate(arg))\n\n\n_date_units = {\n 'Y': 'YEAR',\n 'Q': 'QUARTER',\n 'W': 'WEEK',\n 'M': 'MONTH',\n 'D': 'DAY',\n}\n\n\n_timestamp_units = {\n 'us': 'MICROSECOND',\n 'ms': 'MILLISECOND',\n 's': 'SECOND',\n 'm': 'MINUTE',\n 'h': 'HOUR',\n}\n_time_units = _timestamp_units.copy()\n_timestamp_units.update(_date_units)\n\n\ndef _truncate(kind, units):\n def truncator(translator, expr):\n arg, unit = expr.op().args\n trans_arg = translator.translate(arg)\n valid_unit = units.get(unit)\n if valid_unit is None:\n raise com.UnsupportedOperationError(\n 'BigQuery does not support truncating {} values to unit '\n '{!r}'.format(arg.type(), unit)\n )\n return '{}_TRUNC({}, {})'.format(kind, trans_arg, valid_unit)\n\n return truncator\n\n\ndef _timestamp_op(func, units):\n def _formatter(translator, expr):\n op = expr.op()\n arg, offset = op.args\n\n unit = offset.type().unit\n if unit not in units:\n raise com.UnsupportedOperationError(\n 'BigQuery does not allow binary operation '\n '{} with INTERVAL offset {}'.format(func, unit)\n )\n formatted_arg = translator.translate(arg)\n formatted_offset = translator.translate(offset)\n result = '{}({}, {})'.format(func, formatted_arg, formatted_offset)\n return result\n\n return _formatter\n\n\nSTRFTIME_FORMAT_FUNCTIONS = {\n dt.Date: 'DATE',\n dt.Time: 'TIME',\n dt.Timestamp: 'TIMESTAMP',\n}\n\n\n_operation_registry = impala_compiler._operation_registry.copy()\n_operation_registry.update(\n {\n ops.ExtractYear: _extract_field('year'),\n ops.ExtractQuarter: _extract_field('quarter'),\n ops.ExtractMonth: _extract_field('month'),\n ops.ExtractDay: _extract_field('day'),\n ops.ExtractHour: _extract_field('hour'),\n ops.ExtractMinute: _extract_field('minute'),\n ops.ExtractSecond: _extract_field('second'),\n ops.ExtractMillisecond: _extract_field('millisecond'),\n ops.ExtractEpochSeconds: _extract_field('epochseconds'),\n ops.StringReplace: fixed_arity('REPLACE', 3),\n ops.StringSplit: fixed_arity('SPLIT', 2),\n ops.StringConcat: _string_concat,\n ops.StringJoin: _string_join,\n ops.StringAscii: _string_ascii,\n ops.StringFind: _string_find,\n ops.StrRight: _string_right,\n ops.Repeat: fixed_arity('REPEAT', 2),\n ops.RegexSearch: _regex_search,\n ops.RegexExtract: _regex_extract,\n ops.RegexReplace: _regex_replace,\n ops.GroupConcat: _reduction('STRING_AGG'),\n ops.IfNull: fixed_arity('IFNULL', 2),\n ops.Cast: _cast,\n ops.StructField: _struct_field,\n ops.ArrayCollect: unary('ARRAY_AGG'),\n ops.ArrayConcat: _array_concat,\n ops.ArrayIndex: _array_index,\n ops.ArrayLength: unary('ARRAY_LENGTH'),\n ops.HLLCardinality: _reduction('APPROX_COUNT_DISTINCT'),\n ops.Log: _log,\n ops.Sign: unary('SIGN'),\n ops.Modulus: fixed_arity('MOD', 2),\n ops.Date: unary('DATE'),\n # BigQuery doesn't have these operations built in.\n # ops.ArrayRepeat: _array_repeat,\n # ops.ArraySlice: _array_slice,\n ops.Literal: _literal,\n ops.Arbitrary: _arbitrary,\n ops.TimestampTruncate: _truncate('TIMESTAMP', _timestamp_units),\n ops.DateTruncate: _truncate('DATE', _date_units),\n ops.TimeTruncate: _truncate('TIME', _timestamp_units),\n ops.Time: unary('TIME'),\n ops.TimestampAdd: _timestamp_op(\n 'TIMESTAMP_ADD', {'h', 'm', 's', 'ms', 'us'}\n ),\n ops.TimestampSub: _timestamp_op(\n 'TIMESTAMP_DIFF', {'h', 'm', 's', 'ms', 'us'}\n ),\n ops.DateAdd: _timestamp_op('DATE_ADD', {'D', 'W', 'M', 'Q', 'Y'}),\n ops.DateSub: _timestamp_op('DATE_SUB', {'D', 'W', 'M', 'Q', 'Y'}),\n ops.TimestampNow: fixed_arity('CURRENT_TIMESTAMP', 0),\n }\n)\n\n_invalid_operations = 
{\n ops.Translate,\n ops.FindInSet,\n ops.Capitalize,\n ops.DateDiff,\n ops.TimestampDiff,\n}\n\n_operation_registry = {\n k: v\n for k, v in _operation_registry.items()\n if k not in _invalid_operations\n}\n\n\nclass BigQueryExprTranslator(impala_compiler.ImpalaExprTranslator):\n _registry = _operation_registry\n _rewrites = impala_compiler.ImpalaExprTranslator._rewrites.copy()\n\n context_class = BigQueryContext\n\n def _trans_param(self, expr):\n op = expr.op()\n if op not in self.context.params:\n raise KeyError(op)\n return '@{}'.format(expr.get_name())\n\n\ncompiles = BigQueryExprTranslator.compiles\nrewrites = BigQueryExprTranslator.rewrites\n\n\n@compiles(ops.DayOfWeekIndex)\ndef bigquery_day_of_week_index(t, e):\n arg = e.op().args[0]\n arg_formatted = t.translate(arg)\n return 'MOD(EXTRACT(DAYOFWEEK FROM {}) + 5, 7)'.format(arg_formatted)\n\n\n@rewrites(ops.DayOfWeekName)\ndef bigquery_day_of_week_name(e):\n arg = e.op().args[0]\n return arg.strftime('%A')\n\n\n@compiles(ops.Divide)\ndef bigquery_compiles_divide(t, e):\n return 'IEEE_DIVIDE({}, {})'.format(*map(t.translate, e.op().args))\n\n\n@compiles(ops.Strftime)\ndef compiles_strftime(translator, expr):\n arg, format_string = expr.op().args\n arg_type = arg.type()\n strftime_format_func_name = STRFTIME_FORMAT_FUNCTIONS[type(arg_type)]\n fmt_string = translator.translate(format_string)\n arg_formatted = translator.translate(arg)\n if isinstance(arg_type, dt.Timestamp):\n return 'FORMAT_{}({}, {}, {!r})'.format(\n strftime_format_func_name,\n fmt_string,\n arg_formatted,\n arg_type.timezone if arg_type.timezone is not None else 'UTC',\n )\n return 'FORMAT_{}({}, {})'.format(\n strftime_format_func_name, fmt_string, arg_formatted\n )\n\n\n@compiles(ops.StringToTimestamp)\ndef compiles_string_to_timestamp(translator, expr):\n arg, format_string, timezone_arg = expr.op().args\n fmt_string = translator.translate(format_string)\n arg_formatted = translator.translate(arg)\n if timezone_arg is not None:\n timezone_str = translator.translate(timezone_arg)\n return 'PARSE_TIMESTAMP({}, {}, {})'.format(\n fmt_string, arg_formatted, timezone_str\n )\n return 'PARSE_TIMESTAMP({}, {})'.format(fmt_string, arg_formatted)\n\n\nclass BigQueryTableSetFormatter(ImpalaTableSetFormatter):\n def _quote_identifier(self, name):\n if re.match(r'^[A-Za-z][A-Za-z_0-9]*$', name):\n return name\n return '`{}`'.format(name)\n\n\nclass BigQuerySelect(ImpalaSelect):\n\n translator = BigQueryExprTranslator\n\n @property\n def table_set_formatter(self):\n return BigQueryTableSetFormatter\n\n\n@rewrites(ops.IdenticalTo)\ndef identical_to(expr):\n left, right = expr.op().args\n return (left.isnull() & right.isnull()) | (left == right)\n\n\n@rewrites(ops.Log2)\ndef log2(expr):\n (arg,) = expr.op().args\n return arg.log(2)\n\n\n@rewrites(ops.Sum)\ndef bq_sum(expr):\n arg = expr.op().args[0]\n where = expr.op().args[1]\n if isinstance(arg, ir.BooleanColumn):\n return arg.cast('int64').sum(where=where)\n else:\n return expr\n\n\n@rewrites(ops.Mean)\ndef bq_mean(expr):\n arg = expr.op().args[0]\n where = expr.op().args[1]\n if isinstance(arg, ir.BooleanColumn):\n return arg.cast('int64').mean(where=where)\n else:\n return expr\n\n\nUNIT_FUNCS = {'s': 'SECONDS', 'ms': 'MILLIS', 'us': 'MICROS'}\n\n\n@compiles(ops.TimestampFromUNIX)\ndef compiles_timestamp_from_unix(t, e):\n value, unit = e.op().args\n return 'TIMESTAMP_{}({})'.format(UNIT_FUNCS[unit], t.translate(value))\n\n\n@compiles(ops.Floor)\ndef compiles_floor(t, e):\n bigquery_type = 
ibis_type_to_bigquery_type(e.type())\n arg = e.op().arg\n return 'CAST(FLOOR({}) AS {})'.format(t.translate(arg), bigquery_type)\n\n\n@compiles(ops.CMSMedian)\ndef compiles_approx(translator, expr):\n expr = expr.op()\n arg = expr.arg\n where = expr.where\n\n if where is not None:\n arg = where.ifelse(arg, ibis.NA)\n\n return 'APPROX_QUANTILES({}, 2)[OFFSET(1)]'.format(\n translator.translate(arg)\n )\n\n\n@compiles(ops.Covariance)\ndef compiles_covar(translator, expr):\n expr = expr.op()\n left = expr.left\n right = expr.right\n where = expr.where\n\n if expr.how == 'sample':\n how = 'SAMP'\n elif expr.how == 'pop':\n how = 'POP'\n else:\n raise ValueError(\n \"Covariance with how={!r} is not supported.\".format(how)\n )\n\n if where is not None:\n left = where.ifelse(left, ibis.NA)\n right = where.ifelse(right, ibis.NA)\n\n return \"COVAR_{}({}, {})\".format(\n how, translator.translate(left), translator.translate(right)\n )\n\n\n@rewrites(ops.Any)\n@rewrites(ops.All)\n@rewrites(ops.NotAny)\n@rewrites(ops.NotAll)\ndef bigquery_any_all_no_op(expr):\n return expr\n\n\n@compiles(ops.Any)\ndef bigquery_compile_any(translator, expr):\n return \"LOGICAL_OR({})\".format(*map(translator.translate, expr.op().args))\n\n\n@compiles(ops.NotAny)\ndef bigquery_compile_notany(translator, expr):\n return \"LOGICAL_AND(NOT ({}))\".format(\n *map(translator.translate, expr.op().args)\n )\n\n\n@compiles(ops.All)\ndef bigquery_compile_all(translator, expr):\n return \"LOGICAL_AND({})\".format(*map(translator.translate, expr.op().args))\n\n\n@compiles(ops.NotAll)\ndef bigquery_compile_notall(translator, expr):\n return \"LOGICAL_OR(NOT ({}))\".format(\n *map(translator.translate, expr.op().args)\n )\n\n\nclass BigQueryDialect(impala_compiler.ImpalaDialect):\n translator = BigQueryExprTranslator\n\n\ndialect = BigQueryDialect\n", "path": "ibis/bigquery/compiler.py" } ]
diff --git a/docs/source/release/index.rst b/docs/source/release/index.rst index 568b72d63d2d..55683ed0d581 100644 --- a/docs/source/release/index.rst +++ b/docs/source/release/index.rst @@ -12,6 +12,7 @@ Release Notes These release notes are for versions of ibis **1.0 and later**. Release notes for pre-1.0 versions of ibis can be found at :doc:`release-pre-1.0` +* :bug:`2367` Fix the covariance operator in the BigQuery backend. * :feature:`2366` Add PySpark support for ReductionVectorizedUDF * :feature:`2306` Add time context in `scope` in execution for pandas backend * :support:`2351` Simplifying tests directories structure diff --git a/ibis/bigquery/compiler.py b/ibis/bigquery/compiler.py index 55ee2cce540b..6c46148290b9 100644 --- a/ibis/bigquery/compiler.py +++ b/ibis/bigquery/compiler.py @@ -573,7 +573,9 @@ def compiles_covar(translator, expr): left = where.ifelse(left, ibis.NA) right = where.ifelse(right, ibis.NA) - return "COVAR_{}({}, {})".format(how, left, right) + return "COVAR_{}({}, {})".format( + how, translator.translate(left), translator.translate(right) + ) @rewrites(ops.Any) diff --git a/ibis/bigquery/tests/test_compiler.py b/ibis/bigquery/tests/test_compiler.py index 814ebde9e3d3..1d7d4558fb48 100644 --- a/ibis/bigquery/tests/test_compiler.py +++ b/ibis/bigquery/tests/test_compiler.py @@ -457,9 +457,8 @@ def test_cov(alltypes, project_id): FROM `{project_id}.testing.functional_alltypes`""" assert result == expected - expr = d.cov(d, how='error') with pytest.raises(ValueError): - expr.compile() + d.cov(d, how='error') @pytest.mark.parametrize(
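The fix recorded above is narrow but instructive: `compiles_covar` was passing ibis expression objects straight into `str.format`, which stringifies them via `repr` instead of emitting SQL; routing each operand through `translator.translate` first yields the compiled column references. A minimal sketch of the difference — `ToyExpr` and `ToyTranslator` are invented stand-ins for illustration, not ibis APIs:

```python
# Toy stand-ins showing why compiles_covar must translate its operands
# before string formatting; these classes are illustrative only.
class ToyExpr:
    def __init__(self, sql):
        self.sql = sql


class ToyTranslator:
    def translate(self, expr):
        return expr.sql


left, right = ToyExpr('t0.`double_col`'), ToyExpr('t0.`float_col`')
translator = ToyTranslator()

# Buggy pattern (pre-fix): format() falls back to the object's repr,
# embedding text like '<__main__.ToyExpr object at 0x...>' in the query.
buggy = "COVAR_SAMP({}, {})".format(left, right)

# Fixed pattern: translate each operand to its SQL string first.
fixed = "COVAR_SAMP({}, {})".format(
    translator.translate(left), translator.translate(right)
)
print(fixed)  # COVAR_SAMP(t0.`double_col`, t0.`float_col`)
```

The accompanying test change also moves `d.cov(d, how='error')` inside `pytest.raises`, indicating the `ValueError` is now expected at expression construction time rather than at compile time.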
ibis-project__ibis-7364
bug: The command DROP VIEW IF EXISTS does not exist in Oracle
### What happened?

Oracle queries fail while dropping the view. IF EXISTS is not supported in Oracle: https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/DROP-VIEW.html

### What version of ibis are you using?

7.0.0

### What backend(s) are you using, if any?

Oracle

### Relevant log output

```sh
sqlalchemy.exc.DatabaseError: (oracledb.exceptions.DatabaseError) ORA-00933: SQL command not properly ended
[SQL: DROP VIEW IF EXISTS "_ibis_oracle_metadata_d4gbmh4h2fa2jnq5qo3o3rg6sa"]
```

### Code of Conduct

- [X] I agree to follow this project's Code of Conduct
[ { "content": "\"\"\"The Oracle backend.\"\"\"\n\nfrom __future__ import annotations\n\nimport atexit\nimport contextlib\nimport sys\nimport warnings\nfrom typing import TYPE_CHECKING, Any\n\nimport oracledb\n\nfrom ibis import util\n\n# Wow, this is truly horrible\n# Get out your clippers, it's time to shave a yak.\n#\n# 1. snowflake-sqlalchemy doesn't support sqlalchemy 2.0\n# 2. oracledb is only supported in sqlalchemy 2.0\n# 3. Ergo, module hacking is required to avoid doing a silly amount of work\n# to create multiple lockfiles or port snowflake away from sqlalchemy\n# 4. Also the version needs to be spoofed to be >= 7 or else the cx_Oracle\n# dialect barfs\noracledb.__version__ = oracledb.version = \"7\"\n\nsys.modules[\"cx_Oracle\"] = oracledb\n\nimport sqlalchemy as sa # noqa: E402\n\nimport ibis.common.exceptions as exc # noqa: E402\nimport ibis.expr.datatypes as dt # noqa: E402\nimport ibis.expr.operations as ops # noqa: E402\nimport ibis.expr.schema as sch # noqa: E402\nfrom ibis.backends.base.sql.alchemy import ( # noqa: E402\n AlchemyCompiler,\n AlchemyExprTranslator,\n BaseAlchemyBackend,\n)\nfrom ibis.backends.oracle.datatypes import OracleType # noqa: E402\nfrom ibis.backends.oracle.registry import operation_registry # noqa: E402\n\nif TYPE_CHECKING:\n from collections.abc import Iterable\n\n\nclass OracleExprTranslator(AlchemyExprTranslator):\n _registry = operation_registry.copy()\n _rewrites = AlchemyExprTranslator._rewrites.copy()\n _dialect_name = \"oracle\"\n _has_reduction_filter_syntax = False\n _require_order_by = (\n *AlchemyExprTranslator._require_order_by,\n ops.Reduction,\n ops.Lag,\n ops.Lead,\n )\n\n _forbids_frame_clause = (\n *AlchemyExprTranslator._forbids_frame_clause,\n ops.Lag,\n ops.Lead,\n )\n\n _quote_column_names = True\n _quote_table_names = True\n\n type_mapper = OracleType\n\n\nclass OracleCompiler(AlchemyCompiler):\n translator_class = OracleExprTranslator\n support_values_syntax_in_select = False\n supports_indexed_grouping_keys = False\n null_limit = None\n\n\nclass Backend(BaseAlchemyBackend):\n name = \"oracle\"\n compiler = OracleCompiler\n supports_create_or_replace = False\n supports_temporary_tables = True\n _temporary_prefix = \"GLOBAL TEMPORARY\"\n\n def do_connect(\n self,\n *,\n user: str,\n password: str,\n host: str = \"localhost\",\n port: int = 1521,\n database: str | None = None,\n sid: str | None = None,\n service_name: str | None = None,\n dsn: str | None = None,\n **_: Any,\n ) -> None:\n \"\"\"Create an Ibis client using the passed connection parameters.\n\n Parameters\n ----------\n user\n Username\n password\n Password\n host\n Hostname\n port\n Port\n database\n Used as an Oracle service name if provided.\n sid\n Unique name of an Oracle Instance, used to construct a DSN if\n provided.\n service_name\n Oracle service name, used to construct a DSN if provided. Only one\n of database and service_name should be provided.\n dsn\n An Oracle Data Source Name. 
If provided, overrides all other\n connection arguments except username and password.\n \"\"\"\n # SID: unique name of an INSTANCE running an oracle process (a single, identifiable machine)\n # service name: an ALIAS to one (or many) individual instances that can\n # be hotswapped without the client knowing / caring\n if dsn is not None and (\n database is not None or sid is not None or service_name is not None\n ):\n warnings.warn(\n \"Oracle DSN provided, overriding additional provided connection arguments\"\n )\n\n if service_name is not None and database is not None:\n raise exc.IbisInputError(\n \"Values provided for both service_name and database. \"\n \"Both of these values map to an Oracle service_name, \"\n \"please provide only one of them.\"\n )\n\n if service_name is None and database is not None:\n service_name = database\n\n if dsn is None:\n dsn = oracledb.makedsn(host, port, service_name=service_name, sid=sid)\n url = sa.engine.url.make_url(f\"oracle://{user}:{password}@{dsn}\")\n\n engine = sa.create_engine(\n url,\n poolclass=sa.pool.StaticPool,\n # We set the statement cache size to 0 because Oracle will otherwise\n # attempt to reuse prepared statements even if the type of the bound variable\n # has changed.\n # This is apparently accepted behavior.\n # https://python-oracledb.readthedocs.io/en/latest/user_guide/appendix_b.html#statement-caching-in-thin-and-thick-modes\n connect_args={\"stmtcachesize\": 0},\n )\n\n super().do_connect(engine)\n\n def normalize_name(name):\n if name is None:\n return None\n elif not name:\n return \"\"\n elif name.lower() == name:\n return sa.sql.quoted_name(name, quote=True)\n else:\n return name\n\n self.con.dialect.normalize_name = normalize_name\n\n def _from_url(self, url: str, **kwargs):\n return self.do_connect(user=url.username, password=url.password, dsn=url.host)\n\n @property\n def current_database(self) -> str:\n return self._scalar_query(\"SELECT * FROM global_name\")\n\n def _metadata(self, query: str) -> Iterable[tuple[str, dt.DataType]]:\n from sqlalchemy_views import CreateView, DropView\n\n name = util.gen_name(\"oracle_metadata\")\n\n view = sa.table(name)\n create_view = CreateView(view, sa.text(query))\n drop_view = DropView(view, if_exists=True)\n\n t = sa.table(\n \"all_tab_columns\",\n sa.column(\"table_name\"),\n sa.column(\"column_name\"),\n sa.column(\"data_type\"),\n sa.column(\"data_precision\"),\n sa.column(\"data_scale\"),\n sa.column(\"nullable\"),\n )\n metadata_query = sa.select(\n t.c.column_name,\n t.c.data_type,\n t.c.data_precision,\n t.c.data_scale,\n (t.c.nullable == \"Y\").label(\"nullable\"),\n ).where(t.c.table_name == name)\n\n with self.begin() as con:\n con.execute(create_view)\n try:\n results = con.execute(metadata_query).fetchall()\n finally:\n # drop the view no matter what\n con.execute(drop_view)\n\n for name, type_string, precision, scale, nullable in results:\n if precision is not None and scale is not None and precision != 0:\n typ = dt.Decimal(precision=precision, scale=scale, nullable=nullable)\n elif precision == 0:\n # TODO: how to disambiguate between int and float here without inspecting the value?\n typ = dt.float\n else:\n typ = OracleType.from_string(type_string, nullable=nullable)\n yield name, typ\n\n def _table_from_schema(\n self,\n name: str,\n schema: sch.Schema,\n temp: bool = False,\n database: str | None = None,\n **kwargs: Any,\n ) -> sa.Table:\n if temp:\n kwargs[\"oracle_on_commit\"] = \"PRESERVE ROWS\"\n t = super()._table_from_schema(name, schema, temp, 
database, **kwargs)\n if temp:\n atexit.register(self._clean_up_tmp_table, t)\n return t\n\n def _clean_up_tmp_table(self, name: str) -> None:\n tmptable = self._get_sqla_table(name, autoload=False)\n with self.begin() as bind:\n # global temporary tables cannot be dropped without first truncating them\n #\n # https://stackoverflow.com/questions/32423397/force-oracle-drop-global-temp-table\n #\n # ignore DatabaseError exceptions because the table may not exist\n # because it's already been deleted\n with contextlib.suppress(sa.exc.DatabaseError):\n bind.exec_driver_sql(f'TRUNCATE TABLE \"{tmptable.name}\"')\n with contextlib.suppress(sa.exc.DatabaseError):\n tmptable.drop(bind=bind)\n\n def _clean_up_cached_table(self, op):\n self._clean_up_tmp_table(op.name)\n", "path": "ibis/backends/oracle/__init__.py" } ]
[ { "content": "\"\"\"The Oracle backend.\"\"\"\n\nfrom __future__ import annotations\n\nimport atexit\nimport contextlib\nimport sys\nimport warnings\nfrom typing import TYPE_CHECKING, Any\n\nimport oracledb\n\nfrom ibis import util\n\n# Wow, this is truly horrible\n# Get out your clippers, it's time to shave a yak.\n#\n# 1. snowflake-sqlalchemy doesn't support sqlalchemy 2.0\n# 2. oracledb is only supported in sqlalchemy 2.0\n# 3. Ergo, module hacking is required to avoid doing a silly amount of work\n# to create multiple lockfiles or port snowflake away from sqlalchemy\n# 4. Also the version needs to be spoofed to be >= 7 or else the cx_Oracle\n# dialect barfs\noracledb.__version__ = oracledb.version = \"7\"\n\nsys.modules[\"cx_Oracle\"] = oracledb\n\nimport sqlalchemy as sa # noqa: E402\n\nimport ibis.common.exceptions as exc # noqa: E402\nimport ibis.expr.datatypes as dt # noqa: E402\nimport ibis.expr.operations as ops # noqa: E402\nimport ibis.expr.schema as sch # noqa: E402\nfrom ibis.backends.base.sql.alchemy import ( # noqa: E402\n AlchemyCompiler,\n AlchemyExprTranslator,\n BaseAlchemyBackend,\n)\nfrom ibis.backends.oracle.datatypes import OracleType # noqa: E402\nfrom ibis.backends.oracle.registry import operation_registry # noqa: E402\n\nif TYPE_CHECKING:\n from collections.abc import Iterable\n\n\nclass OracleExprTranslator(AlchemyExprTranslator):\n _registry = operation_registry.copy()\n _rewrites = AlchemyExprTranslator._rewrites.copy()\n _dialect_name = \"oracle\"\n _has_reduction_filter_syntax = False\n _require_order_by = (\n *AlchemyExprTranslator._require_order_by,\n ops.Reduction,\n ops.Lag,\n ops.Lead,\n )\n\n _forbids_frame_clause = (\n *AlchemyExprTranslator._forbids_frame_clause,\n ops.Lag,\n ops.Lead,\n )\n\n _quote_column_names = True\n _quote_table_names = True\n\n type_mapper = OracleType\n\n\nclass OracleCompiler(AlchemyCompiler):\n translator_class = OracleExprTranslator\n support_values_syntax_in_select = False\n supports_indexed_grouping_keys = False\n null_limit = None\n\n\nclass Backend(BaseAlchemyBackend):\n name = \"oracle\"\n compiler = OracleCompiler\n supports_create_or_replace = False\n supports_temporary_tables = True\n _temporary_prefix = \"GLOBAL TEMPORARY\"\n\n def do_connect(\n self,\n *,\n user: str,\n password: str,\n host: str = \"localhost\",\n port: int = 1521,\n database: str | None = None,\n sid: str | None = None,\n service_name: str | None = None,\n dsn: str | None = None,\n **_: Any,\n ) -> None:\n \"\"\"Create an Ibis client using the passed connection parameters.\n\n Parameters\n ----------\n user\n Username\n password\n Password\n host\n Hostname\n port\n Port\n database\n Used as an Oracle service name if provided.\n sid\n Unique name of an Oracle Instance, used to construct a DSN if\n provided.\n service_name\n Oracle service name, used to construct a DSN if provided. Only one\n of database and service_name should be provided.\n dsn\n An Oracle Data Source Name. 
If provided, overrides all other\n connection arguments except username and password.\n \"\"\"\n # SID: unique name of an INSTANCE running an oracle process (a single, identifiable machine)\n # service name: an ALIAS to one (or many) individual instances that can\n # be hotswapped without the client knowing / caring\n if dsn is not None and (\n database is not None or sid is not None or service_name is not None\n ):\n warnings.warn(\n \"Oracle DSN provided, overriding additional provided connection arguments\"\n )\n\n if service_name is not None and database is not None:\n raise exc.IbisInputError(\n \"Values provided for both service_name and database. \"\n \"Both of these values map to an Oracle service_name, \"\n \"please provide only one of them.\"\n )\n\n if service_name is None and database is not None:\n service_name = database\n\n if dsn is None:\n dsn = oracledb.makedsn(host, port, service_name=service_name, sid=sid)\n url = sa.engine.url.make_url(f\"oracle://{user}:{password}@{dsn}\")\n\n engine = sa.create_engine(\n url,\n poolclass=sa.pool.StaticPool,\n # We set the statement cache size to 0 because Oracle will otherwise\n # attempt to reuse prepared statements even if the type of the bound variable\n # has changed.\n # This is apparently accepted behavior.\n # https://python-oracledb.readthedocs.io/en/latest/user_guide/appendix_b.html#statement-caching-in-thin-and-thick-modes\n connect_args={\"stmtcachesize\": 0},\n )\n\n super().do_connect(engine)\n\n def normalize_name(name):\n if name is None:\n return None\n elif not name:\n return \"\"\n elif name.lower() == name:\n return sa.sql.quoted_name(name, quote=True)\n else:\n return name\n\n self.con.dialect.normalize_name = normalize_name\n\n def _from_url(self, url: str, **kwargs):\n return self.do_connect(user=url.username, password=url.password, dsn=url.host)\n\n @property\n def current_database(self) -> str:\n return self._scalar_query(\"SELECT * FROM global_name\")\n\n def _metadata(self, query: str) -> Iterable[tuple[str, dt.DataType]]:\n from sqlalchemy_views import CreateView, DropView\n\n name = util.gen_name(\"oracle_metadata\")\n\n view = sa.table(name)\n create_view = CreateView(view, sa.text(query))\n drop_view = DropView(view, if_exists=False)\n\n t = sa.table(\n \"all_tab_columns\",\n sa.column(\"table_name\"),\n sa.column(\"column_name\"),\n sa.column(\"data_type\"),\n sa.column(\"data_precision\"),\n sa.column(\"data_scale\"),\n sa.column(\"nullable\"),\n )\n metadata_query = sa.select(\n t.c.column_name,\n t.c.data_type,\n t.c.data_precision,\n t.c.data_scale,\n (t.c.nullable == \"Y\").label(\"nullable\"),\n ).where(t.c.table_name == name)\n\n with self.begin() as con:\n con.execute(create_view)\n try:\n results = con.execute(metadata_query).fetchall()\n finally:\n # drop the view no matter what\n con.execute(drop_view)\n\n for name, type_string, precision, scale, nullable in results:\n if precision is not None and scale is not None and precision != 0:\n typ = dt.Decimal(precision=precision, scale=scale, nullable=nullable)\n elif precision == 0:\n # TODO: how to disambiguate between int and float here without inspecting the value?\n typ = dt.float\n else:\n typ = OracleType.from_string(type_string, nullable=nullable)\n yield name, typ\n\n def _table_from_schema(\n self,\n name: str,\n schema: sch.Schema,\n temp: bool = False,\n database: str | None = None,\n **kwargs: Any,\n ) -> sa.Table:\n if temp:\n kwargs[\"oracle_on_commit\"] = \"PRESERVE ROWS\"\n t = super()._table_from_schema(name, schema, temp, 
database, **kwargs)\n if temp:\n atexit.register(self._clean_up_tmp_table, t)\n return t\n\n def _clean_up_tmp_table(self, name: str) -> None:\n tmptable = self._get_sqla_table(name, autoload=False)\n with self.begin() as bind:\n # global temporary tables cannot be dropped without first truncating them\n #\n # https://stackoverflow.com/questions/32423397/force-oracle-drop-global-temp-table\n #\n # ignore DatabaseError exceptions because the table may not exist\n # because it's already been deleted\n with contextlib.suppress(sa.exc.DatabaseError):\n bind.exec_driver_sql(f'TRUNCATE TABLE \"{tmptable.name}\"')\n with contextlib.suppress(sa.exc.DatabaseError):\n tmptable.drop(bind=bind)\n\n def _clean_up_cached_table(self, op):\n self._clean_up_tmp_table(op.name)\n", "path": "ibis/backends/oracle/__init__.py" } ]
diff --git a/ibis/backends/oracle/__init__.py b/ibis/backends/oracle/__init__.py index 38617b7959f1..cd236c97f9c9 100644 --- a/ibis/backends/oracle/__init__.py +++ b/ibis/backends/oracle/__init__.py @@ -181,7 +181,7 @@ def _metadata(self, query: str) -> Iterable[tuple[str, dt.DataType]]: view = sa.table(name) create_view = CreateView(view, sa.text(query)) - drop_view = DropView(view, if_exists=True) + drop_view = DropView(view, if_exists=False) t = sa.table( "all_tab_columns",
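The change here is deliberately small: because `_metadata` creates the temporary view itself and drops it in a `finally` block, the view is guaranteed to exist, so `DropView(view, if_exists=False)` simply stops emitting the `IF EXISTS` clause that Oracle rejects with ORA-00933. Where a genuinely tolerant drop is needed on Oracle, the usual pattern is to attempt the drop and swallow the resulting error, as this backend already does for temporary tables in `_clean_up_tmp_table`. A hedged sketch of such a helper follows — it is illustrative, not part of ibis, and a stricter version would inspect the error for ORA-00942, Oracle's "table or view does not exist" code:

```python
# Illustrative helper, not part of ibis: drop an Oracle view while
# tolerating its absence, since Oracle lacks DROP VIEW IF EXISTS.
import contextlib

import sqlalchemy as sa


def drop_view_if_exists(bind, name: str) -> None:
    # A missing view raises ORA-00942, which the driver surfaces as a
    # DatabaseError; suppressing it emulates IF EXISTS. Suppressing every
    # DatabaseError is coarse — production code should check the error code.
    with contextlib.suppress(sa.exc.DatabaseError):
        bind.exec_driver_sql(f'DROP VIEW "{name}"')
```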
fidals__shopelectro-491
order.es6:234: Test order redirect to ya.kassa The puzzle `473-f28eab07` from #473 has to be resolved: https://github.com/fidals/shopelectro/blob/f0e50b7c3b66e1d18f3f8356c245e16167c51fc3/front/js/components/order.es6#L234-L234 The puzzle was created by duker33 on 06-Aug-18. Estimate: 30 minutes, If you have any technical questions, don't ask me, submit new tickets instead. The task will be "done" when the problem is fixed and the text of the puzzle is _removed_ from the source code. Here is more about [PDD](http://www.yegor256.com/2009/03/04/pdd.html) and [about me](http://www.yegor256.com/2017/04/05/pdd-in-action.html).
[ { "content": "\"\"\"\nDjango settings for shopelectro project.\n\nGenerated by 'django-admin startproject' using Django 1.9.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nimport os\nfrom datetime import datetime\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', 'so_secret_key')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# http://bit.ly/sorl-thumbnail-docs\nTHUMBNAIL_DEBUG = False\n\nALLOWED_HOSTS = ['*']\n\nif os.environ.get('TEST_ENV', False):\n # disable https in CI\n # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'http')\n\n# Enable in frame loading for Ya.Metric\n# https://docs.djangoproject.com/es/1.10/ref/clickjacking/\n# https://yandex.ru/support/metrika/general/counter-webvisor.xml#download-page\nX_FRAME_OPTIONS = 'ALLOW-FROM http://webvisor.com'\n\n# Application definition\nINSTALLED_APPS = [\n # https://docs.djangoproject.com/en/1.9/ref/contrib/admin/#django.contrib.admin.autodiscover\n 'django.contrib.contenttypes',\n 'django.contrib.auth',\n 'django.contrib.messages',\n 'django.contrib.redirects',\n 'django.contrib.sessions',\n 'django.contrib.sitemaps',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django_user_agents',\n 'generic_admin',\n 'django.contrib.admin.apps.SimpleAdminConfig',\n 'debug_toolbar',\n 'mptt',\n 'widget_tweaks',\n 'sorl.thumbnail',\n 'django_select2',\n 'images',\n 'refarm_redirects',\n 'pages',\n 'catalog',\n 'search',\n 'ecommerce',\n 'shopelectro',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django_user_agents.middleware.UserAgentMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n 'refarm_redirects.middleware.RedirectAllMiddleware',\n]\n\nROOT_URLCONF = 'shopelectro.urls'\n\nTEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [TEMPLATE_DIR],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.request',\n 'django.template.context_processors.static',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'ecommerce.context_processors.cart',\n 'shopelectro.context_processors.shop',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'shopelectro.wsgi.application'\n\n# Password validation\n# 
https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLOCALE_NAME = 'en_US'\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'shopelectro/locale')]\nFORMAT_MODULE_PATH = [\n 'shopelectro.formats',\n]\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nASSETS_DIR = os.path.join(BASE_DIR, 'assets')\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'front_build'),\n ASSETS_DIR,\n]\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\nDATABASE_URL = os.environ[\"POSTGRES_URL\"]\n\n# to activate django connections pool for persistent connections.\n# https://docs.djangoproject.com/en/1.11/ref/databases/#persistent-connections\nCONN_MAX_AGE = None\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.environ['POSTGRES_DB'],\n 'USER': os.environ['POSTGRES_USER'],\n 'PASSWORD': os.environ['POSTGRES_PASSWORD'],\n 'HOST': os.environ['POSTGRES_URL'],\n 'PORT': '5432',\n }\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'pages': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'catalog': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'search': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'ecommerce': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'images': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'shopelectro': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n },\n}\n\nSITE_CREATED = datetime(2013, 1, 1)\n\nLOCALHOST = 'http://127.0.0.1:8000/'\nBASE_URL = 'https://www.shopelectro.ru'\n\nPLACEHOLDER_IMAGE = 'images/logo.png'\nPLACEHOLDER_ALT = 'Логотип компании Shopelectro'\n\n# Autocomplete and search settings\nSEARCH_SEE_ALL_LABEL = 'Смотреть все результаты'\n\n# For sitemaps and sites framework\nSITE_ID = 1\nSITE_DOMAIN_NAME = 'www.shopelectro.ru'\n\n# Used to retrieve instances in ecommerce.Cart\nCART_ID = 'cart'\n\n# Used to define choices attr in definition of Order.payment_type field\nPAYMENT_OPTIONS = (\n ('cash', 'Наличные'),\n ('cashless', 'Безналичные и денежные переводы'),\n ('AC', 'Банковская карта'),\n ('PC', 'Яндекс.Деньги'),\n ('GP', 'Связной (терминал)'),\n ('AB', 'Альфа-Клик'),\n)\n\n# It is fake-pass. 
Correct pass will be created on `docker-compose up` stage from `docker/.env`\nYANDEX_SHOP_PASS = os.environ.get('YANDEX_SHOP_PASS', 'so_secret_pass')\n\n# Used for order's email in ecommerce app\nFAKE_ORDER_NUMBER = 6000\n\n# Subjects for different types of emails sent from SE.\nEMAIL_SUBJECTS = {\n 'call': 'Обратный звонок',\n 'order': 'Заказ №{0.fake_order_number}',\n 'yandex_order': 'Заказ №{0.fake_order_number} | Яндекс.Касса',\n 'one_click': 'Заказ в один клик №{0.fake_order_number}',\n 'ya_feedback_request': 'Оцените нас на Яндекс.Маркете',\n}\n\n# Email configs\n# It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', 'so_secret_pass')\nEMAIL_HOST_USER = '[email protected]'\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.yandex.ru'\nEMAIL_PORT = 587\nEMAIL_SENDER = '[email protected]'\nEMAIL_RECIPIENTS = os.environ.get('EMAIL_RECIPIENTS', '[email protected]').split(',')\n\n# FTP configs\nFTP_USER = os.environ.get('FTP_USER', 'user')\nFTP_PASS = os.environ.get('FTP_PASS', 'pass')\nFTP_IP = os.environ.get('FTP_IP', '0.0.0.0')\n\nENV_TYPE = os.environ.get('ENV_TYPE', 'PROD') # LOCAL | CI | PROD\n\n# 'Prod' <-> 'Product #1 of Category #0 of Category #1' = 0.17\n# About trigram similarity: https://goo.gl/uYFcxN\nTRIGRAM_MIN_SIMILARITY = 0.15\n\n# Used in admin image uploads\nMODEL_TYPES = {\n 'Product': {\n 'app_name': 'shopelectro',\n 'dir_name': 'products',\n },\n 'Category': {\n 'app_name': 'shopelectro',\n 'dir_name': 'categories',\n }\n}\n\n# This need for using {% debug %} variable in templates.\nINTERNAL_IPS = (\n '127.0.0.1',\n)\n\nTOP_PRODUCTS = [291, 438, 1137, 2166, 2725, 2838, 3288, 3884, 3959, 2764]\nCATEGORY_STEP_MULTIPLIERS = [12, 15, 24, 25, 48, 50, 60, 100]\n\n# Reduce retail product prices by PRICE_REDUCER.\n# It is required to make prices on shopelectro.ru and se78.ru unique.\nPRICE_REDUCER = 1\n\nSHOP = {\n 'id': '69886',\n 'scid': '64788',\n 'success_url': BASE_URL + '/shop/order-success/',\n 'fail_url': BASE_URL + '/',\n 'cps_phone': '+78124163200',\n 'cps_email': '[email protected]',\n 'local_delivery_cost': 300,\n 'local_delivery_cost_threshold': 5000,\n}\n\n\ndef get_robots_content():\n with open(os.path.join(TEMPLATE_DIR, 'robots.txt')) as robots_file:\n return robots_file.read()\n\n# used in data-migrations and tests\nCUSTOM_PAGES = {\n 'index': {\n 'slug': '',\n 'name': 'Интернет-магазин элементов питания \"ShopElectro\"',\n 'menu_title': 'Главная',\n 'title': 'Интернет-магазин Элементов питания с доставкой по России',\n },\n 'sitemap': {\n 'slug': 'sitemap',\n 'h1': 'Карта сайта',\n 'name': 'Карта сайта',\n },\n 'order': {\n 'slug': 'order',\n 'name': 'Оформление заказа',\n 'title': 'Корзина Интернет-магазин shopelectro.ru Санкт-Петербург',\n },\n 'search': {\n 'slug': 'search',\n 'name': 'Результаты поиска',\n },\n 'catalog': {\n 'slug': 'catalog',\n 'name': 'Каталог товаров',\n 'menu_title': 'Каталог',\n },\n 'order_success': {\n 'slug': 'order-success',\n 'name': 'Заказ принят',\n },\n 'robots': {\n 'slug': 'robots.txt',\n 'content': get_robots_content(),\n },\n}\n\nTAGS_URL_DELIMITER = '-or-'\nTAG_GROUPS_URL_DELIMITER = '-and-'\n\nTAGS_TITLE_DELIMITER = ' или '\nTAG_GROUPS_TITLE_DELIMITER = ' и '\n\nTAGS_ORDER = ['group__position', 'group__name', 'position', 'name']\n\n# -- App business logic --\n# every product price will be multiplied on this value\n# during import from 1C.\n# Multipliers are related to prices in this order:\n# big/medium/small/retail. 
First three are wholesale prices.\nPRICE_MULTIPLIERS = 1.0, 1.0, 1.0, 1.0\n", "path": "shopelectro/settings/base.py" } ]
[ { "content": "\"\"\"\nDjango settings for shopelectro project.\n\nGenerated by 'django-admin startproject' using Django 1.9.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nimport os\nfrom datetime import datetime\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', 'so_secret_key')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# http://bit.ly/sorl-thumbnail-docs\nTHUMBNAIL_DEBUG = False\n\nALLOWED_HOSTS = ['*']\n\nif os.environ.get('TEST_ENV', False):\n # disable https in CI\n # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'http')\n\n# Enable in frame loading for Ya.Metric\n# https://docs.djangoproject.com/es/1.10/ref/clickjacking/\n# https://yandex.ru/support/metrika/general/counter-webvisor.xml#download-page\nX_FRAME_OPTIONS = 'ALLOW-FROM http://webvisor.com'\n\n# Application definition\nINSTALLED_APPS = [\n # https://docs.djangoproject.com/en/1.9/ref/contrib/admin/#django.contrib.admin.autodiscover\n 'django.contrib.contenttypes',\n 'django.contrib.auth',\n 'django.contrib.messages',\n 'django.contrib.redirects',\n 'django.contrib.sessions',\n 'django.contrib.sitemaps',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django_user_agents',\n 'generic_admin',\n 'django.contrib.admin.apps.SimpleAdminConfig',\n 'debug_toolbar',\n 'mptt',\n 'widget_tweaks',\n 'sorl.thumbnail',\n 'django_select2',\n 'images',\n 'refarm_redirects',\n 'pages',\n 'catalog',\n 'search',\n 'ecommerce',\n 'shopelectro',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django_user_agents.middleware.UserAgentMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n 'refarm_redirects.middleware.RedirectAllMiddleware',\n]\n\nROOT_URLCONF = 'shopelectro.urls'\n\nTEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [TEMPLATE_DIR],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.request',\n 'django.template.context_processors.static',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'ecommerce.context_processors.cart',\n 'shopelectro.context_processors.shop',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'shopelectro.wsgi.application'\n\n# Password validation\n# 
https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLOCALE_NAME = 'en_US'\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'shopelectro/locale')]\nFORMAT_MODULE_PATH = [\n 'shopelectro.formats',\n]\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nASSETS_DIR = os.path.join(BASE_DIR, 'assets')\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'front_build'),\n ASSETS_DIR,\n]\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\nDATABASE_URL = os.environ[\"POSTGRES_URL\"]\n\n# to activate django connections pool for persistent connections.\n# https://docs.djangoproject.com/en/1.11/ref/databases/#persistent-connections\nCONN_MAX_AGE = None\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.environ['POSTGRES_DB'],\n 'USER': os.environ['POSTGRES_USER'],\n 'PASSWORD': os.environ['POSTGRES_PASSWORD'],\n 'HOST': os.environ['POSTGRES_URL'],\n 'PORT': '5432',\n }\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'pages': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'catalog': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'search': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'ecommerce': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'images': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'shopelectro': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n },\n}\n\nSITE_CREATED = datetime(2013, 1, 1)\n\nLOCALHOST = 'http://127.0.0.1:8000/'\nBASE_URL = 'https://www.shopelectro.ru'\n\nPLACEHOLDER_IMAGE = 'images/logo.png'\nPLACEHOLDER_ALT = 'Логотип компании Shopelectro'\n\n# Autocomplete and search settings\nSEARCH_SEE_ALL_LABEL = 'Смотреть все результаты'\n\n# For sitemaps and sites framework\nSITE_ID = 1\nSITE_DOMAIN_NAME = 'www.shopelectro.ru'\n\n# Used to retrieve instances in ecommerce.Cart\nCART_ID = 'cart'\n\n# Used to define choices attr in definition of Order.payment_type field\nPAYMENT_OPTIONS = (\n ('cash', 'Наличные'),\n ('cashless', 'Безналичные и денежные переводы'),\n ('AC', 'Банковская карта'),\n ('PC', 'Яндекс.Деньги'),\n ('GP', 'Связной (терминал)'),\n ('AB', 'Альфа-Клик'),\n)\n\n# It is fake-pass. 
Correct pass will be created on `docker-compose up` stage from `docker/.env`\nYANDEX_SHOP_PASS = os.environ.get('YANDEX_SHOP_PASS', 'so_secret_pass')\n\n# Used for order's email in ecommerce app\nFAKE_ORDER_NUMBER = 6000\n\n# Subjects for different types of emails sent from SE.\nEMAIL_SUBJECTS = {\n 'call': 'Обратный звонок',\n 'order': 'Заказ №{0.fake_order_number}',\n 'yandex_order': 'Заказ №{0.fake_order_number} | Яндекс.Касса',\n 'one_click': 'Заказ в один клик №{0.fake_order_number}',\n 'ya_feedback_request': 'Оцените нас на Яндекс.Маркете',\n}\n\n# Email configs\n# It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', 'so_secret_pass')\nEMAIL_HOST_USER = '[email protected]'\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.yandex.ru'\nEMAIL_PORT = 587\nEMAIL_SENDER = '[email protected]'\nEMAIL_RECIPIENTS = os.environ.get('EMAIL_RECIPIENTS', '[email protected]').split(',')\n\n# FTP configs\nFTP_USER = os.environ.get('FTP_USER', 'user')\nFTP_PASS = os.environ.get('FTP_PASS', 'pass')\nFTP_IP = os.environ.get('FTP_IP', '0.0.0.0')\n\nENV_TYPE = os.environ.get('ENV_TYPE', 'PROD') # LOCAL | CI | PROD\n\n# 'Prod' <-> 'Product #1 of Category #0 of Category #1' = 0.17\n# About trigram similarity: https://goo.gl/uYFcxN\nTRIGRAM_MIN_SIMILARITY = 0.15\n\n# Used in admin image uploads\nMODEL_TYPES = {\n 'Product': {\n 'app_name': 'shopelectro',\n 'dir_name': 'products',\n },\n 'Category': {\n 'app_name': 'shopelectro',\n 'dir_name': 'categories',\n }\n}\n\n# This need for using {% debug %} variable in templates.\nINTERNAL_IPS = (\n '127.0.0.1',\n)\n\nTOP_PRODUCTS = [291, 438, 1137, 2166, 2725, 2838, 3288, 3884, 3959, 2764]\nCATEGORY_STEP_MULTIPLIERS = [12, 15, 24, 25, 48, 50, 60, 100]\n\n# Reduce retail product prices by PRICE_REDUCER.\n# It is required to make prices on shopelectro.ru and se78.ru unique.\nPRICE_REDUCER = 1\n\nSHOP = {\n 'id': '69886',\n 'scid': '64788',\n 'success_url': BASE_URL + '/shop/order-success/',\n 'fail_url': BASE_URL + '/',\n 'cps_phone': '+78124163200',\n 'cps_email': '[email protected]',\n 'local_delivery_cost': 300,\n 'local_delivery_cost_threshold': 5000,\n}\n\n\ndef get_robots_content():\n with open(os.path.join(TEMPLATE_DIR, 'robots.txt')) as robots_file:\n return robots_file.read()\n\n# used in data-migrations and tests\nCUSTOM_PAGES = {\n 'index': {\n 'slug': '',\n 'name': 'Интернет-магазин элементов питания \"ShopElectro\"',\n 'menu_title': 'Главная',\n 'title': 'Интернет-магазин Элементов питания с доставкой по России',\n },\n 'sitemap': {\n 'slug': 'sitemap',\n 'h1': 'Карта сайта',\n 'name': 'Карта сайта',\n },\n 'order': {\n 'slug': 'order',\n 'name': 'Оформление заказа',\n 'title': 'Корзина Интернет-магазин shopelectro.ru Санкт-Петербург',\n },\n 'search': {\n 'slug': 'search',\n 'name': 'Результаты поиска',\n },\n 'catalog': {\n 'slug': 'catalog',\n 'name': 'Каталог товаров',\n 'menu_title': 'Каталог',\n },\n 'order_success': {\n 'slug': 'order-success',\n 'name': 'Заказ принят',\n },\n 'robots': {\n 'slug': 'robots.txt',\n 'content': get_robots_content(),\n },\n}\n\nTAGS_URL_DELIMITER = '-or-'\nTAG_GROUPS_URL_DELIMITER = '-and-'\n\nTAGS_TITLE_DELIMITER = ' или '\nTAG_GROUPS_TITLE_DELIMITER = ' и '\n\nTAGS_ORDER = ['group__position', 'group__name', 'position', 'name']\n\n# -- App business logic --\n# every product price will be multiplied on this value\n# during import from 1C.\n# Multipliers are related to prices in this order:\n# big/medium/small/retail. 
First three are wholesale prices.\nPRICE_MULTIPLIERS = 1.0, 1.0, 1.0, 1.0\n\n# default for local tests. Prod's one may differ\nYANDEX_KASSA_LINK = 'https://money.yandex.ru/eshop.xml'\n", "path": "shopelectro/settings/base.py" } ]
diff --git a/docker/env_files/paths.dist b/docker/env_files/paths.dist index 11975f16..32e14972 100644 --- a/docker/env_files/paths.dist +++ b/docker/env_files/paths.dist @@ -1,5 +1,5 @@ # Identify the dependencies folder -DEPS_DIR=/usr/local/lib/python3.6/site-packages +DEPS_DIR=/usr/app/deps # Directory, where you cloned `refarm-site` repository REFARM_DIR=/path/to_my/refarm_site # Identify the source folder diff --git a/front/js/components/order.es6 b/front/js/components/order.es6 index 0d13d9d6..c57a4ff4 100644 --- a/front/js/components/order.es6 +++ b/front/js/components/order.es6 @@ -204,11 +204,10 @@ <input type="text" name="customerNumber" value="${formData.customerNumber}"> <input type="text" name="orderNumber" value="${formData.orderNumber}"> <input type="text" name="paymentType" value="${formData.paymentType}"> - <input type="submit"> </form> `; - DOM.$yandexFormWrapper.html(formHtml); + DOM.$yandexFormWrapper.append($(formHtml)); } /** @@ -235,9 +234,9 @@ server.sendYandexOrder(orderInfo) .then((formData) => renderYandexForm(formData)) // setTimeout to wait "onOrderSend" handling - .then(() => setTimeout(() => $(DOM.yandexForm).submit(), 100)); + .then(setTimeout(() => $(DOM.yandexForm).submit(), 100)); } else { - $(DOM.fullForm).submit(); + setTimeout(() => $(DOM.fullForm).submit(), 100) } } diff --git a/shopelectro/settings/base.py b/shopelectro/settings/base.py index 7450d2f6..24d92dca 100644 --- a/shopelectro/settings/base.py +++ b/shopelectro/settings/base.py @@ -371,3 +371,6 @@ def get_robots_content(): # Multipliers are related to prices in this order: # big/medium/small/retail. First three are wholesale prices. PRICE_MULTIPLIERS = 1.0, 1.0, 1.0, 1.0 + +# default for local tests. Prod's one may differ +YANDEX_KASSA_LINK = 'https://money.yandex.ru/eshop.xml' diff --git a/shopelectro/tests/tests_selenium.py b/shopelectro/tests/tests_selenium.py index ce1447f6..bebf5a2c 100644 --- a/shopelectro/tests/tests_selenium.py +++ b/shopelectro/tests/tests_selenium.py @@ -604,6 +604,9 @@ def test_feedback_filter(self): @helpers.disable_celery class OrderPage(helpers.SeleniumTestCase): + # Ya.Kassa's domain with card processing UI + YA_KASSA_INNER_DOMAIN = 'money.yandex.ru' + @staticmethod def get_cell(pos, col): # table columns mapping: http://prntscr.com/bsv5hp # Ignore InvalidLinkBear @@ -647,12 +650,19 @@ def buy_products(self): .format(i) ).click() - def perform_operations_on_cart(self): - self.click((By.ID, 'id_payment_type_0')) + def select_payment_type(self, payment_type='cash'): + # @todo #473:15m Move `payment_type` to dict or dataclass + input_item = self.browser.find_element_by_css_selector( + f'input[name="payment_type"][value="{payment_type}"]' + ) + input_item.click() + + def append_products_to_cart(self): + self.select_payment_type('cash') add_one_more = self.click((By.XPATH, self.add_product)) self.wait.until(EC.staleness_of(add_one_more)) - def fill_and_submit_form(self): + def fill_contacts_data(self): @helpers.try_again_on_stale_element(3) def insert_value(id, keys, expected_keys=''): def expected_conditions(browser): @@ -664,8 +674,10 @@ def expected_conditions(browser): insert_value('id_city', 'Санкт-Петербург') insert_value('id_phone', '2222222222', expected_keys='+7 (222) 222 22 22') insert_value('id_email', '[email protected]') + + def submit_form(self): + # @todo #473:30m Hide all form processing methods to a separated class. 
self.click((By.ID, 'submit-order')) - self.wait.until(EC.url_to_be(self.success_order_url)) def test_table_is_presented_if_there_is_some_products(self): """If there are some products in cart, we should see them in table on OrderPage.""" @@ -736,8 +748,10 @@ def assert_count(count): def test_confirm_order(self): """After filling the form we should be able to confirm an Order.""" - self.perform_operations_on_cart() - self.fill_and_submit_form() + self.append_products_to_cart() + self.fill_contacts_data() + self.submit_form() + self.wait.until(EC.url_to_be(self.success_order_url)) self.assertEqual( self.browser.current_url, self.live_server_url + reverse(Page.CUSTOM_PAGES_URL_NAME, args=('order-success', )) @@ -749,10 +763,12 @@ def test_order_email(self): 'order-table-product-id') clean_codes = [code.text for code in codes] - self.perform_operations_on_cart() + self.append_products_to_cart() final_price = self.browser.find_element_by_id('cart-page-sum').text[:-5] - self.fill_and_submit_form() + self.fill_contacts_data() + self.submit_form() + self.wait.until(EC.url_to_be(self.success_order_url)) self.assertEqual(len(mail.outbox), 1) sent_mail_body = mail.outbox[0].body @@ -778,6 +794,32 @@ def test_order_email(self): sent_mail_body ) + def test_pay_with_yandex_kassa(self): + success_page_domain = self.YA_KASSA_INNER_DOMAIN + self.fill_contacts_data() + self.select_payment_type('AC') + self.submit_form() + self.wait.until(EC.url_contains(success_page_domain)) + self.assertIn(success_page_domain, self.browser.current_url) + + # @todo #489:60m Fix yandex.kassa payment type bug. + # See details in the test case below. + @unittest.expectedFailure + def test_change_cart_and_pay_with_yandex_kassa(self): + """ + The same as `test_pay_with_yandex_kassa`, but with the detail. + + Appending products to cart on order page + suddenly breaks yandex kassa payment type. + """ + success_page_domain = self.YA_KASSA_INNER_DOMAIN + self.append_products_to_cart() + self.fill_contacts_data() # Ignore CPDBear + self.select_payment_type('AC') + self.submit_form() + self.wait.until(EC.url_contains(success_page_domain)) + self.assertIn(success_page_domain, self.browser.current_url) + class SitePage(helpers.SeleniumTestCase):
kivy__kivy-4598
ToggleButton can get released with allow_no_selection=False
Ohai buddiez, I hope you're all doing goodie ^__^

I found a new bug probably due to "always_release" on ButtonBehavior having been changed recently:
`Changed in version 1.9.2: The default value is now False.`

Take the following example:
https://gist.github.com/42e02d13c31a6504b57d5cd3ac23a460

If you try to press a button, then release outside of it, its state will be "normal", even though it should remain "down".

I have made a small change to ButtonBehavior which adds an event "on_release_outside" and makes it set the state to normal by default, and overrode it in ToggleButtonBehavior to do nothing instead.

Incoming PR, so please give feedback! (EDIT: See #4594 )

Thanks for reading, see you around buddiez bliblibli ^__^
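For reference, a minimal sketch of the reported setup (the linked gist is not reproduced here; the layout and labels below are illustrative assumptions): a group of ToggleButtons with `allow_no_selection=False`, where pressing the selected button and releasing outside it should leave its state `'down'`.

```python
# Minimal sketch (assumed, not the linked gist): with allow_no_selection=False
# one button in the group must always stay 'down', yet before the fix a
# press-then-release-outside forced its state back to 'normal'.
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.togglebutton import ToggleButton


class DemoApp(App):
    def build(self):
        root = BoxLayout()
        for i in range(3):
            root.add_widget(ToggleButton(
                text='option %d' % i,
                group='opts',              # buttons share one selection
                allow_no_selection=False,  # the group must keep one 'down'
            ))
        return root


if __name__ == '__main__':
    DemoApp().run()
```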
[ { "content": "'''\nButton Behavior\n===============\n\nThe :class:`~kivy.uix.behaviors.button.ButtonBehavior`\n`mixin <https://en.wikipedia.org/wiki/Mixin>`_ class provides\n:class:`~kivy.uix.button.Button` behavior. You can combine this class with\nother widgets, such as an :class:`~kivy.uix.image.Image`, to provide\nalternative buttons that preserve Kivy button behavior.\n\nFor an overview of behaviors, please refer to the :mod:`~kivy.uix.behaviors`\ndocumentation.\n\nExample\n-------\n\nThe following example adds button behavior to an image to make a checkbox that\nbehaves like a button::\n\n from kivy.app import App\n from kivy.uix.image import Image\n from kivy.uix.behaviors import ButtonBehavior\n\n\n class MyButton(ButtonBehavior, Image):\n def __init__(self, **kwargs):\n super(MyButton, self).__init__(**kwargs)\n self.source = 'atlas://data/images/defaulttheme/checkbox_off'\n\n def on_press(self):\n self.source = 'atlas://data/images/defaulttheme/checkbox_on'\n\n def on_release(self):\n self.source = 'atlas://data/images/defaulttheme/checkbox_off'\n\n\n class SampleApp(App):\n def build(self):\n return MyButton()\n\n\n SampleApp().run()\n\nSee :class:`~kivy.uix.behaviors.ButtonBehavior` for details.\n'''\n\n__all__ = ('ButtonBehavior', )\n\nfrom kivy.clock import Clock\nfrom kivy.config import Config\nfrom kivy.properties import OptionProperty, ObjectProperty, \\\n BooleanProperty, NumericProperty\nfrom time import time\n\n\nclass ButtonBehavior(object):\n '''\n This `mixin <https://en.wikipedia.org/wiki/Mixin>`_ class provides\n :class:`~kivy.uix.button.Button` behavior. Please see the\n :mod:`button behaviors module <kivy.uix.behaviors.button>` documentation\n for more information.\n\n :Events:\n `on_press`\n Fired when the button is pressed.\n `on_release`\n Fired when the button is released (i.e. the touch/click that\n pressed the button goes away).\n\n '''\n\n state = OptionProperty('normal', options=('normal', 'down'))\n '''The state of the button, must be one of 'normal' or 'down'.\n The state is 'down' only when the button is currently touched/clicked,\n otherwise its 'normal'.\n\n :attr:`state` is an :class:`~kivy.properties.OptionProperty` and defaults\n to 'normal'.\n '''\n\n last_touch = ObjectProperty(None)\n '''Contains the last relevant touch received by the Button. This can\n be used in `on_press` or `on_release` in order to know which touch\n dispatched the event.\n\n .. versionadded:: 1.8.0\n\n :attr:`last_touch` is a :class:`~kivy.properties.ObjectProperty` and\n defaults to `None`.\n '''\n\n \n min_state_time = NumericProperty(0)\n '''The minimum period of time which the widget must remain in the\n `'down'` state.\n\n .. versionadded:: 1.9.1\n\n :attr:`min_state_time` is a float and defaults to 0.035. This value is\n taken from :class:`~kivy.config.Config`.\n '''\n\n always_release = BooleanProperty(False)\n '''This determines whether or not the widget fires an `on_release` event if\n the touch_up is outside the widget.\n\n .. versionadded:: 1.9.0\n\n .. 
versionchanged:: 1.9.2\n The default value is now False.\n\n :attr:`always_release` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to `False`.\n '''\n\n def __init__(self, **kwargs):\n self.register_event_type('on_press')\n self.register_event_type('on_release')\n if 'min_state_time' not in kwargs:\n self.min_state_time = float(Config.get('graphics', 'min_state_time'))\n super(ButtonBehavior, self).__init__(**kwargs)\n self.__state_event = None\n self.__touch_time = None\n self.fbind('state', self.cancel_event)\n\n def _do_press(self):\n self.state = 'down'\n\n def _do_release(self, *args):\n self.state = 'normal'\n\n def cancel_event(self, *args):\n if self.__state_event:\n self.__state_event.cancel()\n self.__state_event = None\n\n def on_touch_down(self, touch):\n if super(ButtonBehavior, self).on_touch_down(touch):\n return True\n if touch.is_mouse_scrolling:\n return False\n if not self.collide_point(touch.x, touch.y):\n return False\n if self in touch.ud:\n return False\n touch.grab(self)\n touch.ud[self] = True\n self.last_touch = touch\n self.__touch_time = time()\n self._do_press()\n self.dispatch('on_press')\n return True\n\n def on_touch_move(self, touch):\n if touch.grab_current is self:\n return True\n if super(ButtonBehavior, self).on_touch_move(touch):\n return True\n return self in touch.ud\n\n def on_touch_up(self, touch):\n if touch.grab_current is not self:\n return super(ButtonBehavior, self).on_touch_up(touch)\n assert(self in touch.ud)\n touch.ungrab(self)\n self.last_touch = touch\n\n if (not self.always_release\n and not self.collide_point(*touch.pos)):\n self.state = 'normal'\n return\n\n touchtime = time() - self.__touch_time\n if touchtime < self.min_state_time:\n self.__state_event = Clock.schedule_once(\n self._do_release, self.min_state_time - touchtime)\n else:\n self._do_release()\n self.dispatch('on_release')\n return True\n\n def on_press(self):\n pass\n\n def on_release(self):\n pass\n\n def trigger_action(self, duration=0.1):\n '''Trigger whatever action(s) have been bound to the button by calling\n both the on_press and on_release callbacks.\n\n This simulates a quick button press without using any touch events.\n\n Duration is the length of the press in seconds. Pass 0 if you want\n the action to happen instantly.\n\n .. versionadded:: 1.8.0\n '''\n self._do_press()\n self.dispatch('on_press')\n\n def trigger_release(dt):\n self._do_release()\n self.dispatch('on_release')\n if not duration:\n trigger_release(0)\n else:\n Clock.schedule_once(trigger_release, duration)\n", "path": "kivy/uix/behaviors/button.py" } ]
[ { "content": "'''\nButton Behavior\n===============\n\nThe :class:`~kivy.uix.behaviors.button.ButtonBehavior`\n`mixin <https://en.wikipedia.org/wiki/Mixin>`_ class provides\n:class:`~kivy.uix.button.Button` behavior. You can combine this class with\nother widgets, such as an :class:`~kivy.uix.image.Image`, to provide\nalternative buttons that preserve Kivy button behavior.\n\nFor an overview of behaviors, please refer to the :mod:`~kivy.uix.behaviors`\ndocumentation.\n\nExample\n-------\n\nThe following example adds button behavior to an image to make a checkbox that\nbehaves like a button::\n\n from kivy.app import App\n from kivy.uix.image import Image\n from kivy.uix.behaviors import ButtonBehavior\n\n\n class MyButton(ButtonBehavior, Image):\n def __init__(self, **kwargs):\n super(MyButton, self).__init__(**kwargs)\n self.source = 'atlas://data/images/defaulttheme/checkbox_off'\n\n def on_press(self):\n self.source = 'atlas://data/images/defaulttheme/checkbox_on'\n\n def on_release(self):\n self.source = 'atlas://data/images/defaulttheme/checkbox_off'\n\n\n class SampleApp(App):\n def build(self):\n return MyButton()\n\n\n SampleApp().run()\n\nSee :class:`~kivy.uix.behaviors.ButtonBehavior` for details.\n'''\n\n__all__ = ('ButtonBehavior', )\n\nfrom kivy.clock import Clock\nfrom kivy.config import Config\nfrom kivy.properties import OptionProperty, ObjectProperty, \\\n BooleanProperty, NumericProperty\nfrom time import time\n\n\nclass ButtonBehavior(object):\n '''\n This `mixin <https://en.wikipedia.org/wiki/Mixin>`_ class provides\n :class:`~kivy.uix.button.Button` behavior. Please see the\n :mod:`button behaviors module <kivy.uix.behaviors.button>` documentation\n for more information.\n\n :Events:\n `on_press`\n Fired when the button is pressed.\n `on_release`\n Fired when the button is released (i.e. the touch/click that\n pressed the button goes away).\n\n '''\n\n state = OptionProperty('normal', options=('normal', 'down'))\n '''The state of the button, must be one of 'normal' or 'down'.\n The state is 'down' only when the button is currently touched/clicked,\n otherwise its 'normal'.\n\n :attr:`state` is an :class:`~kivy.properties.OptionProperty` and defaults\n to 'normal'.\n '''\n\n last_touch = ObjectProperty(None)\n '''Contains the last relevant touch received by the Button. This can\n be used in `on_press` or `on_release` in order to know which touch\n dispatched the event.\n\n .. versionadded:: 1.8.0\n\n :attr:`last_touch` is a :class:`~kivy.properties.ObjectProperty` and\n defaults to `None`.\n '''\n\n \n min_state_time = NumericProperty(0)\n '''The minimum period of time which the widget must remain in the\n `'down'` state.\n\n .. versionadded:: 1.9.1\n\n :attr:`min_state_time` is a float and defaults to 0.035. This value is\n taken from :class:`~kivy.config.Config`.\n '''\n\n always_release = BooleanProperty(False)\n '''This determines whether or not the widget fires an `on_release` event if\n the touch_up is outside the widget.\n\n .. versionadded:: 1.9.0\n\n .. 
versionchanged:: 1.9.2\n The default value is now False.\n\n :attr:`always_release` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to `False`.\n '''\n\n def __init__(self, **kwargs):\n self.register_event_type('on_press')\n self.register_event_type('on_release')\n if 'min_state_time' not in kwargs:\n self.min_state_time = float(Config.get('graphics', 'min_state_time'))\n super(ButtonBehavior, self).__init__(**kwargs)\n self.__state_event = None\n self.__touch_time = None\n self.fbind('state', self.cancel_event)\n\n def _do_press(self):\n self.state = 'down'\n\n def _do_release(self, *args):\n self.state = 'normal'\n\n def cancel_event(self, *args):\n if self.__state_event:\n self.__state_event.cancel()\n self.__state_event = None\n\n def on_touch_down(self, touch):\n if super(ButtonBehavior, self).on_touch_down(touch):\n return True\n if touch.is_mouse_scrolling:\n return False\n if not self.collide_point(touch.x, touch.y):\n return False\n if self in touch.ud:\n return False\n touch.grab(self)\n touch.ud[self] = True\n self.last_touch = touch\n self.__touch_time = time()\n self._do_press()\n self.dispatch('on_press')\n return True\n\n def on_touch_move(self, touch):\n if touch.grab_current is self:\n return True\n if super(ButtonBehavior, self).on_touch_move(touch):\n return True\n return self in touch.ud\n\n def on_touch_up(self, touch):\n if touch.grab_current is not self:\n return super(ButtonBehavior, self).on_touch_up(touch)\n assert(self in touch.ud)\n touch.ungrab(self)\n self.last_touch = touch\n\n if (not self.always_release\n and not self.collide_point(*touch.pos)):\n self._do_release()\n return\n\n touchtime = time() - self.__touch_time\n if touchtime < self.min_state_time:\n self.__state_event = Clock.schedule_once(\n self._do_release, self.min_state_time - touchtime)\n else:\n self._do_release()\n self.dispatch('on_release')\n return True\n\n def on_press(self):\n pass\n\n def on_release(self):\n pass\n\n def trigger_action(self, duration=0.1):\n '''Trigger whatever action(s) have been bound to the button by calling\n both the on_press and on_release callbacks.\n\n This simulates a quick button press without using any touch events.\n\n Duration is the length of the press in seconds. Pass 0 if you want\n the action to happen instantly.\n\n .. versionadded:: 1.8.0\n '''\n self._do_press()\n self.dispatch('on_press')\n\n def trigger_release(dt):\n self._do_release()\n self.dispatch('on_release')\n if not duration:\n trigger_release(0)\n else:\n Clock.schedule_once(trigger_release, duration)\n", "path": "kivy/uix/behaviors/button.py" } ]
diff --git a/kivy/uix/behaviors/button.py b/kivy/uix/behaviors/button.py index d4f81e2f23..f2ef3a55b1 100644 --- a/kivy/uix/behaviors/button.py +++ b/kivy/uix/behaviors/button.py @@ -167,7 +167,7 @@ def on_touch_up(self, touch): if (not self.always_release and not self.collide_point(*touch.pos)): - self.state = 'normal' + self._do_release() return touchtime = time() - self.__touch_time
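The one-line diff works because the "released outside the widget" path now goes through the same overridable hook as a normal release. A simplified sketch of that mechanism, assuming (as in kivy's toggle-button behavior) that `ToggleButtonBehavior` overrides `_do_release` as a no-op:

```python
# Simplified sketch of the dispatch mechanism (not the full kivy classes):
# routing the outside-release path through _do_release() lets a subclass
# override decide whether the state actually resets.

class ButtonBehavior(object):
    state = 'normal'

    def _do_release(self, *args):
        self.state = 'normal'   # a plain button pops back up


class ToggleButtonBehavior(ButtonBehavior):
    def _do_release(self, *args):
        pass                    # a grouped toggle keeps its current state


btn = ToggleButtonBehavior()
btn.state = 'down'
btn._do_release()               # the "released outside" code path
assert btn.state == 'down'      # survives, unlike a hard `state = 'normal'`
```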
django-oscar__django-oscar-3324
product-lookup not working
### Issue Summary

Can't select related products

### Steps to Reproduce

1. run sandbox
2. in Dashboard->products select any product eg. The shellcoder's handbook
3. in product's upselling click text field `Recommended product:`
4. dropdown contains `The results could not be loaded.` and console throws `Internal Server Error: /en-gb/dashboard/catalogue/product-lookup/` with traceback ending

```
django-oscar/src/oscar/apps/dashboard/catalogue/views.py", line 649, in get_queryset
    return self.model.browsable.all()
AttributeError: type object 'Product' has no attribute 'browsable'
```

### Technical details

* Python version: 3.7.5
* Django version: 2.2.11
* Oscar version: commit 5de733e
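The traceback points at `ProductLookupView.get_queryset`, visible near the end of the `before_files` excerpt below; the patched view itself falls outside the truncated `after_files` excerpt. A hedged sketch of the likely repair direction: replace the removed `browsable` manager with the equivalent filter on the default manager's queryset (`objects.browsable()` is an assumption inferred from the `objects.browsable_dashboard()` call that `ProductListView` already makes in the same module).

```python
# Hedged sketch of the repair direction, not the confirmed patch: swap the
# removed `browsable` manager for the queryset-level method on the default
# manager. `objects.browsable()` is an assumption based on the
# `objects.browsable_dashboard()` usage in ProductListView.
from oscar.core.loading import get_model
from oscar.views.generic import ObjectLookupView

Product = get_model('catalogue', 'Product')


class ProductLookupView(ObjectLookupView):
    model = Product

    def get_queryset(self):
        # was: return self.model.browsable.all()  -> AttributeError
        return self.model.objects.browsable()
```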
[ { "content": "from django.conf import settings\nfrom django.contrib import messages\nfrom django.db.models import Q\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.loader import render_to_string\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import generic\nfrom django_tables2 import SingleTableMixin, SingleTableView\n\nfrom oscar.core.loading import get_classes, get_model\nfrom oscar.views.generic import ObjectLookupView\n\n(ProductForm,\n ProductClassSelectForm,\n ProductSearchForm,\n ProductClassForm,\n CategoryForm,\n StockAlertSearchForm,\n AttributeOptionGroupForm,\n OptionForm) \\\n = get_classes('dashboard.catalogue.forms',\n ('ProductForm',\n 'ProductClassSelectForm',\n 'ProductSearchForm',\n 'ProductClassForm',\n 'CategoryForm',\n 'StockAlertSearchForm',\n 'AttributeOptionGroupForm',\n 'OptionForm'))\n(StockRecordFormSet,\n ProductCategoryFormSet,\n ProductImageFormSet,\n ProductRecommendationFormSet,\n ProductAttributesFormSet,\n AttributeOptionFormSet) \\\n = get_classes('dashboard.catalogue.formsets',\n ('StockRecordFormSet',\n 'ProductCategoryFormSet',\n 'ProductImageFormSet',\n 'ProductRecommendationFormSet',\n 'ProductAttributesFormSet',\n 'AttributeOptionFormSet'))\nProductTable, CategoryTable, AttributeOptionGroupTable, OptionTable \\\n = get_classes('dashboard.catalogue.tables',\n ('ProductTable', 'CategoryTable',\n 'AttributeOptionGroupTable', 'OptionTable'))\n(PopUpWindowCreateMixin,\n PopUpWindowUpdateMixin,\n PopUpWindowDeleteMixin) \\\n = get_classes('dashboard.views',\n ('PopUpWindowCreateMixin',\n 'PopUpWindowUpdateMixin',\n 'PopUpWindowDeleteMixin'))\nProduct = get_model('catalogue', 'Product')\nCategory = get_model('catalogue', 'Category')\nProductImage = get_model('catalogue', 'ProductImage')\nProductCategory = get_model('catalogue', 'ProductCategory')\nProductClass = get_model('catalogue', 'ProductClass')\nStockRecord = get_model('partner', 'StockRecord')\nStockAlert = get_model('partner', 'StockAlert')\nPartner = get_model('partner', 'Partner')\nAttributeOptionGroup = get_model('catalogue', 'AttributeOptionGroup')\nOption = get_model('catalogue', 'Option')\n\n\ndef filter_products(queryset, user):\n \"\"\"\n Restrict the queryset to products the given user has access to.\n A staff user is allowed to access all Products.\n A non-staff user is only allowed access to a product if they are in at\n least one stock record's partner user list.\n \"\"\"\n if user.is_staff:\n return queryset\n\n return queryset.filter(stockrecords__partner__users__pk=user.pk).distinct()\n\n\nclass ProductListView(SingleTableView):\n\n \"\"\"\n Dashboard view of the product list.\n Supports the permission-based dashboard.\n \"\"\"\n\n template_name = 'oscar/dashboard/catalogue/product_list.html'\n form_class = ProductSearchForm\n productclass_form_class = ProductClassSelectForm\n table_class = ProductTable\n context_table_name = 'products'\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['form'] = self.form\n ctx['productclass_form'] = self.productclass_form_class()\n return ctx\n\n def get_description(self, form):\n if form.is_valid() and any(form.cleaned_data.values()):\n return _('Product search results')\n return _('Products')\n\n def get_table(self, **kwargs):\n if 'recently_edited' in self.request.GET:\n kwargs.update(dict(orderable=False))\n\n table = super().get_table(**kwargs)\n table.caption = 
self.get_description(self.form)\n return table\n\n def get_table_pagination(self, table):\n return dict(per_page=settings.OSCAR_DASHBOARD_ITEMS_PER_PAGE)\n\n def filter_queryset(self, queryset):\n \"\"\"\n Apply any filters to restrict the products that appear on the list\n \"\"\"\n return filter_products(queryset, self.request.user)\n\n def get_queryset(self):\n \"\"\"\n Build the queryset for this list\n \"\"\"\n queryset = Product.objects.browsable_dashboard().base_queryset()\n queryset = self.filter_queryset(queryset)\n queryset = self.apply_search(queryset)\n return queryset\n\n def apply_search(self, queryset):\n \"\"\"\n Search through the filtered queryset.\n\n We must make sure that we don't return search results that the user is not allowed\n to see (see filter_queryset).\n \"\"\"\n self.form = self.form_class(self.request.GET)\n\n if not self.form.is_valid():\n return queryset\n\n data = self.form.cleaned_data\n\n if data.get('upc'):\n # Filter the queryset by upc\n # For usability reasons, we first look at exact matches and only return\n # them if there are any. Otherwise we return all results\n # that contain the UPC.\n\n # Look up all matches (child products, products not allowed to access) ...\n matches_upc = Product.objects.filter(upc__iexact=data['upc'])\n\n # ... and use that to pick all standalone or parent products that the user is\n # allowed to access.\n qs_match = queryset.filter(\n Q(id__in=matches_upc.values('id')) | Q(id__in=matches_upc.values('parent_id')))\n\n if qs_match.exists():\n # If there's a direct UPC match, return just that.\n queryset = qs_match\n else:\n # No direct UPC match. Let's try the same with an icontains search.\n matches_upc = Product.objects.filter(upc__icontains=data['upc'])\n queryset = queryset.filter(\n Q(id__in=matches_upc.values('id')) | Q(id__in=matches_upc.values('parent_id')))\n\n if data.get('title'):\n queryset = queryset.filter(title__icontains=data['title'])\n\n return queryset\n\n\nclass ProductCreateRedirectView(generic.RedirectView):\n permanent = False\n productclass_form_class = ProductClassSelectForm\n\n def get_product_create_url(self, product_class):\n \"\"\" Allow site to provide custom URL \"\"\"\n return reverse('dashboard:catalogue-product-create',\n kwargs={'product_class_slug': product_class.slug})\n\n def get_invalid_product_class_url(self):\n messages.error(self.request, _(\"Please choose a product type\"))\n return reverse('dashboard:catalogue-product-list')\n\n def get_redirect_url(self, **kwargs):\n form = self.productclass_form_class(self.request.GET)\n if form.is_valid():\n product_class = form.cleaned_data['product_class']\n return self.get_product_create_url(product_class)\n\n else:\n return self.get_invalid_product_class_url()\n\n\nclass ProductCreateUpdateView(generic.UpdateView):\n \"\"\"\n Dashboard view that is can both create and update products of all kinds.\n It can be used in three different ways, each of them with a unique URL\n pattern:\n - When creating a new standalone product, this view is called with the\n desired product class\n - When editing an existing product, this view is called with the product's\n primary key. 
If the product is a child product, the template considerably\n reduces the available form fields.\n - When creating a new child product, this view is called with the parent's\n primary key.\n\n Supports the permission-based dashboard.\n \"\"\"\n\n template_name = 'oscar/dashboard/catalogue/product_update.html'\n model = Product\n context_object_name = 'product'\n\n form_class = ProductForm\n category_formset = ProductCategoryFormSet\n image_formset = ProductImageFormSet\n recommendations_formset = ProductRecommendationFormSet\n stockrecord_formset = StockRecordFormSet\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.formsets = {'category_formset': self.category_formset,\n 'image_formset': self.image_formset,\n 'recommended_formset': self.recommendations_formset,\n 'stockrecord_formset': self.stockrecord_formset}\n\n def dispatch(self, request, *args, **kwargs):\n resp = super().dispatch(\n request, *args, **kwargs)\n return self.check_objects_or_redirect() or resp\n\n def check_objects_or_redirect(self):\n \"\"\"\n Allows checking the objects fetched by get_object and redirect\n if they don't satisfy our needs.\n Is used to redirect when create a new variant and the specified\n parent product can't actually be turned into a parent product.\n \"\"\"\n if self.creating and self.parent is not None:\n is_valid, reason = self.parent.can_be_parent(give_reason=True)\n if not is_valid:\n messages.error(self.request, reason)\n return redirect('dashboard:catalogue-product-list')\n\n def get_queryset(self):\n \"\"\"\n Filter products that the user doesn't have permission to update\n \"\"\"\n return filter_products(Product.objects.all(), self.request.user)\n\n def get_object(self, queryset=None):\n \"\"\"\n This parts allows generic.UpdateView to handle creating products as\n well. The only distinction between an UpdateView and a CreateView\n is that self.object is None. 
We emulate this behavior.\n\n This method is also responsible for setting self.product_class and\n self.parent.\n \"\"\"\n self.creating = 'pk' not in self.kwargs\n if self.creating:\n # Specifying a parent product is only done when creating a child\n # product.\n parent_pk = self.kwargs.get('parent_pk')\n if parent_pk is None:\n self.parent = None\n # A product class needs to be specified when creating a\n # standalone product.\n product_class_slug = self.kwargs.get('product_class_slug')\n self.product_class = get_object_or_404(\n ProductClass, slug=product_class_slug)\n else:\n self.parent = get_object_or_404(Product, pk=parent_pk)\n self.product_class = self.parent.product_class\n\n return None # success\n else:\n product = super().get_object(queryset)\n self.product_class = product.get_product_class()\n self.parent = product.parent\n return product\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['product_class'] = self.product_class\n ctx['parent'] = self.parent\n ctx['title'] = self.get_page_title()\n\n for ctx_name, formset_class in self.formsets.items():\n if ctx_name not in ctx:\n ctx[ctx_name] = formset_class(self.product_class,\n self.request.user,\n instance=self.object)\n return ctx\n\n def get_page_title(self):\n if self.creating:\n if self.parent is None:\n return _('Create new %(product_class)s product') % {\n 'product_class': self.product_class.name}\n else:\n return _('Create new variant of %(parent_product)s') % {\n 'parent_product': self.parent.title}\n else:\n if self.object.title or not self.parent:\n return self.object.title\n else:\n return _('Editing variant of %(parent_product)s') % {\n 'parent_product': self.parent.title}\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['product_class'] = self.product_class\n kwargs['parent'] = self.parent\n return kwargs\n\n def process_all_forms(self, form):\n \"\"\"\n Short-circuits the regular logic to have one place to have our\n logic to check all forms\n \"\"\"\n # Need to create the product here because the inline forms need it\n # can't use commit=False because ProductForm does not support it\n if self.creating and form.is_valid():\n self.object = form.save()\n\n formsets = {}\n for ctx_name, formset_class in self.formsets.items():\n formsets[ctx_name] = formset_class(self.product_class,\n self.request.user,\n self.request.POST,\n self.request.FILES,\n instance=self.object)\n\n is_valid = form.is_valid() and all([formset.is_valid()\n for formset in formsets.values()])\n\n cross_form_validation_result = self.clean(form, formsets)\n if is_valid and cross_form_validation_result:\n return self.forms_valid(form, formsets)\n else:\n return self.forms_invalid(form, formsets)\n\n # form_valid and form_invalid are called depending on the validation result\n # of just the product form and redisplay the form respectively return a\n # redirect to the success URL. In both cases we need to check our formsets\n # as well, so both methods do the same. process_all_forms then calls\n # forms_valid or forms_invalid respectively, which do the redisplay or\n # redirect.\n form_valid = form_invalid = process_all_forms\n\n def clean(self, form, formsets):\n \"\"\"\n Perform any cross-form/formset validation. If there are errors, attach\n errors to a form or a form field so that they are displayed to the user\n and return False. If everything is valid, return True. 
This method will\n be called regardless of whether the individual forms are valid.\n \"\"\"\n return True\n\n def forms_valid(self, form, formsets):\n \"\"\"\n Save all changes and display a success url.\n When creating the first child product, this method also sets the new\n parent's structure accordingly.\n \"\"\"\n if self.creating:\n self.handle_adding_child(self.parent)\n else:\n # a just created product was already saved in process_all_forms()\n self.object = form.save()\n\n # Save formsets\n for formset in formsets.values():\n formset.save()\n\n for idx, image in enumerate(self.object.images.all()):\n image.display_order = idx\n image.save()\n\n return HttpResponseRedirect(self.get_success_url())\n\n def handle_adding_child(self, parent):\n \"\"\"\n When creating the first child product, the parent product needs\n to be implicitly converted from a standalone product to a\n parent product.\n \"\"\"\n # ProductForm eagerly sets the future parent's structure to PARENT to\n # pass validation, but it's not persisted in the database. We ensure\n # it's persisted by calling save()\n if parent is not None:\n parent.structure = Product.PARENT\n parent.save()\n\n def forms_invalid(self, form, formsets):\n # delete the temporary product again\n if self.creating and self.object and self.object.pk is not None:\n self.object.delete()\n self.object = None\n\n messages.error(self.request,\n _(\"Your submitted data was not valid - please \"\n \"correct the errors below\"))\n ctx = self.get_context_data(form=form, **formsets)\n return self.render_to_response(ctx)\n\n def get_url_with_querystring(self, url):\n url_parts = [url]\n if self.request.GET.urlencode():\n url_parts += [self.request.GET.urlencode()]\n return \"?\".join(url_parts)\n\n def get_success_url(self):\n \"\"\"\n Renders a success message and redirects depending on the button:\n - Standard case is pressing \"Save\"; redirects to the product list\n - When \"Save and continue\" is pressed, we stay on the same page\n - When \"Create (another) child product\" is pressed, it redirects\n to a new product creation page\n \"\"\"\n msg = render_to_string(\n 'oscar/dashboard/catalogue/messages/product_saved.html',\n {\n 'product': self.object,\n 'creating': self.creating,\n 'request': self.request\n })\n messages.success(self.request, msg, extra_tags=\"safe noicon\")\n\n action = self.request.POST.get('action')\n if action == 'continue':\n url = reverse(\n 'dashboard:catalogue-product', kwargs={\"pk\": self.object.id})\n elif action == 'create-another-child' and self.parent:\n url = reverse(\n 'dashboard:catalogue-product-create-child',\n kwargs={'parent_pk': self.parent.pk})\n elif action == 'create-child':\n url = reverse(\n 'dashboard:catalogue-product-create-child',\n kwargs={'parent_pk': self.object.pk})\n else:\n url = reverse('dashboard:catalogue-product-list')\n return self.get_url_with_querystring(url)\n\n\nclass ProductDeleteView(generic.DeleteView):\n \"\"\"\n Dashboard view to delete a product. 
Has special logic for deleting the\n last child product.\n Supports the permission-based dashboard.\n \"\"\"\n template_name = 'oscar/dashboard/catalogue/product_delete.html'\n model = Product\n context_object_name = 'product'\n\n def get_queryset(self):\n \"\"\"\n Filter products that the user doesn't have permission to update\n \"\"\"\n return filter_products(Product.objects.all(), self.request.user)\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n if self.object.is_child:\n ctx['title'] = _(\"Delete product variant?\")\n else:\n ctx['title'] = _(\"Delete product?\")\n return ctx\n\n def delete(self, request, *args, **kwargs):\n # We override the core delete method and don't call super in order to\n # apply more sophisticated logic around handling child products.\n # Calling super makes it difficult to test if the product being deleted\n # is the last child.\n\n self.object = self.get_object()\n\n # Before performing the delete, record whether this product is the last\n # child.\n is_last_child = False\n if self.object.is_child:\n parent = self.object.parent\n is_last_child = parent.children.count() == 1\n\n # This also deletes any child products.\n self.object.delete()\n\n # If the product being deleted is the last child, then pass control\n # to a method than can adjust the parent itself.\n if is_last_child:\n self.handle_deleting_last_child(parent)\n\n return HttpResponseRedirect(self.get_success_url())\n\n def handle_deleting_last_child(self, parent):\n # If the last child product is deleted, this view defaults to turning\n # the parent product into a standalone product. While this is\n # appropriate for many scenarios, it is intentionally easily\n # overridable and not automatically done in e.g. a Product's delete()\n # method as it is more a UX helper than hard business logic.\n parent.structure = parent.STANDALONE\n parent.save()\n\n def get_success_url(self):\n \"\"\"\n When deleting child products, this view redirects to editing the\n parent product. 
When deleting any other product, it redirects to the\n product list view.\n \"\"\"\n if self.object.is_child:\n msg = _(\"Deleted product variant '%s'\") % self.object.get_title()\n messages.success(self.request, msg)\n return reverse(\n 'dashboard:catalogue-product',\n kwargs={'pk': self.object.parent_id})\n else:\n msg = _(\"Deleted product '%s'\") % self.object.title\n messages.success(self.request, msg)\n return reverse('dashboard:catalogue-product-list')\n\n\nclass StockAlertListView(generic.ListView):\n template_name = 'oscar/dashboard/catalogue/stockalert_list.html'\n model = StockAlert\n context_object_name = 'alerts'\n paginate_by = settings.OSCAR_STOCK_ALERTS_PER_PAGE\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['form'] = self.form\n ctx['description'] = self.description\n return ctx\n\n def get_queryset(self):\n if 'status' in self.request.GET:\n self.form = StockAlertSearchForm(self.request.GET)\n if self.form.is_valid():\n status = self.form.cleaned_data['status']\n self.description = _('Alerts with status \"%s\"') % status\n return self.model.objects.filter(status=status)\n else:\n self.description = _('All alerts')\n self.form = StockAlertSearchForm()\n return self.model.objects.all()\n\n\nclass CategoryListView(SingleTableView):\n template_name = 'oscar/dashboard/catalogue/category_list.html'\n table_class = CategoryTable\n context_table_name = 'categories'\n\n def get_queryset(self):\n return Category.get_root_nodes()\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['child_categories'] = Category.get_root_nodes()\n return ctx\n\n\nclass CategoryDetailListView(SingleTableMixin, generic.DetailView):\n template_name = 'oscar/dashboard/catalogue/category_list.html'\n model = Category\n context_object_name = 'category'\n table_class = CategoryTable\n context_table_name = 'categories'\n\n def get_table_data(self):\n return self.object.get_children()\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['child_categories'] = self.object.get_children()\n ctx['ancestors'] = self.object.get_ancestors_and_self()\n return ctx\n\n\nclass CategoryListMixin(object):\n\n def get_success_url(self):\n parent = self.object.get_parent()\n if parent is None:\n return reverse(\"dashboard:catalogue-category-list\")\n else:\n return reverse(\"dashboard:catalogue-category-detail-list\",\n args=(parent.pk,))\n\n\nclass CategoryCreateView(CategoryListMixin, generic.CreateView):\n template_name = 'oscar/dashboard/catalogue/category_form.html'\n model = Category\n form_class = CategoryForm\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['title'] = _(\"Add a new category\")\n return ctx\n\n def get_success_url(self):\n messages.info(self.request, _(\"Category created successfully\"))\n return super().get_success_url()\n\n def get_initial(self):\n # set child category if set in the URL kwargs\n initial = super().get_initial()\n if 'parent' in self.kwargs:\n initial['_ref_node_id'] = self.kwargs['parent']\n return initial\n\n\nclass CategoryUpdateView(CategoryListMixin, generic.UpdateView):\n template_name = 'oscar/dashboard/catalogue/category_form.html'\n model = Category\n form_class = CategoryForm\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['title'] = _(\"Update category '%s'\") % self.object.name\n return ctx\n\n def get_success_url(self):\n 
messages.info(self.request, _(\"Category updated successfully\"))\n return super().get_success_url()\n\n\nclass CategoryDeleteView(CategoryListMixin, generic.DeleteView):\n template_name = 'oscar/dashboard/catalogue/category_delete.html'\n model = Category\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['parent'] = self.object.get_parent()\n return ctx\n\n def get_success_url(self):\n messages.info(self.request, _(\"Category deleted successfully\"))\n return super().get_success_url()\n\n\nclass ProductLookupView(ObjectLookupView):\n model = Product\n\n def get_queryset(self):\n return self.model.browsable.all()\n\n def lookup_filter(self, qs, term):\n return qs.filter(Q(title__icontains=term)\n | Q(parent__title__icontains=term))\n\n\nclass ProductClassCreateUpdateView(generic.UpdateView):\n\n template_name = 'oscar/dashboard/catalogue/product_class_form.html'\n model = ProductClass\n form_class = ProductClassForm\n product_attributes_formset = ProductAttributesFormSet\n\n def process_all_forms(self, form):\n \"\"\"\n This validates both the ProductClass form and the\n ProductClassAttributes formset at once\n making it possible to display all their errors at once.\n \"\"\"\n if self.creating and form.is_valid():\n # the object will be needed by the product_attributes_formset\n self.object = form.save(commit=False)\n\n attributes_formset = self.product_attributes_formset(\n self.request.POST, self.request.FILES, instance=self.object)\n\n is_valid = form.is_valid() and attributes_formset.is_valid()\n\n if is_valid:\n return self.forms_valid(form, attributes_formset)\n else:\n return self.forms_invalid(form, attributes_formset)\n\n def forms_valid(self, form, attributes_formset):\n form.save()\n attributes_formset.save()\n\n return HttpResponseRedirect(self.get_success_url())\n\n def forms_invalid(self, form, attributes_formset):\n messages.error(self.request,\n _(\"Your submitted data was not valid - please \"\n \"correct the errors below\"\n ))\n ctx = self.get_context_data(form=form,\n attributes_formset=attributes_formset)\n return self.render_to_response(ctx)\n\n # form_valid and form_invalid are called depending on the validation result\n # of just the product class form, and return a redirect to the success URL\n # or redisplay the form, respectively. In both cases we need to check our\n # formsets as well, so both methods do the same. 
process_all_forms then\n # calls forms_valid or forms_invalid respectively, which do the redisplay\n # or redirect.\n form_valid = form_invalid = process_all_forms\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(\n *args, **kwargs)\n\n if \"attributes_formset\" not in ctx:\n ctx[\"attributes_formset\"] = self.product_attributes_formset(\n instance=self.object)\n\n ctx[\"title\"] = self.get_title()\n\n return ctx\n\n\nclass ProductClassCreateView(ProductClassCreateUpdateView):\n\n creating = True\n\n def get_object(self):\n return None\n\n def get_title(self):\n return _(\"Add a new product type\")\n\n def get_success_url(self):\n messages.info(self.request, _(\"Product type created successfully\"))\n return reverse(\"dashboard:catalogue-class-list\")\n\n\nclass ProductClassUpdateView(ProductClassCreateUpdateView):\n\n creating = False\n\n def get_title(self):\n return _(\"Update product type '%s'\") % self.object.name\n\n def get_success_url(self):\n messages.info(self.request, _(\"Product type updated successfully\"))\n return reverse(\"dashboard:catalogue-class-list\")\n\n def get_object(self):\n product_class = get_object_or_404(ProductClass, pk=self.kwargs['pk'])\n return product_class\n\n\nclass ProductClassListView(generic.ListView):\n template_name = 'oscar/dashboard/catalogue/product_class_list.html'\n context_object_name = 'classes'\n model = ProductClass\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['title'] = _(\"Product Types\")\n return ctx\n\n\nclass ProductClassDeleteView(generic.DeleteView):\n template_name = 'oscar/dashboard/catalogue/product_class_delete.html'\n model = ProductClass\n form_class = ProductClassForm\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['title'] = _(\"Delete product type '%s'\") % self.object.name\n product_count = self.object.products.count()\n\n if product_count > 0:\n ctx['disallow'] = True\n ctx['title'] = _(\"Unable to delete '%s'\") % self.object.name\n messages.error(self.request,\n _(\"%i products are still assigned to this type\") %\n product_count)\n return ctx\n\n def get_success_url(self):\n messages.info(self.request, _(\"Product type deleted successfully\"))\n return reverse(\"dashboard:catalogue-class-list\")\n\n\nclass AttributeOptionGroupCreateUpdateView(generic.UpdateView):\n\n template_name = 'oscar/dashboard/catalogue/attribute_option_group_form.html'\n model = AttributeOptionGroup\n form_class = AttributeOptionGroupForm\n attribute_option_formset = AttributeOptionFormSet\n\n def process_all_forms(self, form):\n \"\"\"\n This validates both the AttributeOptionGroup form and the\n AttributeOptions formset at once making it possible to display all their\n errors at once.\n \"\"\"\n if self.creating and form.is_valid():\n # the object will be needed by the attribute_option_formset\n self.object = form.save(commit=False)\n\n attribute_option_formset = self.attribute_option_formset(\n self.request.POST, self.request.FILES, instance=self.object)\n\n is_valid = form.is_valid() and attribute_option_formset.is_valid()\n\n if is_valid:\n return self.forms_valid(form, attribute_option_formset)\n else:\n return self.forms_invalid(form, attribute_option_formset)\n\n def forms_valid(self, form, attribute_option_formset):\n form.save()\n attribute_option_formset.save()\n if self.is_popup:\n return self.popup_response(form.instance)\n else:\n return 
HttpResponseRedirect(self.get_success_url())\n\n def forms_invalid(self, form, attribute_option_formset):\n messages.error(self.request,\n _(\"Your submitted data was not valid - please \"\n \"correct the errors below\"\n ))\n ctx = self.get_context_data(form=form,\n attribute_option_formset=attribute_option_formset)\n return self.render_to_response(ctx)\n\n # form_valid and form_invalid are called depending on the validation result\n # of just the attribute option group form, and return a redirect to the\n # success URL or redisplay the form, respectively. In both cases we need to\n # check our formsets as well, so both methods do the same.\n # process_all_forms then calls forms_valid or forms_invalid respectively,\n # which do the redisplay or redirect.\n form_valid = form_invalid = process_all_forms\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx.setdefault(\"attribute_option_formset\", self.attribute_option_formset(instance=self.object))\n ctx[\"title\"] = self.get_title()\n return ctx\n\n def get_url_with_querystring(self, url):\n url_parts = [url]\n if self.request.GET.urlencode():\n url_parts += [self.request.GET.urlencode()]\n return \"?\".join(url_parts)\n\n\nclass AttributeOptionGroupCreateView(PopUpWindowCreateMixin, AttributeOptionGroupCreateUpdateView):\n\n creating = True\n\n def get_object(self):\n return None\n\n def get_title(self):\n return _(\"Add a new Attribute Option Group\")\n\n def get_success_url(self):\n self.add_success_message(_(\"Attribute Option Group created successfully\"))\n url = reverse(\"dashboard:catalogue-attribute-option-group-list\")\n return self.get_url_with_querystring(url)\n\n\nclass AttributeOptionGroupUpdateView(PopUpWindowUpdateMixin, AttributeOptionGroupCreateUpdateView):\n\n creating = False\n\n def get_object(self):\n attribute_option_group = get_object_or_404(AttributeOptionGroup, pk=self.kwargs['pk'])\n return attribute_option_group\n\n def get_title(self):\n return _(\"Update Attribute Option Group '%s'\") % self.object.name\n\n def get_success_url(self):\n self.add_success_message(_(\"Attribute Option Group updated successfully\"))\n url = reverse(\"dashboard:catalogue-attribute-option-group-list\")\n return self.get_url_with_querystring(url)\n\n\nclass AttributeOptionGroupListView(SingleTableView):\n\n template_name = 'oscar/dashboard/catalogue/attribute_option_group_list.html'\n model = AttributeOptionGroup\n table_class = AttributeOptionGroupTable\n context_table_name = 'attribute_option_groups'\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['querystring'] = self.request.GET.urlencode()\n return ctx\n\n\nclass AttributeOptionGroupDeleteView(PopUpWindowDeleteMixin, generic.DeleteView):\n\n template_name = 'oscar/dashboard/catalogue/attribute_option_group_delete.html'\n model = AttributeOptionGroup\n form_class = AttributeOptionGroupForm\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n\n ctx['title'] = _(\"Delete Attribute Option Group '%s'\") % self.object.name\n\n product_attribute_count = self.object.product_attributes.count()\n if product_attribute_count > 0:\n ctx['disallow'] = True\n ctx['title'] = _(\"Unable to delete '%s'\") % self.object.name\n messages.error(self.request,\n _(\"%i product attributes are still assigned to this attribute option group\") %\n product_attribute_count)\n\n ctx['http_get_params'] = self.request.GET\n\n return ctx\n\n def get_url_with_querystring(self, url):\n url_parts = 
[url]\n http_post_params = self.request.POST.copy()\n try:\n del http_post_params['csrfmiddlewaretoken']\n except KeyError:\n pass\n if http_post_params.urlencode():\n url_parts += [http_post_params.urlencode()]\n return \"?\".join(url_parts)\n\n def get_success_url(self):\n self.add_success_message(_(\"Attribute Option Group deleted successfully\"))\n url = reverse(\"dashboard:catalogue-attribute-option-group-list\")\n return self.get_url_with_querystring(url)\n\n\nclass OptionListView(SingleTableView):\n\n template_name = 'oscar/dashboard/catalogue/option_list.html'\n model = Option\n table_class = OptionTable\n context_table_name = 'options'\n\n\nclass OptionCreateUpdateView(generic.UpdateView):\n\n template_name = 'oscar/dashboard/catalogue/option_form.html'\n model = Option\n form_class = OptionForm\n\n def form_valid(self, form):\n self.object = form.save()\n if self.is_popup:\n return self.popup_response(form.instance)\n else:\n return HttpResponseRedirect(self.get_success_url())\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['title'] = self.get_title()\n return ctx\n\n def form_invalid(self, form):\n messages.error(\n self.request,\n _(\"Your submitted data was not valid - please correct the errors below\")\n )\n return super().form_invalid(form)\n\n\nclass OptionCreateView(PopUpWindowCreateMixin, OptionCreateUpdateView):\n\n creating = True\n\n def get_object(self):\n return None\n\n def get_title(self):\n return _(\"Add a new Option\")\n\n def get_success_url(self):\n self.add_success_message(_(\"Option created successfully\"))\n return reverse(\"dashboard:catalogue-option-list\")\n\n\nclass OptionUpdateView(PopUpWindowUpdateMixin, OptionCreateUpdateView):\n\n creating = False\n\n def get_object(self):\n attribute_option_group = get_object_or_404(Option, pk=self.kwargs['pk'])\n return attribute_option_group\n\n def get_title(self):\n return _(\"Update Option '%s'\") % self.object.name\n\n def get_success_url(self):\n self.add_success_message(_(\"Option updated successfully\"))\n return reverse(\"dashboard:catalogue-option-list\")\n\n\nclass OptionDeleteView(PopUpWindowDeleteMixin, generic.DeleteView):\n\n template_name = 'oscar/dashboard/catalogue/option_delete.html'\n model = Option\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n\n ctx['title'] = _(\"Delete Option '%s'\") % self.object.name\n\n products = self.object.product_set.count()\n product_classes = self.object.productclass_set.count()\n if any([products, product_classes]):\n ctx['disallow'] = True\n ctx['title'] = _(\"Unable to delete '%s'\") % self.object.name\n if products:\n messages.error(\n self.request,\n _(\"%i products are still assigned to this option\") % products\n )\n if product_classes:\n messages.error(\n self.request,\n _(\"%i product classes are still assigned to this option\") % product_classes\n )\n\n return ctx\n\n def get_success_url(self):\n self.add_success_message(_(\"Option deleted successfully\"))\n return reverse(\"dashboard:catalogue-option-list\")\n", "path": "src/oscar/apps/dashboard/catalogue/views.py" } ]
[ { "content": "from django.conf import settings\nfrom django.contrib import messages\nfrom django.db.models import Q\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.loader import render_to_string\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import generic\nfrom django_tables2 import SingleTableMixin, SingleTableView\n\nfrom oscar.core.loading import get_classes, get_model\nfrom oscar.views.generic import ObjectLookupView\n\n(ProductForm,\n ProductClassSelectForm,\n ProductSearchForm,\n ProductClassForm,\n CategoryForm,\n StockAlertSearchForm,\n AttributeOptionGroupForm,\n OptionForm) \\\n = get_classes('dashboard.catalogue.forms',\n ('ProductForm',\n 'ProductClassSelectForm',\n 'ProductSearchForm',\n 'ProductClassForm',\n 'CategoryForm',\n 'StockAlertSearchForm',\n 'AttributeOptionGroupForm',\n 'OptionForm'))\n(StockRecordFormSet,\n ProductCategoryFormSet,\n ProductImageFormSet,\n ProductRecommendationFormSet,\n ProductAttributesFormSet,\n AttributeOptionFormSet) \\\n = get_classes('dashboard.catalogue.formsets',\n ('StockRecordFormSet',\n 'ProductCategoryFormSet',\n 'ProductImageFormSet',\n 'ProductRecommendationFormSet',\n 'ProductAttributesFormSet',\n 'AttributeOptionFormSet'))\nProductTable, CategoryTable, AttributeOptionGroupTable, OptionTable \\\n = get_classes('dashboard.catalogue.tables',\n ('ProductTable', 'CategoryTable',\n 'AttributeOptionGroupTable', 'OptionTable'))\n(PopUpWindowCreateMixin,\n PopUpWindowUpdateMixin,\n PopUpWindowDeleteMixin) \\\n = get_classes('dashboard.views',\n ('PopUpWindowCreateMixin',\n 'PopUpWindowUpdateMixin',\n 'PopUpWindowDeleteMixin'))\nProduct = get_model('catalogue', 'Product')\nCategory = get_model('catalogue', 'Category')\nProductImage = get_model('catalogue', 'ProductImage')\nProductCategory = get_model('catalogue', 'ProductCategory')\nProductClass = get_model('catalogue', 'ProductClass')\nStockRecord = get_model('partner', 'StockRecord')\nStockAlert = get_model('partner', 'StockAlert')\nPartner = get_model('partner', 'Partner')\nAttributeOptionGroup = get_model('catalogue', 'AttributeOptionGroup')\nOption = get_model('catalogue', 'Option')\n\n\ndef filter_products(queryset, user):\n \"\"\"\n Restrict the queryset to products the given user has access to.\n A staff user is allowed to access all Products.\n A non-staff user is only allowed access to a product if they are in at\n least one stock record's partner user list.\n \"\"\"\n if user.is_staff:\n return queryset\n\n return queryset.filter(stockrecords__partner__users__pk=user.pk).distinct()\n\n\nclass ProductListView(SingleTableView):\n\n \"\"\"\n Dashboard view of the product list.\n Supports the permission-based dashboard.\n \"\"\"\n\n template_name = 'oscar/dashboard/catalogue/product_list.html'\n form_class = ProductSearchForm\n productclass_form_class = ProductClassSelectForm\n table_class = ProductTable\n context_table_name = 'products'\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['form'] = self.form\n ctx['productclass_form'] = self.productclass_form_class()\n return ctx\n\n def get_description(self, form):\n if form.is_valid() and any(form.cleaned_data.values()):\n return _('Product search results')\n return _('Products')\n\n def get_table(self, **kwargs):\n if 'recently_edited' in self.request.GET:\n kwargs.update(dict(orderable=False))\n\n table = super().get_table(**kwargs)\n table.caption = 
self.get_description(self.form)\n return table\n\n def get_table_pagination(self, table):\n return dict(per_page=settings.OSCAR_DASHBOARD_ITEMS_PER_PAGE)\n\n def filter_queryset(self, queryset):\n \"\"\"\n Apply any filters to restrict the products that appear on the list\n \"\"\"\n return filter_products(queryset, self.request.user)\n\n def get_queryset(self):\n \"\"\"\n Build the queryset for this list\n \"\"\"\n queryset = Product.objects.browsable_dashboard().base_queryset()\n queryset = self.filter_queryset(queryset)\n queryset = self.apply_search(queryset)\n return queryset\n\n def apply_search(self, queryset):\n \"\"\"\n Search through the filtered queryset.\n\n We must make sure that we don't return search results that the user is not allowed\n to see (see filter_queryset).\n \"\"\"\n self.form = self.form_class(self.request.GET)\n\n if not self.form.is_valid():\n return queryset\n\n data = self.form.cleaned_data\n\n if data.get('upc'):\n # Filter the queryset by upc\n # For usability reasons, we first look at exact matches and only return\n # them if there are any. Otherwise we return all results\n # that contain the UPC.\n\n # Look up all matches (child products, products not allowed to access) ...\n matches_upc = Product.objects.filter(upc__iexact=data['upc'])\n\n # ... and use that to pick all standalone or parent products that the user is\n # allowed to access.\n qs_match = queryset.filter(\n Q(id__in=matches_upc.values('id')) | Q(id__in=matches_upc.values('parent_id')))\n\n if qs_match.exists():\n # If there's a direct UPC match, return just that.\n queryset = qs_match\n else:\n # No direct UPC match. Let's try the same with an icontains search.\n matches_upc = Product.objects.filter(upc__icontains=data['upc'])\n queryset = queryset.filter(\n Q(id__in=matches_upc.values('id')) | Q(id__in=matches_upc.values('parent_id')))\n\n if data.get('title'):\n queryset = queryset.filter(title__icontains=data['title'])\n\n return queryset\n\n\nclass ProductCreateRedirectView(generic.RedirectView):\n permanent = False\n productclass_form_class = ProductClassSelectForm\n\n def get_product_create_url(self, product_class):\n \"\"\" Allow site to provide custom URL \"\"\"\n return reverse('dashboard:catalogue-product-create',\n kwargs={'product_class_slug': product_class.slug})\n\n def get_invalid_product_class_url(self):\n messages.error(self.request, _(\"Please choose a product type\"))\n return reverse('dashboard:catalogue-product-list')\n\n def get_redirect_url(self, **kwargs):\n form = self.productclass_form_class(self.request.GET)\n if form.is_valid():\n product_class = form.cleaned_data['product_class']\n return self.get_product_create_url(product_class)\n\n else:\n return self.get_invalid_product_class_url()\n\n\nclass ProductCreateUpdateView(generic.UpdateView):\n \"\"\"\n Dashboard view that is can both create and update products of all kinds.\n It can be used in three different ways, each of them with a unique URL\n pattern:\n - When creating a new standalone product, this view is called with the\n desired product class\n - When editing an existing product, this view is called with the product's\n primary key. 
If the product is a child product, the template considerably\n reduces the available form fields.\n - When creating a new child product, this view is called with the parent's\n primary key.\n\n Supports the permission-based dashboard.\n \"\"\"\n\n template_name = 'oscar/dashboard/catalogue/product_update.html'\n model = Product\n context_object_name = 'product'\n\n form_class = ProductForm\n category_formset = ProductCategoryFormSet\n image_formset = ProductImageFormSet\n recommendations_formset = ProductRecommendationFormSet\n stockrecord_formset = StockRecordFormSet\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.formsets = {'category_formset': self.category_formset,\n 'image_formset': self.image_formset,\n 'recommended_formset': self.recommendations_formset,\n 'stockrecord_formset': self.stockrecord_formset}\n\n def dispatch(self, request, *args, **kwargs):\n resp = super().dispatch(\n request, *args, **kwargs)\n return self.check_objects_or_redirect() or resp\n\n def check_objects_or_redirect(self):\n \"\"\"\n Allows checking the objects fetched by get_object and redirect\n if they don't satisfy our needs.\n Is used to redirect when create a new variant and the specified\n parent product can't actually be turned into a parent product.\n \"\"\"\n if self.creating and self.parent is not None:\n is_valid, reason = self.parent.can_be_parent(give_reason=True)\n if not is_valid:\n messages.error(self.request, reason)\n return redirect('dashboard:catalogue-product-list')\n\n def get_queryset(self):\n \"\"\"\n Filter products that the user doesn't have permission to update\n \"\"\"\n return filter_products(Product.objects.all(), self.request.user)\n\n def get_object(self, queryset=None):\n \"\"\"\n This parts allows generic.UpdateView to handle creating products as\n well. The only distinction between an UpdateView and a CreateView\n is that self.object is None. 
We emulate this behavior.\n\n This method is also responsible for setting self.product_class and\n self.parent.\n \"\"\"\n self.creating = 'pk' not in self.kwargs\n if self.creating:\n # Specifying a parent product is only done when creating a child\n # product.\n parent_pk = self.kwargs.get('parent_pk')\n if parent_pk is None:\n self.parent = None\n # A product class needs to be specified when creating a\n # standalone product.\n product_class_slug = self.kwargs.get('product_class_slug')\n self.product_class = get_object_or_404(\n ProductClass, slug=product_class_slug)\n else:\n self.parent = get_object_or_404(Product, pk=parent_pk)\n self.product_class = self.parent.product_class\n\n return None # success\n else:\n product = super().get_object(queryset)\n self.product_class = product.get_product_class()\n self.parent = product.parent\n return product\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['product_class'] = self.product_class\n ctx['parent'] = self.parent\n ctx['title'] = self.get_page_title()\n\n for ctx_name, formset_class in self.formsets.items():\n if ctx_name not in ctx:\n ctx[ctx_name] = formset_class(self.product_class,\n self.request.user,\n instance=self.object)\n return ctx\n\n def get_page_title(self):\n if self.creating:\n if self.parent is None:\n return _('Create new %(product_class)s product') % {\n 'product_class': self.product_class.name}\n else:\n return _('Create new variant of %(parent_product)s') % {\n 'parent_product': self.parent.title}\n else:\n if self.object.title or not self.parent:\n return self.object.title\n else:\n return _('Editing variant of %(parent_product)s') % {\n 'parent_product': self.parent.title}\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['product_class'] = self.product_class\n kwargs['parent'] = self.parent\n return kwargs\n\n def process_all_forms(self, form):\n \"\"\"\n Short-circuits the regular logic to have one place to have our\n logic to check all forms\n \"\"\"\n # Need to create the product here because the inline forms need it\n # can't use commit=False because ProductForm does not support it\n if self.creating and form.is_valid():\n self.object = form.save()\n\n formsets = {}\n for ctx_name, formset_class in self.formsets.items():\n formsets[ctx_name] = formset_class(self.product_class,\n self.request.user,\n self.request.POST,\n self.request.FILES,\n instance=self.object)\n\n is_valid = form.is_valid() and all([formset.is_valid()\n for formset in formsets.values()])\n\n cross_form_validation_result = self.clean(form, formsets)\n if is_valid and cross_form_validation_result:\n return self.forms_valid(form, formsets)\n else:\n return self.forms_invalid(form, formsets)\n\n # form_valid and form_invalid are called depending on the validation result\n # of just the product form and redisplay the form respectively return a\n # redirect to the success URL. In both cases we need to check our formsets\n # as well, so both methods do the same. process_all_forms then calls\n # forms_valid or forms_invalid respectively, which do the redisplay or\n # redirect.\n form_valid = form_invalid = process_all_forms\n\n def clean(self, form, formsets):\n \"\"\"\n Perform any cross-form/formset validation. If there are errors, attach\n errors to a form or a form field so that they are displayed to the user\n and return False. If everything is valid, return True. 
This method will\n be called regardless of whether the individual forms are valid.\n \"\"\"\n return True\n\n def forms_valid(self, form, formsets):\n \"\"\"\n Save all changes and display a success url.\n When creating the first child product, this method also sets the new\n parent's structure accordingly.\n \"\"\"\n if self.creating:\n self.handle_adding_child(self.parent)\n else:\n # a just created product was already saved in process_all_forms()\n self.object = form.save()\n\n # Save formsets\n for formset in formsets.values():\n formset.save()\n\n for idx, image in enumerate(self.object.images.all()):\n image.display_order = idx\n image.save()\n\n return HttpResponseRedirect(self.get_success_url())\n\n def handle_adding_child(self, parent):\n \"\"\"\n When creating the first child product, the parent product needs\n to be implicitly converted from a standalone product to a\n parent product.\n \"\"\"\n # ProductForm eagerly sets the future parent's structure to PARENT to\n # pass validation, but it's not persisted in the database. We ensure\n # it's persisted by calling save()\n if parent is not None:\n parent.structure = Product.PARENT\n parent.save()\n\n def forms_invalid(self, form, formsets):\n # delete the temporary product again\n if self.creating and self.object and self.object.pk is not None:\n self.object.delete()\n self.object = None\n\n messages.error(self.request,\n _(\"Your submitted data was not valid - please \"\n \"correct the errors below\"))\n ctx = self.get_context_data(form=form, **formsets)\n return self.render_to_response(ctx)\n\n def get_url_with_querystring(self, url):\n url_parts = [url]\n if self.request.GET.urlencode():\n url_parts += [self.request.GET.urlencode()]\n return \"?\".join(url_parts)\n\n def get_success_url(self):\n \"\"\"\n Renders a success message and redirects depending on the button:\n - Standard case is pressing \"Save\"; redirects to the product list\n - When \"Save and continue\" is pressed, we stay on the same page\n - When \"Create (another) child product\" is pressed, it redirects\n to a new product creation page\n \"\"\"\n msg = render_to_string(\n 'oscar/dashboard/catalogue/messages/product_saved.html',\n {\n 'product': self.object,\n 'creating': self.creating,\n 'request': self.request\n })\n messages.success(self.request, msg, extra_tags=\"safe noicon\")\n\n action = self.request.POST.get('action')\n if action == 'continue':\n url = reverse(\n 'dashboard:catalogue-product', kwargs={\"pk\": self.object.id})\n elif action == 'create-another-child' and self.parent:\n url = reverse(\n 'dashboard:catalogue-product-create-child',\n kwargs={'parent_pk': self.parent.pk})\n elif action == 'create-child':\n url = reverse(\n 'dashboard:catalogue-product-create-child',\n kwargs={'parent_pk': self.object.pk})\n else:\n url = reverse('dashboard:catalogue-product-list')\n return self.get_url_with_querystring(url)\n\n\nclass ProductDeleteView(generic.DeleteView):\n \"\"\"\n Dashboard view to delete a product. 
Has special logic for deleting the\n last child product.\n Supports the permission-based dashboard.\n \"\"\"\n template_name = 'oscar/dashboard/catalogue/product_delete.html'\n model = Product\n context_object_name = 'product'\n\n def get_queryset(self):\n \"\"\"\n Filter products that the user doesn't have permission to update\n \"\"\"\n return filter_products(Product.objects.all(), self.request.user)\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n if self.object.is_child:\n ctx['title'] = _(\"Delete product variant?\")\n else:\n ctx['title'] = _(\"Delete product?\")\n return ctx\n\n def delete(self, request, *args, **kwargs):\n # We override the core delete method and don't call super in order to\n # apply more sophisticated logic around handling child products.\n # Calling super makes it difficult to test if the product being deleted\n # is the last child.\n\n self.object = self.get_object()\n\n # Before performing the delete, record whether this product is the last\n # child.\n is_last_child = False\n if self.object.is_child:\n parent = self.object.parent\n is_last_child = parent.children.count() == 1\n\n # This also deletes any child products.\n self.object.delete()\n\n # If the product being deleted is the last child, then pass control\n # to a method than can adjust the parent itself.\n if is_last_child:\n self.handle_deleting_last_child(parent)\n\n return HttpResponseRedirect(self.get_success_url())\n\n def handle_deleting_last_child(self, parent):\n # If the last child product is deleted, this view defaults to turning\n # the parent product into a standalone product. While this is\n # appropriate for many scenarios, it is intentionally easily\n # overridable and not automatically done in e.g. a Product's delete()\n # method as it is more a UX helper than hard business logic.\n parent.structure = parent.STANDALONE\n parent.save()\n\n def get_success_url(self):\n \"\"\"\n When deleting child products, this view redirects to editing the\n parent product. 
When deleting any other product, it redirects to the\n product list view.\n \"\"\"\n if self.object.is_child:\n msg = _(\"Deleted product variant '%s'\") % self.object.get_title()\n messages.success(self.request, msg)\n return reverse(\n 'dashboard:catalogue-product',\n kwargs={'pk': self.object.parent_id})\n else:\n msg = _(\"Deleted product '%s'\") % self.object.title\n messages.success(self.request, msg)\n return reverse('dashboard:catalogue-product-list')\n\n\nclass StockAlertListView(generic.ListView):\n template_name = 'oscar/dashboard/catalogue/stockalert_list.html'\n model = StockAlert\n context_object_name = 'alerts'\n paginate_by = settings.OSCAR_STOCK_ALERTS_PER_PAGE\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['form'] = self.form\n ctx['description'] = self.description\n return ctx\n\n def get_queryset(self):\n if 'status' in self.request.GET:\n self.form = StockAlertSearchForm(self.request.GET)\n if self.form.is_valid():\n status = self.form.cleaned_data['status']\n self.description = _('Alerts with status \"%s\"') % status\n return self.model.objects.filter(status=status)\n else:\n self.description = _('All alerts')\n self.form = StockAlertSearchForm()\n return self.model.objects.all()\n\n\nclass CategoryListView(SingleTableView):\n template_name = 'oscar/dashboard/catalogue/category_list.html'\n table_class = CategoryTable\n context_table_name = 'categories'\n\n def get_queryset(self):\n return Category.get_root_nodes()\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['child_categories'] = Category.get_root_nodes()\n return ctx\n\n\nclass CategoryDetailListView(SingleTableMixin, generic.DetailView):\n template_name = 'oscar/dashboard/catalogue/category_list.html'\n model = Category\n context_object_name = 'category'\n table_class = CategoryTable\n context_table_name = 'categories'\n\n def get_table_data(self):\n return self.object.get_children()\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['child_categories'] = self.object.get_children()\n ctx['ancestors'] = self.object.get_ancestors_and_self()\n return ctx\n\n\nclass CategoryListMixin(object):\n\n def get_success_url(self):\n parent = self.object.get_parent()\n if parent is None:\n return reverse(\"dashboard:catalogue-category-list\")\n else:\n return reverse(\"dashboard:catalogue-category-detail-list\",\n args=(parent.pk,))\n\n\nclass CategoryCreateView(CategoryListMixin, generic.CreateView):\n template_name = 'oscar/dashboard/catalogue/category_form.html'\n model = Category\n form_class = CategoryForm\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['title'] = _(\"Add a new category\")\n return ctx\n\n def get_success_url(self):\n messages.info(self.request, _(\"Category created successfully\"))\n return super().get_success_url()\n\n def get_initial(self):\n # set child category if set in the URL kwargs\n initial = super().get_initial()\n if 'parent' in self.kwargs:\n initial['_ref_node_id'] = self.kwargs['parent']\n return initial\n\n\nclass CategoryUpdateView(CategoryListMixin, generic.UpdateView):\n template_name = 'oscar/dashboard/catalogue/category_form.html'\n model = Category\n form_class = CategoryForm\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['title'] = _(\"Update category '%s'\") % self.object.name\n return ctx\n\n def get_success_url(self):\n 
messages.info(self.request, _(\"Category updated successfully\"))\n return super().get_success_url()\n\n\nclass CategoryDeleteView(CategoryListMixin, generic.DeleteView):\n template_name = 'oscar/dashboard/catalogue/category_delete.html'\n model = Category\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['parent'] = self.object.get_parent()\n return ctx\n\n def get_success_url(self):\n messages.info(self.request, _(\"Category deleted successfully\"))\n return super().get_success_url()\n\n\nclass ProductLookupView(ObjectLookupView):\n model = Product\n\n def get_queryset(self):\n return self.model.objects.browsable().all()\n\n def lookup_filter(self, qs, term):\n return qs.filter(Q(title__icontains=term)\n | Q(parent__title__icontains=term))\n\n\nclass ProductClassCreateUpdateView(generic.UpdateView):\n\n template_name = 'oscar/dashboard/catalogue/product_class_form.html'\n model = ProductClass\n form_class = ProductClassForm\n product_attributes_formset = ProductAttributesFormSet\n\n def process_all_forms(self, form):\n \"\"\"\n This validates both the ProductClass form and the\n ProductClassAttributes formset at once\n making it possible to display all their errors at once.\n \"\"\"\n if self.creating and form.is_valid():\n # the object will be needed by the product_attributes_formset\n self.object = form.save(commit=False)\n\n attributes_formset = self.product_attributes_formset(\n self.request.POST, self.request.FILES, instance=self.object)\n\n is_valid = form.is_valid() and attributes_formset.is_valid()\n\n if is_valid:\n return self.forms_valid(form, attributes_formset)\n else:\n return self.forms_invalid(form, attributes_formset)\n\n def forms_valid(self, form, attributes_formset):\n form.save()\n attributes_formset.save()\n\n return HttpResponseRedirect(self.get_success_url())\n\n def forms_invalid(self, form, attributes_formset):\n messages.error(self.request,\n _(\"Your submitted data was not valid - please \"\n \"correct the errors below\"\n ))\n ctx = self.get_context_data(form=form,\n attributes_formset=attributes_formset)\n return self.render_to_response(ctx)\n\n # form_valid and form_invalid are called depending on the validation result\n # of just the product class form, and return a redirect to the success URL\n # or redisplay the form, respectively. In both cases we need to check our\n # formsets as well, so both methods do the same. 
process_all_forms then\n # calls forms_valid or forms_invalid respectively, which do the redisplay\n # or redirect.\n form_valid = form_invalid = process_all_forms\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(\n *args, **kwargs)\n\n if \"attributes_formset\" not in ctx:\n ctx[\"attributes_formset\"] = self.product_attributes_formset(\n instance=self.object)\n\n ctx[\"title\"] = self.get_title()\n\n return ctx\n\n\nclass ProductClassCreateView(ProductClassCreateUpdateView):\n\n creating = True\n\n def get_object(self):\n return None\n\n def get_title(self):\n return _(\"Add a new product type\")\n\n def get_success_url(self):\n messages.info(self.request, _(\"Product type created successfully\"))\n return reverse(\"dashboard:catalogue-class-list\")\n\n\nclass ProductClassUpdateView(ProductClassCreateUpdateView):\n\n creating = False\n\n def get_title(self):\n return _(\"Update product type '%s'\") % self.object.name\n\n def get_success_url(self):\n messages.info(self.request, _(\"Product type updated successfully\"))\n return reverse(\"dashboard:catalogue-class-list\")\n\n def get_object(self):\n product_class = get_object_or_404(ProductClass, pk=self.kwargs['pk'])\n return product_class\n\n\nclass ProductClassListView(generic.ListView):\n template_name = 'oscar/dashboard/catalogue/product_class_list.html'\n context_object_name = 'classes'\n model = ProductClass\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['title'] = _(\"Product Types\")\n return ctx\n\n\nclass ProductClassDeleteView(generic.DeleteView):\n template_name = 'oscar/dashboard/catalogue/product_class_delete.html'\n model = ProductClass\n form_class = ProductClassForm\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['title'] = _(\"Delete product type '%s'\") % self.object.name\n product_count = self.object.products.count()\n\n if product_count > 0:\n ctx['disallow'] = True\n ctx['title'] = _(\"Unable to delete '%s'\") % self.object.name\n messages.error(self.request,\n _(\"%i products are still assigned to this type\") %\n product_count)\n return ctx\n\n def get_success_url(self):\n messages.info(self.request, _(\"Product type deleted successfully\"))\n return reverse(\"dashboard:catalogue-class-list\")\n\n\nclass AttributeOptionGroupCreateUpdateView(generic.UpdateView):\n\n template_name = 'oscar/dashboard/catalogue/attribute_option_group_form.html'\n model = AttributeOptionGroup\n form_class = AttributeOptionGroupForm\n attribute_option_formset = AttributeOptionFormSet\n\n def process_all_forms(self, form):\n \"\"\"\n This validates both the AttributeOptionGroup form and the\n AttributeOptions formset at once making it possible to display all their\n errors at once.\n \"\"\"\n if self.creating and form.is_valid():\n # the object will be needed by the attribute_option_formset\n self.object = form.save(commit=False)\n\n attribute_option_formset = self.attribute_option_formset(\n self.request.POST, self.request.FILES, instance=self.object)\n\n is_valid = form.is_valid() and attribute_option_formset.is_valid()\n\n if is_valid:\n return self.forms_valid(form, attribute_option_formset)\n else:\n return self.forms_invalid(form, attribute_option_formset)\n\n def forms_valid(self, form, attribute_option_formset):\n form.save()\n attribute_option_formset.save()\n if self.is_popup:\n return self.popup_response(form.instance)\n else:\n return 
HttpResponseRedirect(self.get_success_url())\n\n def forms_invalid(self, form, attribute_option_formset):\n messages.error(self.request,\n _(\"Your submitted data was not valid - please \"\n \"correct the errors below\"\n ))\n ctx = self.get_context_data(form=form,\n attribute_option_formset=attribute_option_formset)\n return self.render_to_response(ctx)\n\n # form_valid and form_invalid are called depending on the validation result\n # of just the attribute option group form, and return a redirect to the\n # success URL or redisplay the form, respectively. In both cases we need to\n # check our formsets as well, so both methods do the same.\n # process_all_forms then calls forms_valid or forms_invalid respectively,\n # which do the redisplay or redirect.\n form_valid = form_invalid = process_all_forms\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx.setdefault(\"attribute_option_formset\", self.attribute_option_formset(instance=self.object))\n ctx[\"title\"] = self.get_title()\n return ctx\n\n def get_url_with_querystring(self, url):\n url_parts = [url]\n if self.request.GET.urlencode():\n url_parts += [self.request.GET.urlencode()]\n return \"?\".join(url_parts)\n\n\nclass AttributeOptionGroupCreateView(PopUpWindowCreateMixin, AttributeOptionGroupCreateUpdateView):\n\n creating = True\n\n def get_object(self):\n return None\n\n def get_title(self):\n return _(\"Add a new Attribute Option Group\")\n\n def get_success_url(self):\n self.add_success_message(_(\"Attribute Option Group created successfully\"))\n url = reverse(\"dashboard:catalogue-attribute-option-group-list\")\n return self.get_url_with_querystring(url)\n\n\nclass AttributeOptionGroupUpdateView(PopUpWindowUpdateMixin, AttributeOptionGroupCreateUpdateView):\n\n creating = False\n\n def get_object(self):\n attribute_option_group = get_object_or_404(AttributeOptionGroup, pk=self.kwargs['pk'])\n return attribute_option_group\n\n def get_title(self):\n return _(\"Update Attribute Option Group '%s'\") % self.object.name\n\n def get_success_url(self):\n self.add_success_message(_(\"Attribute Option Group updated successfully\"))\n url = reverse(\"dashboard:catalogue-attribute-option-group-list\")\n return self.get_url_with_querystring(url)\n\n\nclass AttributeOptionGroupListView(SingleTableView):\n\n template_name = 'oscar/dashboard/catalogue/attribute_option_group_list.html'\n model = AttributeOptionGroup\n table_class = AttributeOptionGroupTable\n context_table_name = 'attribute_option_groups'\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['querystring'] = self.request.GET.urlencode()\n return ctx\n\n\nclass AttributeOptionGroupDeleteView(PopUpWindowDeleteMixin, generic.DeleteView):\n\n template_name = 'oscar/dashboard/catalogue/attribute_option_group_delete.html'\n model = AttributeOptionGroup\n form_class = AttributeOptionGroupForm\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n\n ctx['title'] = _(\"Delete Attribute Option Group '%s'\") % self.object.name\n\n product_attribute_count = self.object.product_attributes.count()\n if product_attribute_count > 0:\n ctx['disallow'] = True\n ctx['title'] = _(\"Unable to delete '%s'\") % self.object.name\n messages.error(self.request,\n _(\"%i product attributes are still assigned to this attribute option group\") %\n product_attribute_count)\n\n ctx['http_get_params'] = self.request.GET\n\n return ctx\n\n def get_url_with_querystring(self, url):\n url_parts = 
[url]\n http_post_params = self.request.POST.copy()\n try:\n del http_post_params['csrfmiddlewaretoken']\n except KeyError:\n pass\n if http_post_params.urlencode():\n url_parts += [http_post_params.urlencode()]\n return \"?\".join(url_parts)\n\n def get_success_url(self):\n self.add_success_message(_(\"Attribute Option Group deleted successfully\"))\n url = reverse(\"dashboard:catalogue-attribute-option-group-list\")\n return self.get_url_with_querystring(url)\n\n\nclass OptionListView(SingleTableView):\n\n template_name = 'oscar/dashboard/catalogue/option_list.html'\n model = Option\n table_class = OptionTable\n context_table_name = 'options'\n\n\nclass OptionCreateUpdateView(generic.UpdateView):\n\n template_name = 'oscar/dashboard/catalogue/option_form.html'\n model = Option\n form_class = OptionForm\n\n def form_valid(self, form):\n self.object = form.save()\n if self.is_popup:\n return self.popup_response(form.instance)\n else:\n return HttpResponseRedirect(self.get_success_url())\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['title'] = self.get_title()\n return ctx\n\n def form_invalid(self, form):\n messages.error(\n self.request,\n _(\"Your submitted data was not valid - please correct the errors below\")\n )\n return super().form_invalid(form)\n\n\nclass OptionCreateView(PopUpWindowCreateMixin, OptionCreateUpdateView):\n\n creating = True\n\n def get_object(self):\n return None\n\n def get_title(self):\n return _(\"Add a new Option\")\n\n def get_success_url(self):\n self.add_success_message(_(\"Option created successfully\"))\n return reverse(\"dashboard:catalogue-option-list\")\n\n\nclass OptionUpdateView(PopUpWindowUpdateMixin, OptionCreateUpdateView):\n\n creating = False\n\n def get_object(self):\n attribute_option_group = get_object_or_404(Option, pk=self.kwargs['pk'])\n return attribute_option_group\n\n def get_title(self):\n return _(\"Update Option '%s'\") % self.object.name\n\n def get_success_url(self):\n self.add_success_message(_(\"Option updated successfully\"))\n return reverse(\"dashboard:catalogue-option-list\")\n\n\nclass OptionDeleteView(PopUpWindowDeleteMixin, generic.DeleteView):\n\n template_name = 'oscar/dashboard/catalogue/option_delete.html'\n model = Option\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n\n ctx['title'] = _(\"Delete Option '%s'\") % self.object.name\n\n products = self.object.product_set.count()\n product_classes = self.object.productclass_set.count()\n if any([products, product_classes]):\n ctx['disallow'] = True\n ctx['title'] = _(\"Unable to delete '%s'\") % self.object.name\n if products:\n messages.error(\n self.request,\n _(\"%i products are still assigned to this option\") % products\n )\n if product_classes:\n messages.error(\n self.request,\n _(\"%i product classes are still assigned to this option\") % product_classes\n )\n\n return ctx\n\n def get_success_url(self):\n self.add_success_message(_(\"Option deleted successfully\"))\n return reverse(\"dashboard:catalogue-option-list\")\n", "path": "src/oscar/apps/dashboard/catalogue/views.py" } ]
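The `form_valid = form_invalid = process_all_forms` aliasing that `ProductCreateUpdateView` and the other create/update views above rely on is subtle enough to be worth a distilled, framework-free sketch. All names here are illustrative stand-ins, not oscar code:

```python
class FakeForm:
    """Stand-in for a Django form or formset exposing is_valid()."""

    def __init__(self, valid):
        self._valid = valid

    def is_valid(self):
        return self._valid


class BaseUpdateView:
    """Mimics generic.UpdateView, which dispatches on the main form alone."""

    def post(self, form):
        return self.form_valid(form) if form.is_valid() else self.form_invalid(form)


class ProductView(BaseUpdateView):
    formsets = {'stockrecords': FakeForm(True), 'images': FakeForm(False)}

    def process_all_forms(self, form):
        # One place to decide: the main form *and* every formset must pass.
        if form.is_valid() and all(f.is_valid() for f in self.formsets.values()):
            return 'redirect to the success URL'
        return 'redisplay the form with errors'

    # Both hooks route here, so the formsets are checked on either path.
    form_valid = form_invalid = process_all_forms


print(ProductView().post(FakeForm(True)))  # redisplay: the images formset fails
```

The real view additionally saves the product up front when creating, because the inline formsets need an instance to bind to, and deletes that temporary product again in `forms_invalid`.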
diff --git a/src/oscar/apps/dashboard/catalogue/views.py b/src/oscar/apps/dashboard/catalogue/views.py index ca8fa81d3de..45bd9e50ca0 100644 --- a/src/oscar/apps/dashboard/catalogue/views.py +++ b/src/oscar/apps/dashboard/catalogue/views.py @@ -646,7 +646,7 @@ class ProductLookupView(ObjectLookupView): model = Product def get_queryset(self): - return self.model.browsable.all() + return self.model.objects.browsable().all() def lookup_filter(self, qs, term): return qs.filter(Q(title__icontains=term) diff --git a/tests/functional/dashboard/test_catalogue.py b/tests/functional/dashboard/test_catalogue.py index ff257c9ce66..4e65a787fcf 100644 --- a/tests/functional/dashboard/test_catalogue.py +++ b/tests/functional/dashboard/test_catalogue.py @@ -30,7 +30,8 @@ class TestCatalogueViews(WebTestCase): def test_exist(self): urls = [reverse('dashboard:catalogue-product-list'), reverse('dashboard:catalogue-category-list'), - reverse('dashboard:stock-alert-list')] + reverse('dashboard:stock-alert-list'), + reverse('dashboard:catalogue-product-lookup')] for url in urls: self.assertIsOk(self.get(url))
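The one-line fix in the diff above replaces a lookup through a dedicated `browsable` manager attribute with the `browsable()` method reached via the default manager. A minimal sketch of the pattern the patched call depends on; `BrowsableQuerySet`, its filter, and the model fields are illustrative assumptions rather than oscar's actual implementation:

```python
# models.py of a hypothetical catalogue app (sketch only)
from django.db import models


class BrowsableQuerySet(models.QuerySet):
    def browsable(self):
        # Hypothetical rule: only standalone/parent products are browsable;
        # child products are reached through their parent.
        return self.filter(parent__isnull=True)


class Product(models.Model):
    parent = models.ForeignKey(
        'self', null=True, blank=True,
        related_name='children', on_delete=models.CASCADE)
    title = models.CharField(max_length=255)

    # as_manager() exposes browsable() on the default manager, so
    # Product.objects.browsable().all() chains exactly as in the patched
    # view, while a standalone Product.browsable manager attribute does not
    # exist on this model.
    objects = BrowsableQuerySet.as_manager()
```

The added test URL in the diff then exercises `ProductLookupView` directly, guarding against a regression.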
django-json-api__django-rest-framework-json-api-824
JSON output incorrectly wrapping id in quotes
I've dropped DRF-JSON-API into my DRF application following all the defaults, but I'm getting strange output where the model's `id` is being wrapped in quotation marks. This is causing issues with my Ember.js frontend, which is unable to parse the output properly. I've checked the database and the id field is defined as an integer. Every other field seems to be serialized correctly. What setting am I missing?!

Sample output:
```
{
  "links": {
    "first": "http://localhost:8000/dat/?page%5Bnumber%5D=1",
    "last": "http://localhost:8000/dat/?page%5Bnumber%5D=1",
    "next": null,
    "prev": null
  },
  "data": [
    {
      "type": "Dat",
      "id": "1",
      "attributes": {
        "name": "Nintendo - Nintendo Switch",
        "description": "Nintendo - Nintendo Switch",
        "matched_games": 0,
        "missing_games": 618,
        "last_verified": "2020-08-05T16:43:27.041339Z"
      }
    }
  ],
  "meta": {
    "pagination": {
      "page": 1,
      "pages": 1,
      "count": 1
    }
  }
}
```
Response code: 200 (OK); Time: 55ms; Content length: 410 bytes
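For context on the report above: the quoted id is not a misconfiguration. The JSON:API specification requires the `id` member of every resource object to be a string, and the renderer below deliberately coerces the primary key with `encoding.force_str`; the change in this record merely corrects the class docstring, whose example showed an unquoted id. A minimal reproduction of the coercion (the `'Dat'` type just mirrors the sample output above):

```python
from collections import OrderedDict

from django.utils import encoding

pk = 1  # an integer in the database, as in the report
resource_identifier = OrderedDict([('type', 'Dat'), ('id', encoding.force_str(pk))])
assert resource_identifier['id'] == '1'  # spec-conformant string id
```

Clients such as Ember Data are expected to treat resource ids as opaque strings rather than parse them as numbers.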
[ { "content": "\"\"\"\nRenderers\n\"\"\"\nimport copy\nfrom collections import OrderedDict, defaultdict\nfrom collections.abc import Iterable\n\nimport inflection\nfrom django.db.models import Manager\nfrom django.utils import encoding\nfrom rest_framework import relations, renderers\nfrom rest_framework.fields import SkipField, get_attribute\nfrom rest_framework.relations import PKOnlyObject\nfrom rest_framework.serializers import BaseSerializer, ListSerializer, Serializer\nfrom rest_framework.settings import api_settings\n\nimport rest_framework_json_api\nfrom rest_framework_json_api import utils\nfrom rest_framework_json_api.relations import HyperlinkedMixin, ResourceRelatedField, SkipDataMixin\nfrom rest_framework_json_api.settings import json_api_settings\n\n\nclass JSONRenderer(renderers.JSONRenderer):\n \"\"\"\n The `JSONRenderer` exposes a number of methods that you may override if you need highly\n custom rendering control.\n\n Render a JSON response per the JSON API spec:\n\n .. code-block:: json\n\n {\n \"data\": [\n {\n \"type\": \"companies\",\n \"id\": 1,\n \"attributes\": {\n \"name\": \"Mozilla\",\n \"slug\": \"mozilla\",\n \"date-created\": \"2014-03-13 16:33:37\"\n }\n }\n ]\n }\n \"\"\"\n\n media_type = 'application/vnd.api+json'\n format = 'vnd.api+json'\n\n @classmethod\n def extract_attributes(cls, fields, resource):\n \"\"\"\n Builds the `attributes` object of the JSON API resource object.\n \"\"\"\n data = OrderedDict()\n render_nested_as_attribute = json_api_settings.SERIALIZE_NESTED_SERIALIZERS_AS_ATTRIBUTE\n for field_name, field in iter(fields.items()):\n # ID is always provided in the root of JSON API so remove it from attributes\n if field_name == 'id':\n continue\n # don't output a key for write only fields\n if fields[field_name].write_only:\n continue\n # Skip fields with relations\n if isinstance(\n field, (relations.RelatedField, relations.ManyRelatedField)\n ):\n continue\n\n if isinstance(field, BaseSerializer) and not render_nested_as_attribute:\n continue\n\n # Skip read_only attribute fields when `resource` is an empty\n # serializer. 
Prevents the \"Raw Data\" form of the browsable API\n # from rendering `\"foo\": null` for read only fields\n try:\n resource[field_name]\n except KeyError:\n if fields[field_name].read_only:\n continue\n\n data.update({\n field_name: resource.get(field_name)\n })\n\n return utils.format_field_names(data)\n\n @classmethod\n def extract_relationships(cls, fields, resource, resource_instance):\n \"\"\"\n Builds the relationships top level object based on related serializers.\n \"\"\"\n # Avoid circular deps\n from rest_framework_json_api.relations import ResourceRelatedField\n\n data = OrderedDict()\n render_nested_as_attribute = json_api_settings.SERIALIZE_NESTED_SERIALIZERS_AS_ATTRIBUTE\n\n # Don't try to extract relationships from a non-existent resource\n if resource_instance is None:\n return\n\n for field_name, field in iter(fields.items()):\n # Skip URL field\n if field_name == api_settings.URL_FIELD_NAME:\n continue\n\n # don't output a key for write only fields\n if fields[field_name].write_only:\n continue\n\n # Skip fields without relations\n if not isinstance(\n field, (relations.RelatedField, relations.ManyRelatedField, BaseSerializer)\n ):\n continue\n\n if isinstance(field, BaseSerializer) and render_nested_as_attribute:\n continue\n\n source = field.source\n relation_type = utils.get_related_resource_type(field)\n\n if isinstance(field, relations.HyperlinkedIdentityField):\n resolved, relation_instance = utils.get_relation_instance(\n resource_instance, source, field.parent\n )\n if not resolved:\n continue\n # special case for HyperlinkedIdentityField\n relation_data = list()\n\n # Don't try to query an empty relation\n relation_queryset = relation_instance \\\n if relation_instance is not None else list()\n\n for related_object in relation_queryset:\n relation_data.append(\n OrderedDict([\n ('type', relation_type),\n ('id', encoding.force_str(related_object.pk))\n ])\n )\n\n data.update({field_name: {\n 'links': {\n \"related\": resource.get(field_name)},\n 'data': relation_data,\n 'meta': {\n 'count': len(relation_data)\n }\n }})\n continue\n\n relation_data = {}\n if isinstance(field, HyperlinkedMixin):\n field_links = field.get_links(resource_instance, field.related_link_lookup_field)\n relation_data.update({'links': field_links} if field_links else dict())\n data.update({field_name: relation_data})\n\n if isinstance(field, (ResourceRelatedField, )):\n if not isinstance(field, SkipDataMixin):\n relation_data.update({'data': resource.get(field_name)})\n\n data.update({field_name: relation_data})\n continue\n\n if isinstance(\n field, (relations.PrimaryKeyRelatedField, relations.HyperlinkedRelatedField)\n ):\n resolved, relation = utils.get_relation_instance(\n resource_instance, '%s_id' % source, field.parent\n )\n if not resolved:\n continue\n relation_id = relation if resource.get(field_name) else None\n relation_data = {\n 'data': (\n OrderedDict([\n ('type', relation_type), ('id', encoding.force_str(relation_id))\n ])\n if relation_id is not None else None)\n }\n\n if (\n isinstance(field, relations.HyperlinkedRelatedField) and\n resource.get(field_name)\n ):\n relation_data.update(\n {\n 'links': {\n 'related': resource.get(field_name)\n }\n }\n )\n data.update({field_name: relation_data})\n continue\n\n if isinstance(field, relations.ManyRelatedField):\n resolved, relation_instance = utils.get_relation_instance(\n resource_instance, source, field.parent\n )\n if not resolved:\n continue\n\n relation_data = {}\n\n if isinstance(resource.get(field_name), Iterable):\n 
relation_data.update(\n {\n 'meta': {'count': len(resource.get(field_name))}\n }\n )\n\n if isinstance(field.child_relation, ResourceRelatedField):\n # special case for ResourceRelatedField\n relation_data.update(\n {'data': resource.get(field_name)}\n )\n\n if isinstance(field.child_relation, HyperlinkedMixin):\n field_links = field.child_relation.get_links(\n resource_instance,\n field.child_relation.related_link_lookup_field\n )\n relation_data.update(\n {'links': field_links}\n if field_links else dict()\n )\n\n data.update({field_name: relation_data})\n continue\n\n relation_data = list()\n for nested_resource_instance in relation_instance:\n nested_resource_instance_type = (\n relation_type or\n utils.get_resource_type_from_instance(nested_resource_instance)\n )\n\n relation_data.append(OrderedDict([\n ('type', nested_resource_instance_type),\n ('id', encoding.force_str(nested_resource_instance.pk))\n ]))\n data.update({\n field_name: {\n 'data': relation_data,\n 'meta': {\n 'count': len(relation_data)\n }\n }\n })\n continue\n\n if isinstance(field, ListSerializer):\n resolved, relation_instance = utils.get_relation_instance(\n resource_instance, source, field.parent\n )\n if not resolved:\n continue\n\n relation_data = list()\n\n serializer_data = resource.get(field_name)\n resource_instance_queryset = list(relation_instance)\n if isinstance(serializer_data, list):\n for position in range(len(serializer_data)):\n nested_resource_instance = resource_instance_queryset[position]\n nested_resource_instance_type = (\n relation_type or\n utils.get_resource_type_from_instance(nested_resource_instance)\n )\n\n relation_data.append(OrderedDict([\n ('type', nested_resource_instance_type),\n ('id', encoding.force_str(nested_resource_instance.pk))\n ]))\n\n data.update({field_name: {'data': relation_data}})\n continue\n\n if isinstance(field, Serializer):\n relation_instance_id = getattr(resource_instance, source + \"_id\", None)\n if not relation_instance_id:\n resolved, relation_instance = utils.get_relation_instance(\n resource_instance, source, field.parent\n )\n if not resolved:\n continue\n\n if relation_instance is not None:\n relation_instance_id = relation_instance.pk\n\n data.update({\n field_name: {\n 'data': (\n OrderedDict([\n ('type', relation_type),\n ('id', encoding.force_str(relation_instance_id))\n ]) if resource.get(field_name) else None)\n }\n })\n continue\n\n return utils.format_field_names(data)\n\n @classmethod\n def extract_relation_instance(cls, field, resource_instance):\n \"\"\"\n Determines what instance represents given relation and extracts it.\n\n Relation instance is determined exactly same way as it determined\n in parent serializer\n \"\"\"\n try:\n res = field.get_attribute(resource_instance)\n if isinstance(res, PKOnlyObject):\n return get_attribute(resource_instance, field.source_attrs)\n return res\n except SkipField:\n return None\n\n @classmethod\n def extract_included(cls, fields, resource, resource_instance, included_resources,\n included_cache):\n \"\"\"\n Adds related data to the top level included key when the request includes\n ?include=example,example_field2\n \"\"\"\n # this function may be called with an empty record (example: Browsable Interface)\n if not resource_instance:\n return\n\n current_serializer = fields.serializer\n context = current_serializer.context\n included_serializers = utils.get_included_serializers(current_serializer)\n included_resources = copy.copy(included_resources)\n included_resources = [inflection.underscore(value) 
for value in included_resources]\n render_nested_as_attribute = json_api_settings.SERIALIZE_NESTED_SERIALIZERS_AS_ATTRIBUTE\n\n for field_name, field in iter(fields.items()):\n # Skip URL field\n if field_name == api_settings.URL_FIELD_NAME:\n continue\n\n # Skip fields without relations\n if not isinstance(\n field, (relations.RelatedField, relations.ManyRelatedField, BaseSerializer)\n ):\n continue\n\n if isinstance(field, BaseSerializer) and render_nested_as_attribute:\n continue\n\n try:\n included_resources.remove(field_name)\n except ValueError:\n # Skip fields not in requested included resources\n # If no child field, directly continue with the next field\n if field_name not in [node.split('.')[0] for node in included_resources]:\n continue\n\n relation_instance = cls.extract_relation_instance(\n field, resource_instance\n )\n if isinstance(relation_instance, Manager):\n relation_instance = relation_instance.all()\n\n serializer_data = resource.get(field_name)\n\n if isinstance(field, relations.ManyRelatedField):\n serializer_class = included_serializers[field_name]\n field = serializer_class(relation_instance, many=True, context=context)\n serializer_data = field.data\n\n if isinstance(field, relations.RelatedField):\n if relation_instance is None or not serializer_data:\n continue\n\n many = field._kwargs.get('child_relation', None) is not None\n\n if isinstance(field, ResourceRelatedField) and not many:\n already_included = serializer_data['type'] in included_cache and \\\n serializer_data['id'] in included_cache[serializer_data['type']]\n\n if already_included:\n continue\n\n serializer_class = included_serializers[field_name]\n field = serializer_class(relation_instance, many=many, context=context)\n serializer_data = field.data\n\n new_included_resources = [key.replace('%s.' 
% field_name, '', 1)\n for key in included_resources\n if field_name == key.split('.')[0]]\n\n if isinstance(field, ListSerializer):\n serializer = field.child\n relation_type = utils.get_resource_type_from_serializer(serializer)\n relation_queryset = list(relation_instance)\n\n if serializer_data:\n for position in range(len(serializer_data)):\n serializer_resource = serializer_data[position]\n nested_resource_instance = relation_queryset[position]\n resource_type = (\n relation_type or\n utils.get_resource_type_from_instance(nested_resource_instance)\n )\n serializer_fields = utils.get_serializer_fields(\n serializer.__class__(\n nested_resource_instance, context=serializer.context\n )\n )\n new_item = cls.build_json_resource_obj(\n serializer_fields,\n serializer_resource,\n nested_resource_instance,\n resource_type,\n getattr(serializer, '_poly_force_type_resolution', False)\n )\n included_cache[new_item['type']][new_item['id']] = \\\n utils.format_field_names(new_item)\n cls.extract_included(\n serializer_fields,\n serializer_resource,\n nested_resource_instance,\n new_included_resources,\n included_cache,\n )\n\n if isinstance(field, Serializer):\n relation_type = utils.get_resource_type_from_serializer(field)\n\n # Get the serializer fields\n serializer_fields = utils.get_serializer_fields(field)\n if serializer_data:\n new_item = cls.build_json_resource_obj(\n serializer_fields,\n serializer_data,\n relation_instance,\n relation_type,\n getattr(field, '_poly_force_type_resolution', False)\n )\n included_cache[new_item['type']][new_item['id']] = utils.format_field_names(\n new_item\n )\n cls.extract_included(\n serializer_fields,\n serializer_data,\n relation_instance,\n new_included_resources,\n included_cache,\n )\n\n @classmethod\n def extract_meta(cls, serializer, resource):\n \"\"\"\n Gathers the data from serializer fields specified in meta_fields and adds it to\n the meta object.\n \"\"\"\n if hasattr(serializer, 'child'):\n meta = getattr(serializer.child, 'Meta', None)\n else:\n meta = getattr(serializer, 'Meta', None)\n meta_fields = getattr(meta, 'meta_fields', [])\n data = OrderedDict()\n for field_name in meta_fields:\n data.update({\n field_name: resource.get(field_name)\n })\n return data\n\n @classmethod\n def extract_root_meta(cls, serializer, resource):\n \"\"\"\n Calls a `get_root_meta` function on a serializer, if it exists.\n \"\"\"\n many = False\n if hasattr(serializer, 'child'):\n many = True\n serializer = serializer.child\n\n data = {}\n if getattr(serializer, 'get_root_meta', None):\n json_api_meta = serializer.get_root_meta(resource, many)\n assert isinstance(json_api_meta, dict), 'get_root_meta must return a dict'\n data.update(json_api_meta)\n return data\n\n @classmethod\n def build_json_resource_obj(cls, fields, resource, resource_instance, resource_name,\n force_type_resolution=False):\n \"\"\"\n Builds the resource object (type, id, attributes) and extracts relationships.\n \"\"\"\n # Determine type from the instance if the underlying model is polymorphic\n if force_type_resolution:\n resource_name = utils.get_resource_type_from_instance(resource_instance)\n resource_data = [\n ('type', resource_name),\n ('id', encoding.force_str(resource_instance.pk) if resource_instance else None),\n ('attributes', cls.extract_attributes(fields, resource)),\n ]\n relationships = cls.extract_relationships(fields, resource, resource_instance)\n if relationships:\n resource_data.append(('relationships', relationships))\n # Add 'self' link if field is present and 
valid\n if api_settings.URL_FIELD_NAME in resource and \\\n isinstance(fields[api_settings.URL_FIELD_NAME], relations.RelatedField):\n resource_data.append(('links', {'self': resource[api_settings.URL_FIELD_NAME]}))\n return OrderedDict(resource_data)\n\n def render_relationship_view(self, data, accepted_media_type=None, renderer_context=None):\n # Special case for RelationshipView\n view = renderer_context.get(\"view\", None)\n render_data = OrderedDict([\n ('data', data)\n ])\n links = view.get_links()\n if links:\n render_data.update({'links': links}),\n return super(JSONRenderer, self).render(\n render_data, accepted_media_type, renderer_context\n )\n\n def render_errors(self, data, accepted_media_type=None, renderer_context=None):\n return super(JSONRenderer, self).render(\n utils.format_errors(data), accepted_media_type, renderer_context\n )\n\n def render(self, data, accepted_media_type=None, renderer_context=None):\n\n renderer_context = renderer_context or {}\n\n view = renderer_context.get(\"view\", None)\n request = renderer_context.get(\"request\", None)\n\n # Get the resource name.\n resource_name = utils.get_resource_name(renderer_context)\n\n # If this is an error response, skip the rest.\n if resource_name == 'errors':\n return self.render_errors(data, accepted_media_type, renderer_context)\n\n # if response.status_code is 204 then the data to be rendered must\n # be None\n response = renderer_context.get('response', None)\n if response is not None and response.status_code == 204:\n return super(JSONRenderer, self).render(\n None, accepted_media_type, renderer_context\n )\n\n from rest_framework_json_api.views import RelationshipView\n if isinstance(view, RelationshipView):\n return self.render_relationship_view(data, accepted_media_type, renderer_context)\n\n # If `resource_name` is set to None then render default as the dev\n # wants to build the output format manually.\n if resource_name is None or resource_name is False:\n return super(JSONRenderer, self).render(\n data, accepted_media_type, renderer_context\n )\n\n json_api_data = data\n # initialize json_api_meta with pagination meta or an empty dict\n json_api_meta = data.get('meta', {}) if isinstance(data, dict) else {}\n included_cache = defaultdict(dict)\n\n if data and 'results' in data:\n serializer_data = data[\"results\"]\n else:\n serializer_data = data\n\n serializer = getattr(serializer_data, 'serializer', None)\n\n included_resources = utils.get_included_resources(request, serializer)\n\n if serializer is not None:\n\n # Extract root meta for any type of serializer\n json_api_meta.update(self.extract_root_meta(serializer, serializer_data))\n\n if getattr(serializer, 'many', False):\n json_api_data = list()\n\n for position in range(len(serializer_data)):\n resource = serializer_data[position] # Get current resource\n resource_instance = serializer.instance[position] # Get current instance\n\n if isinstance(serializer.child, rest_framework_json_api.\n serializers.PolymorphicModelSerializer):\n resource_serializer_class = serializer.child.\\\n get_polymorphic_serializer_for_instance(resource_instance)(\n context=serializer.child.context\n )\n else:\n resource_serializer_class = serializer.child\n\n fields = utils.get_serializer_fields(resource_serializer_class)\n force_type_resolution = getattr(\n resource_serializer_class, '_poly_force_type_resolution', False)\n\n json_resource_obj = self.build_json_resource_obj(\n fields, resource, resource_instance, resource_name, force_type_resolution\n )\n meta = 
self.extract_meta(serializer, resource)\n if meta:\n json_resource_obj.update({'meta': utils.format_field_names(meta)})\n json_api_data.append(json_resource_obj)\n\n self.extract_included(\n fields, resource, resource_instance, included_resources, included_cache\n )\n else:\n fields = utils.get_serializer_fields(serializer)\n force_type_resolution = getattr(serializer, '_poly_force_type_resolution', False)\n\n resource_instance = serializer.instance\n json_api_data = self.build_json_resource_obj(\n fields, serializer_data, resource_instance, resource_name, force_type_resolution\n )\n\n meta = self.extract_meta(serializer, serializer_data)\n if meta:\n json_api_data.update({'meta': utils.format_field_names(meta)})\n\n self.extract_included(\n fields, serializer_data, resource_instance, included_resources, included_cache\n )\n\n # Make sure we render data in a specific order\n render_data = OrderedDict()\n\n if isinstance(data, dict) and data.get('links'):\n render_data['links'] = data.get('links')\n\n # format the api root link list\n if view.__class__ and view.__class__.__name__ == 'APIRoot':\n render_data['data'] = None\n render_data['links'] = json_api_data\n else:\n render_data['data'] = json_api_data\n\n if included_cache:\n if isinstance(json_api_data, list):\n objects = json_api_data\n else:\n objects = [json_api_data]\n\n for object in objects:\n obj_type = object.get('type')\n obj_id = object.get('id')\n if obj_type in included_cache and \\\n obj_id in included_cache[obj_type]:\n del included_cache[obj_type][obj_id]\n if not included_cache[obj_type]:\n del included_cache[obj_type]\n\n if included_cache:\n render_data['included'] = list()\n for included_type in sorted(included_cache.keys()):\n for included_id in sorted(included_cache[included_type].keys()):\n render_data['included'].append(included_cache[included_type][included_id])\n\n if json_api_meta:\n render_data['meta'] = utils.format_field_names(json_api_meta)\n\n return super(JSONRenderer, self).render(\n render_data, accepted_media_type, renderer_context\n )\n", "path": "rest_framework_json_api/renderers.py" } ]
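One detail of `render()` above that is easy to miss: resources already present in the primary `data` are stripped from the compound-document cache before `included` is emitted. A standalone sketch of that de-duplication, with illustrative values:

```python
from collections import defaultdict

included_cache = defaultdict(dict)
included_cache['companies']['1'] = {'type': 'companies', 'id': '1'}
included_cache['people']['7'] = {'type': 'people', 'id': '7'}

primary_data = [{'type': 'companies', 'id': '1'}]

for obj in primary_data:
    obj_type, obj_id = obj.get('type'), obj.get('id')
    if obj_type in included_cache and obj_id in included_cache[obj_type]:
        del included_cache[obj_type][obj_id]
        if not included_cache[obj_type]:
            del included_cache[obj_type]  # drop now-empty type buckets

# Only the person is left to serialize under "included".
assert dict(included_cache) == {'people': {'7': {'type': 'people', 'id': '7'}}}
```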
[ { "content": "\"\"\"\nRenderers\n\"\"\"\nimport copy\nfrom collections import OrderedDict, defaultdict\nfrom collections.abc import Iterable\n\nimport inflection\nfrom django.db.models import Manager\nfrom django.utils import encoding\nfrom rest_framework import relations, renderers\nfrom rest_framework.fields import SkipField, get_attribute\nfrom rest_framework.relations import PKOnlyObject\nfrom rest_framework.serializers import BaseSerializer, ListSerializer, Serializer\nfrom rest_framework.settings import api_settings\n\nimport rest_framework_json_api\nfrom rest_framework_json_api import utils\nfrom rest_framework_json_api.relations import HyperlinkedMixin, ResourceRelatedField, SkipDataMixin\nfrom rest_framework_json_api.settings import json_api_settings\n\n\nclass JSONRenderer(renderers.JSONRenderer):\n \"\"\"\n The `JSONRenderer` exposes a number of methods that you may override if you need highly\n custom rendering control.\n\n Render a JSON response per the JSON API spec:\n\n .. code-block:: json\n\n {\n \"data\": [\n {\n \"type\": \"companies\",\n \"id\": \"1\",\n \"attributes\": {\n \"name\": \"Mozilla\",\n \"slug\": \"mozilla\",\n \"date-created\": \"2014-03-13 16:33:37\"\n }\n }\n ]\n }\n \"\"\"\n\n media_type = 'application/vnd.api+json'\n format = 'vnd.api+json'\n\n @classmethod\n def extract_attributes(cls, fields, resource):\n \"\"\"\n Builds the `attributes` object of the JSON API resource object.\n \"\"\"\n data = OrderedDict()\n render_nested_as_attribute = json_api_settings.SERIALIZE_NESTED_SERIALIZERS_AS_ATTRIBUTE\n for field_name, field in iter(fields.items()):\n # ID is always provided in the root of JSON API so remove it from attributes\n if field_name == 'id':\n continue\n # don't output a key for write only fields\n if fields[field_name].write_only:\n continue\n # Skip fields with relations\n if isinstance(\n field, (relations.RelatedField, relations.ManyRelatedField)\n ):\n continue\n\n if isinstance(field, BaseSerializer) and not render_nested_as_attribute:\n continue\n\n # Skip read_only attribute fields when `resource` is an empty\n # serializer. 
Prevents the \"Raw Data\" form of the browsable API\n # from rendering `\"foo\": null` for read only fields\n try:\n resource[field_name]\n except KeyError:\n if fields[field_name].read_only:\n continue\n\n data.update({\n field_name: resource.get(field_name)\n })\n\n return utils.format_field_names(data)\n\n @classmethod\n def extract_relationships(cls, fields, resource, resource_instance):\n \"\"\"\n Builds the relationships top level object based on related serializers.\n \"\"\"\n # Avoid circular deps\n from rest_framework_json_api.relations import ResourceRelatedField\n\n data = OrderedDict()\n render_nested_as_attribute = json_api_settings.SERIALIZE_NESTED_SERIALIZERS_AS_ATTRIBUTE\n\n # Don't try to extract relationships from a non-existent resource\n if resource_instance is None:\n return\n\n for field_name, field in iter(fields.items()):\n # Skip URL field\n if field_name == api_settings.URL_FIELD_NAME:\n continue\n\n # don't output a key for write only fields\n if fields[field_name].write_only:\n continue\n\n # Skip fields without relations\n if not isinstance(\n field, (relations.RelatedField, relations.ManyRelatedField, BaseSerializer)\n ):\n continue\n\n if isinstance(field, BaseSerializer) and render_nested_as_attribute:\n continue\n\n source = field.source\n relation_type = utils.get_related_resource_type(field)\n\n if isinstance(field, relations.HyperlinkedIdentityField):\n resolved, relation_instance = utils.get_relation_instance(\n resource_instance, source, field.parent\n )\n if not resolved:\n continue\n # special case for HyperlinkedIdentityField\n relation_data = list()\n\n # Don't try to query an empty relation\n relation_queryset = relation_instance \\\n if relation_instance is not None else list()\n\n for related_object in relation_queryset:\n relation_data.append(\n OrderedDict([\n ('type', relation_type),\n ('id', encoding.force_str(related_object.pk))\n ])\n )\n\n data.update({field_name: {\n 'links': {\n \"related\": resource.get(field_name)},\n 'data': relation_data,\n 'meta': {\n 'count': len(relation_data)\n }\n }})\n continue\n\n relation_data = {}\n if isinstance(field, HyperlinkedMixin):\n field_links = field.get_links(resource_instance, field.related_link_lookup_field)\n relation_data.update({'links': field_links} if field_links else dict())\n data.update({field_name: relation_data})\n\n if isinstance(field, (ResourceRelatedField, )):\n if not isinstance(field, SkipDataMixin):\n relation_data.update({'data': resource.get(field_name)})\n\n data.update({field_name: relation_data})\n continue\n\n if isinstance(\n field, (relations.PrimaryKeyRelatedField, relations.HyperlinkedRelatedField)\n ):\n resolved, relation = utils.get_relation_instance(\n resource_instance, '%s_id' % source, field.parent\n )\n if not resolved:\n continue\n relation_id = relation if resource.get(field_name) else None\n relation_data = {\n 'data': (\n OrderedDict([\n ('type', relation_type), ('id', encoding.force_str(relation_id))\n ])\n if relation_id is not None else None)\n }\n\n if (\n isinstance(field, relations.HyperlinkedRelatedField) and\n resource.get(field_name)\n ):\n relation_data.update(\n {\n 'links': {\n 'related': resource.get(field_name)\n }\n }\n )\n data.update({field_name: relation_data})\n continue\n\n if isinstance(field, relations.ManyRelatedField):\n resolved, relation_instance = utils.get_relation_instance(\n resource_instance, source, field.parent\n )\n if not resolved:\n continue\n\n relation_data = {}\n\n if isinstance(resource.get(field_name), Iterable):\n 
relation_data.update(\n {\n 'meta': {'count': len(resource.get(field_name))}\n }\n )\n\n if isinstance(field.child_relation, ResourceRelatedField):\n # special case for ResourceRelatedField\n relation_data.update(\n {'data': resource.get(field_name)}\n )\n\n if isinstance(field.child_relation, HyperlinkedMixin):\n field_links = field.child_relation.get_links(\n resource_instance,\n field.child_relation.related_link_lookup_field\n )\n relation_data.update(\n {'links': field_links}\n if field_links else dict()\n )\n\n data.update({field_name: relation_data})\n continue\n\n relation_data = list()\n for nested_resource_instance in relation_instance:\n nested_resource_instance_type = (\n relation_type or\n utils.get_resource_type_from_instance(nested_resource_instance)\n )\n\n relation_data.append(OrderedDict([\n ('type', nested_resource_instance_type),\n ('id', encoding.force_str(nested_resource_instance.pk))\n ]))\n data.update({\n field_name: {\n 'data': relation_data,\n 'meta': {\n 'count': len(relation_data)\n }\n }\n })\n continue\n\n if isinstance(field, ListSerializer):\n resolved, relation_instance = utils.get_relation_instance(\n resource_instance, source, field.parent\n )\n if not resolved:\n continue\n\n relation_data = list()\n\n serializer_data = resource.get(field_name)\n resource_instance_queryset = list(relation_instance)\n if isinstance(serializer_data, list):\n for position in range(len(serializer_data)):\n nested_resource_instance = resource_instance_queryset[position]\n nested_resource_instance_type = (\n relation_type or\n utils.get_resource_type_from_instance(nested_resource_instance)\n )\n\n relation_data.append(OrderedDict([\n ('type', nested_resource_instance_type),\n ('id', encoding.force_str(nested_resource_instance.pk))\n ]))\n\n data.update({field_name: {'data': relation_data}})\n continue\n\n if isinstance(field, Serializer):\n relation_instance_id = getattr(resource_instance, source + \"_id\", None)\n if not relation_instance_id:\n resolved, relation_instance = utils.get_relation_instance(\n resource_instance, source, field.parent\n )\n if not resolved:\n continue\n\n if relation_instance is not None:\n relation_instance_id = relation_instance.pk\n\n data.update({\n field_name: {\n 'data': (\n OrderedDict([\n ('type', relation_type),\n ('id', encoding.force_str(relation_instance_id))\n ]) if resource.get(field_name) else None)\n }\n })\n continue\n\n return utils.format_field_names(data)\n\n @classmethod\n def extract_relation_instance(cls, field, resource_instance):\n \"\"\"\n Determines what instance represents given relation and extracts it.\n\n Relation instance is determined exactly same way as it determined\n in parent serializer\n \"\"\"\n try:\n res = field.get_attribute(resource_instance)\n if isinstance(res, PKOnlyObject):\n return get_attribute(resource_instance, field.source_attrs)\n return res\n except SkipField:\n return None\n\n @classmethod\n def extract_included(cls, fields, resource, resource_instance, included_resources,\n included_cache):\n \"\"\"\n Adds related data to the top level included key when the request includes\n ?include=example,example_field2\n \"\"\"\n # this function may be called with an empty record (example: Browsable Interface)\n if not resource_instance:\n return\n\n current_serializer = fields.serializer\n context = current_serializer.context\n included_serializers = utils.get_included_serializers(current_serializer)\n included_resources = copy.copy(included_resources)\n included_resources = [inflection.underscore(value) 
for value in included_resources]\n render_nested_as_attribute = json_api_settings.SERIALIZE_NESTED_SERIALIZERS_AS_ATTRIBUTE\n\n for field_name, field in iter(fields.items()):\n # Skip URL field\n if field_name == api_settings.URL_FIELD_NAME:\n continue\n\n # Skip fields without relations\n if not isinstance(\n field, (relations.RelatedField, relations.ManyRelatedField, BaseSerializer)\n ):\n continue\n\n if isinstance(field, BaseSerializer) and render_nested_as_attribute:\n continue\n\n try:\n included_resources.remove(field_name)\n except ValueError:\n # Skip fields not in requested included resources\n # If no child field, directly continue with the next field\n if field_name not in [node.split('.')[0] for node in included_resources]:\n continue\n\n relation_instance = cls.extract_relation_instance(\n field, resource_instance\n )\n if isinstance(relation_instance, Manager):\n relation_instance = relation_instance.all()\n\n serializer_data = resource.get(field_name)\n\n if isinstance(field, relations.ManyRelatedField):\n serializer_class = included_serializers[field_name]\n field = serializer_class(relation_instance, many=True, context=context)\n serializer_data = field.data\n\n if isinstance(field, relations.RelatedField):\n if relation_instance is None or not serializer_data:\n continue\n\n many = field._kwargs.get('child_relation', None) is not None\n\n if isinstance(field, ResourceRelatedField) and not many:\n already_included = serializer_data['type'] in included_cache and \\\n serializer_data['id'] in included_cache[serializer_data['type']]\n\n if already_included:\n continue\n\n serializer_class = included_serializers[field_name]\n field = serializer_class(relation_instance, many=many, context=context)\n serializer_data = field.data\n\n new_included_resources = [key.replace('%s.' 
% field_name, '', 1)\n for key in included_resources\n if field_name == key.split('.')[0]]\n\n if isinstance(field, ListSerializer):\n serializer = field.child\n relation_type = utils.get_resource_type_from_serializer(serializer)\n relation_queryset = list(relation_instance)\n\n if serializer_data:\n for position in range(len(serializer_data)):\n serializer_resource = serializer_data[position]\n nested_resource_instance = relation_queryset[position]\n resource_type = (\n relation_type or\n utils.get_resource_type_from_instance(nested_resource_instance)\n )\n serializer_fields = utils.get_serializer_fields(\n serializer.__class__(\n nested_resource_instance, context=serializer.context\n )\n )\n new_item = cls.build_json_resource_obj(\n serializer_fields,\n serializer_resource,\n nested_resource_instance,\n resource_type,\n getattr(serializer, '_poly_force_type_resolution', False)\n )\n included_cache[new_item['type']][new_item['id']] = \\\n utils.format_field_names(new_item)\n cls.extract_included(\n serializer_fields,\n serializer_resource,\n nested_resource_instance,\n new_included_resources,\n included_cache,\n )\n\n if isinstance(field, Serializer):\n relation_type = utils.get_resource_type_from_serializer(field)\n\n # Get the serializer fields\n serializer_fields = utils.get_serializer_fields(field)\n if serializer_data:\n new_item = cls.build_json_resource_obj(\n serializer_fields,\n serializer_data,\n relation_instance,\n relation_type,\n getattr(field, '_poly_force_type_resolution', False)\n )\n included_cache[new_item['type']][new_item['id']] = utils.format_field_names(\n new_item\n )\n cls.extract_included(\n serializer_fields,\n serializer_data,\n relation_instance,\n new_included_resources,\n included_cache,\n )\n\n @classmethod\n def extract_meta(cls, serializer, resource):\n \"\"\"\n Gathers the data from serializer fields specified in meta_fields and adds it to\n the meta object.\n \"\"\"\n if hasattr(serializer, 'child'):\n meta = getattr(serializer.child, 'Meta', None)\n else:\n meta = getattr(serializer, 'Meta', None)\n meta_fields = getattr(meta, 'meta_fields', [])\n data = OrderedDict()\n for field_name in meta_fields:\n data.update({\n field_name: resource.get(field_name)\n })\n return data\n\n @classmethod\n def extract_root_meta(cls, serializer, resource):\n \"\"\"\n Calls a `get_root_meta` function on a serializer, if it exists.\n \"\"\"\n many = False\n if hasattr(serializer, 'child'):\n many = True\n serializer = serializer.child\n\n data = {}\n if getattr(serializer, 'get_root_meta', None):\n json_api_meta = serializer.get_root_meta(resource, many)\n assert isinstance(json_api_meta, dict), 'get_root_meta must return a dict'\n data.update(json_api_meta)\n return data\n\n @classmethod\n def build_json_resource_obj(cls, fields, resource, resource_instance, resource_name,\n force_type_resolution=False):\n \"\"\"\n Builds the resource object (type, id, attributes) and extracts relationships.\n \"\"\"\n # Determine type from the instance if the underlying model is polymorphic\n if force_type_resolution:\n resource_name = utils.get_resource_type_from_instance(resource_instance)\n resource_data = [\n ('type', resource_name),\n ('id', encoding.force_str(resource_instance.pk) if resource_instance else None),\n ('attributes', cls.extract_attributes(fields, resource)),\n ]\n relationships = cls.extract_relationships(fields, resource, resource_instance)\n if relationships:\n resource_data.append(('relationships', relationships))\n # Add 'self' link if field is present and 
valid\n if api_settings.URL_FIELD_NAME in resource and \\\n isinstance(fields[api_settings.URL_FIELD_NAME], relations.RelatedField):\n resource_data.append(('links', {'self': resource[api_settings.URL_FIELD_NAME]}))\n return OrderedDict(resource_data)\n\n def render_relationship_view(self, data, accepted_media_type=None, renderer_context=None):\n # Special case for RelationshipView\n view = renderer_context.get(\"view\", None)\n render_data = OrderedDict([\n ('data', data)\n ])\n links = view.get_links()\n if links:\n render_data.update({'links': links}),\n return super(JSONRenderer, self).render(\n render_data, accepted_media_type, renderer_context\n )\n\n def render_errors(self, data, accepted_media_type=None, renderer_context=None):\n return super(JSONRenderer, self).render(\n utils.format_errors(data), accepted_media_type, renderer_context\n )\n\n def render(self, data, accepted_media_type=None, renderer_context=None):\n\n renderer_context = renderer_context or {}\n\n view = renderer_context.get(\"view\", None)\n request = renderer_context.get(\"request\", None)\n\n # Get the resource name.\n resource_name = utils.get_resource_name(renderer_context)\n\n # If this is an error response, skip the rest.\n if resource_name == 'errors':\n return self.render_errors(data, accepted_media_type, renderer_context)\n\n # if response.status_code is 204 then the data to be rendered must\n # be None\n response = renderer_context.get('response', None)\n if response is not None and response.status_code == 204:\n return super(JSONRenderer, self).render(\n None, accepted_media_type, renderer_context\n )\n\n from rest_framework_json_api.views import RelationshipView\n if isinstance(view, RelationshipView):\n return self.render_relationship_view(data, accepted_media_type, renderer_context)\n\n # If `resource_name` is set to None then render default as the dev\n # wants to build the output format manually.\n if resource_name is None or resource_name is False:\n return super(JSONRenderer, self).render(\n data, accepted_media_type, renderer_context\n )\n\n json_api_data = data\n # initialize json_api_meta with pagination meta or an empty dict\n json_api_meta = data.get('meta', {}) if isinstance(data, dict) else {}\n included_cache = defaultdict(dict)\n\n if data and 'results' in data:\n serializer_data = data[\"results\"]\n else:\n serializer_data = data\n\n serializer = getattr(serializer_data, 'serializer', None)\n\n included_resources = utils.get_included_resources(request, serializer)\n\n if serializer is not None:\n\n # Extract root meta for any type of serializer\n json_api_meta.update(self.extract_root_meta(serializer, serializer_data))\n\n if getattr(serializer, 'many', False):\n json_api_data = list()\n\n for position in range(len(serializer_data)):\n resource = serializer_data[position] # Get current resource\n resource_instance = serializer.instance[position] # Get current instance\n\n if isinstance(serializer.child, rest_framework_json_api.\n serializers.PolymorphicModelSerializer):\n resource_serializer_class = serializer.child.\\\n get_polymorphic_serializer_for_instance(resource_instance)(\n context=serializer.child.context\n )\n else:\n resource_serializer_class = serializer.child\n\n fields = utils.get_serializer_fields(resource_serializer_class)\n force_type_resolution = getattr(\n resource_serializer_class, '_poly_force_type_resolution', False)\n\n json_resource_obj = self.build_json_resource_obj(\n fields, resource, resource_instance, resource_name, force_type_resolution\n )\n meta = 
self.extract_meta(serializer, resource)\n if meta:\n json_resource_obj.update({'meta': utils.format_field_names(meta)})\n json_api_data.append(json_resource_obj)\n\n self.extract_included(\n fields, resource, resource_instance, included_resources, included_cache\n )\n else:\n fields = utils.get_serializer_fields(serializer)\n force_type_resolution = getattr(serializer, '_poly_force_type_resolution', False)\n\n resource_instance = serializer.instance\n json_api_data = self.build_json_resource_obj(\n fields, serializer_data, resource_instance, resource_name, force_type_resolution\n )\n\n meta = self.extract_meta(serializer, serializer_data)\n if meta:\n json_api_data.update({'meta': utils.format_field_names(meta)})\n\n self.extract_included(\n fields, serializer_data, resource_instance, included_resources, included_cache\n )\n\n # Make sure we render data in a specific order\n render_data = OrderedDict()\n\n if isinstance(data, dict) and data.get('links'):\n render_data['links'] = data.get('links')\n\n # format the api root link list\n if view.__class__ and view.__class__.__name__ == 'APIRoot':\n render_data['data'] = None\n render_data['links'] = json_api_data\n else:\n render_data['data'] = json_api_data\n\n if included_cache:\n if isinstance(json_api_data, list):\n objects = json_api_data\n else:\n objects = [json_api_data]\n\n for object in objects:\n obj_type = object.get('type')\n obj_id = object.get('id')\n if obj_type in included_cache and \\\n obj_id in included_cache[obj_type]:\n del included_cache[obj_type][obj_id]\n if not included_cache[obj_type]:\n del included_cache[obj_type]\n\n if included_cache:\n render_data['included'] = list()\n for included_type in sorted(included_cache.keys()):\n for included_id in sorted(included_cache[included_type].keys()):\n render_data['included'].append(included_cache[included_type][included_id])\n\n if json_api_meta:\n render_data['meta'] = utils.format_field_names(json_api_meta)\n\n return super(JSONRenderer, self).render(\n render_data, accepted_media_type, renderer_context\n )\n", "path": "rest_framework_json_api/renderers.py" } ]
diff --git a/README.rst b/README.rst index b3162184..bc52bbf5 100644 --- a/README.rst +++ b/README.rst @@ -48,7 +48,7 @@ like the following:: }, "data": [{ "type": "identities", - "id": 3, + "id": "3", "attributes": { "username": "john", "full-name": "John Coltrane" diff --git a/docs/getting-started.md b/docs/getting-started.md index bef744eb..58768e39 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -32,7 +32,7 @@ like the following: }, "data": [{ "type": "identities", - "id": 3, + "id": "3", "attributes": { "username": "john", "full-name": "John Coltrane" diff --git a/docs/usage.md b/docs/usage.md index 5b7010d3..9fd46e6b 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -338,7 +338,7 @@ Example - Without format conversion: { "data": [{ "type": "identities", - "id": 3, + "id": "3", "attributes": { "username": "john", "first_name": "John", @@ -359,7 +359,7 @@ Example - With format conversion set to `dasherize`: { "data": [{ "type": "identities", - "id": 3, + "id": "3", "attributes": { "username": "john", "first-name": "John", @@ -389,7 +389,7 @@ Example without format conversion: { "data": [{ "type": "blog_identity", - "id": 3, + "id": "3", "attributes": { ... }, @@ -412,7 +412,7 @@ When set to dasherize: { "data": [{ "type": "blog-identity", - "id": 3, + "id": "3", "attributes": { ... }, @@ -438,7 +438,7 @@ Example without pluralization: { "data": [{ "type": "identity", - "id": 3, + "id": "3", "attributes": { ... }, @@ -446,7 +446,7 @@ Example without pluralization: "home_towns": { "data": [{ "type": "home_town", - "id": 3 + "id": "3" }] } } @@ -461,7 +461,7 @@ When set to pluralize: { "data": [{ "type": "identities", - "id": 3, + "id": "3", "attributes": { ... }, @@ -469,7 +469,7 @@ When set to pluralize: "home_towns": { "data": [{ "type": "home_towns", - "id": 3 + "id": "3" }] } } diff --git a/rest_framework_json_api/renderers.py b/rest_framework_json_api/renderers.py index ccd71510..cfc74f1f 100644 --- a/rest_framework_json_api/renderers.py +++ b/rest_framework_json_api/renderers.py @@ -33,7 +33,7 @@ class JSONRenderer(renderers.JSONRenderer): "data": [ { "type": "companies", - "id": 1, + "id": "1", "attributes": { "name": "Mozilla", "slug": "mozilla",
qtile__qtile-4246
Tasklist Widget Icons not vertically centered
### The issue:
Depending on the icon size, the placement of the icon relative to the text shifts. Here is an example with icon size = 45 and text size = 30.
![1664826413](https://user-images.githubusercontent.com/12877032/193666223-f80a1051-ecbd-45fc-894a-1c50ba4d9159.png)
And here it is with icon size = 30 as well:
![1664826716](https://user-images.githubusercontent.com/12877032/193666538-9f88eb29-d0b5-4676-8dc9-c8b423b5cc8d.png)
A simple fix would be to add a user-defined offset to the `y` calculation in `draw_icon`. I subclassed `TaskList` and manually adjusted the `y` value to match my icon size, and that works okay. A more flexible solution is to compute `y` so the icon is always vertically centered.
#1714 discussed the same issue but was never fixed.
### Required:
- [X] I have searched past issues to see if this bug has already been reported.
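The flexible approach described in the issue is what the upstream patch ends up doing (see the pr_diff at the end of this record). A minimal sketch of the same idea as a config-side workaround — the subclass name is made up, while `draw_icon` and the attributes it uses mirror the real `libqtile/widget/tasklist.py`:

```python
# Sketch only: centre the icon in the bar instead of pinning it to the
# top padding. The class name is illustrative; everything else follows
# TaskList.draw_icon from libqtile/widget/tasklist.py.
from libqtile.widget.tasklist import TaskList


class CenteredIconTaskList(TaskList):
    def draw_icon(self, surface, offset):
        if not surface:
            return

        x = offset + self.borderwidth + self.padding_x
        # Half the leftover widget height goes above the icon, half
        # below, so any icon_size lands vertically centred.
        y = (self.height - self.icon_size) // 2

        self.drawer.ctx.save()
        self.drawer.ctx.translate(x, y)
        self.drawer.ctx.set_source(surface)
        self.drawer.ctx.paint()
        self.drawer.ctx.restore()
```

Dropped into a bar as `CenteredIconTaskList(icon_size=45, fontsize=30)`, the 45px icon from the report sits centred rather than hugging the top edge.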
[ { "content": "# Copyright (c) 2012-2014 roger\n# Copyright (c) 2012-2015 Tycho Andersen\n# Copyright (c) 2013 dequis\n# Copyright (c) 2013 Tao Sauvage\n# Copyright (c) 2013 Craig Barnes\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2018 Piotr Przymus\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport re\n\nimport cairocffi\n\ntry:\n from xdg.IconTheme import getIconPath\n\n has_xdg = True\nexcept ImportError:\n has_xdg = False\n\nfrom libqtile import bar, hook, pangocffi\nfrom libqtile.images import Img\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\n\nclass TaskList(base._Widget, base.PaddingMixin, base.MarginMixin):\n \"\"\"Displays the icon and name of each window in the current group\n\n Contrary to WindowTabs this is an interactive widget. The window that\n currently has focus is highlighted.\n\n Optional requirements: `pyxdg <https://pypi.org/project/pyxdg/>`__ is needed\n to use theme icons and to display icons on Wayland.\n \"\"\"\n\n orientations = base.ORIENTATION_HORIZONTAL\n defaults = [\n (\"font\", \"sans\", \"Default font\"),\n (\"fontsize\", None, \"Font size. Calculated if None.\"),\n (\"foreground\", \"ffffff\", \"Foreground colour\"),\n (\"fontshadow\", None, \"font shadow color, default is None(no shadow)\"),\n (\"borderwidth\", 2, \"Current group border width\"),\n (\"border\", \"215578\", \"Border colour\"),\n (\"rounded\", True, \"To round or not to round borders\"),\n (\n \"highlight_method\",\n \"border\",\n \"Method of highlighting (one of 'border' or 'block') \"\n \"Uses `*_border` color settings\",\n ),\n (\"urgent_border\", \"FF0000\", \"Urgent border color\"),\n (\n \"urgent_alert_method\",\n \"border\",\n \"Method for alerting you of WM urgent \" \"hints (one of 'border' or 'text')\",\n ),\n (\n \"unfocused_border\",\n None,\n \"Border color for unfocused windows. \"\n \"Affects only hightlight_method 'border' and 'block'. \"\n \"Defaults to None, which means no special color.\",\n ),\n (\n \"max_title_width\",\n None,\n \"Max size in pixels of task title.\" \"(if set to None, as much as available.)\",\n ),\n (\n \"title_width_method\",\n None,\n \"Method to compute the width of task title. (None, 'uniform'.)\"\n \"Defaults to None, the normal behaviour.\",\n ),\n (\n \"parse_text\",\n None,\n \"Function to parse and modify window names. \"\n \"e.g. 
function in config that removes excess \"\n \"strings from window name: \"\n \"def my_func(text)\"\n ' for string in [\" - Chromium\", \" - Firefox\"]:'\n ' text = text.replace(string, \"\")'\n \" return text\"\n \"then set option parse_text=my_func\",\n ),\n (\"spacing\", None, \"Spacing between tasks.\" \"(if set to None, will be equal to margin_x)\"),\n (\n \"txt_minimized\",\n \"_ \",\n \"Text representation of the minimized window state. \" 'e.g., \"_ \" or \"\\U0001F5D5 \"',\n ),\n (\n \"txt_maximized\",\n \"[] \",\n \"Text representation of the maximized window state. \" 'e.g., \"[] \" or \"\\U0001F5D6 \"',\n ),\n (\n \"txt_floating\",\n \"V \",\n \"Text representation of the floating window state. \" 'e.g., \"V \" or \"\\U0001F5D7 \"',\n ),\n (\n \"markup_normal\",\n None,\n \"Text markup of the normal window state. Supports pangomarkup with markup=True.\"\n 'e.g., \"{}\" or \"<span underline=\"low\">{}</span>\"',\n ),\n (\n \"markup_minimized\",\n None,\n \"Text markup of the minimized window state. Supports pangomarkup with markup=True.\"\n 'e.g., \"{}\" or \"<span underline=\"low\">{}</span>\"',\n ),\n (\n \"markup_maximized\",\n None,\n \"Text markup of the maximized window state. Supports pangomarkup with markup=True.\"\n 'e.g., \"{}\" or \"<span underline=\"low\">{}</span>\"',\n ),\n (\n \"markup_floating\",\n None,\n \"Text markup of the floating window state. Supports pangomarkup with markup=True.\"\n 'e.g., \"{}\" or \"<span underline=\"low\">{}</span>\"',\n ),\n (\n \"markup_focused\",\n None,\n \"Text markup of the focused window state. Supports pangomarkup with markup=True.\"\n 'e.g., \"{}\" or \"<span underline=\"low\">{}</span>\"',\n ),\n (\n \"icon_size\",\n None,\n \"Icon size. \" \"(Calculated if set to None. Icons are hidden if set to 0.)\",\n ),\n (\n \"theme_mode\",\n None,\n \"When to use theme icons. `None` = never, `preferred` = use if available, \"\n \"`fallback` = use if app does not provide icon directly. \"\n \"`preferred` and `fallback` have identical behaviour on Wayland.\",\n ),\n (\n \"theme_path\",\n None,\n \"Path to icon theme to be used by pyxdg for icons. 
``None`` will use default icon theme.\",\n ),\n (\n \"window_name_location\",\n False,\n \"Whether to show the location of the window in the title.\",\n ),\n (\n \"window_name_location_offset\",\n 0,\n \"The offset given to window loction\",\n ),\n ]\n\n def __init__(self, **config):\n base._Widget.__init__(self, bar.STRETCH, **config)\n self.add_defaults(TaskList.defaults)\n self.add_defaults(base.PaddingMixin.defaults)\n self.add_defaults(base.MarginMixin.defaults)\n self._icons_cache = {}\n self._box_end_positions = []\n self.markup = False\n self.clicked = None\n if self.spacing is None:\n self.spacing = self.margin_x\n\n self.add_callbacks({\"Button1\": self.select_window})\n\n def box_width(self, text):\n \"\"\"\n calculate box width for given text.\n If max_title_width is given, the returned width is limited to it.\n \"\"\"\n if self.markup:\n text = re.sub(\"<[^<]+?>\", \"\", text)\n width, _ = self.drawer.max_layout_size([text], self.font, self.fontsize)\n width = width + 2 * (self.padding_x + self.borderwidth)\n return width\n\n def get_taskname(self, window):\n \"\"\"\n Get display name for given window.\n Depending on its state minimized, maximized and floating\n appropriate characters are prepended.\n \"\"\"\n state = \"\"\n markup_str = self.markup_normal\n\n # Enforce markup and new string format behaviour when\n # at least one markup_* option is used.\n # Mixing non markup and markup may cause problems.\n if (\n self.markup_minimized\n or self.markup_maximized\n or self.markup_floating\n or self.markup_focused\n ):\n enforce_markup = True\n else:\n enforce_markup = False\n\n if window is None:\n pass\n elif window.minimized:\n state = self.txt_minimized\n markup_str = self.markup_minimized\n elif window.maximized:\n state = self.txt_maximized\n markup_str = self.markup_maximized\n elif window.floating:\n state = self.txt_floating\n markup_str = self.markup_floating\n elif window is window.group.current_window:\n markup_str = self.markup_focused\n\n window_location = (\n f\"[{window.group.windows.index(window) + self.window_name_location_offset}] \"\n if self.window_name_location\n else \"\"\n )\n window_name = window_location + window.name if window and window.name else \"?\"\n\n if callable(self.parse_text):\n try:\n window_name = self.parse_text(window_name)\n except: # noqa: E722\n logger.exception(\"parse_text function failed:\")\n\n # Emulate default widget behavior if markup_str is None\n if enforce_markup and markup_str is None:\n markup_str = \"%s{}\" % (state)\n\n if markup_str is not None:\n self.markup = True\n window_name = pangocffi.markup_escape_text(window_name)\n return markup_str.format(window_name)\n\n return \"%s%s\" % (state, window_name)\n\n @property\n def windows(self):\n if self.qtile.core.name == \"x11\":\n return [\n w\n for w in self.bar.screen.group.windows\n if w.window.get_wm_type() in (\"normal\", None)\n ]\n return self.bar.screen.group.windows\n\n def calc_box_widths(self):\n \"\"\"\n Calculate box width for each window in current group.\n If the available space is less than overall size of boxes,\n the boxes are shrunk by percentage if greater than average.\n \"\"\"\n windows = self.windows\n window_count = len(windows)\n\n # if no windows present for current group just return empty list\n if not window_count:\n return []\n\n # Determine available and max average width for task name boxes.\n width_total = self.width - 2 * self.margin_x - (window_count - 1) * self.spacing\n width_avg = width_total / window_count\n\n names = 
[self.get_taskname(w) for w in windows]\n\n if self.icon_size == 0:\n icons = len(windows) * [None]\n else:\n icons = [self.get_window_icon(w) for w in windows]\n\n # Obey title_width_method if specified\n if self.title_width_method == \"uniform\":\n width_uniform = width_total // window_count\n width_boxes = [width_uniform for w in range(window_count)]\n else:\n # Default behaviour: calculated width for each task according to\n # icon and task name consisting\n # of state abbreviation and window name\n width_boxes = [\n (\n self.box_width(names[idx])\n + ((self.icon_size + self.padding_x) if icons[idx] else 0)\n )\n for idx in range(window_count)\n ]\n\n # Obey max_title_width if specified\n if self.max_title_width:\n width_boxes = [min(w, self.max_title_width) for w in width_boxes]\n\n width_sum = sum(width_boxes)\n\n # calculated box width are to wide for available widget space:\n if width_sum > width_total:\n # sum the width of tasks shorter than calculated average\n # and calculate a ratio to shrink boxes greater than width_avg\n width_shorter_sum = sum([w for w in width_boxes if w < width_avg])\n\n ratio = (width_total - width_shorter_sum) / (width_sum - width_shorter_sum)\n # determine new box widths by shrinking boxes greater than avg\n width_boxes = [(w if w < width_avg else w * ratio) for w in width_boxes]\n\n return zip(windows, icons, names, width_boxes)\n\n def _configure(self, qtile, bar):\n base._Widget._configure(self, qtile, bar)\n\n if not has_xdg and self.theme_mode is not None:\n logger.warning(\"You must install pyxdg to use theme icons.\")\n self.theme_mode = None\n\n if self.theme_mode and self.theme_mode not in [\"preferred\", \"fallback\"]:\n logger.warning(\n \"Unexpected theme_mode (%s). Theme icons will be disabled.\", self.theme_mode\n )\n self.theme_mode = None\n\n if qtile.core.name == \"wayland\" and self.theme_mode is None and self.icon_size != 0:\n # Disable icons\n self.icon_size = 0\n\n if self.icon_size is None:\n self.icon_size = self.bar.height - 2 * (self.borderwidth + self.margin_y)\n\n if self.fontsize is None:\n calc = self.bar.height - self.margin_y * 2 - self.borderwidth * 2 - self.padding_y * 2\n self.fontsize = max(calc, 1)\n self.layout = self.drawer.textlayout(\n \"\", \"ffffff\", self.font, self.fontsize, self.fontshadow, wrap=False\n )\n self.setup_hooks()\n\n def update(self, window=None):\n if not window or window in self.windows:\n self.bar.draw()\n\n def remove_icon_cache(self, window):\n wid = window.wid\n if wid in self._icons_cache:\n self._icons_cache.pop(wid)\n\n def invalidate_cache(self, window):\n self.remove_icon_cache(window)\n self.update(window)\n\n def setup_hooks(self):\n hook.subscribe.client_name_updated(self.update)\n hook.subscribe.focus_change(self.update)\n hook.subscribe.float_change(self.update)\n hook.subscribe.client_urgent_hint_changed(self.update)\n\n hook.subscribe.net_wm_icon_change(self.invalidate_cache)\n hook.subscribe.client_killed(self.remove_icon_cache)\n\n def drawtext(self, text, textcolor, width):\n if self.markup:\n self.layout.markup = self.markup\n\n self.layout.text = text\n\n self.layout.font_family = self.font\n self.layout.font_size = self.fontsize\n self.layout.colour = textcolor\n if width is not None:\n self.layout.width = width\n\n def drawbox(\n self,\n offset,\n text,\n bordercolor,\n textcolor,\n width=None,\n rounded=False,\n block=False,\n icon=None,\n ):\n self.drawtext(text, textcolor, width)\n\n icon_padding = (self.icon_size + self.padding_x) if icon else 0\n padding_x = 
[self.padding_x + icon_padding, self.padding_x]\n\n if bordercolor is None:\n # border colour is set to None when we don't want to draw a border at all\n # Rather than dealing with alpha blending issues, we just set border width\n # to 0.\n border_width = 0\n framecolor = self.background or self.bar.background\n else:\n border_width = self.borderwidth\n framecolor = bordercolor\n\n framed = self.layout.framed(border_width, framecolor, padding_x, self.padding_y)\n if block and bordercolor is not None:\n framed.draw_fill(offset, self.margin_y, rounded)\n else:\n framed.draw(offset, self.margin_y, rounded)\n\n if icon:\n self.draw_icon(icon, offset)\n\n def get_clicked(self, x, y):\n box_start = self.margin_x\n for box_end, win in zip(self._box_end_positions, self.windows):\n if box_start <= x <= box_end:\n return win\n else:\n box_start = box_end + self.spacing\n # not found any , return None\n return None\n\n def button_press(self, x, y, button):\n self.clicked = self.get_clicked(x, y)\n base._Widget.button_press(self, x, y, button)\n\n def select_window(self):\n if self.clicked:\n current_win = self.bar.screen.group.current_window\n window = self.clicked\n if window is not current_win:\n window.group.focus(window, False)\n if window.floating:\n window.bring_to_front()\n else:\n window.toggle_minimize()\n\n def _get_class_icon(self, window):\n if not getattr(window, \"icons\", False):\n return None\n\n icons = sorted(\n iter(window.icons.items()),\n key=lambda x: abs(self.icon_size - int(x[0].split(\"x\")[0])),\n )\n icon = icons[0]\n width, height = map(int, icon[0].split(\"x\"))\n\n img = cairocffi.ImageSurface.create_for_data(\n icon[1], cairocffi.FORMAT_ARGB32, width, height\n )\n\n return img\n\n def _get_theme_icon(self, window):\n classes = window.get_wm_class()\n\n if not classes:\n return None\n\n icon = None\n\n for cl in classes:\n for app in set([cl, cl.lower()]):\n icon = getIconPath(app, theme=self.theme_path)\n if icon is not None:\n break\n else:\n continue\n break\n\n if not icon:\n return None\n\n img = Img.from_path(icon)\n\n return img.surface\n\n def get_window_icon(self, window):\n if not getattr(window, \"icons\", False) and self.theme_mode is None:\n return None\n\n cache = self._icons_cache.get(window.wid)\n if cache:\n return cache\n\n surface = None\n img = None\n\n if self.qtile.core.name == \"x11\":\n img = self._get_class_icon(window)\n\n if self.theme_mode == \"preferred\" or (self.theme_mode == \"fallback\" and img is None):\n xdg_img = self._get_theme_icon(window)\n if xdg_img:\n img = xdg_img\n\n if img is not None:\n surface = cairocffi.SurfacePattern(img)\n height = img.get_height()\n width = img.get_width()\n scaler = cairocffi.Matrix()\n if height != self.icon_size:\n sp = height / self.icon_size\n height = self.icon_size\n width /= sp\n scaler.scale(sp, sp)\n surface.set_matrix(scaler)\n\n self._icons_cache[window.wid] = surface\n return surface\n\n def draw_icon(self, surface, offset):\n if not surface:\n return\n\n x = offset + self.borderwidth + self.padding_x\n y = self.padding_y + self.borderwidth\n\n self.drawer.ctx.save()\n self.drawer.ctx.translate(x, y)\n self.drawer.ctx.set_source(surface)\n self.drawer.ctx.paint()\n self.drawer.ctx.restore()\n\n def draw(self):\n self.drawer.clear(self.background or self.bar.background)\n offset = self.margin_x\n\n self._box_end_positions = []\n for w, icon, task, bw in self.calc_box_widths():\n self._box_end_positions.append(offset + bw)\n\n if w.urgent:\n border = self.urgent_border\n text_color = border\n 
elif w is w.group.current_window:\n border = self.border\n text_color = border\n else:\n border = self.unfocused_border or None\n text_color = self.foreground\n\n if self.highlight_method == \"text\":\n border = None\n else:\n text_color = self.foreground\n\n textwidth = (\n bw - 2 * self.padding_x - ((self.icon_size + self.padding_x) if icon else 0)\n )\n self.drawbox(\n offset,\n task,\n border,\n text_color,\n rounded=self.rounded,\n block=(self.highlight_method == \"block\"),\n width=textwidth,\n icon=icon,\n )\n offset += bw + self.spacing\n\n self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.width)\n", "path": "libqtile/widget/tasklist.py" } ]
[ { "content": "# Copyright (c) 2012-2014 roger\n# Copyright (c) 2012-2015 Tycho Andersen\n# Copyright (c) 2013 dequis\n# Copyright (c) 2013 Tao Sauvage\n# Copyright (c) 2013 Craig Barnes\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2018 Piotr Przymus\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport re\n\nimport cairocffi\n\ntry:\n from xdg.IconTheme import getIconPath\n\n has_xdg = True\nexcept ImportError:\n has_xdg = False\n\nfrom libqtile import bar, hook, pangocffi\nfrom libqtile.images import Img\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\n\nclass TaskList(base._Widget, base.PaddingMixin, base.MarginMixin):\n \"\"\"Displays the icon and name of each window in the current group\n\n Contrary to WindowTabs this is an interactive widget. The window that\n currently has focus is highlighted.\n\n Optional requirements: `pyxdg <https://pypi.org/project/pyxdg/>`__ is needed\n to use theme icons and to display icons on Wayland.\n \"\"\"\n\n orientations = base.ORIENTATION_HORIZONTAL\n defaults = [\n (\"font\", \"sans\", \"Default font\"),\n (\"fontsize\", None, \"Font size. Calculated if None.\"),\n (\"foreground\", \"ffffff\", \"Foreground colour\"),\n (\"fontshadow\", None, \"font shadow color, default is None(no shadow)\"),\n (\"borderwidth\", 2, \"Current group border width\"),\n (\"border\", \"215578\", \"Border colour\"),\n (\"rounded\", True, \"To round or not to round borders\"),\n (\n \"highlight_method\",\n \"border\",\n \"Method of highlighting (one of 'border' or 'block') \"\n \"Uses `*_border` color settings\",\n ),\n (\"urgent_border\", \"FF0000\", \"Urgent border color\"),\n (\n \"urgent_alert_method\",\n \"border\",\n \"Method for alerting you of WM urgent \" \"hints (one of 'border' or 'text')\",\n ),\n (\n \"unfocused_border\",\n None,\n \"Border color for unfocused windows. \"\n \"Affects only hightlight_method 'border' and 'block'. \"\n \"Defaults to None, which means no special color.\",\n ),\n (\n \"max_title_width\",\n None,\n \"Max size in pixels of task title.\" \"(if set to None, as much as available.)\",\n ),\n (\n \"title_width_method\",\n None,\n \"Method to compute the width of task title. (None, 'uniform'.)\"\n \"Defaults to None, the normal behaviour.\",\n ),\n (\n \"parse_text\",\n None,\n \"Function to parse and modify window names. \"\n \"e.g. 
function in config that removes excess \"\n \"strings from window name: \"\n \"def my_func(text)\"\n ' for string in [\" - Chromium\", \" - Firefox\"]:'\n ' text = text.replace(string, \"\")'\n \" return text\"\n \"then set option parse_text=my_func\",\n ),\n (\"spacing\", None, \"Spacing between tasks.\" \"(if set to None, will be equal to margin_x)\"),\n (\n \"txt_minimized\",\n \"_ \",\n \"Text representation of the minimized window state. \" 'e.g., \"_ \" or \"\\U0001F5D5 \"',\n ),\n (\n \"txt_maximized\",\n \"[] \",\n \"Text representation of the maximized window state. \" 'e.g., \"[] \" or \"\\U0001F5D6 \"',\n ),\n (\n \"txt_floating\",\n \"V \",\n \"Text representation of the floating window state. \" 'e.g., \"V \" or \"\\U0001F5D7 \"',\n ),\n (\n \"markup_normal\",\n None,\n \"Text markup of the normal window state. Supports pangomarkup with markup=True.\"\n 'e.g., \"{}\" or \"<span underline=\"low\">{}</span>\"',\n ),\n (\n \"markup_minimized\",\n None,\n \"Text markup of the minimized window state. Supports pangomarkup with markup=True.\"\n 'e.g., \"{}\" or \"<span underline=\"low\">{}</span>\"',\n ),\n (\n \"markup_maximized\",\n None,\n \"Text markup of the maximized window state. Supports pangomarkup with markup=True.\"\n 'e.g., \"{}\" or \"<span underline=\"low\">{}</span>\"',\n ),\n (\n \"markup_floating\",\n None,\n \"Text markup of the floating window state. Supports pangomarkup with markup=True.\"\n 'e.g., \"{}\" or \"<span underline=\"low\">{}</span>\"',\n ),\n (\n \"markup_focused\",\n None,\n \"Text markup of the focused window state. Supports pangomarkup with markup=True.\"\n 'e.g., \"{}\" or \"<span underline=\"low\">{}</span>\"',\n ),\n (\n \"icon_size\",\n None,\n \"Icon size. \" \"(Calculated if set to None. Icons are hidden if set to 0.)\",\n ),\n (\n \"theme_mode\",\n None,\n \"When to use theme icons. `None` = never, `preferred` = use if available, \"\n \"`fallback` = use if app does not provide icon directly. \"\n \"`preferred` and `fallback` have identical behaviour on Wayland.\",\n ),\n (\n \"theme_path\",\n None,\n \"Path to icon theme to be used by pyxdg for icons. 
``None`` will use default icon theme.\",\n ),\n (\n \"window_name_location\",\n False,\n \"Whether to show the location of the window in the title.\",\n ),\n (\n \"window_name_location_offset\",\n 0,\n \"The offset given to window loction\",\n ),\n ]\n\n def __init__(self, **config):\n base._Widget.__init__(self, bar.STRETCH, **config)\n self.add_defaults(TaskList.defaults)\n self.add_defaults(base.PaddingMixin.defaults)\n self.add_defaults(base.MarginMixin.defaults)\n self._icons_cache = {}\n self._box_end_positions = []\n self.markup = False\n self.clicked = None\n if self.spacing is None:\n self.spacing = self.margin_x\n\n self.add_callbacks({\"Button1\": self.select_window})\n\n def box_width(self, text):\n \"\"\"\n calculate box width for given text.\n If max_title_width is given, the returned width is limited to it.\n \"\"\"\n if self.markup:\n text = re.sub(\"<[^<]+?>\", \"\", text)\n width, _ = self.drawer.max_layout_size([text], self.font, self.fontsize)\n width = width + 2 * (self.padding_x + self.borderwidth)\n return width\n\n def get_taskname(self, window):\n \"\"\"\n Get display name for given window.\n Depending on its state minimized, maximized and floating\n appropriate characters are prepended.\n \"\"\"\n state = \"\"\n markup_str = self.markup_normal\n\n # Enforce markup and new string format behaviour when\n # at least one markup_* option is used.\n # Mixing non markup and markup may cause problems.\n if (\n self.markup_minimized\n or self.markup_maximized\n or self.markup_floating\n or self.markup_focused\n ):\n enforce_markup = True\n else:\n enforce_markup = False\n\n if window is None:\n pass\n elif window.minimized:\n state = self.txt_minimized\n markup_str = self.markup_minimized\n elif window.maximized:\n state = self.txt_maximized\n markup_str = self.markup_maximized\n elif window.floating:\n state = self.txt_floating\n markup_str = self.markup_floating\n elif window is window.group.current_window:\n markup_str = self.markup_focused\n\n window_location = (\n f\"[{window.group.windows.index(window) + self.window_name_location_offset}] \"\n if self.window_name_location\n else \"\"\n )\n window_name = window_location + window.name if window and window.name else \"?\"\n\n if callable(self.parse_text):\n try:\n window_name = self.parse_text(window_name)\n except: # noqa: E722\n logger.exception(\"parse_text function failed:\")\n\n # Emulate default widget behavior if markup_str is None\n if enforce_markup and markup_str is None:\n markup_str = \"%s{}\" % (state)\n\n if markup_str is not None:\n self.markup = True\n window_name = pangocffi.markup_escape_text(window_name)\n return markup_str.format(window_name)\n\n return \"%s%s\" % (state, window_name)\n\n @property\n def windows(self):\n if self.qtile.core.name == \"x11\":\n return [\n w\n for w in self.bar.screen.group.windows\n if w.window.get_wm_type() in (\"normal\", None)\n ]\n return self.bar.screen.group.windows\n\n def calc_box_widths(self):\n \"\"\"\n Calculate box width for each window in current group.\n If the available space is less than overall size of boxes,\n the boxes are shrunk by percentage if greater than average.\n \"\"\"\n windows = self.windows\n window_count = len(windows)\n\n # if no windows present for current group just return empty list\n if not window_count:\n return []\n\n # Determine available and max average width for task name boxes.\n width_total = self.width - 2 * self.margin_x - (window_count - 1) * self.spacing\n width_avg = width_total / window_count\n\n names = 
[self.get_taskname(w) for w in windows]\n\n if self.icon_size == 0:\n icons = len(windows) * [None]\n else:\n icons = [self.get_window_icon(w) for w in windows]\n\n # Obey title_width_method if specified\n if self.title_width_method == \"uniform\":\n width_uniform = width_total // window_count\n width_boxes = [width_uniform for w in range(window_count)]\n else:\n # Default behaviour: calculated width for each task according to\n # icon and task name consisting\n # of state abbreviation and window name\n width_boxes = [\n (\n self.box_width(names[idx])\n + ((self.icon_size + self.padding_x) if icons[idx] else 0)\n )\n for idx in range(window_count)\n ]\n\n # Obey max_title_width if specified\n if self.max_title_width:\n width_boxes = [min(w, self.max_title_width) for w in width_boxes]\n\n width_sum = sum(width_boxes)\n\n # calculated box width are to wide for available widget space:\n if width_sum > width_total:\n # sum the width of tasks shorter than calculated average\n # and calculate a ratio to shrink boxes greater than width_avg\n width_shorter_sum = sum([w for w in width_boxes if w < width_avg])\n\n ratio = (width_total - width_shorter_sum) / (width_sum - width_shorter_sum)\n # determine new box widths by shrinking boxes greater than avg\n width_boxes = [(w if w < width_avg else w * ratio) for w in width_boxes]\n\n return zip(windows, icons, names, width_boxes)\n\n def _configure(self, qtile, bar):\n base._Widget._configure(self, qtile, bar)\n\n if not has_xdg and self.theme_mode is not None:\n logger.warning(\"You must install pyxdg to use theme icons.\")\n self.theme_mode = None\n\n if self.theme_mode and self.theme_mode not in [\"preferred\", \"fallback\"]:\n logger.warning(\n \"Unexpected theme_mode (%s). Theme icons will be disabled.\", self.theme_mode\n )\n self.theme_mode = None\n\n if qtile.core.name == \"wayland\" and self.theme_mode is None and self.icon_size != 0:\n # Disable icons\n self.icon_size = 0\n\n if self.icon_size is None:\n self.icon_size = self.bar.height - 2 * (self.borderwidth + self.margin_y)\n\n if self.fontsize is None:\n calc = self.bar.height - self.margin_y * 2 - self.borderwidth * 2 - self.padding_y * 2\n self.fontsize = max(calc, 1)\n self.layout = self.drawer.textlayout(\n \"\", \"ffffff\", self.font, self.fontsize, self.fontshadow, wrap=False\n )\n self.setup_hooks()\n\n def update(self, window=None):\n if not window or window in self.windows:\n self.bar.draw()\n\n def remove_icon_cache(self, window):\n wid = window.wid\n if wid in self._icons_cache:\n self._icons_cache.pop(wid)\n\n def invalidate_cache(self, window):\n self.remove_icon_cache(window)\n self.update(window)\n\n def setup_hooks(self):\n hook.subscribe.client_name_updated(self.update)\n hook.subscribe.focus_change(self.update)\n hook.subscribe.float_change(self.update)\n hook.subscribe.client_urgent_hint_changed(self.update)\n\n hook.subscribe.net_wm_icon_change(self.invalidate_cache)\n hook.subscribe.client_killed(self.remove_icon_cache)\n\n def drawtext(self, text, textcolor, width):\n if self.markup:\n self.layout.markup = self.markup\n\n self.layout.text = text\n\n self.layout.font_family = self.font\n self.layout.font_size = self.fontsize\n self.layout.colour = textcolor\n if width is not None:\n self.layout.width = width\n\n def drawbox(\n self,\n offset,\n text,\n bordercolor,\n textcolor,\n width=None,\n rounded=False,\n block=False,\n icon=None,\n ):\n self.drawtext(text, textcolor, width)\n\n icon_padding = (self.icon_size + self.padding_x) if icon else 0\n padding_x = 
[self.padding_x + icon_padding, self.padding_x]\n\n if bordercolor is None:\n # border colour is set to None when we don't want to draw a border at all\n # Rather than dealing with alpha blending issues, we just set border width\n # to 0.\n border_width = 0\n framecolor = self.background or self.bar.background\n else:\n border_width = self.borderwidth\n framecolor = bordercolor\n\n framed = self.layout.framed(border_width, framecolor, padding_x, self.padding_y)\n if block and bordercolor is not None:\n framed.draw_fill(offset, self.margin_y, rounded)\n else:\n framed.draw(offset, self.margin_y, rounded)\n\n if icon:\n self.draw_icon(icon, offset)\n\n def get_clicked(self, x, y):\n box_start = self.margin_x\n for box_end, win in zip(self._box_end_positions, self.windows):\n if box_start <= x <= box_end:\n return win\n else:\n box_start = box_end + self.spacing\n # not found any , return None\n return None\n\n def button_press(self, x, y, button):\n self.clicked = self.get_clicked(x, y)\n base._Widget.button_press(self, x, y, button)\n\n def select_window(self):\n if self.clicked:\n current_win = self.bar.screen.group.current_window\n window = self.clicked\n if window is not current_win:\n window.group.focus(window, False)\n if window.floating:\n window.bring_to_front()\n else:\n window.toggle_minimize()\n\n def _get_class_icon(self, window):\n if not getattr(window, \"icons\", False):\n return None\n\n icons = sorted(\n iter(window.icons.items()),\n key=lambda x: abs(self.icon_size - int(x[0].split(\"x\")[0])),\n )\n icon = icons[0]\n width, height = map(int, icon[0].split(\"x\"))\n\n img = cairocffi.ImageSurface.create_for_data(\n icon[1], cairocffi.FORMAT_ARGB32, width, height\n )\n\n return img\n\n def _get_theme_icon(self, window):\n classes = window.get_wm_class()\n\n if not classes:\n return None\n\n icon = None\n\n for cl in classes:\n for app in set([cl, cl.lower()]):\n icon = getIconPath(app, theme=self.theme_path)\n if icon is not None:\n break\n else:\n continue\n break\n\n if not icon:\n return None\n\n img = Img.from_path(icon)\n\n return img.surface\n\n def get_window_icon(self, window):\n if not getattr(window, \"icons\", False) and self.theme_mode is None:\n return None\n\n cache = self._icons_cache.get(window.wid)\n if cache:\n return cache\n\n surface = None\n img = None\n\n if self.qtile.core.name == \"x11\":\n img = self._get_class_icon(window)\n\n if self.theme_mode == \"preferred\" or (self.theme_mode == \"fallback\" and img is None):\n xdg_img = self._get_theme_icon(window)\n if xdg_img:\n img = xdg_img\n\n if img is not None:\n surface = cairocffi.SurfacePattern(img)\n height = img.get_height()\n width = img.get_width()\n scaler = cairocffi.Matrix()\n if height != self.icon_size:\n sp = height / self.icon_size\n height = self.icon_size\n width /= sp\n scaler.scale(sp, sp)\n surface.set_matrix(scaler)\n\n self._icons_cache[window.wid] = surface\n return surface\n\n def draw_icon(self, surface, offset):\n if not surface:\n return\n\n x = offset + self.borderwidth + self.padding_x\n y = (self.height - self.icon_size) // 2\n\n self.drawer.ctx.save()\n self.drawer.ctx.translate(x, y)\n self.drawer.ctx.set_source(surface)\n self.drawer.ctx.paint()\n self.drawer.ctx.restore()\n\n def draw(self):\n self.drawer.clear(self.background or self.bar.background)\n offset = self.margin_x\n\n self._box_end_positions = []\n for w, icon, task, bw in self.calc_box_widths():\n self._box_end_positions.append(offset + bw)\n\n if w.urgent:\n border = self.urgent_border\n text_color = 
border\n elif w is w.group.current_window:\n border = self.border\n text_color = border\n else:\n border = self.unfocused_border or None\n text_color = self.foreground\n\n if self.highlight_method == \"text\":\n border = None\n else:\n text_color = self.foreground\n\n textwidth = (\n bw - 2 * self.padding_x - ((self.icon_size + self.padding_x) if icon else 0)\n )\n self.drawbox(\n offset,\n task,\n border,\n text_color,\n rounded=self.rounded,\n block=(self.highlight_method == \"block\"),\n width=textwidth,\n icon=icon,\n )\n offset += bw + self.spacing\n\n self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.width)\n", "path": "libqtile/widget/tasklist.py" } ]
diff --git a/CHANGELOG b/CHANGELOG index 08904917f6..e714988635 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -59,6 +59,7 @@ Qtile x.xx.x, released XXXX-XX-XX: - Fix setting tiled position by mouse for layouts using _SimpleLayoutBase. To support this in other layouts, add a swap method taking two windows. - Fix unfullscreening bug in conjunction with Chromium based clients when auto_fullscreen is set to `False`. - Ensure `CurrentLayoutIcon` expands paths for custom folders. + - Fix vertical alignment of icons in `TaskList` widget * python version support - We have added support for python 3.11 and pypy 3.9. - python 3.7, 3.8 and pypy 3.7 are not longer supported. diff --git a/libqtile/widget/tasklist.py b/libqtile/widget/tasklist.py index 6a75f29485..14995b9ff5 100644 --- a/libqtile/widget/tasklist.py +++ b/libqtile/widget/tasklist.py @@ -531,7 +531,7 @@ def draw_icon(self, surface, offset): return x = offset + self.borderwidth + self.padding_x - y = self.padding_y + self.borderwidth + y = (self.height - self.icon_size) // 2 self.drawer.ctx.save() self.drawer.ctx.translate(x, y)
litestar-org__litestar-2433
Bug: `2.2.0` does not have `[full]` group
### Description
The move from `poetry` to `pdm` in 2.2.0 has a regression for the `[full]` group.
### URL to code causing the issue
_No response_
### MCVE
```shell
pip install litestar[full]==2.2.0 && pip show pydantic
```
### Steps to reproduce
- `pip install litestar[full]`
- Observe that no `[full]` group is available, and `pip show $package` does not show the expected packages
### Screenshots
_No response_
### Logs
_No response_
### Litestar Version
2.2.0
### Platform
- [ ] Linux
- [ ] Mac
- [ ] Windows
- [X] Other (Please specify in the description above)
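For background: with pdm, the extras that pip can install come from the PEP 621 `[project.optional-dependencies]` table in `pyproject.toml`; pdm's own dependency groups are not published as extras, which is how a poetry-to-pdm migration can silently drop `[full]` from the wheel. A quick way to confirm the symptom from the stdlib (no assumptions beyond the package name):

```python
# Check which extras the installed litestar distribution actually declares.
from importlib.metadata import metadata

# Core package metadata carries one "Provides-Extra" entry per published extra.
extras = metadata("litestar").get_all("Provides-Extra") or []
print(sorted(extras))    # on 2.2.0, "full" is missing from this list
print("full" in extras)  # False -> `pip install litestar[full]` warns and
                         # falls back to installing only the base package
```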
[ { "content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Callable, Literal, NamedTuple\n\nfrom litestar.utils.deprecation import warn_deprecation\n\n__all__ = (\n \"ControllerRouterHandler\",\n \"PathParameterDefinition\",\n \"PathParameterDefinition\",\n \"ReservedKwargs\",\n \"ResponseType\",\n \"RouteHandlerMapItem\",\n \"RouteHandlerType\",\n)\n\nif TYPE_CHECKING:\n from typing_extensions import TypeAlias\n\n from litestar.app import Litestar\n from litestar.controller import Controller\n from litestar.handlers.asgi_handlers import ASGIRouteHandler\n from litestar.handlers.http_handlers import HTTPRouteHandler\n from litestar.handlers.websocket_handlers import WebsocketRouteHandler\n from litestar.response import Response\n from litestar.router import Router\n from litestar.types import Method\n\nReservedKwargs: TypeAlias = Literal[\"request\", \"socket\", \"headers\", \"query\", \"cookies\", \"state\", \"data\"]\nRouteHandlerType: TypeAlias = \"HTTPRouteHandler | WebsocketRouteHandler | ASGIRouteHandler\"\nResponseType: TypeAlias = \"type[Response]\"\nControllerRouterHandler: TypeAlias = \"type[Controller] | RouteHandlerType | Router | Callable[..., Any]\"\nRouteHandlerMapItem: TypeAlias = 'dict[Method | Literal[\"websocket\", \"asgi\"], RouteHandlerType]'\n\n# deprecated\n_LitestarType: TypeAlias = \"Litestar\"\n\n\nclass PathParameterDefinition(NamedTuple):\n \"\"\"Path parameter tuple.\"\"\"\n\n name: str\n full: str\n type: type\n parser: Callable[[str], Any] | None\n\n\ndef __getattr__(name: str) -> Any:\n if name == \"LitestarType\":\n warn_deprecation(\n \"2.3.0\",\n \"LitestarType\",\n \"import\",\n removal_in=\"3.0.0\",\n alternative=\"Litestar\",\n )\n return _LitestarType\n raise AttributeError(f\"module {__name__!r} has no attribute {name!r}\")\n", "path": "litestar/types/internal_types.py" } ]
[ { "content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Callable, Literal, NamedTuple\n\nfrom litestar.utils.deprecation import warn_deprecation\n\n__all__ = (\n \"ControllerRouterHandler\",\n \"PathParameterDefinition\",\n \"PathParameterDefinition\",\n \"ReservedKwargs\",\n \"ResponseType\",\n \"RouteHandlerMapItem\",\n \"RouteHandlerType\",\n)\n\nif TYPE_CHECKING:\n from typing_extensions import TypeAlias\n\n from litestar.app import Litestar\n from litestar.controller import Controller\n from litestar.handlers.asgi_handlers import ASGIRouteHandler\n from litestar.handlers.http_handlers import HTTPRouteHandler\n from litestar.handlers.websocket_handlers import WebsocketRouteHandler\n from litestar.response import Response\n from litestar.router import Router\n from litestar.types import Method\n\nReservedKwargs: TypeAlias = Literal[\"request\", \"socket\", \"headers\", \"query\", \"cookies\", \"state\", \"data\"]\nRouteHandlerType: TypeAlias = \"HTTPRouteHandler | WebsocketRouteHandler | ASGIRouteHandler\"\nResponseType: TypeAlias = \"type[Response]\"\nControllerRouterHandler: TypeAlias = \"type[Controller] | RouteHandlerType | Router | Callable[..., Any]\"\nRouteHandlerMapItem: TypeAlias = 'dict[Method | Literal[\"websocket\", \"asgi\"], RouteHandlerType]'\n\n# deprecated\n_LitestarType: TypeAlias = \"Litestar\"\n\n\nclass PathParameterDefinition(NamedTuple):\n \"\"\"Path parameter tuple.\"\"\"\n\n name: str\n full: str\n type: type\n parser: Callable[[str], Any] | None\n\n\ndef __getattr__(name: str) -> Any:\n if name == \"LitestarType\":\n warn_deprecation(\n \"2.2.1\",\n \"LitestarType\",\n \"import\",\n removal_in=\"3.0.0\",\n alternative=\"Litestar\",\n )\n return _LitestarType\n raise AttributeError(f\"module {__name__!r} has no attribute {name!r}\")\n", "path": "litestar/types/internal_types.py" } ]
diff --git a/.gitignore b/.gitignore index 12e59642c5..ca6f32027e 100644 --- a/.gitignore +++ b/.gitignore @@ -24,7 +24,6 @@ target/ *.iml .DS_Store .coverage -.python-version .ruff_cache /docs/_build/ coverage.* @@ -34,3 +33,9 @@ setup.py .pdm.toml .pdm-python .pdm-build/ +# pdm - PEP 582 +__pypackages__/ + +# pyenv / rtx / asdf +.tool-versions +.python-version diff --git a/litestar/types/internal_types.py b/litestar/types/internal_types.py index 1224426b85..963467424c 100644 --- a/litestar/types/internal_types.py +++ b/litestar/types/internal_types.py @@ -48,7 +48,7 @@ class PathParameterDefinition(NamedTuple): def __getattr__(name: str) -> Any: if name == "LitestarType": warn_deprecation( - "2.3.0", + "2.2.1", "LitestarType", "import", removal_in="3.0.0", diff --git a/pdm.lock b/pdm.lock index 4d45f9a940..fb216aa70f 100644 --- a/pdm.lock +++ b/pdm.lock @@ -2,11 +2,11 @@ # It is not intended for manual editing. [metadata] -groups = ["default", "annotated-types", "attrs", "brotli", "cli", "cryptography", "dev", "dev-contrib", "docs", "jinja", "jwt", "linting", "mako", "minijinja", "opentelemetry", "piccolo", "picologging", "prometheus", "pydantic", "redis", "sqlalchemy", "standard", "structlog", "test"] +groups = ["default", "annotated-types", "attrs", "brotli", "cli", "cryptography", "dev", "dev-contrib", "docs", "jinja", "jwt", "linting", "mako", "minijinja", "opentelemetry", "piccolo", "picologging", "prometheus", "pydantic", "redis", "sqlalchemy", "standard", "structlog", "test", "full"] cross_platform = true static_urls = false lock_version = "4.3" -content_hash = "sha256:fa9d278def9dc75febddad6d5bd694fa3cb3841c586aaae35d21d4ce7976bc86" +content_hash = "sha256:41b63a9ef2b44b3ac01593f1ee20d7cdb14a762e03facd36c6106fd2de32eedd" [[package]] name = "accessible-pygments" @@ -804,12 +804,12 @@ files = [ [[package]] name = "cssutils" -version = "2.7.1" -requires_python = ">=3.7" +version = "2.9.0" +requires_python = ">=3.8" summary = "A CSS Cascading Style Sheets library for Python" files = [ - {file = "cssutils-2.7.1-py3-none-any.whl", hash = "sha256:1e92e0d9dab2ec8af9f38d715393964ba533dc3beacab9b072511dfc241db775"}, - {file = "cssutils-2.7.1.tar.gz", hash = "sha256:340ecfd9835d21df8f98500f0dfcea0aee41cb4e19ecbc2cf94f0a6d36d7cb6c"}, + {file = "cssutils-2.9.0-py3-none-any.whl", hash = "sha256:f8b013169e281c0c6083207366c5005f5dd4549055f7aba840384fb06a78745c"}, + {file = "cssutils-2.9.0.tar.gz", hash = "sha256:89477b3d17d790e97b9fb4def708767061055795aae6f7c82ae32e967c9be4cd"}, ] [[package]] @@ -1256,7 +1256,7 @@ files = [ [[package]] name = "hypothesis" -version = "6.87.3" +version = "6.87.4" requires_python = ">=3.8" summary = "A library for property-based testing" dependencies = [ @@ -1265,8 +1265,8 @@ dependencies = [ "sortedcontainers<3.0.0,>=2.1.0", ] files = [ - {file = "hypothesis-6.87.3-py3-none-any.whl", hash = "sha256:684a7b56a4a2e990cb0efb3124c2d886c5138453550b6f4f4a3b75bfc8ef24d4"}, - {file = "hypothesis-6.87.3.tar.gz", hash = "sha256:e67391efb9e6f663031f493d04b5edfb2e47bfc5a6ea56190aed3bc7993d5899"}, + {file = "hypothesis-6.87.4-py3-none-any.whl", hash = "sha256:0526d5bb45fd82b3ddc7d16ab04897652725cea938b62fff78bfd945dfc9775f"}, + {file = "hypothesis-6.87.4.tar.gz", hash = "sha256:c508779be66e266c45dbf9c1b2713e560dfd89abb044d92eafe91e8b2728af01"}, ] [[package]] @@ -2944,7 +2944,7 @@ files = [ [[package]] name = "sqlalchemy" -version = "2.0.21" +version = "2.0.22" requires_python = ">=3.7" summary = "Database Abstraction Library" dependencies = [ @@ -2952,48 +2952,38 @@ 
dependencies = [ "typing-extensions>=4.2.0", ] files = [ - {file = "SQLAlchemy-2.0.21-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1e7dc99b23e33c71d720c4ae37ebb095bebebbd31a24b7d99dfc4753d2803ede"}, - {file = "SQLAlchemy-2.0.21-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7f0c4ee579acfe6c994637527c386d1c22eb60bc1c1d36d940d8477e482095d4"}, - {file = "SQLAlchemy-2.0.21-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f7d57a7e140efe69ce2d7b057c3f9a595f98d0bbdfc23fd055efdfbaa46e3a5"}, - {file = "SQLAlchemy-2.0.21-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca38746eac23dd7c20bec9278d2058c7ad662b2f1576e4c3dbfcd7c00cc48fa"}, - {file = "SQLAlchemy-2.0.21-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3cf229704074bce31f7f47d12883afee3b0a02bb233a0ba45ddbfe542939cca4"}, - {file = "SQLAlchemy-2.0.21-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fb87f763b5d04a82ae84ccff25554ffd903baafba6698e18ebaf32561f2fe4aa"}, - {file = "SQLAlchemy-2.0.21-cp310-cp310-win32.whl", hash = "sha256:89e274604abb1a7fd5c14867a412c9d49c08ccf6ce3e1e04fffc068b5b6499d4"}, - {file = "SQLAlchemy-2.0.21-cp310-cp310-win_amd64.whl", hash = "sha256:e36339a68126ffb708dc6d1948161cea2a9e85d7d7b0c54f6999853d70d44430"}, - {file = "SQLAlchemy-2.0.21-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bf8eebccc66829010f06fbd2b80095d7872991bfe8415098b9fe47deaaa58063"}, - {file = "SQLAlchemy-2.0.21-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b977bfce15afa53d9cf6a632482d7968477625f030d86a109f7bdfe8ce3c064a"}, - {file = "SQLAlchemy-2.0.21-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ff3dc2f60dbf82c9e599c2915db1526d65415be323464f84de8db3e361ba5b9"}, - {file = "SQLAlchemy-2.0.21-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44ac5c89b6896f4740e7091f4a0ff2e62881da80c239dd9408f84f75a293dae9"}, - {file = "SQLAlchemy-2.0.21-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:87bf91ebf15258c4701d71dcdd9c4ba39521fb6a37379ea68088ce8cd869b446"}, - {file = "SQLAlchemy-2.0.21-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b69f1f754d92eb1cc6b50938359dead36b96a1dcf11a8670bff65fd9b21a4b09"}, - {file = "SQLAlchemy-2.0.21-cp311-cp311-win32.whl", hash = "sha256:af520a730d523eab77d754f5cf44cc7dd7ad2d54907adeb3233177eeb22f271b"}, - {file = "SQLAlchemy-2.0.21-cp311-cp311-win_amd64.whl", hash = "sha256:141675dae56522126986fa4ca713739d00ed3a6f08f3c2eb92c39c6dfec463ce"}, - {file = "SQLAlchemy-2.0.21-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:56628ca27aa17b5890391ded4e385bf0480209726f198799b7e980c6bd473bd7"}, - {file = "SQLAlchemy-2.0.21-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db726be58837fe5ac39859e0fa40baafe54c6d54c02aba1d47d25536170b690f"}, - {file = "SQLAlchemy-2.0.21-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7421c1bfdbb7214313919472307be650bd45c4dc2fcb317d64d078993de045b"}, - {file = "SQLAlchemy-2.0.21-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:632784f7a6f12cfa0e84bf2a5003b07660addccf5563c132cd23b7cc1d7371a9"}, - {file = "SQLAlchemy-2.0.21-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:f6f7276cf26145a888f2182a98f204541b519d9ea358a65d82095d9c9e22f917"}, - {file = "SQLAlchemy-2.0.21-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2a1f7ffac934bc0ea717fa1596f938483fb8c402233f9b26679b4f7b38d6ab6e"}, - {file = "SQLAlchemy-2.0.21-cp312-cp312-win32.whl", hash = 
"sha256:bfece2f7cec502ec5f759bbc09ce711445372deeac3628f6fa1c16b7fb45b682"}, - {file = "SQLAlchemy-2.0.21-cp312-cp312-win_amd64.whl", hash = "sha256:526b869a0f4f000d8d8ee3409d0becca30ae73f494cbb48801da0129601f72c6"}, - {file = "SQLAlchemy-2.0.21-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b4eae01faee9f2b17f08885e3f047153ae0416648f8e8c8bd9bc677c5ce64be9"}, - {file = "SQLAlchemy-2.0.21-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3eb7c03fe1cd3255811cd4e74db1ab8dca22074d50cd8937edf4ef62d758cdf4"}, - {file = "SQLAlchemy-2.0.21-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2d494b6a2a2d05fb99f01b84cc9af9f5f93bf3e1e5dbdafe4bed0c2823584c1"}, - {file = "SQLAlchemy-2.0.21-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b19ae41ef26c01a987e49e37c77b9ad060c59f94d3b3efdfdbf4f3daaca7b5fe"}, - {file = "SQLAlchemy-2.0.21-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:fc6b15465fabccc94bf7e38777d665b6a4f95efd1725049d6184b3a39fd54880"}, - {file = "SQLAlchemy-2.0.21-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:014794b60d2021cc8ae0f91d4d0331fe92691ae5467a00841f7130fe877b678e"}, - {file = "SQLAlchemy-2.0.21-cp38-cp38-win32.whl", hash = "sha256:0268256a34806e5d1c8f7ee93277d7ea8cc8ae391f487213139018b6805aeaf6"}, - {file = "SQLAlchemy-2.0.21-cp38-cp38-win_amd64.whl", hash = "sha256:73c079e21d10ff2be54a4699f55865d4b275fd6c8bd5d90c5b1ef78ae0197301"}, - {file = "SQLAlchemy-2.0.21-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:785e2f2c1cb50d0a44e2cdeea5fd36b5bf2d79c481c10f3a88a8be4cfa2c4615"}, - {file = "SQLAlchemy-2.0.21-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c111cd40910ffcb615b33605fc8f8e22146aeb7933d06569ac90f219818345ef"}, - {file = "SQLAlchemy-2.0.21-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9cba4e7369de663611ce7460a34be48e999e0bbb1feb9130070f0685e9a6b66"}, - {file = "SQLAlchemy-2.0.21-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50a69067af86ec7f11a8e50ba85544657b1477aabf64fa447fd3736b5a0a4f67"}, - {file = "SQLAlchemy-2.0.21-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ccb99c3138c9bde118b51a289d90096a3791658da9aea1754667302ed6564f6e"}, - {file = "SQLAlchemy-2.0.21-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:513fd5b6513d37e985eb5b7ed89da5fd9e72354e3523980ef00d439bc549c9e9"}, - {file = "SQLAlchemy-2.0.21-cp39-cp39-win32.whl", hash = "sha256:f9fefd6298433b6e9188252f3bff53b9ff0443c8fde27298b8a2b19f6617eeb9"}, - {file = "SQLAlchemy-2.0.21-cp39-cp39-win_amd64.whl", hash = "sha256:2e617727fe4091cedb3e4409b39368f424934c7faa78171749f704b49b4bb4ce"}, - {file = "SQLAlchemy-2.0.21-py3-none-any.whl", hash = "sha256:ea7da25ee458d8f404b93eb073116156fd7d8c2a776d8311534851f28277b4ce"}, - {file = "SQLAlchemy-2.0.21.tar.gz", hash = "sha256:05b971ab1ac2994a14c56b35eaaa91f86ba080e9ad481b20d99d77f381bb6258"}, + {file = "SQLAlchemy-2.0.22-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f146c61ae128ab43ea3a0955de1af7e1633942c2b2b4985ac51cc292daf33222"}, + {file = "SQLAlchemy-2.0.22-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:875de9414393e778b655a3d97d60465eb3fae7c919e88b70cc10b40b9f56042d"}, + {file = "SQLAlchemy-2.0.22-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e04ab55cf49daf1aeb8c622c54d23fa4bec91cb051a43cc24351ba97e1dd09f5"}, + {file = "SQLAlchemy-2.0.22-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:14cd3bcbb853379fef2cd01e7c64a5d6f1d005406d877ed9509afb7a05ff40a5"}, + {file = 
"SQLAlchemy-2.0.22-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4f6ff392b27a743c1ad346d215655503cec64405d3b694228b3454878bf21590"}, + {file = "SQLAlchemy-2.0.22-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f776c2c30f0e5f4db45c3ee11a5f2a8d9de68e81eb73ec4237de1e32e04ae81c"}, + {file = "SQLAlchemy-2.0.22-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8f1792d20d2f4e875ce7a113f43c3561ad12b34ff796b84002a256f37ce9437"}, + {file = "SQLAlchemy-2.0.22-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d80eeb5189d7d4b1af519fc3f148fe7521b9dfce8f4d6a0820e8f5769b005051"}, + {file = "SQLAlchemy-2.0.22-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:69fd9e41cf9368afa034e1c81f3570afb96f30fcd2eb1ef29cb4d9371c6eece2"}, + {file = "SQLAlchemy-2.0.22-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:54bcceaf4eebef07dadfde424f5c26b491e4a64e61761dea9459103ecd6ccc95"}, + {file = "SQLAlchemy-2.0.22-cp311-cp311-win32.whl", hash = "sha256:7ee7ccf47aa503033b6afd57efbac6b9e05180f492aeed9fcf70752556f95624"}, + {file = "SQLAlchemy-2.0.22-cp311-cp311-win_amd64.whl", hash = "sha256:b560f075c151900587ade06706b0c51d04b3277c111151997ea0813455378ae0"}, + {file = "SQLAlchemy-2.0.22-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:2c9bac865ee06d27a1533471405ad240a6f5d83195eca481f9fc4a71d8b87df8"}, + {file = "SQLAlchemy-2.0.22-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:625b72d77ac8ac23da3b1622e2da88c4aedaee14df47c8432bf8f6495e655de2"}, + {file = "SQLAlchemy-2.0.22-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b39a6e21110204a8c08d40ff56a73ba542ec60bab701c36ce721e7990df49fb9"}, + {file = "SQLAlchemy-2.0.22-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53a766cb0b468223cafdf63e2d37f14a4757476157927b09300c8c5832d88560"}, + {file = "SQLAlchemy-2.0.22-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0e1ce8ebd2e040357dde01a3fb7d30d9b5736b3e54a94002641dfd0aa12ae6ce"}, + {file = "SQLAlchemy-2.0.22-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:505f503763a767556fa4deae5194b2be056b64ecca72ac65224381a0acab7ebe"}, + {file = "SQLAlchemy-2.0.22-cp312-cp312-win32.whl", hash = "sha256:154a32f3c7b00de3d090bc60ec8006a78149e221f1182e3edcf0376016be9396"}, + {file = "SQLAlchemy-2.0.22-cp312-cp312-win_amd64.whl", hash = "sha256:129415f89744b05741c6f0b04a84525f37fbabe5dc3774f7edf100e7458c48cd"}, + {file = "SQLAlchemy-2.0.22-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3aa1472bf44f61dd27987cd051f1c893b7d3b17238bff8c23fceaef4f1133868"}, + {file = "SQLAlchemy-2.0.22-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:56a7e2bb639df9263bf6418231bc2a92a773f57886d371ddb7a869a24919face"}, + {file = "SQLAlchemy-2.0.22-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c6c3e9350f9fb16de5b5e5fbf17b578811a52d71bb784cc5ff71acb7de2a7f9"}, + {file = "SQLAlchemy-2.0.22-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:af66001d7b76a3fab0d5e4c1ec9339ac45748bc4a399cbc2baa48c1980d3c1f4"}, + {file = "SQLAlchemy-2.0.22-cp38-cp38-win32.whl", hash = "sha256:9e55dff5ec115316dd7a083cdc1a52de63693695aecf72bc53a8e1468ce429e5"}, + {file = "SQLAlchemy-2.0.22-cp38-cp38-win_amd64.whl", hash = "sha256:4e869a8ff7ee7a833b74868a0887e8462445ec462432d8cbeff5e85f475186da"}, + {file = "SQLAlchemy-2.0.22-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9886a72c8e6371280cb247c5d32c9c8fa141dc560124348762db8a8b236f8692"}, + {file = "SQLAlchemy-2.0.22-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:a571bc8ac092a3175a1d994794a8e7a1f2f651e7c744de24a19b4f740fe95034"}, + {file = "SQLAlchemy-2.0.22-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b0b3f2686c3f162123adba3cb8b626ed7e9b8433ab528e36ed270b4f70d1cdb"}, + {file = "SQLAlchemy-2.0.22-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4bb062784f37b2d75fd9b074c8ec360ad5df71f933f927e9e95c50eb8e05323c"}, + {file = "SQLAlchemy-2.0.22-py3-none-any.whl", hash = "sha256:3076740335e4aaadd7deb3fe6dcb96b3015f1613bd190a4e1634e1b99b02ec86"}, + {file = "SQLAlchemy-2.0.22.tar.gz", hash = "sha256:5434cc601aa17570d79e5377f5fd45ff92f9379e2abed0be5e8c2fba8d353d2b"}, ] [[package]] diff --git a/pyproject.toml b/pyproject.toml index f71247e045..25abf22cdd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,7 +55,7 @@ maintainers = [ name = "litestar" readme = "README.md" requires-python = ">=3.8,<4.0" -version = "2.2.0" +version = "2.2.1" [project.urls] Blog = "https://blog.litestar.dev" @@ -64,9 +64,9 @@ Discord = "https://discord.gg/MmcwxztmQb" "Issue Tracker" = "https://github.com/litestar-org/litestar/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc" Reddit = "https://www.reddit.com/r/LitestarAPI" Twitter = "https://twitter.com/LitestarAPI" -documentation = "https://docs.litestar.dev/" -homepage = "https://litestar.dev/" -repository = "https://github.com/litestar-org/litestar" +Documentation = "https://docs.litestar.dev/" +Homepage = "https://litestar.dev/" +Repository = "https://github.com/litestar-org/litestar" [project.optional-dependencies] annotated-types = ["annotated-types"] @@ -87,6 +87,9 @@ redis = ["redis[hiredis]>=4.4.4"] sqlalchemy = ["advanced-alchemy==0.2.2"] standard = ["jinja2", "jsbeautifier", "uvicorn[standard]", "fast-query-parsers>=1.0.2"] structlog = ["structlog"] +full = [ + "litestar[annotated-types,attrs,brotli,cli,cryptography,jinja,jwt,mako,minijinja,opentelemetry,piccolo,picologging,prometheus,pydantic,redis,sqlalchemy,standard,structlog]", +] [tool.pdm.dev-dependencies] dev = [
napari__napari-2063
Console no longer working
## 🐛 Bug
I think we've got a regression, from #2036, where the console is no longer visible when opened with the button in the viewer.

<img width="1200" alt="Screen Shot 2021-01-01 at 3 34 47 PM" src="https://user-images.githubusercontent.com/6531703/103447936-ec407100-4c46-11eb-8487-76cf63aca2a6.png">

I just see the above. I haven't looked into a fix yet, but was able to narrow it down to that commit. I imagine it will be a pretty simple fix. cc @tlambert03
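The regression reduces to a classic Qt-in-Python pitfall: on a plain `QDockWidget`, `widget` is a method, so assigning to it as an attribute only shadows the method and never docks anything. A minimal sketch of that failure mode, with no Qt required — `FakeDockWidget` is an illustrative stand-in, not napari code:

```python
# Stand-in for QDockWidget, where ``widget()`` is a plain method
# rather than a settable property.
class FakeDockWidget:
    def __init__(self):
        self._child = None

    def setWidget(self, child):
        # The call that actually docks the child widget.
        self._child = child

    def widget(self):
        return self._child


dock = FakeDockWidget()

# Buggy pattern: creates an instance attribute named ``widget`` that
# shadows the method; the dock never receives the console.
dock.widget = 'console'
assert dock._child is None

# Fixed pattern: call the setter explicitly.
dock = FakeDockWidget()
dock.setWidget('console')
assert dock.widget() == 'console'
```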
[ { "content": "import os.path\nimport warnings\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Optional\n\nimport numpy as np\nfrom qtpy.QtCore import QCoreApplication, QObject, QSize, Qt\nfrom qtpy.QtGui import QCursor, QGuiApplication\nfrom qtpy.QtWidgets import QFileDialog, QSplitter, QVBoxLayout, QWidget\n\nfrom ..components.camera import Camera\nfrom ..components.layerlist import LayerList\nfrom ..utils import config, perf\nfrom ..utils.interactions import (\n ReadOnlyWrapper,\n mouse_move_callbacks,\n mouse_press_callbacks,\n mouse_release_callbacks,\n mouse_wheel_callbacks,\n)\nfrom ..utils.io import imsave\nfrom ..utils.key_bindings import components_to_key_combo\nfrom ..utils.theme import get_theme, template\nfrom .dialogs.qt_about_key_bindings import QtAboutKeyBindings\nfrom .dialogs.screenshot_dialog import ScreenshotDialog\nfrom .qt_resources import get_stylesheet\nfrom .tracing.qt_performance import QtPerformance\nfrom .utils import QImg2array, circle_pixmap, square_pixmap\nfrom .widgets.qt_dims import QtDims\nfrom .widgets.qt_layerlist import QtLayerList\nfrom .widgets.qt_viewer_buttons import QtLayerButtons, QtViewerButtons\nfrom .widgets.qt_viewer_dock_widget import QtViewerDockWidget\n\nfrom .._vispy import ( # isort:skip\n VispyAxesVisual,\n VispyCamera,\n VispyCanvas,\n VispyScaleBarVisual,\n VispyWelcomeVisual,\n create_vispy_visual,\n)\n\n\nclass QtViewer(QSplitter):\n \"\"\"Qt view for the napari Viewer model.\n\n Parameters\n ----------\n viewer : napari.components.ViewerModel\n Napari viewer containing the rendered scene, layers, and controls.\n welcome : bool\n Flag to show a welcome message when no layers are present in the\n canvas.\n\n Attributes\n ----------\n canvas : vispy.scene.SceneCanvas\n Canvas for rendering the current view.\n console : QtConsole\n iPython console terminal integrated into the napari GUI.\n controls : QtLayerControlsContainer\n Qt view for GUI controls.\n dims : napari.qt_dims.QtDims\n Dimension sliders; Qt View for Dims model.\n dockConsole : QtViewerDockWidget\n QWidget wrapped in a QDockWidget with forwarded viewer events.\n aboutKeybindings : QtAboutKeybindings\n Key bindings for the 'About' Qt dialog.\n dockLayerControls : QtViewerDockWidget\n QWidget wrapped in a QDockWidget with forwarded viewer events.\n dockLayerList : QtViewerDockWidget\n QWidget wrapped in a QDockWidget with forwarded viewer events.\n layerButtons : QtLayerButtons\n Button controls for napari layers.\n layers : QtLayerList\n Qt view for LayerList controls.\n layer_to_visual : dict\n Dictionary mapping napari layers with their corresponding vispy_layers.\n view : vispy scene widget\n View displayed by vispy canvas. 
Adds a vispy ViewBox as a child widget.\n viewer : napari.components.ViewerModel\n Napari viewer containing the rendered scene, layers, and controls.\n viewerButtons : QtViewerButtons\n Button controls for the napari viewer.\n \"\"\"\n\n raw_stylesheet = get_stylesheet()\n\n def __init__(self, viewer, welcome=False):\n\n # Avoid circular import.\n from .layer_controls import QtLayerControlsContainer\n\n super().__init__()\n self.setAttribute(Qt.WA_DeleteOnClose)\n\n QCoreApplication.setAttribute(\n Qt.AA_UseStyleSheetPropagationInWidgetStyles, True\n )\n\n self.viewer = viewer\n self.dims = QtDims(self.viewer.dims)\n self.controls = QtLayerControlsContainer(self.viewer)\n self.layers = QtLayerList(self.viewer.layers)\n self.layerButtons = QtLayerButtons(self.viewer)\n self.viewerButtons = QtViewerButtons(self.viewer)\n self._console = None\n\n layerList = QWidget()\n layerList.setObjectName('layerList')\n layerListLayout = QVBoxLayout()\n layerListLayout.addWidget(self.layerButtons)\n layerListLayout.addWidget(self.layers)\n layerListLayout.addWidget(self.viewerButtons)\n layerListLayout.setContentsMargins(8, 4, 8, 6)\n layerList.setLayout(layerListLayout)\n self.dockLayerList = QtViewerDockWidget(\n self,\n layerList,\n name='layer list',\n area='left',\n allowed_areas=['left', 'right'],\n )\n self.dockLayerControls = QtViewerDockWidget(\n self,\n self.controls,\n name='layer controls',\n area='left',\n allowed_areas=['left', 'right'],\n )\n self.dockConsole = QtViewerDockWidget(\n self,\n QWidget(),\n name='console',\n area='bottom',\n allowed_areas=['top', 'bottom'],\n shortcut='Ctrl+Shift+C',\n )\n self.dockConsole.setVisible(False)\n # because the console is loaded lazily in the @getter, this line just\n # gets (or creates) the console when the dock console is made visible.\n self.dockConsole.visibilityChanged.connect(\n lambda visible: self.console if visible else None\n )\n self.dockLayerControls.visibilityChanged.connect(self._constrain_width)\n self.dockLayerList.setMaximumWidth(258)\n self.dockLayerList.setMinimumWidth(258)\n\n # Only created if using perfmon.\n self.dockPerformance = self._create_performance_dock_widget()\n\n # This dictionary holds the corresponding vispy visual for each layer\n self.layer_to_visual = {}\n self.viewerButtons.consoleButton.clicked.connect(\n self.toggle_console_visibility\n )\n\n self._create_canvas()\n\n main_widget = QWidget()\n main_layout = QVBoxLayout()\n main_layout.setContentsMargins(10, 22, 10, 2)\n main_layout.addWidget(self.canvas.native)\n main_layout.addWidget(self.dims)\n main_layout.setSpacing(10)\n main_widget.setLayout(main_layout)\n\n self.setOrientation(Qt.Vertical)\n self.addWidget(main_widget)\n\n self._last_visited_dir = str(Path.home())\n\n self._cursors = {\n 'cross': Qt.CrossCursor,\n 'forbidden': Qt.ForbiddenCursor,\n 'pointing': Qt.PointingHandCursor,\n 'standard': QCursor(),\n }\n\n self._update_theme()\n\n self.viewer.camera.events.interactive.connect(self._on_interactive)\n self.viewer.cursor.events.style.connect(self._on_cursor)\n self.viewer.cursor.events.size.connect(self._on_cursor)\n self.viewer.events.theme.connect(self._update_theme)\n self.viewer.layers.events.reordered.connect(self._reorder_layers)\n self.viewer.layers.events.inserted.connect(self._on_add_layer_change)\n self.viewer.layers.events.removed.connect(self._remove_layer)\n\n # stop any animations whenever the layers change\n self.viewer.events.layers_change.connect(lambda x: self.dims.stop())\n\n self.setAcceptDrops(True)\n\n for layer in 
self.viewer.layers:\n self._add_layer(layer)\n\n self.view = self.canvas.central_widget.add_view()\n self.camera = VispyCamera(\n self.view, self.viewer.camera, self.viewer.dims\n )\n self.canvas.connect(self.camera.on_draw)\n\n # Add axes, scale bar and welcome visuals.\n self._add_visuals(welcome)\n\n # Create the experimental QtPool for octree and/or monitor.\n self._qt_poll = _create_qt_poll(self, self.viewer.camera)\n\n # Create the experimental RemoteManager for the monitor.\n self._remote_manager = _create_remote_manager(\n self.viewer.layers, self._qt_poll\n )\n\n def _create_canvas(self) -> None:\n \"\"\"Create the canvas and hook up events.\"\"\"\n self.canvas = VispyCanvas(\n keys=None,\n vsync=True,\n parent=self,\n size=self.viewer._canvas_size[::-1],\n )\n self.canvas.events.ignore_callback_errors = False\n self.canvas.events.draw.connect(self.dims.enable_play)\n self.canvas.native.setMinimumSize(QSize(200, 200))\n self.canvas.context.set_depth_func('lequal')\n\n self.canvas.connect(self.on_mouse_move)\n self.canvas.connect(self.on_mouse_press)\n self.canvas.connect(self.on_mouse_release)\n self.canvas.connect(self.on_key_press)\n self.canvas.connect(self.on_key_release)\n self.canvas.connect(self.on_mouse_wheel)\n self.canvas.connect(self.on_draw)\n self.canvas.connect(self.on_resize)\n\n def _add_visuals(self, welcome: bool) -> None:\n \"\"\"Add visuals for axes, scale bar, and welcome text.\n\n Parameters\n ----------\n welcome : bool\n Show the welcome visual.\n \"\"\"\n\n self.axes = VispyAxesVisual(\n self.viewer.axes,\n self.viewer.camera,\n self.viewer.dims,\n parent=self.view.scene,\n order=1e6,\n )\n self.scale_bar = VispyScaleBarVisual(\n self.viewer.scale_bar,\n self.viewer.camera,\n parent=self.view,\n order=1e6 + 1,\n )\n self.canvas.events.resize.connect(self.scale_bar._on_position_change)\n\n self._show_welcome = welcome and config.allow_welcome_visual\n if self._show_welcome:\n self.welcome = VispyWelcomeVisual(\n self.viewer, parent=self.view, order=-100\n )\n self.viewer.events.layers_change.connect(\n self.welcome._on_visible_change\n )\n self.viewer.events.theme.connect(self.welcome._on_theme_change)\n self.canvas.events.resize.connect(self.welcome._on_canvas_change)\n\n def _create_performance_dock_widget(self):\n \"\"\"Create the dock widget that shows performance metrics.\n \"\"\"\n if perf.USE_PERFMON:\n return QtViewerDockWidget(\n self,\n QtPerformance(),\n name='performance',\n area='bottom',\n shortcut='Ctrl+Shift+P',\n )\n return None\n\n @property\n def console(self):\n \"\"\"QtConsole: iPython console terminal integrated into the napari GUI.\n \"\"\"\n if self._console is None:\n from .widgets.qt_console import QtConsole\n\n self.console = QtConsole({'viewer': self.viewer})\n return self._console\n\n @console.setter\n def console(self, console):\n self._console = console\n self.dockConsole.widget = console\n self._update_theme()\n\n def _constrain_width(self, event):\n \"\"\"Allow the layer controls to be wider, only if floated.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n The napari event that triggered this method.\n \"\"\"\n if self.dockLayerControls.isFloating():\n self.controls.setMaximumWidth(700)\n else:\n self.controls.setMaximumWidth(220)\n\n def _on_add_layer_change(self, event):\n \"\"\"When a layer is added, set its parent and order.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n The napari event that triggered this method.\n \"\"\"\n layer = event.value\n self._add_layer(layer)\n\n def 
_add_layer(self, layer):\n \"\"\"When a layer is added, set its parent and order.\n\n Parameters\n ----------\n layer : napari.layers.Layer\n Layer to be added.\n \"\"\"\n vispy_layer = create_vispy_visual(layer)\n\n # QtPoll is experimental.\n if self._qt_poll is not None:\n # QtPoll will call VipyBaseImage._on_poll() when the camera\n # moves or the timer goes off.\n self._qt_poll.events.poll.connect(vispy_layer._on_poll)\n\n # In the other direction, some visuals need to tell\n # QtPoll to start polling. When they receive new data\n # and need to be polled to load it over some number\n # of frames.\n if vispy_layer.events is not None:\n vispy_layer.events.loaded.connect(self._qt_poll.wake_up)\n\n vispy_layer.node.parent = self.view.scene\n vispy_layer.order = len(self.viewer.layers) - 1\n self.layer_to_visual[layer] = vispy_layer\n\n def _remove_layer(self, event):\n \"\"\"When a layer is removed, remove its parent.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n The napari event that triggered this method.\n \"\"\"\n layer = event.value\n vispy_layer = self.layer_to_visual[layer]\n vispy_layer.close()\n del vispy_layer\n self._reorder_layers(None)\n\n def _reorder_layers(self, event):\n \"\"\"When the list is reordered, propagate changes to draw order.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n The napari event that triggered this method.\n \"\"\"\n for i, layer in enumerate(self.viewer.layers):\n vispy_layer = self.layer_to_visual[layer]\n vispy_layer.order = i\n self.canvas._draw_order.clear()\n self.canvas.update()\n\n def _save_layers_dialog(self, selected=False):\n \"\"\"Save layers (all or selected) to disk, using ``LayerList.save()``.\n\n Parameters\n ----------\n selected : bool\n If True, only layers that are selected in the viewer will be saved.\n By default, all layers are saved.\n \"\"\"\n msg = ''\n if not len(self.viewer.layers):\n msg = \"There are no layers in the viewer to save\"\n elif selected and not len(self.viewer.layers.selected):\n msg = (\n 'Please select one or more layers to save,'\n '\\nor use \"Save all layers...\"'\n )\n if msg:\n raise IOError(\"Nothing to save\")\n\n filename, _ = QFileDialog.getSaveFileName(\n parent=self,\n caption=f'Save {\"selected\" if selected else \"all\"} layers',\n directory=self._last_visited_dir, # home dir by default\n )\n\n if filename:\n with warnings.catch_warnings(record=True) as wa:\n saved = self.viewer.layers.save(filename, selected=selected)\n error_messages = \"\\n\".join(\n [str(x.message.args[0]) for x in wa]\n )\n if not saved:\n raise IOError(\n f\"File {filename} save failed.\\n{error_messages}\"\n )\n\n def screenshot(self, path=None):\n \"\"\"Take currently displayed screen and convert to an image array.\n\n Parameters\n ----------\n path : str\n Filename for saving screenshot image.\n\n Returns\n -------\n image : array\n Numpy array of type ubyte and shape (h, w, 4). 
Index [0, 0] is the\n upper-left corner of the rendered region.\n \"\"\"\n img = QImg2array(self.canvas.native.grabFramebuffer())\n if path is not None:\n imsave(path, img) # scikit-image imsave method\n return img\n\n def _screenshot_dialog(self):\n \"\"\"Save screenshot of current display, default .png\"\"\"\n dial = ScreenshotDialog(self.screenshot, self, self._last_visited_dir)\n if dial.exec_():\n self._last_visited_dir = os.path.dirname(dial.selectedFiles()[0])\n\n def _open_files_dialog(self):\n \"\"\"Add files from the menubar.\"\"\"\n filenames, _ = QFileDialog.getOpenFileNames(\n parent=self,\n caption='Select file(s)...',\n directory=self._last_visited_dir, # home dir by default\n )\n if (filenames != []) and (filenames is not None):\n self.viewer.open(filenames)\n\n def _open_files_dialog_as_stack_dialog(self):\n \"\"\"Add files as a stack, from the menubar.\"\"\"\n filenames, _ = QFileDialog.getOpenFileNames(\n parent=self,\n caption='Select files...',\n directory=self._last_visited_dir, # home dir by default\n )\n if (filenames != []) and (filenames is not None):\n self.viewer.open(filenames, stack=True)\n\n def _open_folder_dialog(self):\n \"\"\"Add a folder of files from the menubar.\"\"\"\n folder = QFileDialog.getExistingDirectory(\n parent=self,\n caption='Select folder...',\n directory=self._last_visited_dir, # home dir by default\n )\n if folder not in {'', None}:\n self.viewer.open([folder])\n\n def _toggle_chunk_outlines(self):\n \"\"\"Toggle whether we are drawing outlines around the chunks.\"\"\"\n from ..layers.image.experimental.octree_image import OctreeImage\n\n for layer in self.viewer.layers:\n if isinstance(layer, OctreeImage):\n layer.display.show_grid = not layer.display.show_grid\n\n def _on_interactive(self, event):\n \"\"\"Link interactive attributes of view and viewer.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n The napari event that triggered this method.\n \"\"\"\n self.view.interactive = self.viewer.camera.interactive\n\n def _on_cursor(self, event):\n \"\"\"Set the appearance of the mouse cursor.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n The napari event that triggered this method.\n \"\"\"\n cursor = self.viewer.cursor.style\n # Scale size by zoom if needed\n if self.viewer.cursor.scaled:\n size = self.viewer.cursor.size * self.viewer.camera.zoom\n else:\n size = self.viewer.cursor.size\n\n if cursor == 'square':\n # make sure the square fits within the current canvas\n if size < 8 or size > (\n min(*self.viewer.window.qt_viewer.canvas.size) - 4\n ):\n q_cursor = self._cursors['cross']\n else:\n q_cursor = QCursor(square_pixmap(size))\n elif cursor == 'circle':\n q_cursor = QCursor(circle_pixmap(size))\n else:\n q_cursor = self._cursors[cursor]\n\n self.canvas.native.setCursor(q_cursor)\n\n def _update_theme(self, event=None):\n \"\"\"Update the napari GUI theme.\"\"\"\n # template and apply the primary stylesheet\n theme = get_theme(self.viewer.theme)\n themed_stylesheet = template(self.raw_stylesheet, **theme)\n if self._console is not None:\n self.console._update_theme(theme, themed_stylesheet)\n self.setStyleSheet(themed_stylesheet)\n self.canvas.bgcolor = theme['canvas']\n\n def toggle_console_visibility(self, event=None):\n \"\"\"Toggle console visible and not visible.\n\n Imports the console the first time it is requested.\n \"\"\"\n # force instantiation of console if not already instantiated\n _ = self.console\n\n viz = not self.dockConsole.isVisible()\n # modulate visibility at the dock widget 
level as console is docakable\n self.dockConsole.setVisible(viz)\n if self.dockConsole.isFloating():\n self.dockConsole.setFloating(True)\n\n self.viewerButtons.consoleButton.setProperty(\n 'expanded', self.dockConsole.isVisible()\n )\n self.viewerButtons.consoleButton.style().unpolish(\n self.viewerButtons.consoleButton\n )\n self.viewerButtons.consoleButton.style().polish(\n self.viewerButtons.consoleButton\n )\n\n def show_key_bindings_dialog(self, event=None):\n dialog = QtAboutKeyBindings(self.viewer, parent=self)\n dialog.show()\n\n def _map_canvas2world(self, position):\n \"\"\"Map position from canvas pixels into world coordinates.\n\n Parameters\n ----------\n position : 2-tuple\n Position in canvas (x, y).\n\n Returns\n -------\n coords : tuple\n Position in world coordinates, matches the total dimensionality\n of the viewer.\n \"\"\"\n nd = self.viewer.dims.ndisplay\n transform = self.view.camera.transform.inverse\n mapped_position = transform.map(list(position))[:nd]\n position_world_slice = mapped_position[::-1]\n\n position_world = list(self.viewer.dims.point)\n for i, d in enumerate(self.viewer.dims.displayed):\n position_world[d] = position_world_slice[i]\n\n return tuple(position_world)\n\n @property\n def _canvas_corners_in_world(self):\n \"\"\"Location of the corners of canvas in world coordinates.\n\n Returns\n -------\n corners : 2-tuple\n Coordinates of top left and bottom right canvas pixel in the world.\n \"\"\"\n # Find corners of canvas in world coordinates\n top_left = self._map_canvas2world([0, 0])\n bottom_right = self._map_canvas2world(self.canvas.size)\n return np.array([top_left, bottom_right])\n\n def on_resize(self, event):\n \"\"\"Called whenever canvas is resized.\n\n event : vispy.util.event.Event\n The vispy event that triggered this method.\n \"\"\"\n self.viewer._canvas_size = tuple(self.canvas.size[::-1])\n\n def on_mouse_wheel(self, event):\n \"\"\"Called whenever mouse wheel activated in canvas.\n\n Parameters\n ----------\n event : qtpy.QtCore.QEvent\n \"\"\"\n if event.pos is None:\n return\n\n event = ReadOnlyWrapper(event)\n self.viewer.cursor.position = self._map_canvas2world(list(event.pos))\n mouse_wheel_callbacks(self.viewer, event)\n\n layer = self.viewer.active_layer\n if layer is not None:\n mouse_wheel_callbacks(layer, event)\n\n def on_mouse_press(self, event):\n \"\"\"Called whenever mouse pressed in canvas.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n The napari event that triggered this method.\n \"\"\"\n if event.pos is None:\n return\n\n event = ReadOnlyWrapper(event)\n self.viewer.cursor.position = self._map_canvas2world(list(event.pos))\n mouse_press_callbacks(self.viewer, event)\n\n layer = self.viewer.active_layer\n if layer is not None:\n mouse_press_callbacks(layer, event)\n\n def on_mouse_move(self, event):\n \"\"\"Called whenever mouse moves over canvas.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n The napari event that triggered this method.\n \"\"\"\n if event.pos is None:\n return\n\n self.viewer.cursor.position = self._map_canvas2world(list(event.pos))\n mouse_move_callbacks(self.viewer, event)\n\n layer = self.viewer.active_layer\n if layer is not None:\n mouse_move_callbacks(layer, event)\n\n def on_mouse_release(self, event):\n \"\"\"Called whenever mouse released in canvas.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n The napari event that triggered this method.\n \"\"\"\n if event.pos is None:\n return\n\n self.viewer.cursor.position = 
self._map_canvas2world(list(event.pos))\n mouse_release_callbacks(self.viewer, event)\n\n layer = self.viewer.active_layer\n if layer is not None:\n mouse_release_callbacks(layer, event)\n\n def on_key_press(self, event):\n \"\"\"Called whenever key pressed in canvas.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n The napari event that triggered this method.\n \"\"\"\n if (\n event.native is not None\n and event.native.isAutoRepeat()\n and event.key.name not in ['Up', 'Down', 'Left', 'Right']\n ) or event.key is None:\n # pass if no key is present or if key is held down, unless the\n # key being held down is one of the navigation keys\n # this helps for scrolling, etc.\n return\n\n combo = components_to_key_combo(event.key.name, event.modifiers)\n self.viewer.press_key(combo)\n\n def on_key_release(self, event):\n \"\"\"Called whenever key released in canvas.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n The napari event that triggered this method.\n \"\"\"\n if event.key is None or (\n # on linux press down is treated as multiple press and release\n event.native is not None\n and event.native.isAutoRepeat()\n ):\n return\n combo = components_to_key_combo(event.key.name, event.modifiers)\n self.viewer.release_key(combo)\n\n def on_draw(self, event):\n \"\"\"Called whenever the canvas is drawn.\n\n This is triggered from vispy whenever new data is sent to the canvas or\n the camera is moved and is connected in the `QtViewer`.\n \"\"\"\n for layer in self.viewer.layers:\n if layer.ndim <= self.viewer.dims.ndim:\n layer._update_draw(\n scale_factor=1 / self.viewer.camera.zoom,\n corner_pixels=self._canvas_corners_in_world[\n :, -layer.ndim :\n ],\n shape_threshold=self.canvas.size,\n )\n\n def keyPressEvent(self, event):\n \"\"\"Called whenever a key is pressed.\n\n Parameters\n ----------\n event : qtpy.QtCore.QEvent\n Event from the Qt context.\n \"\"\"\n self.canvas._backend._keyEvent(self.canvas.events.key_press, event)\n event.accept()\n\n def keyReleaseEvent(self, event):\n \"\"\"Called whenever a key is released.\n\n Parameters\n ----------\n event : qtpy.QtCore.QEvent\n Event from the Qt context.\n \"\"\"\n self.canvas._backend._keyEvent(self.canvas.events.key_release, event)\n event.accept()\n\n def dragEnterEvent(self, event):\n \"\"\"Ignore event if not dragging & dropping a file or URL to open.\n\n Using event.ignore() here allows the event to pass through the\n parent widget to its child widget, otherwise the parent widget\n would catch the event and not pass it on to the child widget.\n\n Parameters\n ----------\n event : qtpy.QtCore.QEvent\n Event from the Qt context.\n \"\"\"\n if event.mimeData().hasUrls():\n event.accept()\n else:\n event.ignore()\n\n def dropEvent(self, event):\n \"\"\"Add local files and web URLS with drag and drop.\n\n Parameters\n ----------\n event : qtpy.QtCore.QEvent\n Event from the Qt context.\n \"\"\"\n shift_down = QGuiApplication.keyboardModifiers() & Qt.ShiftModifier\n filenames = []\n for url in event.mimeData().urls():\n if url.isLocalFile():\n filenames.append(url.toLocalFile())\n else:\n filenames.append(url.toString())\n self.viewer.open(filenames, stack=bool(shift_down))\n\n def closeEvent(self, event):\n \"\"\"Cleanup and close.\n\n Parameters\n ----------\n event : qtpy.QtCore.QEvent\n Event from the Qt context.\n \"\"\"\n self.layers.close()\n\n # if the viewer.QtDims object is playing an axis, we need to terminate\n # the AnimationThread before close, otherwise it will cauyse a segFault\n # or Abort 
trap. (calling stop() when no animation is occurring is also\n # not a problem)\n self.dims.stop()\n self.canvas.native.deleteLater()\n if self._console is not None:\n self.console.close()\n self.dockConsole.deleteLater()\n event.accept()\n\n\nif TYPE_CHECKING:\n from ..components.experimental.remote import RemoteManager\n from .experimental.qt_poll import QtPoll\n\n\ndef _create_qt_poll(parent: QObject, camera: Camera) -> 'Optional[QtPoll]':\n \"\"\"Create and return a QtPoll instance, if needed.\n\n Create a QtPoll instance for octree or monitor.\n\n Octree needs QtPoll so VispyTiledImageLayer can finish in-progress\n loads even if the camera is not moving. Once loading is finish it\n will tell QtPoll it no longer need to be polled.\n\n Monitor need QtPoll to poll for incoming messages. We can probably get\n rid of this need to be polled by using a thread that's blocked waiting\n for new messages, and that posts those messages as Qt Events. That\n might be something to do in the future.\n\n Parameters\n ----------\n parent : QObject\n Parent Qt object.\n camera : Camera\n Camera that the QtPoll object will listen to.\n\n Return\n ------\n Optional[QtPoll]\n The new QtPoll instance, if we need one.\n \"\"\"\n if not config.async_octree and not config.monitor:\n return None\n\n from .experimental.qt_poll import QtPoll\n\n qt_poll = QtPoll(parent)\n camera.events.connect(qt_poll.on_camera)\n return qt_poll\n\n\ndef _create_remote_manager(\n layers: LayerList, qt_poll\n) -> 'Optional[RemoteManager]':\n \"\"\"Create and return a RemoteManager instance, if we need one.\n\n Parameters\n ----------\n layers : LayersList\n The viewer's layers.\n qt_poll : QtPoll\n The viewer's QtPoll instance.\n \"\"\"\n if not config.monitor:\n return None # Not using the monitor at all\n\n from ..components.experimental.monitor import monitor\n from ..components.experimental.remote import RemoteManager\n\n # Start the monitor so we can access its events. The monitor has no\n # dependencies to napari except to utils.Event.\n started = monitor.start()\n\n if not started:\n return None # Probably not >= Python 3.9, so no manager is needed.\n\n # Create the remote manager and have monitor call its process_command()\n # method to execute commands from clients.\n manager = RemoteManager(layers)\n\n # RemoteManager will process incoming command from the monitor.\n monitor.run_command_event.connect(manager.process_command)\n\n # QtPoll should pool the RemoteManager and the Monitor.\n qt_poll.events.poll.connect(manager.on_poll)\n qt_poll.events.poll.connect(monitor.on_poll)\n\n return manager\n", "path": "napari/_qt/qt_viewer.py" } ]
[ { "content": "import os.path\nimport warnings\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Optional\n\nimport numpy as np\nfrom qtpy.QtCore import QCoreApplication, QObject, QSize, Qt\nfrom qtpy.QtGui import QCursor, QGuiApplication\nfrom qtpy.QtWidgets import QFileDialog, QSplitter, QVBoxLayout, QWidget\n\nfrom ..components.camera import Camera\nfrom ..components.layerlist import LayerList\nfrom ..utils import config, perf\nfrom ..utils.interactions import (\n ReadOnlyWrapper,\n mouse_move_callbacks,\n mouse_press_callbacks,\n mouse_release_callbacks,\n mouse_wheel_callbacks,\n)\nfrom ..utils.io import imsave\nfrom ..utils.key_bindings import components_to_key_combo\nfrom ..utils.theme import get_theme, template\nfrom .dialogs.qt_about_key_bindings import QtAboutKeyBindings\nfrom .dialogs.screenshot_dialog import ScreenshotDialog\nfrom .qt_resources import get_stylesheet\nfrom .tracing.qt_performance import QtPerformance\nfrom .utils import QImg2array, circle_pixmap, square_pixmap\nfrom .widgets.qt_dims import QtDims\nfrom .widgets.qt_layerlist import QtLayerList\nfrom .widgets.qt_viewer_buttons import QtLayerButtons, QtViewerButtons\nfrom .widgets.qt_viewer_dock_widget import QtViewerDockWidget\n\nfrom .._vispy import ( # isort:skip\n VispyAxesVisual,\n VispyCamera,\n VispyCanvas,\n VispyScaleBarVisual,\n VispyWelcomeVisual,\n create_vispy_visual,\n)\n\n\nclass QtViewer(QSplitter):\n \"\"\"Qt view for the napari Viewer model.\n\n Parameters\n ----------\n viewer : napari.components.ViewerModel\n Napari viewer containing the rendered scene, layers, and controls.\n welcome : bool\n Flag to show a welcome message when no layers are present in the\n canvas.\n\n Attributes\n ----------\n canvas : vispy.scene.SceneCanvas\n Canvas for rendering the current view.\n console : QtConsole\n iPython console terminal integrated into the napari GUI.\n controls : QtLayerControlsContainer\n Qt view for GUI controls.\n dims : napari.qt_dims.QtDims\n Dimension sliders; Qt View for Dims model.\n dockConsole : QtViewerDockWidget\n QWidget wrapped in a QDockWidget with forwarded viewer events.\n aboutKeybindings : QtAboutKeybindings\n Key bindings for the 'About' Qt dialog.\n dockLayerControls : QtViewerDockWidget\n QWidget wrapped in a QDockWidget with forwarded viewer events.\n dockLayerList : QtViewerDockWidget\n QWidget wrapped in a QDockWidget with forwarded viewer events.\n layerButtons : QtLayerButtons\n Button controls for napari layers.\n layers : QtLayerList\n Qt view for LayerList controls.\n layer_to_visual : dict\n Dictionary mapping napari layers with their corresponding vispy_layers.\n view : vispy scene widget\n View displayed by vispy canvas. 
Adds a vispy ViewBox as a child widget.\n viewer : napari.components.ViewerModel\n Napari viewer containing the rendered scene, layers, and controls.\n viewerButtons : QtViewerButtons\n Button controls for the napari viewer.\n \"\"\"\n\n raw_stylesheet = get_stylesheet()\n\n def __init__(self, viewer, welcome=False):\n\n # Avoid circular import.\n from .layer_controls import QtLayerControlsContainer\n\n super().__init__()\n self.setAttribute(Qt.WA_DeleteOnClose)\n\n QCoreApplication.setAttribute(\n Qt.AA_UseStyleSheetPropagationInWidgetStyles, True\n )\n\n self.viewer = viewer\n self.dims = QtDims(self.viewer.dims)\n self.controls = QtLayerControlsContainer(self.viewer)\n self.layers = QtLayerList(self.viewer.layers)\n self.layerButtons = QtLayerButtons(self.viewer)\n self.viewerButtons = QtViewerButtons(self.viewer)\n self._console = None\n\n layerList = QWidget()\n layerList.setObjectName('layerList')\n layerListLayout = QVBoxLayout()\n layerListLayout.addWidget(self.layerButtons)\n layerListLayout.addWidget(self.layers)\n layerListLayout.addWidget(self.viewerButtons)\n layerListLayout.setContentsMargins(8, 4, 8, 6)\n layerList.setLayout(layerListLayout)\n self.dockLayerList = QtViewerDockWidget(\n self,\n layerList,\n name='layer list',\n area='left',\n allowed_areas=['left', 'right'],\n )\n self.dockLayerControls = QtViewerDockWidget(\n self,\n self.controls,\n name='layer controls',\n area='left',\n allowed_areas=['left', 'right'],\n )\n self.dockConsole = QtViewerDockWidget(\n self,\n QWidget(),\n name='console',\n area='bottom',\n allowed_areas=['top', 'bottom'],\n shortcut='Ctrl+Shift+C',\n )\n self.dockConsole.setVisible(False)\n # because the console is loaded lazily in the @getter, this line just\n # gets (or creates) the console when the dock console is made visible.\n self.dockConsole.visibilityChanged.connect(\n lambda visible: self.console if visible else None\n )\n self.dockLayerControls.visibilityChanged.connect(self._constrain_width)\n self.dockLayerList.setMaximumWidth(258)\n self.dockLayerList.setMinimumWidth(258)\n\n # Only created if using perfmon.\n self.dockPerformance = self._create_performance_dock_widget()\n\n # This dictionary holds the corresponding vispy visual for each layer\n self.layer_to_visual = {}\n self.viewerButtons.consoleButton.clicked.connect(\n self.toggle_console_visibility\n )\n\n self._create_canvas()\n\n main_widget = QWidget()\n main_layout = QVBoxLayout()\n main_layout.setContentsMargins(10, 22, 10, 2)\n main_layout.addWidget(self.canvas.native)\n main_layout.addWidget(self.dims)\n main_layout.setSpacing(10)\n main_widget.setLayout(main_layout)\n\n self.setOrientation(Qt.Vertical)\n self.addWidget(main_widget)\n\n self._last_visited_dir = str(Path.home())\n\n self._cursors = {\n 'cross': Qt.CrossCursor,\n 'forbidden': Qt.ForbiddenCursor,\n 'pointing': Qt.PointingHandCursor,\n 'standard': QCursor(),\n }\n\n self._update_theme()\n\n self.viewer.camera.events.interactive.connect(self._on_interactive)\n self.viewer.cursor.events.style.connect(self._on_cursor)\n self.viewer.cursor.events.size.connect(self._on_cursor)\n self.viewer.events.theme.connect(self._update_theme)\n self.viewer.layers.events.reordered.connect(self._reorder_layers)\n self.viewer.layers.events.inserted.connect(self._on_add_layer_change)\n self.viewer.layers.events.removed.connect(self._remove_layer)\n\n # stop any animations whenever the layers change\n self.viewer.events.layers_change.connect(lambda x: self.dims.stop())\n\n self.setAcceptDrops(True)\n\n for layer in 
self.viewer.layers:\n self._add_layer(layer)\n\n self.view = self.canvas.central_widget.add_view()\n self.camera = VispyCamera(\n self.view, self.viewer.camera, self.viewer.dims\n )\n self.canvas.connect(self.camera.on_draw)\n\n # Add axes, scale bar and welcome visuals.\n self._add_visuals(welcome)\n\n # Create the experimental QtPool for octree and/or monitor.\n self._qt_poll = _create_qt_poll(self, self.viewer.camera)\n\n # Create the experimental RemoteManager for the monitor.\n self._remote_manager = _create_remote_manager(\n self.viewer.layers, self._qt_poll\n )\n\n def _create_canvas(self) -> None:\n \"\"\"Create the canvas and hook up events.\"\"\"\n self.canvas = VispyCanvas(\n keys=None,\n vsync=True,\n parent=self,\n size=self.viewer._canvas_size[::-1],\n )\n self.canvas.events.ignore_callback_errors = False\n self.canvas.events.draw.connect(self.dims.enable_play)\n self.canvas.native.setMinimumSize(QSize(200, 200))\n self.canvas.context.set_depth_func('lequal')\n\n self.canvas.connect(self.on_mouse_move)\n self.canvas.connect(self.on_mouse_press)\n self.canvas.connect(self.on_mouse_release)\n self.canvas.connect(self.on_key_press)\n self.canvas.connect(self.on_key_release)\n self.canvas.connect(self.on_mouse_wheel)\n self.canvas.connect(self.on_draw)\n self.canvas.connect(self.on_resize)\n\n def _add_visuals(self, welcome: bool) -> None:\n \"\"\"Add visuals for axes, scale bar, and welcome text.\n\n Parameters\n ----------\n welcome : bool\n Show the welcome visual.\n \"\"\"\n\n self.axes = VispyAxesVisual(\n self.viewer.axes,\n self.viewer.camera,\n self.viewer.dims,\n parent=self.view.scene,\n order=1e6,\n )\n self.scale_bar = VispyScaleBarVisual(\n self.viewer.scale_bar,\n self.viewer.camera,\n parent=self.view,\n order=1e6 + 1,\n )\n self.canvas.events.resize.connect(self.scale_bar._on_position_change)\n\n self._show_welcome = welcome and config.allow_welcome_visual\n if self._show_welcome:\n self.welcome = VispyWelcomeVisual(\n self.viewer, parent=self.view, order=-100\n )\n self.viewer.events.layers_change.connect(\n self.welcome._on_visible_change\n )\n self.viewer.events.theme.connect(self.welcome._on_theme_change)\n self.canvas.events.resize.connect(self.welcome._on_canvas_change)\n\n def _create_performance_dock_widget(self):\n \"\"\"Create the dock widget that shows performance metrics.\n \"\"\"\n if perf.USE_PERFMON:\n return QtViewerDockWidget(\n self,\n QtPerformance(),\n name='performance',\n area='bottom',\n shortcut='Ctrl+Shift+P',\n )\n return None\n\n @property\n def console(self):\n \"\"\"QtConsole: iPython console terminal integrated into the napari GUI.\n \"\"\"\n if self._console is None:\n from .widgets.qt_console import QtConsole\n\n self.console = QtConsole({'viewer': self.viewer})\n return self._console\n\n @console.setter\n def console(self, console):\n self._console = console\n self.dockConsole.setWidget(console)\n self._update_theme()\n\n def _constrain_width(self, event):\n \"\"\"Allow the layer controls to be wider, only if floated.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n The napari event that triggered this method.\n \"\"\"\n if self.dockLayerControls.isFloating():\n self.controls.setMaximumWidth(700)\n else:\n self.controls.setMaximumWidth(220)\n\n def _on_add_layer_change(self, event):\n \"\"\"When a layer is added, set its parent and order.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n The napari event that triggered this method.\n \"\"\"\n layer = event.value\n self._add_layer(layer)\n\n def 
_add_layer(self, layer):\n \"\"\"When a layer is added, set its parent and order.\n\n Parameters\n ----------\n layer : napari.layers.Layer\n Layer to be added.\n \"\"\"\n vispy_layer = create_vispy_visual(layer)\n\n # QtPoll is experimental.\n if self._qt_poll is not None:\n # QtPoll will call VipyBaseImage._on_poll() when the camera\n # moves or the timer goes off.\n self._qt_poll.events.poll.connect(vispy_layer._on_poll)\n\n # In the other direction, some visuals need to tell\n # QtPoll to start polling. When they receive new data\n # and need to be polled to load it over some number\n # of frames.\n if vispy_layer.events is not None:\n vispy_layer.events.loaded.connect(self._qt_poll.wake_up)\n\n vispy_layer.node.parent = self.view.scene\n vispy_layer.order = len(self.viewer.layers) - 1\n self.layer_to_visual[layer] = vispy_layer\n\n def _remove_layer(self, event):\n \"\"\"When a layer is removed, remove its parent.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n The napari event that triggered this method.\n \"\"\"\n layer = event.value\n vispy_layer = self.layer_to_visual[layer]\n vispy_layer.close()\n del vispy_layer\n self._reorder_layers(None)\n\n def _reorder_layers(self, event):\n \"\"\"When the list is reordered, propagate changes to draw order.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n The napari event that triggered this method.\n \"\"\"\n for i, layer in enumerate(self.viewer.layers):\n vispy_layer = self.layer_to_visual[layer]\n vispy_layer.order = i\n self.canvas._draw_order.clear()\n self.canvas.update()\n\n def _save_layers_dialog(self, selected=False):\n \"\"\"Save layers (all or selected) to disk, using ``LayerList.save()``.\n\n Parameters\n ----------\n selected : bool\n If True, only layers that are selected in the viewer will be saved.\n By default, all layers are saved.\n \"\"\"\n msg = ''\n if not len(self.viewer.layers):\n msg = \"There are no layers in the viewer to save\"\n elif selected and not len(self.viewer.layers.selected):\n msg = (\n 'Please select one or more layers to save,'\n '\\nor use \"Save all layers...\"'\n )\n if msg:\n raise IOError(\"Nothing to save\")\n\n filename, _ = QFileDialog.getSaveFileName(\n parent=self,\n caption=f'Save {\"selected\" if selected else \"all\"} layers',\n directory=self._last_visited_dir, # home dir by default\n )\n\n if filename:\n with warnings.catch_warnings(record=True) as wa:\n saved = self.viewer.layers.save(filename, selected=selected)\n error_messages = \"\\n\".join(\n [str(x.message.args[0]) for x in wa]\n )\n if not saved:\n raise IOError(\n f\"File {filename} save failed.\\n{error_messages}\"\n )\n\n def screenshot(self, path=None):\n \"\"\"Take currently displayed screen and convert to an image array.\n\n Parameters\n ----------\n path : str\n Filename for saving screenshot image.\n\n Returns\n -------\n image : array\n Numpy array of type ubyte and shape (h, w, 4). 
Index [0, 0] is the\n upper-left corner of the rendered region.\n \"\"\"\n img = QImg2array(self.canvas.native.grabFramebuffer())\n if path is not None:\n imsave(path, img) # scikit-image imsave method\n return img\n\n def _screenshot_dialog(self):\n \"\"\"Save screenshot of current display, default .png\"\"\"\n dial = ScreenshotDialog(self.screenshot, self, self._last_visited_dir)\n if dial.exec_():\n self._last_visited_dir = os.path.dirname(dial.selectedFiles()[0])\n\n def _open_files_dialog(self):\n \"\"\"Add files from the menubar.\"\"\"\n filenames, _ = QFileDialog.getOpenFileNames(\n parent=self,\n caption='Select file(s)...',\n directory=self._last_visited_dir, # home dir by default\n )\n if (filenames != []) and (filenames is not None):\n self.viewer.open(filenames)\n\n def _open_files_dialog_as_stack_dialog(self):\n \"\"\"Add files as a stack, from the menubar.\"\"\"\n filenames, _ = QFileDialog.getOpenFileNames(\n parent=self,\n caption='Select files...',\n directory=self._last_visited_dir, # home dir by default\n )\n if (filenames != []) and (filenames is not None):\n self.viewer.open(filenames, stack=True)\n\n def _open_folder_dialog(self):\n \"\"\"Add a folder of files from the menubar.\"\"\"\n folder = QFileDialog.getExistingDirectory(\n parent=self,\n caption='Select folder...',\n directory=self._last_visited_dir, # home dir by default\n )\n if folder not in {'', None}:\n self.viewer.open([folder])\n\n def _toggle_chunk_outlines(self):\n \"\"\"Toggle whether we are drawing outlines around the chunks.\"\"\"\n from ..layers.image.experimental.octree_image import OctreeImage\n\n for layer in self.viewer.layers:\n if isinstance(layer, OctreeImage):\n layer.display.show_grid = not layer.display.show_grid\n\n def _on_interactive(self, event):\n \"\"\"Link interactive attributes of view and viewer.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n The napari event that triggered this method.\n \"\"\"\n self.view.interactive = self.viewer.camera.interactive\n\n def _on_cursor(self, event):\n \"\"\"Set the appearance of the mouse cursor.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n The napari event that triggered this method.\n \"\"\"\n cursor = self.viewer.cursor.style\n # Scale size by zoom if needed\n if self.viewer.cursor.scaled:\n size = self.viewer.cursor.size * self.viewer.camera.zoom\n else:\n size = self.viewer.cursor.size\n\n if cursor == 'square':\n # make sure the square fits within the current canvas\n if size < 8 or size > (\n min(*self.viewer.window.qt_viewer.canvas.size) - 4\n ):\n q_cursor = self._cursors['cross']\n else:\n q_cursor = QCursor(square_pixmap(size))\n elif cursor == 'circle':\n q_cursor = QCursor(circle_pixmap(size))\n else:\n q_cursor = self._cursors[cursor]\n\n self.canvas.native.setCursor(q_cursor)\n\n def _update_theme(self, event=None):\n \"\"\"Update the napari GUI theme.\"\"\"\n # template and apply the primary stylesheet\n theme = get_theme(self.viewer.theme)\n themed_stylesheet = template(self.raw_stylesheet, **theme)\n if self._console is not None:\n self.console._update_theme(theme, themed_stylesheet)\n self.setStyleSheet(themed_stylesheet)\n self.canvas.bgcolor = theme['canvas']\n\n def toggle_console_visibility(self, event=None):\n \"\"\"Toggle console visible and not visible.\n\n Imports the console the first time it is requested.\n \"\"\"\n # force instantiation of console if not already instantiated\n _ = self.console\n\n viz = not self.dockConsole.isVisible()\n # modulate visibility at the dock widget 
level as console is docakable\n self.dockConsole.setVisible(viz)\n if self.dockConsole.isFloating():\n self.dockConsole.setFloating(True)\n\n self.viewerButtons.consoleButton.setProperty(\n 'expanded', self.dockConsole.isVisible()\n )\n self.viewerButtons.consoleButton.style().unpolish(\n self.viewerButtons.consoleButton\n )\n self.viewerButtons.consoleButton.style().polish(\n self.viewerButtons.consoleButton\n )\n\n def show_key_bindings_dialog(self, event=None):\n dialog = QtAboutKeyBindings(self.viewer, parent=self)\n dialog.show()\n\n def _map_canvas2world(self, position):\n \"\"\"Map position from canvas pixels into world coordinates.\n\n Parameters\n ----------\n position : 2-tuple\n Position in canvas (x, y).\n\n Returns\n -------\n coords : tuple\n Position in world coordinates, matches the total dimensionality\n of the viewer.\n \"\"\"\n nd = self.viewer.dims.ndisplay\n transform = self.view.camera.transform.inverse\n mapped_position = transform.map(list(position))[:nd]\n position_world_slice = mapped_position[::-1]\n\n position_world = list(self.viewer.dims.point)\n for i, d in enumerate(self.viewer.dims.displayed):\n position_world[d] = position_world_slice[i]\n\n return tuple(position_world)\n\n @property\n def _canvas_corners_in_world(self):\n \"\"\"Location of the corners of canvas in world coordinates.\n\n Returns\n -------\n corners : 2-tuple\n Coordinates of top left and bottom right canvas pixel in the world.\n \"\"\"\n # Find corners of canvas in world coordinates\n top_left = self._map_canvas2world([0, 0])\n bottom_right = self._map_canvas2world(self.canvas.size)\n return np.array([top_left, bottom_right])\n\n def on_resize(self, event):\n \"\"\"Called whenever canvas is resized.\n\n event : vispy.util.event.Event\n The vispy event that triggered this method.\n \"\"\"\n self.viewer._canvas_size = tuple(self.canvas.size[::-1])\n\n def on_mouse_wheel(self, event):\n \"\"\"Called whenever mouse wheel activated in canvas.\n\n Parameters\n ----------\n event : qtpy.QtCore.QEvent\n \"\"\"\n if event.pos is None:\n return\n\n event = ReadOnlyWrapper(event)\n self.viewer.cursor.position = self._map_canvas2world(list(event.pos))\n mouse_wheel_callbacks(self.viewer, event)\n\n layer = self.viewer.active_layer\n if layer is not None:\n mouse_wheel_callbacks(layer, event)\n\n def on_mouse_press(self, event):\n \"\"\"Called whenever mouse pressed in canvas.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n The napari event that triggered this method.\n \"\"\"\n if event.pos is None:\n return\n\n event = ReadOnlyWrapper(event)\n self.viewer.cursor.position = self._map_canvas2world(list(event.pos))\n mouse_press_callbacks(self.viewer, event)\n\n layer = self.viewer.active_layer\n if layer is not None:\n mouse_press_callbacks(layer, event)\n\n def on_mouse_move(self, event):\n \"\"\"Called whenever mouse moves over canvas.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n The napari event that triggered this method.\n \"\"\"\n if event.pos is None:\n return\n\n self.viewer.cursor.position = self._map_canvas2world(list(event.pos))\n mouse_move_callbacks(self.viewer, event)\n\n layer = self.viewer.active_layer\n if layer is not None:\n mouse_move_callbacks(layer, event)\n\n def on_mouse_release(self, event):\n \"\"\"Called whenever mouse released in canvas.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n The napari event that triggered this method.\n \"\"\"\n if event.pos is None:\n return\n\n self.viewer.cursor.position = 
self._map_canvas2world(list(event.pos))\n mouse_release_callbacks(self.viewer, event)\n\n layer = self.viewer.active_layer\n if layer is not None:\n mouse_release_callbacks(layer, event)\n\n def on_key_press(self, event):\n \"\"\"Called whenever key pressed in canvas.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n The napari event that triggered this method.\n \"\"\"\n if (\n event.native is not None\n and event.native.isAutoRepeat()\n and event.key.name not in ['Up', 'Down', 'Left', 'Right']\n ) or event.key is None:\n # pass if no key is present or if key is held down, unless the\n # key being held down is one of the navigation keys\n # this helps for scrolling, etc.\n return\n\n combo = components_to_key_combo(event.key.name, event.modifiers)\n self.viewer.press_key(combo)\n\n def on_key_release(self, event):\n \"\"\"Called whenever key released in canvas.\n\n Parameters\n ----------\n event : napari.utils.event.Event\n The napari event that triggered this method.\n \"\"\"\n if event.key is None or (\n # on linux press down is treated as multiple press and release\n event.native is not None\n and event.native.isAutoRepeat()\n ):\n return\n combo = components_to_key_combo(event.key.name, event.modifiers)\n self.viewer.release_key(combo)\n\n def on_draw(self, event):\n \"\"\"Called whenever the canvas is drawn.\n\n This is triggered from vispy whenever new data is sent to the canvas or\n the camera is moved and is connected in the `QtViewer`.\n \"\"\"\n for layer in self.viewer.layers:\n if layer.ndim <= self.viewer.dims.ndim:\n layer._update_draw(\n scale_factor=1 / self.viewer.camera.zoom,\n corner_pixels=self._canvas_corners_in_world[\n :, -layer.ndim :\n ],\n shape_threshold=self.canvas.size,\n )\n\n def keyPressEvent(self, event):\n \"\"\"Called whenever a key is pressed.\n\n Parameters\n ----------\n event : qtpy.QtCore.QEvent\n Event from the Qt context.\n \"\"\"\n self.canvas._backend._keyEvent(self.canvas.events.key_press, event)\n event.accept()\n\n def keyReleaseEvent(self, event):\n \"\"\"Called whenever a key is released.\n\n Parameters\n ----------\n event : qtpy.QtCore.QEvent\n Event from the Qt context.\n \"\"\"\n self.canvas._backend._keyEvent(self.canvas.events.key_release, event)\n event.accept()\n\n def dragEnterEvent(self, event):\n \"\"\"Ignore event if not dragging & dropping a file or URL to open.\n\n Using event.ignore() here allows the event to pass through the\n parent widget to its child widget, otherwise the parent widget\n would catch the event and not pass it on to the child widget.\n\n Parameters\n ----------\n event : qtpy.QtCore.QEvent\n Event from the Qt context.\n \"\"\"\n if event.mimeData().hasUrls():\n event.accept()\n else:\n event.ignore()\n\n def dropEvent(self, event):\n \"\"\"Add local files and web URLS with drag and drop.\n\n Parameters\n ----------\n event : qtpy.QtCore.QEvent\n Event from the Qt context.\n \"\"\"\n shift_down = QGuiApplication.keyboardModifiers() & Qt.ShiftModifier\n filenames = []\n for url in event.mimeData().urls():\n if url.isLocalFile():\n filenames.append(url.toLocalFile())\n else:\n filenames.append(url.toString())\n self.viewer.open(filenames, stack=bool(shift_down))\n\n def closeEvent(self, event):\n \"\"\"Cleanup and close.\n\n Parameters\n ----------\n event : qtpy.QtCore.QEvent\n Event from the Qt context.\n \"\"\"\n self.layers.close()\n\n # if the viewer.QtDims object is playing an axis, we need to terminate\n # the AnimationThread before close, otherwise it will cauyse a segFault\n # or Abort 
trap. (calling stop() when no animation is occurring is also\n # not a problem)\n self.dims.stop()\n self.canvas.native.deleteLater()\n if self._console is not None:\n self.console.close()\n self.dockConsole.deleteLater()\n event.accept()\n\n\nif TYPE_CHECKING:\n from ..components.experimental.remote import RemoteManager\n from .experimental.qt_poll import QtPoll\n\n\ndef _create_qt_poll(parent: QObject, camera: Camera) -> 'Optional[QtPoll]':\n \"\"\"Create and return a QtPoll instance, if needed.\n\n Create a QtPoll instance for octree or monitor.\n\n Octree needs QtPoll so VispyTiledImageLayer can finish in-progress\n loads even if the camera is not moving. Once loading is finish it\n will tell QtPoll it no longer need to be polled.\n\n Monitor need QtPoll to poll for incoming messages. We can probably get\n rid of this need to be polled by using a thread that's blocked waiting\n for new messages, and that posts those messages as Qt Events. That\n might be something to do in the future.\n\n Parameters\n ----------\n parent : QObject\n Parent Qt object.\n camera : Camera\n Camera that the QtPoll object will listen to.\n\n Return\n ------\n Optional[QtPoll]\n The new QtPoll instance, if we need one.\n \"\"\"\n if not config.async_octree and not config.monitor:\n return None\n\n from .experimental.qt_poll import QtPoll\n\n qt_poll = QtPoll(parent)\n camera.events.connect(qt_poll.on_camera)\n return qt_poll\n\n\ndef _create_remote_manager(\n layers: LayerList, qt_poll\n) -> 'Optional[RemoteManager]':\n \"\"\"Create and return a RemoteManager instance, if we need one.\n\n Parameters\n ----------\n layers : LayersList\n The viewer's layers.\n qt_poll : QtPoll\n The viewer's QtPoll instance.\n \"\"\"\n if not config.monitor:\n return None # Not using the monitor at all\n\n from ..components.experimental.monitor import monitor\n from ..components.experimental.remote import RemoteManager\n\n # Start the monitor so we can access its events. The monitor has no\n # dependencies to napari except to utils.Event.\n started = monitor.start()\n\n if not started:\n return None # Probably not >= Python 3.9, so no manager is needed.\n\n # Create the remote manager and have monitor call its process_command()\n # method to execute commands from clients.\n manager = RemoteManager(layers)\n\n # RemoteManager will process incoming command from the monitor.\n monitor.run_command_event.connect(manager.process_command)\n\n # QtPoll should pool the RemoteManager and the Monitor.\n qt_poll.events.poll.connect(manager.on_poll)\n qt_poll.events.poll.connect(monitor.on_poll)\n\n return manager\n", "path": "napari/_qt/qt_viewer.py" } ]
diff --git a/napari/_qt/_tests/test_qt_viewer.py b/napari/_qt/_tests/test_qt_viewer.py index 8dc9cfaae40..0d31818389a 100644 --- a/napari/_qt/_tests/test_qt_viewer.py +++ b/napari/_qt/_tests/test_qt_viewer.py @@ -39,7 +39,7 @@ def test_qt_viewer_with_console(make_test_viewer): assert view._console is None # Check console is created when requested assert view.console is not None - assert view.dockConsole.widget == view.console + assert view.dockConsole.widget() is view.console def test_qt_viewer_toggle_console(make_test_viewer): @@ -51,7 +51,7 @@ def test_qt_viewer_toggle_console(make_test_viewer): # Check console has been created when it is supposed to be shown view.toggle_console_visibility(None) assert view._console is not None - assert view.dockConsole.widget == view.console + assert view.dockConsole.widget() is view.console @pytest.mark.parametrize('layer_class, data, ndim', layer_test_data) diff --git a/napari/_qt/qt_viewer.py b/napari/_qt/qt_viewer.py index 5b3975acfa6..b6c1b9276b9 100644 --- a/napari/_qt/qt_viewer.py +++ b/napari/_qt/qt_viewer.py @@ -295,7 +295,7 @@ def console(self): @console.setter def console(self, console): self._console = console - self.dockConsole.widget = console + self.dockConsole.setWidget(console) self._update_theme() def _constrain_width(self, event):
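One detail behind both hunks: `QDockWidget.widget` is a Qt accessor method, not a settable property, so the old assignment merely shadowed the method with a plain Python instance attribute and Qt never received the console widget. A minimal sketch of the distinction (assuming the `qtpy` shim napari uses and any installed Qt binding; the text edit is a stand-in, not napari's console):

```python
from qtpy.QtWidgets import QApplication, QDockWidget, QTextEdit

app = QApplication([])
console = QTextEdit()

broken = QDockWidget()
broken.widget = console           # shadows the accessor; Qt is unaware
assert broken.widget == console   # the old test assertion passed vacuously

fixed = QDockWidget()
assert fixed.widget() is None     # nothing attached yet
fixed.setWidget(console)          # the actual Qt API for docking a child
assert fixed.widget() is console  # the corrected assertion
```

This is why the old `view.dockConsole.widget == view.console` check could pass even though the console was never actually docked.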
mitmproxy__mitmproxy-2615
[requires.io] dependency update on master branch
[ { "content": "import os\nimport runpy\nfrom codecs import open\n\nfrom setuptools import setup, find_packages\n\n# Based on https://github.com/pypa/sampleproject/blob/master/setup.py\n# and https://python-packaging-user-guide.readthedocs.org/\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nVERSION = runpy.run_path(os.path.join(here, \"mitmproxy\", \"version.py\"))[\"VERSION\"]\n\nsetup(\n name=\"mitmproxy\",\n version=VERSION,\n description=\"An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.\",\n long_description=long_description,\n url=\"http://mitmproxy.org\",\n author=\"Aldo Cortesi\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Environment :: Console :: Curses\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Security\",\n \"Topic :: Internet\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: Proxy Servers\",\n \"Topic :: Software Development :: Testing\"\n ],\n packages=find_packages(include=[\n \"mitmproxy\", \"mitmproxy.*\",\n \"pathod\", \"pathod.*\",\n ]),\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n \"mitmproxy = mitmproxy.tools.main:mitmproxy\",\n \"mitmdump = mitmproxy.tools.main:mitmdump\",\n \"mitmweb = mitmproxy.tools.main:mitmweb\",\n \"pathod = pathod.pathod_cmdline:go_pathod\",\n \"pathoc = pathod.pathoc_cmdline:go_pathoc\"\n ]\n },\n # https://packaging.python.org/en/latest/requirements/#install-requires\n # It is not considered best practice to use install_requires to pin dependencies to specific versions.\n install_requires=[\n \"blinker>=1.4, <1.5\",\n \"brotlipy>=0.5.1, <0.8\",\n \"certifi>=2015.11.20.1\", # no semver here - this should always be on the last release!\n \"click>=6.2, <7\",\n \"cryptography>=2.0,<2.2\",\n \"h2>=3.0, <4\",\n \"hyperframe>=5.0, <6\",\n \"kaitaistruct>=0.7, <0.8\",\n \"ldap3>=2.2.0, <2.4\",\n \"passlib>=1.6.5, <1.8\",\n \"pyasn1>=0.3.1, <0.4\",\n \"pyOpenSSL>=17.2,<17.4\",\n \"pyparsing>=2.1.3, <2.3\",\n \"pyperclip>=1.5.22, <1.6\",\n \"requests>=2.9.1, <3\",\n \"ruamel.yaml>=0.13.2, <0.16\",\n \"sortedcontainers>=1.5.4, <1.6\",\n \"tornado>=4.3, <4.6\",\n \"urwid>=1.3.1, <1.4\",\n ],\n extras_require={\n ':sys_platform == \"win32\"': [\n \"pydivert>=2.0.3,<2.2\",\n ],\n 'dev': [\n \"flake8>=3.2.1, <3.5\",\n \"Flask>=0.10.1, <0.13\",\n \"mypy>=0.530,<0.541\",\n \"pytest-cov>=2.2.1, <3\",\n \"pytest-faulthandler>=1.3.0, <2\",\n \"pytest-timeout>=1.0.0, <2\",\n \"pytest-xdist>=1.14, <2\",\n \"pytest>=3.1, <4\",\n \"rstcheck>=2.2, <4.0\",\n \"sphinx_rtd_theme>=0.1.9, <0.3\",\n \"sphinx-autobuild>=0.5.2, <0.8\",\n \"sphinx>=1.3.5, <1.7\",\n \"sphinxcontrib-documentedlist>=0.5.0, <0.7\",\n \"tox>=2.3, <3\",\n ],\n 'examples': [\n \"beautifulsoup4>=4.4.1, <4.7\",\n \"Pillow>=4.3,<4.4\",\n ]\n }\n)\n", "path": "setup.py" } ]
[ { "content": "import os\nimport runpy\nfrom codecs import open\n\nfrom setuptools import setup, find_packages\n\n# Based on https://github.com/pypa/sampleproject/blob/master/setup.py\n# and https://python-packaging-user-guide.readthedocs.org/\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nVERSION = runpy.run_path(os.path.join(here, \"mitmproxy\", \"version.py\"))[\"VERSION\"]\n\nsetup(\n name=\"mitmproxy\",\n version=VERSION,\n description=\"An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.\",\n long_description=long_description,\n url=\"http://mitmproxy.org\",\n author=\"Aldo Cortesi\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Environment :: Console :: Curses\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Security\",\n \"Topic :: Internet\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: Proxy Servers\",\n \"Topic :: Software Development :: Testing\"\n ],\n packages=find_packages(include=[\n \"mitmproxy\", \"mitmproxy.*\",\n \"pathod\", \"pathod.*\",\n ]),\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n \"mitmproxy = mitmproxy.tools.main:mitmproxy\",\n \"mitmdump = mitmproxy.tools.main:mitmdump\",\n \"mitmweb = mitmproxy.tools.main:mitmweb\",\n \"pathod = pathod.pathod_cmdline:go_pathod\",\n \"pathoc = pathod.pathoc_cmdline:go_pathoc\"\n ]\n },\n # https://packaging.python.org/en/latest/requirements/#install-requires\n # It is not considered best practice to use install_requires to pin dependencies to specific versions.\n install_requires=[\n \"blinker>=1.4, <1.5\",\n \"brotlipy>=0.5.1, <0.8\",\n \"certifi>=2015.11.20.1\", # no semver here - this should always be on the last release!\n \"click>=6.2, <7\",\n \"cryptography>=2.0,<2.2\",\n \"h2>=3.0, <4\",\n \"hyperframe>=5.0, <6\",\n \"kaitaistruct>=0.7, <0.8\",\n \"ldap3>=2.2.0, <2.4\",\n \"passlib>=1.6.5, <1.8\",\n \"pyasn1>=0.3.1, <0.4\",\n \"pyOpenSSL>=17.2,<17.4\",\n \"pyparsing>=2.1.3, <2.3\",\n \"pyperclip>=1.5.22, <1.6\",\n \"requests>=2.9.1, <3\",\n \"ruamel.yaml>=0.13.2, <0.16\",\n \"sortedcontainers>=1.5.4, <1.6\",\n \"tornado>=4.3, <4.6\",\n \"urwid>=1.3.1, <1.4\",\n ],\n extras_require={\n ':sys_platform == \"win32\"': [\n \"pydivert>=2.0.3,<2.2\",\n ],\n 'dev': [\n \"flake8>=3.5, <3.6\",\n \"Flask>=0.10.1, <0.13\",\n \"mypy>=0.530,<0.541\",\n \"pytest-cov>=2.2.1, <3\",\n \"pytest-faulthandler>=1.3.0, <2\",\n \"pytest-timeout>=1.0.0, <2\",\n \"pytest-xdist>=1.14, <2\",\n \"pytest>=3.1, <4\",\n \"rstcheck>=2.2, <4.0\",\n \"sphinx_rtd_theme>=0.1.9, <0.3\",\n \"sphinx-autobuild>=0.5.2, <0.8\",\n \"sphinx>=1.3.5, <1.7\",\n \"sphinxcontrib-documentedlist>=0.5.0, <0.7\",\n \"tox>=2.3, <3\",\n ],\n 'examples': [\n \"beautifulsoup4>=4.4.1, <4.7\",\n \"Pillow>=4.3,<4.4\",\n ]\n }\n)\n", "path": "setup.py" } ]
diff --git a/setup.cfg b/setup.cfg index b8e129ee00..eaabfa12ca 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,7 +1,7 @@ [flake8] max-line-length = 140 max-complexity = 25 -ignore = E251,C901,W503,W292 +ignore = E251,C901,W503,W292,E722,E741 exclude = mitmproxy/contrib/*,test/mitmproxy/data/*,release/build/* addons = file,open,basestring,xrange,unicode,long,cmp diff --git a/setup.py b/setup.py index 9c20cdc9f3..8e68e217c6 100644 --- a/setup.py +++ b/setup.py @@ -85,7 +85,7 @@ "pydivert>=2.0.3,<2.2", ], 'dev': [ - "flake8>=3.2.1, <3.5", + "flake8>=3.5, <3.6", "Flask>=0.10.1, <0.13", "mypy>=0.530,<0.541", "pytest-cov>=2.2.1, <3",
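The `setup.cfg` hunk matters as much as the version bump: the flake8 3.5 line (via pycodestyle) reports two checks that the older pin did not, so they are added to the ignore list. A small illustration of what those codes flag, assuming the standard pycodestyle meanings of E722 and E741 (the function below is invented for illustration):

```python
# Invented example; shows the two constructs the newly pinned flake8
# reports, matching the ignore codes added to setup.cfg.
def read_first_line(path):
    try:
        with open(path) as f:
            l = f.readline()  # E741: ambiguous variable name 'l'
            return l
    except:  # E722: do not use bare 'except'
        return None

print(read_first_line("setup.py"))
```

Ignoring them keeps the upgrade mechanical instead of forcing a codebase-wide cleanup in the same change.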
django-crispy-forms__django-crispy-forms-468
Remove `crispy_forms.base.from_iterable` We no longer support Python 2.5
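For context, the standard library has provided the equivalent since Python 2.6, which is why dropping 2.5 support makes the backport dead code. A minimal sketch of the replacement (plain stdlib; the sample data is invented): `itertools.chain.from_iterable` flattens one level, and the backport's dict special case is already the default behaviour, since iterating a dict yields its keys.

```python
# Stdlib equivalent of the removed helper: flattens one level, and dict
# elements contribute their keys because that is what dict iteration does.
from itertools import chain

iterables = [["a", "b"], {"c": 1, "d": 2}, ("e",)]
print(list(chain.from_iterable(iterables)))  # ['a', 'b', 'c', 'd', 'e'] (dict order as of 3.7)
```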
[ { "content": "def from_iterable(iterables):\n \"\"\"\n Backport of `itertools.chain.from_iterable` compatible with Python 2.5\n \"\"\"\n for it in iterables:\n for element in it:\n if isinstance(element, dict):\n for key in element:\n yield key\n else:\n yield element\n\n\nclass KeepContext(object):\n \"\"\"\n Context manager that receives a `django.template.Context` instance and a list of keys\n\n Once the context manager is exited, it removes `keys` from the context, to avoid\n side effects in later layout objects that may use the same context variables.\n\n Layout objects should use `extra_context` to introduce context variables, never\n touch context object themselves, that could introduce side effects.\n \"\"\"\n def __init__(self, context, keys):\n self.context = context\n self.keys = keys\n\n def __enter__(self):\n pass\n\n def __exit__(self, type, value, traceback):\n for key in list(self.keys):\n del self.context[key]\n", "path": "crispy_forms/base.py" } ]
[ { "content": "\n\nclass KeepContext(object):\n \"\"\"\n Context manager that receives a `django.template.Context` instance and a list of keys\n\n Once the context manager is exited, it removes `keys` from the context, to avoid\n side effects in later layout objects that may use the same context variables.\n\n Layout objects should use `extra_context` to introduce context variables, never\n touch context object themselves, that could introduce side effects.\n \"\"\"\n def __init__(self, context, keys):\n self.context = context\n self.keys = keys\n\n def __enter__(self):\n pass\n\n def __exit__(self, type, value, traceback):\n for key in list(self.keys):\n del self.context[key]\n", "path": "crispy_forms/base.py" } ]
diff --git a/crispy_forms/base.py b/crispy_forms/base.py index 98297def6..82da75072 100644 --- a/crispy_forms/base.py +++ b/crispy_forms/base.py @@ -1,14 +1,3 @@ -def from_iterable(iterables): - """ - Backport of `itertools.chain.from_iterable` compatible with Python 2.5 - """ - for it in iterables: - for element in it: - if isinstance(element, dict): - for key in element: - yield key - else: - yield element class KeepContext(object):
openfun__marsha-761
Searching a playlist by text raises a 500 error
## Bug Report

**Expected behavior/code**
In the admin, it should be possible to search a playlist by text.

**Actual Behavior**
Searching a playlist by text raises a 500 error (https://sentry.io/organizations/gip-fun-mooc/issues/1856670127/?project=1298925&query=is%3Aunresolved)

**Steps to Reproduce**
1. Go to the Marsha admin playlist list page: localhost:8070/admin/core/playlist/
2. Type a search in the search field:
<img src="https://user-images.githubusercontent.com/1427165/91073123-db4d7680-e63a-11ea-97c9-4c6c604f1d63.png" width="350"/>
3. Enjoy the 500!

**Environment**
- Marsha version: 3.10.0
- Platform: Docker

**Possible Solution**
I think the error comes from this line https://github.com/openfun/marsha/blob/master/src/backend/marsha/core/admin.py#L380 which is trying to run a text search on a foreign key...
The first solution that comes to mind is to replace it with: "portable_to__title", "portable_to__id" and "portable_to__lti_id"...
However, I'm not sure we want to search a playlist by the title or ids of the playlists to which it is portable... So finally, I think we should remove this line.
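The reporter's diagnosis matches how the admin builds its queries. Below is a simplified mirror of the `construct_search` helper inside Django's `ModelAdmin.get_search_results` (behaviour as of the Django 2.x/3.0 era; details may vary by version): every entry in `search_fields` without a `^`, `=` or `@` prefix becomes an `__icontains` lookup, and `icontains` is not a valid lookup on a relation such as `portable_to`, hence the 500.

```python
# Simplified mirror of Django admin's construct_search(); entries without
# a ^/=/@ prefix are turned into case-insensitive containment lookups.
def construct_search(field_name):
    if field_name.startswith('^'):
        return "%s__istartswith" % field_name[1:]
    elif field_name.startswith('='):
        return "%s__iexact" % field_name[1:]
    elif field_name.startswith('@'):
        return "%s__search" % field_name[1:]
    return "%s__icontains" % field_name

print(construct_search("portable_to"))         # portable_to__icontains -> FieldError at query time
print(construct_search("portable_to__title"))  # fine: targets a text column on the related model
```

The fix is therefore either to point the entries at concrete fields on the related model (`portable_to__title`, `portable_to__lti_id`, `portable_to__id`) or to drop the entry, as the reporter suggests.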
[ { "content": "\"\"\"Admin of the ``core`` app of the Marsha project.\"\"\"\n\nfrom django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as DefaultUserAdmin\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.translation import gettext_lazy as _\n\nfrom marsha.core.models import (\n AudioTrack,\n ConsumerSite,\n ConsumerSiteAccess,\n ConsumerSiteOrganization,\n ConsumerSitePortability,\n Document,\n LTIPassport,\n Organization,\n OrganizationAccess,\n Playlist,\n PlaylistAccess,\n PlaylistPortability,\n SignTrack,\n TimedTextTrack,\n User,\n Video,\n)\n\n\ndef link_field(field_name):\n \"\"\"Convert a foreign key value into a clickable link. # noqa\n\n Parameters\n ----------\n field_name: Type[string]\n If `field_name` is \"name\", link text will be str(obj.name) and link will be the admin\n url for obj.name.id:change.\n\n Returns\n -------\n function\n The function that Django admin must call with the object as arguement to render the field\n as a link.\n\n \"\"\"\n\n def _link_field(obj):\n \"\"\"Render a link in Django admin for foreign key fields.\n\n The link replaces the string representation of the linked object that is rendered\n by Django by default for foreign keys.\n\n Parameters\n ----------\n obj: Type[models.Model]\n The instance of Django model for which we want to render the field `field_name`.\n\n Returns\n -------\n string\n The html representing the link to the object admin change view.\n\n \"\"\"\n app_label = obj._meta.app_label\n linked_obj = getattr(obj, field_name)\n if linked_obj is None:\n return \"-\"\n model_name = linked_obj._meta.model_name\n view_name = f\"admin:{app_label}_{model_name}_change\"\n link_url = reverse(view_name, args=[linked_obj.id])\n return format_html('<a href=\"{}\">{}</a>', link_url, linked_obj)\n\n _link_field.short_description = field_name\n return _link_field\n\n\nclass BaseFileAdmin(admin.ModelAdmin):\n \"\"\"Base admin class for file model.\"\"\"\n\n exclude = (\"duplicated_from\",)\n\n list_display = (\n \"id\",\n \"title\",\n link_field(\"playlist\"),\n link_field(\"consumer_site\"),\n \"lti_id\",\n \"upload_state\",\n \"uploaded_on\",\n \"updated_on\",\n \"created_on\",\n )\n list_select_related = (\"playlist__consumer_site\",)\n fields = (\n \"id\",\n \"title\",\n \"description\",\n \"playlist\",\n \"lti_id\",\n \"upload_state\",\n \"created_by\",\n \"duplicated_from\",\n \"uploaded_on\",\n \"updated_on\",\n \"created_on\",\n )\n readonly_fields = [\n \"id\",\n \"created_by\",\n \"created_on\",\n \"duplicated_from\",\n \"upload_state\",\n \"uploaded_on\",\n \"updated_on\",\n ]\n list_filter = (\"upload_state\", \"playlist__consumer_site__domain\")\n search_fields = (\n \"id\",\n \"lti_id\",\n \"playlist__consumer_site__domain\",\n \"playlist__consumer_site__name\",\n \"playlist__id\",\n \"playlist__lti_id\",\n \"playlist__title\",\n \"playlist__organization__name\",\n \"title\",\n )\n\n\nclass BaseFileInline(admin.TabularInline):\n \"\"\"Base tabular inline class used by file resources.\"\"\"\n\n fields = (\n \"id\",\n \"title\",\n \"playlist\",\n \"lti_id\",\n \"upload_state\",\n \"uploaded_on\",\n \"updated_on\",\n \"created_on\",\n )\n readonly_fields = [\n \"id\",\n \"created_by\",\n \"created_on\",\n \"duplicated_from\",\n \"upload_state\",\n \"uploaded_on\",\n \"updated_on\",\n ]\n\n\nclass MarshaAdminSite(admin.AdminSite):\n \"\"\"Admin site for Marsha.\"\"\"\n\n site_title = _(\"{marsha_name} administration\").format(marsha_name=\"Marsha\")\n site_header 
= \"Marsha\"\n\n\nadmin_site = MarshaAdminSite(name=\"admin\")\n\n\nclass UserOrganizationsInline(admin.TabularInline):\n \"\"\"Inline to display organizations to which a user has been granted access.\"\"\"\n\n model = OrganizationAccess\n verbose_name = _(\"organization\")\n verbose_name_plural = _(\"organizations\")\n\n\[email protected](User, site=admin_site)\nclass UserAdmin(DefaultUserAdmin):\n \"\"\"Admin class for the User model.\"\"\"\n\n inlines = DefaultUserAdmin.inlines + [UserOrganizationsInline]\n\n\nclass ConsumerSiteUsersInline(admin.TabularInline):\n \"\"\"Inline to display users who have been granted access to a consumer site.\"\"\"\n\n model = ConsumerSiteAccess\n verbose_name = _(\"user\")\n verbose_name_plural = _(\"users\")\n\n\nclass ConsumerSiteOrganizationsInline(admin.TabularInline):\n \"\"\"Inline to display organizations for a consumer site.\"\"\"\n\n model = ConsumerSiteOrganization\n verbose_name = _(\"organization\")\n verbose_name_plural = _(\"organizations\")\n\n\nclass ConsumerSitePortabilityInline(admin.TabularInline):\n \"\"\"Inline to display consumer sites to which a consumer site is automatically portable.\"\"\"\n\n model = ConsumerSitePortability\n fk_name = \"source_site\"\n verbose_name = _(\"portable to\")\n verbose_name_plural = _(\"portable to\")\n\n\[email protected](ConsumerSite, site=admin_site)\nclass ConsumerSiteAdmin(admin.ModelAdmin):\n \"\"\"Admin class for the ConsumerSite model.\"\"\"\n\n list_display = (\"id\", \"name\", \"domain\", \"created_on\", \"updated_on\")\n search_fields = (\"id\", \"name\", \"domain\")\n inlines = [\n ConsumerSitePortabilityInline,\n ConsumerSiteUsersInline,\n ConsumerSiteOrganizationsInline,\n ]\n\n fields = (\n \"id\",\n \"name\",\n \"domain\",\n \"created_on\",\n \"updated_on\",\n \"lrs_url\",\n \"lrs_auth_token\",\n \"lrs_xapi_version\",\n \"video_show_download_default\",\n )\n readonly_fields = [\"id\", \"created_on\", \"updated_on\"]\n\n\nclass OrganizationUsersInline(admin.TabularInline):\n \"\"\"Inline to display users who have been granted access to an organization.\"\"\"\n\n model = OrganizationAccess\n verbose_name = _(\"user\")\n verbose_name_plural = _(\"users\")\n\n\nclass OrganizationConsumerSitesInline(admin.TabularInline):\n \"\"\"Inline to display consumer sites for an organization.\"\"\"\n\n model = ConsumerSiteOrganization\n verbose_name = _(\"consumer site\")\n verbose_name_plural = _(\"consumer sites\")\n\n\[email protected](Organization, site=admin_site)\nclass OrganizationAdmin(admin.ModelAdmin):\n \"\"\"Admin class for the Organization model.\"\"\"\n\n list_display = (\"name\",)\n inlines = [OrganizationUsersInline, OrganizationConsumerSitesInline]\n\n\nclass AudioTrackInline(admin.TabularInline):\n \"\"\"Inline for audio tracks of a video.\"\"\"\n\n model = AudioTrack\n readonly_fields = [\"upload_state\", \"uploaded_on\"]\n\n\nclass TimedTextTrackInline(admin.TabularInline):\n \"\"\"Inline for timed text tracks of a video.\"\"\"\n\n model = TimedTextTrack\n readonly_fields = [\"upload_state\", \"uploaded_on\"]\n\n\nclass SignTrackInline(admin.TabularInline):\n \"\"\"Inline for sign tracks of a video.\"\"\"\n\n model = SignTrack\n readonly_fields = [\"upload_state\", \"uploaded_on\"]\n\n\[email protected](Video, site=admin_site)\nclass VideoAdmin(BaseFileAdmin):\n \"\"\"Admin class for the Video model.\"\"\"\n\n inlines = [AudioTrackInline, TimedTextTrackInline, SignTrackInline]\n verbose_name = _(\"Video\")\n\n\nclass VideosInline(BaseFileInline):\n \"\"\"Inline for videos in a 
playlist.\"\"\"\n\n model = Video\n verbose_name = _(\"video\")\n verbose_name_plural = _(\"videos\")\n\n\nclass PlaylistAccessesInline(admin.TabularInline):\n \"\"\"Inline for with right to write access to a playlist.\"\"\"\n\n model = PlaylistAccess\n verbose_name = _(\"user access\")\n verbose_name_plural = _(\"users accesses\")\n\n\nclass PlaylistPortabilityInline(admin.TabularInline):\n \"\"\"Inline to display playlists to which a playlist is automatically portable.\"\"\"\n\n model = PlaylistPortability\n fk_name = \"source_playlist\"\n verbose_name = _(\"portable to\")\n verbose_name_plural = _(\"portable to\")\n\n\[email protected](Document, site=admin_site)\nclass DocumentAdmin(BaseFileAdmin):\n \"\"\"Admin class for the Document model.\"\"\"\n\n verbose_name = _(\"Document\")\n\n\nclass DocumentsInline(BaseFileInline):\n \"\"\"Inline for documents in a playlist.\"\"\"\n\n model = Document\n verbose_name = _(\"document\")\n verbose_name_plural = _(\"documents\")\n\n\[email protected](Playlist, site=admin_site)\nclass PlaylistAdmin(admin.ModelAdmin):\n \"\"\"Admin class for the Playlist model.\"\"\"\n\n exclude = (\"duplicated_from\",)\n inlines = [\n DocumentsInline,\n VideosInline,\n PlaylistAccessesInline,\n PlaylistPortabilityInline,\n ]\n\n list_display = (\n \"id\",\n \"title\",\n link_field(\"organization\"),\n link_field(\"consumer_site\"),\n \"lti_id\",\n \"is_public\",\n \"is_portable_to_playlist\",\n \"is_portable_to_consumer_site\",\n \"updated_on\",\n \"created_on\",\n )\n list_select_related = (\"consumer_site\", \"organization\")\n fields = (\n \"id\",\n \"title\",\n \"organization\",\n \"consumer_site\",\n \"lti_id\",\n \"is_public\",\n \"is_portable_to_playlist\",\n \"is_portable_to_consumer_site\",\n \"created_by\",\n \"duplicated_from\",\n \"updated_on\",\n \"created_on\",\n )\n readonly_fields = [\n \"id\",\n \"created_by\",\n \"created_on\",\n \"duplicated_from\",\n \"updated_on\",\n ]\n list_filter = (\n \"consumer_site__domain\",\n \"is_public\",\n \"is_portable_to_playlist\",\n \"is_portable_to_consumer_site\",\n )\n search_fields = (\n \"id\",\n \"consumer_site__domain\",\n \"consumer_site__name\",\n \"organization__name\",\n \"lti_id\",\n \"portable_to\",\n \"title\",\n )\n verbose_name = _(\"Playlist\")\n\n\[email protected](LTIPassport, site=admin_site)\nclass LTIPassportAdmin(admin.ModelAdmin):\n \"\"\"Admin class for the LTIPassport model.\"\"\"\n\n list_display = (\n \"oauth_consumer_key\",\n link_field(\"consumer_site\"),\n link_field(\"playlist\"),\n \"is_enabled\",\n \"updated_on\",\n \"created_on\",\n )\n list_select_related = (\"consumer_site\", \"playlist\")\n fields = (\n \"oauth_consumer_key\",\n \"shared_secret\",\n \"consumer_site\",\n \"playlist\",\n \"is_enabled\",\n \"updated_on\",\n \"created_on\",\n )\n readonly_fields = [\n \"created_on\",\n \"oauth_consumer_key\",\n \"shared_secret\",\n \"updated_on\",\n ]\n list_filter = (\"is_enabled\",)\n search_fields = (\n \"oauth_consumer_key\",\n \"consumer_site__name\",\n \"consumer_site__domain\",\n \"playlist__title\",\n )\n verbose_name = _(\"LTI passport\")\n", "path": "src/backend/marsha/core/admin.py" } ]
[ { "content": "\"\"\"Admin of the ``core`` app of the Marsha project.\"\"\"\n\nfrom django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as DefaultUserAdmin\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.translation import gettext_lazy as _\n\nfrom marsha.core.models import (\n AudioTrack,\n ConsumerSite,\n ConsumerSiteAccess,\n ConsumerSiteOrganization,\n ConsumerSitePortability,\n Document,\n LTIPassport,\n Organization,\n OrganizationAccess,\n Playlist,\n PlaylistAccess,\n PlaylistPortability,\n SignTrack,\n TimedTextTrack,\n User,\n Video,\n)\n\n\ndef link_field(field_name):\n \"\"\"Convert a foreign key value into a clickable link. # noqa\n\n Parameters\n ----------\n field_name: Type[string]\n If `field_name` is \"name\", link text will be str(obj.name) and link will be the admin\n url for obj.name.id:change.\n\n Returns\n -------\n function\n The function that Django admin must call with the object as arguement to render the field\n as a link.\n\n \"\"\"\n\n def _link_field(obj):\n \"\"\"Render a link in Django admin for foreign key fields.\n\n The link replaces the string representation of the linked object that is rendered\n by Django by default for foreign keys.\n\n Parameters\n ----------\n obj: Type[models.Model]\n The instance of Django model for which we want to render the field `field_name`.\n\n Returns\n -------\n string\n The html representing the link to the object admin change view.\n\n \"\"\"\n app_label = obj._meta.app_label\n linked_obj = getattr(obj, field_name)\n if linked_obj is None:\n return \"-\"\n model_name = linked_obj._meta.model_name\n view_name = f\"admin:{app_label}_{model_name}_change\"\n link_url = reverse(view_name, args=[linked_obj.id])\n return format_html('<a href=\"{}\">{}</a>', link_url, linked_obj)\n\n _link_field.short_description = field_name\n return _link_field\n\n\nclass BaseFileAdmin(admin.ModelAdmin):\n \"\"\"Base admin class for file model.\"\"\"\n\n exclude = (\"duplicated_from\",)\n\n list_display = (\n \"id\",\n \"title\",\n link_field(\"playlist\"),\n link_field(\"consumer_site\"),\n \"lti_id\",\n \"upload_state\",\n \"uploaded_on\",\n \"updated_on\",\n \"created_on\",\n )\n list_select_related = (\"playlist__consumer_site\",)\n fields = (\n \"id\",\n \"title\",\n \"description\",\n \"playlist\",\n \"lti_id\",\n \"upload_state\",\n \"created_by\",\n \"duplicated_from\",\n \"uploaded_on\",\n \"updated_on\",\n \"created_on\",\n )\n readonly_fields = [\n \"id\",\n \"created_by\",\n \"created_on\",\n \"duplicated_from\",\n \"upload_state\",\n \"uploaded_on\",\n \"updated_on\",\n ]\n list_filter = (\"upload_state\", \"playlist__consumer_site__domain\")\n search_fields = (\n \"id\",\n \"lti_id\",\n \"playlist__consumer_site__domain\",\n \"playlist__consumer_site__name\",\n \"playlist__id\",\n \"playlist__lti_id\",\n \"playlist__title\",\n \"playlist__organization__name\",\n \"title\",\n )\n\n\nclass BaseFileInline(admin.TabularInline):\n \"\"\"Base tabular inline class used by file resources.\"\"\"\n\n fields = (\n \"id\",\n \"title\",\n \"playlist\",\n \"lti_id\",\n \"upload_state\",\n \"uploaded_on\",\n \"updated_on\",\n \"created_on\",\n )\n readonly_fields = [\n \"id\",\n \"created_by\",\n \"created_on\",\n \"duplicated_from\",\n \"upload_state\",\n \"uploaded_on\",\n \"updated_on\",\n ]\n\n\nclass MarshaAdminSite(admin.AdminSite):\n \"\"\"Admin site for Marsha.\"\"\"\n\n site_title = _(\"{marsha_name} administration\").format(marsha_name=\"Marsha\")\n site_header 
= \"Marsha\"\n\n\nadmin_site = MarshaAdminSite(name=\"admin\")\n\n\nclass UserOrganizationsInline(admin.TabularInline):\n \"\"\"Inline to display organizations to which a user has been granted access.\"\"\"\n\n model = OrganizationAccess\n verbose_name = _(\"organization\")\n verbose_name_plural = _(\"organizations\")\n\n\[email protected](User, site=admin_site)\nclass UserAdmin(DefaultUserAdmin):\n \"\"\"Admin class for the User model.\"\"\"\n\n inlines = DefaultUserAdmin.inlines + [UserOrganizationsInline]\n\n\nclass ConsumerSiteUsersInline(admin.TabularInline):\n \"\"\"Inline to display users who have been granted access to a consumer site.\"\"\"\n\n model = ConsumerSiteAccess\n verbose_name = _(\"user\")\n verbose_name_plural = _(\"users\")\n\n\nclass ConsumerSiteOrganizationsInline(admin.TabularInline):\n \"\"\"Inline to display organizations for a consumer site.\"\"\"\n\n model = ConsumerSiteOrganization\n verbose_name = _(\"organization\")\n verbose_name_plural = _(\"organizations\")\n\n\nclass ConsumerSitePortabilityInline(admin.TabularInline):\n \"\"\"Inline to display consumer sites to which a consumer site is automatically portable.\"\"\"\n\n model = ConsumerSitePortability\n fk_name = \"source_site\"\n verbose_name = _(\"portable to\")\n verbose_name_plural = _(\"portable to\")\n\n\[email protected](ConsumerSite, site=admin_site)\nclass ConsumerSiteAdmin(admin.ModelAdmin):\n \"\"\"Admin class for the ConsumerSite model.\"\"\"\n\n list_display = (\"id\", \"name\", \"domain\", \"created_on\", \"updated_on\")\n search_fields = (\"id\", \"name\", \"domain\")\n inlines = [\n ConsumerSitePortabilityInline,\n ConsumerSiteUsersInline,\n ConsumerSiteOrganizationsInline,\n ]\n\n fields = (\n \"id\",\n \"name\",\n \"domain\",\n \"created_on\",\n \"updated_on\",\n \"lrs_url\",\n \"lrs_auth_token\",\n \"lrs_xapi_version\",\n \"video_show_download_default\",\n )\n readonly_fields = [\"id\", \"created_on\", \"updated_on\"]\n\n\nclass OrganizationUsersInline(admin.TabularInline):\n \"\"\"Inline to display users who have been granted access to an organization.\"\"\"\n\n model = OrganizationAccess\n verbose_name = _(\"user\")\n verbose_name_plural = _(\"users\")\n\n\nclass OrganizationConsumerSitesInline(admin.TabularInline):\n \"\"\"Inline to display consumer sites for an organization.\"\"\"\n\n model = ConsumerSiteOrganization\n verbose_name = _(\"consumer site\")\n verbose_name_plural = _(\"consumer sites\")\n\n\[email protected](Organization, site=admin_site)\nclass OrganizationAdmin(admin.ModelAdmin):\n \"\"\"Admin class for the Organization model.\"\"\"\n\n list_display = (\"name\",)\n inlines = [OrganizationUsersInline, OrganizationConsumerSitesInline]\n\n\nclass AudioTrackInline(admin.TabularInline):\n \"\"\"Inline for audio tracks of a video.\"\"\"\n\n model = AudioTrack\n readonly_fields = [\"upload_state\", \"uploaded_on\"]\n\n\nclass TimedTextTrackInline(admin.TabularInline):\n \"\"\"Inline for timed text tracks of a video.\"\"\"\n\n model = TimedTextTrack\n readonly_fields = [\"upload_state\", \"uploaded_on\"]\n\n\nclass SignTrackInline(admin.TabularInline):\n \"\"\"Inline for sign tracks of a video.\"\"\"\n\n model = SignTrack\n readonly_fields = [\"upload_state\", \"uploaded_on\"]\n\n\[email protected](Video, site=admin_site)\nclass VideoAdmin(BaseFileAdmin):\n \"\"\"Admin class for the Video model.\"\"\"\n\n inlines = [AudioTrackInline, TimedTextTrackInline, SignTrackInline]\n verbose_name = _(\"Video\")\n\n\nclass VideosInline(BaseFileInline):\n \"\"\"Inline for videos in a 
playlist.\"\"\"\n\n model = Video\n verbose_name = _(\"video\")\n verbose_name_plural = _(\"videos\")\n\n\nclass PlaylistAccessesInline(admin.TabularInline):\n \"\"\"Inline for with right to write access to a playlist.\"\"\"\n\n model = PlaylistAccess\n verbose_name = _(\"user access\")\n verbose_name_plural = _(\"users accesses\")\n\n\nclass PlaylistPortabilityInline(admin.TabularInline):\n \"\"\"Inline to display playlists to which a playlist is automatically portable.\"\"\"\n\n model = PlaylistPortability\n fk_name = \"source_playlist\"\n verbose_name = _(\"portable to\")\n verbose_name_plural = _(\"portable to\")\n\n\[email protected](Document, site=admin_site)\nclass DocumentAdmin(BaseFileAdmin):\n \"\"\"Admin class for the Document model.\"\"\"\n\n verbose_name = _(\"Document\")\n\n\nclass DocumentsInline(BaseFileInline):\n \"\"\"Inline for documents in a playlist.\"\"\"\n\n model = Document\n verbose_name = _(\"document\")\n verbose_name_plural = _(\"documents\")\n\n\[email protected](Playlist, site=admin_site)\nclass PlaylistAdmin(admin.ModelAdmin):\n \"\"\"Admin class for the Playlist model.\"\"\"\n\n exclude = (\"duplicated_from\",)\n inlines = [\n DocumentsInline,\n VideosInline,\n PlaylistAccessesInline,\n PlaylistPortabilityInline,\n ]\n\n list_display = (\n \"id\",\n \"title\",\n link_field(\"organization\"),\n link_field(\"consumer_site\"),\n \"lti_id\",\n \"is_public\",\n \"is_portable_to_playlist\",\n \"is_portable_to_consumer_site\",\n \"updated_on\",\n \"created_on\",\n )\n list_select_related = (\"consumer_site\", \"organization\")\n fields = (\n \"id\",\n \"title\",\n \"organization\",\n \"consumer_site\",\n \"lti_id\",\n \"is_public\",\n \"is_portable_to_playlist\",\n \"is_portable_to_consumer_site\",\n \"created_by\",\n \"duplicated_from\",\n \"updated_on\",\n \"created_on\",\n )\n readonly_fields = [\n \"id\",\n \"created_by\",\n \"created_on\",\n \"duplicated_from\",\n \"updated_on\",\n ]\n list_filter = (\n \"consumer_site__domain\",\n \"is_public\",\n \"is_portable_to_playlist\",\n \"is_portable_to_consumer_site\",\n )\n search_fields = (\n \"id\",\n \"consumer_site__domain\",\n \"consumer_site__name\",\n \"organization__name\",\n \"lti_id\",\n \"portable_to__title\",\n \"portable_to__lti_id\",\n \"portable_to__id\",\n \"title\",\n )\n verbose_name = _(\"Playlist\")\n\n\[email protected](LTIPassport, site=admin_site)\nclass LTIPassportAdmin(admin.ModelAdmin):\n \"\"\"Admin class for the LTIPassport model.\"\"\"\n\n list_display = (\n \"oauth_consumer_key\",\n link_field(\"consumer_site\"),\n link_field(\"playlist\"),\n \"is_enabled\",\n \"updated_on\",\n \"created_on\",\n )\n list_select_related = (\"consumer_site\", \"playlist\")\n fields = (\n \"oauth_consumer_key\",\n \"shared_secret\",\n \"consumer_site\",\n \"playlist\",\n \"is_enabled\",\n \"updated_on\",\n \"created_on\",\n )\n readonly_fields = [\n \"created_on\",\n \"oauth_consumer_key\",\n \"shared_secret\",\n \"updated_on\",\n ]\n list_filter = (\"is_enabled\",)\n search_fields = (\n \"oauth_consumer_key\",\n \"consumer_site__name\",\n \"consumer_site__domain\",\n \"playlist__title\",\n )\n verbose_name = _(\"LTI passport\")\n", "path": "src/backend/marsha/core/admin.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index fc950e90cc..30db09d49e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,10 @@ Versioning](https://semver.org/spec/v2.0.0.html). - Remove usage of react-intl-po - Rework front i18n workflow +### Fixed + +- Fix admin video search + ## [3.10.2] - 2020-09-29 ### Fixed diff --git a/src/backend/marsha/core/admin.py b/src/backend/marsha/core/admin.py index 2de55d5a2e..1e10aa1d17 100644 --- a/src/backend/marsha/core/admin.py +++ b/src/backend/marsha/core/admin.py @@ -377,7 +377,9 @@ class PlaylistAdmin(admin.ModelAdmin): "consumer_site__name", "organization__name", "lti_id", - "portable_to", + "portable_to__title", + "portable_to__lti_id", + "portable_to__id", "title", ) verbose_name = _("Playlist")
docker__docker-py-3023
Not a Contribution: create_api_error_from_http_exception should "raise <exception> from <original-error>" to preserve the error message
Not a Contribution.

APIError handling should be changed so that it doesn't hide the original exception.

https://stackoverflow.com/questions/24752395/python-raise-from-usage
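A self-contained illustration of PEP 3134 exception chaining, with stand-in exception classes rather than docker-py's real ones: `raise ... from` records the original error in `__cause__`, so the traceback prints both exceptions joined by "The above exception was the direct cause of the following exception" instead of silently dropping the low-level one.

```python
# Stand-in classes for illustration; demonstrates how `raise ... from`
# preserves the originating error on the new exception's __cause__.
class HTTPError(Exception):
    pass

class APIError(Exception):
    pass

def do_request():
    raise HTTPError("500 Server Error")

try:
    try:
        do_request()
    except HTTPError as e:
        raise APIError("container create failed") from e
except APIError as err:
    assert isinstance(err.__cause__, HTTPError)
    print(repr(err.__cause__))  # HTTPError('500 Server Error')
```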
[ { "content": "import requests\n\n\nclass DockerException(Exception):\n \"\"\"\n A base class from which all other exceptions inherit.\n\n If you want to catch all errors that the Docker SDK might raise,\n catch this base exception.\n \"\"\"\n\n\ndef create_api_error_from_http_exception(e):\n \"\"\"\n Create a suitable APIError from requests.exceptions.HTTPError.\n \"\"\"\n response = e.response\n try:\n explanation = response.json()['message']\n except ValueError:\n explanation = (response.content or '').strip()\n cls = APIError\n if response.status_code == 404:\n if explanation and ('No such image' in str(explanation) or\n 'not found: does not exist or no pull access'\n in str(explanation) or\n 'repository does not exist' in str(explanation)):\n cls = ImageNotFound\n else:\n cls = NotFound\n raise cls(e, response=response, explanation=explanation)\n\n\nclass APIError(requests.exceptions.HTTPError, DockerException):\n \"\"\"\n An HTTP error from the API.\n \"\"\"\n def __init__(self, message, response=None, explanation=None):\n # requests 1.2 supports response as a keyword argument, but\n # requests 1.1 doesn't\n super().__init__(message)\n self.response = response\n self.explanation = explanation\n\n def __str__(self):\n message = super().__str__()\n\n if self.is_client_error():\n message = '{} Client Error for {}: {}'.format(\n self.response.status_code, self.response.url,\n self.response.reason)\n\n elif self.is_server_error():\n message = '{} Server Error for {}: {}'.format(\n self.response.status_code, self.response.url,\n self.response.reason)\n\n if self.explanation:\n message = f'{message} (\"{self.explanation}\")'\n\n return message\n\n @property\n def status_code(self):\n if self.response is not None:\n return self.response.status_code\n\n def is_error(self):\n return self.is_client_error() or self.is_server_error()\n\n def is_client_error(self):\n if self.status_code is None:\n return False\n return 400 <= self.status_code < 500\n\n def is_server_error(self):\n if self.status_code is None:\n return False\n return 500 <= self.status_code < 600\n\n\nclass NotFound(APIError):\n pass\n\n\nclass ImageNotFound(NotFound):\n pass\n\n\nclass InvalidVersion(DockerException):\n pass\n\n\nclass InvalidRepository(DockerException):\n pass\n\n\nclass InvalidConfigFile(DockerException):\n pass\n\n\nclass InvalidArgument(DockerException):\n pass\n\n\nclass DeprecatedMethod(DockerException):\n pass\n\n\nclass TLSParameterError(DockerException):\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg + (\". TLS configurations should map the Docker CLI \"\n \"client configurations. 
See \"\n \"https://docs.docker.com/engine/articles/https/ \"\n \"for API details.\")\n\n\nclass NullResource(DockerException, ValueError):\n pass\n\n\nclass ContainerError(DockerException):\n \"\"\"\n Represents a container that has exited with a non-zero exit code.\n \"\"\"\n def __init__(self, container, exit_status, command, image, stderr):\n self.container = container\n self.exit_status = exit_status\n self.command = command\n self.image = image\n self.stderr = stderr\n\n err = f\": {stderr}\" if stderr is not None else \"\"\n msg = (\"Command '{}' in image '{}' returned non-zero exit \"\n \"status {}{}\").format(command, image, exit_status, err)\n\n super().__init__(msg)\n\n\nclass StreamParseError(RuntimeError):\n def __init__(self, reason):\n self.msg = reason\n\n\nclass BuildError(DockerException):\n def __init__(self, reason, build_log):\n super().__init__(reason)\n self.msg = reason\n self.build_log = build_log\n\n\nclass ImageLoadError(DockerException):\n pass\n\n\ndef create_unexpected_kwargs_error(name, kwargs):\n quoted_kwargs = [f\"'{k}'\" for k in sorted(kwargs)]\n text = [f\"{name}() \"]\n if len(quoted_kwargs) == 1:\n text.append(\"got an unexpected keyword argument \")\n else:\n text.append(\"got unexpected keyword arguments \")\n text.append(', '.join(quoted_kwargs))\n return TypeError(''.join(text))\n\n\nclass MissingContextParameter(DockerException):\n def __init__(self, param):\n self.param = param\n\n def __str__(self):\n return (f\"missing parameter: {self.param}\")\n\n\nclass ContextAlreadyExists(DockerException):\n def __init__(self, name):\n self.name = name\n\n def __str__(self):\n return (f\"context {self.name} already exists\")\n\n\nclass ContextException(DockerException):\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return (self.msg)\n\n\nclass ContextNotFound(DockerException):\n def __init__(self, name):\n self.name = name\n\n def __str__(self):\n return (f\"context '{self.name}' not found\")\n", "path": "docker/errors.py" } ]
[ { "content": "import requests\n\n\nclass DockerException(Exception):\n \"\"\"\n A base class from which all other exceptions inherit.\n\n If you want to catch all errors that the Docker SDK might raise,\n catch this base exception.\n \"\"\"\n\n\ndef create_api_error_from_http_exception(e):\n \"\"\"\n Create a suitable APIError from requests.exceptions.HTTPError.\n \"\"\"\n response = e.response\n try:\n explanation = response.json()['message']\n except ValueError:\n explanation = (response.content or '').strip()\n cls = APIError\n if response.status_code == 404:\n if explanation and ('No such image' in str(explanation) or\n 'not found: does not exist or no pull access'\n in str(explanation) or\n 'repository does not exist' in str(explanation)):\n cls = ImageNotFound\n else:\n cls = NotFound\n raise cls(e, response=response, explanation=explanation) from e\n\n\nclass APIError(requests.exceptions.HTTPError, DockerException):\n \"\"\"\n An HTTP error from the API.\n \"\"\"\n def __init__(self, message, response=None, explanation=None):\n # requests 1.2 supports response as a keyword argument, but\n # requests 1.1 doesn't\n super().__init__(message)\n self.response = response\n self.explanation = explanation\n\n def __str__(self):\n message = super().__str__()\n\n if self.is_client_error():\n message = '{} Client Error for {}: {}'.format(\n self.response.status_code, self.response.url,\n self.response.reason)\n\n elif self.is_server_error():\n message = '{} Server Error for {}: {}'.format(\n self.response.status_code, self.response.url,\n self.response.reason)\n\n if self.explanation:\n message = f'{message} (\"{self.explanation}\")'\n\n return message\n\n @property\n def status_code(self):\n if self.response is not None:\n return self.response.status_code\n\n def is_error(self):\n return self.is_client_error() or self.is_server_error()\n\n def is_client_error(self):\n if self.status_code is None:\n return False\n return 400 <= self.status_code < 500\n\n def is_server_error(self):\n if self.status_code is None:\n return False\n return 500 <= self.status_code < 600\n\n\nclass NotFound(APIError):\n pass\n\n\nclass ImageNotFound(NotFound):\n pass\n\n\nclass InvalidVersion(DockerException):\n pass\n\n\nclass InvalidRepository(DockerException):\n pass\n\n\nclass InvalidConfigFile(DockerException):\n pass\n\n\nclass InvalidArgument(DockerException):\n pass\n\n\nclass DeprecatedMethod(DockerException):\n pass\n\n\nclass TLSParameterError(DockerException):\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg + (\". TLS configurations should map the Docker CLI \"\n \"client configurations. 
See \"\n \"https://docs.docker.com/engine/articles/https/ \"\n \"for API details.\")\n\n\nclass NullResource(DockerException, ValueError):\n pass\n\n\nclass ContainerError(DockerException):\n \"\"\"\n Represents a container that has exited with a non-zero exit code.\n \"\"\"\n def __init__(self, container, exit_status, command, image, stderr):\n self.container = container\n self.exit_status = exit_status\n self.command = command\n self.image = image\n self.stderr = stderr\n\n err = f\": {stderr}\" if stderr is not None else \"\"\n msg = (\"Command '{}' in image '{}' returned non-zero exit \"\n \"status {}{}\").format(command, image, exit_status, err)\n\n super().__init__(msg)\n\n\nclass StreamParseError(RuntimeError):\n def __init__(self, reason):\n self.msg = reason\n\n\nclass BuildError(DockerException):\n def __init__(self, reason, build_log):\n super().__init__(reason)\n self.msg = reason\n self.build_log = build_log\n\n\nclass ImageLoadError(DockerException):\n pass\n\n\ndef create_unexpected_kwargs_error(name, kwargs):\n quoted_kwargs = [f\"'{k}'\" for k in sorted(kwargs)]\n text = [f\"{name}() \"]\n if len(quoted_kwargs) == 1:\n text.append(\"got an unexpected keyword argument \")\n else:\n text.append(\"got unexpected keyword arguments \")\n text.append(', '.join(quoted_kwargs))\n return TypeError(''.join(text))\n\n\nclass MissingContextParameter(DockerException):\n def __init__(self, param):\n self.param = param\n\n def __str__(self):\n return (f\"missing parameter: {self.param}\")\n\n\nclass ContextAlreadyExists(DockerException):\n def __init__(self, name):\n self.name = name\n\n def __str__(self):\n return (f\"context {self.name} already exists\")\n\n\nclass ContextException(DockerException):\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return (self.msg)\n\n\nclass ContextNotFound(DockerException):\n def __init__(self, name):\n self.name = name\n\n def __str__(self):\n return (f\"context '{self.name}' not found\")\n", "path": "docker/errors.py" } ]
diff --git a/docker/errors.py b/docker/errors.py index ba952562c..7725295f5 100644 --- a/docker/errors.py +++ b/docker/errors.py @@ -28,7 +28,7 @@ def create_api_error_from_http_exception(e): cls = ImageNotFound else: cls = NotFound - raise cls(e, response=response, explanation=explanation) + raise cls(e, response=response, explanation=explanation) from e class APIError(requests.exceptions.HTTPError, DockerException):
pyqtgraph__pyqtgraph-888
TreeWidget.topLevelItems is broken on Python 3
As per the title. The method uses `xrange`, which is obviously not available in Python 3. I haven't tried it, but I assume the regression was introduced with 6c7e0fa, where a `from ..python2_3 import xrange` import was removed from the corresponding file.

I can see two possible fixes for the issue.

1. Change `xrange` to `range`, which is available in both Python versions.
2. Revert the removal of the `python2_3` compatibility import.

@campagnola Let me know which one you prefer, and I'll submit a PR.
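A minimal sketch of the two candidate fixes. The shim imitates the intent of the removed `python2_3` import; that module's exact contents are assumed here, not quoted.

```python
# Option 2: a compatibility alias in the spirit of pyqtgraph.python2_3
# (reconstructed from its described purpose, not copied from the module).
try:
    xrange  # Python 2: the built-in already exists
except NameError:
    xrange = range  # Python 3: range is the lazy equivalent

# Option 1 would instead edit the method body directly:
#     return map(self.topLevelItem, range(self.topLevelItemCount()))
print(list(xrange(3)))  # [0, 1, 2] on both Python 2 and 3
```

Either way, note that on Python 3 `map()` returns a one-shot iterator, so callers that need to reuse the result of `topLevelItems()` should wrap it in `list()`.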
[ { "content": "# -*- coding: utf-8 -*-\nfrom ..Qt import QtGui, QtCore\nfrom weakref import *\n\n__all__ = ['TreeWidget', 'TreeWidgetItem']\n\n\nclass TreeWidget(QtGui.QTreeWidget):\n \"\"\"Extends QTreeWidget to allow internal drag/drop with widgets in the tree.\n Also maintains the expanded state of subtrees as they are moved.\n This class demonstrates the absurd lengths one must go to to make drag/drop work.\"\"\"\n \n sigItemMoved = QtCore.Signal(object, object, object) # (item, parent, index)\n sigItemCheckStateChanged = QtCore.Signal(object, object)\n sigItemTextChanged = QtCore.Signal(object, object)\n sigColumnCountChanged = QtCore.Signal(object, object) # self, count\n \n def __init__(self, parent=None):\n QtGui.QTreeWidget.__init__(self, parent)\n \n # wrap this item so that we can propagate tree change information\n # to children.\n self._invRootItem = InvisibleRootItem(QtGui.QTreeWidget.invisibleRootItem(self))\n \n self.setAcceptDrops(True)\n self.setDragEnabled(True)\n self.setEditTriggers(QtGui.QAbstractItemView.EditKeyPressed|QtGui.QAbstractItemView.SelectedClicked)\n self.placeholders = []\n self.childNestingLimit = None\n self.itemClicked.connect(self._itemClicked)\n\n def setItemWidget(self, item, col, wid):\n \"\"\"\n Overrides QTreeWidget.setItemWidget such that widgets are added inside an invisible wrapper widget.\n This makes it possible to move the item in and out of the tree without its widgets being automatically deleted.\n \"\"\"\n w = QtGui.QWidget() ## foster parent / surrogate child widget\n l = QtGui.QVBoxLayout()\n l.setContentsMargins(0,0,0,0)\n w.setLayout(l)\n w.setSizePolicy(wid.sizePolicy())\n w.setMinimumHeight(wid.minimumHeight())\n w.setMinimumWidth(wid.minimumWidth())\n l.addWidget(wid)\n w.realChild = wid\n self.placeholders.append(w)\n QtGui.QTreeWidget.setItemWidget(self, item, col, w)\n\n def itemWidget(self, item, col):\n w = QtGui.QTreeWidget.itemWidget(self, item, col)\n if w is not None and hasattr(w, 'realChild'):\n w = w.realChild\n return w\n\n def dropMimeData(self, parent, index, data, action):\n item = self.currentItem()\n p = parent\n #print \"drop\", item, \"->\", parent, index\n while True:\n if p is None:\n break\n if p is item:\n return False\n #raise Exception(\"Can not move item into itself.\")\n p = p.parent()\n \n if not self.itemMoving(item, parent, index):\n return False\n \n currentParent = item.parent()\n if currentParent is None:\n currentParent = self.invisibleRootItem()\n if parent is None:\n parent = self.invisibleRootItem()\n \n if currentParent is parent and index > parent.indexOfChild(item):\n index -= 1\n \n self.prepareMove(item)\n \n currentParent.removeChild(item)\n #print \" insert child to index\", index\n parent.insertChild(index, item) ## index will not be correct\n self.setCurrentItem(item)\n \n self.recoverMove(item)\n #self.emit(QtCore.SIGNAL('itemMoved'), item, parent, index)\n self.sigItemMoved.emit(item, parent, index)\n return True\n\n def itemMoving(self, item, parent, index):\n \"\"\"Called when item has been dropped elsewhere in the tree.\n Return True to accept the move, False to reject.\"\"\"\n return True\n \n def prepareMove(self, item):\n item.__widgets = []\n item.__expanded = item.isExpanded()\n for i in range(self.columnCount()):\n w = self.itemWidget(item, i)\n item.__widgets.append(w)\n if w is None:\n continue\n w.setParent(None)\n for i in range(item.childCount()):\n self.prepareMove(item.child(i))\n \n def recoverMove(self, item):\n for i in range(self.columnCount()):\n w = 
item.__widgets[i]\n if w is None:\n continue\n self.setItemWidget(item, i, w)\n for i in range(item.childCount()):\n self.recoverMove(item.child(i))\n \n item.setExpanded(False) ## Items do not re-expand correctly unless they are collapsed first.\n QtGui.QApplication.instance().processEvents()\n item.setExpanded(item.__expanded)\n \n def collapseTree(self, item):\n item.setExpanded(False)\n for i in range(item.childCount()):\n self.collapseTree(item.child(i))\n \n def removeTopLevelItem(self, item):\n for i in range(self.topLevelItemCount()):\n if self.topLevelItem(i) is item:\n self.takeTopLevelItem(i)\n return\n raise Exception(\"Item '%s' not in top-level items.\" % str(item))\n \n def listAllItems(self, item=None):\n items = []\n if item != None:\n items.append(item)\n else:\n item = self.invisibleRootItem()\n \n for cindex in range(item.childCount()):\n foundItems = self.listAllItems(item=item.child(cindex))\n for f in foundItems:\n items.append(f)\n return items\n \n def dropEvent(self, ev):\n QtGui.QTreeWidget.dropEvent(self, ev)\n self.updateDropFlags()\n\n def updateDropFlags(self):\n ### intended to put a limit on how deep nests of children can go.\n ### self.childNestingLimit is upheld when moving items without children, but if the item being moved has children/grandchildren, the children/grandchildren\n ### can end up over the childNestingLimit. \n if self.childNestingLimit == None:\n pass # enable drops in all items (but only if there are drops that aren't enabled? for performance...)\n else:\n items = self.listAllItems()\n for item in items:\n parentCount = 0\n p = item.parent()\n while p is not None:\n parentCount += 1\n p = p.parent()\n if parentCount >= self.childNestingLimit:\n item.setFlags(item.flags() & (~QtCore.Qt.ItemIsDropEnabled))\n else:\n item.setFlags(item.flags() | QtCore.Qt.ItemIsDropEnabled)\n\n @staticmethod\n def informTreeWidgetChange(item):\n if hasattr(item, 'treeWidgetChanged'):\n item.treeWidgetChanged()\n for i in range(item.childCount()):\n TreeWidget.informTreeWidgetChange(item.child(i))\n \n def addTopLevelItem(self, item):\n QtGui.QTreeWidget.addTopLevelItem(self, item)\n self.informTreeWidgetChange(item)\n\n def addTopLevelItems(self, items):\n QtGui.QTreeWidget.addTopLevelItems(self, items)\n for item in items:\n self.informTreeWidgetChange(item)\n \n def insertTopLevelItem(self, index, item):\n QtGui.QTreeWidget.insertTopLevelItem(self, index, item)\n self.informTreeWidgetChange(item)\n\n def insertTopLevelItems(self, index, items):\n QtGui.QTreeWidget.insertTopLevelItems(self, index, items)\n for item in items:\n self.informTreeWidgetChange(item)\n \n def takeTopLevelItem(self, index):\n item = self.topLevelItem(index)\n if item is not None:\n self.prepareMove(item)\n item = QtGui.QTreeWidget.takeTopLevelItem(self, index)\n self.prepareMove(item)\n self.informTreeWidgetChange(item)\n return item\n\n def topLevelItems(self):\n return map(self.topLevelItem, xrange(self.topLevelItemCount()))\n \n def clear(self):\n items = self.topLevelItems()\n for item in items:\n self.prepareMove(item)\n QtGui.QTreeWidget.clear(self)\n \n ## Why do we want to do this? It causes RuntimeErrors. 
\n #for item in items:\n #self.informTreeWidgetChange(item)\n\n def invisibleRootItem(self):\n return self._invRootItem\n \n def itemFromIndex(self, index):\n \"\"\"Return the item and column corresponding to a QModelIndex.\n \"\"\"\n col = index.column()\n rows = []\n while index.row() >= 0:\n rows.insert(0, index.row())\n index = index.parent()\n item = self.topLevelItem(rows[0])\n for row in rows[1:]:\n item = item.child(row)\n return item, col\n\n def setColumnCount(self, c):\n QtGui.QTreeWidget.setColumnCount(self, c)\n self.sigColumnCountChanged.emit(self, c)\n\n def _itemClicked(self, item, col):\n if hasattr(item, 'itemClicked'):\n item.itemClicked(col)\n\n\nclass TreeWidgetItem(QtGui.QTreeWidgetItem):\n \"\"\"\n TreeWidgetItem that keeps track of its own widgets and expansion state.\n \n * Widgets may be added to columns before the item is added to a tree.\n * Expanded state may be set before item is added to a tree.\n * Adds setCheked and isChecked methods.\n * Adds addChildren, insertChildren, and takeChildren methods.\n \"\"\"\n def __init__(self, *args):\n QtGui.QTreeWidgetItem.__init__(self, *args)\n self._widgets = {} # col: widget\n self._tree = None\n self._expanded = False\n \n def setChecked(self, column, checked):\n self.setCheckState(column, QtCore.Qt.Checked if checked else QtCore.Qt.Unchecked)\n\n def isChecked(self, col):\n return self.checkState(col) == QtCore.Qt.Checked\n \n def setExpanded(self, exp):\n self._expanded = exp\n QtGui.QTreeWidgetItem.setExpanded(self, exp)\n \n def isExpanded(self):\n return self._expanded\n \n def setWidget(self, column, widget):\n if column in self._widgets:\n self.removeWidget(column)\n self._widgets[column] = widget\n tree = self.treeWidget()\n if tree is None:\n return\n else:\n tree.setItemWidget(self, column, widget)\n \n def removeWidget(self, column):\n del self._widgets[column]\n tree = self.treeWidget()\n if tree is None:\n return\n tree.removeItemWidget(self, column)\n \n def treeWidgetChanged(self):\n tree = self.treeWidget()\n if self._tree is tree:\n return\n self._tree = self.treeWidget()\n if tree is None:\n return\n for col, widget in self._widgets.items():\n tree.setItemWidget(self, col, widget)\n QtGui.QTreeWidgetItem.setExpanded(self, self._expanded)\n \n def childItems(self):\n return [self.child(i) for i in range(self.childCount())]\n \n def addChild(self, child):\n QtGui.QTreeWidgetItem.addChild(self, child)\n TreeWidget.informTreeWidgetChange(child)\n \n def addChildren(self, childs):\n QtGui.QTreeWidgetItem.addChildren(self, childs)\n for child in childs:\n TreeWidget.informTreeWidgetChange(child)\n\n def insertChild(self, index, child):\n QtGui.QTreeWidgetItem.insertChild(self, index, child)\n TreeWidget.informTreeWidgetChange(child)\n \n def insertChildren(self, index, childs):\n QtGui.QTreeWidgetItem.addChildren(self, index, childs)\n for child in childs:\n TreeWidget.informTreeWidgetChange(child)\n \n def removeChild(self, child):\n QtGui.QTreeWidgetItem.removeChild(self, child)\n TreeWidget.informTreeWidgetChange(child)\n \n def takeChild(self, index):\n child = QtGui.QTreeWidgetItem.takeChild(self, index)\n TreeWidget.informTreeWidgetChange(child)\n return child\n \n def takeChildren(self):\n childs = QtGui.QTreeWidgetItem.takeChildren(self)\n for child in childs:\n TreeWidget.informTreeWidgetChange(child)\n return childs\n \n def setData(self, column, role, value):\n # credit: ekhumoro\n # 
http://stackoverflow.com/questions/13662020/how-to-implement-itemchecked-and-itemunchecked-signals-for-qtreewidget-in-pyqt4\n checkstate = self.checkState(column)\n text = self.text(column)\n QtGui.QTreeWidgetItem.setData(self, column, role, value)\n \n treewidget = self.treeWidget()\n if treewidget is None:\n return\n if (role == QtCore.Qt.CheckStateRole and checkstate != self.checkState(column)):\n treewidget.sigItemCheckStateChanged.emit(self, column)\n elif (role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole) and text != self.text(column)):\n treewidget.sigItemTextChanged.emit(self, column)\n\n def itemClicked(self, col):\n \"\"\"Called when this item is clicked on.\n \n Override this method to react to user clicks.\n \"\"\"\n\n \nclass InvisibleRootItem(object):\n \"\"\"Wrapper around a TreeWidget's invisible root item that calls\n TreeWidget.informTreeWidgetChange when child items are added/removed.\n \"\"\"\n def __init__(self, item):\n self._real_item = item\n \n def addChild(self, child):\n self._real_item.addChild(child)\n TreeWidget.informTreeWidgetChange(child)\n \n def addChildren(self, childs):\n self._real_item.addChildren(childs)\n for child in childs:\n TreeWidget.informTreeWidgetChange(child)\n\n def insertChild(self, index, child):\n self._real_item.insertChild(index, child)\n TreeWidget.informTreeWidgetChange(child)\n \n def insertChildren(self, index, childs):\n self._real_item.addChildren(index, childs)\n for child in childs:\n TreeWidget.informTreeWidgetChange(child)\n \n def removeChild(self, child):\n self._real_item.removeChild(child)\n TreeWidget.informTreeWidgetChange(child)\n \n def takeChild(self, index):\n child = self._real_item.takeChild(index)\n TreeWidget.informTreeWidgetChange(child)\n return child\n \n def takeChildren(self):\n childs = self._real_item.takeChildren()\n for child in childs:\n TreeWidget.informTreeWidgetChange(child)\n return childs\n\n def __getattr__(self, attr):\n return getattr(self._real_item, attr)\n", "path": "pyqtgraph/widgets/TreeWidget.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nfrom ..Qt import QtGui, QtCore\nfrom weakref import *\n\n__all__ = ['TreeWidget', 'TreeWidgetItem']\n\n\nclass TreeWidget(QtGui.QTreeWidget):\n \"\"\"Extends QTreeWidget to allow internal drag/drop with widgets in the tree.\n Also maintains the expanded state of subtrees as they are moved.\n This class demonstrates the absurd lengths one must go to to make drag/drop work.\"\"\"\n \n sigItemMoved = QtCore.Signal(object, object, object) # (item, parent, index)\n sigItemCheckStateChanged = QtCore.Signal(object, object)\n sigItemTextChanged = QtCore.Signal(object, object)\n sigColumnCountChanged = QtCore.Signal(object, object) # self, count\n \n def __init__(self, parent=None):\n QtGui.QTreeWidget.__init__(self, parent)\n \n # wrap this item so that we can propagate tree change information\n # to children.\n self._invRootItem = InvisibleRootItem(QtGui.QTreeWidget.invisibleRootItem(self))\n \n self.setAcceptDrops(True)\n self.setDragEnabled(True)\n self.setEditTriggers(QtGui.QAbstractItemView.EditKeyPressed|QtGui.QAbstractItemView.SelectedClicked)\n self.placeholders = []\n self.childNestingLimit = None\n self.itemClicked.connect(self._itemClicked)\n\n def setItemWidget(self, item, col, wid):\n \"\"\"\n Overrides QTreeWidget.setItemWidget such that widgets are added inside an invisible wrapper widget.\n This makes it possible to move the item in and out of the tree without its widgets being automatically deleted.\n \"\"\"\n w = QtGui.QWidget() ## foster parent / surrogate child widget\n l = QtGui.QVBoxLayout()\n l.setContentsMargins(0,0,0,0)\n w.setLayout(l)\n w.setSizePolicy(wid.sizePolicy())\n w.setMinimumHeight(wid.minimumHeight())\n w.setMinimumWidth(wid.minimumWidth())\n l.addWidget(wid)\n w.realChild = wid\n self.placeholders.append(w)\n QtGui.QTreeWidget.setItemWidget(self, item, col, w)\n\n def itemWidget(self, item, col):\n w = QtGui.QTreeWidget.itemWidget(self, item, col)\n if w is not None and hasattr(w, 'realChild'):\n w = w.realChild\n return w\n\n def dropMimeData(self, parent, index, data, action):\n item = self.currentItem()\n p = parent\n #print \"drop\", item, \"->\", parent, index\n while True:\n if p is None:\n break\n if p is item:\n return False\n #raise Exception(\"Can not move item into itself.\")\n p = p.parent()\n \n if not self.itemMoving(item, parent, index):\n return False\n \n currentParent = item.parent()\n if currentParent is None:\n currentParent = self.invisibleRootItem()\n if parent is None:\n parent = self.invisibleRootItem()\n \n if currentParent is parent and index > parent.indexOfChild(item):\n index -= 1\n \n self.prepareMove(item)\n \n currentParent.removeChild(item)\n #print \" insert child to index\", index\n parent.insertChild(index, item) ## index will not be correct\n self.setCurrentItem(item)\n \n self.recoverMove(item)\n #self.emit(QtCore.SIGNAL('itemMoved'), item, parent, index)\n self.sigItemMoved.emit(item, parent, index)\n return True\n\n def itemMoving(self, item, parent, index):\n \"\"\"Called when item has been dropped elsewhere in the tree.\n Return True to accept the move, False to reject.\"\"\"\n return True\n \n def prepareMove(self, item):\n item.__widgets = []\n item.__expanded = item.isExpanded()\n for i in range(self.columnCount()):\n w = self.itemWidget(item, i)\n item.__widgets.append(w)\n if w is None:\n continue\n w.setParent(None)\n for i in range(item.childCount()):\n self.prepareMove(item.child(i))\n \n def recoverMove(self, item):\n for i in range(self.columnCount()):\n w = 
item.__widgets[i]\n if w is None:\n continue\n self.setItemWidget(item, i, w)\n for i in range(item.childCount()):\n self.recoverMove(item.child(i))\n \n item.setExpanded(False) ## Items do not re-expand correctly unless they are collapsed first.\n QtGui.QApplication.instance().processEvents()\n item.setExpanded(item.__expanded)\n \n def collapseTree(self, item):\n item.setExpanded(False)\n for i in range(item.childCount()):\n self.collapseTree(item.child(i))\n \n def removeTopLevelItem(self, item):\n for i in range(self.topLevelItemCount()):\n if self.topLevelItem(i) is item:\n self.takeTopLevelItem(i)\n return\n raise Exception(\"Item '%s' not in top-level items.\" % str(item))\n \n def listAllItems(self, item=None):\n items = []\n if item != None:\n items.append(item)\n else:\n item = self.invisibleRootItem()\n \n for cindex in range(item.childCount()):\n foundItems = self.listAllItems(item=item.child(cindex))\n for f in foundItems:\n items.append(f)\n return items\n \n def dropEvent(self, ev):\n QtGui.QTreeWidget.dropEvent(self, ev)\n self.updateDropFlags()\n\n def updateDropFlags(self):\n ### intended to put a limit on how deep nests of children can go.\n ### self.childNestingLimit is upheld when moving items without children, but if the item being moved has children/grandchildren, the children/grandchildren\n ### can end up over the childNestingLimit. \n if self.childNestingLimit == None:\n pass # enable drops in all items (but only if there are drops that aren't enabled? for performance...)\n else:\n items = self.listAllItems()\n for item in items:\n parentCount = 0\n p = item.parent()\n while p is not None:\n parentCount += 1\n p = p.parent()\n if parentCount >= self.childNestingLimit:\n item.setFlags(item.flags() & (~QtCore.Qt.ItemIsDropEnabled))\n else:\n item.setFlags(item.flags() | QtCore.Qt.ItemIsDropEnabled)\n\n @staticmethod\n def informTreeWidgetChange(item):\n if hasattr(item, 'treeWidgetChanged'):\n item.treeWidgetChanged()\n for i in range(item.childCount()):\n TreeWidget.informTreeWidgetChange(item.child(i))\n \n def addTopLevelItem(self, item):\n QtGui.QTreeWidget.addTopLevelItem(self, item)\n self.informTreeWidgetChange(item)\n\n def addTopLevelItems(self, items):\n QtGui.QTreeWidget.addTopLevelItems(self, items)\n for item in items:\n self.informTreeWidgetChange(item)\n \n def insertTopLevelItem(self, index, item):\n QtGui.QTreeWidget.insertTopLevelItem(self, index, item)\n self.informTreeWidgetChange(item)\n\n def insertTopLevelItems(self, index, items):\n QtGui.QTreeWidget.insertTopLevelItems(self, index, items)\n for item in items:\n self.informTreeWidgetChange(item)\n \n def takeTopLevelItem(self, index):\n item = self.topLevelItem(index)\n if item is not None:\n self.prepareMove(item)\n item = QtGui.QTreeWidget.takeTopLevelItem(self, index)\n self.prepareMove(item)\n self.informTreeWidgetChange(item)\n return item\n\n def topLevelItems(self):\n return [self.topLevelItem(i) for i in range(self.topLevelItemCount())]\n \n def clear(self):\n items = self.topLevelItems()\n for item in items:\n self.prepareMove(item)\n QtGui.QTreeWidget.clear(self)\n \n ## Why do we want to do this? It causes RuntimeErrors. 
\n #for item in items:\n #self.informTreeWidgetChange(item)\n\n def invisibleRootItem(self):\n return self._invRootItem\n \n def itemFromIndex(self, index):\n \"\"\"Return the item and column corresponding to a QModelIndex.\n \"\"\"\n col = index.column()\n rows = []\n while index.row() >= 0:\n rows.insert(0, index.row())\n index = index.parent()\n item = self.topLevelItem(rows[0])\n for row in rows[1:]:\n item = item.child(row)\n return item, col\n\n def setColumnCount(self, c):\n QtGui.QTreeWidget.setColumnCount(self, c)\n self.sigColumnCountChanged.emit(self, c)\n\n def _itemClicked(self, item, col):\n if hasattr(item, 'itemClicked'):\n item.itemClicked(col)\n\n\nclass TreeWidgetItem(QtGui.QTreeWidgetItem):\n \"\"\"\n TreeWidgetItem that keeps track of its own widgets and expansion state.\n \n * Widgets may be added to columns before the item is added to a tree.\n * Expanded state may be set before item is added to a tree.\n * Adds setCheked and isChecked methods.\n * Adds addChildren, insertChildren, and takeChildren methods.\n \"\"\"\n def __init__(self, *args):\n QtGui.QTreeWidgetItem.__init__(self, *args)\n self._widgets = {} # col: widget\n self._tree = None\n self._expanded = False\n \n def setChecked(self, column, checked):\n self.setCheckState(column, QtCore.Qt.Checked if checked else QtCore.Qt.Unchecked)\n\n def isChecked(self, col):\n return self.checkState(col) == QtCore.Qt.Checked\n \n def setExpanded(self, exp):\n self._expanded = exp\n QtGui.QTreeWidgetItem.setExpanded(self, exp)\n \n def isExpanded(self):\n return self._expanded\n \n def setWidget(self, column, widget):\n if column in self._widgets:\n self.removeWidget(column)\n self._widgets[column] = widget\n tree = self.treeWidget()\n if tree is None:\n return\n else:\n tree.setItemWidget(self, column, widget)\n \n def removeWidget(self, column):\n del self._widgets[column]\n tree = self.treeWidget()\n if tree is None:\n return\n tree.removeItemWidget(self, column)\n \n def treeWidgetChanged(self):\n tree = self.treeWidget()\n if self._tree is tree:\n return\n self._tree = self.treeWidget()\n if tree is None:\n return\n for col, widget in self._widgets.items():\n tree.setItemWidget(self, col, widget)\n QtGui.QTreeWidgetItem.setExpanded(self, self._expanded)\n \n def childItems(self):\n return [self.child(i) for i in range(self.childCount())]\n \n def addChild(self, child):\n QtGui.QTreeWidgetItem.addChild(self, child)\n TreeWidget.informTreeWidgetChange(child)\n \n def addChildren(self, childs):\n QtGui.QTreeWidgetItem.addChildren(self, childs)\n for child in childs:\n TreeWidget.informTreeWidgetChange(child)\n\n def insertChild(self, index, child):\n QtGui.QTreeWidgetItem.insertChild(self, index, child)\n TreeWidget.informTreeWidgetChange(child)\n \n def insertChildren(self, index, childs):\n QtGui.QTreeWidgetItem.addChildren(self, index, childs)\n for child in childs:\n TreeWidget.informTreeWidgetChange(child)\n \n def removeChild(self, child):\n QtGui.QTreeWidgetItem.removeChild(self, child)\n TreeWidget.informTreeWidgetChange(child)\n \n def takeChild(self, index):\n child = QtGui.QTreeWidgetItem.takeChild(self, index)\n TreeWidget.informTreeWidgetChange(child)\n return child\n \n def takeChildren(self):\n childs = QtGui.QTreeWidgetItem.takeChildren(self)\n for child in childs:\n TreeWidget.informTreeWidgetChange(child)\n return childs\n \n def setData(self, column, role, value):\n # credit: ekhumoro\n # 
http://stackoverflow.com/questions/13662020/how-to-implement-itemchecked-and-itemunchecked-signals-for-qtreewidget-in-pyqt4\n checkstate = self.checkState(column)\n text = self.text(column)\n QtGui.QTreeWidgetItem.setData(self, column, role, value)\n \n treewidget = self.treeWidget()\n if treewidget is None:\n return\n if (role == QtCore.Qt.CheckStateRole and checkstate != self.checkState(column)):\n treewidget.sigItemCheckStateChanged.emit(self, column)\n elif (role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole) and text != self.text(column)):\n treewidget.sigItemTextChanged.emit(self, column)\n\n def itemClicked(self, col):\n \"\"\"Called when this item is clicked on.\n \n Override this method to react to user clicks.\n \"\"\"\n\n \nclass InvisibleRootItem(object):\n \"\"\"Wrapper around a TreeWidget's invisible root item that calls\n TreeWidget.informTreeWidgetChange when child items are added/removed.\n \"\"\"\n def __init__(self, item):\n self._real_item = item\n \n def addChild(self, child):\n self._real_item.addChild(child)\n TreeWidget.informTreeWidgetChange(child)\n \n def addChildren(self, childs):\n self._real_item.addChildren(childs)\n for child in childs:\n TreeWidget.informTreeWidgetChange(child)\n\n def insertChild(self, index, child):\n self._real_item.insertChild(index, child)\n TreeWidget.informTreeWidgetChange(child)\n \n def insertChildren(self, index, childs):\n self._real_item.addChildren(index, childs)\n for child in childs:\n TreeWidget.informTreeWidgetChange(child)\n \n def removeChild(self, child):\n self._real_item.removeChild(child)\n TreeWidget.informTreeWidgetChange(child)\n \n def takeChild(self, index):\n child = self._real_item.takeChild(index)\n TreeWidget.informTreeWidgetChange(child)\n return child\n \n def takeChildren(self):\n childs = self._real_item.takeChildren()\n for child in childs:\n TreeWidget.informTreeWidgetChange(child)\n return childs\n\n def __getattr__(self, attr):\n return getattr(self._real_item, attr)\n", "path": "pyqtgraph/widgets/TreeWidget.py" } ]
diff --git a/pyqtgraph/widgets/TreeWidget.py b/pyqtgraph/widgets/TreeWidget.py index b0ec54c12d..8c55ae2f9b 100644 --- a/pyqtgraph/widgets/TreeWidget.py +++ b/pyqtgraph/widgets/TreeWidget.py @@ -201,7 +201,7 @@ def takeTopLevelItem(self, index): return item def topLevelItems(self): - return map(self.topLevelItem, xrange(self.topLevelItemCount())) + return [self.topLevelItem(i) for i in range(self.topLevelItemCount())] def clear(self): items = self.topLevelItems()
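The one-line diff above is the entire fix. On Python 3, `xrange` no longer exists, so `topLevelItems()` raised `NameError`; and even with `range`, `map` now returns a lazy iterator, which `clear()` would consume only after it had already started removing items from the tree. Below is a minimal sketch of that failure mode with toy values (not the actual `QTreeWidget`), illustrating why the eager list comprehension is the safe form:

```
# Minimal sketch (toy data, not the widget itself): on Python 3 the old
# expression fails outright, and lazy vs. eager matters because clear()
# mutates the tree while iterating over its top-level items.
try:
    items = map(str, xrange(3))          # Python 3: NameError, xrange is gone
except NameError:
    items = [str(i) for i in range(3)]   # eager list; safe to iterate even
                                         # after the underlying container changes
print(items)  # ['0', '1', '2']
```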
PaddlePaddle__models-2261
deeplabv3+ repeatedly reports warnings
Under paddle 1.4.1, because deeplabv3+ has GPU memory optimization enabled, the following warning is reported over and over: <img width="956" alt="db83046567521a831348d8eea6f2e46a" src="https://user-images.githubusercontent.com/46314656/57190981-398cac80-6f53-11e9-9ffc-3a3c7b379d82.png">
[ { "content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nif 'FLAGS_fraction_of_gpu_memory_to_use' not in os.environ:\n os.environ['FLAGS_fraction_of_gpu_memory_to_use'] = '0.98'\n\nimport paddle\nimport paddle.fluid as fluid\nimport numpy as np\nimport argparse\nfrom reader import CityscapeDataset\nimport reader\nimport models\nimport time\nimport contextlib\nimport paddle.fluid.profiler as profiler\nimport utility\n\nparser = argparse.ArgumentParser()\nadd_arg = lambda *args: utility.add_arguments(*args, argparser=parser)\n\n# yapf: disable\nadd_arg('batch_size', int, 4, \"The number of images in each batch during training.\")\nadd_arg('train_crop_size', int, 769, \"Image crop size during training.\")\nadd_arg('base_lr', float, 0.001, \"The base learning rate for model training.\")\nadd_arg('total_step', int, 500000, \"Number of the training step.\")\nadd_arg('init_weights_path', str, None, \"Path of the initial weights in paddlepaddle format.\")\nadd_arg('save_weights_path', str, None, \"Path of the saved weights during training.\")\nadd_arg('dataset_path', str, None, \"Cityscape dataset path.\")\nadd_arg('parallel', bool, True, \"using ParallelExecutor.\")\nadd_arg('use_gpu', bool, True, \"Whether use GPU or CPU.\")\nadd_arg('num_classes', int, 19, \"Number of classes.\")\nadd_arg('load_logit_layer', bool, True, \"Load last logit fc layer or not. If you are training with different number of classes, you should set to False.\")\nadd_arg('memory_optimize', bool, True, \"Using memory optimizer.\")\nadd_arg('norm_type', str, 'bn', \"Normalization type, should be 'bn' or 'gn'.\")\nadd_arg('profile', bool, False, \"Enable profiler.\")\nadd_arg('use_py_reader', bool, True, \"Use py reader.\")\nparser.add_argument(\n '--enable_ce',\n action='store_true',\n help='If set, run the task with continuous evaluation logs. 
Users can ignore this agument.')\n#yapf: enable\n\[email protected]\ndef profile_context(profile=True):\n if profile:\n with profiler.profiler('All', 'total', '/tmp/profile_file2'):\n yield\n else:\n yield\n\ndef load_model():\n if os.path.isdir(args.init_weights_path):\n load_vars = [\n x for x in tp.list_vars()\n if isinstance(x, fluid.framework.Parameter) and x.name.find('logit') ==\n -1\n ]\n if args.load_logit_layer:\n fluid.io.load_params(\n exe, dirname=args.init_weights_path, main_program=tp)\n else:\n fluid.io.load_vars(exe, dirname=args.init_weights_path, vars=load_vars)\n else:\n fluid.io.load_params(\n exe,\n dirname=\"\",\n filename=args.init_weights_path,\n main_program=tp)\n\n\n\ndef save_model():\n assert not os.path.isfile(args.save_weights_path)\n fluid.io.save_params(\n exe, dirname=args.save_weights_path, main_program=tp)\n\n\ndef loss(logit, label):\n label_nignore = fluid.layers.less_than(\n label.astype('float32'),\n fluid.layers.assign(np.array([num_classes], 'float32')),\n force_cpu=False).astype('float32')\n logit = fluid.layers.transpose(logit, [0, 2, 3, 1])\n logit = fluid.layers.reshape(logit, [-1, num_classes])\n label = fluid.layers.reshape(label, [-1, 1])\n label = fluid.layers.cast(label, 'int64')\n label_nignore = fluid.layers.reshape(label_nignore, [-1, 1])\n logit = fluid.layers.softmax(logit, use_cudnn=False)\n loss = fluid.layers.cross_entropy(logit, label, ignore_index=255)\n label_nignore.stop_gradient = True\n label.stop_gradient = True\n return loss, label_nignore\n\n\nargs = parser.parse_args()\nutility.print_arguments(args)\n\nmodels.clean()\nmodels.bn_momentum = 0.9997\nmodels.dropout_keep_prop = 0.9\nmodels.label_number = args.num_classes\nmodels.default_norm_type = args.norm_type\ndeeplabv3p = models.deeplabv3p\n\nsp = fluid.Program()\ntp = fluid.Program()\n\n# only for ce\nif args.enable_ce:\n SEED = 102\n sp.random_seed = SEED\n tp.random_seed = SEED\n\ncrop_size = args.train_crop_size\nbatch_size = args.batch_size\nimage_shape = [crop_size, crop_size]\nreader.default_config['crop_size'] = crop_size\nreader.default_config['shuffle'] = True\nnum_classes = args.num_classes\nweight_decay = 0.00004\n\nbase_lr = args.base_lr\ntotal_step = args.total_step\n\nwith fluid.program_guard(tp, sp):\n if args.use_py_reader:\n batch_size_each = batch_size // fluid.core.get_cuda_device_count()\n py_reader = fluid.layers.py_reader(capacity=64,\n shapes=[[batch_size_each, 3] + image_shape, [batch_size_each] + image_shape],\n dtypes=['float32', 'int32'])\n img, label = fluid.layers.read_file(py_reader)\n else:\n img = fluid.layers.data(\n name='img', shape=[3] + image_shape, dtype='float32')\n label = fluid.layers.data(name='label', shape=image_shape, dtype='int32')\n logit = deeplabv3p(img)\n pred = fluid.layers.argmax(logit, axis=1).astype('int32')\n loss, mask = loss(logit, label)\n lr = fluid.layers.polynomial_decay(\n base_lr, total_step, end_learning_rate=0, power=0.9)\n area = fluid.layers.elementwise_max(\n fluid.layers.reduce_mean(mask),\n fluid.layers.assign(np.array(\n [0.1], dtype=np.float32)))\n loss_mean = fluid.layers.reduce_mean(loss) / area\n\n opt = fluid.optimizer.Momentum(\n lr,\n momentum=0.9,\n regularization=fluid.regularizer.L2DecayRegularizer(\n regularization_coeff=weight_decay))\n optimize_ops, params_grads = opt.minimize(loss_mean, startup_program=sp)\n # ir memory optimizer has some issues, we need to seed grad persistable to\n # avoid this issue\n for p,g in params_grads: g.persistable = True\n\n\nexec_strategy = 
fluid.ExecutionStrategy()\nexec_strategy.num_threads = fluid.core.get_cuda_device_count()\nexec_strategy.num_iteration_per_drop_scope = 100\nbuild_strategy = fluid.BuildStrategy()\nif args.memory_optimize:\n build_strategy.fuse_relu_depthwise_conv = True\n build_strategy.enable_inplace = True\n build_strategy.memory_optimize = True\n\nplace = fluid.CPUPlace()\nif args.use_gpu:\n place = fluid.CUDAPlace(0)\nexe = fluid.Executor(place)\nexe.run(sp)\n\nif args.init_weights_path:\n print(\"load from:\", args.init_weights_path)\n load_model()\n\ndataset = reader.CityscapeDataset(args.dataset_path, 'train')\n\nif args.parallel:\n binary = fluid.compiler.CompiledProgram(tp).with_data_parallel(\n loss_name=loss_mean.name,\n build_strategy=build_strategy,\n exec_strategy=exec_strategy)\nelse:\n binary = fluid.compiler.CompiledProgram(tp)\n\nif args.use_py_reader:\n assert(batch_size % fluid.core.get_cuda_device_count() == 0)\n def data_gen():\n batches = dataset.get_batch_generator(\n batch_size // fluid.core.get_cuda_device_count(),\n total_step * fluid.core.get_cuda_device_count())\n for b in batches:\n yield b[1], b[2]\n py_reader.decorate_tensor_provider(data_gen)\n py_reader.start()\nelse:\n batches = dataset.get_batch_generator(batch_size, total_step)\ntotal_time = 0.0\nepoch_idx = 0\ntrain_loss = 0\n\nwith profile_context(args.profile):\n for i in range(total_step):\n epoch_idx += 1\n begin_time = time.time()\n prev_start_time = time.time()\n if not args.use_py_reader:\n _, imgs, labels, names = next(batches)\n train_loss, = exe.run(binary,\n feed={'img': imgs,\n 'label': labels}, fetch_list=[loss_mean])\n else:\n train_loss, = exe.run(binary, fetch_list=[loss_mean])\n train_loss = np.mean(train_loss)\n end_time = time.time()\n total_time += end_time - begin_time\n if i % 100 == 0:\n print(\"Model is saved to\", args.save_weights_path)\n save_model()\n print(\"step {:d}, loss: {:.6f}, step_time_cost: {:.3f}\".format(\n i, train_loss, end_time - prev_start_time))\n\nprint(\"Training done. Model is saved to\", args.save_weights_path)\nsave_model()\n\nif args.enable_ce:\n gpu_num = fluid.core.get_cuda_device_count()\n print(\"kpis\\teach_pass_duration_card%s\\t%s\" %\n (gpu_num, total_time / epoch_idx))\n print(\"kpis\\ttrain_loss_card%s\\t%s\" % (gpu_num, train_loss))\n", "path": "PaddleCV/deeplabv3+/train.py" } ]
[ { "content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nif 'FLAGS_fraction_of_gpu_memory_to_use' not in os.environ:\n os.environ['FLAGS_fraction_of_gpu_memory_to_use'] = '0.98'\n\nimport paddle\nimport paddle.fluid as fluid\nimport numpy as np\nimport argparse\nfrom reader import CityscapeDataset\nimport reader\nimport models\nimport time\nimport contextlib\nimport paddle.fluid.profiler as profiler\nimport utility\n\nparser = argparse.ArgumentParser()\nadd_arg = lambda *args: utility.add_arguments(*args, argparser=parser)\n\n# yapf: disable\nadd_arg('batch_size', int, 4, \"The number of images in each batch during training.\")\nadd_arg('train_crop_size', int, 769, \"Image crop size during training.\")\nadd_arg('base_lr', float, 0.001, \"The base learning rate for model training.\")\nadd_arg('total_step', int, 500000, \"Number of the training step.\")\nadd_arg('init_weights_path', str, None, \"Path of the initial weights in paddlepaddle format.\")\nadd_arg('save_weights_path', str, None, \"Path of the saved weights during training.\")\nadd_arg('dataset_path', str, None, \"Cityscape dataset path.\")\nadd_arg('parallel', bool, True, \"using ParallelExecutor.\")\nadd_arg('use_gpu', bool, True, \"Whether use GPU or CPU.\")\nadd_arg('num_classes', int, 19, \"Number of classes.\")\nadd_arg('load_logit_layer', bool, True, \"Load last logit fc layer or not. If you are training with different number of classes, you should set to False.\")\nadd_arg('memory_optimize', bool, True, \"Using memory optimizer.\")\nadd_arg('norm_type', str, 'bn', \"Normalization type, should be 'bn' or 'gn'.\")\nadd_arg('profile', bool, False, \"Enable profiler.\")\nadd_arg('use_py_reader', bool, True, \"Use py reader.\")\nparser.add_argument(\n '--enable_ce',\n action='store_true',\n help='If set, run the task with continuous evaluation logs. 
Users can ignore this agument.')\n#yapf: enable\n\[email protected]\ndef profile_context(profile=True):\n if profile:\n with profiler.profiler('All', 'total', '/tmp/profile_file2'):\n yield\n else:\n yield\n\ndef load_model():\n if os.path.isdir(args.init_weights_path):\n load_vars = [\n x for x in tp.list_vars()\n if isinstance(x, fluid.framework.Parameter) and x.name.find('logit') ==\n -1\n ]\n if args.load_logit_layer:\n fluid.io.load_params(\n exe, dirname=args.init_weights_path, main_program=tp)\n else:\n fluid.io.load_vars(exe, dirname=args.init_weights_path, vars=load_vars)\n else:\n fluid.io.load_params(\n exe,\n dirname=\"\",\n filename=args.init_weights_path,\n main_program=tp)\n\n\n\ndef save_model():\n assert not os.path.isfile(args.save_weights_path)\n fluid.io.save_params(\n exe, dirname=args.save_weights_path, main_program=tp)\n\n\ndef loss(logit, label):\n label_nignore = fluid.layers.less_than(\n label.astype('float32'),\n fluid.layers.assign(np.array([num_classes], 'float32')),\n force_cpu=False).astype('float32')\n logit = fluid.layers.transpose(logit, [0, 2, 3, 1])\n logit = fluid.layers.reshape(logit, [-1, num_classes])\n label = fluid.layers.reshape(label, [-1, 1])\n label = fluid.layers.cast(label, 'int64')\n label_nignore = fluid.layers.reshape(label_nignore, [-1, 1])\n logit = fluid.layers.softmax(logit, use_cudnn=False)\n loss = fluid.layers.cross_entropy(logit, label, ignore_index=255)\n label_nignore.stop_gradient = True\n label.stop_gradient = True\n return loss, label_nignore\n\n\nargs = parser.parse_args()\nutility.print_arguments(args)\n\nmodels.clean()\nmodels.bn_momentum = 0.9997\nmodels.dropout_keep_prop = 0.9\nmodels.label_number = args.num_classes\nmodels.default_norm_type = args.norm_type\ndeeplabv3p = models.deeplabv3p\n\nsp = fluid.Program()\ntp = fluid.Program()\n\n# only for ce\nif args.enable_ce:\n SEED = 102\n sp.random_seed = SEED\n tp.random_seed = SEED\n\ncrop_size = args.train_crop_size\nbatch_size = args.batch_size\nimage_shape = [crop_size, crop_size]\nreader.default_config['crop_size'] = crop_size\nreader.default_config['shuffle'] = True\nnum_classes = args.num_classes\nweight_decay = 0.00004\n\nbase_lr = args.base_lr\ntotal_step = args.total_step\n\nwith fluid.program_guard(tp, sp):\n if args.use_py_reader:\n batch_size_each = batch_size // fluid.core.get_cuda_device_count()\n py_reader = fluid.layers.py_reader(capacity=64,\n shapes=[[batch_size_each, 3] + image_shape, [batch_size_each] + image_shape],\n dtypes=['float32', 'int32'])\n img, label = fluid.layers.read_file(py_reader)\n else:\n img = fluid.layers.data(\n name='img', shape=[3] + image_shape, dtype='float32')\n label = fluid.layers.data(name='label', shape=image_shape, dtype='int32')\n logit = deeplabv3p(img)\n pred = fluid.layers.argmax(logit, axis=1).astype('int32')\n loss, mask = loss(logit, label)\n lr = fluid.layers.polynomial_decay(\n base_lr, total_step, end_learning_rate=0, power=0.9)\n area = fluid.layers.elementwise_max(\n fluid.layers.reduce_mean(mask),\n fluid.layers.assign(np.array(\n [0.1], dtype=np.float32)))\n loss_mean = fluid.layers.reduce_mean(loss) / area\n loss_mean.persistable = True\n\n opt = fluid.optimizer.Momentum(\n lr,\n momentum=0.9,\n regularization=fluid.regularizer.L2DecayRegularizer(\n regularization_coeff=weight_decay))\n optimize_ops, params_grads = opt.minimize(loss_mean, startup_program=sp)\n # ir memory optimizer has some issues, we need to seed grad persistable to\n # avoid this issue\n for p,g in params_grads: g.persistable = 
True\n\n\nexec_strategy = fluid.ExecutionStrategy()\nexec_strategy.num_threads = fluid.core.get_cuda_device_count()\nexec_strategy.num_iteration_per_drop_scope = 100\nbuild_strategy = fluid.BuildStrategy()\nif args.memory_optimize:\n build_strategy.fuse_relu_depthwise_conv = True\n build_strategy.enable_inplace = True\n build_strategy.memory_optimize = True\n\nplace = fluid.CPUPlace()\nif args.use_gpu:\n place = fluid.CUDAPlace(0)\nexe = fluid.Executor(place)\nexe.run(sp)\n\nif args.init_weights_path:\n print(\"load from:\", args.init_weights_path)\n load_model()\n\ndataset = reader.CityscapeDataset(args.dataset_path, 'train')\n\nif args.parallel:\n binary = fluid.compiler.CompiledProgram(tp).with_data_parallel(\n loss_name=loss_mean.name,\n build_strategy=build_strategy,\n exec_strategy=exec_strategy)\nelse:\n binary = fluid.compiler.CompiledProgram(tp)\n\nif args.use_py_reader:\n assert(batch_size % fluid.core.get_cuda_device_count() == 0)\n def data_gen():\n batches = dataset.get_batch_generator(\n batch_size // fluid.core.get_cuda_device_count(),\n total_step * fluid.core.get_cuda_device_count())\n for b in batches:\n yield b[1], b[2]\n py_reader.decorate_tensor_provider(data_gen)\n py_reader.start()\nelse:\n batches = dataset.get_batch_generator(batch_size, total_step)\ntotal_time = 0.0\nepoch_idx = 0\ntrain_loss = 0\n\nwith profile_context(args.profile):\n for i in range(total_step):\n epoch_idx += 1\n begin_time = time.time()\n prev_start_time = time.time()\n if not args.use_py_reader:\n _, imgs, labels, names = next(batches)\n train_loss, = exe.run(binary,\n feed={'img': imgs,\n 'label': labels}, fetch_list=[loss_mean])\n else:\n train_loss, = exe.run(binary, fetch_list=[loss_mean])\n train_loss = np.mean(train_loss)\n end_time = time.time()\n total_time += end_time - begin_time\n if i % 100 == 0:\n print(\"Model is saved to\", args.save_weights_path)\n save_model()\n print(\"step {:d}, loss: {:.6f}, step_time_cost: {:.3f}\".format(\n i, train_loss, end_time - prev_start_time))\n\nprint(\"Training done. Model is saved to\", args.save_weights_path)\nsave_model()\n\nif args.enable_ce:\n gpu_num = fluid.core.get_cuda_device_count()\n print(\"kpis\\teach_pass_duration_card%s\\t%s\" %\n (gpu_num, total_time / epoch_idx))\n print(\"kpis\\ttrain_loss_card%s\\t%s\" % (gpu_num, train_loss))\n", "path": "PaddleCV/deeplabv3+/train.py" } ]
diff --git a/PaddleCV/deeplabv3+/train.py b/PaddleCV/deeplabv3+/train.py index 5e983ed291..2cef945de7 100755 --- a/PaddleCV/deeplabv3+/train.py +++ b/PaddleCV/deeplabv3+/train.py @@ -145,6 +145,7 @@ def loss(logit, label): fluid.layers.assign(np.array( [0.1], dtype=np.float32))) loss_mean = fluid.layers.reduce_mean(loss) / area + loss_mean.persistable = True opt = fluid.optimizer.Momentum( lr,
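The fix is the single added line `loss_mean.persistable = True`. The training script already marks every gradient persistable with the comment "ir memory optimizer has some issues", and the same reasoning appears to apply to the fetched loss: a variable fetched on every step but not marked persistable is a candidate for buffer reuse under `build_strategy.memory_optimize`, which triggers the warning each `exe.run`. A hedged sketch of the general pattern follows, using a toy network and illustrative names rather than the deeplabv3+ model:

```
# Sketch (Paddle 1.4-era fluid API; toy network, names are illustrative):
# any variable fetched on every step should be marked persistable so the
# IR memory optimizer does not reuse its buffer and warn on each run.
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
pred = fluid.layers.fc(input=x, size=1)
loss = fluid.layers.reduce_mean(fluid.layers.square_error_cost(pred, y))
loss.persistable = True  # the one-line fix from the diff, applied here

fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)

build_strategy = fluid.BuildStrategy()
build_strategy.memory_optimize = True  # the option that triggered the warning
binary = fluid.compiler.CompiledProgram(
    fluid.default_main_program()).with_data_parallel(
        loss_name=loss.name, build_strategy=build_strategy)
```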
kivy__kivy-5366
setup.py should not depend on the existence of `git` on the system ### Versions * Python: 3.5.3 * OS: nixos * Kivy: 1.10.0 * Kivy installation method: nix ### Description I tried to package kivy for nixos, a Purely Functional Linux Distribution. ``` Traceback (most recent call last): File "nix_run_setup.py", line 8, in <module> exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec')) File "setup.py", line 934, in <module> version=get_version(), File "setup.py", line 47, in get_version ['git', 'rev-parse', 'HEAD'] File "/nix/store/kgh0z7i2n7xl6pvanmi0v8dw4qy63932-python3-3.5.3/lib/python3.5/subprocess.py", line 316, in check_output **kwargs).stdout File "/nix/store/kgh0z7i2n7xl6pvanmi0v8dw4qy63932-python3-3.5.3/lib/python3.5/subprocess.py", line 383, in run with Popen(*popenargs, **kwargs) as process: File "/nix/store/kgh0z7i2n7xl6pvanmi0v8dw4qy63932-python3-3.5.3/lib/python3.5/subprocess.py", line 676, in __init__ restore_signals, start_new_session) File "/nix/store/kgh0z7i2n7xl6pvanmi0v8dw4qy63932-python3-3.5.3/lib/python3.5/subprocess.py", line 1282, in _execute_child raise child_exception_type(errno_num, err_msg) FileNotFoundError: [Errno 2] No such file or directory: 'git' ``` This can be attributed to `setup.py` attempting to run `git`. This was introduced here: https://github.com/kivy/kivy/pull/4949/commits/ffcb437fc6d183f3e34db180c9b0f0220bf2e08b I would postulate that the install-time dependency on `git` is unnecessary and bad practice.
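The traceback makes the root cause explicit: `subprocess` raises `FileNotFoundError` when the `git` binary itself is absent, while `get_version()` only catches `CalledProcessError` (i.e. a git that runs but exits non-zero). A minimal sketch of a git-tolerant fallback is shown below; this is an illustration of the defensive pattern, not necessarily the exact fix Kivy shipped:

```
# Sketch of a defensive get_git_revision (illustrative, not Kivy's exact fix):
# also catch OSError, since FileNotFoundError -- raised when the `git`
# executable is missing -- is a subclass of OSError on Python 3.
from subprocess import check_output, CalledProcessError

def get_git_revision():
    try:
        return check_output(
            ['git', 'rev-parse', 'HEAD']
        ).strip().decode('ascii')
    except (CalledProcessError, OSError):
        # Covers both "git ran and failed" and "git is not installed",
        # so `pip install` works in git-free sandboxes such as Nix builds.
        return 'Unknown'
```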
[ { "content": "#\n# Kivy - Cross-platform UI framework\n# https://kivy.org/\n#\nfrom __future__ import print_function\n\nimport sys\nbuild_examples = False\nif \"--build_examples\" in sys.argv:\n build_examples = True\n sys.argv.remove(\"--build_examples\")\n\nfrom copy import deepcopy\nimport os\nfrom os.path import join, dirname, sep, exists, basename, isdir\nfrom os import walk, environ\nfrom distutils.version import LooseVersion\nfrom distutils.sysconfig import get_python_inc\nfrom collections import OrderedDict\nfrom time import sleep\nfrom subprocess import check_output, CalledProcessError\nfrom datetime import datetime\n\nif environ.get('KIVY_USE_SETUPTOOLS'):\n from setuptools import setup, Extension\n print('Using setuptools')\nelse:\n from distutils.core import setup\n from distutils.extension import Extension\n print('Using distutils')\n\n\nPY3 = sys.version > '3'\n\nif PY3: # fix error with py3's LooseVersion comparisons\n def ver_equal(self, other):\n return self.version == other\n\n LooseVersion.__eq__ = ver_equal\n\n\ndef get_version(filename='kivy/version.py'):\n VERSION = kivy.__version__\n DATE = datetime.utcnow().strftime('%Y%m%d')\n try:\n GIT_REVISION = check_output(\n ['git', 'rev-parse', 'HEAD']\n ).strip().decode('ascii')\n except CalledProcessError:\n GIT_REVISION = \"Unknown\"\n\n cnt = (\n \"# THIS FILE IS GENERATED FROM KIVY SETUP.PY\\n\"\n \"__version__ = '%(version)s'\\n\"\n \"__hash__ = '%(hash)s'\\n\"\n \"__date__ = '%(date)s'\\n\"\n )\n\n with open(filename, 'w') as f:\n f.write(cnt % {\n 'version': VERSION,\n 'hash': GIT_REVISION,\n 'date': DATE\n })\n return VERSION\n\n\nMIN_CYTHON_STRING = '0.23'\nMIN_CYTHON_VERSION = LooseVersion(MIN_CYTHON_STRING)\nMAX_CYTHON_STRING = '0.25.2'\nMAX_CYTHON_VERSION = LooseVersion(MAX_CYTHON_STRING)\nCYTHON_UNSUPPORTED = ()\n\n\ndef getoutput(cmd, env=None):\n import subprocess\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, env=env)\n p.wait()\n if p.returncode: # if not returncode == 0\n print('WARNING: A problem occurred while running {0} (code {1})\\n'\n .format(cmd, p.returncode))\n stderr_content = p.stderr.read()\n if stderr_content:\n print('{0}\\n'.format(stderr_content))\n return \"\"\n return p.stdout.read()\n\n\ndef pkgconfig(*packages, **kw):\n flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}\n lenviron = None\n pconfig = join(sys.prefix, 'libs', 'pkgconfig')\n\n if isdir(pconfig):\n lenviron = environ.copy()\n lenviron['PKG_CONFIG_PATH'] = '{};{}'.format(\n environ.get('PKG_CONFIG_PATH', ''), pconfig)\n cmd = 'pkg-config --libs --cflags {}'.format(' '.join(packages))\n results = getoutput(cmd, lenviron).split()\n for token in results:\n ext = token[:2].decode('utf-8')\n flag = flag_map.get(ext)\n if not flag:\n continue\n kw.setdefault(flag, []).append(token[2:].decode('utf-8'))\n return kw\n\n\n# -----------------------------------------------------------------------------\n# Determine on which platform we are\n\nplatform = sys.platform\n\n# Detect 32/64bit for OSX (http://stackoverflow.com/a/1405971/798575)\nif sys.platform == 'darwin':\n if sys.maxsize > 2 ** 32:\n osx_arch = 'x86_64'\n else:\n osx_arch = 'i386'\n\n# Detect Python for android project (http://github.com/kivy/python-for-android)\nndkplatform = environ.get('NDKPLATFORM')\nif ndkplatform is not None and environ.get('LIBLINK'):\n platform = 'android'\nkivy_ios_root = environ.get('KIVYIOSROOT', None)\nif kivy_ios_root is not None:\n platform = 'ios'\nif 
exists('/opt/vc/include/bcm_host.h'):\n platform = 'rpi'\nif exists('/usr/lib/arm-linux-gnueabihf/libMali.so'):\n platform = 'mali'\n\n# -----------------------------------------------------------------------------\n# Detect options\n#\nc_options = OrderedDict()\nc_options['use_rpi'] = platform == 'rpi'\nc_options['use_mali'] = platform == 'mali'\nc_options['use_egl'] = False\nc_options['use_opengl_es2'] = None\nc_options['use_opengl_mock'] = environ.get('READTHEDOCS', None) == 'True'\nc_options['use_sdl2'] = None\nc_options['use_ios'] = False\nc_options['use_mesagl'] = False\nc_options['use_x11'] = False\nc_options['use_gstreamer'] = None\nc_options['use_avfoundation'] = platform == 'darwin'\nc_options['use_osx_frameworks'] = platform == 'darwin'\nc_options['debug_gl'] = False\n\n# now check if environ is changing the default values\nfor key in list(c_options.keys()):\n ukey = key.upper()\n if ukey in environ:\n value = bool(int(environ[ukey]))\n print('Environ change {0} -> {1}'.format(key, value))\n c_options[key] = value\n\n\n# -----------------------------------------------------------------------------\n# Cython check\n# on python-for-android and kivy-ios, cython usage is external\n\ncython_unsupported_append = '''\n\n Please note that the following versions of Cython are not supported\n at all: {}\n'''.format(', '.join(map(str, CYTHON_UNSUPPORTED)))\n\ncython_min = '''\\\n This version of Cython is not compatible with Kivy. Please upgrade to\n at least version {0}, preferably the newest supported version {1}.\n\n If your platform provides a Cython package, make sure you have upgraded\n to the newest version. If the newest version available is still too low,\n please remove it and install the newest supported Cython via pip:\n\n pip install -I Cython=={1}{2}\\\n'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,\n cython_unsupported_append if CYTHON_UNSUPPORTED else '')\n\ncython_max = '''\\\n This version of Cython is untested with Kivy. While this version may\n work perfectly fine, it is possible that you may experience issues. If\n you do have issues, please downgrade to a supported version. It is\n best to use the newest supported version, {1}, but the minimum\n supported version is {0}.\n\n If your platform provides a Cython package, check if you can downgrade\n to a supported version. Otherwise, uninstall the platform package and\n install Cython via pip:\n\n pip install -I Cython=={1}{2}\\\n'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,\n cython_unsupported_append if CYTHON_UNSUPPORTED else '')\n\ncython_unsupported = '''\\\n This version of Cython suffers from known bugs and is unsupported.\n Please install the newest supported version, {1}, if possible, but\n the minimum supported version is {0}.\n\n If your platform provides a Cython package, check if you can install\n a supported version. 
Otherwise, uninstall the platform package and\n install Cython via pip:\n\n pip install -I Cython=={1}{2}\\\n'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,\n cython_unsupported_append)\n\nhave_cython = False\nskip_cython = False\nif platform in ('ios', 'android'):\n print('\\nCython check avoided.')\n skip_cython = True\nelse:\n try:\n # check for cython\n from Cython.Distutils import build_ext\n have_cython = True\n import Cython\n cy_version_str = Cython.__version__\n cy_ver = LooseVersion(cy_version_str)\n print('\\nDetected Cython version {}'.format(cy_version_str))\n if cy_ver < MIN_CYTHON_VERSION:\n print(cython_min)\n raise ImportError('Incompatible Cython Version')\n if cy_ver in CYTHON_UNSUPPORTED:\n print(cython_unsupported)\n raise ImportError('Incompatible Cython Version')\n if cy_ver > MAX_CYTHON_VERSION:\n print(cython_max)\n sleep(1)\n except ImportError:\n print(\"\\nCython is missing, it's required for compiling kivy !\\n\\n\")\n raise\n\nif not have_cython:\n from distutils.command.build_ext import build_ext\n\n# -----------------------------------------------------------------------------\n# Setup classes\n\n# the build path where kivy is being compiled\nsrc_path = build_path = dirname(__file__)\n\n\nclass KivyBuildExt(build_ext):\n\n def finalize_options(self):\n retval = build_ext.finalize_options(self)\n global build_path\n if (self.build_lib is not None and exists(self.build_lib) and\n not self.inplace):\n build_path = self.build_lib\n return retval\n\n def build_extensions(self):\n # build files\n config_h_fn = ('include', 'config.h')\n config_pxi_fn = ('include', 'config.pxi')\n config_py_fn = ('setupconfig.py', )\n\n # generate headers\n config_h = '// Autogenerated file for Kivy C configuration\\n'\n config_h += '#define __PY3 {0}\\n'.format(int(PY3))\n config_pxi = '# Autogenerated file for Kivy Cython configuration\\n'\n config_pxi += 'DEF PY3 = {0}\\n'.format(int(PY3))\n config_py = '# Autogenerated file for Kivy configuration\\n'\n config_py += 'PY3 = {0}\\n'.format(int(PY3))\n config_py += 'CYTHON_MIN = {0}\\nCYTHON_MAX = {1}\\n'.format(\n repr(MIN_CYTHON_STRING), repr(MAX_CYTHON_STRING))\n config_py += 'CYTHON_BAD = {0}\\n'.format(repr(', '.join(map(\n str, CYTHON_UNSUPPORTED))))\n\n # generate content\n print('Build configuration is:')\n for opt, value in c_options.items():\n value = int(bool(value))\n print(' * {0} = {1}'.format(opt, value))\n opt = opt.upper()\n config_h += '#define __{0} {1}\\n'.format(opt, value)\n config_pxi += 'DEF {0} = {1}\\n'.format(opt, value)\n config_py += '{0} = {1}\\n'.format(opt, value)\n debug = bool(self.debug)\n print(' * debug = {0}'.format(debug))\n\n config_pxi += 'DEF DEBUG = {0}\\n'.format(debug)\n config_py += 'DEBUG = {0}\\n'.format(debug)\n config_pxi += 'DEF PLATFORM = \"{0}\"\\n'.format(platform)\n config_py += 'PLATFORM = \"{0}\"\\n'.format(platform)\n for fn, content in (\n (config_h_fn, config_h), (config_pxi_fn, config_pxi),\n (config_py_fn, config_py)):\n build_fn = expand(build_path, *fn)\n if self.update_if_changed(build_fn, content):\n print('Updated {}'.format(build_fn))\n src_fn = expand(src_path, *fn)\n if src_fn != build_fn and self.update_if_changed(src_fn, content):\n print('Updated {}'.format(src_fn))\n\n c = self.compiler.compiler_type\n print('Detected compiler is {}'.format(c))\n if c != 'msvc':\n for e in self.extensions:\n e.extra_link_args += ['-lm']\n\n build_ext.build_extensions(self)\n\n def update_if_changed(self, fn, content):\n need_update = True\n if exists(fn):\n with open(fn) as 
fd:\n need_update = fd.read() != content\n if need_update:\n with open(fn, 'w') as fd:\n fd.write(content)\n return need_update\n\n\ndef _check_and_fix_sdl2_mixer(f_path):\n print(\"Check if SDL2_mixer smpeg2 have an @executable_path\")\n rpath_from = (\"@executable_path/../Frameworks/SDL2.framework\"\n \"/Versions/A/SDL2\")\n rpath_to = \"@rpath/../../../../SDL2.framework/Versions/A/SDL2\"\n smpeg2_path = (\"{}/Versions/A/Frameworks/smpeg2.framework\"\n \"/Versions/A/smpeg2\").format(f_path)\n output = getoutput((\"otool -L '{}'\").format(smpeg2_path)).decode('utf-8')\n if \"@executable_path\" not in output:\n return\n\n print(\"WARNING: Your SDL2_mixer version is invalid\")\n print(\"WARNING: The smpeg2 framework embedded in SDL2_mixer contains a\")\n print(\"WARNING: reference to @executable_path that will fail the\")\n print(\"WARNING: execution of your application.\")\n print(\"WARNING: We are going to change:\")\n print(\"WARNING: from: {}\".format(rpath_from))\n print(\"WARNING: to: {}\".format(rpath_to))\n getoutput(\"install_name_tool -change {} {} {}\".format(\n rpath_from, rpath_to, smpeg2_path))\n\n output = getoutput((\"otool -L '{}'\").format(smpeg2_path))\n if b\"@executable_path\" not in output:\n print(\"WARNING: Change successfully applied!\")\n print(\"WARNING: You'll never see this message again.\")\n else:\n print(\"WARNING: Unable to apply the changes, sorry.\")\n\n\n# -----------------------------------------------------------------------------\n# extract version (simulate doc generation, kivy will be not imported)\nenviron['KIVY_DOC_INCLUDE'] = '1'\nimport kivy\n\n# extra build commands go in the cmdclass dict {'command-name': CommandClass}\n# see tools.packaging.{platform}.build.py for custom build commands for\n# portable packages. Also e.g. 
we use build_ext command from cython if its\n# installed for c extensions.\nfrom kivy.tools.packaging.factory import FactoryBuild\ncmdclass = {\n 'build_factory': FactoryBuild,\n 'build_ext': KivyBuildExt}\n\ntry:\n # add build rules for portable packages to cmdclass\n if platform == 'win32':\n from kivy.tools.packaging.win32.build import WindowsPortableBuild\n cmdclass['build_portable'] = WindowsPortableBuild\n elif platform == 'darwin':\n from kivy.tools.packaging.osx.build import OSXPortableBuild\n cmdclass['build_portable'] = OSXPortableBuild\nexcept ImportError:\n print('User distribution detected, avoid portable command.')\n\n# Detect which opengl version headers to use\nif platform in ('android', 'darwin', 'ios', 'rpi', 'mali'):\n c_options['use_opengl_es2'] = True\nelif c_options['use_opengl_es2'] is None:\n c_options['use_opengl_es2'] = \\\n environ.get('KIVY_GRAPHICS', '').lower() == 'gles'\n\nprint('Using this graphics system: {}'.format(\n ['OpenGL', 'OpenGL ES 2'][int(c_options['use_opengl_es2'] or False)]))\n\n# check if we are in a kivy-ios build\nif platform == 'ios':\n print('Kivy-IOS project environment detect, use it.')\n print('Kivy-IOS project located at {0}'.format(kivy_ios_root))\n c_options['use_ios'] = True\n c_options['use_sdl2'] = True\n\nelif platform == 'darwin':\n if c_options['use_osx_frameworks']:\n if osx_arch == \"i386\":\n print(\"Warning: building with frameworks fail on i386\")\n else:\n print(\"OSX framework used, force to x86_64 only\")\n environ[\"ARCHFLAGS\"] = environ.get(\"ARCHFLAGS\", \"-arch x86_64\")\n print(\"OSX ARCHFLAGS are: {}\".format(environ[\"ARCHFLAGS\"]))\n\n# detect gstreamer, only on desktop\n# works if we forced the options or in autodetection\nif platform not in ('ios', 'android') and (c_options['use_gstreamer']\n in (None, True)):\n gstreamer_valid = False\n if c_options['use_osx_frameworks'] and platform == 'darwin':\n # check the existence of frameworks\n f_path = '/Library/Frameworks/GStreamer.framework'\n if not exists(f_path):\n c_options['use_gstreamer'] = False\n print('GStreamer framework not found, fallback on pkg-config')\n else:\n print('GStreamer framework found')\n gstreamer_valid = True\n c_options['use_gstreamer'] = True\n gst_flags = {\n 'extra_link_args': [\n '-F/Library/Frameworks',\n '-Xlinker', '-rpath',\n '-Xlinker', '/Library/Frameworks',\n '-Xlinker', '-headerpad',\n '-Xlinker', '190',\n '-framework', 'GStreamer'],\n 'include_dirs': [join(f_path, 'Headers')]}\n\n if not gstreamer_valid:\n # use pkg-config approach instead\n gst_flags = pkgconfig('gstreamer-1.0')\n if 'libraries' in gst_flags:\n print('GStreamer found via pkg-config')\n c_options['use_gstreamer'] = True\n\n\n# detect SDL2, only on desktop and iOS, or android if explicitly enabled\n# works if we forced the options or in autodetection\nsdl2_flags = {}\nif c_options['use_sdl2'] or (\n platform not in ('android',) and c_options['use_sdl2'] is None):\n\n sdl2_valid = False\n if c_options['use_osx_frameworks'] and platform == 'darwin':\n # check the existence of frameworks\n sdl2_valid = True\n sdl2_flags = {\n 'extra_link_args': [\n '-F/Library/Frameworks',\n '-Xlinker', '-rpath',\n '-Xlinker', '/Library/Frameworks',\n '-Xlinker', '-headerpad',\n '-Xlinker', '190'],\n 'include_dirs': [],\n 'extra_compile_args': ['-F/Library/Frameworks']\n }\n for name in ('SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer'):\n f_path = '/Library/Frameworks/{}.framework'.format(name)\n if not exists(f_path):\n print('Missing framework {}'.format(f_path))\n sdl2_valid = 
False\n continue\n sdl2_flags['extra_link_args'] += ['-framework', name]\n sdl2_flags['include_dirs'] += [join(f_path, 'Headers')]\n print('Found sdl2 frameworks: {}'.format(f_path))\n if name == 'SDL2_mixer':\n _check_and_fix_sdl2_mixer(f_path)\n\n if not sdl2_valid:\n c_options['use_sdl2'] = False\n print('SDL2 frameworks not found, fallback on pkg-config')\n else:\n c_options['use_sdl2'] = True\n print('Activate SDL2 compilation')\n\n if not sdl2_valid and platform != \"ios\":\n # use pkg-config approach instead\n sdl2_flags = pkgconfig('sdl2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer')\n if 'libraries' in sdl2_flags:\n print('SDL2 found via pkg-config')\n c_options['use_sdl2'] = True\n\n\n# -----------------------------------------------------------------------------\n# declare flags\n\n\ndef get_modulename_from_file(filename):\n filename = filename.replace(sep, '/')\n pyx = '.'.join(filename.split('.')[:-1])\n pyxl = pyx.split('/')\n while pyxl[0] != 'kivy':\n pyxl.pop(0)\n if pyxl[1] == 'kivy':\n pyxl.pop(0)\n return '.'.join(pyxl)\n\n\ndef expand(root, *args):\n return join(root, 'kivy', *args)\n\n\nclass CythonExtension(Extension):\n\n def __init__(self, *args, **kwargs):\n Extension.__init__(self, *args, **kwargs)\n self.cython_directives = {\n 'c_string_encoding': 'utf-8',\n 'profile': 'USE_PROFILE' in environ,\n 'embedsignature': 'USE_EMBEDSIGNATURE' in environ}\n # XXX with pip, setuptools is imported before distutils, and change\n # our pyx to c, then, cythonize doesn't happen. So force again our\n # sources\n self.sources = args[1]\n\n\ndef merge(d1, *args):\n d1 = deepcopy(d1)\n for d2 in args:\n for key, value in d2.items():\n value = deepcopy(value)\n if key in d1:\n d1[key].extend(value)\n else:\n d1[key] = value\n return d1\n\n\ndef determine_base_flags():\n flags = {\n 'libraries': [],\n 'include_dirs': [join(src_path, 'kivy', 'include')],\n 'library_dirs': [],\n 'extra_link_args': [],\n 'extra_compile_args': []}\n if c_options['use_ios']:\n sysroot = environ.get('IOSSDKROOT', environ.get('SDKROOT'))\n if not sysroot:\n raise Exception('IOSSDKROOT is not set')\n flags['include_dirs'] += [sysroot]\n flags['extra_compile_args'] += ['-isysroot', sysroot]\n flags['extra_link_args'] += ['-isysroot', sysroot]\n elif platform.startswith('freebsd'):\n flags['include_dirs'] += [join(\n environ.get('LOCALBASE', '/usr/local'), 'include')]\n flags['library_dirs'] += [join(\n environ.get('LOCALBASE', '/usr/local'), 'lib')]\n elif platform == 'darwin':\n v = os.uname()\n if v[2] >= '13.0.0':\n # use xcode-select to search on the right Xcode path\n # XXX use the best SDK available instead of a specific one\n import platform as _platform\n xcode_dev = getoutput('xcode-select -p').splitlines()[0]\n sdk_mac_ver = '.'.join(_platform.mac_ver()[0].split('.')[:2])\n print('Xcode detected at {}, and using OS X{} sdk'.format(\n xcode_dev, sdk_mac_ver))\n sysroot = join(\n xcode_dev.decode('utf-8'),\n 'Platforms/MacOSX.platform/Developer/SDKs',\n 'MacOSX{}.sdk'.format(sdk_mac_ver),\n 'System/Library/Frameworks')\n else:\n sysroot = ('/System/Library/Frameworks/'\n 'ApplicationServices.framework/Frameworks')\n flags['extra_compile_args'] += ['-F%s' % sysroot]\n flags['extra_link_args'] += ['-F%s' % sysroot]\n elif platform == 'win32':\n flags['include_dirs'] += [get_python_inc(prefix=sys.prefix)]\n flags['library_dirs'] += [join(sys.prefix, \"libs\")]\n return flags\n\n\ndef determine_gl_flags():\n kivy_graphics_include = join(src_path, 'kivy', 'include')\n flags = {'include_dirs': 
[kivy_graphics_include], 'libraries': []}\n base_flags = {'include_dirs': [kivy_graphics_include], 'libraries': []}\n if c_options['use_opengl_mock']:\n return flags, base_flags\n if platform == 'win32':\n flags['libraries'] = ['opengl32', 'glew32']\n elif platform == 'ios':\n flags['libraries'] = ['GLESv2']\n flags['extra_link_args'] = ['-framework', 'OpenGLES']\n elif platform == 'darwin':\n flags['extra_link_args'] = ['-framework', 'OpenGL', '-arch', osx_arch]\n flags['extra_compile_args'] = ['-arch', osx_arch]\n elif platform.startswith('freebsd'):\n flags['libraries'] = ['GL']\n elif platform.startswith('openbsd'):\n flags['include_dirs'] = ['/usr/X11R6/include']\n flags['library_dirs'] = ['/usr/X11R6/lib']\n flags['libraries'] = ['GL']\n elif platform == 'android':\n flags['include_dirs'] = [join(ndkplatform, 'usr', 'include')]\n flags['library_dirs'] = [join(ndkplatform, 'usr', 'lib')]\n flags['libraries'] = ['GLESv2']\n elif platform == 'rpi':\n flags['include_dirs'] = [\n '/opt/vc/include',\n '/opt/vc/include/interface/vcos/pthreads',\n '/opt/vc/include/interface/vmcs_host/linux']\n flags['library_dirs'] = ['/opt/vc/lib']\n flags['libraries'] = ['bcm_host', 'EGL', 'GLESv2']\n elif platform == 'mali':\n flags['include_dirs'] = ['/usr/include/']\n flags['library_dirs'] = ['/usr/lib/arm-linux-gnueabihf']\n flags['libraries'] = ['GLESv2']\n c_options['use_x11'] = True\n c_options['use_egl'] = True\n else:\n flags['libraries'] = ['GL']\n return flags, base_flags\n\n\ndef determine_sdl2():\n flags = {}\n if not c_options['use_sdl2']:\n return flags\n\n sdl2_path = environ.get('KIVY_SDL2_PATH', None)\n\n if sdl2_flags and not sdl2_path and platform == 'darwin':\n return sdl2_flags\n\n # no pkgconfig info, or we want to use a specific sdl2 path, so perform\n # manual configuration\n flags['libraries'] = ['SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer']\n split_chr = ';' if platform == 'win32' else ':'\n sdl2_paths = sdl2_path.split(split_chr) if sdl2_path else []\n\n if not sdl2_paths:\n sdl_inc = join(sys.prefix, 'include', 'SDL2')\n if isdir(sdl_inc):\n sdl2_paths = [sdl_inc]\n sdl2_paths.extend(['/usr/local/include/SDL2', '/usr/include/SDL2'])\n\n flags['include_dirs'] = sdl2_paths\n flags['extra_link_args'] = []\n flags['extra_compile_args'] = []\n flags['library_dirs'] = (\n sdl2_paths if sdl2_paths else\n ['/usr/local/lib/'])\n\n if sdl2_flags:\n flags = merge(flags, sdl2_flags)\n\n # ensure headers for all the SDL2 and sub libraries are available\n libs_to_check = ['SDL', 'SDL_mixer', 'SDL_ttf', 'SDL_image']\n can_compile = True\n for lib in libs_to_check:\n found = False\n for d in flags['include_dirs']:\n fn = join(d, '{}.h'.format(lib))\n if exists(fn):\n found = True\n print('SDL2: found {} header at {}'.format(lib, fn))\n break\n\n if not found:\n print('SDL2: missing sub library {}'.format(lib))\n can_compile = False\n\n if not can_compile:\n c_options['use_sdl2'] = False\n return {}\n\n return flags\n\n\nbase_flags = determine_base_flags()\ngl_flags, gl_flags_base = determine_gl_flags()\n\n# -----------------------------------------------------------------------------\n# sources to compile\n# all the dependencies have been found manually with:\n# grep -inr -E '(cimport|include)' kivy/graphics/context_instructions.{pxd,pyx}\ngraphics_dependencies = {\n 'buffer.pyx': ['common.pxi'],\n 'context.pxd': ['instructions.pxd', 'texture.pxd', 'vbo.pxd', 'cgl.pxd'],\n 'cgl.pxd': ['common.pxi', 'config.pxi', 'gl_redirect.h'],\n 'compiler.pxd': ['instructions.pxd'],\n 'compiler.pyx': 
['context_instructions.pxd'],\n 'cgl.pyx': ['cgl.pxd'],\n 'cgl_mock.pyx': ['cgl.pxd'],\n 'cgl_sdl2.pyx': ['cgl.pxd'],\n 'cgl_gl.pyx': ['cgl.pxd'],\n 'cgl_glew.pyx': ['cgl.pxd'],\n 'context_instructions.pxd': [\n 'transformation.pxd', 'instructions.pxd', 'texture.pxd'],\n 'fbo.pxd': ['cgl.pxd', 'instructions.pxd', 'texture.pxd'],\n 'fbo.pyx': [\n 'config.pxi', 'opcodes.pxi', 'transformation.pxd', 'context.pxd'],\n 'gl_instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd', 'instructions.pxd'],\n 'instructions.pxd': [\n 'vbo.pxd', 'context_instructions.pxd', 'compiler.pxd', 'shader.pxd',\n 'texture.pxd', '../_event.pxd'],\n 'instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd',\n 'context.pxd', 'common.pxi', 'vertex.pxd', 'transformation.pxd'],\n 'opengl.pyx': [\n 'config.pxi', 'common.pxi', 'cgl.pxd', 'gl_redirect.h'],\n 'opengl_utils.pyx': [\n 'opengl_utils_def.pxi', 'cgl.pxd', ],\n 'shader.pxd': ['cgl.pxd', 'transformation.pxd', 'vertex.pxd'],\n 'shader.pyx': [\n 'config.pxi', 'common.pxi', 'cgl.pxd',\n 'vertex.pxd', 'transformation.pxd', 'context.pxd',\n 'gl_debug_logger.pxi'],\n 'stencil_instructions.pxd': ['instructions.pxd'],\n 'stencil_instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd',\n 'gl_debug_logger.pxi'],\n 'scissor_instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd'],\n 'svg.pyx': ['config.pxi', 'common.pxi', 'texture.pxd', 'instructions.pxd',\n 'vertex_instructions.pxd', 'tesselator.pxd'],\n 'texture.pxd': ['cgl.pxd'],\n 'texture.pyx': [\n 'config.pxi', 'common.pxi', 'opengl_utils_def.pxi', 'context.pxd',\n 'cgl.pxd', 'opengl_utils.pxd',\n 'img_tools.pxi', 'gl_debug_logger.pxi'],\n 'vbo.pxd': ['buffer.pxd', 'cgl.pxd', 'vertex.pxd'],\n 'vbo.pyx': [\n 'config.pxi', 'common.pxi', 'context.pxd',\n 'instructions.pxd', 'shader.pxd', 'gl_debug_logger.pxi'],\n 'vertex.pxd': ['cgl.pxd'],\n 'vertex.pyx': ['config.pxi', 'common.pxi'],\n 'vertex_instructions.pyx': [\n 'config.pxi', 'common.pxi', 'vbo.pxd', 'vertex.pxd',\n 'instructions.pxd', 'vertex_instructions.pxd',\n 'cgl.pxd', 'texture.pxd', 'vertex_instructions_line.pxi'],\n 'vertex_instructions_line.pxi': ['stencil_instructions.pxd']}\n\nsources = {\n '_event.pyx': merge(base_flags, {'depends': ['properties.pxd']}),\n '_clock.pyx': {},\n 'weakproxy.pyx': {},\n 'properties.pyx': merge(base_flags, {'depends': ['_event.pxd']}),\n 'graphics/buffer.pyx': merge(base_flags, gl_flags_base),\n 'graphics/context.pyx': merge(base_flags, gl_flags_base),\n 'graphics/compiler.pyx': merge(base_flags, gl_flags_base),\n 'graphics/context_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/fbo.pyx': merge(base_flags, gl_flags_base),\n 'graphics/gl_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/opengl.pyx': merge(base_flags, gl_flags_base),\n 'graphics/opengl_utils.pyx': merge(base_flags, gl_flags_base),\n 'graphics/shader.pyx': merge(base_flags, gl_flags_base),\n 'graphics/stencil_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/scissor_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/texture.pyx': merge(base_flags, gl_flags_base),\n 'graphics/transformation.pyx': merge(base_flags, gl_flags_base),\n 'graphics/vbo.pyx': merge(base_flags, gl_flags_base),\n 'graphics/vertex.pyx': merge(base_flags, gl_flags_base),\n 'graphics/vertex_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/cgl.pyx': merge(base_flags, gl_flags_base),\n 'graphics/cgl_backend/cgl_mock.pyx': 
merge(base_flags, gl_flags_base),\n 'graphics/cgl_backend/cgl_gl.pyx': merge(base_flags, gl_flags),\n 'graphics/cgl_backend/cgl_glew.pyx': merge(base_flags, gl_flags),\n 'graphics/cgl_backend/cgl_sdl2.pyx': merge(base_flags, gl_flags_base),\n 'graphics/cgl_backend/cgl_debug.pyx': merge(base_flags, gl_flags_base),\n 'core/text/text_layout.pyx': base_flags,\n 'graphics/tesselator.pyx': merge(base_flags, {\n 'include_dirs': ['kivy/lib/libtess2/Include'],\n 'c_depends': [\n 'lib/libtess2/Source/bucketalloc.c',\n 'lib/libtess2/Source/dict.c',\n 'lib/libtess2/Source/geom.c',\n 'lib/libtess2/Source/mesh.c',\n 'lib/libtess2/Source/priorityq.c',\n 'lib/libtess2/Source/sweep.c',\n 'lib/libtess2/Source/tess.c'\n ]\n }),\n 'graphics/svg.pyx': merge(base_flags, gl_flags_base)\n}\n\nif c_options[\"use_sdl2\"]:\n sdl2_flags = determine_sdl2()\n\nif c_options['use_sdl2'] and sdl2_flags:\n sources['graphics/cgl_backend/cgl_sdl2.pyx'] = merge(\n sources['graphics/cgl_backend/cgl_sdl2.pyx'], sdl2_flags)\n sdl2_depends = {'depends': ['lib/sdl2.pxi']}\n for source_file in ('core/window/_window_sdl2.pyx',\n 'core/image/_img_sdl2.pyx',\n 'core/text/_text_sdl2.pyx',\n 'core/audio/audio_sdl2.pyx',\n 'core/clipboard/_clipboard_sdl2.pyx'):\n sources[source_file] = merge(\n base_flags, sdl2_flags, sdl2_depends)\n\nif platform in ('darwin', 'ios'):\n # activate ImageIO provider for our core image\n if platform == 'ios':\n osx_flags = {'extra_link_args': [\n '-framework', 'Foundation',\n '-framework', 'UIKit',\n '-framework', 'AudioToolbox',\n '-framework', 'CoreGraphics',\n '-framework', 'QuartzCore',\n '-framework', 'ImageIO',\n '-framework', 'Accelerate']}\n else:\n osx_flags = {'extra_link_args': [\n '-framework', 'ApplicationServices']}\n sources['core/image/img_imageio.pyx'] = merge(\n base_flags, osx_flags)\n\nif c_options['use_avfoundation']:\n import platform as _platform\n mac_ver = [int(x) for x in _platform.mac_ver()[0].split('.')[:2]]\n if mac_ver >= [10, 7]:\n osx_flags = {\n 'extra_link_args': ['-framework', 'AVFoundation'],\n 'extra_compile_args': ['-ObjC++'],\n 'depends': ['core/camera/camera_avfoundation_implem.m']}\n sources['core/camera/camera_avfoundation.pyx'] = merge(\n base_flags, osx_flags)\n else:\n print('AVFoundation cannot be used, OSX >= 10.7 is required')\n\nif c_options['use_rpi']:\n sources['lib/vidcore_lite/egl.pyx'] = merge(\n base_flags, gl_flags)\n sources['lib/vidcore_lite/bcm.pyx'] = merge(\n base_flags, gl_flags)\n\nif c_options['use_x11']:\n libs = ['Xrender', 'X11']\n if c_options['use_egl']:\n libs += ['EGL']\n else:\n libs += ['GL']\n sources['core/window/window_x11.pyx'] = merge(\n base_flags, gl_flags, {\n # FIXME add an option to depend on them but not compile them\n # cause keytab is included in core, and core is included in\n # window_x11\n #\n # 'depends': [\n # 'core/window/window_x11_keytab.c',\n # 'core/window/window_x11_core.c'],\n 'libraries': libs})\n\nif c_options['use_gstreamer']:\n sources['lib/gstplayer/_gstplayer.pyx'] = merge(\n base_flags, gst_flags, {\n 'depends': ['lib/gstplayer/_gstplayer.h']})\n\n\n# -----------------------------------------------------------------------------\n# extension modules\n\ndef get_dependencies(name, deps=None):\n if deps is None:\n deps = []\n for dep in graphics_dependencies.get(name, []):\n if dep not in deps:\n deps.append(dep)\n get_dependencies(dep, deps)\n return deps\n\n\ndef resolve_dependencies(fn, depends):\n fn = basename(fn)\n deps = []\n get_dependencies(fn, deps)\n get_dependencies(fn.replace('.pyx', '.pxd'), 
deps)\n\n deps_final = []\n paths_to_test = ['graphics', 'include']\n for dep in deps:\n found = False\n for path in paths_to_test:\n filename = expand(src_path, path, dep)\n if exists(filename):\n deps_final.append(filename)\n found = True\n break\n if not found:\n print('ERROR: Dependency for {} not resolved: {}'.format(\n fn, dep\n ))\n\n return deps_final\n\n\ndef get_extensions_from_sources(sources):\n ext_modules = []\n if environ.get('KIVY_FAKE_BUILDEXT'):\n print('Fake build_ext asked, will generate only .h/.c')\n return ext_modules\n for pyx, flags in sources.items():\n is_graphics = pyx.startswith('graphics')\n pyx = expand(src_path, pyx)\n depends = [expand(src_path, x) for x in flags.pop('depends', [])]\n c_depends = [expand(src_path, x) for x in flags.pop('c_depends', [])]\n if not have_cython:\n pyx = '%s.c' % pyx[:-4]\n if is_graphics:\n depends = resolve_dependencies(pyx, depends)\n f_depends = [x for x in depends if x.rsplit('.', 1)[-1] in (\n 'c', 'cpp', 'm')]\n module_name = get_modulename_from_file(pyx)\n flags_clean = {'depends': depends}\n for key, value in flags.items():\n if len(value):\n flags_clean[key] = value\n ext_modules.append(CythonExtension(\n module_name, [pyx] + f_depends + c_depends, **flags_clean))\n return ext_modules\n\n\next_modules = get_extensions_from_sources(sources)\n\n\n# -----------------------------------------------------------------------------\n# automatically detect data files\nsplit_examples = int(environ.get('KIVY_SPLIT_EXAMPLES', '0'))\ndata_file_prefix = 'share/kivy-'\nexamples = {}\nexamples_allowed_ext = ('readme', 'py', 'wav', 'png', 'jpg', 'svg', 'json',\n 'avi', 'gif', 'txt', 'ttf', 'obj', 'mtl', 'kv', 'mpg',\n 'glsl', 'zip')\nfor root, subFolders, files in walk('examples'):\n for fn in files:\n ext = fn.split('.')[-1].lower()\n if ext not in examples_allowed_ext:\n continue\n filename = join(root, fn)\n directory = '%s%s' % (data_file_prefix, dirname(filename))\n if directory not in examples:\n examples[directory] = []\n examples[directory].append(filename)\n\nbinary_deps = []\nbinary_deps_path = join(src_path, 'kivy', 'binary_deps')\nif isdir(binary_deps_path):\n for root, dirnames, filenames in walk(binary_deps_path):\n for fname in filenames:\n binary_deps.append(\n join(root.replace(binary_deps_path, 'binary_deps'), fname))\n\n# -----------------------------------------------------------------------------\n# setup !\nif not build_examples:\n setup(\n name='Kivy',\n version=get_version(),\n author='Kivy Team and other contributors',\n author_email='[email protected]',\n url='http://kivy.org',\n license='MIT',\n description=(\n 'A software library for rapid development of '\n 'hardware-accelerated multitouch applications.'),\n ext_modules=ext_modules,\n cmdclass=cmdclass,\n packages=[\n 'kivy',\n 'kivy.adapters',\n 'kivy.core',\n 'kivy.core.audio',\n 'kivy.core.camera',\n 'kivy.core.clipboard',\n 'kivy.core.image',\n 'kivy.core.gl',\n 'kivy.core.spelling',\n 'kivy.core.text',\n 'kivy.core.video',\n 'kivy.core.window',\n 'kivy.deps',\n 'kivy.effects',\n 'kivy.graphics',\n 'kivy.graphics.cgl_backend',\n 'kivy.garden',\n 'kivy.input',\n 'kivy.input.postproc',\n 'kivy.input.providers',\n 'kivy.lang',\n 'kivy.lib',\n 'kivy.lib.osc',\n 'kivy.lib.gstplayer',\n 'kivy.lib.vidcore_lite',\n 'kivy.modules',\n 'kivy.network',\n 'kivy.storage',\n 'kivy.tests',\n 'kivy.tools',\n 'kivy.tools.packaging',\n 'kivy.tools.packaging.pyinstaller_hooks',\n 'kivy.tools.highlight',\n 'kivy.extras',\n 'kivy.uix',\n 'kivy.uix.behaviors',\n 
'kivy.uix.recycleview',\n ],\n package_dir={'kivy': 'kivy'},\n package_data={'kivy': [\n '*.pxd',\n '*.pxi',\n 'core/text/*.pxd',\n 'core/text/*.pxi',\n 'graphics/*.pxd',\n 'graphics/*.pxi',\n 'graphics/*.h',\n 'include/*',\n 'lib/vidcore_lite/*.pxd',\n 'lib/vidcore_lite/*.pxi',\n 'data/*.kv',\n 'data/*.json',\n 'data/fonts/*.ttf',\n 'data/images/*.png',\n 'data/images/*.jpg',\n 'data/images/*.gif',\n 'data/images/*.atlas',\n 'data/keyboards/*.json',\n 'data/logo/*.png',\n 'data/glsl/*.png',\n 'data/glsl/*.vs',\n 'data/glsl/*.fs',\n 'tests/*.zip',\n 'tests/*.kv',\n 'tests/*.png',\n 'tests/*.ttf',\n 'tests/*.ogg',\n 'tools/gles_compat/*',\n 'tools/highlight/*',\n 'tools/packaging/README.txt',\n 'tools/packaging/win32/kivy.bat',\n 'tools/packaging/win32/kivyenv.sh',\n 'tools/packaging/win32/README.txt',\n 'tools/packaging/osx/Info.plist',\n 'tools/packaging/osx/InfoPlist.strings',\n 'tools/packaging/osx/kivy.sh',\n 'tools/pep8checker/*',\n 'tools/theming/defaulttheme/*',\n ] + binary_deps},\n data_files=[] if split_examples else list(examples.items()),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: MacOS X',\n 'Environment :: Win32 (MS Windows)',\n 'Environment :: X11 Applications',\n 'Intended Audience :: Developers',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: BSD :: FreeBSD',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Artistic Software',\n 'Topic :: Games/Entertainment',\n 'Topic :: Multimedia :: Graphics :: 3D Rendering',\n 'Topic :: Multimedia :: Graphics :: Capture :: Digital Camera',\n 'Topic :: Multimedia :: Graphics :: Presentation',\n 'Topic :: Multimedia :: Graphics :: Viewers',\n 'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',\n 'Topic :: Multimedia :: Video :: Display',\n 'Topic :: Scientific/Engineering :: Human Machine Interfaces',\n 'Topic :: Scientific/Engineering :: Visualization',\n ('Topic :: Software Development :: Libraries :: '\n 'Application Frameworks'),\n 'Topic :: Software Development :: User Interfaces'],\n dependency_links=[\n 'https://github.com/kivy-garden/garden/archive/master.zip'],\n install_requires=['Kivy-Garden>=0.1.4', 'docutils', 'pygments'],\n setup_requires=[\n 'cython>=' + MIN_CYTHON_STRING\n ] if not skip_cython else [])\nelse:\n setup(\n name='Kivy-examples',\n version=get_version(),\n author='Kivy Team and other contributors',\n author_email='[email protected]',\n url='http://kivy.org',\n license='MIT',\n description=('Kivy examples.'),\n data_files=list(examples.items()))\n", "path": "setup.py" } ]
[ { "content": "#\n# Kivy - Cross-platform UI framework\n# https://kivy.org/\n#\nfrom __future__ import print_function\n\nimport sys\nbuild_examples = False\nif \"--build_examples\" in sys.argv:\n build_examples = True\n sys.argv.remove(\"--build_examples\")\n\nfrom copy import deepcopy\nimport os\nfrom os.path import join, dirname, sep, exists, basename, isdir\nfrom os import walk, environ\nfrom distutils.version import LooseVersion\nfrom distutils.sysconfig import get_python_inc\nfrom collections import OrderedDict\nfrom time import sleep\nfrom subprocess import check_output, CalledProcessError\nfrom datetime import datetime\n\nif environ.get('KIVY_USE_SETUPTOOLS'):\n from setuptools import setup, Extension\n print('Using setuptools')\nelse:\n from distutils.core import setup\n from distutils.extension import Extension\n print('Using distutils')\n\n\nPY3 = sys.version > '3'\n\nif PY3: # fix error with py3's LooseVersion comparisons\n def ver_equal(self, other):\n return self.version == other\n\n LooseVersion.__eq__ = ver_equal\n\n\ndef get_version(filename='kivy/version.py'):\n VERSION = kivy.__version__\n DATE = datetime.utcnow().strftime('%Y%m%d')\n try:\n GIT_REVISION = check_output(\n ['git', 'rev-parse', 'HEAD']\n ).strip().decode('ascii')\n except (CalledProcessError, FileNotFoundError):\n GIT_REVISION = \"Unknown\"\n\n cnt = (\n \"# THIS FILE IS GENERATED FROM KIVY SETUP.PY\\n\"\n \"__version__ = '%(version)s'\\n\"\n \"__hash__ = '%(hash)s'\\n\"\n \"__date__ = '%(date)s'\\n\"\n )\n\n with open(filename, 'w') as f:\n f.write(cnt % {\n 'version': VERSION,\n 'hash': GIT_REVISION,\n 'date': DATE\n })\n return VERSION\n\n\nMIN_CYTHON_STRING = '0.23'\nMIN_CYTHON_VERSION = LooseVersion(MIN_CYTHON_STRING)\nMAX_CYTHON_STRING = '0.25.2'\nMAX_CYTHON_VERSION = LooseVersion(MAX_CYTHON_STRING)\nCYTHON_UNSUPPORTED = ()\n\n\ndef getoutput(cmd, env=None):\n import subprocess\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, env=env)\n p.wait()\n if p.returncode: # if not returncode == 0\n print('WARNING: A problem occurred while running {0} (code {1})\\n'\n .format(cmd, p.returncode))\n stderr_content = p.stderr.read()\n if stderr_content:\n print('{0}\\n'.format(stderr_content))\n return \"\"\n return p.stdout.read()\n\n\ndef pkgconfig(*packages, **kw):\n flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}\n lenviron = None\n pconfig = join(sys.prefix, 'libs', 'pkgconfig')\n\n if isdir(pconfig):\n lenviron = environ.copy()\n lenviron['PKG_CONFIG_PATH'] = '{};{}'.format(\n environ.get('PKG_CONFIG_PATH', ''), pconfig)\n cmd = 'pkg-config --libs --cflags {}'.format(' '.join(packages))\n results = getoutput(cmd, lenviron).split()\n for token in results:\n ext = token[:2].decode('utf-8')\n flag = flag_map.get(ext)\n if not flag:\n continue\n kw.setdefault(flag, []).append(token[2:].decode('utf-8'))\n return kw\n\n\n# -----------------------------------------------------------------------------\n# Determine on which platform we are\n\nplatform = sys.platform\n\n# Detect 32/64bit for OSX (http://stackoverflow.com/a/1405971/798575)\nif sys.platform == 'darwin':\n if sys.maxsize > 2 ** 32:\n osx_arch = 'x86_64'\n else:\n osx_arch = 'i386'\n\n# Detect Python for android project (http://github.com/kivy/python-for-android)\nndkplatform = environ.get('NDKPLATFORM')\nif ndkplatform is not None and environ.get('LIBLINK'):\n platform = 'android'\nkivy_ios_root = environ.get('KIVYIOSROOT', None)\nif kivy_ios_root is not None:\n platform = 'ios'\nif 
exists('/opt/vc/include/bcm_host.h'):\n platform = 'rpi'\nif exists('/usr/lib/arm-linux-gnueabihf/libMali.so'):\n platform = 'mali'\n\n# -----------------------------------------------------------------------------\n# Detect options\n#\nc_options = OrderedDict()\nc_options['use_rpi'] = platform == 'rpi'\nc_options['use_mali'] = platform == 'mali'\nc_options['use_egl'] = False\nc_options['use_opengl_es2'] = None\nc_options['use_opengl_mock'] = environ.get('READTHEDOCS', None) == 'True'\nc_options['use_sdl2'] = None\nc_options['use_ios'] = False\nc_options['use_mesagl'] = False\nc_options['use_x11'] = False\nc_options['use_gstreamer'] = None\nc_options['use_avfoundation'] = platform == 'darwin'\nc_options['use_osx_frameworks'] = platform == 'darwin'\nc_options['debug_gl'] = False\n\n# now check if environ is changing the default values\nfor key in list(c_options.keys()):\n ukey = key.upper()\n if ukey in environ:\n value = bool(int(environ[ukey]))\n print('Environ change {0} -> {1}'.format(key, value))\n c_options[key] = value\n\n\n# -----------------------------------------------------------------------------\n# Cython check\n# on python-for-android and kivy-ios, cython usage is external\n\ncython_unsupported_append = '''\n\n Please note that the following versions of Cython are not supported\n at all: {}\n'''.format(', '.join(map(str, CYTHON_UNSUPPORTED)))\n\ncython_min = '''\\\n This version of Cython is not compatible with Kivy. Please upgrade to\n at least version {0}, preferably the newest supported version {1}.\n\n If your platform provides a Cython package, make sure you have upgraded\n to the newest version. If the newest version available is still too low,\n please remove it and install the newest supported Cython via pip:\n\n pip install -I Cython=={1}{2}\\\n'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,\n cython_unsupported_append if CYTHON_UNSUPPORTED else '')\n\ncython_max = '''\\\n This version of Cython is untested with Kivy. While this version may\n work perfectly fine, it is possible that you may experience issues. If\n you do have issues, please downgrade to a supported version. It is\n best to use the newest supported version, {1}, but the minimum\n supported version is {0}.\n\n If your platform provides a Cython package, check if you can downgrade\n to a supported version. Otherwise, uninstall the platform package and\n install Cython via pip:\n\n pip install -I Cython=={1}{2}\\\n'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,\n cython_unsupported_append if CYTHON_UNSUPPORTED else '')\n\ncython_unsupported = '''\\\n This version of Cython suffers from known bugs and is unsupported.\n Please install the newest supported version, {1}, if possible, but\n the minimum supported version is {0}.\n\n If your platform provides a Cython package, check if you can install\n a supported version. 
Otherwise, uninstall the platform package and\n install Cython via pip:\n\n pip install -I Cython=={1}{2}\\\n'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,\n cython_unsupported_append)\n\nhave_cython = False\nskip_cython = False\nif platform in ('ios', 'android'):\n print('\\nCython check avoided.')\n skip_cython = True\nelse:\n try:\n # check for cython\n from Cython.Distutils import build_ext\n have_cython = True\n import Cython\n cy_version_str = Cython.__version__\n cy_ver = LooseVersion(cy_version_str)\n print('\\nDetected Cython version {}'.format(cy_version_str))\n if cy_ver < MIN_CYTHON_VERSION:\n print(cython_min)\n raise ImportError('Incompatible Cython Version')\n if cy_ver in CYTHON_UNSUPPORTED:\n print(cython_unsupported)\n raise ImportError('Incompatible Cython Version')\n if cy_ver > MAX_CYTHON_VERSION:\n print(cython_max)\n sleep(1)\n except ImportError:\n print(\"\\nCython is missing, it's required for compiling kivy !\\n\\n\")\n raise\n\nif not have_cython:\n from distutils.command.build_ext import build_ext\n\n# -----------------------------------------------------------------------------\n# Setup classes\n\n# the build path where kivy is being compiled\nsrc_path = build_path = dirname(__file__)\n\n\nclass KivyBuildExt(build_ext):\n\n def finalize_options(self):\n retval = build_ext.finalize_options(self)\n global build_path\n if (self.build_lib is not None and exists(self.build_lib) and\n not self.inplace):\n build_path = self.build_lib\n return retval\n\n def build_extensions(self):\n # build files\n config_h_fn = ('include', 'config.h')\n config_pxi_fn = ('include', 'config.pxi')\n config_py_fn = ('setupconfig.py', )\n\n # generate headers\n config_h = '// Autogenerated file for Kivy C configuration\\n'\n config_h += '#define __PY3 {0}\\n'.format(int(PY3))\n config_pxi = '# Autogenerated file for Kivy Cython configuration\\n'\n config_pxi += 'DEF PY3 = {0}\\n'.format(int(PY3))\n config_py = '# Autogenerated file for Kivy configuration\\n'\n config_py += 'PY3 = {0}\\n'.format(int(PY3))\n config_py += 'CYTHON_MIN = {0}\\nCYTHON_MAX = {1}\\n'.format(\n repr(MIN_CYTHON_STRING), repr(MAX_CYTHON_STRING))\n config_py += 'CYTHON_BAD = {0}\\n'.format(repr(', '.join(map(\n str, CYTHON_UNSUPPORTED))))\n\n # generate content\n print('Build configuration is:')\n for opt, value in c_options.items():\n value = int(bool(value))\n print(' * {0} = {1}'.format(opt, value))\n opt = opt.upper()\n config_h += '#define __{0} {1}\\n'.format(opt, value)\n config_pxi += 'DEF {0} = {1}\\n'.format(opt, value)\n config_py += '{0} = {1}\\n'.format(opt, value)\n debug = bool(self.debug)\n print(' * debug = {0}'.format(debug))\n\n config_pxi += 'DEF DEBUG = {0}\\n'.format(debug)\n config_py += 'DEBUG = {0}\\n'.format(debug)\n config_pxi += 'DEF PLATFORM = \"{0}\"\\n'.format(platform)\n config_py += 'PLATFORM = \"{0}\"\\n'.format(platform)\n for fn, content in (\n (config_h_fn, config_h), (config_pxi_fn, config_pxi),\n (config_py_fn, config_py)):\n build_fn = expand(build_path, *fn)\n if self.update_if_changed(build_fn, content):\n print('Updated {}'.format(build_fn))\n src_fn = expand(src_path, *fn)\n if src_fn != build_fn and self.update_if_changed(src_fn, content):\n print('Updated {}'.format(src_fn))\n\n c = self.compiler.compiler_type\n print('Detected compiler is {}'.format(c))\n if c != 'msvc':\n for e in self.extensions:\n e.extra_link_args += ['-lm']\n\n build_ext.build_extensions(self)\n\n def update_if_changed(self, fn, content):\n need_update = True\n if exists(fn):\n with open(fn) as 
fd:\n need_update = fd.read() != content\n if need_update:\n with open(fn, 'w') as fd:\n fd.write(content)\n return need_update\n\n\ndef _check_and_fix_sdl2_mixer(f_path):\n print(\"Check if SDL2_mixer smpeg2 have an @executable_path\")\n rpath_from = (\"@executable_path/../Frameworks/SDL2.framework\"\n \"/Versions/A/SDL2\")\n rpath_to = \"@rpath/../../../../SDL2.framework/Versions/A/SDL2\"\n smpeg2_path = (\"{}/Versions/A/Frameworks/smpeg2.framework\"\n \"/Versions/A/smpeg2\").format(f_path)\n output = getoutput((\"otool -L '{}'\").format(smpeg2_path)).decode('utf-8')\n if \"@executable_path\" not in output:\n return\n\n print(\"WARNING: Your SDL2_mixer version is invalid\")\n print(\"WARNING: The smpeg2 framework embedded in SDL2_mixer contains a\")\n print(\"WARNING: reference to @executable_path that will fail the\")\n print(\"WARNING: execution of your application.\")\n print(\"WARNING: We are going to change:\")\n print(\"WARNING: from: {}\".format(rpath_from))\n print(\"WARNING: to: {}\".format(rpath_to))\n getoutput(\"install_name_tool -change {} {} {}\".format(\n rpath_from, rpath_to, smpeg2_path))\n\n output = getoutput((\"otool -L '{}'\").format(smpeg2_path))\n if b\"@executable_path\" not in output:\n print(\"WARNING: Change successfully applied!\")\n print(\"WARNING: You'll never see this message again.\")\n else:\n print(\"WARNING: Unable to apply the changes, sorry.\")\n\n\n# -----------------------------------------------------------------------------\n# extract version (simulate doc generation, kivy will be not imported)\nenviron['KIVY_DOC_INCLUDE'] = '1'\nimport kivy\n\n# extra build commands go in the cmdclass dict {'command-name': CommandClass}\n# see tools.packaging.{platform}.build.py for custom build commands for\n# portable packages. Also e.g. 
we use build_ext command from cython if its\n# installed for c extensions.\nfrom kivy.tools.packaging.factory import FactoryBuild\ncmdclass = {\n 'build_factory': FactoryBuild,\n 'build_ext': KivyBuildExt}\n\ntry:\n # add build rules for portable packages to cmdclass\n if platform == 'win32':\n from kivy.tools.packaging.win32.build import WindowsPortableBuild\n cmdclass['build_portable'] = WindowsPortableBuild\n elif platform == 'darwin':\n from kivy.tools.packaging.osx.build import OSXPortableBuild\n cmdclass['build_portable'] = OSXPortableBuild\nexcept ImportError:\n print('User distribution detected, avoid portable command.')\n\n# Detect which opengl version headers to use\nif platform in ('android', 'darwin', 'ios', 'rpi', 'mali'):\n c_options['use_opengl_es2'] = True\nelif c_options['use_opengl_es2'] is None:\n c_options['use_opengl_es2'] = \\\n environ.get('KIVY_GRAPHICS', '').lower() == 'gles'\n\nprint('Using this graphics system: {}'.format(\n ['OpenGL', 'OpenGL ES 2'][int(c_options['use_opengl_es2'] or False)]))\n\n# check if we are in a kivy-ios build\nif platform == 'ios':\n print('Kivy-IOS project environment detect, use it.')\n print('Kivy-IOS project located at {0}'.format(kivy_ios_root))\n c_options['use_ios'] = True\n c_options['use_sdl2'] = True\n\nelif platform == 'darwin':\n if c_options['use_osx_frameworks']:\n if osx_arch == \"i386\":\n print(\"Warning: building with frameworks fail on i386\")\n else:\n print(\"OSX framework used, force to x86_64 only\")\n environ[\"ARCHFLAGS\"] = environ.get(\"ARCHFLAGS\", \"-arch x86_64\")\n print(\"OSX ARCHFLAGS are: {}\".format(environ[\"ARCHFLAGS\"]))\n\n# detect gstreamer, only on desktop\n# works if we forced the options or in autodetection\nif platform not in ('ios', 'android') and (c_options['use_gstreamer']\n in (None, True)):\n gstreamer_valid = False\n if c_options['use_osx_frameworks'] and platform == 'darwin':\n # check the existence of frameworks\n f_path = '/Library/Frameworks/GStreamer.framework'\n if not exists(f_path):\n c_options['use_gstreamer'] = False\n print('GStreamer framework not found, fallback on pkg-config')\n else:\n print('GStreamer framework found')\n gstreamer_valid = True\n c_options['use_gstreamer'] = True\n gst_flags = {\n 'extra_link_args': [\n '-F/Library/Frameworks',\n '-Xlinker', '-rpath',\n '-Xlinker', '/Library/Frameworks',\n '-Xlinker', '-headerpad',\n '-Xlinker', '190',\n '-framework', 'GStreamer'],\n 'include_dirs': [join(f_path, 'Headers')]}\n\n if not gstreamer_valid:\n # use pkg-config approach instead\n gst_flags = pkgconfig('gstreamer-1.0')\n if 'libraries' in gst_flags:\n print('GStreamer found via pkg-config')\n c_options['use_gstreamer'] = True\n\n\n# detect SDL2, only on desktop and iOS, or android if explicitly enabled\n# works if we forced the options or in autodetection\nsdl2_flags = {}\nif c_options['use_sdl2'] or (\n platform not in ('android',) and c_options['use_sdl2'] is None):\n\n sdl2_valid = False\n if c_options['use_osx_frameworks'] and platform == 'darwin':\n # check the existence of frameworks\n sdl2_valid = True\n sdl2_flags = {\n 'extra_link_args': [\n '-F/Library/Frameworks',\n '-Xlinker', '-rpath',\n '-Xlinker', '/Library/Frameworks',\n '-Xlinker', '-headerpad',\n '-Xlinker', '190'],\n 'include_dirs': [],\n 'extra_compile_args': ['-F/Library/Frameworks']\n }\n for name in ('SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer'):\n f_path = '/Library/Frameworks/{}.framework'.format(name)\n if not exists(f_path):\n print('Missing framework {}'.format(f_path))\n sdl2_valid = 
False\n continue\n sdl2_flags['extra_link_args'] += ['-framework', name]\n sdl2_flags['include_dirs'] += [join(f_path, 'Headers')]\n print('Found sdl2 frameworks: {}'.format(f_path))\n if name == 'SDL2_mixer':\n _check_and_fix_sdl2_mixer(f_path)\n\n if not sdl2_valid:\n c_options['use_sdl2'] = False\n print('SDL2 frameworks not found, fallback on pkg-config')\n else:\n c_options['use_sdl2'] = True\n print('Activate SDL2 compilation')\n\n if not sdl2_valid and platform != \"ios\":\n # use pkg-config approach instead\n sdl2_flags = pkgconfig('sdl2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer')\n if 'libraries' in sdl2_flags:\n print('SDL2 found via pkg-config')\n c_options['use_sdl2'] = True\n\n\n# -----------------------------------------------------------------------------\n# declare flags\n\n\ndef get_modulename_from_file(filename):\n filename = filename.replace(sep, '/')\n pyx = '.'.join(filename.split('.')[:-1])\n pyxl = pyx.split('/')\n while pyxl[0] != 'kivy':\n pyxl.pop(0)\n if pyxl[1] == 'kivy':\n pyxl.pop(0)\n return '.'.join(pyxl)\n\n\ndef expand(root, *args):\n return join(root, 'kivy', *args)\n\n\nclass CythonExtension(Extension):\n\n def __init__(self, *args, **kwargs):\n Extension.__init__(self, *args, **kwargs)\n self.cython_directives = {\n 'c_string_encoding': 'utf-8',\n 'profile': 'USE_PROFILE' in environ,\n 'embedsignature': 'USE_EMBEDSIGNATURE' in environ}\n # XXX with pip, setuptools is imported before distutils, and change\n # our pyx to c, then, cythonize doesn't happen. So force again our\n # sources\n self.sources = args[1]\n\n\ndef merge(d1, *args):\n d1 = deepcopy(d1)\n for d2 in args:\n for key, value in d2.items():\n value = deepcopy(value)\n if key in d1:\n d1[key].extend(value)\n else:\n d1[key] = value\n return d1\n\n\ndef determine_base_flags():\n flags = {\n 'libraries': [],\n 'include_dirs': [join(src_path, 'kivy', 'include')],\n 'library_dirs': [],\n 'extra_link_args': [],\n 'extra_compile_args': []}\n if c_options['use_ios']:\n sysroot = environ.get('IOSSDKROOT', environ.get('SDKROOT'))\n if not sysroot:\n raise Exception('IOSSDKROOT is not set')\n flags['include_dirs'] += [sysroot]\n flags['extra_compile_args'] += ['-isysroot', sysroot]\n flags['extra_link_args'] += ['-isysroot', sysroot]\n elif platform.startswith('freebsd'):\n flags['include_dirs'] += [join(\n environ.get('LOCALBASE', '/usr/local'), 'include')]\n flags['library_dirs'] += [join(\n environ.get('LOCALBASE', '/usr/local'), 'lib')]\n elif platform == 'darwin':\n v = os.uname()\n if v[2] >= '13.0.0':\n # use xcode-select to search on the right Xcode path\n # XXX use the best SDK available instead of a specific one\n import platform as _platform\n xcode_dev = getoutput('xcode-select -p').splitlines()[0]\n sdk_mac_ver = '.'.join(_platform.mac_ver()[0].split('.')[:2])\n print('Xcode detected at {}, and using OS X{} sdk'.format(\n xcode_dev, sdk_mac_ver))\n sysroot = join(\n xcode_dev.decode('utf-8'),\n 'Platforms/MacOSX.platform/Developer/SDKs',\n 'MacOSX{}.sdk'.format(sdk_mac_ver),\n 'System/Library/Frameworks')\n else:\n sysroot = ('/System/Library/Frameworks/'\n 'ApplicationServices.framework/Frameworks')\n flags['extra_compile_args'] += ['-F%s' % sysroot]\n flags['extra_link_args'] += ['-F%s' % sysroot]\n elif platform == 'win32':\n flags['include_dirs'] += [get_python_inc(prefix=sys.prefix)]\n flags['library_dirs'] += [join(sys.prefix, \"libs\")]\n return flags\n\n\ndef determine_gl_flags():\n kivy_graphics_include = join(src_path, 'kivy', 'include')\n flags = {'include_dirs': 
[kivy_graphics_include], 'libraries': []}\n base_flags = {'include_dirs': [kivy_graphics_include], 'libraries': []}\n if c_options['use_opengl_mock']:\n return flags, base_flags\n if platform == 'win32':\n flags['libraries'] = ['opengl32', 'glew32']\n elif platform == 'ios':\n flags['libraries'] = ['GLESv2']\n flags['extra_link_args'] = ['-framework', 'OpenGLES']\n elif platform == 'darwin':\n flags['extra_link_args'] = ['-framework', 'OpenGL', '-arch', osx_arch]\n flags['extra_compile_args'] = ['-arch', osx_arch]\n elif platform.startswith('freebsd'):\n flags['libraries'] = ['GL']\n elif platform.startswith('openbsd'):\n flags['include_dirs'] = ['/usr/X11R6/include']\n flags['library_dirs'] = ['/usr/X11R6/lib']\n flags['libraries'] = ['GL']\n elif platform == 'android':\n flags['include_dirs'] = [join(ndkplatform, 'usr', 'include')]\n flags['library_dirs'] = [join(ndkplatform, 'usr', 'lib')]\n flags['libraries'] = ['GLESv2']\n elif platform == 'rpi':\n flags['include_dirs'] = [\n '/opt/vc/include',\n '/opt/vc/include/interface/vcos/pthreads',\n '/opt/vc/include/interface/vmcs_host/linux']\n flags['library_dirs'] = ['/opt/vc/lib']\n flags['libraries'] = ['bcm_host', 'EGL', 'GLESv2']\n elif platform == 'mali':\n flags['include_dirs'] = ['/usr/include/']\n flags['library_dirs'] = ['/usr/lib/arm-linux-gnueabihf']\n flags['libraries'] = ['GLESv2']\n c_options['use_x11'] = True\n c_options['use_egl'] = True\n else:\n flags['libraries'] = ['GL']\n return flags, base_flags\n\n\ndef determine_sdl2():\n flags = {}\n if not c_options['use_sdl2']:\n return flags\n\n sdl2_path = environ.get('KIVY_SDL2_PATH', None)\n\n if sdl2_flags and not sdl2_path and platform == 'darwin':\n return sdl2_flags\n\n # no pkgconfig info, or we want to use a specific sdl2 path, so perform\n # manual configuration\n flags['libraries'] = ['SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer']\n split_chr = ';' if platform == 'win32' else ':'\n sdl2_paths = sdl2_path.split(split_chr) if sdl2_path else []\n\n if not sdl2_paths:\n sdl_inc = join(sys.prefix, 'include', 'SDL2')\n if isdir(sdl_inc):\n sdl2_paths = [sdl_inc]\n sdl2_paths.extend(['/usr/local/include/SDL2', '/usr/include/SDL2'])\n\n flags['include_dirs'] = sdl2_paths\n flags['extra_link_args'] = []\n flags['extra_compile_args'] = []\n flags['library_dirs'] = (\n sdl2_paths if sdl2_paths else\n ['/usr/local/lib/'])\n\n if sdl2_flags:\n flags = merge(flags, sdl2_flags)\n\n # ensure headers for all the SDL2 and sub libraries are available\n libs_to_check = ['SDL', 'SDL_mixer', 'SDL_ttf', 'SDL_image']\n can_compile = True\n for lib in libs_to_check:\n found = False\n for d in flags['include_dirs']:\n fn = join(d, '{}.h'.format(lib))\n if exists(fn):\n found = True\n print('SDL2: found {} header at {}'.format(lib, fn))\n break\n\n if not found:\n print('SDL2: missing sub library {}'.format(lib))\n can_compile = False\n\n if not can_compile:\n c_options['use_sdl2'] = False\n return {}\n\n return flags\n\n\nbase_flags = determine_base_flags()\ngl_flags, gl_flags_base = determine_gl_flags()\n\n# -----------------------------------------------------------------------------\n# sources to compile\n# all the dependencies have been found manually with:\n# grep -inr -E '(cimport|include)' kivy/graphics/context_instructions.{pxd,pyx}\ngraphics_dependencies = {\n 'buffer.pyx': ['common.pxi'],\n 'context.pxd': ['instructions.pxd', 'texture.pxd', 'vbo.pxd', 'cgl.pxd'],\n 'cgl.pxd': ['common.pxi', 'config.pxi', 'gl_redirect.h'],\n 'compiler.pxd': ['instructions.pxd'],\n 'compiler.pyx': 
['context_instructions.pxd'],\n 'cgl.pyx': ['cgl.pxd'],\n 'cgl_mock.pyx': ['cgl.pxd'],\n 'cgl_sdl2.pyx': ['cgl.pxd'],\n 'cgl_gl.pyx': ['cgl.pxd'],\n 'cgl_glew.pyx': ['cgl.pxd'],\n 'context_instructions.pxd': [\n 'transformation.pxd', 'instructions.pxd', 'texture.pxd'],\n 'fbo.pxd': ['cgl.pxd', 'instructions.pxd', 'texture.pxd'],\n 'fbo.pyx': [\n 'config.pxi', 'opcodes.pxi', 'transformation.pxd', 'context.pxd'],\n 'gl_instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd', 'instructions.pxd'],\n 'instructions.pxd': [\n 'vbo.pxd', 'context_instructions.pxd', 'compiler.pxd', 'shader.pxd',\n 'texture.pxd', '../_event.pxd'],\n 'instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd',\n 'context.pxd', 'common.pxi', 'vertex.pxd', 'transformation.pxd'],\n 'opengl.pyx': [\n 'config.pxi', 'common.pxi', 'cgl.pxd', 'gl_redirect.h'],\n 'opengl_utils.pyx': [\n 'opengl_utils_def.pxi', 'cgl.pxd', ],\n 'shader.pxd': ['cgl.pxd', 'transformation.pxd', 'vertex.pxd'],\n 'shader.pyx': [\n 'config.pxi', 'common.pxi', 'cgl.pxd',\n 'vertex.pxd', 'transformation.pxd', 'context.pxd',\n 'gl_debug_logger.pxi'],\n 'stencil_instructions.pxd': ['instructions.pxd'],\n 'stencil_instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd',\n 'gl_debug_logger.pxi'],\n 'scissor_instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd'],\n 'svg.pyx': ['config.pxi', 'common.pxi', 'texture.pxd', 'instructions.pxd',\n 'vertex_instructions.pxd', 'tesselator.pxd'],\n 'texture.pxd': ['cgl.pxd'],\n 'texture.pyx': [\n 'config.pxi', 'common.pxi', 'opengl_utils_def.pxi', 'context.pxd',\n 'cgl.pxd', 'opengl_utils.pxd',\n 'img_tools.pxi', 'gl_debug_logger.pxi'],\n 'vbo.pxd': ['buffer.pxd', 'cgl.pxd', 'vertex.pxd'],\n 'vbo.pyx': [\n 'config.pxi', 'common.pxi', 'context.pxd',\n 'instructions.pxd', 'shader.pxd', 'gl_debug_logger.pxi'],\n 'vertex.pxd': ['cgl.pxd'],\n 'vertex.pyx': ['config.pxi', 'common.pxi'],\n 'vertex_instructions.pyx': [\n 'config.pxi', 'common.pxi', 'vbo.pxd', 'vertex.pxd',\n 'instructions.pxd', 'vertex_instructions.pxd',\n 'cgl.pxd', 'texture.pxd', 'vertex_instructions_line.pxi'],\n 'vertex_instructions_line.pxi': ['stencil_instructions.pxd']}\n\nsources = {\n '_event.pyx': merge(base_flags, {'depends': ['properties.pxd']}),\n '_clock.pyx': {},\n 'weakproxy.pyx': {},\n 'properties.pyx': merge(base_flags, {'depends': ['_event.pxd']}),\n 'graphics/buffer.pyx': merge(base_flags, gl_flags_base),\n 'graphics/context.pyx': merge(base_flags, gl_flags_base),\n 'graphics/compiler.pyx': merge(base_flags, gl_flags_base),\n 'graphics/context_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/fbo.pyx': merge(base_flags, gl_flags_base),\n 'graphics/gl_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/opengl.pyx': merge(base_flags, gl_flags_base),\n 'graphics/opengl_utils.pyx': merge(base_flags, gl_flags_base),\n 'graphics/shader.pyx': merge(base_flags, gl_flags_base),\n 'graphics/stencil_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/scissor_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/texture.pyx': merge(base_flags, gl_flags_base),\n 'graphics/transformation.pyx': merge(base_flags, gl_flags_base),\n 'graphics/vbo.pyx': merge(base_flags, gl_flags_base),\n 'graphics/vertex.pyx': merge(base_flags, gl_flags_base),\n 'graphics/vertex_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/cgl.pyx': merge(base_flags, gl_flags_base),\n 'graphics/cgl_backend/cgl_mock.pyx': 
merge(base_flags, gl_flags_base),\n 'graphics/cgl_backend/cgl_gl.pyx': merge(base_flags, gl_flags),\n 'graphics/cgl_backend/cgl_glew.pyx': merge(base_flags, gl_flags),\n 'graphics/cgl_backend/cgl_sdl2.pyx': merge(base_flags, gl_flags_base),\n 'graphics/cgl_backend/cgl_debug.pyx': merge(base_flags, gl_flags_base),\n 'core/text/text_layout.pyx': base_flags,\n 'graphics/tesselator.pyx': merge(base_flags, {\n 'include_dirs': ['kivy/lib/libtess2/Include'],\n 'c_depends': [\n 'lib/libtess2/Source/bucketalloc.c',\n 'lib/libtess2/Source/dict.c',\n 'lib/libtess2/Source/geom.c',\n 'lib/libtess2/Source/mesh.c',\n 'lib/libtess2/Source/priorityq.c',\n 'lib/libtess2/Source/sweep.c',\n 'lib/libtess2/Source/tess.c'\n ]\n }),\n 'graphics/svg.pyx': merge(base_flags, gl_flags_base)\n}\n\nif c_options[\"use_sdl2\"]:\n sdl2_flags = determine_sdl2()\n\nif c_options['use_sdl2'] and sdl2_flags:\n sources['graphics/cgl_backend/cgl_sdl2.pyx'] = merge(\n sources['graphics/cgl_backend/cgl_sdl2.pyx'], sdl2_flags)\n sdl2_depends = {'depends': ['lib/sdl2.pxi']}\n for source_file in ('core/window/_window_sdl2.pyx',\n 'core/image/_img_sdl2.pyx',\n 'core/text/_text_sdl2.pyx',\n 'core/audio/audio_sdl2.pyx',\n 'core/clipboard/_clipboard_sdl2.pyx'):\n sources[source_file] = merge(\n base_flags, sdl2_flags, sdl2_depends)\n\nif platform in ('darwin', 'ios'):\n # activate ImageIO provider for our core image\n if platform == 'ios':\n osx_flags = {'extra_link_args': [\n '-framework', 'Foundation',\n '-framework', 'UIKit',\n '-framework', 'AudioToolbox',\n '-framework', 'CoreGraphics',\n '-framework', 'QuartzCore',\n '-framework', 'ImageIO',\n '-framework', 'Accelerate']}\n else:\n osx_flags = {'extra_link_args': [\n '-framework', 'ApplicationServices']}\n sources['core/image/img_imageio.pyx'] = merge(\n base_flags, osx_flags)\n\nif c_options['use_avfoundation']:\n import platform as _platform\n mac_ver = [int(x) for x in _platform.mac_ver()[0].split('.')[:2]]\n if mac_ver >= [10, 7]:\n osx_flags = {\n 'extra_link_args': ['-framework', 'AVFoundation'],\n 'extra_compile_args': ['-ObjC++'],\n 'depends': ['core/camera/camera_avfoundation_implem.m']}\n sources['core/camera/camera_avfoundation.pyx'] = merge(\n base_flags, osx_flags)\n else:\n print('AVFoundation cannot be used, OSX >= 10.7 is required')\n\nif c_options['use_rpi']:\n sources['lib/vidcore_lite/egl.pyx'] = merge(\n base_flags, gl_flags)\n sources['lib/vidcore_lite/bcm.pyx'] = merge(\n base_flags, gl_flags)\n\nif c_options['use_x11']:\n libs = ['Xrender', 'X11']\n if c_options['use_egl']:\n libs += ['EGL']\n else:\n libs += ['GL']\n sources['core/window/window_x11.pyx'] = merge(\n base_flags, gl_flags, {\n # FIXME add an option to depend on them but not compile them\n # cause keytab is included in core, and core is included in\n # window_x11\n #\n # 'depends': [\n # 'core/window/window_x11_keytab.c',\n # 'core/window/window_x11_core.c'],\n 'libraries': libs})\n\nif c_options['use_gstreamer']:\n sources['lib/gstplayer/_gstplayer.pyx'] = merge(\n base_flags, gst_flags, {\n 'depends': ['lib/gstplayer/_gstplayer.h']})\n\n\n# -----------------------------------------------------------------------------\n# extension modules\n\ndef get_dependencies(name, deps=None):\n if deps is None:\n deps = []\n for dep in graphics_dependencies.get(name, []):\n if dep not in deps:\n deps.append(dep)\n get_dependencies(dep, deps)\n return deps\n\n\ndef resolve_dependencies(fn, depends):\n fn = basename(fn)\n deps = []\n get_dependencies(fn, deps)\n get_dependencies(fn.replace('.pyx', '.pxd'), 
deps)\n\n deps_final = []\n paths_to_test = ['graphics', 'include']\n for dep in deps:\n found = False\n for path in paths_to_test:\n filename = expand(src_path, path, dep)\n if exists(filename):\n deps_final.append(filename)\n found = True\n break\n if not found:\n print('ERROR: Dependency for {} not resolved: {}'.format(\n fn, dep\n ))\n\n return deps_final\n\n\ndef get_extensions_from_sources(sources):\n ext_modules = []\n if environ.get('KIVY_FAKE_BUILDEXT'):\n print('Fake build_ext asked, will generate only .h/.c')\n return ext_modules\n for pyx, flags in sources.items():\n is_graphics = pyx.startswith('graphics')\n pyx = expand(src_path, pyx)\n depends = [expand(src_path, x) for x in flags.pop('depends', [])]\n c_depends = [expand(src_path, x) for x in flags.pop('c_depends', [])]\n if not have_cython:\n pyx = '%s.c' % pyx[:-4]\n if is_graphics:\n depends = resolve_dependencies(pyx, depends)\n f_depends = [x for x in depends if x.rsplit('.', 1)[-1] in (\n 'c', 'cpp', 'm')]\n module_name = get_modulename_from_file(pyx)\n flags_clean = {'depends': depends}\n for key, value in flags.items():\n if len(value):\n flags_clean[key] = value\n ext_modules.append(CythonExtension(\n module_name, [pyx] + f_depends + c_depends, **flags_clean))\n return ext_modules\n\n\next_modules = get_extensions_from_sources(sources)\n\n\n# -----------------------------------------------------------------------------\n# automatically detect data files\nsplit_examples = int(environ.get('KIVY_SPLIT_EXAMPLES', '0'))\ndata_file_prefix = 'share/kivy-'\nexamples = {}\nexamples_allowed_ext = ('readme', 'py', 'wav', 'png', 'jpg', 'svg', 'json',\n 'avi', 'gif', 'txt', 'ttf', 'obj', 'mtl', 'kv', 'mpg',\n 'glsl', 'zip')\nfor root, subFolders, files in walk('examples'):\n for fn in files:\n ext = fn.split('.')[-1].lower()\n if ext not in examples_allowed_ext:\n continue\n filename = join(root, fn)\n directory = '%s%s' % (data_file_prefix, dirname(filename))\n if directory not in examples:\n examples[directory] = []\n examples[directory].append(filename)\n\nbinary_deps = []\nbinary_deps_path = join(src_path, 'kivy', 'binary_deps')\nif isdir(binary_deps_path):\n for root, dirnames, filenames in walk(binary_deps_path):\n for fname in filenames:\n binary_deps.append(\n join(root.replace(binary_deps_path, 'binary_deps'), fname))\n\n# -----------------------------------------------------------------------------\n# setup !\nif not build_examples:\n setup(\n name='Kivy',\n version=get_version(),\n author='Kivy Team and other contributors',\n author_email='[email protected]',\n url='http://kivy.org',\n license='MIT',\n description=(\n 'A software library for rapid development of '\n 'hardware-accelerated multitouch applications.'),\n ext_modules=ext_modules,\n cmdclass=cmdclass,\n packages=[\n 'kivy',\n 'kivy.adapters',\n 'kivy.core',\n 'kivy.core.audio',\n 'kivy.core.camera',\n 'kivy.core.clipboard',\n 'kivy.core.image',\n 'kivy.core.gl',\n 'kivy.core.spelling',\n 'kivy.core.text',\n 'kivy.core.video',\n 'kivy.core.window',\n 'kivy.deps',\n 'kivy.effects',\n 'kivy.graphics',\n 'kivy.graphics.cgl_backend',\n 'kivy.garden',\n 'kivy.input',\n 'kivy.input.postproc',\n 'kivy.input.providers',\n 'kivy.lang',\n 'kivy.lib',\n 'kivy.lib.osc',\n 'kivy.lib.gstplayer',\n 'kivy.lib.vidcore_lite',\n 'kivy.modules',\n 'kivy.network',\n 'kivy.storage',\n 'kivy.tests',\n 'kivy.tools',\n 'kivy.tools.packaging',\n 'kivy.tools.packaging.pyinstaller_hooks',\n 'kivy.tools.highlight',\n 'kivy.extras',\n 'kivy.uix',\n 'kivy.uix.behaviors',\n 
'kivy.uix.recycleview',\n ],\n package_dir={'kivy': 'kivy'},\n package_data={'kivy': [\n '*.pxd',\n '*.pxi',\n 'core/text/*.pxd',\n 'core/text/*.pxi',\n 'graphics/*.pxd',\n 'graphics/*.pxi',\n 'graphics/*.h',\n 'include/*',\n 'lib/vidcore_lite/*.pxd',\n 'lib/vidcore_lite/*.pxi',\n 'data/*.kv',\n 'data/*.json',\n 'data/fonts/*.ttf',\n 'data/images/*.png',\n 'data/images/*.jpg',\n 'data/images/*.gif',\n 'data/images/*.atlas',\n 'data/keyboards/*.json',\n 'data/logo/*.png',\n 'data/glsl/*.png',\n 'data/glsl/*.vs',\n 'data/glsl/*.fs',\n 'tests/*.zip',\n 'tests/*.kv',\n 'tests/*.png',\n 'tests/*.ttf',\n 'tests/*.ogg',\n 'tools/gles_compat/*',\n 'tools/highlight/*',\n 'tools/packaging/README.txt',\n 'tools/packaging/win32/kivy.bat',\n 'tools/packaging/win32/kivyenv.sh',\n 'tools/packaging/win32/README.txt',\n 'tools/packaging/osx/Info.plist',\n 'tools/packaging/osx/InfoPlist.strings',\n 'tools/packaging/osx/kivy.sh',\n 'tools/pep8checker/*',\n 'tools/theming/defaulttheme/*',\n ] + binary_deps},\n data_files=[] if split_examples else list(examples.items()),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: MacOS X',\n 'Environment :: Win32 (MS Windows)',\n 'Environment :: X11 Applications',\n 'Intended Audience :: Developers',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: BSD :: FreeBSD',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Artistic Software',\n 'Topic :: Games/Entertainment',\n 'Topic :: Multimedia :: Graphics :: 3D Rendering',\n 'Topic :: Multimedia :: Graphics :: Capture :: Digital Camera',\n 'Topic :: Multimedia :: Graphics :: Presentation',\n 'Topic :: Multimedia :: Graphics :: Viewers',\n 'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',\n 'Topic :: Multimedia :: Video :: Display',\n 'Topic :: Scientific/Engineering :: Human Machine Interfaces',\n 'Topic :: Scientific/Engineering :: Visualization',\n ('Topic :: Software Development :: Libraries :: '\n 'Application Frameworks'),\n 'Topic :: Software Development :: User Interfaces'],\n dependency_links=[\n 'https://github.com/kivy-garden/garden/archive/master.zip'],\n install_requires=['Kivy-Garden>=0.1.4', 'docutils', 'pygments'],\n setup_requires=[\n 'cython>=' + MIN_CYTHON_STRING\n ] if not skip_cython else [])\nelse:\n setup(\n name='Kivy-examples',\n version=get_version(),\n author='Kivy Team and other contributors',\n author_email='[email protected]',\n url='http://kivy.org',\n license='MIT',\n description=('Kivy examples.'),\n data_files=list(examples.items()))\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 72c8c001af..56da910481 100644 --- a/setup.py +++ b/setup.py @@ -46,7 +46,7 @@ def get_version(filename='kivy/version.py'): GIT_REVISION = check_output( ['git', 'rev-parse', 'HEAD'] ).strip().decode('ascii') - except CalledProcessError: + except (CalledProcessError, FileNotFoundError): GIT_REVISION = "Unknown" cnt = (
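The diff above is small but easy to misread: `subprocess.check_output` raises `CalledProcessError` when the git binary runs and exits non-zero, but raises `FileNotFoundError` when the binary is missing from PATH entirely, so the original handler let version generation crash on machines without git installed. A minimal sketch of the broadened fallback, mirroring the patched `get_version` logic rather than introducing anything new:

```python
# Sketch of the failure the diff guards against: building on a host
# where the `git` executable is absent.
from subprocess import check_output, CalledProcessError

try:
    GIT_REVISION = check_output(
        ['git', 'rev-parse', 'HEAD']
    ).strip().decode('ascii')
except (CalledProcessError, FileNotFoundError):
    # CalledProcessError: git ran but failed (e.g. not inside a repository).
    # FileNotFoundError: the git executable itself could not be found.
    GIT_REVISION = "Unknown"
```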
litestar-org__litestar-1610
StaticFilesConfig and virtual directories I'm trying to write a ``FileSystemProtocol`` to load files from package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks whether the given directories exist on the local filesystem. This assumption does not hold in general, especially for any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems. https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
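To make the failure concrete, here is a minimal, hypothetical sketch. It assumes Python 3.9+ (for `importlib.resources.files`), pydantic v1 validation semantics, and an installed package named `myapp` shipped as a zip; the model below is a stand-in for the config class linked above, not Starlite's actual one:

```python
# Hypothetical reproduction of the DirectoryPath validation failure.
from typing import List

from pydantic import BaseModel, DirectoryPath, ValidationError


class StaticFilesStandIn(BaseModel):
    # Mirrors the ``directories`` field typed as DirectoryPath in the issue.
    directories: List[DirectoryPath]


try:
    from importlib.resources import files

    # For a zipped package, files() returns a Traversable whose string form
    # is not an existing directory on disk, so DirectoryPath rejects it.
    StaticFilesStandIn(directories=[str(files("myapp") / "static")])
except ModuleNotFoundError:
    print("'myapp' is a placeholder package name for this sketch")
except ValidationError as exc:
    print(exc)  # pydantic reports the path does not exist / is not a directory
```

One direction, as the issue suggests, is to relax the declared type so that existence is checked by the configured filesystem implementation rather than eagerly by pydantic.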
[ { "content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Generic, TypeVar, cast\n\nfrom litestar._parsers import parse_cookie_string, parse_headers, parse_query_string\nfrom litestar.datastructures.headers import Headers\nfrom litestar.datastructures.multi_dicts import MultiDict\nfrom litestar.datastructures.state import State\nfrom litestar.datastructures.url import URL, Address, make_absolute_url\nfrom litestar.exceptions import ImproperlyConfiguredException\nfrom litestar.types.empty import Empty\n\n__all__ = (\"ASGIConnection\", \"empty_receive\", \"empty_send\")\n\n\nif TYPE_CHECKING:\n from typing import NoReturn\n\n from pydantic import BaseModel\n\n from litestar.app import Litestar\n from litestar.types import EmptyType\n from litestar.types.asgi_types import Message, Receive, Scope, Send\n from litestar.types.protocols import Logger\n\nUserT = TypeVar(\"UserT\")\nAuthT = TypeVar(\"AuthT\")\nHandlerT = TypeVar(\"HandlerT\")\nStateT = TypeVar(\"StateT\", bound=State)\n\n\nasync def empty_receive() -> NoReturn: # pragma: no cover\n \"\"\"Raise a ``RuntimeError``.\n\n Serves as a placeholder ``send`` function.\n\n Raises:\n RuntimeError\n \"\"\"\n raise RuntimeError()\n\n\nasync def empty_send(_: Message) -> NoReturn: # pragma: no cover\n \"\"\"Raise a ``RuntimeError``.\n\n Serves as a placeholder ``send`` function.\n\n Args:\n _: An ASGI message\n\n Raises:\n RuntimeError\n \"\"\"\n raise RuntimeError()\n\n\nclass ASGIConnection(Generic[HandlerT, UserT, AuthT, StateT]):\n \"\"\"The base ASGI connection container.\"\"\"\n\n __slots__ = (\"scope\", \"receive\", \"send\", \"_base_url\", \"_url\", \"_parsed_query\", \"_headers\", \"_cookies\")\n\n scope: Scope\n \"\"\"The ASGI scope attached to the connection.\"\"\"\n receive: Receive\n \"\"\"The ASGI receive function.\"\"\"\n send: Send\n \"\"\"The ASGI send function.\"\"\"\n\n def __init__(self, scope: Scope, receive: Receive = empty_receive, send: Send = empty_send) -> None:\n \"\"\"Initialize ``ASGIConnection``.\n\n Args:\n scope: The ASGI connection scope.\n receive: The ASGI receive function.\n send: The ASGI send function.\n \"\"\"\n self.scope = scope\n self.receive = receive\n self.send = send\n self._base_url: Any = scope.get(\"_base_url\", Empty)\n self._url: Any = scope.get(\"_url\", Empty)\n self._parsed_query: Any = scope.get(\"_parsed_query\", Empty)\n self._cookies: Any = scope.get(\"_cookies\", Empty)\n self._headers: Any = scope.get(\"_headers\", Empty)\n\n @property\n def app(self) -> Litestar:\n \"\"\"Return the ``app`` for this connection.\n\n Returns:\n The :class:`Litestar <litestar.app.Litestar>` application instance\n \"\"\"\n return self.scope[\"app\"]\n\n @property\n def route_handler(self) -> HandlerT:\n \"\"\"Return the ``route_handler`` for this connection.\n\n Returns:\n The target route handler instance.\n \"\"\"\n return cast(\"HandlerT\", self.scope[\"route_handler\"])\n\n @property\n def state(self) -> StateT:\n \"\"\"Return the ``State`` of this connection.\n\n Returns:\n A State instance constructed from the scope[\"state\"] value.\n \"\"\"\n return cast(\"StateT\", State(self.scope[\"state\"]))\n\n @property\n def url(self) -> URL:\n \"\"\"Return the URL of this connection's ``Scope``.\n\n Returns:\n A URL instance constructed from the request's scope.\n \"\"\"\n if self._url is Empty:\n self._url = self.scope[\"_url\"] = URL.from_scope(self.scope) # type: ignore[typeddict-unknown-key]\n\n return cast(\"URL\", self._url)\n\n @property\n def base_url(self) -> 
URL:\n \"\"\"Return the base URL of this connection's ``Scope``.\n\n Returns:\n A URL instance constructed from the request's scope, representing only the base part\n (host + domain + prefix) of the request.\n \"\"\"\n if self._base_url is Empty:\n scope = {\n **self.scope,\n \"path\": \"/\",\n \"query_string\": b\"\",\n \"root_path\": self.scope.get(\"app_root_path\") or self.scope.get(\"root_path\", \"\"),\n }\n self._base_url = self.scope[\"_base_url\"] = URL.from_scope(cast(\"Scope\", scope)) # type: ignore[typeddict-unknown-key]\n\n return cast(\"URL\", self._base_url)\n\n @property\n def headers(self) -> Headers:\n \"\"\"Return the headers of this connection's ``Scope``.\n\n Returns:\n A Headers instance with the request's scope[\"headers\"] value.\n \"\"\"\n if self._headers is Empty:\n self.scope.setdefault(\"headers\", [])\n self._headers = self.scope[\"_headers\"] = parse_headers(tuple(self.scope[\"headers\"])) # type: ignore[typeddict-unknown-key]\n\n return Headers(self._headers)\n\n @property\n def query_params(self) -> MultiDict:\n \"\"\"Return the query parameters of this connection's ``Scope``.\n\n Returns:\n A normalized dict of query parameters. Multiple values for the same key are returned as a list.\n \"\"\"\n if self._parsed_query is Empty:\n self._parsed_query = self.scope[\"_parsed_query\"] = parse_query_string(self.scope.get(\"query_string\", b\"\")) # type: ignore\n\n return MultiDict(self._parsed_query)\n\n @property\n def path_params(self) -> dict[str, Any]:\n \"\"\"Return the ``path_params`` of this connection's ``Scope``.\n\n Returns:\n A string keyed dictionary of path parameter values.\n \"\"\"\n return self.scope[\"path_params\"]\n\n @property\n def cookies(self) -> dict[str, str]:\n \"\"\"Return the ``cookies`` of this connection's ``Scope``.\n\n Returns:\n Returns any cookies stored in the header as a parsed dictionary.\n \"\"\"\n if self._cookies is Empty:\n cookies: dict[str, str] = {}\n cookie_header = self.headers.get(\"cookie\")\n\n if cookie_header:\n cookies = parse_cookie_string(cookie_header)\n\n self._cookies = self.scope[\"_cookies\"] = cookies # type: ignore[typeddict-unknown-key]\n\n return cast(\"dict[str, str]\", self._cookies)\n\n @property\n def client(self) -> Address | None:\n \"\"\"Return the ``client`` data of this connection's ``Scope``.\n\n Returns:\n A two tuple of the host name and port number.\n \"\"\"\n client = self.scope.get(\"client\")\n return Address(*client) if client else None\n\n @property\n def auth(self) -> AuthT:\n \"\"\"Return the ``auth`` data of this connection's ``Scope``.\n\n Raises:\n ImproperlyConfiguredException: If ``auth`` is not set in scope via an ``AuthMiddleware``, raises an exception\n\n Returns:\n A type correlating to the generic variable Auth.\n \"\"\"\n if \"auth\" not in self.scope:\n raise ImproperlyConfiguredException(\"'auth' is not defined in scope, install an AuthMiddleware to set it\")\n\n return cast(\"AuthT\", self.scope[\"auth\"])\n\n @property\n def user(self) -> UserT:\n \"\"\"Return the ``user`` data of this connection's ``Scope``.\n\n Raises:\n ImproperlyConfiguredException: If ``user`` is not set in scope via an ``AuthMiddleware``, raises an exception\n\n Returns:\n A type correlating to the generic variable User.\n \"\"\"\n if \"user\" not in self.scope:\n raise ImproperlyConfiguredException(\"'user' is not defined in scope, install an AuthMiddleware to set it\")\n\n return cast(\"UserT\", self.scope[\"user\"])\n\n @property\n def session(self) -> dict[str, Any]:\n \"\"\"Return the 
session for this connection if a session was previously set in the ``Scope``\n\n Returns:\n A dictionary representing the session value - if existing.\n\n Raises:\n ImproperlyConfiguredException: if session is not set in scope.\n \"\"\"\n if \"session\" not in self.scope:\n raise ImproperlyConfiguredException(\n \"'session' is not defined in scope, install a SessionMiddleware to set it\"\n )\n\n return cast(\"dict[str, Any]\", self.scope[\"session\"])\n\n @property\n def logger(self) -> Logger:\n \"\"\"Return the ``Logger`` instance for this connection.\n\n Returns:\n A ``Logger`` instance.\n\n Raises:\n ImproperlyConfiguredException: if ``log_config`` has not been passed to the Litestar constructor.\n \"\"\"\n return self.app.get_logger()\n\n def set_session(self, value: dict[str, Any] | BaseModel | EmptyType) -> None:\n \"\"\"Set the session in the connection's ``Scope``.\n\n If the :class:`SessionMiddleware <.middleware.session.base.SessionMiddleware>` is enabled, the session will be added\n to the response as a cookie header.\n\n Args:\n value: Dictionary or pydantic model instance for the session data.\n\n Returns:\n None.\n \"\"\"\n self.scope[\"session\"] = value\n\n def clear_session(self) -> None:\n \"\"\"Remove the session from the connection's ``Scope``.\n\n If the :class:`Litestar SessionMiddleware <.middleware.session.base.SessionMiddleware>` is enabled, this will cause\n the session data to be cleared.\n\n Returns:\n None.\n \"\"\"\n self.scope[\"session\"] = Empty\n\n def url_for(self, name: str, **path_parameters: dict[str, Any]) -> str:\n \"\"\"Return the url for a given route handler name.\n\n Args:\n name: The ``name`` of the request route handler.\n **path_parameters: Values for path parameters in the route\n\n Raises:\n NoRouteMatchFoundException: If route with ``name`` does not exist, path parameters are missing or have a\n wrong type.\n\n Returns:\n A string representing the absolute url of the route handler.\n \"\"\"\n litestar_instance = self.scope[\"app\"]\n url_path = litestar_instance.route_reverse(name, **path_parameters)\n\n return make_absolute_url(url_path, self.base_url)\n\n def url_for_static_asset(self, name: str, file_path: str) -> str:\n \"\"\"Receives a static files handler name, an asset file path and returns resolved absolute url to the asset.\n\n Args:\n name: A static handler unique name.\n file_path: a string containing path to an asset.\n\n Raises:\n NoRouteMatchFoundException: If static files handler with ``name`` does not exist.\n\n Returns:\n A string representing absolute url to the asset.\n \"\"\"\n litestar_instance = self.scope[\"app\"]\n url_path = litestar_instance.url_for_static_asset(name, file_path)\n\n return make_absolute_url(url_path, self.base_url)\n", "path": "litestar/connection/base.py" } ]
[ { "content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Generic, TypeVar, cast\n\nfrom litestar._parsers import parse_cookie_string, parse_headers, parse_query_string\nfrom litestar.datastructures.headers import Headers\nfrom litestar.datastructures.multi_dicts import MultiDict\nfrom litestar.datastructures.state import State\nfrom litestar.datastructures.url import URL, Address, make_absolute_url\nfrom litestar.exceptions import ImproperlyConfiguredException\nfrom litestar.types.empty import Empty\n\n__all__ = (\"ASGIConnection\", \"empty_receive\", \"empty_send\")\n\n\nif TYPE_CHECKING:\n from typing import NoReturn\n\n from pydantic import BaseModel\n\n from litestar.app import Litestar\n from litestar.types import EmptyType\n from litestar.types.asgi_types import Message, Receive, Scope, Send\n from litestar.types.protocols import Logger\n\nUserT = TypeVar(\"UserT\")\nAuthT = TypeVar(\"AuthT\")\nHandlerT = TypeVar(\"HandlerT\")\nStateT = TypeVar(\"StateT\", bound=State)\n\n\nasync def empty_receive() -> NoReturn: # pragma: no cover\n \"\"\"Raise a ``RuntimeError``.\n\n Serves as a placeholder ``send`` function.\n\n Raises:\n RuntimeError\n \"\"\"\n raise RuntimeError()\n\n\nasync def empty_send(_: Message) -> NoReturn: # pragma: no cover\n \"\"\"Raise a ``RuntimeError``.\n\n Serves as a placeholder ``send`` function.\n\n Args:\n _: An ASGI message\n\n Raises:\n RuntimeError\n \"\"\"\n raise RuntimeError()\n\n\nclass ASGIConnection(Generic[HandlerT, UserT, AuthT, StateT]):\n \"\"\"The base ASGI connection container.\"\"\"\n\n __slots__ = (\"scope\", \"receive\", \"send\", \"_base_url\", \"_url\", \"_parsed_query\", \"_headers\", \"_cookies\")\n\n scope: Scope\n \"\"\"The ASGI scope attached to the connection.\"\"\"\n receive: Receive\n \"\"\"The ASGI receive function.\"\"\"\n send: Send\n \"\"\"The ASGI send function.\"\"\"\n\n def __init__(self, scope: Scope, receive: Receive = empty_receive, send: Send = empty_send) -> None:\n \"\"\"Initialize ``ASGIConnection``.\n\n Args:\n scope: The ASGI connection scope.\n receive: The ASGI receive function.\n send: The ASGI send function.\n \"\"\"\n self.scope = scope\n self.receive = receive\n self.send = send\n self._base_url: Any = scope.get(\"_base_url\", Empty)\n self._url: Any = scope.get(\"_url\", Empty)\n self._parsed_query: Any = scope.get(\"_parsed_query\", Empty)\n self._cookies: Any = scope.get(\"_cookies\", Empty)\n self._headers: Any = scope.get(\"_headers\", Empty)\n\n @property\n def app(self) -> Litestar:\n \"\"\"Return the ``app`` for this connection.\n\n Returns:\n The :class:`Litestar <litestar.app.Litestar>` application instance\n \"\"\"\n return self.scope[\"app\"]\n\n @property\n def route_handler(self) -> HandlerT:\n \"\"\"Return the ``route_handler`` for this connection.\n\n Returns:\n The target route handler instance.\n \"\"\"\n return cast(\"HandlerT\", self.scope[\"route_handler\"])\n\n @property\n def state(self) -> StateT:\n \"\"\"Return the ``State`` of this connection.\n\n Returns:\n A State instance constructed from the scope[\"state\"] value.\n \"\"\"\n return cast(\"StateT\", State(self.scope[\"state\"]))\n\n @property\n def url(self) -> URL:\n \"\"\"Return the URL of this connection's ``Scope``.\n\n Returns:\n A URL instance constructed from the request's scope.\n \"\"\"\n if self._url is Empty:\n self._url = self.scope[\"_url\"] = URL.from_scope(self.scope) # type: ignore[typeddict-unknown-key]\n\n return cast(\"URL\", self._url)\n\n @property\n def base_url(self) -> 
URL:\n \"\"\"Return the base URL of this connection's ``Scope``.\n\n Returns:\n A URL instance constructed from the request's scope, representing only the base part\n (host + domain + prefix) of the request.\n \"\"\"\n if self._base_url is Empty:\n scope = {\n **self.scope,\n \"path\": \"/\",\n \"query_string\": b\"\",\n \"root_path\": self.scope.get(\"app_root_path\") or self.scope.get(\"root_path\", \"\"),\n }\n self._base_url = self.scope[\"_base_url\"] = URL.from_scope(cast(\"Scope\", scope)) # type: ignore[typeddict-unknown-key]\n\n return cast(\"URL\", self._base_url)\n\n @property\n def headers(self) -> Headers:\n \"\"\"Return the headers of this connection's ``Scope``.\n\n Returns:\n A Headers instance with the request's scope[\"headers\"] value.\n \"\"\"\n if self._headers is Empty:\n self.scope.setdefault(\"headers\", [])\n self._headers = self.scope[\"_headers\"] = parse_headers(tuple(self.scope[\"headers\"])) # type: ignore[typeddict-unknown-key]\n\n return Headers(self._headers)\n\n @property\n def query_params(self) -> MultiDict:\n \"\"\"Return the query parameters of this connection's ``Scope``.\n\n Returns:\n A normalized dict of query parameters. Multiple values for the same key are returned as a list.\n \"\"\"\n if self._parsed_query is Empty:\n self._parsed_query = self.scope[\"_parsed_query\"] = parse_query_string(self.scope.get(\"query_string\", b\"\")) # type: ignore\n\n return MultiDict(self._parsed_query)\n\n @property\n def path_params(self) -> dict[str, Any]:\n \"\"\"Return the ``path_params`` of this connection's ``Scope``.\n\n Returns:\n A string keyed dictionary of path parameter values.\n \"\"\"\n return self.scope[\"path_params\"]\n\n @property\n def cookies(self) -> dict[str, str]:\n \"\"\"Return the ``cookies`` of this connection's ``Scope``.\n\n Returns:\n Returns any cookies stored in the header as a parsed dictionary.\n \"\"\"\n if self._cookies is Empty:\n cookies: dict[str, str] = {}\n cookie_header = self.headers.get(\"cookie\")\n\n if cookie_header:\n cookies = parse_cookie_string(cookie_header)\n\n self._cookies = self.scope[\"_cookies\"] = cookies # type: ignore[typeddict-unknown-key]\n\n return cast(\"dict[str, str]\", self._cookies)\n\n @property\n def client(self) -> Address | None:\n \"\"\"Return the ``client`` data of this connection's ``Scope``.\n\n Returns:\n A two tuple of the host name and port number.\n \"\"\"\n client = self.scope.get(\"client\")\n return Address(*client) if client else None\n\n @property\n def auth(self) -> AuthT:\n \"\"\"Return the ``auth`` data of this connection's ``Scope``.\n\n Raises:\n ImproperlyConfiguredException: If ``auth`` is not set in scope via an ``AuthMiddleware``, raises an exception\n\n Returns:\n A type correlating to the generic variable Auth.\n \"\"\"\n if \"auth\" not in self.scope:\n raise ImproperlyConfiguredException(\"'auth' is not defined in scope, install an AuthMiddleware to set it\")\n\n return cast(\"AuthT\", self.scope[\"auth\"])\n\n @property\n def user(self) -> UserT:\n \"\"\"Return the ``user`` data of this connection's ``Scope``.\n\n Raises:\n ImproperlyConfiguredException: If ``user`` is not set in scope via an ``AuthMiddleware``, raises an exception\n\n Returns:\n A type correlating to the generic variable User.\n \"\"\"\n if \"user\" not in self.scope:\n raise ImproperlyConfiguredException(\"'user' is not defined in scope, install an AuthMiddleware to set it\")\n\n return cast(\"UserT\", self.scope[\"user\"])\n\n @property\n def session(self) -> dict[str, Any]:\n \"\"\"Return the 
session for this connection if a session was previously set in the ``Scope``\n\n Returns:\n A dictionary representing the session value - if existing.\n\n Raises:\n ImproperlyConfiguredException: if session is not set in scope.\n \"\"\"\n if \"session\" not in self.scope:\n raise ImproperlyConfiguredException(\n \"'session' is not defined in scope, install a SessionMiddleware to set it\"\n )\n\n return cast(\"dict[str, Any]\", self.scope[\"session\"])\n\n @property\n def logger(self) -> Logger:\n \"\"\"Return the ``Logger`` instance for this connection.\n\n Returns:\n A ``Logger`` instance.\n\n Raises:\n ImproperlyConfiguredException: if ``log_config`` has not been passed to the Litestar constructor.\n \"\"\"\n return self.app.get_logger()\n\n def set_session(self, value: dict[str, Any] | BaseModel | EmptyType) -> None:\n \"\"\"Set the session in the connection's ``Scope``.\n\n If the :class:`SessionMiddleware <.middleware.session.base.SessionMiddleware>` is enabled, the session will be added\n to the response as a cookie header.\n\n Args:\n value: Dictionary or pydantic model instance for the session data.\n\n Returns:\n None.\n \"\"\"\n self.scope[\"session\"] = value\n\n def clear_session(self) -> None:\n \"\"\"Remove the session from the connection's ``Scope``.\n\n If the :class:`Litestar SessionMiddleware <.middleware.session.base.SessionMiddleware>` is enabled, this will cause\n the session data to be cleared.\n\n Returns:\n None.\n \"\"\"\n self.scope[\"session\"] = Empty\n\n def url_for(self, name: str, **path_parameters: Any) -> str:\n \"\"\"Return the url for a given route handler name.\n\n Args:\n name: The ``name`` of the request route handler.\n **path_parameters: Values for path parameters in the route\n\n Raises:\n NoRouteMatchFoundException: If route with ``name`` does not exist, path parameters are missing or have a\n wrong type.\n\n Returns:\n A string representing the absolute url of the route handler.\n \"\"\"\n litestar_instance = self.scope[\"app\"]\n url_path = litestar_instance.route_reverse(name, **path_parameters)\n\n return make_absolute_url(url_path, self.base_url)\n\n def url_for_static_asset(self, name: str, file_path: str) -> str:\n \"\"\"Receives a static files handler name, an asset file path and returns resolved absolute url to the asset.\n\n Args:\n name: A static handler unique name.\n file_path: a string containing path to an asset.\n\n Raises:\n NoRouteMatchFoundException: If static files handler with ``name`` does not exist.\n\n Returns:\n A string representing absolute url to the asset.\n \"\"\"\n litestar_instance = self.scope[\"app\"]\n url_path = litestar_instance.url_for_static_asset(name, file_path)\n\n return make_absolute_url(url_path, self.base_url)\n", "path": "litestar/connection/base.py" } ]
diff --git a/litestar/connection/base.py b/litestar/connection/base.py index c604e3471f..161eb0f606 100644 --- a/litestar/connection/base.py +++ b/litestar/connection/base.py @@ -287,7 +287,7 @@ def clear_session(self) -> None: """ self.scope["session"] = Empty - def url_for(self, name: str, **path_parameters: dict[str, Any]) -> str: + def url_for(self, name: str, **path_parameters: Any) -> str: """Return the url for a given route handler name. Args:
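The one-line litestar fix above is a typing correction: in Python, an annotation on `**kwargs` types each individual keyword *value*, and the mapping the function receives is then `dict[str, T]`. The old `**path_parameters: dict[str, Any]` therefore claimed that every path parameter value must itself be a dict. A minimal sketch of the semantics (the `url_for_old`/`url_for_new` names are illustrative, not litestar's API):

```
from typing import Any

# `**kwargs: T` means each keyword VALUE has type T; inside the function,
# the collected mapping is typed as dict[str, T].
def url_for_old(name: str, **path_parameters: dict[str, Any]) -> str:
    return name  # placeholder body

def url_for_new(name: str, **path_parameters: Any) -> str:
    return name  # placeholder body

url_for_new("user_detail", user_id=42)  # OK: any value type is accepted
url_for_old("user_detail", user_id=42)  # type checker error: int is not dict[str, Any]
```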
liberapay__liberapay.com-844
Can't cancel a bank wire payin
Users who initiate a bank wire payin can't cancel it if they change their mind or realize that their bank doesn't allow them to transfer euros abroad.
It's not possible to cancel a bank wire payin on MangoPay, which is why we don't already have this feature, but we should be able to implement a fake cancellation on our side:
- ~~[ ] add a `canceled` status to `exchanges` (there will be some code to modify, e.g. `git grep "'failed'"`)~~
- [x] add a "Cancel" button to the bankwire page which changes the exchange status to `failed` with "Canceled by the user" in the `note` column
~~Later, maybe:~~
- ~~[ ] identify bank wires that should have been canceled and change their status~~
- ~~[ ] send the list of all canceled bank wires to MangoPay so they can manually mark them as failed on their side~~
*Edit: no longer necessary now that bank wire payins expire automatically after a month. However, we'll need something else instead:*
- [ ] ~~ignore mangopay's expiration notifications for "canceled" bank wires~~ *not necessary if we use the `failed` status instead of creating a `canceled` one*

Mangopay account switch can mess up the wallets table
This can in turn crash payday: <https://github.com/liberapay/salon/issues/181#issuecomment-347861123>.
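The shipped fix (visible in the after-file below) implements the second checklist item: since MangoPay offers no remote cancellation, the pending exchange is simply flipped to `failed` locally with a note. The helper is reproduced here with explanatory comments; the button-handler wiring around it is a hypothetical sketch, not the actual endpoint code:

```
def cancel_bank_wire_payin(db, exchange, payin, participant):
    # Purely local cancellation: MangoPay has no API call for this, so we
    # only mark the pending exchange as failed. record_exchange_result
    # applies a zero balance delta for a failed payin, so there is nothing
    # to roll back.
    record_exchange_result(db, exchange.id, payin.Id, 'failed', "canceled", participant)

# Hypothetical handler for the "Cancel" button on the bankwire page
# (the lookups below are illustrative):
def handle_cancel(db, participant, exchange_id):
    exchange = db.one("SELECT * FROM exchanges WHERE id = %s AND participant = %s",
                      (exchange_id, participant.id))
    payin = BankWirePayIn.get(exchange.remote_id)
    cancel_bank_wire_payin(db, exchange, payin, participant)
```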
[ { "content": "\"\"\"Functions for moving money into, out of, or between wallets.\n\"\"\"\nfrom __future__ import division, print_function, unicode_literals\n\nfrom decimal import Decimal\nfrom time import sleep\n\nfrom mangopay.exceptions import APIError\nfrom mangopay.resources import (\n BankAccount, BankWirePayIn, BankWirePayOut, DirectPayIn, DirectDebitDirectPayIn,\n SettlementTransfer, Transfer, User, Wallet,\n)\nfrom mangopay.utils import Money\n\nfrom liberapay.billing.fees import (\n skim_bank_wire, skim_credit, upcharge_card, upcharge_direct_debit\n)\nfrom liberapay.constants import FEE_PAYOUT_WARN, QUARANTINE\nfrom liberapay.exceptions import (\n NegativeBalance, NotEnoughWithdrawableMoney, PaydayIsRunning,\n FeeExceedsAmount, TransactionFeeTooHigh, TransferError,\n AccountSuspended, Redirect,\n)\nfrom liberapay.models import check_db\nfrom liberapay.models.participant import Participant\nfrom liberapay.models.exchange_route import ExchangeRoute\nfrom liberapay.utils import group_by, NS\n\n\nQUARANTINE = '%s days' % QUARANTINE.days\n\n\ndef repr_error(o):\n r = o.ResultCode\n if r == '000000':\n return\n msg = getattr(o, 'ResultMessage', None)\n if msg:\n r += ': ' + msg\n return r\n\n\ndef repr_exception(e):\n if isinstance(e, APIError):\n return '%s %s' % (e.code, e.args[0])\n else:\n return repr(e)\n\n\ndef create_wallet(db, participant, currency):\n w = Wallet()\n w.Owners = [participant.mangopay_user_id]\n w.Description = str(participant.id)\n w.Currency = currency\n w.save()\n return db.one(\"\"\"\n INSERT INTO wallets\n (remote_id, balance, owner, remote_owner_id)\n VALUES (%s, %s, %s, %s)\n RETURNING *\n \"\"\", (w.Id, w.Balance, participant.id, participant.mangopay_user_id))\n\n\ndef test_hook():\n return\n\n\ndef payout(db, route, amount, ignore_high_fee=False):\n \"\"\"Withdraw money to the specified bank account (`route`).\n \"\"\"\n assert amount > 0\n assert route\n assert route.network == 'mango-ba'\n\n participant = route.participant\n if participant.is_suspended:\n raise AccountSuspended()\n\n payday = db.one(\"SELECT * FROM paydays WHERE ts_start > ts_end\")\n if payday:\n raise PaydayIsRunning\n\n ba = BankAccount.get(route.address, user_id=participant.mangopay_user_id)\n\n # Do final calculations\n amount = Money(amount, 'EUR') if isinstance(amount, Decimal) else amount\n credit_amount, fee, vat = skim_credit(amount, ba)\n if credit_amount <= 0 and fee > 0:\n raise FeeExceedsAmount\n fee_percent = fee / amount\n if fee_percent > FEE_PAYOUT_WARN and not ignore_high_fee:\n raise TransactionFeeTooHigh(fee_percent, fee, amount)\n\n # Try to dance with MangoPay\n e_id = record_exchange(db, route, -credit_amount, fee, vat, participant, 'pre').id\n payout = BankWirePayOut()\n payout.AuthorId = participant.mangopay_user_id\n payout.DebitedFunds = amount.int()\n payout.DebitedWalletId = participant.get_current_wallet(amount.currency).remote_id\n payout.Fees = fee.int()\n payout.BankAccountId = route.address\n payout.BankWireRef = str(e_id)\n payout.Tag = str(e_id)\n try:\n test_hook()\n payout.save()\n return record_exchange_result(\n db, e_id, payout.Id, payout.Status.lower(), repr_error(payout), participant\n )\n except Exception as e:\n error = repr_exception(e)\n return record_exchange_result(db, e_id, '', 'failed', error, participant)\n\n\ndef charge(db, route, amount, return_url):\n \"\"\"Charge the given credit card (`route`).\n\n Amount should be the nominal amount. 
We'll compute fees below this function\n and add it to amount to end up with charge_amount.\n\n \"\"\"\n assert isinstance(amount, (Decimal, Money)), type(amount)\n assert route\n assert route.network == 'mango-cc'\n\n participant = route.participant\n\n amount = Money(amount, 'EUR') if isinstance(amount, Decimal) else amount\n charge_amount, fee, vat = upcharge_card(amount)\n amount = charge_amount - fee\n\n wallet = participant.get_current_wallet(amount.currency, create=True)\n e_id = record_exchange(db, route, amount, fee, vat, participant, 'pre').id\n payin = DirectPayIn()\n payin.AuthorId = participant.mangopay_user_id\n payin.CreditedWalletId = wallet.remote_id\n payin.DebitedFunds = charge_amount.int()\n payin.CardId = route.address\n payin.SecureModeReturnURL = return_url\n payin.Fees = fee.int()\n payin.Tag = str(e_id)\n try:\n test_hook()\n payin.save()\n except Exception as e:\n error = repr_exception(e)\n return record_exchange_result(db, e_id, '', 'failed', error, participant)\n\n if payin.SecureModeRedirectURL:\n raise Redirect(payin.SecureModeRedirectURL)\n\n return record_exchange_result(\n db, e_id, payin.Id, payin.Status.lower(), repr_error(payin), participant\n )\n\n\ndef prepare_direct_debit(db, route, amount):\n \"\"\"Prepare to debit a bank account.\n \"\"\"\n assert isinstance(amount, (Decimal, Money)), type(amount)\n\n assert route.network == 'mango-ba'\n\n participant = route.participant\n\n amount = Money(amount, 'EUR') if isinstance(amount, Decimal) else amount\n debit_amount, fee, vat = upcharge_direct_debit(amount)\n amount = debit_amount - fee\n\n status = 'pre' if route.mandate else 'pre-mandate'\n return record_exchange(db, route, amount, fee, vat, participant, status)\n\n\ndef execute_direct_debit(db, exchange, route):\n \"\"\"Execute a prepared direct debit.\n \"\"\"\n assert exchange.route == route.id\n assert route\n assert route.network == 'mango-ba'\n assert route.mandate\n\n participant = route.participant\n assert exchange.participant == participant.id\n\n if exchange.status == 'pre-mandate':\n exchange = db.one(\"\"\"\n UPDATE exchanges\n SET status = 'pre'\n WHERE id = %s\n AND status = %s\n RETURNING *\n \"\"\", (exchange.id, exchange.status))\n assert exchange, 'race condition'\n\n assert exchange.status == 'pre'\n\n amount, fee = exchange.amount, exchange.fee\n debit_amount = amount + fee\n\n e_id = exchange.id\n payin = DirectDebitDirectPayIn()\n payin.AuthorId = participant.mangopay_user_id\n payin.CreditedWalletId = exchange.wallet_id\n payin.DebitedFunds = debit_amount.int()\n payin.MandateId = route.mandate\n payin.Fees = fee.int()\n payin.Tag = str(e_id)\n try:\n test_hook()\n payin.save()\n except Exception as e:\n error = repr_exception(e)\n return record_exchange_result(db, e_id, '', 'failed', error, participant)\n\n return record_exchange_result(\n db, e_id, payin.Id, payin.Status.lower(), repr_error(payin), participant\n )\n\n\ndef payin_bank_wire(db, participant, debit_amount):\n \"\"\"Prepare to receive a bank wire payin.\n\n The amount should be how much the user intends to send, not how much will\n arrive in the wallet.\n \"\"\"\n\n route = ExchangeRoute.upsert_bankwire_route(participant)\n\n if not isinstance(debit_amount, Money):\n debit_amount = Money(debit_amount, 'EUR')\n amount, fee, vat = skim_bank_wire(debit_amount)\n\n wallet = participant.get_current_wallet(amount.currency, create=True)\n e_id = record_exchange(db, route, amount, fee, vat, participant, 'pre').id\n payin = BankWirePayIn()\n payin.AuthorId = 
participant.mangopay_user_id\n payin.CreditedWalletId = wallet.remote_id\n payin.DeclaredDebitedFunds = debit_amount.int()\n payin.DeclaredFees = fee.int()\n payin.Tag = str(e_id)\n try:\n test_hook()\n payin.save()\n except Exception as e:\n error = repr_exception(e)\n return None, record_exchange_result(db, e_id, '', 'failed', error, participant)\n\n e = record_exchange_result(\n db, e_id, payin.Id, payin.Status.lower(), repr_error(payin), participant\n )\n return payin, e\n\n\ndef record_unexpected_payin(db, payin):\n \"\"\"Record an unexpected bank wire payin.\n \"\"\"\n assert payin.PaymentType == 'BANK_WIRE'\n debited_amount = payin.DebitedFunds / Decimal(100)\n paid_fee = payin.Fees / Decimal(100)\n vat = skim_bank_wire(debited_amount)[2]\n wallet_id = payin.CreditedWalletId\n participant = Participant.from_mangopay_user_id(payin.AuthorId)\n current_wallet = participant.get_current_wallet(debited_amount.currency)\n assert current_wallet.remote_id == wallet_id\n route = ExchangeRoute.upsert_bankwire_route(participant)\n amount = debited_amount - paid_fee\n return db.one(\"\"\"\n INSERT INTO exchanges\n (amount, fee, vat, participant, status, route, note, remote_id, wallet_id)\n VALUES (%s, %s, %s, %s, 'created', %s, NULL, %s, %s)\n RETURNING id\n \"\"\", (amount, paid_fee, vat, participant.id, route.id, payin.Id, wallet_id))\n\n\ndef record_payout_refund(db, payout_refund):\n orig_payout = BankWirePayOut.get(payout_refund.InitialTransactionId)\n e_origin = db.one(\"SELECT * FROM exchanges WHERE id = %s\", (orig_payout.Tag,))\n e_refund_id = db.one(\"SELECT id FROM exchanges WHERE refund_ref = %s\", (e_origin.id,))\n if e_refund_id:\n # Already recorded\n return e_refund_id\n amount, fee, vat = -e_origin.amount, -e_origin.fee, -e_origin.vat\n assert payout_refund.DebitedFunds / 100 == amount\n assert payout_refund.Fees / 100 == fee\n route = ExchangeRoute.from_id(e_origin.route)\n participant = Participant.from_id(e_origin.participant)\n remote_id = payout_refund.Id\n wallet_id = e_origin.wallet_id\n return db.one(\"\"\"\n INSERT INTO exchanges\n (amount, fee, vat, participant, status, route, note, refund_ref, remote_id, wallet_id)\n VALUES (%s, %s, %s, %s, 'created', %s, NULL, %s, %s, %s)\n RETURNING id\n \"\"\", (amount, fee, vat, participant.id, route.id, e_origin.id, remote_id, wallet_id))\n\n\ndef record_exchange(db, route, amount, fee, vat, participant, status, error=None):\n \"\"\"Given a Bunch of Stuff, return an int (exchange_id).\n\n Records in the exchanges table have these characteristics:\n\n amount It's negative for credits (representing an outflow from\n Liberapay to you) and positive for charges.\n The sign is how we differentiate the two in, e.g., the\n history page.\n\n fee The payment processor's fee. It's always positive.\n\n vat The amount of VAT included in the fee. 
Always positive.\n\n \"\"\"\n assert status.startswith('pre')\n if participant.is_suspended:\n raise AccountSuspended()\n\n with db.get_cursor() as cursor:\n\n wallet_id = participant.get_current_wallet(amount.currency, create=True).remote_id\n e = cursor.one(\"\"\"\n INSERT INTO exchanges\n (amount, fee, vat, participant, status, route, note, wallet_id)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\n RETURNING *\n \"\"\", (amount, fee, vat, participant.id, status, route.id, error, wallet_id))\n\n if amount < 0:\n amount -= fee\n propagate_exchange(cursor, participant, e, '', amount)\n\n return e\n\n\ndef record_exchange_result(db, exchange_id, remote_id, status, error, participant):\n \"\"\"Updates the status of an exchange.\n \"\"\"\n with db.get_cursor() as cursor:\n e = cursor.one(\"\"\"\n UPDATE exchanges e\n SET status=%(status)s\n , note=%(error)s\n , remote_id=%(remote_id)s\n WHERE id=%(exchange_id)s\n AND status <> %(status)s\n RETURNING *\n \"\"\", locals())\n if not e:\n return\n assert participant.id == e.participant\n\n amount = e.amount\n if amount < 0:\n amount = -amount + max(e.fee, 0) if status == 'failed' else amount.zero()\n else:\n amount = amount - min(e.fee, 0) if status == 'succeeded' else amount.zero()\n propagate_exchange(cursor, participant, e, error, amount)\n\n return e\n\n\ndef propagate_exchange(cursor, participant, exchange, error, amount):\n \"\"\"Propagates an exchange's result to the participant's balance.\n \"\"\"\n wallet_id = exchange.wallet_id\n new_balance = cursor.one(\"\"\"\n UPDATE wallets\n SET balance = (balance + %s)\n WHERE remote_id = %s\n AND (balance + %s) >= 0\n RETURNING balance\n \"\"\", (amount, wallet_id, amount))\n\n if new_balance is None:\n raise NegativeBalance\n\n if amount < 0:\n bundles = cursor.all(\"\"\"\n LOCK TABLE cash_bundles IN EXCLUSIVE MODE;\n SELECT b.*\n FROM cash_bundles b\n JOIN exchanges e ON e.id = b.origin\n WHERE b.owner = %s\n AND b.ts < now() - INTERVAL %s\n AND b.disputed IS NOT TRUE\n AND b.locked_for IS NULL\n AND b.amount::currency = %s\n ORDER BY b.owner = e.participant DESC, b.ts\n \"\"\", (participant.id, QUARANTINE, amount.currency))\n withdrawable = sum(b.amount for b in bundles)\n x = -amount\n if x > withdrawable:\n raise NotEnoughWithdrawableMoney(withdrawable)\n for b in bundles:\n if x >= b.amount:\n cursor.run(\"\"\"\n UPDATE cash_bundles\n SET owner = NULL\n , withdrawal = %s\n , wallet_id = NULL\n WHERE id = %s\n \"\"\", (exchange.id, b.id))\n x -= b.amount\n if x == 0:\n break\n else:\n assert x > 0\n cursor.run(\"\"\"\n INSERT INTO cash_bundles\n (owner, origin, ts, amount, withdrawal, wallet_id)\n VALUES (NULL, %s, %s, %s, %s, NULL)\n \"\"\", (b.origin, b.ts, x, exchange.id))\n cursor.run(\"\"\"\n UPDATE cash_bundles\n SET amount = (amount - %s)\n WHERE id = %s\n \"\"\", (x, b.id))\n break\n elif amount > 0 and (exchange.amount < 0 or exchange.refund_ref):\n # failed withdrawal\n orig_exchange_id = exchange.refund_ref or exchange.id\n cursor.run(\"\"\"\n UPDATE cash_bundles b\n SET owner = %(p_id)s\n , withdrawal = NULL\n , wallet_id = %(wallet_id)s\n WHERE withdrawal = %(e_id)s\n \"\"\", dict(p_id=participant.id, e_id=orig_exchange_id, wallet_id=wallet_id))\n elif amount > 0:\n cursor.run(\"\"\"\n INSERT INTO cash_bundles\n (owner, origin, amount, ts, wallet_id)\n VALUES (%s, %s, %s, %s, %s)\n \"\"\", (participant.id, exchange.id, amount, exchange.timestamp, wallet_id))\n\n new_balance = cursor.one(\"SELECT recompute_balance(%s)\", (participant.id,))\n 
participant.set_attributes(balance=new_balance)\n\n if amount != 0:\n participant.update_giving_and_tippees(cursor)\n merge_cash_bundles(cursor, participant.id)\n\n\ndef transfer(db, tipper, tippee, amount, context, **kw):\n tipper_wallet = NS(remote_id=kw.get('tipper_wallet_id'), remote_owner_id=kw.get('tipper_mango_id'))\n if not all(tipper_wallet.__dict__.values()):\n tipper_wallet = Participant.from_id(tipper).get_current_wallet(amount.currency)\n tippee_wallet = NS(remote_id=kw.get('tippee_wallet_id'), remote_owner_id=kw.get('tippee_mango_id'))\n if not all(tippee_wallet.__dict__.values()):\n tippee_wallet = Participant.from_id(tippee).get_current_wallet(amount.currency, create=True)\n wallet_from = tipper_wallet.remote_id\n wallet_to = tippee_wallet.remote_id\n t_id = prepare_transfer(\n db, tipper, tippee, amount, context, wallet_from, wallet_to,\n team=kw.get('team'), invoice=kw.get('invoice'), bundles=kw.get('bundles'),\n )\n tr = Transfer()\n tr.AuthorId = tipper_wallet.remote_owner_id\n tr.CreditedUserId = tippee_wallet.remote_owner_id\n tr.CreditedWalletId = wallet_to\n tr.DebitedFunds = amount.int()\n tr.DebitedWalletId = wallet_from\n tr.Fees = Money(0, amount.currency)\n tr.Tag = str(t_id)\n tr.save()\n return record_transfer_result(db, t_id, tr), t_id\n\n\ndef prepare_transfer(db, tipper, tippee, amount, context, wallet_from, wallet_to,\n team=None, invoice=None, **kw):\n with db.get_cursor() as cursor:\n transfer = cursor.one(\"\"\"\n INSERT INTO transfers\n (tipper, tippee, amount, context, team, invoice, status,\n wallet_from, wallet_to)\n VALUES (%s, %s, %s, %s, %s, %s, 'pre',\n %s, %s)\n RETURNING *\n \"\"\", (tipper, tippee, amount, context, team, invoice, wallet_from, wallet_to))\n lock_bundles(cursor, transfer, **kw)\n return transfer.id\n\n\ndef lock_bundles(cursor, transfer, bundles=None, prefer_bundles_from=-1):\n assert transfer.status == 'pre'\n cursor.run(\"LOCK TABLE cash_bundles IN EXCLUSIVE MODE\")\n tipper, tippee = transfer.tipper, transfer.tippee\n currency = transfer.amount.currency\n bundles = bundles or cursor.all(\"\"\"\n SELECT b.*\n FROM cash_bundles b\n JOIN exchanges e ON e.id = b.origin\n WHERE b.owner = %(tipper)s\n AND b.withdrawal IS NULL\n AND b.locked_for IS NULL\n AND b.amount::currency = %(currency)s\n ORDER BY b.origin = %(prefer_bundles_from)s DESC\n , e.participant = %(tippee)s DESC\n , b.ts\n \"\"\", locals())\n transferable = sum(b.amount for b in bundles)\n x = transfer.amount\n if x > transferable:\n raise NegativeBalance()\n for b in bundles:\n if x >= b.amount:\n cursor.run(\"\"\"\n UPDATE cash_bundles\n SET locked_for = %s\n WHERE id = %s\n \"\"\", (transfer.id, b.id))\n x -= b.amount\n if x == 0:\n break\n else:\n cursor.run(\"\"\"\n UPDATE cash_bundles\n SET amount = (amount - %s)\n WHERE id = %s;\n\n INSERT INTO cash_bundles\n (owner, origin, amount, ts, locked_for, wallet_id)\n VALUES (%s, %s, %s, %s, %s, %s);\n \"\"\", (x, b.id, transfer.tipper, b.origin, x, b.ts, transfer.id, b.wallet_id))\n break\n\n\ndef record_transfer_result(db, t_id, tr):\n error = repr_error(tr)\n status = tr.Status.lower()\n assert (not error) ^ (status == 'failed')\n r = _record_transfer_result(db, t_id, status, error)\n if status == 'failed':\n raise TransferError(error)\n return r\n\n\ndef _record_transfer_result(db, t_id, status, error=None):\n balance = None\n with db.get_cursor() as c:\n tipper, tippee, amount, wallet_from, wallet_to = c.one(\"\"\"\n UPDATE transfers\n SET status = %s\n , error = %s\n WHERE id = %s\n RETURNING tipper, tippee, 
amount, wallet_from, wallet_to\n \"\"\", (status, error, t_id))\n if status == 'succeeded':\n # Update the balances\n balance = c.one(\"\"\"\n\n UPDATE wallets\n SET balance = balance + %(amount)s\n WHERE remote_id = %(wallet_to)s;\n\n UPDATE wallets\n SET balance = balance - %(amount)s\n WHERE remote_id = %(wallet_from)s;\n\n SELECT recompute_balance(%(tippee)s);\n SELECT recompute_balance(%(tipper)s);\n\n \"\"\", locals())\n # Transfer the locked bundles to the recipient\n bundles = c.all(\"\"\"\n UPDATE cash_bundles\n SET owner = %s\n , locked_for = NULL\n , wallet_id = %s\n WHERE owner = %s\n AND locked_for = %s\n RETURNING *\n \"\"\", (tippee, wallet_to, tipper, t_id))\n else:\n # Unlock the bundles\n bundles = c.all(\"\"\"\n UPDATE cash_bundles\n SET locked_for = NULL\n WHERE owner = %s\n AND locked_for = %s\n RETURNING *\n \"\"\", (tipper, t_id))\n bundles_sum = sum(b.amount for b in bundles)\n assert bundles_sum == amount\n merge_cash_bundles(db, tippee)\n return balance\n\n\ndef lock_disputed_funds(cursor, exchange, amount):\n \"\"\"Prevent money that is linked to a chargeback from being withdrawn.\n \"\"\"\n if amount != exchange.amount + exchange.fee:\n raise NotImplementedError(\"partial disputes are not implemented\")\n cursor.run(\"LOCK TABLE cash_bundles IN EXCLUSIVE MODE\")\n disputed_bundles = [NS(d._asdict()) for d in cursor.all(\"\"\"\n UPDATE cash_bundles\n SET disputed = true\n WHERE origin = %s\n RETURNING *\n \"\"\", (exchange.id,))]\n disputed_bundles_sum = sum(b.amount for b in disputed_bundles)\n assert disputed_bundles_sum == exchange.amount\n original_owner = exchange.participant\n for b in disputed_bundles:\n if b.owner == original_owner:\n continue\n try_to_swap_bundle(cursor, b, original_owner)\n\n\ndef recover_lost_funds(db, exchange, lost_amount, repudiation_id):\n \"\"\"Recover as much money as possible from a payin which has been reverted.\n \"\"\"\n original_owner = exchange.participant\n # Try (again) to swap the disputed bundles\n with db.get_cursor() as cursor:\n cursor.run(\"LOCK TABLE cash_bundles IN EXCLUSIVE MODE\")\n disputed_bundles = [NS(d._asdict()) for d in cursor.all(\"\"\"\n SELECT *\n FROM cash_bundles\n WHERE origin = %s\n AND disputed = true\n \"\"\", (exchange.id,))]\n bundles_sum = sum(b.amount for b in disputed_bundles)\n assert bundles_sum == lost_amount - exchange.fee\n for b in disputed_bundles:\n if b.owner == original_owner:\n continue\n try_to_swap_bundle(cursor, b, original_owner)\n # Move the funds back to the original wallet\n currency = exchange.amount.currency\n chargebacks_account, credit_wallet = Participant.get_chargebacks_account(currency)\n LiberapayOrg = Participant.from_username('LiberapayOrg')\n assert LiberapayOrg\n grouped = group_by(disputed_bundles, lambda b: (b.owner, b.withdrawal))\n for (owner, withdrawal), bundles in grouped.items():\n assert owner != chargebacks_account.id\n if owner == original_owner:\n continue\n amount = sum(b.amount for b in bundles)\n if owner is None:\n bundles = None\n withdrawer = db.one(\"SELECT participant FROM exchanges WHERE id = %s\", (withdrawal,))\n payer = LiberapayOrg.id\n create_debt(db, withdrawer, payer, amount, exchange.id)\n create_debt(db, original_owner, withdrawer, amount, exchange.id)\n else:\n payer = owner\n create_debt(db, original_owner, payer, amount, exchange.id)\n transfer(db, payer, original_owner, amount, 'chargeback', bundles=bundles)\n # Add a debt for the fee\n create_debt(db, original_owner, LiberapayOrg.id, exchange.fee, exchange.id)\n # Send the 
funds to the credit wallet\n # We have to do a SettlementTransfer instead of a normal Transfer. The amount\n # can't exceed the original payin amount, so we can't settle the fee debt.\n original_owner = Participant.from_id(original_owner)\n from_wallet = original_owner.get_current_wallet(currency).remote_id\n to_wallet = credit_wallet.remote_id\n t_id = prepare_transfer(\n db, original_owner.id, chargebacks_account.id, exchange.amount, 'chargeback',\n from_wallet, to_wallet, prefer_bundles_from=exchange.id,\n )\n tr = SettlementTransfer()\n tr.AuthorId = original_owner.mangopay_user_id\n tr.CreditedUserId = chargebacks_account.mangopay_user_id\n tr.CreditedWalletId = to_wallet\n tr.DebitedFunds = exchange.amount.int()\n tr.DebitedWalletId = from_wallet\n tr.Fees = Money(0, currency)\n tr.RepudiationId = repudiation_id\n tr.Tag = str(t_id)\n tr.save()\n return record_transfer_result(db, t_id, tr)\n\n\ndef try_to_swap_bundle(cursor, b, original_owner):\n \"\"\"Attempt to switch a disputed cash bundle with a \"safe\" one.\n \"\"\"\n currency = b.amount.currency\n swappable_origin_bundles = [NS(d._asdict()) for d in cursor.all(\"\"\"\n SELECT *\n FROM cash_bundles\n WHERE owner = %s\n AND disputed IS NOT TRUE\n AND locked_for IS NULL\n AND amount::currency = %s\n ORDER BY ts ASC\n \"\"\", (original_owner, currency))]\n try_to_swap_bundle_with(cursor, b, swappable_origin_bundles)\n merge_cash_bundles(cursor, original_owner)\n if b.withdrawal:\n withdrawer = cursor.one(\n \"SELECT participant FROM exchanges WHERE id = %s\", (b.withdrawal,)\n )\n swappable_recipient_bundles = [NS(d._asdict()) for d in cursor.all(\"\"\"\n SELECT *\n FROM cash_bundles\n WHERE owner = %s\n AND disputed IS NOT TRUE\n AND locked_for IS NULL\n AND amount::currency = %s\n ORDER BY ts ASC, amount = %s DESC\n \"\"\", (withdrawer, currency, b.amount))]\n # Note: we don't restrict the date in the query above, so a swapped\n # bundle can end up \"withdrawn\" before it was even created\n try_to_swap_bundle_with(cursor, b, swappable_recipient_bundles)\n merge_cash_bundles(cursor, withdrawer)\n else:\n merge_cash_bundles(cursor, b.owner)\n\n\ndef try_to_swap_bundle_with(cursor, b1, swappable_bundles):\n \"\"\"Attempt to switch the disputed cash bundle `b1` with one (or more) from\n the `swappable_bundles` list.\n \"\"\"\n for b2 in swappable_bundles:\n if b2.amount == b1.amount:\n swap_bundles(cursor, b1, b2)\n break\n elif b2.amount > b1.amount:\n # Split the swappable bundle in two, then do the swap\n b3 = split_bundle(cursor, b2, b1.amount)\n swap_bundles(cursor, b1, b3)\n break\n else:\n # Split the disputed bundle in two, then do the swap\n b3 = split_bundle(cursor, b1, b2.amount)\n swap_bundles(cursor, b2, b3)\n\n\ndef split_bundle(cursor, b, amount):\n \"\"\"Cut a bundle in two.\n\n Returns the new second bundle, whose amount is `amount`.\n \"\"\"\n assert b.amount > amount\n assert not b.locked_for\n b.amount = cursor.one(\"\"\"\n UPDATE cash_bundles\n SET amount = (amount - %s)\n WHERE id = %s\n RETURNING amount\n \"\"\", (amount, b.id))\n return NS(cursor.one(\"\"\"\n INSERT INTO cash_bundles\n (owner, origin, amount, ts, withdrawal, disputed, wallet_id)\n VALUES (%s, %s, %s, %s, %s, %s, %s)\n RETURNING *;\n \"\"\", (b.owner, b.origin, amount, b.ts, b.withdrawal, b.disputed, b.wallet_id))._asdict())\n\n\ndef swap_bundles(cursor, b1, b2):\n \"\"\"Switch the current locations of the two cash bundles `b1` and `b2`.\n \"\"\"\n assert not b1.locked_for\n assert not b2.locked_for\n cursor.run(\"\"\"\n UPDATE cash_bundles\n 
SET owner = %s\n , withdrawal = %s\n , wallet_id = %s\n WHERE id = %s;\n UPDATE cash_bundles\n SET owner = %s\n , withdrawal = %s\n , wallet_id = %s\n WHERE id = %s;\n \"\"\", (b2.owner, b2.withdrawal, b2.wallet_id, b1.id,\n b1.owner, b1.withdrawal, b1.wallet_id, b2.id))\n b1.owner, b2.owner = b2.owner, b1.owner\n b1.withdrawal, b2.withdrawal = b2.withdrawal, b1.withdrawal\n\n\ndef merge_cash_bundles(db, p_id):\n \"\"\"Regroup cash bundles who have the same origin and current location.\n \"\"\"\n return db.one(\"\"\"\n LOCK TABLE cash_bundles IN EXCLUSIVE MODE;\n WITH regroup AS (\n SELECT owner, origin, wallet_id, sum(amount) AS amount, max(ts) AS ts\n FROM cash_bundles\n WHERE owner = %s\n AND disputed IS NOT TRUE\n AND locked_for IS NULL\n GROUP BY owner, origin, wallet_id\n HAVING count(*) > 1\n ),\n inserted AS (\n INSERT INTO cash_bundles\n (owner, origin, amount, ts, wallet_id)\n SELECT owner, origin, amount, ts, wallet_id\n FROM regroup\n RETURNING *\n ),\n deleted AS (\n DELETE\n FROM cash_bundles b\n USING regroup g\n WHERE b.owner = g.owner\n AND b.origin = g.origin\n AND b.disputed IS NOT TRUE\n AND b.locked_for IS NULL\n AND b.wallet_id = g.wallet_id\n RETURNING b.*\n )\n SELECT (SELECT json_agg(d) FROM deleted d) AS before\n , (SELECT json_agg(i) FROM inserted i) AS after\n \"\"\", (p_id,))\n\n\ndef create_debt(db, debtor, creditor, amount, origin):\n return db.one(\"\"\"\n INSERT INTO debts\n (debtor, creditor, amount, status, origin)\n VALUES (%s, %s, %s, 'due', %s)\n RETURNING *\n \"\"\", (debtor, creditor, amount, origin))\n\n\ndef sync_with_mangopay(db):\n \"\"\"We can get out of sync with MangoPay if record_exchange_result wasn't\n completed. This is where we fix that.\n \"\"\"\n check_db(db)\n\n exchanges = db.all(\"\"\"\n SELECT *, (e.timestamp < current_timestamp - interval '1 day') AS is_old\n FROM exchanges e\n WHERE e.status = 'pre'\n \"\"\")\n for e in exchanges:\n p = Participant.from_id(e.participant)\n transactions = [x for x in User(id=p.mangopay_user_id).transactions.all(\n Sort='CreationDate:DESC', Type=('PAYIN' if e.amount > 0 else 'PAYOUT')\n ) if x.Tag == str(e.id)]\n assert len(transactions) < 2\n if transactions:\n t = transactions[0]\n error = repr_error(t)\n status = t.Status.lower()\n assert (not error) ^ (status == 'failed')\n record_exchange_result(db, e.id, t.Id, status, error, p)\n elif e.is_old:\n # The exchange didn't happen, mark it as failed\n record_exchange_result(db, e.id, '', 'failed', 'interrupted', p)\n\n transfers = db.all(\"\"\"\n SELECT *, (t.timestamp < current_timestamp - interval '1 day') AS is_old\n FROM transfers t\n WHERE t.status = 'pre'\n \"\"\")\n for t in transfers:\n tipper = Participant.from_id(t.tipper)\n transactions = [x for x in User(id=tipper.mangopay_user_id).transactions.all(\n Sort='CreationDate:DESC', Type='TRANSFER'\n ) if x.Tag == str(t.id)]\n assert len(transactions) < 2\n if transactions:\n record_transfer_result(db, t.id, transactions[0])\n elif t.is_old:\n # The transfer didn't happen, mark it as failed\n _record_transfer_result(db, t.id, 'failed', 'interrupted')\n\n check_db(db)\n\n\ndef check_wallet_balance(w, state={}):\n remote_wallet = Wallet.get(w.remote_id)\n remote_balance = remote_wallet.balance / 100\n try:\n assert remote_balance == w.balance, (\n \"balances don't match for user #%s (liberapay id %s), wallet #%s contains %s, we expected %s\" %\n (w.remote_owner_id, w.owner, w.remote_id, remote_balance, w.balance)\n )\n except AssertionError as e:\n from liberapay.website import website\n 
website.tell_sentry(e, state, allow_reraise=False)\n\n\ndef check_all_balances():\n from liberapay.website import website\n wallets = website.db.all(\"\"\"\n SELECT *\n FROM wallets\n WHERE NOT remote_id LIKE 'CREDIT_%'\n \"\"\")\n for w in wallets:\n check_wallet_balance(w)\n sleep(0.1)\n", "path": "liberapay/billing/transactions.py" } ]
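Why a pure status flip suffices follows from the wallet arithmetic in `record_exchange_result` in the before-file above: for a payin that fails, the computed balance delta is zero, while a failed withdrawal refunds the amount plus the fee. A condensed sketch of that arithmetic, as a standalone function using plain numbers instead of `Money` objects, for illustration only:

```
def wallet_delta(amount, fee, status):
    # Mirrors record_exchange_result: amount < 0 is a withdrawal,
    # amount > 0 is a payin.
    if amount < 0:
        # A failed withdrawal puts the debited amount and the fee back.
        return -amount + max(fee, 0) if status == 'failed' else 0
    # A successful payin credits the amount; min(fee, 0) only matters for
    # payout refunds, where the recorded fee is negative.
    return amount - min(fee, 0) if status == 'succeeded' else 0

assert wallet_delta(100, 5, 'failed') == 0     # canceled payin: nothing to roll back
assert wallet_delta(-100, 5, 'failed') == 105  # failed withdrawal: money comes back
```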
[ { "content": "\"\"\"Functions for moving money into, out of, or between wallets.\n\"\"\"\nfrom __future__ import division, print_function, unicode_literals\n\nfrom decimal import Decimal\nfrom time import sleep\n\nfrom mangopay.exceptions import APIError\nfrom mangopay.resources import (\n BankAccount, BankWirePayIn, BankWirePayOut, DirectPayIn, DirectDebitDirectPayIn,\n SettlementTransfer, Transfer, User, Wallet,\n)\nfrom mangopay.utils import Money\n\nfrom liberapay.billing.fees import (\n skim_bank_wire, skim_credit, upcharge_card, upcharge_direct_debit\n)\nfrom liberapay.constants import FEE_PAYOUT_WARN, QUARANTINE\nfrom liberapay.exceptions import (\n NegativeBalance, NotEnoughWithdrawableMoney, PaydayIsRunning,\n FeeExceedsAmount, TransactionFeeTooHigh, TransferError,\n AccountSuspended, Redirect,\n)\nfrom liberapay.models import check_db\nfrom liberapay.models.participant import Participant\nfrom liberapay.models.exchange_route import ExchangeRoute\nfrom liberapay.utils import group_by, NS\n\n\nQUARANTINE = '%s days' % QUARANTINE.days\n\n\ndef repr_error(o):\n r = o.ResultCode\n if r == '000000':\n return\n msg = getattr(o, 'ResultMessage', None)\n if msg:\n r += ': ' + msg\n return r\n\n\ndef repr_exception(e):\n if isinstance(e, APIError):\n return '%s %s' % (e.code, e.args[0])\n else:\n return repr(e)\n\n\ndef create_wallet(db, participant, currency):\n w = Wallet()\n w.Owners = [participant.mangopay_user_id]\n w.Description = str(participant.id)\n w.Currency = currency\n w.save()\n return db.one(\"\"\"\n INSERT INTO wallets\n (remote_id, balance, owner, remote_owner_id)\n VALUES (%s, %s, %s, %s)\n RETURNING *\n \"\"\", (w.Id, w.Balance, participant.id, participant.mangopay_user_id))\n\n\ndef test_hook():\n return\n\n\ndef payout(db, route, amount, ignore_high_fee=False):\n \"\"\"Withdraw money to the specified bank account (`route`).\n \"\"\"\n assert amount > 0\n assert route\n assert route.network == 'mango-ba'\n\n participant = route.participant\n if participant.is_suspended:\n raise AccountSuspended()\n\n payday = db.one(\"SELECT * FROM paydays WHERE ts_start > ts_end\")\n if payday:\n raise PaydayIsRunning\n\n ba = BankAccount.get(route.address, user_id=participant.mangopay_user_id)\n\n # Do final calculations\n amount = Money(amount, 'EUR') if isinstance(amount, Decimal) else amount\n credit_amount, fee, vat = skim_credit(amount, ba)\n if credit_amount <= 0 and fee > 0:\n raise FeeExceedsAmount\n fee_percent = fee / amount\n if fee_percent > FEE_PAYOUT_WARN and not ignore_high_fee:\n raise TransactionFeeTooHigh(fee_percent, fee, amount)\n\n # Try to dance with MangoPay\n e_id = record_exchange(db, route, -credit_amount, fee, vat, participant, 'pre').id\n payout = BankWirePayOut()\n payout.AuthorId = participant.mangopay_user_id\n payout.DebitedFunds = amount.int()\n payout.DebitedWalletId = participant.get_current_wallet(amount.currency).remote_id\n payout.Fees = fee.int()\n payout.BankAccountId = route.address\n payout.BankWireRef = str(e_id)\n payout.Tag = str(e_id)\n try:\n test_hook()\n payout.save()\n return record_exchange_result(\n db, e_id, payout.Id, payout.Status.lower(), repr_error(payout), participant\n )\n except Exception as e:\n error = repr_exception(e)\n return record_exchange_result(db, e_id, '', 'failed', error, participant)\n\n\ndef charge(db, route, amount, return_url):\n \"\"\"Charge the given credit card (`route`).\n\n Amount should be the nominal amount. 
We'll compute fees below this function\n and add it to amount to end up with charge_amount.\n\n \"\"\"\n assert isinstance(amount, (Decimal, Money)), type(amount)\n assert route\n assert route.network == 'mango-cc'\n\n participant = route.participant\n\n amount = Money(amount, 'EUR') if isinstance(amount, Decimal) else amount\n charge_amount, fee, vat = upcharge_card(amount)\n amount = charge_amount - fee\n\n wallet = participant.get_current_wallet(amount.currency, create=True)\n e_id = record_exchange(db, route, amount, fee, vat, participant, 'pre').id\n payin = DirectPayIn()\n payin.AuthorId = participant.mangopay_user_id\n payin.CreditedWalletId = wallet.remote_id\n payin.DebitedFunds = charge_amount.int()\n payin.CardId = route.address\n payin.SecureModeReturnURL = return_url\n payin.Fees = fee.int()\n payin.Tag = str(e_id)\n try:\n test_hook()\n payin.save()\n except Exception as e:\n error = repr_exception(e)\n return record_exchange_result(db, e_id, '', 'failed', error, participant)\n\n if payin.SecureModeRedirectURL:\n raise Redirect(payin.SecureModeRedirectURL)\n\n return record_exchange_result(\n db, e_id, payin.Id, payin.Status.lower(), repr_error(payin), participant\n )\n\n\ndef prepare_direct_debit(db, route, amount):\n \"\"\"Prepare to debit a bank account.\n \"\"\"\n assert isinstance(amount, (Decimal, Money)), type(amount)\n\n assert route.network == 'mango-ba'\n\n participant = route.participant\n\n amount = Money(amount, 'EUR') if isinstance(amount, Decimal) else amount\n debit_amount, fee, vat = upcharge_direct_debit(amount)\n amount = debit_amount - fee\n\n status = 'pre' if route.mandate else 'pre-mandate'\n return record_exchange(db, route, amount, fee, vat, participant, status)\n\n\ndef execute_direct_debit(db, exchange, route):\n \"\"\"Execute a prepared direct debit.\n \"\"\"\n assert exchange.route == route.id\n assert route\n assert route.network == 'mango-ba'\n assert route.mandate\n\n participant = route.participant\n assert exchange.participant == participant.id\n\n if exchange.status == 'pre-mandate':\n exchange = db.one(\"\"\"\n UPDATE exchanges\n SET status = 'pre'\n WHERE id = %s\n AND status = %s\n RETURNING *\n \"\"\", (exchange.id, exchange.status))\n assert exchange, 'race condition'\n\n assert exchange.status == 'pre'\n\n amount, fee = exchange.amount, exchange.fee\n debit_amount = amount + fee\n\n e_id = exchange.id\n payin = DirectDebitDirectPayIn()\n payin.AuthorId = participant.mangopay_user_id\n payin.CreditedWalletId = exchange.wallet_id\n payin.DebitedFunds = debit_amount.int()\n payin.MandateId = route.mandate\n payin.Fees = fee.int()\n payin.Tag = str(e_id)\n try:\n test_hook()\n payin.save()\n except Exception as e:\n error = repr_exception(e)\n return record_exchange_result(db, e_id, '', 'failed', error, participant)\n\n return record_exchange_result(\n db, e_id, payin.Id, payin.Status.lower(), repr_error(payin), participant\n )\n\n\ndef payin_bank_wire(db, participant, debit_amount):\n \"\"\"Prepare to receive a bank wire payin.\n\n The amount should be how much the user intends to send, not how much will\n arrive in the wallet.\n \"\"\"\n\n route = ExchangeRoute.upsert_bankwire_route(participant)\n\n if not isinstance(debit_amount, Money):\n debit_amount = Money(debit_amount, 'EUR')\n amount, fee, vat = skim_bank_wire(debit_amount)\n\n wallet = participant.get_current_wallet(amount.currency, create=True)\n e_id = record_exchange(db, route, amount, fee, vat, participant, 'pre').id\n payin = BankWirePayIn()\n payin.AuthorId = 
participant.mangopay_user_id\n payin.CreditedWalletId = wallet.remote_id\n payin.DeclaredDebitedFunds = debit_amount.int()\n payin.DeclaredFees = fee.int()\n payin.Tag = str(e_id)\n try:\n test_hook()\n payin.save()\n except Exception as e:\n error = repr_exception(e)\n return None, record_exchange_result(db, e_id, '', 'failed', error, participant)\n\n e = record_exchange_result(\n db, e_id, payin.Id, payin.Status.lower(), repr_error(payin), participant\n )\n return payin, e\n\n\ndef cancel_bank_wire_payin(db, exchange, payin, participant):\n record_exchange_result(db, exchange.id, payin.Id, 'failed', \"canceled\", participant)\n\n\ndef record_unexpected_payin(db, payin):\n \"\"\"Record an unexpected bank wire payin.\n \"\"\"\n assert payin.PaymentType == 'BANK_WIRE'\n debited_amount = payin.DebitedFunds / Decimal(100)\n paid_fee = payin.Fees / Decimal(100)\n vat = skim_bank_wire(debited_amount)[2]\n wallet_id = payin.CreditedWalletId\n participant = Participant.from_mangopay_user_id(payin.AuthorId)\n current_wallet = participant.get_current_wallet(debited_amount.currency)\n assert current_wallet.remote_id == wallet_id\n route = ExchangeRoute.upsert_bankwire_route(participant)\n amount = debited_amount - paid_fee\n return db.one(\"\"\"\n INSERT INTO exchanges\n (amount, fee, vat, participant, status, route, note, remote_id, wallet_id)\n VALUES (%s, %s, %s, %s, 'created', %s, NULL, %s, %s)\n RETURNING id\n \"\"\", (amount, paid_fee, vat, participant.id, route.id, payin.Id, wallet_id))\n\n\ndef record_payout_refund(db, payout_refund):\n orig_payout = BankWirePayOut.get(payout_refund.InitialTransactionId)\n e_origin = db.one(\"SELECT * FROM exchanges WHERE id = %s\", (orig_payout.Tag,))\n e_refund_id = db.one(\"SELECT id FROM exchanges WHERE refund_ref = %s\", (e_origin.id,))\n if e_refund_id:\n # Already recorded\n return e_refund_id\n amount, fee, vat = -e_origin.amount, -e_origin.fee, -e_origin.vat\n assert payout_refund.DebitedFunds / 100 == amount\n assert payout_refund.Fees / 100 == fee\n route = ExchangeRoute.from_id(e_origin.route)\n participant = Participant.from_id(e_origin.participant)\n remote_id = payout_refund.Id\n wallet_id = e_origin.wallet_id\n return db.one(\"\"\"\n INSERT INTO exchanges\n (amount, fee, vat, participant, status, route, note, refund_ref, remote_id, wallet_id)\n VALUES (%s, %s, %s, %s, 'created', %s, NULL, %s, %s, %s)\n RETURNING id\n \"\"\", (amount, fee, vat, participant.id, route.id, e_origin.id, remote_id, wallet_id))\n\n\ndef record_exchange(db, route, amount, fee, vat, participant, status, error=None):\n \"\"\"Given a Bunch of Stuff, return an int (exchange_id).\n\n Records in the exchanges table have these characteristics:\n\n amount It's negative for credits (representing an outflow from\n Liberapay to you) and positive for charges.\n The sign is how we differentiate the two in, e.g., the\n history page.\n\n fee The payment processor's fee. It's always positive.\n\n vat The amount of VAT included in the fee. 
Always positive.\n\n \"\"\"\n assert status.startswith('pre')\n if participant.is_suspended:\n raise AccountSuspended()\n\n with db.get_cursor() as cursor:\n\n wallet_id = participant.get_current_wallet(amount.currency, create=True).remote_id\n e = cursor.one(\"\"\"\n INSERT INTO exchanges\n (amount, fee, vat, participant, status, route, note, wallet_id)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\n RETURNING *\n \"\"\", (amount, fee, vat, participant.id, status, route.id, error, wallet_id))\n\n if amount < 0:\n amount -= fee\n propagate_exchange(cursor, participant, e, '', amount)\n\n return e\n\n\ndef record_exchange_result(db, exchange_id, remote_id, status, error, participant):\n \"\"\"Updates the status of an exchange.\n \"\"\"\n with db.get_cursor() as cursor:\n e = cursor.one(\"\"\"\n UPDATE exchanges e\n SET status=%(status)s\n , note=%(error)s\n , remote_id=%(remote_id)s\n WHERE id=%(exchange_id)s\n AND status <> %(status)s\n RETURNING *\n \"\"\", locals())\n if not e:\n return\n assert participant.id == e.participant\n\n amount = e.amount\n if amount < 0:\n amount = -amount + max(e.fee, 0) if status == 'failed' else amount.zero()\n else:\n amount = amount - min(e.fee, 0) if status == 'succeeded' else amount.zero()\n propagate_exchange(cursor, participant, e, error, amount)\n\n return e\n\n\ndef propagate_exchange(cursor, participant, exchange, error, amount):\n \"\"\"Propagates an exchange's result to the participant's balance.\n \"\"\"\n wallet_id = exchange.wallet_id\n new_balance = cursor.one(\"\"\"\n UPDATE wallets\n SET balance = (balance + %s)\n WHERE remote_id = %s\n AND (balance + %s) >= 0\n RETURNING balance\n \"\"\", (amount, wallet_id, amount))\n\n if new_balance is None:\n raise NegativeBalance\n\n if amount < 0:\n bundles = cursor.all(\"\"\"\n LOCK TABLE cash_bundles IN EXCLUSIVE MODE;\n SELECT b.*\n FROM cash_bundles b\n JOIN exchanges e ON e.id = b.origin\n WHERE b.owner = %s\n AND b.ts < now() - INTERVAL %s\n AND b.disputed IS NOT TRUE\n AND b.locked_for IS NULL\n AND b.amount::currency = %s\n ORDER BY b.owner = e.participant DESC, b.ts\n \"\"\", (participant.id, QUARANTINE, amount.currency))\n withdrawable = sum(b.amount for b in bundles)\n x = -amount\n if x > withdrawable:\n raise NotEnoughWithdrawableMoney(withdrawable)\n for b in bundles:\n if x >= b.amount:\n cursor.run(\"\"\"\n UPDATE cash_bundles\n SET owner = NULL\n , withdrawal = %s\n , wallet_id = NULL\n WHERE id = %s\n \"\"\", (exchange.id, b.id))\n x -= b.amount\n if x == 0:\n break\n else:\n assert x > 0\n cursor.run(\"\"\"\n INSERT INTO cash_bundles\n (owner, origin, ts, amount, withdrawal, wallet_id)\n VALUES (NULL, %s, %s, %s, %s, NULL)\n \"\"\", (b.origin, b.ts, x, exchange.id))\n cursor.run(\"\"\"\n UPDATE cash_bundles\n SET amount = (amount - %s)\n WHERE id = %s\n \"\"\", (x, b.id))\n break\n elif amount > 0 and (exchange.amount < 0 or exchange.refund_ref):\n # failed withdrawal\n orig_exchange_id = exchange.refund_ref or exchange.id\n cursor.run(\"\"\"\n UPDATE cash_bundles b\n SET owner = %(p_id)s\n , withdrawal = NULL\n , wallet_id = %(wallet_id)s\n WHERE withdrawal = %(e_id)s\n \"\"\", dict(p_id=participant.id, e_id=orig_exchange_id, wallet_id=wallet_id))\n elif amount > 0:\n cursor.run(\"\"\"\n INSERT INTO cash_bundles\n (owner, origin, amount, ts, wallet_id)\n VALUES (%s, %s, %s, %s, %s)\n \"\"\", (participant.id, exchange.id, amount, exchange.timestamp, wallet_id))\n\n new_balance = cursor.one(\"SELECT recompute_balance(%s)\", (participant.id,))\n 
participant.set_attributes(balance=new_balance)\n\n if amount != 0:\n participant.update_giving_and_tippees(cursor)\n merge_cash_bundles(cursor, participant.id)\n\n\ndef transfer(db, tipper, tippee, amount, context, **kw):\n tipper_wallet = NS(remote_id=kw.get('tipper_wallet_id'), remote_owner_id=kw.get('tipper_mango_id'))\n if not all(tipper_wallet.__dict__.values()):\n tipper_wallet = Participant.from_id(tipper).get_current_wallet(amount.currency)\n tippee_wallet = NS(remote_id=kw.get('tippee_wallet_id'), remote_owner_id=kw.get('tippee_mango_id'))\n if not all(tippee_wallet.__dict__.values()):\n tippee_wallet = Participant.from_id(tippee).get_current_wallet(amount.currency, create=True)\n wallet_from = tipper_wallet.remote_id\n wallet_to = tippee_wallet.remote_id\n t_id = prepare_transfer(\n db, tipper, tippee, amount, context, wallet_from, wallet_to,\n team=kw.get('team'), invoice=kw.get('invoice'), bundles=kw.get('bundles'),\n )\n tr = Transfer()\n tr.AuthorId = tipper_wallet.remote_owner_id\n tr.CreditedUserId = tippee_wallet.remote_owner_id\n tr.CreditedWalletId = wallet_to\n tr.DebitedFunds = amount.int()\n tr.DebitedWalletId = wallet_from\n tr.Fees = Money(0, amount.currency)\n tr.Tag = str(t_id)\n tr.save()\n return record_transfer_result(db, t_id, tr), t_id\n\n\ndef prepare_transfer(db, tipper, tippee, amount, context, wallet_from, wallet_to,\n team=None, invoice=None, **kw):\n with db.get_cursor() as cursor:\n transfer = cursor.one(\"\"\"\n INSERT INTO transfers\n (tipper, tippee, amount, context, team, invoice, status,\n wallet_from, wallet_to)\n VALUES (%s, %s, %s, %s, %s, %s, 'pre',\n %s, %s)\n RETURNING *\n \"\"\", (tipper, tippee, amount, context, team, invoice, wallet_from, wallet_to))\n lock_bundles(cursor, transfer, **kw)\n return transfer.id\n\n\ndef lock_bundles(cursor, transfer, bundles=None, prefer_bundles_from=-1):\n assert transfer.status == 'pre'\n cursor.run(\"LOCK TABLE cash_bundles IN EXCLUSIVE MODE\")\n tipper, tippee = transfer.tipper, transfer.tippee\n currency = transfer.amount.currency\n bundles = bundles or cursor.all(\"\"\"\n SELECT b.*\n FROM cash_bundles b\n JOIN exchanges e ON e.id = b.origin\n WHERE b.owner = %(tipper)s\n AND b.withdrawal IS NULL\n AND b.locked_for IS NULL\n AND b.amount::currency = %(currency)s\n ORDER BY b.origin = %(prefer_bundles_from)s DESC\n , e.participant = %(tippee)s DESC\n , b.ts\n \"\"\", locals())\n transferable = sum(b.amount for b in bundles)\n x = transfer.amount\n if x > transferable:\n raise NegativeBalance()\n for b in bundles:\n if x >= b.amount:\n cursor.run(\"\"\"\n UPDATE cash_bundles\n SET locked_for = %s\n WHERE id = %s\n \"\"\", (transfer.id, b.id))\n x -= b.amount\n if x == 0:\n break\n else:\n cursor.run(\"\"\"\n UPDATE cash_bundles\n SET amount = (amount - %s)\n WHERE id = %s;\n\n INSERT INTO cash_bundles\n (owner, origin, amount, ts, locked_for, wallet_id)\n VALUES (%s, %s, %s, %s, %s, %s);\n \"\"\", (x, b.id, transfer.tipper, b.origin, x, b.ts, transfer.id, b.wallet_id))\n break\n\n\ndef record_transfer_result(db, t_id, tr):\n error = repr_error(tr)\n status = tr.Status.lower()\n assert (not error) ^ (status == 'failed')\n r = _record_transfer_result(db, t_id, status, error)\n if status == 'failed':\n raise TransferError(error)\n return r\n\n\ndef _record_transfer_result(db, t_id, status, error=None):\n balance = None\n with db.get_cursor() as c:\n tipper, tippee, amount, wallet_from, wallet_to = c.one(\"\"\"\n UPDATE transfers\n SET status = %s\n , error = %s\n WHERE id = %s\n RETURNING tipper, tippee, 
amount, wallet_from, wallet_to\n \"\"\", (status, error, t_id))\n if status == 'succeeded':\n # Update the balances\n balance = c.one(\"\"\"\n\n UPDATE wallets\n SET balance = balance + %(amount)s\n WHERE remote_id = %(wallet_to)s;\n\n UPDATE wallets\n SET balance = balance - %(amount)s\n WHERE remote_id = %(wallet_from)s;\n\n SELECT recompute_balance(%(tippee)s);\n SELECT recompute_balance(%(tipper)s);\n\n \"\"\", locals())\n # Transfer the locked bundles to the recipient\n bundles = c.all(\"\"\"\n UPDATE cash_bundles\n SET owner = %s\n , locked_for = NULL\n , wallet_id = %s\n WHERE owner = %s\n AND locked_for = %s\n RETURNING *\n \"\"\", (tippee, wallet_to, tipper, t_id))\n else:\n # Unlock the bundles\n bundles = c.all(\"\"\"\n UPDATE cash_bundles\n SET locked_for = NULL\n WHERE owner = %s\n AND locked_for = %s\n RETURNING *\n \"\"\", (tipper, t_id))\n bundles_sum = sum(b.amount for b in bundles)\n assert bundles_sum == amount\n merge_cash_bundles(db, tippee)\n return balance\n\n\ndef lock_disputed_funds(cursor, exchange, amount):\n \"\"\"Prevent money that is linked to a chargeback from being withdrawn.\n \"\"\"\n if amount != exchange.amount + exchange.fee:\n raise NotImplementedError(\"partial disputes are not implemented\")\n cursor.run(\"LOCK TABLE cash_bundles IN EXCLUSIVE MODE\")\n disputed_bundles = [NS(d._asdict()) for d in cursor.all(\"\"\"\n UPDATE cash_bundles\n SET disputed = true\n WHERE origin = %s\n RETURNING *\n \"\"\", (exchange.id,))]\n disputed_bundles_sum = sum(b.amount for b in disputed_bundles)\n assert disputed_bundles_sum == exchange.amount\n original_owner = exchange.participant\n for b in disputed_bundles:\n if b.owner == original_owner:\n continue\n try_to_swap_bundle(cursor, b, original_owner)\n\n\ndef recover_lost_funds(db, exchange, lost_amount, repudiation_id):\n \"\"\"Recover as much money as possible from a payin which has been reverted.\n \"\"\"\n original_owner = exchange.participant\n # Try (again) to swap the disputed bundles\n with db.get_cursor() as cursor:\n cursor.run(\"LOCK TABLE cash_bundles IN EXCLUSIVE MODE\")\n disputed_bundles = [NS(d._asdict()) for d in cursor.all(\"\"\"\n SELECT *\n FROM cash_bundles\n WHERE origin = %s\n AND disputed = true\n \"\"\", (exchange.id,))]\n bundles_sum = sum(b.amount for b in disputed_bundles)\n assert bundles_sum == lost_amount - exchange.fee\n for b in disputed_bundles:\n if b.owner == original_owner:\n continue\n try_to_swap_bundle(cursor, b, original_owner)\n # Move the funds back to the original wallet\n currency = exchange.amount.currency\n chargebacks_account, credit_wallet = Participant.get_chargebacks_account(currency)\n LiberapayOrg = Participant.from_username('LiberapayOrg')\n assert LiberapayOrg\n grouped = group_by(disputed_bundles, lambda b: (b.owner, b.withdrawal))\n for (owner, withdrawal), bundles in grouped.items():\n assert owner != chargebacks_account.id\n if owner == original_owner:\n continue\n amount = sum(b.amount for b in bundles)\n if owner is None:\n bundles = None\n withdrawer = db.one(\"SELECT participant FROM exchanges WHERE id = %s\", (withdrawal,))\n payer = LiberapayOrg.id\n create_debt(db, withdrawer, payer, amount, exchange.id)\n create_debt(db, original_owner, withdrawer, amount, exchange.id)\n else:\n payer = owner\n create_debt(db, original_owner, payer, amount, exchange.id)\n transfer(db, payer, original_owner, amount, 'chargeback', bundles=bundles)\n # Add a debt for the fee\n create_debt(db, original_owner, LiberapayOrg.id, exchange.fee, exchange.id)\n # Send the 
funds to the credit wallet\n # We have to do a SettlementTransfer instead of a normal Transfer. The amount\n # can't exceed the original payin amount, so we can't settle the fee debt.\n original_owner = Participant.from_id(original_owner)\n from_wallet = original_owner.get_current_wallet(currency).remote_id\n to_wallet = credit_wallet.remote_id\n t_id = prepare_transfer(\n db, original_owner.id, chargebacks_account.id, exchange.amount, 'chargeback',\n from_wallet, to_wallet, prefer_bundles_from=exchange.id,\n )\n tr = SettlementTransfer()\n tr.AuthorId = original_owner.mangopay_user_id\n tr.CreditedUserId = chargebacks_account.mangopay_user_id\n tr.CreditedWalletId = to_wallet\n tr.DebitedFunds = exchange.amount.int()\n tr.DebitedWalletId = from_wallet\n tr.Fees = Money(0, currency)\n tr.RepudiationId = repudiation_id\n tr.Tag = str(t_id)\n tr.save()\n return record_transfer_result(db, t_id, tr)\n\n\ndef try_to_swap_bundle(cursor, b, original_owner):\n \"\"\"Attempt to switch a disputed cash bundle with a \"safe\" one.\n \"\"\"\n currency = b.amount.currency\n swappable_origin_bundles = [NS(d._asdict()) for d in cursor.all(\"\"\"\n SELECT *\n FROM cash_bundles\n WHERE owner = %s\n AND disputed IS NOT TRUE\n AND locked_for IS NULL\n AND amount::currency = %s\n ORDER BY ts ASC\n \"\"\", (original_owner, currency))]\n try_to_swap_bundle_with(cursor, b, swappable_origin_bundles)\n merge_cash_bundles(cursor, original_owner)\n if b.withdrawal:\n withdrawer = cursor.one(\n \"SELECT participant FROM exchanges WHERE id = %s\", (b.withdrawal,)\n )\n swappable_recipient_bundles = [NS(d._asdict()) for d in cursor.all(\"\"\"\n SELECT *\n FROM cash_bundles\n WHERE owner = %s\n AND disputed IS NOT TRUE\n AND locked_for IS NULL\n AND amount::currency = %s\n ORDER BY ts ASC, amount = %s DESC\n \"\"\", (withdrawer, currency, b.amount))]\n # Note: we don't restrict the date in the query above, so a swapped\n # bundle can end up \"withdrawn\" before it was even created\n try_to_swap_bundle_with(cursor, b, swappable_recipient_bundles)\n merge_cash_bundles(cursor, withdrawer)\n else:\n merge_cash_bundles(cursor, b.owner)\n\n\ndef try_to_swap_bundle_with(cursor, b1, swappable_bundles):\n \"\"\"Attempt to switch the disputed cash bundle `b1` with one (or more) from\n the `swappable_bundles` list.\n \"\"\"\n for b2 in swappable_bundles:\n if b2.amount == b1.amount:\n swap_bundles(cursor, b1, b2)\n break\n elif b2.amount > b1.amount:\n # Split the swappable bundle in two, then do the swap\n b3 = split_bundle(cursor, b2, b1.amount)\n swap_bundles(cursor, b1, b3)\n break\n else:\n # Split the disputed bundle in two, then do the swap\n b3 = split_bundle(cursor, b1, b2.amount)\n swap_bundles(cursor, b2, b3)\n\n\ndef split_bundle(cursor, b, amount):\n \"\"\"Cut a bundle in two.\n\n Returns the new second bundle, whose amount is `amount`.\n \"\"\"\n assert b.amount > amount\n assert not b.locked_for\n b.amount = cursor.one(\"\"\"\n UPDATE cash_bundles\n SET amount = (amount - %s)\n WHERE id = %s\n RETURNING amount\n \"\"\", (amount, b.id))\n return NS(cursor.one(\"\"\"\n INSERT INTO cash_bundles\n (owner, origin, amount, ts, withdrawal, disputed, wallet_id)\n VALUES (%s, %s, %s, %s, %s, %s, %s)\n RETURNING *;\n \"\"\", (b.owner, b.origin, amount, b.ts, b.withdrawal, b.disputed, b.wallet_id))._asdict())\n\n\ndef swap_bundles(cursor, b1, b2):\n \"\"\"Switch the current locations of the two cash bundles `b1` and `b2`.\n \"\"\"\n assert not b1.locked_for\n assert not b2.locked_for\n cursor.run(\"\"\"\n UPDATE cash_bundles\n 
SET owner = %s\n , withdrawal = %s\n , wallet_id = %s\n WHERE id = %s;\n UPDATE cash_bundles\n SET owner = %s\n , withdrawal = %s\n , wallet_id = %s\n WHERE id = %s;\n \"\"\", (b2.owner, b2.withdrawal, b2.wallet_id, b1.id,\n b1.owner, b1.withdrawal, b1.wallet_id, b2.id))\n b1.owner, b2.owner = b2.owner, b1.owner\n b1.withdrawal, b2.withdrawal = b2.withdrawal, b1.withdrawal\n\n\ndef merge_cash_bundles(db, p_id):\n \"\"\"Regroup cash bundles who have the same origin and current location.\n \"\"\"\n return db.one(\"\"\"\n LOCK TABLE cash_bundles IN EXCLUSIVE MODE;\n WITH regroup AS (\n SELECT owner, origin, wallet_id, sum(amount) AS amount, max(ts) AS ts\n FROM cash_bundles\n WHERE owner = %s\n AND disputed IS NOT TRUE\n AND locked_for IS NULL\n GROUP BY owner, origin, wallet_id\n HAVING count(*) > 1\n ),\n inserted AS (\n INSERT INTO cash_bundles\n (owner, origin, amount, ts, wallet_id)\n SELECT owner, origin, amount, ts, wallet_id\n FROM regroup\n RETURNING *\n ),\n deleted AS (\n DELETE\n FROM cash_bundles b\n USING regroup g\n WHERE b.owner = g.owner\n AND b.origin = g.origin\n AND b.disputed IS NOT TRUE\n AND b.locked_for IS NULL\n AND b.wallet_id = g.wallet_id\n RETURNING b.*\n )\n SELECT (SELECT json_agg(d) FROM deleted d) AS before\n , (SELECT json_agg(i) FROM inserted i) AS after\n \"\"\", (p_id,))\n\n\ndef create_debt(db, debtor, creditor, amount, origin):\n return db.one(\"\"\"\n INSERT INTO debts\n (debtor, creditor, amount, status, origin)\n VALUES (%s, %s, %s, 'due', %s)\n RETURNING *\n \"\"\", (debtor, creditor, amount, origin))\n\n\ndef sync_with_mangopay(db):\n \"\"\"We can get out of sync with MangoPay if record_exchange_result wasn't\n completed. This is where we fix that.\n \"\"\"\n check_db(db)\n\n exchanges = db.all(\"\"\"\n SELECT *, (e.timestamp < current_timestamp - interval '1 day') AS is_old\n FROM exchanges e\n WHERE e.status = 'pre'\n \"\"\")\n for e in exchanges:\n p = Participant.from_id(e.participant)\n transactions = [x for x in User(id=p.mangopay_user_id).transactions.all(\n Sort='CreationDate:DESC', Type=('PAYIN' if e.amount > 0 else 'PAYOUT')\n ) if x.Tag == str(e.id)]\n assert len(transactions) < 2\n if transactions:\n t = transactions[0]\n error = repr_error(t)\n status = t.Status.lower()\n assert (not error) ^ (status == 'failed')\n record_exchange_result(db, e.id, t.Id, status, error, p)\n elif e.is_old:\n # The exchange didn't happen, mark it as failed\n record_exchange_result(db, e.id, '', 'failed', 'interrupted', p)\n\n transfers = db.all(\"\"\"\n SELECT *, (t.timestamp < current_timestamp - interval '1 day') AS is_old\n FROM transfers t\n WHERE t.status = 'pre'\n \"\"\")\n for t in transfers:\n tipper = Participant.from_id(t.tipper)\n transactions = [x for x in User(id=tipper.mangopay_user_id).transactions.all(\n Sort='CreationDate:DESC', Type='TRANSFER'\n ) if x.Tag == str(t.id)]\n assert len(transactions) < 2\n if transactions:\n record_transfer_result(db, t.id, transactions[0])\n elif t.is_old:\n # The transfer didn't happen, mark it as failed\n _record_transfer_result(db, t.id, 'failed', 'interrupted')\n\n check_db(db)\n\n\ndef check_wallet_balance(w, state={}):\n remote_wallet = Wallet.get(w.remote_id)\n remote_balance = remote_wallet.balance / 100\n try:\n assert remote_balance == w.balance, (\n \"balances don't match for user #%s (liberapay id %s), wallet #%s contains %s, we expected %s\" %\n (w.remote_owner_id, w.owner, w.remote_id, remote_balance, w.balance)\n )\n except AssertionError as e:\n from liberapay.website import website\n 
website.tell_sentry(e, state, allow_reraise=False)\n\n\ndef check_all_balances():\n from liberapay.website import website\n wallets = website.db.all(\"\"\"\n SELECT *\n FROM wallets\n WHERE NOT remote_id LIKE 'CREDIT_%'\n \"\"\")\n for w in wallets:\n check_wallet_balance(w)\n sleep(0.1)\n", "path": "liberapay/billing/transactions.py" } ]
diff --git a/liberapay/billing/transactions.py b/liberapay/billing/transactions.py index d4f1065e0d..e939f30bd9 100644 --- a/liberapay/billing/transactions.py +++ b/liberapay/billing/transactions.py @@ -251,6 +251,10 @@ def payin_bank_wire(db, participant, debit_amount): return payin, e +def cancel_bank_wire_payin(db, exchange, payin, participant): + record_exchange_result(db, exchange.id, payin.Id, 'failed', "canceled", participant) + + def record_unexpected_payin(db, payin): """Record an unexpected bank wire payin. """ diff --git a/sql/branch.sql b/sql/branch.sql new file mode 100644 index 0000000000..3148547071 --- /dev/null +++ b/sql/branch.sql @@ -0,0 +1,6 @@ +UPDATE wallets + SET is_current = true + FROM participants p + WHERE p.id = owner + AND p.mangopay_user_id = remote_owner_id + AND is_current IS NULL; diff --git a/www/%username/identity.spt b/www/%username/identity.spt index 9412aa07e9..ba405465d0 100644 --- a/www/%username/identity.spt +++ b/www/%username/identity.spt @@ -157,6 +157,11 @@ if request.method == 'POST': WHERE remote_owner_id = %s RETURNING * """, (old_account.Id,)) + website.db.run(""" + UPDATE wallets + SET is_current = true + WHERE remote_owner_id = %s + """, (account.Id,)) for w in (w for w in old_wallets if w.balance): transfer( website.db, participant.id, participant.id, w.balance, diff --git a/www/%username/wallet/payin/bankwire/%back_to.spt b/www/%username/wallet/payin/bankwire/%back_to.spt index 4c661c8d83..a1d7d05bdf 100644 --- a/www/%username/wallet/payin/bankwire/%back_to.spt +++ b/www/%username/wallet/payin/bankwire/%back_to.spt @@ -6,7 +6,7 @@ from decimal import Decimal as D, InvalidOperation, ROUND_UP from mangopay.resources import BankWirePayIn from liberapay.billing.fees import upcharge_bank_wire -from liberapay.billing.transactions import payin_bank_wire +from liberapay.billing.transactions import cancel_bank_wire_payin, payin_bank_wire from liberapay.constants import EVENTS, PAYIN_BANK_WIRE_MIN from liberapay.exceptions import InvalidNumber from liberapay.utils import b64decode_s, get_participant @@ -33,19 +33,26 @@ def get_exchange_payin(participant, request): participant = get_participant(state, restrict=True, block_suspended_user=True) -if request.method == 'POST' and request.body.get('action') == 'email': +if request.method == 'POST' and 'action' in request.body: exchange, payin = get_exchange_payin(participant, request) - sent = participant.send_email( - 'payin_bankwire_created', - (participant.email or participant.get_any_email()), - exchange=exchange._asdict(), payin=payin, - ) - if not sent: - raise response.error(500, _("An unknown error occurred.")) - if request.headers.get(b'X-Requested-With') == b'XMLHttpRequest': - raise response.json({'msg': _("The email has been sent.")}) - else: + action = request.body['action'] + if action == 'email': + sent = participant.send_email( + 'payin_bankwire_created', + (participant.email or participant.get_any_email()), + exchange=exchange._asdict(), payin=payin, + ) + if not sent: + raise response.error(500, _("An unknown error occurred.")) + if request.headers.get(b'X-Requested-With') == b'XMLHttpRequest': + raise response.json({'msg': _("The email has been sent.")}) + else: + response.redirect(request.line.uri) + elif action == 'cancel': + cancel_bank_wire_payin(website.db, exchange, payin, participant) response.redirect(request.line.uri) + else: + raise response.error(400, "bad `action` value '%s' in body" % action) exchange, payin = None, None if 'exchange_id' in request.qs: @@ -121,10 +128,16 
@@ title = _("Adding Money") % block thin_content % if exchange and exchange.status == 'failed' + % if exchange.note == 'canceled' + <div class="alert alert-info">{{ _("This bank wire has been canceled.") }}</div> + <a class="btn btn-default" href="{{ participant.path('wallet/payin') }}" + >{{ _("Go back") }}</a> + % else <div class="alert alert-danger">{{ _("The attempt to prepare a bank wire transfer of {0} has failed. Error message: {1}", exchange.amount + exchange.fee, exchange.note) }}</div> + % endif % endif % if not show_form and not payin @@ -150,7 +163,7 @@ title = _("Adding Money") % endif </p> - % elif payin + % elif payin and exchange.status == 'created' <p>{{ _( "We are ready to receive the funds. Please send exactly {0} to the " @@ -196,10 +209,12 @@ title = _("Adding Money") >{{ _("Change your email settings") }}</a> % endif - % if back_to - <a href="{{ response.sanitize_untrusted_url(back_to) }}" - class="btn btn-default pull-right">{{ _("Go back") }}</a> - % endif + <p>{{ _("Changed your mind? Cancel the payment to avoid receiving a failure notification next month:") }}</p> + <form action="" method="POST"> + <input type="hidden" name="csrf_token" value="{{ csrf_token }}" /> + <input type="hidden" name="action" value="cancel" /> + <button class="btn btn-danger">{{ _("Cancel") }}</button> + </form> % elif show_form <form id="payin" action="javascript:" method="POST"
mampfes__hacs_waste_collection_schedule-556
Add StadtService Brühl
Add Source for StadtService Brühl
Update stadtservice_bruehl_de.md
[ { "content": "import datetime\nimport logging\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"StadtService Brühl\"\nDESCRIPTION = \"Source für Abfallkalender StadtService Brühl\"\nURL = \"https://stadtservice-bruehl.de\"\nTEST_CASES = {\"TEST1\": {\"strasse\": \"Badorfer Straße\", \"hnr\": \"1\"}}\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(self, strasse, hnr):\n self._strasse = strasse\n self._hnr = hnr\n self._ics = ICS()\n\n def fetch(self):\n\n today = datetime.date.today()\n year = today.year\n # Get District\n data = {\n \"street\": self._strasse,\n \"street_number\": self._hnr,\n \"send_street_and_nummber_data\": \"\",\n }\n\n r = requests.post(\n \"https://services.stadtservice-bruehl.de/abfallkalender/\", data=data\n )\n r.raise_for_status()\n\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n for tag in soup.find_all(\"input\", type=\"hidden\"):\n # print(tag[\"name\"])\n # print(tag[\"value\"])\n if tag[\"name\"] == \"post_district\":\n post_district = tag[\"value\"]\n\n if post_district == \"\":\n raise Exception(\"Unable to get district\")\n\n # print(post_district);\n # Get ICAL\n data = {\n \"post_year\": year,\n \"post_district\": post_district,\n \"post_street_name\": self._strasse,\n \"post_street_number\": self._hnr,\n \"checked_waste_type_hausmuell\": \"on\",\n \"checked_waste_type_gelber_sack\": \"on\",\n \"checked_waste_type_altpapier\": \"on\",\n \"checked_waste_type_bio\": \"on\",\n \"checked_waste_type_weihnachtsbaeume\": \"on\",\n \"checked_waste_type_strassenlaub\": \"on\",\n \"form_page_id\": \"9\",\n \"reminder_time\": \"8\",\n \"send_ics_download_configurator_data\": \"\",\n }\n\n r = requests.post(\n \"https://services.stadtservice-bruehl.de/abfallkalender/individuellen-abfuhrkalender-herunterladen/\",\n data=data,\n )\n r.raise_for_status()\n\n dates = self._ics.convert(r.text)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/stadtservice_bruehl_de.py" } ]
[ { "content": "import datetime\nimport logging\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"StadtService Brühl\"\nDESCRIPTION = \"Source für Abfallkalender StadtService Brühl\"\nURL = \"https://stadtservice-bruehl.de\"\nTEST_CASES = {\"TEST1\": {\"strasse\": \"Badorfer Straße\", \"hnr\": \"1\"}}\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(self, strasse, hnr):\n self._strasse = strasse\n self._hnr = hnr\n self._ics = ICS(regex=\"(.*?) \\\\- \", split_at=\", \")\n\n def fetch(self):\n\n today = datetime.date.today()\n year = today.year\n # Get District\n data = {\n \"street\": self._strasse,\n \"street_number\": self._hnr,\n \"send_street_and_nummber_data\": \"\",\n }\n\n r = requests.post(\n \"https://services.stadtservice-bruehl.de/abfallkalender/\", data=data\n )\n r.raise_for_status()\n\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n for tag in soup.find_all(\"input\", type=\"hidden\"):\n # print(tag[\"name\"])\n # print(tag[\"value\"])\n if tag[\"name\"] == \"post_district\":\n post_district = tag[\"value\"]\n\n if post_district == \"\":\n raise Exception(\"Unable to get district\")\n\n # print(post_district);\n # Get ICAL\n data = {\n \"post_year\": year,\n \"post_district\": post_district,\n \"post_street_name\": self._strasse,\n \"post_street_number\": self._hnr,\n \"checked_waste_type_hausmuell\": \"on\",\n \"checked_waste_type_gelber_sack\": \"on\",\n \"checked_waste_type_altpapier\": \"on\",\n \"checked_waste_type_bio\": \"on\",\n \"checked_waste_type_weihnachtsbaeume\": \"on\",\n \"checked_waste_type_strassenlaub\": \"on\",\n \"form_page_id\": \"9\",\n \"reminder_time\": \"8\",\n \"send_ics_download_configurator_data\": \"\",\n }\n\n r = requests.post(\n \"https://services.stadtservice-bruehl.de/abfallkalender/individuellen-abfuhrkalender-herunterladen/\",\n data=data,\n )\n r.raise_for_status()\n\n dates = self._ics.convert(r.text)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/stadtservice_bruehl_de.py" } ]
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/stadtservice_bruehl_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/stadtservice_bruehl_de.py index c000778cd..b9c2ec3c0 100644 --- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/stadtservice_bruehl_de.py +++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/stadtservice_bruehl_de.py @@ -18,7 +18,7 @@ class Source: def __init__(self, strasse, hnr): self._strasse = strasse self._hnr = hnr - self._ics = ICS() + self._ics = ICS(regex="(.*?) \\- ", split_at=", ") def fetch(self):
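The only change in this record is the pair of new `ICS` arguments. Presumably they post-process each calendar event's summary: `split_at=", "` separates combined waste types listed in one event, and `regex="(.*?) \\- "` keeps just the type name in front of the first " - ". A minimal standalone sketch of that transformation — the sample summary string is hypothetical, not taken from the real Brühl feed, and the library's internal order of operations may differ:

```python
import re

# Hypothetical summary line as it might appear in the downloaded ICS file.
summary = "Restabfall - 14-taegl., Biotonne - woechentl."

# split_at=", " -> one entry per waste type.
parts = summary.split(", ")

# regex="(.*?) \\- " -> keep only the name before the first " - ".
names = [re.match(r"(.*?) \- ", part).group(1) for part in parts]

print(names)  # ['Restabfall', 'Biotonne']
```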
google__timesketch-90
Importing of JSON timelines creates duplicate timelines with same name.
Steps to reproduce
1) command line:
```
echo '[ { "datetime": "2012-04-12T17:24:38-08:00", "timestamp_desc": "Test", "timestamp": 1334251478000000, "message": "Test message" } ]' > test_dupe.json
tsctl json2ts --name test_dupe --file test_dupe.json
tsctl json2ts --name test_dupe --file test_dupe.json
```
2) Create new sketch
3) Notice duplicate "test_dupe" timelines on list to select from.
4) Add both
5) Explore, using "*" as filter.
6) Notice duplicate results.
[ { "content": "#!/usr/bin/env python\n# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This module is for creating the app for a WSGI server.\n\nExample with Gunicorn:\n$ gunicorn -b 127.0.0.1:4000 --log-file - wsgi:application\n\nExample configuration for Apache with mod_wsgi (a2enmod mod_wsgi):\n<VirtualHost *:443>\n ServerAdmin root@localhost\n SSLEngine On\n SSLCertificateFile /etc/apache2/cert.crt\n SSLCertificateKeyFile /etc/apache2/cert.key\n WSGIScriptAlias / /path/to/this/file/wsgi.py\n</VirtualHost>\n\"\"\"\n\n# If you installed Timesketch in a virtualenv you need to activate it.\n# This needs to be before any imports in order to import from the virtualenv.\n#activate_virtualenv = '/path/to/your/virtualenv/bin/activate_this.py'\n#execfile(activate_virtualenv, dict(__file__=activate_virtualenv))\n\nfrom timesketch import create_app\nfrom timesketch.models import db_session\n\napplication = create_app()\n\n# Remove the session after every request or app shutdown.\[email protected]_appcontext\ndef shutdown_session(exception=None):\n db_session.remove()\n", "path": "wsgi.py" } ]
[ { "content": "#!/usr/bin/env python\n# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This module is for creating the app for a WSGI server.\n\nExample with Gunicorn:\n$ gunicorn -b 127.0.0.1:4000 --log-file - wsgi:application\n\nExample configuration for Apache with mod_wsgi (a2enmod mod_wsgi):\n<VirtualHost *:443>\n ServerAdmin root@localhost\n SSLEngine On\n SSLCertificateFile /etc/apache2/cert.crt\n SSLCertificateKeyFile /etc/apache2/cert.key\n WSGIScriptAlias / /path/to/this/file/wsgi.py\n</VirtualHost>\n\"\"\"\n\n# If you installed Timesketch in a virtualenv you need to activate it.\n# This needs to be before any imports in order to import from the virtualenv.\n#activate_virtualenv = '/path/to/your/virtualenv/bin/activate_this.py'\n#execfile(activate_virtualenv, dict(__file__=activate_virtualenv))\n\nfrom timesketch import create_app\nfrom timesketch.models import db_session\n\napplication = create_app()\n\n# pylint: disable=unused-argument\[email protected]_appcontext\ndef shutdown_session(exception=None):\n \"\"\"Remove the database session after every request or app shutdown.\"\"\"\n db_session.remove()\n", "path": "wsgi.py" } ]
diff --git a/tsctl b/tsctl index 9882d1f4fb..51750bd599 100644 --- a/tsctl +++ b/tsctl @@ -248,15 +248,16 @@ class CreateTimelineFromJson(Command): events, index_name, event_type) # Create the searchindex in the Timesketch database. - searchindex = SearchIndex( + searchindex = SearchIndex.get_or_create( name=timeline_name, description=timeline_name, user=None, index_name=index_name) searchindex.grant_permission(None, u'read') db_session.add(searchindex) db_session.commit() sys.stdout.write( - u'Search index {0:s} created\n{1:d} events inserted\n'.format( - timeline_name, counter[u'events'])) + u'Timeline name: {0:s}\nElasticsearch index: {1:s}\n' + u'Events inserted: {2:d}\n'.format( + timeline_name, index_name, counter[u'events'])) except IOError as exception: sys.stderr.write(u'Error: {0:s}\n'.format(exception)) diff --git a/wsgi.py b/wsgi.py index 996e578b35..9b7fe31685 100644 --- a/wsgi.py +++ b/wsgi.py @@ -37,7 +37,8 @@ application = create_app() -# Remove the session after every request or app shutdown. +# pylint: disable=unused-argument @application.teardown_appcontext def shutdown_session(exception=None): + """Remove the database session after every request or app shutdown.""" db_session.remove()
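The core change in the diff above is swapping the plain `SearchIndex(...)` constructor for `SearchIndex.get_or_create(...)`: the constructor always inserts a fresh row, so running `tsctl json2ts` twice with the same name yielded two timelines. A generic sketch of the get-or-create idiom the fix relies on — plain SQLAlchemy-style code for illustration; Timesketch's actual model mixin may differ in detail:

```python
def get_or_create(session, model, **kwargs):
    """Return an existing row matching kwargs, or insert a new one."""
    instance = session.query(model).filter_by(**kwargs).first()
    if instance is None:
        # Nothing matched, so this is the first import under that name.
        instance = model(**kwargs)
        session.add(instance)
    return instance
```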
easybuilders__easybuild-easyblocks-2267
enhanced extension filter for Python packages causes trouble for netcdf4-python sanity check
The enhanced extension filter for Python packages made in #2224 causes trouble for `netcdf4-python-1.5.3-intel-2020a-Python-3.8.2.eb`:
```
== 2020-12-03 20:44:05,145 build_log.py:169 ERROR EasyBuild crashed with an error (at easybuild/base/exceptions.py:124 in __init__): Sanity check failed: command "mpirun -n 1 PYTHONNOUSERSITE=1 python -c "import netCDF4"" failed; output: [proxy:0:[email protected]] HYD_spawn (../../../../../src/pm/i_hydra/libhydra/spawn/intel/hydra_spawn.c:128): execvp error on file PYTHONNOUSERSITE=1 (No such file or directory)
```
We should set `$PYTHONNOUSERSITE` to `1` some other way, and make sure it's set *again* after the environment is reset in the sanity check step...
cc @Flamefire
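The failure mode is plain shell mechanics: `VAR=1 cmd` only works when the assignment leads the command line, so once the sanity check prepends `mpirun -n 1`, mpirun tries to execvp the literal string `PYTHONNOUSERSITE=1`. A short sketch of the direction the fix takes, using two lines that both appear verbatim in the easyblock content below (exporting the variable via `env.setvar` and keeping the filter command plain); the full patch also has to re-export the variable after the sanity-check environment reset:

```python
import easybuild.tools.environment as env

# Export the variable instead of inlining "PYTHONNOUSERSITE=1" into the
# command template, so launchers like mpirun never see it as an argument.
env.setvar('PYTHONNOUSERSITE', '1', verbose=False)

# The extension filter command can then stay plain:
EXTS_FILTER_PYTHON_PACKAGES = ('python -c "import %(ext_name)s"', "")
```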
[ { "content": "##\n# Copyright 2009-2020 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for building and installing Python, implemented as an easyblock\n\n@author: Stijn De Weirdt (Ghent University)\n@author: Dries Verdegem (Ghent University)\n@author: Kenneth Hoste (Ghent University)\n@author: Pieter De Baets (Ghent University)\n@author: Jens Timmerman (Ghent University)\n@author: Bart Oldeman (McGill University, Calcul Quebec, Compute Canada)\n\"\"\"\nimport glob\nimport os\nimport re\nimport fileinput\nimport sys\nimport tempfile\nfrom distutils.version import LooseVersion\n\nimport easybuild.tools.environment as env\nfrom easybuild.easyblocks.generic.configuremake import ConfigureMake\nfrom easybuild.framework.easyconfig import CUSTOM\nfrom easybuild.tools.build_log import EasyBuildError, print_warning\nfrom easybuild.tools.config import build_option, log_path\nfrom easybuild.tools.modules import get_software_libdir, get_software_root, get_software_version\nfrom easybuild.tools.filetools import apply_regex_substitutions, change_dir, mkdir\nfrom easybuild.tools.filetools import read_file, remove_dir, symlink, write_file\nfrom easybuild.tools.run import run_cmd\nfrom easybuild.tools.systemtools import get_shared_lib_ext\nimport easybuild.tools.toolchain as toolchain\n\n\nEXTS_FILTER_PYTHON_PACKAGES = ('PYTHONNOUSERSITE=1 python -c \"import %(ext_name)s\"', \"\")\n\n# magic value for unlimited stack size\nUNLIMITED = 'unlimited'\n\nEBPYTHONPREFIXES = 'EBPYTHONPREFIXES'\n\nSITECUSTOMIZE = \"\"\"\n# sitecustomize.py script installed by EasyBuild,\n# to support picking up Python packages which were installed\n# for multiple Python versions in the same directory\n\nimport os\nimport site\nimport sys\n\n# print debug messages when $EBPYTHONPREFIXES_DEBUG is defined\ndebug = os.getenv('%(EBPYTHONPREFIXES)s_DEBUG')\n\n# use prefixes from $EBPYTHONPREFIXES, so they have lower priority than\n# virtualenv-installed packages, unlike $PYTHONPATH\n\nebpythonprefixes = os.getenv('%(EBPYTHONPREFIXES)s')\n\nif ebpythonprefixes:\n postfix = os.path.join('lib', 'python'+'.'.join(map(str,sys.version_info[:2])), 'site-packages')\n if debug:\n print(\"[%(EBPYTHONPREFIXES)s] postfix subdirectory to consider in installation directories: %%s\" %% postfix)\n\n for prefix in ebpythonprefixes.split(os.pathsep):\n if debug:\n print(\"[%(EBPYTHONPREFIXES)s] prefix: %%s\" %% prefix)\n sitedir = os.path.join(prefix, postfix)\n if os.path.isdir(sitedir):\n if debug:\n print(\"[%(EBPYTHONPREFIXES)s] adding site dir: 
%%s\" %% sitedir)\n site.addsitedir(sitedir)\n\"\"\" % {'EBPYTHONPREFIXES': EBPYTHONPREFIXES}\n\n\nclass EB_Python(ConfigureMake):\n \"\"\"Support for building/installing Python\n - default configure/build_step/make install works fine\n\n To extend Python by adding extra packages there are two ways:\n - list the packages in the exts_list, this will include the packages in this Python installation\n - create a seperate easyblock, so the packages can be loaded with module load\n\n e.g., you can include numpy and scipy in a default Python installation\n but also provide newer updated numpy and scipy versions by creating a PythonPackage-derived easyblock for it.\n \"\"\"\n\n @staticmethod\n def extra_options():\n \"\"\"Add extra config options specific to Python.\"\"\"\n extra_vars = {\n 'ebpythonprefixes': [True, \"Create sitecustomize.py and allow use of $EBPYTHONPREFIXES\", CUSTOM],\n 'optimized': [True, \"Build with expensive, stable optimizations (PGO, etc.) (version >= 3.5.4)\", CUSTOM],\n 'ulimit_unlimited': [False, \"Ensure stack size limit is set to '%s' during build\" % UNLIMITED, CUSTOM],\n 'use_lto': [None, \"Build with Link Time Optimization (>= v3.7.0, potentially unstable on some toolchains). \"\n \"If None: auto-detect based on toolchain compiler (version)\", CUSTOM],\n }\n return ConfigureMake.extra_options(extra_vars)\n\n def __init__(self, *args, **kwargs):\n \"\"\"Constructor for Python easyblock.\"\"\"\n super(EB_Python, self).__init__(*args, **kwargs)\n\n self.pyshortver = '.'.join(self.version.split('.')[:2])\n\n self.pythonpath = None\n if self.cfg['ebpythonprefixes']:\n easybuild_subdir = log_path()\n self.pythonpath = os.path.join(easybuild_subdir, 'python')\n\n def patch_step(self, *args, **kwargs):\n \"\"\"\n Custom patch step for Python:\n * patch setup.py when --sysroot EasyBuild configuration setting is used\n \"\"\"\n\n super(EB_Python, self).patch_step(*args, **kwargs)\n\n # if we're installing Python with an alternate sysroot,\n # we need to patch setup.py which includes hardcoded paths like /usr/include and /lib64;\n # this fixes problems like not being able to build the _ssl module (\"Could not build the ssl module\")\n sysroot = build_option('sysroot')\n if sysroot:\n sysroot_inc_dirs, sysroot_lib_dirs = [], []\n\n for pattern in ['include*', os.path.join('usr', 'include*')]:\n sysroot_inc_dirs.extend(glob.glob(os.path.join(sysroot, pattern)))\n\n if sysroot_inc_dirs:\n sysroot_inc_dirs = ', '.join([\"'%s'\" % x for x in sysroot_inc_dirs])\n else:\n raise EasyBuildError(\"No include directories found in sysroot %s!\", sysroot)\n\n for pattern in ['lib*', os.path.join('usr', 'lib*')]:\n sysroot_lib_dirs.extend(glob.glob(os.path.join(sysroot, pattern)))\n\n if sysroot_lib_dirs:\n sysroot_lib_dirs = ', '.join([\"'%s'\" % x for x in sysroot_lib_dirs])\n else:\n raise EasyBuildError(\"No lib directories found in sysroot %s!\", sysroot)\n\n setup_py_fn = 'setup.py'\n setup_py_txt = read_file(setup_py_fn)\n\n # newer Python versions (3.6+) have refactored code, requires different patching approach\n if \"system_include_dirs = \" in setup_py_txt:\n regex_subs = [\n (r\"(system_include_dirs = \\[).*\\]\", r\"\\1%s]\" % sysroot_inc_dirs),\n (r\"(system_lib_dirs = \\[).*\\]\", r\"\\1%s]\" % sysroot_lib_dirs),\n ]\n else:\n regex_subs = [\n (r\"^([ ]+)'/usr/include',\", r\"\\1%s,\" % sysroot_inc_dirs),\n (r\"\\['/usr/include'\\]\", r\"[%s]\" % sysroot_inc_dirs),\n (r\"^([ ]+)'/lib64', '/usr/lib64',\", r\"\\1%s,\" % sysroot_lib_dirs),\n (r\"^[ ]+'/lib', '/usr/lib',\", 
''),\n ]\n\n # Replace remaining hardcoded paths like '/usr/include', '/usr/lib' or '/usr/local',\n # where these paths are appearing inside single quotes (').\n # Inject sysroot in front to avoid picking up anything outside of sysroot,\n # We can leverage the single quotes such that we do not accidentally fiddle with other entries,\n # like /prefix/usr/include .\n for usr_subdir in ('usr/include', 'usr/lib', 'usr/local'):\n sysroot_usr_subdir = os.path.join(sysroot, usr_subdir)\n regex_subs.append((r\"'/%s\" % usr_subdir, r\"'%s\" % sysroot_usr_subdir))\n regex_subs.append((r'\"/%s' % usr_subdir, r'\"%s' % sysroot_usr_subdir))\n\n apply_regex_substitutions(setup_py_fn, regex_subs)\n\n def prepare_for_extensions(self):\n \"\"\"\n Set default class and filter for Python packages\n \"\"\"\n # build and install additional packages with PythonPackage easyblock\n self.cfg['exts_defaultclass'] = \"PythonPackage\"\n self.cfg['exts_filter'] = EXTS_FILTER_PYTHON_PACKAGES\n\n # don't pass down any build/install options that may have been specified\n # 'make' options do not make sense for when building/installing Python libraries (usually via 'python setup.py')\n msg = \"Unsetting '%s' easyconfig parameter before building/installing extensions: %s\"\n for param in ['buildopts', 'installopts']:\n if self.cfg[param]:\n self.log.debug(msg, param, self.cfg[param])\n self.cfg[param] = ''\n\n def auto_detect_lto_support(self):\n \"\"\"Return True, if LTO should be enabled for current toolchain\"\"\"\n result = False\n # GCC >= 8 should be stable enough for LTO\n if self.toolchain.comp_family() == toolchain.GCC:\n gcc_ver = get_software_version('GCCcore') or get_software_version('GCC')\n if gcc_ver and LooseVersion(gcc_ver) >= LooseVersion('8.0'):\n self.log.info(\"Auto-enabling LTO since GCC >= v8.0 is used as toolchain compiler\")\n result = True\n return result\n\n def configure_step(self):\n \"\"\"Set extra configure options.\"\"\"\n\n # Check for and report distutils user configs which may make the installation fail\n # See https://github.com/easybuilders/easybuild-easyconfigs/issues/11009\n for cfg in [os.path.join(os.path.expanduser('~'), name) for name in ('.pydistutils.cfg', 'pydistutils.cfg')]:\n if os.path.exists(cfg):\n raise EasyBuildError(\"Legacy distutils user configuration file found at %s. 
Aborting.\", cfg)\n\n self.cfg.update('configopts', \"--enable-shared\")\n\n # Explicitely enable thread support on < 3.7 (always on 3.7+)\n if LooseVersion(self.version) < LooseVersion('3.7'):\n self.cfg.update('configopts', \"--with-threads\")\n\n # Explicitely enable unicode on Python 2, always on for Python 3\n # Need to be careful to match the unicode settings to the underlying python\n if LooseVersion(self.version) < LooseVersion('3.0'):\n if sys.maxunicode == 1114111:\n self.cfg.update('configopts', \"--enable-unicode=ucs4\")\n elif sys.maxunicode == 65535:\n self.cfg.update('configopts', \"--enable-unicode=ucs2\")\n else:\n raise EasyBuildError(\"Unknown maxunicode value for your python: %d\" % sys.maxunicode)\n\n # LTO introduced in 3.7.0\n if LooseVersion(self.version) >= LooseVersion('3.7.0'):\n use_lto = self.cfg['use_lto']\n if use_lto is None:\n use_lto = self.auto_detect_lto_support()\n if use_lto:\n self.cfg.update('configopts', \"--with-lto\")\n\n # Enable further optimizations at the cost of a longer build\n # Introduced in 3.5.3, fixed in 3.5.4: https://docs.python.org/3.5/whatsnew/changelog.html\n if self.cfg['optimized'] and LooseVersion(self.version) >= LooseVersion('3.5.4'):\n # only configure with --enable-optimizations when compiling Python with (recent) GCC compiler\n if self.toolchain.comp_family() == toolchain.GCC:\n gcc_ver = get_software_version('GCCcore') or get_software_version('GCC')\n if LooseVersion(gcc_ver) >= LooseVersion('8.0'):\n self.cfg.update('configopts', \"--enable-optimizations\")\n\n # Pip is included since 3.4 via ensurepip https://docs.python.org/3.4/whatsnew/changelog.html\n if LooseVersion(self.version) >= LooseVersion('3.4.0'):\n # Default, but do it explicitly\n self.cfg.update('configopts', \"--with-ensurepip=upgrade\")\n\n modules_setup = os.path.join(self.cfg['start_dir'], 'Modules', 'Setup')\n if LooseVersion(self.version) < LooseVersion('3.8.0'):\n modules_setup += '.dist'\n\n libreadline = get_software_root('libreadline')\n if libreadline:\n ncurses = get_software_root('ncurses')\n if ncurses:\n readline_libdir = get_software_libdir('libreadline')\n ncurses_libdir = get_software_libdir('ncurses')\n readline_static_lib = os.path.join(libreadline, readline_libdir, 'libreadline.a')\n ncurses_static_lib = os.path.join(ncurses, ncurses_libdir, 'libncurses.a')\n readline = \"readline readline.c %s %s\" % (readline_static_lib, ncurses_static_lib)\n for line in fileinput.input(modules_setup, inplace='1', backup='.readline'):\n line = re.sub(r\"^#readline readline.c.*\", readline, line)\n sys.stdout.write(line)\n else:\n raise EasyBuildError(\"Both libreadline and ncurses are required to ensure readline support\")\n\n openssl = get_software_root('OpenSSL')\n if openssl:\n for line in fileinput.input(modules_setup, inplace='1', backup='.ssl'):\n line = re.sub(r\"^#SSL=.*\", \"SSL=%s\" % openssl, line)\n line = re.sub(r\"^#(\\s*-DUSE_SSL -I)\", r\"\\1\", line)\n line = re.sub(r\"^#(\\s*-L\\$\\(SSL\\)/lib )\", r\"\\1 -L$(SSL)/lib64 \", line)\n sys.stdout.write(line)\n\n tcl = get_software_root('Tcl')\n tk = get_software_root('Tk')\n if tcl and tk:\n tclver = get_software_version('Tcl')\n tkver = get_software_version('Tk')\n tcltk_maj_min_ver = '.'.join(tclver.split('.')[:2])\n if tcltk_maj_min_ver != '.'.join(tkver.split('.')[:2]):\n raise EasyBuildError(\"Tcl and Tk major/minor versions don't match: %s vs %s\", tclver, tkver)\n\n self.cfg.update('configopts', \"--with-tcltk-includes='-I%s/include -I%s/include'\" % (tcl, tk))\n\n tcl_libdir = 
os.path.join(tcl, get_software_libdir('Tcl'))\n tk_libdir = os.path.join(tk, get_software_libdir('Tk'))\n tcltk_libs = \"-L%(tcl_libdir)s -L%(tk_libdir)s -ltcl%(maj_min_ver)s -ltk%(maj_min_ver)s\" % {\n 'tcl_libdir': tcl_libdir,\n 'tk_libdir': tk_libdir,\n 'maj_min_ver': tcltk_maj_min_ver,\n }\n self.cfg.update('configopts', \"--with-tcltk-libs='%s'\" % tcltk_libs)\n\n # don't add user site directory to sys.path (equivalent to python -s)\n # This matters e.g. when python installs the bundled pip & setuptools (for >= 3.4)\n env.setvar('PYTHONNOUSERSITE', '1', verbose=False)\n\n super(EB_Python, self).configure_step()\n\n def build_step(self, *args, **kwargs):\n \"\"\"Custom build procedure for Python, ensure stack size limit is set to 'unlimited' (if desired).\"\"\"\n\n # make sure installation directory doesn't already exist when building with --rpath and\n # configuring with --enable-optimizations, since that leads to errors like:\n # ./python: symbol lookup error: ./python: undefined symbol: __gcov_indirect_call\n # see also https://bugs.python.org/issue29712\n enable_opts_flag = '--enable-optimizations'\n if build_option('rpath') and enable_opts_flag in self.cfg['configopts']:\n if os.path.exists(self.installdir):\n warning_msg = \"Removing existing installation directory '%s', \"\n warning_msg += \"because EasyBuild is configured to use RPATH linking \"\n warning_msg += \"and %s configure option is used.\" % enable_opts_flag\n print_warning(warning_msg % self.installdir)\n remove_dir(self.installdir)\n\n if self.cfg['ulimit_unlimited']:\n # determine current stack size limit\n (out, _) = run_cmd(\"ulimit -s\")\n curr_ulimit_s = out.strip()\n\n # figure out hard limit for stack size limit;\n # this determines whether or not we can use \"ulimit -s unlimited\"\n (out, _) = run_cmd(\"ulimit -s -H\")\n max_ulimit_s = out.strip()\n\n if curr_ulimit_s == UNLIMITED:\n self.log.info(\"Current stack size limit is %s: OK\", curr_ulimit_s)\n elif max_ulimit_s == UNLIMITED:\n self.log.info(\"Current stack size limit is %s, setting it to %s for build...\",\n curr_ulimit_s, UNLIMITED)\n self.cfg.update('prebuildopts', \"ulimit -s %s && \" % UNLIMITED)\n else:\n msg = \"Current stack size limit is %s, and can not be set to %s due to hard limit of %s;\"\n msg += \" setting stack size limit to %s instead, \"\n msg += \" this may break part of the compilation (e.g. hashlib)...\"\n print_warning(msg % (curr_ulimit_s, UNLIMITED, max_ulimit_s, max_ulimit_s))\n self.cfg.update('prebuildopts', \"ulimit -s %s && \" % max_ulimit_s)\n\n super(EB_Python, self).build_step(*args, **kwargs)\n\n def install_step(self):\n \"\"\"Extend make install to make sure that the 'python' command is present.\"\"\"\n\n # avoid that pip (ab)uses $HOME/.cache/pip\n # cfr. 
https://pip.pypa.io/en/stable/reference/pip_install/#caching\n env.setvar('XDG_CACHE_HOME', tempfile.gettempdir())\n self.log.info(\"Using %s as pip cache directory\", os.environ['XDG_CACHE_HOME'])\n\n super(EB_Python, self).install_step()\n\n python_binary_path = os.path.join(self.installdir, 'bin', 'python')\n if not os.path.isfile(python_binary_path):\n symlink(python_binary_path + self.pyshortver, python_binary_path)\n\n if self.cfg['ebpythonprefixes']:\n write_file(os.path.join(self.installdir, self.pythonpath, 'sitecustomize.py'), SITECUSTOMIZE)\n\n # symlink lib/python*/lib-dynload to lib64/python*/lib-dynload if it doesn't exist;\n # see https://github.com/easybuilders/easybuild-easyblocks/issues/1957\n lib_dynload = 'lib-dynload'\n python_lib_dynload = os.path.join('python%s' % self.pyshortver, lib_dynload)\n lib_dynload_path = os.path.join(self.installdir, 'lib', python_lib_dynload)\n if not os.path.exists(lib_dynload_path):\n lib64_dynload_path = os.path.join('lib64', python_lib_dynload)\n if os.path.exists(os.path.join(self.installdir, lib64_dynload_path)):\n lib_dynload_parent = os.path.dirname(lib_dynload_path)\n mkdir(lib_dynload_parent, parents=True)\n cwd = change_dir(lib_dynload_parent)\n # use relative path as target, to avoid hardcoding path to install directory\n target_lib_dynload = os.path.join('..', '..', lib64_dynload_path)\n symlink(target_lib_dynload, lib_dynload)\n change_dir(cwd)\n\n def sanity_check_step(self):\n \"\"\"Custom sanity check for Python.\"\"\"\n\n shlib_ext = get_shared_lib_ext()\n\n try:\n fake_mod_data = self.load_fake_module()\n except EasyBuildError as err:\n raise EasyBuildError(\"Loading fake module failed: %s\", err)\n\n abiflags = ''\n if LooseVersion(self.version) >= LooseVersion(\"3\"):\n run_cmd(\"which python\", log_all=True, simple=False, trace=False)\n cmd = 'python -c \"import sysconfig; print(sysconfig.get_config_var(\\'abiflags\\'));\"'\n (abiflags, _) = run_cmd(cmd, log_all=True, simple=False, trace=False)\n if not abiflags:\n raise EasyBuildError(\"Failed to determine abiflags: %s\", abiflags)\n else:\n abiflags = abiflags.strip()\n\n # make sure hashlib is installed correctly, there should be no errors/output when 'import hashlib' is run\n # (python will exit with 0 regardless of whether or not errors are printed...)\n # cfr. https://github.com/easybuilders/easybuild-easyconfigs/issues/6484\n cmd = \"python -c 'import hashlib'\"\n (out, _) = run_cmd(cmd)\n regex = re.compile('error', re.I)\n if regex.search(out):\n raise EasyBuildError(\"Found one or more errors in output of %s: %s\", cmd, out)\n else:\n self.log.info(\"No errors found in output of %s: %s\", cmd, out)\n\n pyver = 'python' + self.pyshortver\n custom_paths = {\n 'files': [os.path.join('bin', pyver), os.path.join('lib', 'lib' + pyver + abiflags + '.' 
+ shlib_ext)],\n 'dirs': [os.path.join('include', pyver + abiflags), os.path.join('lib', pyver, 'lib-dynload')],\n }\n\n # cleanup\n self.clean_up_fake_module(fake_mod_data)\n\n custom_commands = [\n \"python --version\",\n \"python -c 'import _ctypes'\", # make sure that foreign function interface (libffi) works\n \"python -c 'import _ssl'\", # make sure SSL support is enabled one way or another\n \"python -c 'import readline'\", # make sure readline support was built correctly\n ]\n\n if LooseVersion(self.version) >= LooseVersion('3.4.0'):\n # Check that pip and setuptools are installed\n custom_paths['files'].extend([\n os.path.join('bin', pip) for pip in ('pip', 'pip3', 'pip' + self.pyshortver)\n ])\n custom_commands.extend([\n \"python -c 'import pip'\",\n \"python -c 'import setuptools'\",\n ])\n\n if get_software_root('Tk'):\n # also check whether importing tkinter module works, name is different for Python v2.x and v3.x\n if LooseVersion(self.version) >= LooseVersion('3'):\n tkinter = 'tkinter'\n else:\n tkinter = 'Tkinter'\n custom_commands.append(\"python -c 'import %s'\" % tkinter)\n\n # check whether _tkinter*.so is found, exact filename doesn't matter\n tkinter_so = os.path.join(self.installdir, 'lib', pyver, 'lib-dynload', '_tkinter*.' + shlib_ext)\n tkinter_so_hits = glob.glob(tkinter_so)\n if len(tkinter_so_hits) == 1:\n self.log.info(\"Found exactly one _tkinter*.so: %s\", tkinter_so_hits[0])\n else:\n raise EasyBuildError(\"Expected to find exactly one _tkinter*.so: %s\", tkinter_so_hits)\n\n super(EB_Python, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)\n\n def make_module_extra(self, *args, **kwargs):\n \"\"\"Add path to sitecustomize.py to $PYTHONPATH\"\"\"\n txt = super(EB_Python, self).make_module_extra()\n\n if self.pythonpath:\n txt += self.module_generator.prepend_paths('PYTHONPATH', self.pythonpath)\n\n return txt\n", "path": "easybuild/easyblocks/p/python.py" } ]
[ { "content": "##\n# Copyright 2009-2020 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for building and installing Python, implemented as an easyblock\n\n@author: Stijn De Weirdt (Ghent University)\n@author: Dries Verdegem (Ghent University)\n@author: Kenneth Hoste (Ghent University)\n@author: Pieter De Baets (Ghent University)\n@author: Jens Timmerman (Ghent University)\n@author: Bart Oldeman (McGill University, Calcul Quebec, Compute Canada)\n\"\"\"\nimport glob\nimport os\nimport re\nimport fileinput\nimport sys\nimport tempfile\nfrom distutils.version import LooseVersion\n\nimport easybuild.tools.environment as env\nfrom easybuild.easyblocks.generic.configuremake import ConfigureMake\nfrom easybuild.framework.easyconfig import CUSTOM\nfrom easybuild.tools.build_log import EasyBuildError, print_warning\nfrom easybuild.tools.config import build_option, log_path\nfrom easybuild.tools.modules import get_software_libdir, get_software_root, get_software_version\nfrom easybuild.tools.filetools import apply_regex_substitutions, change_dir, mkdir\nfrom easybuild.tools.filetools import read_file, remove_dir, symlink, write_file\nfrom easybuild.tools.run import run_cmd\nfrom easybuild.tools.systemtools import get_shared_lib_ext\nimport easybuild.tools.toolchain as toolchain\n\n\nEXTS_FILTER_PYTHON_PACKAGES = ('python -c \"import %(ext_name)s\"', \"\")\n\n# magic value for unlimited stack size\nUNLIMITED = 'unlimited'\n\nEBPYTHONPREFIXES = 'EBPYTHONPREFIXES'\n\nSITECUSTOMIZE = \"\"\"\n# sitecustomize.py script installed by EasyBuild,\n# to support picking up Python packages which were installed\n# for multiple Python versions in the same directory\n\nimport os\nimport site\nimport sys\n\n# print debug messages when $EBPYTHONPREFIXES_DEBUG is defined\ndebug = os.getenv('%(EBPYTHONPREFIXES)s_DEBUG')\n\n# use prefixes from $EBPYTHONPREFIXES, so they have lower priority than\n# virtualenv-installed packages, unlike $PYTHONPATH\n\nebpythonprefixes = os.getenv('%(EBPYTHONPREFIXES)s')\n\nif ebpythonprefixes:\n postfix = os.path.join('lib', 'python'+'.'.join(map(str,sys.version_info[:2])), 'site-packages')\n if debug:\n print(\"[%(EBPYTHONPREFIXES)s] postfix subdirectory to consider in installation directories: %%s\" %% postfix)\n\n for prefix in ebpythonprefixes.split(os.pathsep):\n if debug:\n print(\"[%(EBPYTHONPREFIXES)s] prefix: %%s\" %% prefix)\n sitedir = os.path.join(prefix, postfix)\n if os.path.isdir(sitedir):\n if debug:\n print(\"[%(EBPYTHONPREFIXES)s] adding site dir: %%s\" %% 
sitedir)\n site.addsitedir(sitedir)\n\"\"\" % {'EBPYTHONPREFIXES': EBPYTHONPREFIXES}\n\n\nclass EB_Python(ConfigureMake):\n \"\"\"Support for building/installing Python\n - default configure/build_step/make install works fine\n\n To extend Python by adding extra packages there are two ways:\n - list the packages in the exts_list, this will include the packages in this Python installation\n - create a seperate easyblock, so the packages can be loaded with module load\n\n e.g., you can include numpy and scipy in a default Python installation\n but also provide newer updated numpy and scipy versions by creating a PythonPackage-derived easyblock for it.\n \"\"\"\n\n @staticmethod\n def extra_options():\n \"\"\"Add extra config options specific to Python.\"\"\"\n extra_vars = {\n 'ebpythonprefixes': [True, \"Create sitecustomize.py and allow use of $EBPYTHONPREFIXES\", CUSTOM],\n 'optimized': [True, \"Build with expensive, stable optimizations (PGO, etc.) (version >= 3.5.4)\", CUSTOM],\n 'ulimit_unlimited': [False, \"Ensure stack size limit is set to '%s' during build\" % UNLIMITED, CUSTOM],\n 'use_lto': [None, \"Build with Link Time Optimization (>= v3.7.0, potentially unstable on some toolchains). \"\n \"If None: auto-detect based on toolchain compiler (version)\", CUSTOM],\n }\n return ConfigureMake.extra_options(extra_vars)\n\n def __init__(self, *args, **kwargs):\n \"\"\"Constructor for Python easyblock.\"\"\"\n super(EB_Python, self).__init__(*args, **kwargs)\n\n self.pyshortver = '.'.join(self.version.split('.')[:2])\n\n self.pythonpath = None\n if self.cfg['ebpythonprefixes']:\n easybuild_subdir = log_path()\n self.pythonpath = os.path.join(easybuild_subdir, 'python')\n\n def patch_step(self, *args, **kwargs):\n \"\"\"\n Custom patch step for Python:\n * patch setup.py when --sysroot EasyBuild configuration setting is used\n \"\"\"\n\n super(EB_Python, self).patch_step(*args, **kwargs)\n\n # if we're installing Python with an alternate sysroot,\n # we need to patch setup.py which includes hardcoded paths like /usr/include and /lib64;\n # this fixes problems like not being able to build the _ssl module (\"Could not build the ssl module\")\n sysroot = build_option('sysroot')\n if sysroot:\n sysroot_inc_dirs, sysroot_lib_dirs = [], []\n\n for pattern in ['include*', os.path.join('usr', 'include*')]:\n sysroot_inc_dirs.extend(glob.glob(os.path.join(sysroot, pattern)))\n\n if sysroot_inc_dirs:\n sysroot_inc_dirs = ', '.join([\"'%s'\" % x for x in sysroot_inc_dirs])\n else:\n raise EasyBuildError(\"No include directories found in sysroot %s!\", sysroot)\n\n for pattern in ['lib*', os.path.join('usr', 'lib*')]:\n sysroot_lib_dirs.extend(glob.glob(os.path.join(sysroot, pattern)))\n\n if sysroot_lib_dirs:\n sysroot_lib_dirs = ', '.join([\"'%s'\" % x for x in sysroot_lib_dirs])\n else:\n raise EasyBuildError(\"No lib directories found in sysroot %s!\", sysroot)\n\n setup_py_fn = 'setup.py'\n setup_py_txt = read_file(setup_py_fn)\n\n # newer Python versions (3.6+) have refactored code, requires different patching approach\n if \"system_include_dirs = \" in setup_py_txt:\n regex_subs = [\n (r\"(system_include_dirs = \\[).*\\]\", r\"\\1%s]\" % sysroot_inc_dirs),\n (r\"(system_lib_dirs = \\[).*\\]\", r\"\\1%s]\" % sysroot_lib_dirs),\n ]\n else:\n regex_subs = [\n (r\"^([ ]+)'/usr/include',\", r\"\\1%s,\" % sysroot_inc_dirs),\n (r\"\\['/usr/include'\\]\", r\"[%s]\" % sysroot_inc_dirs),\n (r\"^([ ]+)'/lib64', '/usr/lib64',\", r\"\\1%s,\" % sysroot_lib_dirs),\n (r\"^[ ]+'/lib', '/usr/lib',\", ''),\n 
]\n\n # Replace remaining hardcoded paths like '/usr/include', '/usr/lib' or '/usr/local',\n # where these paths are appearing inside single quotes (').\n # Inject sysroot in front to avoid picking up anything outside of sysroot,\n # We can leverage the single quotes such that we do not accidentally fiddle with other entries,\n # like /prefix/usr/include .\n for usr_subdir in ('usr/include', 'usr/lib', 'usr/local'):\n sysroot_usr_subdir = os.path.join(sysroot, usr_subdir)\n regex_subs.append((r\"'/%s\" % usr_subdir, r\"'%s\" % sysroot_usr_subdir))\n regex_subs.append((r'\"/%s' % usr_subdir, r'\"%s' % sysroot_usr_subdir))\n\n apply_regex_substitutions(setup_py_fn, regex_subs)\n\n def prepare_for_extensions(self):\n \"\"\"\n Set default class and filter for Python packages\n \"\"\"\n # build and install additional packages with PythonPackage easyblock\n self.cfg['exts_defaultclass'] = \"PythonPackage\"\n self.cfg['exts_filter'] = EXTS_FILTER_PYTHON_PACKAGES\n\n # don't pass down any build/install options that may have been specified\n # 'make' options do not make sense for when building/installing Python libraries (usually via 'python setup.py')\n msg = \"Unsetting '%s' easyconfig parameter before building/installing extensions: %s\"\n for param in ['buildopts', 'installopts']:\n if self.cfg[param]:\n self.log.debug(msg, param, self.cfg[param])\n self.cfg[param] = ''\n\n def auto_detect_lto_support(self):\n \"\"\"Return True, if LTO should be enabled for current toolchain\"\"\"\n result = False\n # GCC >= 8 should be stable enough for LTO\n if self.toolchain.comp_family() == toolchain.GCC:\n gcc_ver = get_software_version('GCCcore') or get_software_version('GCC')\n if gcc_ver and LooseVersion(gcc_ver) >= LooseVersion('8.0'):\n self.log.info(\"Auto-enabling LTO since GCC >= v8.0 is used as toolchain compiler\")\n result = True\n return result\n\n def configure_step(self):\n \"\"\"Set extra configure options.\"\"\"\n\n # Check for and report distutils user configs which may make the installation fail\n # See https://github.com/easybuilders/easybuild-easyconfigs/issues/11009\n for cfg in [os.path.join(os.path.expanduser('~'), name) for name in ('.pydistutils.cfg', 'pydistutils.cfg')]:\n if os.path.exists(cfg):\n raise EasyBuildError(\"Legacy distutils user configuration file found at %s. 
Aborting.\", cfg)\n\n self.cfg.update('configopts', \"--enable-shared\")\n\n # Explicitely enable thread support on < 3.7 (always on 3.7+)\n if LooseVersion(self.version) < LooseVersion('3.7'):\n self.cfg.update('configopts', \"--with-threads\")\n\n # Explicitely enable unicode on Python 2, always on for Python 3\n # Need to be careful to match the unicode settings to the underlying python\n if LooseVersion(self.version) < LooseVersion('3.0'):\n if sys.maxunicode == 1114111:\n self.cfg.update('configopts', \"--enable-unicode=ucs4\")\n elif sys.maxunicode == 65535:\n self.cfg.update('configopts', \"--enable-unicode=ucs2\")\n else:\n raise EasyBuildError(\"Unknown maxunicode value for your python: %d\" % sys.maxunicode)\n\n # LTO introduced in 3.7.0\n if LooseVersion(self.version) >= LooseVersion('3.7.0'):\n use_lto = self.cfg['use_lto']\n if use_lto is None:\n use_lto = self.auto_detect_lto_support()\n if use_lto:\n self.cfg.update('configopts', \"--with-lto\")\n\n # Enable further optimizations at the cost of a longer build\n # Introduced in 3.5.3, fixed in 3.5.4: https://docs.python.org/3.5/whatsnew/changelog.html\n if self.cfg['optimized'] and LooseVersion(self.version) >= LooseVersion('3.5.4'):\n # only configure with --enable-optimizations when compiling Python with (recent) GCC compiler\n if self.toolchain.comp_family() == toolchain.GCC:\n gcc_ver = get_software_version('GCCcore') or get_software_version('GCC')\n if LooseVersion(gcc_ver) >= LooseVersion('8.0'):\n self.cfg.update('configopts', \"--enable-optimizations\")\n\n # Pip is included since 3.4 via ensurepip https://docs.python.org/3.4/whatsnew/changelog.html\n if LooseVersion(self.version) >= LooseVersion('3.4.0'):\n # Default, but do it explicitly\n self.cfg.update('configopts', \"--with-ensurepip=upgrade\")\n\n modules_setup = os.path.join(self.cfg['start_dir'], 'Modules', 'Setup')\n if LooseVersion(self.version) < LooseVersion('3.8.0'):\n modules_setup += '.dist'\n\n libreadline = get_software_root('libreadline')\n if libreadline:\n ncurses = get_software_root('ncurses')\n if ncurses:\n readline_libdir = get_software_libdir('libreadline')\n ncurses_libdir = get_software_libdir('ncurses')\n readline_static_lib = os.path.join(libreadline, readline_libdir, 'libreadline.a')\n ncurses_static_lib = os.path.join(ncurses, ncurses_libdir, 'libncurses.a')\n readline = \"readline readline.c %s %s\" % (readline_static_lib, ncurses_static_lib)\n for line in fileinput.input(modules_setup, inplace='1', backup='.readline'):\n line = re.sub(r\"^#readline readline.c.*\", readline, line)\n sys.stdout.write(line)\n else:\n raise EasyBuildError(\"Both libreadline and ncurses are required to ensure readline support\")\n\n openssl = get_software_root('OpenSSL')\n if openssl:\n for line in fileinput.input(modules_setup, inplace='1', backup='.ssl'):\n line = re.sub(r\"^#SSL=.*\", \"SSL=%s\" % openssl, line)\n line = re.sub(r\"^#(\\s*-DUSE_SSL -I)\", r\"\\1\", line)\n line = re.sub(r\"^#(\\s*-L\\$\\(SSL\\)/lib )\", r\"\\1 -L$(SSL)/lib64 \", line)\n sys.stdout.write(line)\n\n tcl = get_software_root('Tcl')\n tk = get_software_root('Tk')\n if tcl and tk:\n tclver = get_software_version('Tcl')\n tkver = get_software_version('Tk')\n tcltk_maj_min_ver = '.'.join(tclver.split('.')[:2])\n if tcltk_maj_min_ver != '.'.join(tkver.split('.')[:2]):\n raise EasyBuildError(\"Tcl and Tk major/minor versions don't match: %s vs %s\", tclver, tkver)\n\n self.cfg.update('configopts', \"--with-tcltk-includes='-I%s/include -I%s/include'\" % (tcl, tk))\n\n tcl_libdir = 
os.path.join(tcl, get_software_libdir('Tcl'))\n tk_libdir = os.path.join(tk, get_software_libdir('Tk'))\n tcltk_libs = \"-L%(tcl_libdir)s -L%(tk_libdir)s -ltcl%(maj_min_ver)s -ltk%(maj_min_ver)s\" % {\n 'tcl_libdir': tcl_libdir,\n 'tk_libdir': tk_libdir,\n 'maj_min_ver': tcltk_maj_min_ver,\n }\n self.cfg.update('configopts', \"--with-tcltk-libs='%s'\" % tcltk_libs)\n\n # don't add user site directory to sys.path (equivalent to python -s)\n # This matters e.g. when python installs the bundled pip & setuptools (for >= 3.4)\n env.setvar('PYTHONNOUSERSITE', '1', verbose=False)\n\n super(EB_Python, self).configure_step()\n\n def build_step(self, *args, **kwargs):\n \"\"\"Custom build procedure for Python, ensure stack size limit is set to 'unlimited' (if desired).\"\"\"\n\n # make sure installation directory doesn't already exist when building with --rpath and\n # configuring with --enable-optimizations, since that leads to errors like:\n # ./python: symbol lookup error: ./python: undefined symbol: __gcov_indirect_call\n # see also https://bugs.python.org/issue29712\n enable_opts_flag = '--enable-optimizations'\n if build_option('rpath') and enable_opts_flag in self.cfg['configopts']:\n if os.path.exists(self.installdir):\n warning_msg = \"Removing existing installation directory '%s', \"\n warning_msg += \"because EasyBuild is configured to use RPATH linking \"\n warning_msg += \"and %s configure option is used.\" % enable_opts_flag\n print_warning(warning_msg % self.installdir)\n remove_dir(self.installdir)\n\n if self.cfg['ulimit_unlimited']:\n # determine current stack size limit\n (out, _) = run_cmd(\"ulimit -s\")\n curr_ulimit_s = out.strip()\n\n # figure out hard limit for stack size limit;\n # this determines whether or not we can use \"ulimit -s unlimited\"\n (out, _) = run_cmd(\"ulimit -s -H\")\n max_ulimit_s = out.strip()\n\n if curr_ulimit_s == UNLIMITED:\n self.log.info(\"Current stack size limit is %s: OK\", curr_ulimit_s)\n elif max_ulimit_s == UNLIMITED:\n self.log.info(\"Current stack size limit is %s, setting it to %s for build...\",\n curr_ulimit_s, UNLIMITED)\n self.cfg.update('prebuildopts', \"ulimit -s %s && \" % UNLIMITED)\n else:\n msg = \"Current stack size limit is %s, and can not be set to %s due to hard limit of %s;\"\n msg += \" setting stack size limit to %s instead, \"\n msg += \" this may break part of the compilation (e.g. hashlib)...\"\n print_warning(msg % (curr_ulimit_s, UNLIMITED, max_ulimit_s, max_ulimit_s))\n self.cfg.update('prebuildopts', \"ulimit -s %s && \" % max_ulimit_s)\n\n super(EB_Python, self).build_step(*args, **kwargs)\n\n def install_step(self):\n \"\"\"Extend make install to make sure that the 'python' command is present.\"\"\"\n\n # avoid that pip (ab)uses $HOME/.cache/pip\n # cfr. 
https://pip.pypa.io/en/stable/reference/pip_install/#caching\n env.setvar('XDG_CACHE_HOME', tempfile.gettempdir())\n self.log.info(\"Using %s as pip cache directory\", os.environ['XDG_CACHE_HOME'])\n\n super(EB_Python, self).install_step()\n\n python_binary_path = os.path.join(self.installdir, 'bin', 'python')\n if not os.path.isfile(python_binary_path):\n symlink(python_binary_path + self.pyshortver, python_binary_path)\n\n if self.cfg['ebpythonprefixes']:\n write_file(os.path.join(self.installdir, self.pythonpath, 'sitecustomize.py'), SITECUSTOMIZE)\n\n # symlink lib/python*/lib-dynload to lib64/python*/lib-dynload if it doesn't exist;\n # see https://github.com/easybuilders/easybuild-easyblocks/issues/1957\n lib_dynload = 'lib-dynload'\n python_lib_dynload = os.path.join('python%s' % self.pyshortver, lib_dynload)\n lib_dynload_path = os.path.join(self.installdir, 'lib', python_lib_dynload)\n if not os.path.exists(lib_dynload_path):\n lib64_dynload_path = os.path.join('lib64', python_lib_dynload)\n if os.path.exists(os.path.join(self.installdir, lib64_dynload_path)):\n lib_dynload_parent = os.path.dirname(lib_dynload_path)\n mkdir(lib_dynload_parent, parents=True)\n cwd = change_dir(lib_dynload_parent)\n # use relative path as target, to avoid hardcoding path to install directory\n target_lib_dynload = os.path.join('..', '..', lib64_dynload_path)\n symlink(target_lib_dynload, lib_dynload)\n change_dir(cwd)\n\n def sanity_check_step(self):\n \"\"\"Custom sanity check for Python.\"\"\"\n\n shlib_ext = get_shared_lib_ext()\n\n try:\n fake_mod_data = self.load_fake_module()\n except EasyBuildError as err:\n raise EasyBuildError(\"Loading fake module failed: %s\", err)\n\n abiflags = ''\n if LooseVersion(self.version) >= LooseVersion(\"3\"):\n run_cmd(\"which python\", log_all=True, simple=False, trace=False)\n cmd = 'python -c \"import sysconfig; print(sysconfig.get_config_var(\\'abiflags\\'));\"'\n (abiflags, _) = run_cmd(cmd, log_all=True, simple=False, trace=False)\n if not abiflags:\n raise EasyBuildError(\"Failed to determine abiflags: %s\", abiflags)\n else:\n abiflags = abiflags.strip()\n\n # make sure hashlib is installed correctly, there should be no errors/output when 'import hashlib' is run\n # (python will exit with 0 regardless of whether or not errors are printed...)\n # cfr. https://github.com/easybuilders/easybuild-easyconfigs/issues/6484\n cmd = \"python -c 'import hashlib'\"\n (out, _) = run_cmd(cmd)\n regex = re.compile('error', re.I)\n if regex.search(out):\n raise EasyBuildError(\"Found one or more errors in output of %s: %s\", cmd, out)\n else:\n self.log.info(\"No errors found in output of %s: %s\", cmd, out)\n\n pyver = 'python' + self.pyshortver\n custom_paths = {\n 'files': [os.path.join('bin', pyver), os.path.join('lib', 'lib' + pyver + abiflags + '.' 
+ shlib_ext)],\n 'dirs': [os.path.join('include', pyver + abiflags), os.path.join('lib', pyver, 'lib-dynload')],\n }\n\n # cleanup\n self.clean_up_fake_module(fake_mod_data)\n\n custom_commands = [\n \"python --version\",\n \"python -c 'import _ctypes'\", # make sure that foreign function interface (libffi) works\n \"python -c 'import _ssl'\", # make sure SSL support is enabled one way or another\n \"python -c 'import readline'\", # make sure readline support was built correctly\n ]\n\n if LooseVersion(self.version) >= LooseVersion('3.4.0'):\n # Check that pip and setuptools are installed\n custom_paths['files'].extend([\n os.path.join('bin', pip) for pip in ('pip', 'pip3', 'pip' + self.pyshortver)\n ])\n custom_commands.extend([\n \"python -c 'import pip'\",\n \"python -c 'import setuptools'\",\n ])\n\n if get_software_root('Tk'):\n # also check whether importing tkinter module works, name is different for Python v2.x and v3.x\n if LooseVersion(self.version) >= LooseVersion('3'):\n tkinter = 'tkinter'\n else:\n tkinter = 'Tkinter'\n custom_commands.append(\"python -c 'import %s'\" % tkinter)\n\n # check whether _tkinter*.so is found, exact filename doesn't matter\n tkinter_so = os.path.join(self.installdir, 'lib', pyver, 'lib-dynload', '_tkinter*.' + shlib_ext)\n tkinter_so_hits = glob.glob(tkinter_so)\n if len(tkinter_so_hits) == 1:\n self.log.info(\"Found exactly one _tkinter*.so: %s\", tkinter_so_hits[0])\n else:\n raise EasyBuildError(\"Expected to find exactly one _tkinter*.so: %s\", tkinter_so_hits)\n\n super(EB_Python, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)\n\n def make_module_extra(self, *args, **kwargs):\n \"\"\"Add path to sitecustomize.py to $PYTHONPATH\"\"\"\n txt = super(EB_Python, self).make_module_extra()\n\n if self.pythonpath:\n txt += self.module_generator.prepend_paths('PYTHONPATH', self.pythonpath)\n\n return txt\n", "path": "easybuild/easyblocks/p/python.py" } ]
diff --git a/easybuild/easyblocks/p/python.py b/easybuild/easyblocks/p/python.py index 64a2774642a..a167cc1b1fe 100644 --- a/easybuild/easyblocks/p/python.py +++ b/easybuild/easyblocks/p/python.py @@ -53,7 +53,7 @@ import easybuild.tools.toolchain as toolchain -EXTS_FILTER_PYTHON_PACKAGES = ('PYTHONNOUSERSITE=1 python -c "import %(ext_name)s"', "") +EXTS_FILTER_PYTHON_PACKAGES = ('python -c "import %(ext_name)s"', "") # magic value for unlimited stack size UNLIMITED = 'unlimited'
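For context on the prefix this diff removes: `PYTHONNOUSERSITE=1` tells Python not to add the per-user site-packages directory to `sys.path` (the document's own `configure_step` comment calls it "equivalent to python -s"), so the extension import check cannot succeed via packages installed in `~/.local`. A small standalone sketch, separate from the easyblock, to observe the variable's effect:

```python
import os
import subprocess
import sys

# Run a child interpreter with PYTHONNOUSERSITE set: site.ENABLE_USER_SITE
# becomes False, so ~/.local/lib/pythonX.Y/site-packages is not on sys.path.
env = dict(os.environ, PYTHONNOUSERSITE="1")
out = subprocess.check_output(
    [sys.executable, "-c", "import site; print(site.ENABLE_USER_SITE)"],
    env=env,
    text=True,
)
print(out.strip())  # expected: False
```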
aws__aws-sdk-pandas-2799
Question regarding wr.athena.to_iceberg
I am having a hard time trying to figure out where the to_iceberg method creates and subsequently destroys the temporary table needed for the INSERT INTO ... SELECT statement.

`wr.athena.to_iceberg(df=data, index=True, database=os.getenv("GLUE_DATABASE"), table=os.getenv("GLUE_TABLE").lower(), merge_cols=[time_measure], workgroup=athena_workgroup, encryption="SSE-KMS", kms_key=kms_key)`

This is how I am using the method. I have set up the IAM role with the necessary IAM permissions on the destination bucket and on the corresponding Glue database and table, as well as Lake Formation permissions on that database and table. The code still raises the following exceptions:

`botocore.errorfactory.AccessDeniedException: An error occurred (AccessDeniedException) when calling the GetTable operation: Insufficient Lake Formation permission(s) on temp_table_dca47e409f4a494781e27ea08cc1f74c`

`botocore.errorfactory.AccessDeniedException: An error occurred (AccessDeniedException) when calling the DeleteTable operation: Insufficient Lake Formation permission(s): Required Drop on temp_table_dca47e409f4a494781e27ea08cc1f74c`

I was wondering: could it be that the temp tables are created in a different database on which I did not grant enough Lake Formation permissions? I tried granting Lake Formation permissions to the relevant IAM role even on the default database, but nothing changed. Any help is appreciated!
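For reference, the temp table is not created in a separate database: in the source below, `to_iceberg` stages the DataFrame with `s3.to_parquet(..., database=database, table=temp_table, ...)` and drops it with `catalog.delete_table_if_exists(database=database, table=temp_table, ...)`, i.e. in the same `database` argument, under a generated `temp_table_<uuid>` name. The calling role therefore needs Lake Formation permissions that cover any table in that database, not only the named target table, because the temp table name contains a random UUID and cannot be granted on individually in advance. A hedged sketch of grants that would cover the two failing operations (GetTable requires DESCRIBE, DeleteTable requires DROP); the role ARN and database name are placeholders:

```python
import boto3

# Hypothetical identifiers -- substitute your own role ARN and database.
ROLE_ARN = "arn:aws:iam::123456789012:role/my-wrangler-role"
DATABASE = "my_glue_database"

lf = boto3.client("lakeformation")

# Allow creating the staging table inside the database.
lf.grant_permissions(
    Principal={"DataLakePrincipalIdentifier": ROLE_ARN},
    Resource={"Database": {"Name": DATABASE}},
    Permissions=["CREATE_TABLE", "DESCRIBE"],
)

# Allow describing, reading, and dropping any table in the database,
# which covers the generated temp_table_<uuid> names.
lf.grant_permissions(
    Principal={"DataLakePrincipalIdentifier": ROLE_ARN},
    Resource={"Table": {"DatabaseName": DATABASE, "TableWildcard": {}}},
    Permissions=["SELECT", "DESCRIBE", "DROP"],
)
```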
[ { "content": "\"\"\"Amazon Athena Module containing all to_* write functions.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nimport re\nimport typing\nimport uuid\nfrom typing import Any, Dict, Literal, TypedDict, cast\n\nimport boto3\nimport pandas as pd\n\nfrom awswrangler import _data_types, _utils, catalog, exceptions, s3\nfrom awswrangler._config import apply_configs\nfrom awswrangler.athena._executions import wait_query\nfrom awswrangler.athena._utils import (\n _get_workgroup_config,\n _start_query_execution,\n _WorkGroupConfig,\n)\nfrom awswrangler.typing import GlueTableSettings\n\n_logger: logging.Logger = logging.getLogger(__name__)\n\n\ndef _create_iceberg_table(\n df: pd.DataFrame,\n database: str,\n table: str,\n path: str | None,\n wg_config: _WorkGroupConfig,\n partition_cols: list[str] | None,\n additional_table_properties: dict[str, Any] | None,\n index: bool = False,\n data_source: str | None = None,\n workgroup: str | None = None,\n s3_output: str | None = None,\n encryption: str | None = None,\n kms_key: str | None = None,\n boto3_session: boto3.Session | None = None,\n dtype: dict[str, str] | None = None,\n columns_comments: dict[str, Any] | None = None,\n) -> None:\n if not path:\n raise exceptions.InvalidArgumentValue(\"Must specify table location to create the table.\")\n\n columns_types, _ = catalog.extract_athena_types(df=df, index=index, dtype=dtype)\n cols_str: str = \", \".join(\n [\n f\"{k} {v}\"\n if (columns_comments is None or columns_comments.get(k) is None)\n else f\"{k} {v} COMMENT '{columns_comments[k]}'\"\n for k, v in columns_types.items()\n ]\n )\n partition_cols_str: str = f\"PARTITIONED BY ({', '.join([col for col in partition_cols])})\" if partition_cols else \"\"\n table_properties_str: str = (\n \", \" + \", \".join([f\"'{key}'='{value}'\" for key, value in additional_table_properties.items()])\n if additional_table_properties\n else \"\"\n )\n\n create_sql: str = (\n f\"CREATE TABLE IF NOT EXISTS `{table}` ({cols_str}) \"\n f\"{partition_cols_str} \"\n f\"LOCATION '{path}' \"\n f\"TBLPROPERTIES ('table_type' ='ICEBERG', 'format'='parquet'{table_properties_str})\"\n )\n\n query_execution_id: str = _start_query_execution(\n sql=create_sql,\n workgroup=workgroup,\n wg_config=wg_config,\n database=database,\n data_source=data_source,\n s3_output=s3_output,\n encryption=encryption,\n kms_key=kms_key,\n boto3_session=boto3_session,\n )\n wait_query(query_execution_id=query_execution_id, boto3_session=boto3_session)\n\n\nclass _SchemaChanges(TypedDict):\n new_columns: dict[str, str]\n modified_columns: dict[str, str]\n missing_columns: dict[str, str]\n\n\ndef _determine_differences(\n df: pd.DataFrame,\n database: str,\n table: str,\n index: bool,\n partition_cols: list[str] | None,\n boto3_session: boto3.Session | None,\n dtype: dict[str, str] | None,\n catalog_id: str | None,\n) -> tuple[_SchemaChanges, list[str]]:\n if partition_cols:\n # Remove columns using partition transform function,\n # as they won't be found in the DataFrame or the Glue catalog.\n # Examples include day(column_name) and truncate(10, column_name).\n pattern = r\"[A-Za-z0-9_]+\\(.+\\)\"\n partition_cols = [col for col in partition_cols if re.match(pattern, col) is None]\n\n frame_columns_types, frame_partitions_types = _data_types.athena_types_from_pandas_partitioned(\n df=df, index=index, partition_cols=partition_cols, dtype=dtype\n )\n frame_columns_types.update(frame_partitions_types)\n\n # lowercase DataFrame columns, as all the column names from Athena 
will be lowercased\n frame_columns_types = {k.lower(): v for k, v in frame_columns_types.items()}\n\n catalog_column_types = typing.cast(\n Dict[str, str],\n catalog.get_table_types(database=database, table=table, catalog_id=catalog_id, boto3_session=boto3_session),\n )\n\n original_column_names = set(catalog_column_types)\n new_column_names = set(frame_columns_types)\n\n new_columns = {col: frame_columns_types[col] for col in new_column_names - original_column_names}\n missing_columns = {col: catalog_column_types[col] for col in original_column_names - new_column_names}\n\n columns_to_change = [\n col\n for col in original_column_names.intersection(new_column_names)\n if frame_columns_types[col] != catalog_column_types[col]\n ]\n modified_columns = {col: frame_columns_types[col] for col in columns_to_change}\n\n return (\n _SchemaChanges(new_columns=new_columns, modified_columns=modified_columns, missing_columns=missing_columns),\n [key for key in catalog_column_types],\n )\n\n\ndef _alter_iceberg_table(\n database: str,\n table: str,\n schema_changes: _SchemaChanges,\n fill_missing_columns_in_df: bool,\n wg_config: _WorkGroupConfig,\n data_source: str | None = None,\n workgroup: str | None = None,\n s3_output: str | None = None,\n encryption: str | None = None,\n kms_key: str | None = None,\n boto3_session: boto3.Session | None = None,\n) -> None:\n sql_statements: list[str] = []\n\n if schema_changes[\"new_columns\"]:\n sql_statements += _alter_iceberg_table_add_columns_sql(\n table=table,\n columns_to_add=schema_changes[\"new_columns\"],\n )\n\n if schema_changes[\"modified_columns\"]:\n sql_statements += _alter_iceberg_table_change_columns_sql(\n table=table,\n columns_to_change=schema_changes[\"modified_columns\"],\n )\n\n if schema_changes[\"missing_columns\"] and not fill_missing_columns_in_df:\n raise exceptions.InvalidArgumentCombination(\n f\"Dropping columns of Iceberg tables is not supported: {schema_changes['missing_columns']}. 
\"\n \"Please use `fill_missing_columns_in_df=True` to fill missing columns with N/A.\"\n )\n\n for statement in sql_statements:\n query_execution_id: str = _start_query_execution(\n sql=statement,\n workgroup=workgroup,\n wg_config=wg_config,\n database=database,\n data_source=data_source,\n s3_output=s3_output,\n encryption=encryption,\n kms_key=kms_key,\n boto3_session=boto3_session,\n )\n wait_query(query_execution_id=query_execution_id, boto3_session=boto3_session)\n\n\ndef _alter_iceberg_table_add_columns_sql(\n table: str,\n columns_to_add: dict[str, str],\n) -> list[str]:\n add_cols_str = \", \".join([f\"{col_name} {columns_to_add[col_name]}\" for col_name in columns_to_add])\n\n return [f\"ALTER TABLE {table} ADD COLUMNS ({add_cols_str})\"]\n\n\ndef _alter_iceberg_table_change_columns_sql(\n table: str,\n columns_to_change: dict[str, str],\n) -> list[str]:\n sql_statements = []\n\n for col_name, col_type in columns_to_change.items():\n sql_statements.append(f\"ALTER TABLE {table} CHANGE COLUMN {col_name} {col_name} {col_type}\")\n\n return sql_statements\n\n\ndef _validate_args(\n df: pd.DataFrame,\n temp_path: str | None,\n wg_config: _WorkGroupConfig,\n mode: Literal[\"append\", \"overwrite\", \"overwrite_partitions\"],\n partition_cols: list[str] | None,\n merge_cols: list[str] | None,\n) -> None:\n if df.empty is True:\n raise exceptions.EmptyDataFrame(\"DataFrame cannot be empty.\")\n\n if not temp_path and not wg_config.s3_output:\n raise exceptions.InvalidArgumentCombination(\n \"Either path or workgroup path must be specified to store the temporary results.\"\n )\n\n if mode == \"overwrite_partitions\":\n if not partition_cols:\n raise exceptions.InvalidArgumentCombination(\n \"When mode is 'overwrite_partitions' partition_cols must be specified.\"\n )\n if merge_cols:\n raise exceptions.InvalidArgumentCombination(\n \"When mode is 'overwrite_partitions' merge_cols must not be specified.\"\n )\n\n\n@apply_configs\n@_utils.validate_distributed_kwargs(\n unsupported_kwargs=[\"boto3_session\", \"s3_additional_kwargs\"],\n)\ndef to_iceberg(\n df: pd.DataFrame,\n database: str,\n table: str,\n temp_path: str | None = None,\n index: bool = False,\n table_location: str | None = None,\n partition_cols: list[str] | None = None,\n merge_cols: list[str] | None = None,\n keep_files: bool = True,\n data_source: str | None = None,\n s3_output: str | None = None,\n workgroup: str = \"primary\",\n mode: Literal[\"append\", \"overwrite\", \"overwrite_partitions\"] = \"append\",\n encryption: str | None = None,\n kms_key: str | None = None,\n boto3_session: boto3.Session | None = None,\n s3_additional_kwargs: dict[str, Any] | None = None,\n additional_table_properties: dict[str, Any] | None = None,\n dtype: dict[str, str] | None = None,\n catalog_id: str | None = None,\n schema_evolution: bool = False,\n fill_missing_columns_in_df: bool = True,\n glue_table_settings: GlueTableSettings | None = None,\n) -> None:\n \"\"\"\n Insert into Athena Iceberg table using INSERT INTO ... SELECT. Will create Iceberg table if it does not exist.\n\n Creates temporary external table, writes staged files and inserts via INSERT INTO ... SELECT.\n\n Parameters\n ----------\n df : pd.DataFrame\n Pandas DataFrame.\n database : str\n AWS Glue/Athena database name - It is only the origin database from where the query will be launched.\n You can still using and mixing several databases writing the full table name within the sql\n (e.g. 
`database.table`).\n table : str\n AWS Glue/Athena table name.\n temp_path : str\n Amazon S3 location to store temporary results. Workgroup config will be used if not provided.\n index: bool\n Should consider the DataFrame index as a column?.\n table_location : str, optional\n Amazon S3 location for the table. Will only be used to create a new table if it does not exist.\n partition_cols: List[str], optional\n List of column names that will be used to create partitions, including support for transform\n functions (e.g. \"day(ts)\").\n\n https://docs.aws.amazon.com/athena/latest/ug/querying-iceberg-creating-tables.html#querying-iceberg-partitioning\n merge_cols: List[str], optional\n List of column names that will be used for conditional inserts and updates.\n\n https://docs.aws.amazon.com/athena/latest/ug/merge-into-statement.html\n keep_files : bool\n Whether staging files produced by Athena are retained. 'True' by default.\n data_source : str, optional\n Data Source / Catalog name. If None, 'AwsDataCatalog' will be used by default.\n s3_output : str, optional\n Amazon S3 path used for query execution.\n workgroup : str\n Athena workgroup. Primary by default.\n mode: str\n ``append`` (default), ``overwrite``, ``overwrite_partitions``.\n encryption : str, optional\n Valid values: [None, 'SSE_S3', 'SSE_KMS']. Notice: 'CSE_KMS' is not supported.\n kms_key : str, optional\n For SSE-KMS, this is the KMS key ARN or ID.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n s3_additional_kwargs: dict[str, Any], optional\n Forwarded to botocore requests.\n e.g. s3_additional_kwargs={'RequestPayer': 'requester'}\n additional_table_properties: dict[str, Any], optional\n Additional table properties.\n e.g. additional_table_properties={'write_target_data_file_size_bytes': '536870912'}\n\n https://docs.aws.amazon.com/athena/latest/ug/querying-iceberg-creating-tables.html#querying-iceberg-table-properties\n dtype: dict[str, str], optional\n Dictionary of columns names and Athena/Glue types to be casted.\n Useful when you have columns with undetermined or mixed data types.\n e.g. {'col name': 'bigint', 'col2 name': 'int'}\n catalog_id : str, optional\n The ID of the Data Catalog from which to retrieve Databases.\n If none is provided, the AWS account ID is used by default\n schema_evolution: bool, optional\n If ``True`` allows schema evolution for new columns or changes in column types.\n Columns missing from the DataFrame that are present in the Iceberg schema\n will throw an error unless ``fill_missing_columns_in_df`` is set to ``True``.\n Default is ``False``.\n fill_missing_columns_in_df: bool, optional\n If ``True``, fill columns that was missing in the DataFrame with ``NULL`` values.\n Default is ``True``.\n columns_comments: GlueTableSettings, optional\n Glue/Athena catalog: Settings for writing to the Glue table.\n Currently only the 'columns_comments' attribute is supported for this function.\n Columns comments can only be added with this function when creating a new table.\n\n Returns\n -------\n None\n\n Examples\n --------\n Insert into an existing Iceberg table\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.athena.to_iceberg(\n ... df=pd.DataFrame({'col': [1, 2, 3]}),\n ... database='my_database',\n ... table='my_table',\n ... temp_path='s3://bucket/temp/',\n ... 
)\n\n Create Iceberg table and insert data (table doesn't exist, requires table_location)\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.athena.to_iceberg(\n ... df=pd.DataFrame({'col': [1, 2, 3]}),\n ... database='my_database',\n ... table='my_table2',\n ... table_location='s3://bucket/my_table2/',\n ... temp_path='s3://bucket/temp/',\n ... )\n\n \"\"\"\n wg_config: _WorkGroupConfig = _get_workgroup_config(session=boto3_session, workgroup=workgroup)\n temp_table: str = f\"temp_table_{uuid.uuid4().hex}\"\n\n _validate_args(\n df=df,\n temp_path=temp_path,\n wg_config=wg_config,\n mode=mode,\n partition_cols=partition_cols,\n merge_cols=merge_cols,\n )\n\n glue_table_settings = cast(\n GlueTableSettings,\n glue_table_settings if glue_table_settings else {},\n )\n\n try:\n # Create Iceberg table if it doesn't exist\n if not catalog.does_table_exist(\n database=database, table=table, boto3_session=boto3_session, catalog_id=catalog_id\n ):\n _create_iceberg_table(\n df=df,\n database=database,\n table=table,\n path=table_location,\n wg_config=wg_config,\n partition_cols=partition_cols,\n additional_table_properties=additional_table_properties,\n index=index,\n data_source=data_source,\n workgroup=workgroup,\n s3_output=s3_output,\n encryption=encryption,\n kms_key=kms_key,\n boto3_session=boto3_session,\n dtype=dtype,\n columns_comments=glue_table_settings.get(\"columns_comments\"),\n )\n else:\n schema_differences, catalog_cols = _determine_differences(\n df=df,\n database=database,\n table=table,\n index=index,\n partition_cols=partition_cols,\n boto3_session=boto3_session,\n dtype=dtype,\n catalog_id=catalog_id,\n )\n\n # Add missing columns to the DataFrame\n if fill_missing_columns_in_df and schema_differences[\"missing_columns\"]:\n for col_name, col_type in schema_differences[\"missing_columns\"].items():\n df[col_name] = None\n df[col_name] = df[col_name].astype(_data_types.athena2pandas(col_type))\n\n schema_differences[\"missing_columns\"] = {}\n\n # Ensure that the ordering of the DF is the same as in the catalog.\n # This is required for the INSERT command to work.\n df = df[catalog_cols]\n\n if schema_evolution is False and any([schema_differences[x] for x in schema_differences]): # type: ignore[literal-required]\n raise exceptions.InvalidArgumentValue(f\"Schema change detected: {schema_differences}\")\n\n _alter_iceberg_table(\n database=database,\n table=table,\n schema_changes=schema_differences,\n fill_missing_columns_in_df=fill_missing_columns_in_df,\n wg_config=wg_config,\n data_source=data_source,\n workgroup=workgroup,\n s3_output=s3_output,\n encryption=encryption,\n kms_key=kms_key,\n boto3_session=boto3_session,\n )\n\n # if mode == \"overwrite_partitions\", drop matched partitions\n if mode == \"overwrite_partitions\":\n delete_from_iceberg_table(\n df=df,\n database=database,\n table=table,\n merge_cols=partition_cols, # type: ignore[arg-type]\n temp_path=temp_path,\n keep_files=False,\n data_source=data_source,\n workgroup=workgroup,\n encryption=encryption,\n kms_key=kms_key,\n boto3_session=boto3_session,\n s3_additional_kwargs=s3_additional_kwargs,\n catalog_id=catalog_id,\n )\n # if mode == \"overwrite\", delete whole data from table (but not table itself)\n elif mode == \"overwrite\":\n delete_sql_statement = f\"DELETE FROM {table}\"\n delete_query_execution_id: str = _start_query_execution(\n sql=delete_sql_statement,\n workgroup=workgroup,\n wg_config=wg_config,\n database=database,\n data_source=data_source,\n encryption=encryption,\n 
kms_key=kms_key,\n boto3_session=boto3_session,\n )\n wait_query(query_execution_id=delete_query_execution_id, boto3_session=boto3_session)\n\n # Create temporary external table, write the results\n s3.to_parquet(\n df=df,\n path=temp_path or wg_config.s3_output,\n dataset=True,\n database=database,\n table=temp_table,\n boto3_session=boto3_session,\n s3_additional_kwargs=s3_additional_kwargs,\n dtype=dtype,\n catalog_id=catalog_id,\n glue_table_settings=glue_table_settings,\n )\n\n # Insert or merge into Iceberg table\n sql_statement: str\n if merge_cols:\n sql_statement = f\"\"\"\n MERGE INTO \"{database}\".\"{table}\" target\n USING \"{database}\".\"{temp_table}\" source\n ON {' AND '.join([f'target.\"{x}\" = source.\"{x}\"' for x in merge_cols])}\n WHEN MATCHED THEN\n UPDATE SET {', '.join([f'\"{x}\" = source.\"{x}\"' for x in df.columns])}\n WHEN NOT MATCHED THEN\n INSERT ({', '.join([f'\"{x}\"' for x in df.columns])})\n VALUES ({', '.join([f'source.\"{x}\"' for x in df.columns])})\n \"\"\"\n else:\n sql_statement = f\"\"\"\n INSERT INTO \"{database}\".\"{table}\" ({', '.join([f'\"{x}\"' for x in df.columns])})\n SELECT {', '.join([f'\"{x}\"' for x in df.columns])}\n FROM \"{database}\".\"{temp_table}\"\n \"\"\"\n\n query_execution_id: str = _start_query_execution(\n sql=sql_statement,\n workgroup=workgroup,\n wg_config=wg_config,\n database=database,\n data_source=data_source,\n s3_output=s3_output,\n encryption=encryption,\n kms_key=kms_key,\n boto3_session=boto3_session,\n )\n wait_query(query_execution_id=query_execution_id, boto3_session=boto3_session)\n\n except Exception as ex:\n _logger.error(ex)\n\n raise\n finally:\n catalog.delete_table_if_exists(\n database=database, table=temp_table, boto3_session=boto3_session, catalog_id=catalog_id\n )\n\n if keep_files is False:\n s3.delete_objects(\n path=temp_path or wg_config.s3_output, # type: ignore[arg-type]\n boto3_session=boto3_session,\n s3_additional_kwargs=s3_additional_kwargs,\n )\n\n\n@apply_configs\n@_utils.validate_distributed_kwargs(\n unsupported_kwargs=[\"boto3_session\", \"s3_additional_kwargs\"],\n)\ndef delete_from_iceberg_table(\n df: pd.DataFrame,\n database: str,\n table: str,\n merge_cols: list[str],\n temp_path: str | None = None,\n keep_files: bool = True,\n data_source: str | None = None,\n workgroup: str = \"primary\",\n encryption: str | None = None,\n kms_key: str | None = None,\n boto3_session: boto3.Session | None = None,\n s3_additional_kwargs: dict[str, Any] | None = None,\n catalog_id: str | None = None,\n) -> None:\n \"\"\"\n Delete rows from an Iceberg table.\n\n Creates temporary external table, writes staged files and then deletes any rows which match the contents of the temporary table.\n\n Parameters\n ----------\n df: pandas.DataFrame\n Pandas DataFrame containing the IDs of rows that are to be deleted from the Iceberg table.\n database: str\n Database name.\n table: str\n Table name.\n merge_cols: list[str]\n List of columns to be used to determine which rows of the Iceberg table should be deleted.\n\n `MERGE INTO <https://docs.aws.amazon.com/athena/latest/ug/merge-into-statement.html>`_\n temp_path: str, optional\n S3 path to temporarily store the DataFrame.\n keep_files: bool\n Whether staging files produced by Athena are retained. ``True`` by default.\n data_source: str, optional\n The AWS KMS key ID or alias used to encrypt the data.\n workgroup: str, optional\n Athena workgroup name.\n encryption: str, optional\n Valid values: [``None``, ``\"SSE_S3\"``, ``\"SSE_KMS\"``]. 
Notice: ``\"CSE_KMS\"`` is not supported.\n kms_key: str, optional\n For SSE-KMS, this is the KMS key ARN or ID.\n boto3_session: boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if ``boto3_session`` receive None.\n s3_additional_kwargs: Optional[Dict[str, Any]]\n Forwarded to botocore requests.\n e.g. ```s3_additional_kwargs={\"RequestPayer\": \"requester\"}```\n catalog_id: str, optional\n The ID of the Data Catalog which contains the database and table.\n If none is provided, the AWS account ID is used by default.\n\n Returns\n -------\n None\n\n Examples\n --------\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> df = pd.DataFrame({\"id\": [1, 2, 3], \"col\": [\"foo\", \"bar\", \"baz\"]})\n >>> wr.athena.to_iceberg(\n ... df=df,\n ... database=\"my_database\",\n ... table=\"my_table\",\n ... temp_path=\"s3://bucket/temp/\",\n ... )\n >>> df_delete = pd.DataFrame({\"id\": [1, 3]})\n >>> wr.athena.delete_from_iceberg_table(\n ... df=df_delete,\n ... database=\"my_database\",\n ... table=\"my_table\",\n ... merge_cols=[\"id\"],\n ... )\n >>> wr.athena.read_sql_table(table=\"my_table\", database=\"my_database\")\n id col\n 0 2 bar\n \"\"\"\n if df.empty is True:\n raise exceptions.EmptyDataFrame(\"DataFrame cannot be empty.\")\n\n if not merge_cols:\n raise exceptions.InvalidArgumentValue(\"Merge columns must be specified.\")\n\n wg_config: _WorkGroupConfig = _get_workgroup_config(session=boto3_session, workgroup=workgroup)\n temp_table: str = f\"temp_table_{uuid.uuid4().hex}\"\n\n if not temp_path and not wg_config.s3_output:\n raise exceptions.InvalidArgumentCombination(\n \"Either path or workgroup path must be specified to store the temporary results.\"\n )\n\n if not catalog.does_table_exist(database=database, table=table, boto3_session=boto3_session, catalog_id=catalog_id):\n raise exceptions.InvalidTable(f\"Table {table} does not exist in database {database}.\")\n\n df = df[merge_cols].drop_duplicates(ignore_index=True)\n\n try:\n # Create temporary external table, write the results\n s3.to_parquet(\n df=df,\n path=temp_path or wg_config.s3_output,\n dataset=True,\n database=database,\n table=temp_table,\n boto3_session=boto3_session,\n s3_additional_kwargs=s3_additional_kwargs,\n catalog_id=catalog_id,\n index=False,\n )\n\n sql_statement = f\"\"\"\n MERGE INTO \"{database}\".\"{table}\" target\n USING \"{database}\".\"{temp_table}\" source\n ON {' AND '.join([f'target.\"{x}\" = source.\"{x}\"' for x in merge_cols])}\n WHEN MATCHED THEN\n DELETE\n \"\"\"\n\n query_execution_id: str = _start_query_execution(\n sql=sql_statement,\n workgroup=workgroup,\n wg_config=wg_config,\n database=database,\n data_source=data_source,\n encryption=encryption,\n kms_key=kms_key,\n boto3_session=boto3_session,\n )\n wait_query(query_execution_id=query_execution_id, boto3_session=boto3_session)\n\n except Exception as ex:\n _logger.error(ex)\n\n raise\n\n finally:\n catalog.delete_table_if_exists(\n database=database, table=temp_table, boto3_session=boto3_session, catalog_id=catalog_id\n )\n\n if keep_files is False:\n s3.delete_objects(\n path=temp_path or wg_config.s3_output, # type: ignore[arg-type]\n boto3_session=boto3_session,\n s3_additional_kwargs=s3_additional_kwargs,\n )\n", "path": "awswrangler/athena/_write_iceberg.py" } ]
[ { "content": "\"\"\"Amazon Athena Module containing all to_* write functions.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nimport re\nimport typing\nimport uuid\nfrom typing import Any, Dict, Literal, TypedDict, cast\n\nimport boto3\nimport pandas as pd\n\nfrom awswrangler import _data_types, _utils, catalog, exceptions, s3\nfrom awswrangler._config import apply_configs\nfrom awswrangler.athena._executions import wait_query\nfrom awswrangler.athena._utils import (\n _get_workgroup_config,\n _start_query_execution,\n _WorkGroupConfig,\n)\nfrom awswrangler.typing import GlueTableSettings\n\n_logger: logging.Logger = logging.getLogger(__name__)\n\n\ndef _create_iceberg_table(\n df: pd.DataFrame,\n database: str,\n table: str,\n path: str | None,\n wg_config: _WorkGroupConfig,\n partition_cols: list[str] | None,\n additional_table_properties: dict[str, Any] | None,\n index: bool = False,\n data_source: str | None = None,\n workgroup: str | None = None,\n s3_output: str | None = None,\n encryption: str | None = None,\n kms_key: str | None = None,\n boto3_session: boto3.Session | None = None,\n dtype: dict[str, str] | None = None,\n columns_comments: dict[str, Any] | None = None,\n) -> None:\n if not path:\n raise exceptions.InvalidArgumentValue(\"Must specify table location to create the table.\")\n\n columns_types, _ = catalog.extract_athena_types(df=df, index=index, dtype=dtype)\n cols_str: str = \", \".join(\n [\n f\"{k} {v}\"\n if (columns_comments is None or columns_comments.get(k) is None)\n else f\"{k} {v} COMMENT '{columns_comments[k]}'\"\n for k, v in columns_types.items()\n ]\n )\n partition_cols_str: str = f\"PARTITIONED BY ({', '.join([col for col in partition_cols])})\" if partition_cols else \"\"\n table_properties_str: str = (\n \", \" + \", \".join([f\"'{key}'='{value}'\" for key, value in additional_table_properties.items()])\n if additional_table_properties\n else \"\"\n )\n\n create_sql: str = (\n f\"CREATE TABLE IF NOT EXISTS `{table}` ({cols_str}) \"\n f\"{partition_cols_str} \"\n f\"LOCATION '{path}' \"\n f\"TBLPROPERTIES ('table_type' ='ICEBERG', 'format'='parquet'{table_properties_str})\"\n )\n\n query_execution_id: str = _start_query_execution(\n sql=create_sql,\n workgroup=workgroup,\n wg_config=wg_config,\n database=database,\n data_source=data_source,\n s3_output=s3_output,\n encryption=encryption,\n kms_key=kms_key,\n boto3_session=boto3_session,\n )\n wait_query(query_execution_id=query_execution_id, boto3_session=boto3_session)\n\n\nclass _SchemaChanges(TypedDict):\n new_columns: dict[str, str]\n modified_columns: dict[str, str]\n missing_columns: dict[str, str]\n\n\ndef _determine_differences(\n df: pd.DataFrame,\n database: str,\n table: str,\n index: bool,\n partition_cols: list[str] | None,\n boto3_session: boto3.Session | None,\n dtype: dict[str, str] | None,\n catalog_id: str | None,\n) -> tuple[_SchemaChanges, list[str]]:\n if partition_cols:\n # Remove columns using partition transform function,\n # as they won't be found in the DataFrame or the Glue catalog.\n # Examples include day(column_name) and truncate(10, column_name).\n pattern = r\"[A-Za-z0-9_]+\\(.+\\)\"\n partition_cols = [col for col in partition_cols if re.match(pattern, col) is None]\n\n frame_columns_types, frame_partitions_types = _data_types.athena_types_from_pandas_partitioned(\n df=df, index=index, partition_cols=partition_cols, dtype=dtype\n )\n frame_columns_types.update(frame_partitions_types)\n\n # lowercase DataFrame columns, as all the column names from Athena 
will be lowercased\n frame_columns_types = {k.lower(): v for k, v in frame_columns_types.items()}\n\n catalog_column_types = typing.cast(\n Dict[str, str],\n catalog.get_table_types(database=database, table=table, catalog_id=catalog_id, boto3_session=boto3_session),\n )\n\n original_column_names = set(catalog_column_types)\n new_column_names = set(frame_columns_types)\n\n new_columns = {col: frame_columns_types[col] for col in new_column_names - original_column_names}\n missing_columns = {col: catalog_column_types[col] for col in original_column_names - new_column_names}\n\n columns_to_change = [\n col\n for col in original_column_names.intersection(new_column_names)\n if frame_columns_types[col] != catalog_column_types[col]\n ]\n modified_columns = {col: frame_columns_types[col] for col in columns_to_change}\n\n return (\n _SchemaChanges(new_columns=new_columns, modified_columns=modified_columns, missing_columns=missing_columns),\n [key for key in catalog_column_types],\n )\n\n\ndef _alter_iceberg_table(\n database: str,\n table: str,\n schema_changes: _SchemaChanges,\n fill_missing_columns_in_df: bool,\n wg_config: _WorkGroupConfig,\n data_source: str | None = None,\n workgroup: str | None = None,\n s3_output: str | None = None,\n encryption: str | None = None,\n kms_key: str | None = None,\n boto3_session: boto3.Session | None = None,\n) -> None:\n sql_statements: list[str] = []\n\n if schema_changes[\"new_columns\"]:\n sql_statements += _alter_iceberg_table_add_columns_sql(\n table=table,\n columns_to_add=schema_changes[\"new_columns\"],\n )\n\n if schema_changes[\"modified_columns\"]:\n sql_statements += _alter_iceberg_table_change_columns_sql(\n table=table,\n columns_to_change=schema_changes[\"modified_columns\"],\n )\n\n if schema_changes[\"missing_columns\"] and not fill_missing_columns_in_df:\n raise exceptions.InvalidArgumentCombination(\n f\"Dropping columns of Iceberg tables is not supported: {schema_changes['missing_columns']}. 
\"\n \"Please use `fill_missing_columns_in_df=True` to fill missing columns with N/A.\"\n )\n\n for statement in sql_statements:\n query_execution_id: str = _start_query_execution(\n sql=statement,\n workgroup=workgroup,\n wg_config=wg_config,\n database=database,\n data_source=data_source,\n s3_output=s3_output,\n encryption=encryption,\n kms_key=kms_key,\n boto3_session=boto3_session,\n )\n wait_query(query_execution_id=query_execution_id, boto3_session=boto3_session)\n\n\ndef _alter_iceberg_table_add_columns_sql(\n table: str,\n columns_to_add: dict[str, str],\n) -> list[str]:\n add_cols_str = \", \".join([f\"{col_name} {columns_to_add[col_name]}\" for col_name in columns_to_add])\n\n return [f\"ALTER TABLE {table} ADD COLUMNS ({add_cols_str})\"]\n\n\ndef _alter_iceberg_table_change_columns_sql(\n table: str,\n columns_to_change: dict[str, str],\n) -> list[str]:\n sql_statements = []\n\n for col_name, col_type in columns_to_change.items():\n sql_statements.append(f\"ALTER TABLE {table} CHANGE COLUMN {col_name} {col_name} {col_type}\")\n\n return sql_statements\n\n\ndef _validate_args(\n df: pd.DataFrame,\n temp_path: str | None,\n wg_config: _WorkGroupConfig,\n mode: Literal[\"append\", \"overwrite\", \"overwrite_partitions\"],\n partition_cols: list[str] | None,\n merge_cols: list[str] | None,\n) -> None:\n if df.empty is True:\n raise exceptions.EmptyDataFrame(\"DataFrame cannot be empty.\")\n\n if not temp_path and not wg_config.s3_output:\n raise exceptions.InvalidArgumentCombination(\n \"Either path or workgroup path must be specified to store the temporary results.\"\n )\n\n if mode == \"overwrite_partitions\":\n if not partition_cols:\n raise exceptions.InvalidArgumentCombination(\n \"When mode is 'overwrite_partitions' partition_cols must be specified.\"\n )\n if merge_cols:\n raise exceptions.InvalidArgumentCombination(\n \"When mode is 'overwrite_partitions' merge_cols must not be specified.\"\n )\n\n\n@apply_configs\n@_utils.validate_distributed_kwargs(\n unsupported_kwargs=[\"boto3_session\", \"s3_additional_kwargs\"],\n)\ndef to_iceberg(\n df: pd.DataFrame,\n database: str,\n table: str,\n temp_path: str | None = None,\n index: bool = False,\n table_location: str | None = None,\n partition_cols: list[str] | None = None,\n merge_cols: list[str] | None = None,\n keep_files: bool = True,\n data_source: str | None = None,\n s3_output: str | None = None,\n workgroup: str = \"primary\",\n mode: Literal[\"append\", \"overwrite\", \"overwrite_partitions\"] = \"append\",\n encryption: str | None = None,\n kms_key: str | None = None,\n boto3_session: boto3.Session | None = None,\n s3_additional_kwargs: dict[str, Any] | None = None,\n additional_table_properties: dict[str, Any] | None = None,\n dtype: dict[str, str] | None = None,\n catalog_id: str | None = None,\n schema_evolution: bool = False,\n fill_missing_columns_in_df: bool = True,\n glue_table_settings: GlueTableSettings | None = None,\n) -> None:\n \"\"\"\n Insert into Athena Iceberg table using INSERT INTO ... SELECT. Will create Iceberg table if it does not exist.\n\n Creates temporary external table, writes staged files and inserts via INSERT INTO ... SELECT.\n\n Parameters\n ----------\n df : pd.DataFrame\n Pandas DataFrame.\n database : str\n AWS Glue/Athena database name - It is only the origin database from where the query will be launched.\n You can still using and mixing several databases writing the full table name within the sql\n (e.g. 
`database.table`).\n table : str\n AWS Glue/Athena table name.\n temp_path : str\n Amazon S3 location to store temporary results. Workgroup config will be used if not provided.\n index: bool\n Should consider the DataFrame index as a column?.\n table_location : str, optional\n Amazon S3 location for the table. Will only be used to create a new table if it does not exist.\n partition_cols: List[str], optional\n List of column names that will be used to create partitions, including support for transform\n functions (e.g. \"day(ts)\").\n\n https://docs.aws.amazon.com/athena/latest/ug/querying-iceberg-creating-tables.html#querying-iceberg-partitioning\n merge_cols: List[str], optional\n List of column names that will be used for conditional inserts and updates.\n\n https://docs.aws.amazon.com/athena/latest/ug/merge-into-statement.html\n keep_files : bool\n Whether staging files produced by Athena are retained. 'True' by default.\n data_source : str, optional\n Data Source / Catalog name. If None, 'AwsDataCatalog' will be used by default.\n s3_output : str, optional\n Amazon S3 path used for query execution.\n workgroup : str\n Athena workgroup. Primary by default.\n mode: str\n ``append`` (default), ``overwrite``, ``overwrite_partitions``.\n encryption : str, optional\n Valid values: [None, 'SSE_S3', 'SSE_KMS']. Notice: 'CSE_KMS' is not supported.\n kms_key : str, optional\n For SSE-KMS, this is the KMS key ARN or ID.\n boto3_session : boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if boto3_session receive None.\n s3_additional_kwargs: dict[str, Any], optional\n Forwarded to botocore requests.\n e.g. s3_additional_kwargs={'RequestPayer': 'requester'}\n additional_table_properties: dict[str, Any], optional\n Additional table properties.\n e.g. additional_table_properties={'write_target_data_file_size_bytes': '536870912'}\n\n https://docs.aws.amazon.com/athena/latest/ug/querying-iceberg-creating-tables.html#querying-iceberg-table-properties\n dtype: dict[str, str], optional\n Dictionary of columns names and Athena/Glue types to be casted.\n Useful when you have columns with undetermined or mixed data types.\n e.g. {'col name': 'bigint', 'col2 name': 'int'}\n catalog_id : str, optional\n The ID of the Data Catalog from which to retrieve Databases.\n If none is provided, the AWS account ID is used by default\n schema_evolution: bool, optional\n If ``True`` allows schema evolution for new columns or changes in column types.\n Columns missing from the DataFrame that are present in the Iceberg schema\n will throw an error unless ``fill_missing_columns_in_df`` is set to ``True``.\n Default is ``False``.\n fill_missing_columns_in_df: bool, optional\n If ``True``, fill columns that was missing in the DataFrame with ``NULL`` values.\n Default is ``True``.\n columns_comments: GlueTableSettings, optional\n Glue/Athena catalog: Settings for writing to the Glue table.\n Currently only the 'columns_comments' attribute is supported for this function.\n Columns comments can only be added with this function when creating a new table.\n\n Returns\n -------\n None\n\n Examples\n --------\n Insert into an existing Iceberg table\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.athena.to_iceberg(\n ... df=pd.DataFrame({'col': [1, 2, 3]}),\n ... database='my_database',\n ... table='my_table',\n ... temp_path='s3://bucket/temp/',\n ... 
)\n\n Create Iceberg table and insert data (table doesn't exist, requires table_location)\n\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> wr.athena.to_iceberg(\n ... df=pd.DataFrame({'col': [1, 2, 3]}),\n ... database='my_database',\n ... table='my_table2',\n ... table_location='s3://bucket/my_table2/',\n ... temp_path='s3://bucket/temp/',\n ... )\n\n \"\"\"\n wg_config: _WorkGroupConfig = _get_workgroup_config(session=boto3_session, workgroup=workgroup)\n temp_table: str = f\"temp_table_{uuid.uuid4().hex}\"\n\n _validate_args(\n df=df,\n temp_path=temp_path,\n wg_config=wg_config,\n mode=mode,\n partition_cols=partition_cols,\n merge_cols=merge_cols,\n )\n\n glue_table_settings = cast(\n GlueTableSettings,\n glue_table_settings if glue_table_settings else {},\n )\n\n try:\n # Create Iceberg table if it doesn't exist\n if not catalog.does_table_exist(\n database=database, table=table, boto3_session=boto3_session, catalog_id=catalog_id\n ):\n _create_iceberg_table(\n df=df,\n database=database,\n table=table,\n path=table_location,\n wg_config=wg_config,\n partition_cols=partition_cols,\n additional_table_properties=additional_table_properties,\n index=index,\n data_source=data_source,\n workgroup=workgroup,\n s3_output=s3_output,\n encryption=encryption,\n kms_key=kms_key,\n boto3_session=boto3_session,\n dtype=dtype,\n columns_comments=glue_table_settings.get(\"columns_comments\"),\n )\n else:\n schema_differences, catalog_cols = _determine_differences(\n df=df,\n database=database,\n table=table,\n index=index,\n partition_cols=partition_cols,\n boto3_session=boto3_session,\n dtype=dtype,\n catalog_id=catalog_id,\n )\n\n # Add missing columns to the DataFrame\n if fill_missing_columns_in_df and schema_differences[\"missing_columns\"]:\n for col_name, col_type in schema_differences[\"missing_columns\"].items():\n df[col_name] = None\n df[col_name] = df[col_name].astype(_data_types.athena2pandas(col_type))\n\n schema_differences[\"missing_columns\"] = {}\n\n # Ensure that the ordering of the DF is the same as in the catalog.\n # This is required for the INSERT command to work.\n df = df[catalog_cols]\n\n if schema_evolution is False and any([schema_differences[x] for x in schema_differences]): # type: ignore[literal-required]\n raise exceptions.InvalidArgumentValue(f\"Schema change detected: {schema_differences}\")\n\n _alter_iceberg_table(\n database=database,\n table=table,\n schema_changes=schema_differences,\n fill_missing_columns_in_df=fill_missing_columns_in_df,\n wg_config=wg_config,\n data_source=data_source,\n workgroup=workgroup,\n s3_output=s3_output,\n encryption=encryption,\n kms_key=kms_key,\n boto3_session=boto3_session,\n )\n\n # if mode == \"overwrite_partitions\", drop matched partitions\n if mode == \"overwrite_partitions\":\n delete_from_iceberg_table(\n df=df,\n database=database,\n table=table,\n merge_cols=partition_cols, # type: ignore[arg-type]\n temp_path=temp_path,\n keep_files=False,\n data_source=data_source,\n workgroup=workgroup,\n encryption=encryption,\n kms_key=kms_key,\n boto3_session=boto3_session,\n s3_additional_kwargs=s3_additional_kwargs,\n catalog_id=catalog_id,\n )\n # if mode == \"overwrite\", delete whole data from table (but not table itself)\n elif mode == \"overwrite\":\n delete_sql_statement = f\"DELETE FROM {table}\"\n delete_query_execution_id: str = _start_query_execution(\n sql=delete_sql_statement,\n workgroup=workgroup,\n wg_config=wg_config,\n database=database,\n data_source=data_source,\n encryption=encryption,\n 
kms_key=kms_key,\n boto3_session=boto3_session,\n )\n wait_query(query_execution_id=delete_query_execution_id, boto3_session=boto3_session)\n\n # Create temporary external table, write the results\n s3.to_parquet(\n df=df,\n path=temp_path or wg_config.s3_output,\n index=index,\n dataset=True,\n database=database,\n table=temp_table,\n boto3_session=boto3_session,\n s3_additional_kwargs=s3_additional_kwargs,\n dtype=dtype,\n catalog_id=catalog_id,\n glue_table_settings=glue_table_settings,\n )\n\n # Insert or merge into Iceberg table\n sql_statement: str\n if merge_cols:\n sql_statement = f\"\"\"\n MERGE INTO \"{database}\".\"{table}\" target\n USING \"{database}\".\"{temp_table}\" source\n ON {' AND '.join([f'target.\"{x}\" = source.\"{x}\"' for x in merge_cols])}\n WHEN MATCHED THEN\n UPDATE SET {', '.join([f'\"{x}\" = source.\"{x}\"' for x in df.columns])}\n WHEN NOT MATCHED THEN\n INSERT ({', '.join([f'\"{x}\"' for x in df.columns])})\n VALUES ({', '.join([f'source.\"{x}\"' for x in df.columns])})\n \"\"\"\n else:\n sql_statement = f\"\"\"\n INSERT INTO \"{database}\".\"{table}\" ({', '.join([f'\"{x}\"' for x in df.columns])})\n SELECT {', '.join([f'\"{x}\"' for x in df.columns])}\n FROM \"{database}\".\"{temp_table}\"\n \"\"\"\n\n query_execution_id: str = _start_query_execution(\n sql=sql_statement,\n workgroup=workgroup,\n wg_config=wg_config,\n database=database,\n data_source=data_source,\n s3_output=s3_output,\n encryption=encryption,\n kms_key=kms_key,\n boto3_session=boto3_session,\n )\n wait_query(query_execution_id=query_execution_id, boto3_session=boto3_session)\n\n except Exception as ex:\n _logger.error(ex)\n\n raise\n finally:\n catalog.delete_table_if_exists(\n database=database, table=temp_table, boto3_session=boto3_session, catalog_id=catalog_id\n )\n\n if keep_files is False:\n s3.delete_objects(\n path=temp_path or wg_config.s3_output, # type: ignore[arg-type]\n boto3_session=boto3_session,\n s3_additional_kwargs=s3_additional_kwargs,\n )\n\n\n@apply_configs\n@_utils.validate_distributed_kwargs(\n unsupported_kwargs=[\"boto3_session\", \"s3_additional_kwargs\"],\n)\ndef delete_from_iceberg_table(\n df: pd.DataFrame,\n database: str,\n table: str,\n merge_cols: list[str],\n temp_path: str | None = None,\n keep_files: bool = True,\n data_source: str | None = None,\n workgroup: str = \"primary\",\n encryption: str | None = None,\n kms_key: str | None = None,\n boto3_session: boto3.Session | None = None,\n s3_additional_kwargs: dict[str, Any] | None = None,\n catalog_id: str | None = None,\n) -> None:\n \"\"\"\n Delete rows from an Iceberg table.\n\n Creates temporary external table, writes staged files and then deletes any rows which match the contents of the temporary table.\n\n Parameters\n ----------\n df: pandas.DataFrame\n Pandas DataFrame containing the IDs of rows that are to be deleted from the Iceberg table.\n database: str\n Database name.\n table: str\n Table name.\n merge_cols: list[str]\n List of columns to be used to determine which rows of the Iceberg table should be deleted.\n\n `MERGE INTO <https://docs.aws.amazon.com/athena/latest/ug/merge-into-statement.html>`_\n temp_path: str, optional\n S3 path to temporarily store the DataFrame.\n keep_files: bool\n Whether staging files produced by Athena are retained. ``True`` by default.\n data_source: str, optional\n The AWS KMS key ID or alias used to encrypt the data.\n workgroup: str, optional\n Athena workgroup name.\n encryption: str, optional\n Valid values: [``None``, ``\"SSE_S3\"``, ``\"SSE_KMS\"``]. 
Notice: ``\"CSE_KMS\"`` is not supported.\n kms_key: str, optional\n For SSE-KMS, this is the KMS key ARN or ID.\n boto3_session: boto3.Session(), optional\n Boto3 Session. The default boto3 session will be used if ``boto3_session`` receive None.\n s3_additional_kwargs: Optional[Dict[str, Any]]\n Forwarded to botocore requests.\n e.g. ```s3_additional_kwargs={\"RequestPayer\": \"requester\"}```\n catalog_id: str, optional\n The ID of the Data Catalog which contains the database and table.\n If none is provided, the AWS account ID is used by default.\n\n Returns\n -------\n None\n\n Examples\n --------\n >>> import awswrangler as wr\n >>> import pandas as pd\n >>> df = pd.DataFrame({\"id\": [1, 2, 3], \"col\": [\"foo\", \"bar\", \"baz\"]})\n >>> wr.athena.to_iceberg(\n ... df=df,\n ... database=\"my_database\",\n ... table=\"my_table\",\n ... temp_path=\"s3://bucket/temp/\",\n ... )\n >>> df_delete = pd.DataFrame({\"id\": [1, 3]})\n >>> wr.athena.delete_from_iceberg_table(\n ... df=df_delete,\n ... database=\"my_database\",\n ... table=\"my_table\",\n ... merge_cols=[\"id\"],\n ... )\n >>> wr.athena.read_sql_table(table=\"my_table\", database=\"my_database\")\n id col\n 0 2 bar\n \"\"\"\n if df.empty is True:\n raise exceptions.EmptyDataFrame(\"DataFrame cannot be empty.\")\n\n if not merge_cols:\n raise exceptions.InvalidArgumentValue(\"Merge columns must be specified.\")\n\n wg_config: _WorkGroupConfig = _get_workgroup_config(session=boto3_session, workgroup=workgroup)\n temp_table: str = f\"temp_table_{uuid.uuid4().hex}\"\n\n if not temp_path and not wg_config.s3_output:\n raise exceptions.InvalidArgumentCombination(\n \"Either path or workgroup path must be specified to store the temporary results.\"\n )\n\n if not catalog.does_table_exist(database=database, table=table, boto3_session=boto3_session, catalog_id=catalog_id):\n raise exceptions.InvalidTable(f\"Table {table} does not exist in database {database}.\")\n\n df = df[merge_cols].drop_duplicates(ignore_index=True)\n\n try:\n # Create temporary external table, write the results\n s3.to_parquet(\n df=df,\n path=temp_path or wg_config.s3_output,\n dataset=True,\n database=database,\n table=temp_table,\n boto3_session=boto3_session,\n s3_additional_kwargs=s3_additional_kwargs,\n catalog_id=catalog_id,\n index=False,\n )\n\n sql_statement = f\"\"\"\n MERGE INTO \"{database}\".\"{table}\" target\n USING \"{database}\".\"{temp_table}\" source\n ON {' AND '.join([f'target.\"{x}\" = source.\"{x}\"' for x in merge_cols])}\n WHEN MATCHED THEN\n DELETE\n \"\"\"\n\n query_execution_id: str = _start_query_execution(\n sql=sql_statement,\n workgroup=workgroup,\n wg_config=wg_config,\n database=database,\n data_source=data_source,\n encryption=encryption,\n kms_key=kms_key,\n boto3_session=boto3_session,\n )\n wait_query(query_execution_id=query_execution_id, boto3_session=boto3_session)\n\n except Exception as ex:\n _logger.error(ex)\n\n raise\n\n finally:\n catalog.delete_table_if_exists(\n database=database, table=temp_table, boto3_session=boto3_session, catalog_id=catalog_id\n )\n\n if keep_files is False:\n s3.delete_objects(\n path=temp_path or wg_config.s3_output, # type: ignore[arg-type]\n boto3_session=boto3_session,\n s3_additional_kwargs=s3_additional_kwargs,\n )\n", "path": "awswrangler/athena/_write_iceberg.py" } ]
diff --git a/awswrangler/athena/_write_iceberg.py b/awswrangler/athena/_write_iceberg.py index 28eae686b..22fdf95c9 100644 --- a/awswrangler/athena/_write_iceberg.py +++ b/awswrangler/athena/_write_iceberg.py @@ -483,6 +483,7 @@ def to_iceberg( s3.to_parquet( df=df, path=temp_path or wg_config.s3_output, + index=index, dataset=True, database=database, table=temp_table,
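The one-line fix forwards the caller's `index` argument into the `s3.to_parquet` call that writes the staging table, keeping the staging schema consistent with the `index=True` handling elsewhere in the function. A minimal sketch of a call this change affects; the database, table, and bucket names are placeholders:

```python
import awswrangler as wr
import pandas as pd

df = pd.DataFrame(
    {"value": [1.0, 2.0]},
    index=pd.Index(["a", "b"], name="measure_id"),
)

# With the patch, the "measure_id" index column is written to the temp
# staging table as well, instead of being silently dropped there.
wr.athena.to_iceberg(
    df=df,
    database="my_database",          # placeholder
    table="my_table",                # placeholder
    temp_path="s3://bucket/temp/",   # placeholder
    index=True,
)
```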